diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 19dd4d9889c5..000000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,79 +0,0 @@ -version: 2.1 - -workflows: - circleci_tests: - # Note: all end-to-end tests have now been moved off CircleCI. - - jobs: - - setup_and_typescript_tests - -var_for_docker_image: &docker_image circleci/python:3.7.10-browsers - -anchor_for_job_defaults: &job_defaults - working_directory: /home/circleci/oppia - docker: - - image: *docker_image - -commands: - merge_target_branch: - description: "Merge to target branch" - steps: - - run: - name: Merge to target branch - command: | - if [[ -n ${CIRCLE_PULL_REQUEST} ]] - then - - git config --global user.email "$( git log --format='%ae' $CIRCLE_SHA1^! )" - git config --global user.name "$( git log --format='%an' $CIRCLE_SHA1^! )" - - regexp="[[:digit:]]\+$" - PR_NUMBER=`echo $CIRCLE_PULL_REQUEST | grep -o $regexp` - - curl -L "https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64" -o jq - chmod +x jq - - url="https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pulls/$PR_NUMBER?access_token=$GITHUB_TOKEN" - - target_branch=$( - curl "$url" | ./jq '.base.ref' | tr -d '"' - ) - - rm jq - if [[ "$target_branch" == null ]] - then - git pull git@github.com:oppia/oppia.git develop --no-edit - else - git pull origin $target_branch --no-edit - fi - fi - -jobs: - setup_and_typescript_tests: - <<: *job_defaults - steps: - - checkout - - merge_target_branch - - run: - name: Setup python by installing wheel - command: pip install wheel==0.35.0 - - run: - name: Install dependencies - command: python -m scripts.install_third_party_libs - - run: - name: Check that all e2e test files are captured in protractor.conf.js - command: python -m scripts.check_e2e_tests_are_captured_in_ci - - run: - name: Run typescript tests - command: | - python -m scripts.typescript_checks - - run: - name: Run typescript tests in strict mode - command: 
| - python -m scripts.typescript_checks --strict_checks - -notify: - webhooks: - # A list of hook hashes, containing the url field - # gitter hook - - url: https://webhooks.gitter.im/e/71ac71505d1d45161035 diff --git a/.coveragerc b/.coveragerc index cd049ad6c344..dd3a59767db8 100644 --- a/.coveragerc +++ b/.coveragerc @@ -8,6 +8,13 @@ omit = *core/tests/* *scripts/linters/test_files/* *__init__.py - # TODO(#13935): Remove python_utils from the list once the codebase is - # run under Python 3. - core/python_utils.py + +# The 'exclude_lines' is used to skip a particular clause in coverage. +# We exclude any line with a comment of “pragma: no cover” as this is the default option +# provided by the coverage library. +# We also exclude @overload, because it is used for defining multiple MyPy definitions +# of a function and cannot be tested by Python tests. +[report] +exclude_lines = + pragma: no cover + @overload diff --git a/.eslintignore b/.eslintignore index e27c1600bbef..f9eedf97ce99 100644 --- a/.eslintignore +++ b/.eslintignore @@ -1,10 +1,11 @@ core/templates/expressions/parser.js core/templates/google-analytics.initializer.ts backend_prod_files/* -core/tests/protractor.conf.js integrations/* integrations_dev/* assets/scripts/* third_party/* build/* *.min.js +dump.rdb +.direnv/* diff --git a/.eslintrc b/.eslintrc index 5038142197ad..9c1a1fbd2030 100644 --- a/.eslintrc +++ b/.eslintrc @@ -16,18 +16,21 @@ "overrides": [ { // We exclude test files from dependency injection format checks because - // they are never minified. - "files": ["*Spec.js", "*Spec.ts"], + // they are never minified. Also, we exclude no-loss-of-precision check as + // it is only relevant for non-test files, since when running + // frontend tests there is no loss of precision. 
+ "files": ["*Spec.js", "*Spec.ts", "*.spec.ts"], "rules": { - "angular/di": "off" + "angular/di": "off", + "no-loss-of-precision": "off" } }, { "files": ["*.js", "*.ts"], "excludedFiles": [ - "complete.spec.ts", - "student.spec.ts", - "teacher.spec.ts" + "complete.spec.ts", + "student.spec.ts", + "teacher.spec.ts" ], "rules": { "oppia/no-to-throw": "error" @@ -37,9 +40,9 @@ // We only run the e2e action checks on end-to-end test files, and // we exclude files that we haven't fixed yet. "files": [ - "core/tests/protractor/*.js", - "core/tests/protractor_utils/*.js", - "core/tests/protractor_desktop/*.js" + "core/tests/webdriverio/*.js", + "core/tests/webdriverio_utils/*.js", + "core/tests/webdriverio_desktop/*.js" ], "excludedFiles": [ "ClassroomPage.js", @@ -54,7 +57,6 @@ "LearnerDashboardPage.js", "LibraryPage.js", "ProfilePage.js", - "ProtractorConstants.js", "SkillEditorPage.js", "StoryEditorPage.js", "SubscriptionDashboardPage.js", @@ -110,22 +112,15 @@ } }, { - // We only run the protractor practices checks on protractor end-to-end test files. + // We only run the e2e practices checks on webdriverio + // end-to-end test files. 
"files": [ - "core/tests/protractor/*.js", - "core/tests/protractor_utils/*.js", - "core/tests/protractor_desktop/*.js" + "core/tests/webdriverio/*.js", + "core/tests/webdriverio_utils/*.js", + "core/tests/webdriverio_desktop/*.js" ], "rules": { - "oppia/protractor-practices": "error" - } - }, - { - "files": [ - "core/tests/protractor_utils/*.js", - ], - "rules": { - "oppia/check-element-selector-at-top": "error" + "oppia/e2e-practices": "error" } }, { @@ -138,12 +133,12 @@ { "files": ["*.js", "*.ts"], "excludedFiles": [ - "Polyfills.ts", - "ck-editor-copy-content.service.spec.ts", - "unit-test-utils.ajs.ts", - "mathjax.directive.ts", - "math-expression-content-editor.component.ts", - "core/tests/*" + "Polyfills.ts", + "ck-editor-copy-content.service.spec.ts", + "unit-test-utils.ajs.ts", + "mathjax.directive.ts", + "math-expression-content-editor.component.ts", + "core/tests/*" ], "rules": { "oppia/no-inner-html": "error" @@ -152,7 +147,7 @@ { "files": ["*.js", "*.ts"], "excludedFiles": [ - "core/tests/**" + "core/tests/**" ], "rules": { "oppia/no-relative-import": "error" @@ -252,7 +247,9 @@ "MemberExpression": 1, "SwitchCase": 1, "ignoredNodes": [ - "ConditionalExpression" + "ConditionalExpression", + // TODO(#14575): Remove once we have linting for class declaration. + "ClassDeclaration" ] } ], @@ -331,11 +328,7 @@ "never" ], "quotes": [ - "error", - "single", - { - "avoidEscape": true - } + "off" ], "quote-props": [ "error", @@ -350,6 +343,7 @@ // The following must be off so that we can enable // "@typescript-eslint/no-extra-semi". 
"no-extra-semi": "off", + "lines-between-class-members": "off", "semi-spacing": "error", "space-before-blocks": [ "error", @@ -456,6 +450,23 @@ "before": false, "after": true } - ] + ], + "@typescript-eslint/lines-between-class-members": [ + "error", + "always", + { + "exceptAfterSingleLine": true + } + ], + "@typescript-eslint/quotes": [ + "error", + "single", + { + "avoidEscape": true + } + ], + "no-dupe-class-members": "off", + "@typescript-eslint/no-dupe-class-members": ["error"], + "@typescript-eslint/no-namespace": ["error"] } } diff --git a/.gcloudignore b/.gcloudignore index f2b26473618f..258fd0f3f601 100644 --- a/.gcloudignore +++ b/.gcloudignore @@ -1,4 +1,4 @@ -# Various compiled, temporary, and hidden files +# Various compiled, temporary, and hidden files. *~ *.pyc *.pyo @@ -6,17 +6,30 @@ *.swo *.bak .* -# Typescript files in core -core/**.ts -# Typescript output log file -tsc_output_log.txt -# Python test files +__pycache__/ + +# Markdown files. +*.md + +# Typescript files in core and extension they are not needed as +# we compile webpack which contains these files. +core/**/*.ts +extensions/**/*.ts + +# Python test files. *_test.py -# Other folders to ignore -core/tests/ -node_modules/ -scripts/ -third_party/python3_libs/ + +# Other folders to ignore. +/backend_prod_files/webpack_bundles/ +/core/tests/ +/local_compiled_js_for_test/ +/node_modules/ +/scripts/ +/stubs/ +/third_party/python3_libs/ +/third_party/python_libs/ +/typings/ +/webpack_bundles/ # Some third_party static scripts are directly imported, namely: jquery, # jqueryui, angularjs, jqueryui-touch-punch, MathJax, code-mirror, # ui-codemirror, d3js, midi-js, ui-map, guppy, skulpt, math-expressions. 
@@ -34,11 +47,8 @@ third_party/static/bower-angular-translate-interpolation-messageformat-2.18.1/ third_party/static/bower-angular-translate-loader-partial-2.18.1/ third_party/static/bower-angular-translate-loader-static-files-2.18.1/ third_party/static/bower-angular-translate-storage-cookie-2.18.1/ -third_party/static/bower-material-1.1.19/ -third_party/python_libs/google/appengine/ -third_party/python_libs/google/net/ -third_party/python_libs/google/pyglib/ -third_party/python_libs/grpc/ +third_party/static/bower-material-1.1.19/layouts/ +third_party/static/bower-material-1.1.19/modules/ # CKEditor-4.12.1 plugins in the download from the CKEditor website include # only a11yhelp, about, clipboard, colordialog, copyformatting, dialog, div, # find, flash, forms, iframe, image, link, liststyle, magicline, pagebreak, diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e63300e966af..e298b32dc01b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -33,47 +33,52 @@ # files/folders are created in the following directories, these codeowners would # be superseded by the relevant codeowners mentioned elsewhere in this file. 
# (Reference: https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners) -/core/controllers/ @aks681 -/core/domain/ @DubeySandeep -/core/templates/components/ @srijanreddy98 -/core/templates/domain/ @kevintab95 -/core/templates/pages/ @srijanreddy98 -/core/templates/services/ @nithusha21 -/extensions/ @vojtechjelinek -/scripts/ @DubeySandeep +/core/controllers/ @oppia/lace-backend-reviewers +/core/domain/ @oppia/data-and-stability-reviewers +/core/templates/components/ @oppia/lace-frontend-reviewers +/core/templates/domain/ @oppia/lace-frontend-reviewers +/core/templates/pages/ @oppia/lace-frontend-reviewers +/core/templates/services/ @oppia/lace-frontend-reviewers +/core/templates/utility/ @oppia/lace-frontend-reviewers +/extensions/ @oppia/data-and-stability-reviewers +/scripts/ @oppia/dev-workflow-reviewers # Angular Migration team -/core/controllers/oppia_root*.py @srijanreddy98 @ashutoshc8101 -/core/templates/components/shared-component.module.ts @srijanreddy98 @ashutoshc8101 -/core/templates/services/loader.service*.ts @srijanreddy98 @ashutoshc8101 -/core/templates/components/code-mirror/ @srijanreddy98 @ashutoshc8101 -/core/templates/components/filter-fields/ @srijanreddy98 @ashutoshc8101 -/core/templates/components/oppia-angular-root.component.* @srijanreddy98 @ashutoshc8101 -/core/templates/modules/ @srijanreddy98 @ashutoshc8101 -/core/templates/pages/common-imports.ts @srijanreddy98 @ashutoshc8101 -/core/templates/pages/mock-ajs.ts @srijanreddy98 @ashutoshc8101 -/core/templates/pages/oppia-root/ @srijanreddy98 @ashutoshc8101 -/core/templates/i18n/ @srijanreddy98 @ashutoshc8101 -/core/templates/base-components/oppia-root.directive.ts @srijanreddy98 @ashutoshc8101 -/core/templates/base-components/oppia-root.directive.html @srijanreddy98 @ashutoshc8101 -/core/templates/services/angular-services.index.ts @srijanreddy98 @ashutoshc8101 -/core/templates/services/contextual/logger.service.ts @srijanreddy98 @ashutoshc8101 
-/core/templates/services/contextual/logger.service.spec.ts @srijanreddy98 @ashutoshc8101 -/core/templates/services/page-head.service*.ts @srijanreddy98 @ashutoshc8101 -/core/templates/services/i18n-language-code.service.ts @srijanreddy98 @ashutoshc8101 -/core/templates/services/loader.service*.ts @srijanreddy98 @ashutoshc8101 -/core/templates/services/question-validation.service*.ts @srijanreddy98 @ashutoshc8101 -/core/templates/services/request-interceptor.service*.ts @srijanreddy98 @ashutoshc8101 -/core/templates/services/UpgradedServices.ts @srijanreddy98 @ashutoshc8101 -/core/templates/hybrid-router-module-provider*.ts @srijanreddy98 @ashutoshc8101 - +/core/controllers/oppia_root*.py @oppia/angular-migration-reviewers +/core/templates/components/shared-component.module.ts @oppia/angular-migration-reviewers +/core/templates/services/loader.service*.ts @oppia/angular-migration-reviewers +/core/templates/components/code-mirror/ @oppia/angular-migration-reviewers +/core/templates/components/filter-fields/ @oppia/angular-migration-reviewers +/core/templates/components/oppia-angular-root.component.* @oppia/angular-migration-reviewers +/core/templates/modules/ @oppia/angular-migration-reviewers +/core/templates/pages/common-imports.ts @oppia/angular-migration-reviewers +/core/templates/pages/mock-ajs.ts @oppia/angular-migration-reviewers +/core/templates/pages/oppia-root/ @oppia/angular-migration-reviewers +/core/templates/pages/lightweight-oppia-root/ @oppia/angular-migration-reviewers +/core/templates/i18n/ @oppia/angular-migration-reviewers +/core/templates/base-components/oppia-root.directive.ts @oppia/angular-migration-reviewers +/core/templates/base-components/oppia-root.directive.html @oppia/angular-migration-reviewers +/core/templates/services/angular-services.index.ts @oppia/angular-migration-reviewers +/core/templates/services/contextual/logger.service.ts @oppia/angular-migration-reviewers +/core/templates/services/contextual/logger.service.spec.ts 
@oppia/angular-migration-reviewers +/core/templates/services/page-head.service*.ts @oppia/angular-migration-reviewers +/core/templates/services/i18n-language-code.service.ts @oppia/angular-migration-reviewers +/core/templates/services/question-validation.service*.ts @oppia/angular-migration-reviewers +/core/templates/services/request-interceptor.service*.ts @oppia/angular-migration-reviewers +/core/templates/services/UpgradedServices.ts @oppia/angular-migration-reviewers +/core/templates/hybrid-router-module-provider*.ts @oppia/angular-migration-reviewers +/core/templates/utility/hashes.ts @oppia/angular-migration-reviewers +/core/templates/utility/string-utility*.ts @oppia/angular-migration-reviewers +/proxy.conf.json @oppia/angular-migration-reviewers +/angular-template-style-url-replacer.webpack-loader.js @oppia/angular-migration-reviewers +/angular.json @oppia/angular-migration-reviewers +/src @oppia/frontend-infrastructure-reviewers # TS typing -/typings/ @vojtechjelinek -/tsconfig.json @vojtechjelinek -/tsconfig-strict.json @vojtechjelinek -/scripts/typescript_checks*.py @vojtechjelinek +/typings/ @oppia/data-and-stability-reviewers +/tsconfig*.json @oppia/data-and-stability-reviewers +/scripts/typescript_checks*.py @oppia/data-and-stability-reviewers # Answer classification team. @@ -88,256 +93,259 @@ # App Feedback Reporting project -# TODO(#14285): Replace @seanlip with @BenHenning for both. -/core/domain/app_feedback_report_*.py @seanlip -/core/controllers/incoming_app_feedback_report*.py @seanlip +/core/domain/app_feedback_report_*.py @oppia/web-android-compatibility-reviewers +/core/controllers/incoming_app_feedback_report*.py @oppia/web-android-compatibility-reviewers # Audio and Translation team. 
-/core/controllers/voice_artist*.py @DubeySandeep -/core/templates/pages/exploration-editor-page/translation-tab/ @DubeySandeep -/core/templates/services/audio-bar-status.service*.ts @DubeySandeep -/core/templates/services/audio-player.service.ts @DubeySandeep -/core/templates/services/autogenerated-audio-player.service.ts @DubeySandeep -/core/templates/services/generate-content-id.service*.ts @DubeySandeep -/core/templates/services/speech-synthesis-chunker.service*.ts @DubeySandeep -/core/templates/services/assets-backend-api.service*.ts @DubeySandeep +/core/controllers/voice_artist*.py @oppia/lace-backend-reviewers +/core/templates/domain/translation/ @oppia/lace-frontend-reviewers +/core/templates/pages/exploration-editor-page/translation-tab/ @oppia/lace-frontend-reviewers +/core/templates/services/audio-bar-status.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/audio-player.service.ts @oppia/lace-frontend-reviewers +/core/templates/services/autogenerated-audio-player.service.ts @oppia/lace-frontend-reviewers +/core/templates/services/generate-content-id.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/speech-synthesis-chunker.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/assets-backend-api.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/entity-translations.services.ts @oppia/lace-frontend-reviewers # Blog project -/core/domain/blog*.py @iamprayush @aks681 -/core/controllers/blog_dashboard*.py @aks681 -/core/controllers/blog_homepage*.py @aks681 -/core/templates/domain/blog/ @iamprayush @kevintab95 -/core/templates/pages/blog-dashboard-page/ @aks681 @iamprayush +/core/domain/blog*.py @oppia/lace-backend-reviewers +/core/controllers/blog_dashboard*.py @oppia/lace-backend-reviewers +/core/controllers/blog_homepage*.py @oppia/lace-backend-reviewers +/core/templates/domain/blog/ @oppia/lace-frontend-reviewers +/core/templates/pages/blog-dashboard-page/ 
@oppia/lace-frontend-reviewers +/core/templates/pages/blog-home-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/blog-post-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/blog-author-profile-page/ @oppia/lace-frontend-reviewers # Collection project. -/core/controllers/collection*.py @aks681 -/core/domain/collection*.py @aks681 -/core/templates/components/entity-creation-services/collection-creation.service*.ts @EricZLou -/core/templates/components/entity-creation-services/collection-creation-backend-api.service*.ts @EricZLou -/core/templates/domain/collection/ @EricZLou -/core/templates/pages/collection-editor-page/ @EricZLou -/core/templates/pages/collection-player-page/ @EricZLou +/core/controllers/collection*.py @oppia/lace-backend-reviewers +/core/domain/collection*.py @oppia/lace-backend-reviewers +/core/templates/components/entity-creation-services/collection-creation.service*.ts @oppia/lace-frontend-reviewers +/core/templates/components/entity-creation-services/collection-creation-backend-api.service*.ts @oppia/lace-frontend-reviewers +/core/templates/domain/collection/ @oppia/lace-frontend-reviewers +/core/templates/pages/collection-editor-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/collection-player-page/ @oppia/lace-frontend-reviewers # Contributor Dashboard project. 
/core/controllers/contributor_dashboard*.py @oppia/contributor-experience-team /core/domain/opportunity*.py @oppia/contributor-experience-team /core/domain/translation*.py @oppia/contributor-experience-team -/core/domain/voiceover_services*.py @oppia/contributor-experience-team -/core/storage/translation/ @vojtechjelinek /core/templates/domain/opportunity/ @oppia/contributor-experience-team /core/templates/pages/contributor-dashboard-page/ @oppia/contributor-experience-team /core/templates/pages/contributor-dashboard-admin-page/ @oppia/contributor-experience-team +/scripts/contributor_dashboard_debug.py @oppia/contributor-experience-team # Core documentation -# TODO(#14285): Re-add @BenHenning. -/AUTHORS @seanlip -/CHANGELOG @seanlip @vojtechjelinek -# TODO(#14285): Re-add @BenHenning. -/CONTRIBUTORS @seanlip -/LICENSE @seanlip -/NOTICE @seanlip -/core/templates/pages/about-page/ @DubeySandeep -# TODO(#14285): Re-add @BenHenning. -/core/templates/pages/about-page/about-page.constants.ts @seanlip -/core/templates/pages/license-page/ @vojtechjelinek -/core/templates/pages/license-page/license-page.component.html @seanlip -/core/templates/pages/privacy-page/ @DubeySandeep -# TODO(#14285): Re-add @BenHenning. -/core/templates/pages/privacy-page/privacy-page.component.html @seanlip -/core/templates/pages/privacy-page/privacy-page.component.ts @DubeySandeep -/core/templates/pages/terms-page/ @DubeySandeep -# TODO(#14285): Re-add @BenHenning. 
-/core/templates/pages/terms-page/terms-page.component.html @seanlip +/CHANGELOG @oppia/release-workflow-reviewers +/core/templates/pages/android-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/about-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/about-page/about-page.constants.ts @oppia/angular-migration-reviewers +/core/templates/pages/license-page/ @oppia/angular-migration-reviewers +/core/templates/pages/privacy-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/privacy-page/privacy-page.component.ts @oppia/lace-frontend-reviewers +/core/templates/pages/terms-page/ @oppia/lace-frontend-reviewers # Dashboard pages. -/core/controllers/creator_dashboard*.py @nithusha21 -/core/controllers/email_dashboard*.py @nithusha21 -/core/controllers/learner_dashboard*.py @nithusha21 -/core/controllers/learner_goals*.py @nithusha21 -/core/controllers/learner_playlist*.py @nithusha21 -/core/controllers/subscriptions*.py @nithusha21 -/core/domain/activity*.py @nithusha21 -/core/domain/learner_goals_services*.py @nithusha21 -/core/domain/learner_playlist_services*.py @nithusha21 -/core/domain/learner_progress*.py @nithusha21 -/core/domain/subscription_services*.py @nithusha21 -/core/templates/domain/creator_dashboard/ @nithusha21 -/core/templates/domain/email-dashboard/ @nithusha21 -/core/templates/domain/learner_dashboard/ @nithusha21 -/core/templates/pages/creator-dashboard-page/ @nithusha21 -/core/templates/pages/email-dashboard-pages/ @nithusha21 -/core/templates/pages/learner-dashboard-page/ @nithusha21 +/core/controllers/creator_dashboard*.py @oppia/lace-backend-reviewers +/core/controllers/email_dashboard*.py @oppia/lace-backend-reviewers +/core/controllers/learner_dashboard*.py @oppia/lace-backend-reviewers +/core/controllers/learner_goals*.py @oppia/lace-backend-reviewers +/core/controllers/learner_playlist*.py @oppia/lace-backend-reviewers +/core/controllers/subscriptions*.py @oppia/lace-backend-reviewers +/core/domain/activity*.py 
@oppia/lace-backend-reviewers +/core/domain/learner_goals_services*.py @oppia/lace-backend-reviewers +/core/domain/learner_playlist_services*.py @oppia/lace-backend-reviewers +/core/domain/learner_progress*.py @oppia/lace-backend-reviewers +/core/domain/subscription_services*.py @oppia/lace-backend-reviewers +/core/templates/domain/creator_dashboard/ @oppia/lace-frontend-reviewers +/core/templates/domain/email-dashboard/ @oppia/lace-frontend-reviewers +/core/templates/domain/learner_dashboard/ @oppia/lace-frontend-reviewers +/core/templates/pages/creator-dashboard-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/email-dashboard-pages/ @oppia/lace-frontend-reviewers +/core/templates/pages/learner-dashboard-page/ @oppia/lace-frontend-reviewers # Dev workflow team. -/.circleci/ @oppia/automated-qa-reviewers -/.eslintignore @Hudda @DubeySandeep -/.eslintrc @Hudda @DubeySandeep -/.htmllintrc @Hudda @DubeySandeep -/.coveragerc @Hudda @DubeySandeep -/.gitattributes @DubeySandeep -/.gitignore @DubeySandeep -/.isort.cfg @Hudda @DubeySandeep -/.pylintrc @Hudda @DubeySandeep -/.stylelintrc @Hudda @DubeySandeep -/.yarnrc @DubeySandeep -/core/templates/css/.stylelintrc @Hudda @DubeySandeep -/tox.ini @Hudda @DubeySandeep -/scripts/*.py @DubeySandeep -/scripts/check_e2e_tests_are_captured_in_ci*.py @DubeySandeep -/scripts/check_frontend_test_coverage*.py @DubeySandeep @nithusha21 -/scripts/create_topological_sort_of_all_services*.py @DubeySandeep -/scripts/install_prerequisites.sh @DubeySandeep -/scripts/linters/ @Hudda @DubeySandeep -/scripts/linters/codeowner_linter*.py @sajalasati @DubeySandeep -/scripts/linters/pylint_extensions*.py @sajalasati @DubeySandeep -/scripts/linters/python_linter*.py @sajalasati @DubeySandeep -/ubuntu_dockerfile @DubeySandeep +/.eslintignore @oppia/dev-workflow-reviewers +/.eslintrc @oppia/dev-workflow-reviewers +/.htmllintrc @oppia/dev-workflow-reviewers +/.coveragerc @oppia/dev-workflow-reviewers +/.gitattributes 
@oppia/dev-workflow-reviewers +/.gitignore @oppia/dev-workflow-reviewers +/.isort.cfg @oppia/dev-workflow-reviewers +/.pylintrc @oppia/dev-workflow-reviewers +/.stylelintrc @oppia/dev-workflow-reviewers +/.yarnrc @oppia/dev-workflow-reviewers +/tox.ini @oppia/dev-workflow-reviewers +/scripts/*.py @oppia/dev-workflow-reviewers +/scripts/check_e2e_tests_are_captured_in_ci*.py @oppia/automated-qa-reviewers +/scripts/check_frontend_test_coverage*.py @oppia/automated-qa-reviewers +/scripts/create_topological_sort_of_all_services*.py @oppia/dev-workflow-reviewers +/scripts/install_prerequisites.sh @oppia/dev-workflow-reviewers +/scripts/linters/ @oppia/dev-workflow-reviewers +/scripts/linters/codeowner_linter*.py @oppia/dev-workflow-reviewers +/scripts/linters/pylint_extensions*.py @oppia/dev-workflow-reviewers +/scripts/linters/python_linter*.py @oppia/dev-workflow-reviewers +/ubuntu_dockerfile @oppia/dev-workflow-reviewers # Exploration project. -/core/controllers/editor*.py @aks681 -/core/controllers/reader*.py @aks681 -/core/controllers/resources*.py @vojtechjelinek -/core/domain/exp*.py @aks681 -/core/domain/fs*.py @aks681 -/core/domain/param_domain*.py @aks681 -/core/domain/rating_services*.py @aks681 -/core/domain/state_domain*.py @aks681 -/core/domain/summary_services*.py @aks681 -/core/domain/value_generators_domain*.py @vojtechjelinek -/core/templates/components/entity-creation-services/exploration-creation.service*.ts @EricZLou -/core/templates/components/entity-creation-services/exploration-creation-backend-api.service*.ts @EricZLou -/core/templates/components/button-directives/hint-and-solution-buttons.component*.ts @kevintab95 -/core/templates/components/button-directives/hint-and-solution-buttons.component.html @kevintab95 -/core/templates/components/state-directives/ @kevintab95 -/core/templates/components/graph-services/ @kevintab95 -/core/templates/components/on-screen-keyboard/ @aks681 -/core/templates/components/version-diff-visualization/ @aks681 
-/core/templates/domain/editor/ @kevintab95 -/core/templates/domain/exploration/ @kevintab95 -/core/templates/domain/recommendations/ @kevintab95 -/core/templates/domain/state/ @kevintab95 -/core/templates/domain/summary/ @kevintab95 -/core/templates/domain/state_card/ @kevintab95 -/core/templates/pages/exploration-editor-page/ @EricZLou -/core/templates/pages/exploration-editor-page/history-tab/ @EricZLou -/core/templates/pages/exploration-editor-page/settings-tab/ @EricZLou -/core/templates/pages/exploration-player-page/ @kevintab95 -/core/templates/components/state-editor/ @EricZLou -/core/templates/services/compute-graph.service.ts @EricZLou -/core/templates/services/context.service*.ts @kevintab95 -/core/templates/services/editability.service*.ts @kevintab95 -/core/templates/services/exploration-features*.ts @kevintab95 -/core/templates/services/exploration-html-formatter.service*.ts @kevintab95 -/core/templates/services/image-upload-helper.service*.ts @kevintab95 -/core/templates/services/validators.service*.ts @EricZLou +/core/controllers/editor*.py @oppia/lace-backend-reviewers +/core/controllers/reader*.py @oppia/lace-backend-reviewers +/core/controllers/resources*.py @oppia/data-and-stability-reviewers +/core/domain/exp*.py @oppia/lace-backend-reviewers +/core/domain/fs*.py @oppia/lace-backend-reviewers +/core/domain/param_domain*.py @oppia/lace-backend-reviewers +/core/domain/rating_services*.py @oppia/lace-backend-reviewers +/core/domain/state_domain*.py @oppia/lace-backend-reviewers +/core/domain/summary_services*.py @oppia/lace-backend-reviewers +/core/domain/value_generators_domain*.py @oppia/data-and-stability-reviewers +/core/templates/components/entity-creation-services/exploration-creation.service*.ts @oppia/lace-frontend-reviewers +/core/templates/components/entity-creation-services/exploration-creation-backend-api.service*.ts @oppia/lace-frontend-reviewers +/core/templates/components/button-directives/hint-and-solution-buttons.component*.ts 
@oppia/lace-frontend-reviewers +/core/templates/components/button-directives/hint-and-solution-buttons.component.html @oppia/lace-frontend-reviewers +/core/templates/components/checkpoint-celebration-modal/ @oppia/lace-frontend-reviewers +/core/templates/components/state-directives/ @oppia/lace-frontend-reviewers +/core/templates/components/graph-services/ @oppia/lace-frontend-reviewers +/core/templates/components/on-screen-keyboard/ @oppia/lace-backend-reviewers +/core/templates/components/recommendations/ @oppia/lace-frontend-reviewers +/core/templates/components/save-pending-changes/ @oppia/lace-frontend-reviewers +/core/templates/components/stale-tab-info/ @oppia/lace-frontend-reviewers +/core/templates/components/unsaved-changes-status-info/ @oppia/lace-frontend-reviewers +/core/templates/components/version-diff-visualization/ @oppia/lace-frontend-reviewers +/core/templates/domain/editor/ @oppia/lace-frontend-reviewers +/core/templates/domain/entity_editor_browser_tabs_info/ @oppia/lace-frontend-reviewers +/core/templates/domain/exploration/ @oppia/lace-frontend-reviewers +/core/templates/domain/recommendations/ @oppia/lace-frontend-reviewers +/core/templates/domain/state/ @oppia/lace-frontend-reviewers +/core/templates/domain/summary/ @oppia/lace-frontend-reviewers +/core/templates/domain/state_card/ @oppia/lace-frontend-reviewers +/core/templates/pages/exploration-editor-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/exploration-editor-page/history-tab/ @oppia/lace-frontend-reviewers +/core/templates/pages/exploration-editor-page/settings-tab/ @oppia/lace-frontend-reviewers +/core/templates/pages/exploration-player-page/ @oppia/lace-frontend-reviewers +/core/templates/components/state-editor/ @oppia/lace-frontend-reviewers +/core/templates/services/blog-search.service.ts @oppia/lace-frontend-reviewers +/core/templates/services/compute-graph.service.ts @oppia/lace-frontend-reviewers +/core/templates/services/context.service*.ts 
@oppia/lace-frontend-reviewers +/core/templates/services/editability.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/exploration-features*.ts @oppia/lace-frontend-reviewers +/core/templates/services/exploration-html-formatter.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/favicon.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/image-upload-helper.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/staleness-detection.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/validators.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/yaml.service*.ts @oppia/lace-frontend-reviewers +/core/templates/components/copy-url/ @oppia/lace-frontend-reviewers # Forms pages. -/core/templates/components/forms/ @EricZLou -/core/templates/components/common-layout-directives/common-elements/confirmation-modal.template.html @srijanreddy98 -/core/templates/components/ck-editor-helpers/customize-rte-component-modal.template.html @DubeySandeep -/core/templates/services/schema-default-value.service*.ts @EricZLou -/core/templates/services/schema-form-submitted.service*.ts @EricZLou -/core/templates/services/schema-undefined-last-element.service*.ts @EricZLou +/core/templates/components/forms/ @oppia/lace-frontend-reviewers +/core/templates/components/common-layout-directives/common-elements/confirmation-modal.template.html @oppia/lace-frontend-reviewers +/core/templates/components/ck-editor-helpers/customize-rte-component-modal.template.html @oppia/lace-frontend-reviewers +/core/templates/services/schema-default-value.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/schema-form-submitted.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/schema-undefined-last-element.service*.ts @oppia/lace-frontend-reviewers # Google-app-engine configurations -/app_dev.yaml @nithusha21 @vojtechjelinek -/cron.yaml @nithusha21 @vojtechjelinek -/index.yaml 
@nithusha21 @vojtechjelinek -/queue.yaml @nithusha21 @vojtechjelinek +/app_dev.yaml @oppia/release-workflow-reviewers +/cron.yaml @oppia/release-workflow-reviewers +/index.yaml @oppia/release-workflow-reviewers +/queue.yaml @oppia/release-workflow-reviewers # Global components and filters. -/core/templates/components/angular-html-bind/ @srijanreddy98 -/core/templates/components/button-directives/* @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/alert-message.component*.ts @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/attribution-guide.component.html @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/attribution-guide.component*.ts @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/background-banner.component*.ts @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/background-banner.component.html @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/confirm-or-cancel-modal.controller*.ts @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/confirm-or-cancel-modal.component*.ts @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/lazy-loading.component.html @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/lazy-loading.component.ts @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/loading-dots.component.ts @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/loading-dots.component.html @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/promo-bar.component*.ts @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/promo-bar.component.html @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/sharing-links.component.html 
@srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/sharing-links.component*.ts @srijanreddy98 -/core/templates/components/common-layout-directives/common-elements/common-elements.module.ts @srijanreddy98 -/core/templates/components/profile-link-directives/ @srijanreddy98 -/core/templates/components/summary-tile/ @srijanreddy98 -/core/templates/directives/angular-html-bind.directive*.ts @srijanreddy98 -/core/templates/directives/directives.module.ts @srijanreddy98 -/core/templates/directives/focus-on.directive*.ts @srijanreddy98 -/core/templates/directives/headroom.directive.ts @srijanreddy98 -/core/templates/directives/ng-init.directive.ts @srijanreddy98 -/core/templates/domain/promo_bar/ @srijanreddy98 -/core/templates/filters/ @srijanreddy98 -/core/templates/services/attribution.service*.ts @srijanreddy98 -/core/templates/services/ngb-modal.service.ts @srijanreddy98 +/core/templates/components/interaction-display/ @oppia/angular-migration-reviewers +/core/templates/components/button-directives/* @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/alert-message.component*.ts @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/attribution-guide.component.html @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/attribution-guide.component*.ts @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/attribution-guide.component*.css @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/background-banner.*.ts @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/background-banner.component.html @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/confirm-or-cancel-modal.controller*.ts 
@oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/confirm-or-cancel-modal.component*.ts @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/lazy-loading.component.html @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/lazy-loading.component.ts @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/loading-dots.component.ts @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/loading-dots.component.html @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/promo-bar.component*.ts @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/promo-bar.component.html @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/sharing-links.component.html @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/sharing-links.component*.ts @oppia/angular-migration-reviewers +/core/templates/components/common-layout-directives/common-elements/common-elements.module.ts @oppia/angular-migration-reviewers +/core/templates/components/profile-link-directives/ @oppia/angular-migration-reviewers +/core/templates/components/summary-tile/ @oppia/angular-migration-reviewers +/core/templates/directives/angular-html-bind.directive*.ts @oppia/angular-migration-reviewers +/core/templates/directives/directives.module.ts @oppia/angular-migration-reviewers +/core/templates/directives/focus-on.directive*.ts @oppia/angular-migration-reviewers +/core/templates/directives/headroom.directive.ts @oppia/angular-migration-reviewers +/core/templates/directives/ng-init.directive.ts @oppia/angular-migration-reviewers +/core/templates/domain/promo_bar/ 
@oppia/angular-migration-reviewers +/core/templates/filters/ @oppia/angular-migration-reviewers +/core/templates/services/attribution.service*.ts @oppia/angular-migration-reviewers +/core/templates/services/ngb-modal.service.ts @oppia/angular-migration-reviewers # Global frontend services -/core/templates/services/alerts.service*.ts @srijanreddy98 -/core/templates/services/date-time-format.service*.ts @srijanreddy98 -/core/templates/services/debouncer.service*.ts @srijanreddy98 -/core/templates/services/extension-tag-assembler.service*.ts @srijanreddy98 -/core/templates/services/html-escaper.service*.ts @srijanreddy98 -/core/templates/services/id-generation.service*.ts @srijanreddy98 -/core/templates/services/keyboard-shortcut.service*.ts @jimbyo -/core/templates/services/local-storage.service*.ts @srijanreddy98 -/core/templates/services/nested-directives*.ts @srijanreddy98 -/core/templates/services/page-title.service*.ts @srijanreddy98 -/core/templates/services/prevent-page-unload-event.service*.ts @srijanreddy98 -/core/templates/services/promo-bar-backend-api.service*.ts @srijanreddy98 -/core/templates/services/site-analytics.service*.ts @srijanreddy98 -/core/templates/services/utils.service*.ts @srijanreddy98 -/core/templates/services/stateful/background-mask.service*.ts @srijanreddy98 -/core/templates/services/stateful/focus-manager.service*.ts @srijanreddy98 -/core/templates/services/contextual/device-info.service*.ts @srijanreddy98 -/core/templates/services/contextual/url.service*.ts @srijanreddy98 -/core/templates/services/contextual/window-dimensions.service*.ts @srijanreddy98 -/core/templates/services/contextual/window-ref.service*.ts @srijanreddy98 -/core/templates/services/internet-connectivity.service*.ts @srijanreddy98 -/core/templates/services/server-connection-backend-api.service*.ts @srijanreddy98 +/core/templates/services/alerts.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/date-time-format.service*.ts 
@oppia/frontend-infrastructure-reviewers +/core/templates/services/debouncer.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/extension-tag-assembler.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/html-escaper.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/id-generation.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/keyboard-shortcut.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/local-storage.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/nested-directives*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/page-title.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/prevent-page-unload-event.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/promo-bar-backend-api.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/site-analytics.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/utils.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/stateful/background-mask.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/stateful/focus-manager.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/contextual/device-info.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/contextual/url.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/contextual/window-dimensions.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/contextual/window-ref.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/internet-connectivity.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/server-connection-backend-api.service*.ts @oppia/frontend-infrastructure-reviewers 
+/core/templates/services/number-conversion.service*.ts @oppia/frontend-infrastructure-reviewers # Global stylesheet. -/core/templates/css/oppia.css @srijanreddy98 -/core/templates/css/oppia-material.css @srijanreddy98 +/core/templates/css/oppia.css @oppia/frontend-infrastructure-reviewers +/core/templates/css/oppia-material.css @oppia/frontend-infrastructure-reviewers # Interaction project. -/core/domain/customization_args_util*.py @iamprayush -/core/domain/expression_parser*.py @iamprayush -/core/domain/interaction_registry*.py @iamprayush -/core/domain/object_registry*.py @iamprayush -/core/domain/rules_registry*.py @aks681 -/core/domain/translatable_object_registry*.py @iamprayush -/core/templates/components/keyboard-shortcut-help/ @iamprayush -/core/templates/domain/objects/ @iamprayush -/core/templates/expressions/ @iamprayush @vojtechjelinek -/core/templates/services/code-normalizer.service*.ts @iamprayush -/core/templates/services/guppy-configuration.service.ts @iamprayush -/core/templates/services/guppy-initialization.service.ts @iamprayush -/core/templates/services/math-interactions.service.ts @iamprayush -/extensions/domain*.py @iamprayush -/extensions/interactions/ @iamprayush @vojtechjelinek -/extensions/objects/ @iamprayush @vojtechjelinek -/extensions/objects/templates/filepath-editor.component*.ts @kevintab95 -/extensions/objects/templates/filepath-editor.component.html @kevintab95 -/extensions/value_generators/ @iamprayush @vojtechjelinek -/extensions/__init__.py @iamprayush @vojtechjelinek +/core/domain/customization_args_util*.py @oppia/lace-frontend-reviewers +/core/domain/expression_parser*.py @oppia/lace-frontend-reviewers +/core/domain/interaction_registry*.py @oppia/lace-frontend-reviewers +/core/domain/object_registry*.py @oppia/lace-frontend-reviewers +/core/domain/rules_registry*.py @oppia/lace-backend-reviewers +/core/domain/translatable_object_registry*.py @oppia/lace-frontend-reviewers 
+/core/templates/components/keyboard-shortcut-help/ @oppia/lace-backend-reviewers +/core/templates/domain/objects/ @oppia/lace-backend-reviewers +/core/templates/expressions/ @oppia/lace-backend-reviewers +/core/templates/services/code-normalizer.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/guppy-configuration.service.ts @oppia/lace-frontend-reviewers +/core/templates/services/guppy-initialization.service.ts @oppia/lace-frontend-reviewers +/core/templates/services/math-interactions.service.ts @oppia/lace-frontend-reviewers +/extensions/domain*.py @oppia/lace-frontend-reviewers +/extensions/interactions/ @oppia/lace-backend-reviewers +/extensions/objects/ @oppia/lace-backend-reviewers +/extensions/objects/templates/filepath-editor.component*.ts @oppia/lace-frontend-reviewers +/extensions/objects/templates/filepath-editor.component.html @oppia/lace-frontend-reviewers +/extensions/value_generators/ @oppia/lace-backend-reviewers +/extensions/__init__.py @oppia/lace-frontend-reviewers # Lesson Analytics team. 
@@ -351,9 +359,8 @@ /core/domain/playthrough_issue_registry*.py @oppia/learner-analytics-reviewers /core/domain/stats*.py @oppia/learner-analytics-reviewers /core/domain/visualization_registry*.py @oppia/learner-analytics-reviewers -/core/templates/components/common-layout-directives/common-elements/answer-content-modal.controller*.ts @oppia/learner-analytics-reviewers -/core/templates/components/common-layout-directives/common-elements/answer-content-modal.template.html @oppia/learner-analytics-reviewers -/core/templates/components/common-layout-directives/common-elements/confirmation-modal.template.html @oppia/learner-analytics-reviewers +/core/templates/components/common-layout-directives/common-elements/answer-content-modal.component*.ts @oppia/learner-analytics-reviewers +/core/templates/components/common-layout-directives/common-elements/answer-content-modal.component.html @oppia/learner-analytics-reviewers /core/templates/components/statistics-directives/ @oppia/learner-analytics-reviewers /core/templates/domain/statistics/ @oppia/learner-analytics-reviewers /core/templates/domain/improvements/ @oppia/learner-analytics-reviewers @@ -376,289 +383,294 @@ # Library page. 
-/core/controllers/library*.py @aks681 -/core/domain/recommendations*.py @aks681 -/core/domain/search_services*.py @aks681 -/core/templates/pages/library-page/ @kevintab95 -/core/templates/services/construct-translation-ids.service*.ts @kevintab95 -/core/templates/services/search-backend-api.service*.ts @kevintab95 -/core/templates/services/search.service*.ts @kevintab95 +/core/controllers/library*.py @oppia/lace-backend-reviewers +/core/domain/recommendations*.py @oppia/lace-backend-reviewers +/core/domain/search_services*.py @oppia/lace-backend-reviewers +/core/templates/pages/library-page/ @oppia/lace-frontend-reviewers +/core/templates/services/construct-translation-ids.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/search-backend-api.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/search.service*.ts @oppia/lace-frontend-reviewers # Navigation bar project. -/core/templates/components/button-directives/create-activity-button.component.ts @nithusha21 -/core/templates/components/button-directives/create-activity-button.component.html @nithusha21 -/core/templates/components/common-layout-directives/navigation-bars/ @nithusha21 -/core/templates/services/bottom-navbar-status.service*.ts @nithusha21 -/core/templates/services/navigation.service*.ts @nithusha21 -/core/templates/services/sidebar-status.service*.ts @nithusha21 +/core/templates/components/button-directives/create-activity-button.component.ts @oppia/lace-frontend-reviewers +/core/templates/components/button-directives/create-activity-button.component.html @oppia/lace-frontend-reviewers +/core/templates/components/common-layout-directives/navigation-bars/ @oppia/lace-frontend-reviewers +/core/templates/services/bottom-navbar-status.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/navigation.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/sidebar-status.service*.ts @oppia/lace-frontend-reviewers # New structures team. 
# Question project. -/core/controllers/practice_sessions*.py @aks681 -/core/controllers/question*.py @aks681 -/core/controllers/review_tests*.py @aks681 -/core/domain/classroom_domain.py @aks681 -/core/domain/classroom_services*.py @aks681 -/core/domain/question*.py @aks681 -/core/domain/rights_domain*.py @aks681 -/core/templates/components/concept-card/ @EricZLou -/core/templates/components/score-ring/ @EricZLou -/core/templates/components/skill-mastery/ @EricZLou -/core/templates/components/skills-mastery-list/ @EricZLou -/core/templates/domain/question/ @EricZLou -/core/templates/domain/review_test/ @kevintab95 -/core/templates/pages/practice-session-page/ @kevintab95 -/core/templates/pages/review-test-page/ @kevintab95 -/core/templates/services/questions-list.service*.ts @kevintab95 -/core/templates/components/question-directives/ @kevintab95 +/core/controllers/practice_sessions*.py @oppia/lace-backend-reviewers +/core/controllers/question*.py @oppia/lace-backend-reviewers +/core/controllers/review_tests*.py @oppia/lace-backend-reviewers +/core/domain/classroom*.py @oppia/lace-backend-reviewers +/core/controllers/diagnostic_test_player*.py @oppia/lace-backend-reviewers +/core/domain/classroom_services*.py @oppia/lace-backend-reviewers +/core/domain/question*.py @oppia/lace-backend-reviewers +/core/domain/rights_domain*.py @oppia/lace-backend-reviewers +/core/templates/components/concept-card/ @oppia/lace-frontend-reviewers +/core/templates/components/score-ring/ @oppia/lace-frontend-reviewers +/core/templates/components/skill-mastery/ @oppia/lace-frontend-reviewers +/core/templates/components/skills-mastery-list/ @oppia/lace-frontend-reviewers +/core/templates/domain/question/ @oppia/lace-frontend-reviewers +/core/templates/domain/review_test/ @oppia/lace-frontend-reviewers +/core/templates/pages/practice-session-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/review-test-page/ @oppia/lace-frontend-reviewers 
+/core/templates/services/questions-list.service*.ts @oppia/lace-frontend-reviewers +/core/templates/components/question-directives/ @oppia/lace-frontend-reviewers # Readme -/core/README.md @srijanreddy98 -/core/templates/css/README.md @srijanreddy98 -/extensions/README.md @srijanreddy98 -/scripts/README.md @srijanreddy98 +/core/README.md @oppia/data-and-stability-reviewers +/core/templates/css/README.md @oppia/frontend-infrastructure-reviewers +/extensions/README.md @oppia/data-and-stability-reviewers +/scripts/README.md @oppia/data-and-stability-reviewers # Skill project. -/core/controllers/concept_card_viewer*.py @aks681 -/core/controllers/skill*.py @aks681 -/core/domain/skill*.py @aks681 -/core/templates/components/entity-creation-services/skill-creation.service*.ts @kevintab95 -/core/templates/components/question-difficulty-selector/ @aks681 -/core/templates/components/review-material-editor/ @kevintab95 -/core/templates/components/rubrics-editor/ @kevintab95 -/core/templates/components/skill-selector/ @kevintab95 -/core/templates/domain/skill/ @kevintab95 -/core/templates/pages/skill-editor-page/ @kevintab95 +/core/controllers/concept_card_viewer*.py @oppia/lace-backend-reviewers +/core/controllers/skill*.py @oppia/lace-backend-reviewers +/core/domain/skill*.py @oppia/lace-backend-reviewers +/core/templates/components/entity-creation-services/skill-creation.service*.ts @oppia/lace-frontend-reviewers +/core/templates/components/question-difficulty-selector/ @oppia/lace-frontend-reviewers +/core/templates/components/review-material-editor/ @oppia/lace-frontend-reviewers +/core/templates/components/rubrics-editor/ @oppia/lace-frontend-reviewers +/core/templates/components/skill-selector/ @oppia/lace-frontend-reviewers +/core/templates/domain/skill/ @oppia/lace-frontend-reviewers +/core/templates/pages/skill-editor-page/ @oppia/lace-frontend-reviewers # Story project. 
-/core/controllers/story*.py @aks681 -/core/domain/story*.py @aks681 -/core/templates/components/entity-creation-services/story-creation.service*.ts @EricZLou -/core/templates/domain/story/ @kevintab95 -/core/templates/domain/story_viewer/ @kevintab95 -/core/templates/pages/story-editor-page/ @EricZLou -/core/templates/pages/story-viewer-page/ @kevintab95 +/core/controllers/story*.py @oppia/lace-backend-reviewers +/core/domain/story*.py @oppia/lace-backend-reviewers +/core/templates/components/entity-creation-services/story-creation.service*.ts @oppia/lace-frontend-reviewers +/core/templates/domain/story/ @oppia/lace-frontend-reviewers +/core/templates/domain/story_viewer/ @oppia/lace-frontend-reviewers +/core/templates/pages/story-editor-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/story-viewer-page/ @oppia/lace-frontend-reviewers # Topic project. # Instead of * we have used _* to avoid topics_and_skills_dashboard related files. -/core/controllers/classroom*.py @aks681 -/core/controllers/topic_*.py @aks681 -/core/domain/subtopic_page_domain*.py @aks681 -/core/domain/subtopic_page_services*.py @aks681 -/core/domain/topic*.py @aks681 -/core/templates/components/entity-creation-services/topic-creation.service*.ts @EricZLou -/core/templates/domain/classroom/ @aks681 @kevintab95 -/core/templates/domain/subtopic_viewer/ @aks681 @kevintab95 -/core/templates/domain/topic/ @aks681 @kevintab95 -/core/templates/domain/topic_viewer @aks681 @kevintab95 -/core/templates/pages/classroom-page/ @aks681 @kevintab95 -/core/templates/pages/subtopic-viewer-page/ @aks681 @kevintab95 -/core/templates/pages/topic-editor-page/ @aks681 @kevintab95 -/core/templates/pages/topic-viewer-page/ @aks681 @kevintab95 +/core/controllers/classroom*.py @oppia/lace-backend-reviewers +/core/controllers/topic_*.py @oppia/lace-backend-reviewers +/core/domain/subtopic_page_domain*.py @oppia/lace-backend-reviewers +/core/domain/subtopic_page_services*.py @oppia/lace-backend-reviewers 
+/core/domain/topic*.py @oppia/lace-backend-reviewers +/core/templates/components/entity-creation-services/topic-creation.service*.ts @oppia/lace-frontend-reviewers +/core/templates/domain/classroom/ @oppia/lace-frontend-reviewers +/core/templates/domain/subtopic_viewer/ @oppia/lace-frontend-reviewers +/core/templates/domain/topic/ @oppia/lace-frontend-reviewers +/core/templates/domain/topic_viewer @oppia/lace-frontend-reviewers +/core/templates/pages/classroom-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/diagnostic-test-player-page @oppia/lace-frontend-reviewers +/core/templates/pages/subtopic-viewer-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/topic-editor-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/topic-viewer-page/ @oppia/lace-frontend-reviewers # Topics and skills dashboard project. -/core/controllers/subtopic_viewer*.py @aks681 -/core/controllers/topics_and_skills_dashboard*.py @aks681 -/core/templates/domain/topics_and_skills_dashboard/ @aks681 @kevintab95 -/core/templates/pages/topics-and-skills-dashboard-page/ @aks681 @kevintab95 - +/core/controllers/subtopic_viewer*.py @oppia/lace-backend-reviewers +/core/controllers/topics_and_skills_dashboard*.py @oppia/lace-backend-reviewers +/core/templates/domain/topics_and_skills_dashboard/ @oppia/lace-frontend-reviewers +/core/templates/pages/topics-and-skills-dashboard-page/ @oppia/lace-frontend-reviewers + +# Learner group project. 
+/core/controllers/learner_group*.py @oppia/lace-backend-reviewers +/core/domain/learner_group*.py @oppia/lace-backend-reviewers +/core/templates/domain/learner_group/ @oppia/lace-frontend-reviewers +/core/templates/pages/facilitator-dashboard-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/learner-group-pages/ @oppia/lace-frontend-reviewers # Beam jobs -/setup.py @vojtechjelinek @brianrodri -/MANIFEST.in @vojtechjelinek @brianrodri -/core/jobs/ @vojtechjelinek -/core/jobs/transforms @vojtechjelinek -/core/jobs/types @vojtechjelinek -/core/templates/domain/jobs/ @vojtechjelinek -/core/controllers/beam_jobs*.py @brianrodri +/setup*.py @oppia/data-and-stability-reviewers +/MANIFEST.in @oppia/data-and-stability-reviewers +/core/jobs/ @oppia/data-and-stability-reviewers +/core/jobs/transforms @oppia/data-and-stability-reviewers +/core/jobs/types @oppia/data-and-stability-reviewers +/core/templates/domain/jobs/ @oppia/data-and-stability-reviewers +/core/controllers/beam_jobs*.py @oppia/data-and-stability-reviewers +/core/domain/beam_job*.py @oppia/data-and-stability-reviewers # Infrastructure. 
-/core/controllers/cron*.py @nithusha21 -/core/domain/cron_services*.py @nithusha21 -/main*.py @DubeySandeep @nithusha21 -/core/feconf.py @DubeySandeep @nithusha21 @seanlip -/core/constants*.py @nithusha21 -/assets/constants.ts @nithusha21 -/core/controllers/tasks*.py @aks681 -/core/domain/beam_job*.py @vojtechjelinek -/core/domain/email*.py @aks681 -/core/domain/image_service*.py @DubeySandeep -/core/platform/ @vojtechjelinek -/core/templates/App*.ts @srijanreddy98 -/core/templates/app.constants.ts @srijanreddy98 -/core/templates/app.constants.ajs.ts @srijanreddy98 -/core/templates/combined-tests.spec.ts @srijanreddy98 -/core/templates/pages/interaction-specs.constants.ajs.ts @nithusha21 -/core/templates/pages/interaction-specs.constants.ts @nithusha21 -/core/templates/pages/login-page/ @vojtechjelinek -/core/templates/pages/logout-page/ @vojtechjelinek -/core/templates/app-events/ @srijanreddy98 -/core/templates/services/app.service*.ts @srijanreddy98 -/core/templates/services/contextual/document-attribute-customization.service*.ts @srijanreddy98 -/core/templates/services/contextual/meta-tag-customization.service*.ts @srijanreddy98 -/core/templates/services/interaction-rules-registry.service*.ts @DubeySandeep -/core/templates/services/interaction-specs.service*.ts @DubeySandeep -/core/templates/services/translation-file-hash-loader-backend-api.service.ts @nithusha21 -/redis.conf @seanlip @vojtechjelinek -/.firebase.json @seanlip @vojtechjelinek +/core/controllers/cron*.py @oppia/data-and-stability-reviewers +/core/domain/cron_services*.py @oppia/data-and-stability-reviewers +/main*.py @oppia/data-and-stability-reviewers +/core/feconf*.py @oppia/data-and-stability-reviewers +/core/constants*.py @oppia/data-and-stability-reviewers +/assets/constants.ts @oppia/data-and-stability-reviewers +/core/controllers/tasks*.py @oppia/lace-backend-reviewers +/core/domain/email*.py @oppia/lace-backend-reviewers +/core/domain/image_service*.py @oppia/lace-backend-reviewers 
+/core/platform/ @oppia/data-and-stability-reviewers +/core/templates/App*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/app.constants.ts @oppia/frontend-infrastructure-reviewers +/core/templates/app.constants.ajs.ts @oppia/frontend-infrastructure-reviewers +/core/templates/combined-tests.spec.ts @oppia/frontend-infrastructure-reviewers +/core/templates/pages/interaction-specs.constants.ajs.ts @oppia/frontend-infrastructure-reviewers +/core/templates/pages/interaction-specs.constants.ts @oppia/frontend-infrastructure-reviewers +/core/templates/pages/login-page/ @oppia/frontend-infrastructure-reviewers +/core/templates/pages/logout-page/ @oppia/frontend-infrastructure-reviewers +/core/templates/app-events/ @oppia/frontend-infrastructure-reviewers +/core/templates/services/app.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/contextual/document-attribute-customization.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/contextual/meta-tag-customization.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/interaction-rules-registry.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/interaction-specs.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/translation-file-hash-loader-backend-api.service.ts @oppia/angular-migration-reviewers +/redis.conf @oppia/data-and-stability-reviewers +/.firebase.json @oppia/data-and-stability-reviewers # Miscellaneous. 
-/__init__.py @nithusha21 -/core/__init__.py @nithusha21 -/core/controllers/__init__.py @nithusha21 -/core/domain/__init__.py @nithusha21 -/core/domain/config*.py @DubeySandeep -/core/domain/change_domain.py @DubeySandeep -/core/templates/tests/ @nithusha21 -/core/templates/domain/utilities/ @DubeySandeep -/core/templates/Polyfills.ts @srijanreddy98 -/extensions/extensions.module.ts @vojtechjelinek -/core/python_utils*.py @DubeySandeep -/core/schema_utils*.py @DubeySandeep -/core/utils*.py @aks681 +/__init__.py @oppia/data-and-stability-reviewers +/core/__init__.py @oppia/data-and-stability-reviewers +/core/controllers/__init__.py @oppia/data-and-stability-reviewers +/core/domain/__init__.py @oppia/data-and-stability-reviewers +/core/domain/config*.py @oppia/lace-backend-reviewers +/core/domain/change_domain*.py @oppia/lace-backend-reviewers +/core/templates/tests/ @oppia/angular-migration-reviewers +/core/templates/domain/utilities/ @oppia/frontend-infrastructure-reviewers +/core/templates/Polyfills.ts @oppia/frontend-infrastructure-reviewers +/extensions/extensions.module.ts @oppia/frontend-infrastructure-reviewers +/core/schema_utils*.py @oppia/data-and-stability-reviewers +/core/utils*.py @oppia/lace-backend-reviewers +/.rtlcssrc @oppia/lace-frontend-reviewers # Python typing -/mypy.ini @hardikkat24 @vojtechjelinek -/mypy_imports.py @hardikkat24 @vojtechjelinek -/mypy_requirements.txt @hardikkat24 @vojtechjelinek +/mypy.ini @oppia/mypy-reviewers +/mypy_imports*.py @oppia/mypy-reviewers +/mypy_requirements.txt @oppia/mypy-reviewers /stubs/ @oppia/mypy-reviewers /scripts/run_mypy_checks*.py @oppia/mypy-reviewers # Restricted pages. 
-/core/controllers/admin*.py @DubeySandeep @nithusha21 -/core/controllers/blog_admin*.py @DubeySandeep -/core/controllers/moderator*.py @DubeySandeep -/core/controllers/recent_commits*.py @DubeySandeep -/core/domain/moderator_services*.py @DubeySandeep -/core/templates/domain/admin/ @DubeySandeep @nithusha21 -/core/templates/domain/blog-admin/ @DubeySandeep -/core/templates/pages/admin-page/ @DubeySandeep @nithusha21 -/core/templates/pages/moderator-page/ @DubeySandeep @nithusha21 -/core/templates/pages/blog-admin-page/ @DubeySandeep @nithusha21 +/core/controllers/admin*.py @oppia/data-and-stability-reviewers +/core/controllers/blog_admin*.py @oppia/lace-backend-reviewers +/core/controllers/moderator*.py @oppia/data-and-stability-reviewers +/core/controllers/recent_commits*.py @oppia/data-and-stability-reviewers +/core/domain/moderator_services*.py @oppia/data-and-stability-reviewers +/core/templates/domain/admin/ @oppia/lace-frontend-reviewers +/core/templates/domain/blog-admin/ @oppia/lace-frontend-reviewers +/core/templates/pages/admin-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/moderator-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/blog-admin-page/ @oppia/lace-frontend-reviewers +/core/templates/pages/classroom-admin-page/ @oppia/lace-frontend-reviewers # Release team. -/.gcloudignore @nithusha21 @vojtechjelinek -/core/controllers/release_coordinator*.py @nithusha21 @vojtechjelinek -/core/templates/pages/release-coordinator-page/ @nithusha21 @vojtechjelinek -/scripts/release_scripts/ @nithusha21 @vojtechjelinek -/assets/release_constants.json @nithusha21 @vojtechjelinek +/.gcloudignore @oppia/release-workflow-reviewers +/core/controllers/release_coordinator*.py @oppia/release-workflow-reviewers +/core/templates/pages/release-coordinator-page/ @oppia/release-workflow-reviewers +/scripts/release_scripts/ @oppia/release-workflow-reviewers +/assets/release_constants.json @oppia/release-workflow-reviewers # Rich text editor team. 
-/core/templates/components/ck-editor-helpers/ck-editor-4-rte.component.html @aks681 -/core/templates/components/ck-editor-helpers/ck-editor-4-rte.component.ts @aks681 -/core/templates/components/ck-editor-helpers/ckeditor4.module.ts @aks681 -/core/templates/directives/mathjax-bind.directive.ts @aks681 -/core/templates/directives/mathjax.directive.ts @aks681 -/core/templates/mathjaxConfig.ts @aks681 -/core/templates/components/ck-editor-helpers/ck-editor-4-widgets.initializer.ts @aks681 -/core/templates/components/ck-editor-helpers/ck-editor-copy-content.service*.ts @DubeySandeep @aks681 -/core/templates/components/ck-editor-helpers/ck-editor-copy-toolbar/ck-editor-copy-toolbar.component.html @DubeySandeep @aks681 -/core/templates/components/ck-editor-helpers/ck-editor-copy-toolbar/ck-editor-copy-toolbar.component*.ts @DubeySandeep @aks681 -/core/templates/services/autoplayed-videos.service*.ts @aks681 -/core/templates/services/external-save.service*.ts @srijanreddy98 -/core/templates/services/external-rte-save.service*.ts @srijanreddy98 -/core/templates/services/image-local-storage*.ts @DubeySandeep -/core/templates/services/rte-helper.service*.ts @aks681 -/core/templates/services/rte-helper-modal.controller*.ts @aks681 -/core/domain/image_validation_services*.py @DubeySandeep -/core/domain/rte_component_registry*.py @vojtechjelinek -/extensions/ckeditor_plugins/ @aks681 -/extensions/rich_text_components/ @aks681 -/assets/rich_text_components_definitions.ts @aks681 +/core/templates/components/ck-editor-helpers/ck-editor-4-rte.component.html @oppia/lace-frontend-reviewers +/core/templates/components/ck-editor-helpers/ck-editor-4-rte.component.ts @oppia/lace-frontend-reviewers +/core/templates/components/ck-editor-helpers/ckeditor4.module.ts @oppia/lace-frontend-reviewers +/core/templates/directives/mathjax-bind.directive.ts @oppia/lace-frontend-reviewers +/core/templates/directives/mathjax.directive.ts @oppia/lace-frontend-reviewers 
+/core/templates/mathjaxConfig.ts @oppia/lace-frontend-reviewers +/core/templates/components/ck-editor-helpers/ck-editor-4-widgets.initializer.ts @oppia/lace-frontend-reviewers +/core/templates/components/ck-editor-helpers/ck-editor-copy-content.service*.ts @oppia/lace-frontend-reviewers +/core/templates/components/ck-editor-helpers/ck-editor-copy-toolbar/ck-editor-copy-toolbar.component.html @oppia/lace-frontend-reviewers +/core/templates/components/ck-editor-helpers/ck-editor-copy-toolbar/ck-editor-copy-toolbar.component*.ts @oppia/lace-frontend-reviewers +/core/templates/components/ck-editor-helpers/ck-editor-copy-toolbar/ck-editor-copy-toolbar.module.ts @oppia/lace-frontend-reviewers +/core/templates/services/autoplayed-videos.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/external-save.service*.ts @oppia/angular-migration-reviewers +/core/templates/services/external-rte-save.service*.ts @oppia/angular-migration-reviewers +/core/templates/services/oppia-rte-parser.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/image-local-storage*.ts @oppia/lace-frontend-reviewers +/core/templates/services/rte-helper.service*.ts @oppia/lace-frontend-reviewers +/core/templates/services/rte-helper-modal.controller*.ts @oppia/lace-frontend-reviewers +/core/domain/image_validation_services*.py @oppia/lace-backend-reviewers +/core/domain/rte_component_registry*.py @oppia/lace-frontend-reviewers +/extensions/ckeditor_plugins/ @oppia/lace-backend-reviewers +/extensions/rich_text_components/rte-output-display.component* @oppia/lace-frontend-reviewers +/extensions/rich_text_components/ @oppia/lace-backend-reviewers +/assets/rich_text_components_definitions.ts @oppia/lace-frontend-reviewers # Suggestion and feedback team. # Suggestion project. 
-/core/controllers/suggestion*.py @nithusha21 -/core/domain/suggestion*.py @nithusha21 -/core/templates/domain/suggestion/ @nithusha21 -/core/templates/services/suggestion-modal.service*.ts @nithusha21 -/core/templates/services/suggestions.service*.ts @nithusha21 +/core/controllers/suggestion*.py @oppia/contributor-experience-team +/core/domain/suggestion*.py @oppia/contributor-experience-team +/core/templates/domain/suggestion/ @oppia/contributor-experience-team +/core/templates/services/suggestion-modal.service*.ts @oppia/contributor-experience-team +/core/templates/services/suggestions.service*.ts @oppia/contributor-experience-team # Feedback project. -/core/controllers/feedback*.py @nithusha21 -/core/domain/feedback*.py @nithusha21 -/core/templates/components/ratings/rating-display/rating-display.component.html @nithusha21 -/core/templates/components/ratings/ @nithusha21 -/core/templates/domain/feedback_message/ @nithusha21 -/core/templates/domain/feedback_thread/ @nithusha21 -/core/templates/pages/exploration-editor-page/feedback-tab/ @nithusha21 +/core/controllers/feedback*.py @oppia/contributor-experience-team +/core/domain/feedback*.py @oppia/contributor-experience-team +/core/templates/components/ratings/rating-display/rating-display.component.html @oppia/lace-frontend-reviewers +/core/templates/components/ratings/ @oppia/lace-frontend-reviewers +/core/templates/domain/feedback_message/ @oppia/contributor-experience-team +/core/templates/domain/feedback_thread/ @oppia/contributor-experience-team +/core/templates/pages/exploration-editor-page/feedback-tab/ @oppia/lace-frontend-reviewers # Simple pages. 
-/core/controllers/pages*.py @vojtechjelinek -/core/controllers/custom_landing_pages*.py @vojtechjelinek -/core/templates/pages/contact-page/ @vojtechjelinek -/core/templates/pages/delete-account-page/ @vojtechjelinek -/core/templates/pages/donate-page/ @vojtechjelinek -/core/templates/pages/error-pages/ @vojtechjelinek -/core/templates/pages/get-started-page/ @vojtechjelinek -/core/templates/pages/landing-pages/ @vojtechjelinek -/core/templates/pages/maintenance-page/ @vojtechjelinek -/core/templates/pages/partnerships-page/ @vojtechjelinek -/core/templates/pages/pending-account-deletion-page/ @vojtechjelinek -/core/templates/pages/preferences-page/ @vojtechjelinek -/core/templates/pages/signup-page/ @vojtechjelinek -/core/templates/pages/splash-page/ @vojtechjelinek -/core/templates/pages/teach-page/ @vojtechjelinek -/core/templates/pages/thanks-page/ @vojtechjelinek -/core/templates/pages/participation-playbook/ @vojtechjelinek -/core/templates/pages/about-foundation-page/ @vojtechjelinek -/core/templates/pages/volunteer-page/ @vojtechjelinek +/core/controllers/pages*.py @oppia/data-and-stability-reviewers +/core/controllers/custom_landing_pages*.py @oppia/data-and-stability-reviewers +/core/templates/pages/contact-page/ @oppia/angular-migration-reviewers +/core/templates/pages/delete-account-page/ @oppia/angular-migration-reviewers +/core/templates/pages/donate-page/ @oppia/angular-migration-reviewers +/core/templates/pages/error-pages/ @oppia/angular-migration-reviewers +/core/templates/pages/get-started-page/ @oppia/angular-migration-reviewers +/core/templates/pages/landing-pages/ @oppia/angular-migration-reviewers +/core/templates/pages/maintenance-page/ @oppia/angular-migration-reviewers +/core/templates/pages/partnerships-page/ @oppia/angular-migration-reviewers +/core/templates/pages/pending-account-deletion-page/ @oppia/angular-migration-reviewers +/core/templates/pages/preferences-page/ @oppia/angular-migration-reviewers 
+/core/templates/pages/signup-page/ @oppia/angular-migration-reviewers +/core/templates/pages/splash-page/ @oppia/angular-migration-reviewers +/core/templates/pages/teach-page/ @oppia/angular-migration-reviewers +/core/templates/pages/thanks-page/ @oppia/angular-migration-reviewers +/core/templates/pages/participation-playbook/ @oppia/angular-migration-reviewers +/core/templates/pages/about-foundation-page/ @oppia/angular-migration-reviewers +/core/templates/pages/volunteer-page/ @oppia/angular-migration-reviewers # Speed Improvement team. -/core/templates/google-analytics.initializer.ts @vojtechjelinek -/core/templates/base-components/ @nithusha21 -/core/templates/pages/Base.ts @nithusha21 -/core/templates/pages/footer_js_libs.html @nithusha21 -/core/templates/pages/header_css_libs.html @nithusha21 -/core/templates/pages/header_js_libs.html @nithusha21 -/core/templates/services/csrf-token.service*.ts @nithusha21 -/core/templates/third-party-imports/ @nithusha21 @vojtechjelinek -/.lighthouserc*.js @jimbyo @vojtechjelinek -/puppeteer-login-script.js @jimbyo @vojtechjelinek -/scripts/run_lighthouse_tests.py @vojtechjelinek -/webpack.*.ts @nithusha21 @vojtechjelinek - - -# User Data Takeout. -# TODO(#14285): Replace @seanlip with @BenHenning for both. 
-/core/domain/takeout_*.py @seanlip @vojtechjelinek -/core/domain/wipeout_*.py @seanlip @vojtechjelinek +/core/templates/google-analytics.initializer.ts @oppia/frontend-infrastructure-reviewers +/core/templates/base-components/ @oppia/frontend-infrastructure-reviewers +/core/templates/pages/Base.ts @oppia/frontend-infrastructure-reviewers +/core/templates/pages/footer_js_libs.html @oppia/frontend-infrastructure-reviewers +/core/templates/pages/header_css_libs.html @oppia/frontend-infrastructure-reviewers +/core/templates/pages/header_js_libs.html @oppia/frontend-infrastructure-reviewers +/core/templates/services/csrf-token.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/third-party-imports/ @oppia/frontend-infrastructure-reviewers +/.lighthouserc*.js @oppia/automated-qa-reviewers +/puppeteer-login-script.js @oppia/automated-qa-reviewers +/scripts/run_lighthouse_tests.py @oppia/automated-qa-reviewers +/webpack.*.ts @oppia/data-and-stability-reviewers # User’s profile page. 
-/core/controllers/profile*.py @vojtechjelinek -/core/templates/domain/user/ @vojtechjelinek -/core/templates/pages/profile-page/ @vojtechjelinek -/core/templates/services/user.service*.ts @vojtechjelinek -/core/templates/services/user-backend-api.service*.ts @vojtechjelinek +/core/controllers/profile*.py @oppia/data-and-stability-reviewers +/core/templates/domain/user/ @oppia/frontend-infrastructure-reviewers +/core/templates/pages/profile-page/ @oppia/frontend-infrastructure-reviewers +/core/templates/services/user.service*.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/user-backend-api.service*.ts @oppia/frontend-infrastructure-reviewers # Service constants -/core/templates/services/services.constants.ajs.ts @vojtechjelinek -/core/templates/services/services.constants.ts @vojtechjelinek +/core/templates/services/services.constants.ajs.ts @oppia/frontend-infrastructure-reviewers +/core/templates/services/services.constants.ts @oppia/frontend-infrastructure-reviewers # Dynamic Feature Gating project -# TODO(#14285): Replace @seanlip with @BenHenning for all of the patterns in this section. 
-/core/controllers/platform_feature*.py @seanlip -/core/domain/platform_feature_services*.py @seanlip -/core/domain/platform_parameter*.py @seanlip -/core/platform_feature*.py @seanlip -/core/templates/domain/platform_feature/ @seanlip -/core/templates/pages/admin-page/features-tab/ @seanlip -/core/templates/services/platform-feature*.ts @seanlip +/core/controllers/platform_feature*.py @oppia/web-android-compatibility-reviewers +/core/domain/platform_feature_services*.py @oppia/web-android-compatibility-reviewers +/core/domain/platform_parameter*.py @oppia/web-android-compatibility-reviewers +/core/platform_feature*.py @oppia/web-android-compatibility-reviewers +/core/templates/domain/platform_feature/ @oppia/web-android-compatibility-reviewers +/core/templates/pages/admin-page/features-tab/ @oppia/web-android-compatibility-reviewers +/core/templates/services/platform-feature*.ts @oppia/web-android-compatibility-reviewers # Frontend unit tests @@ -668,40 +680,42 @@ /extensions/**/*Spec.ts @oppia/frontend-test-reviewers # Draft version upgrade. -/core/domain/draft_upgrade_services*.py @DubeySandeep +/core/domain/draft_upgrade_services*.py @oppia/lace-backend-reviewers +# Mailing List +/core/templates/domain/mailing-list/mailing-list-backend-api.service*.ts @oppia/lace-frontend-reviewers # Data stability. -/core/controllers/domain_objects_validator*.py @vojtechjelinek -/core/controllers/payload_validator*.py @vojtechjelinek -/core/handler_schema_constants.py @vojtechjelinek +/core/controllers/domain_objects_validator*.py @oppia/data-and-stability-reviewers +/core/controllers/payload_validator*.py @oppia/data-and-stability-reviewers +/core/handler_schema_constants*.py @oppia/data-and-stability-reviewers # QA team. 
-/core/tests/ @nithusha21 -/assets/ @nithusha21 -/data/ @nithusha21 -/core/templates/karma.module.ts @vojtechjelinek -/core/tests/protractor_utils/ @oppia/automated-qa-reviewers -/core/tests/protractor_desktop/ @oppia/automated-qa-reviewers -/core/tests/protractor/ @oppia/automated-qa-reviewers -/core/tests/protractor.conf.js @oppia/automated-qa-reviewers -/extensions/interactions/*/protractor.js @oppia/automated-qa-reviewers -/extensions/interactions/protractor.js @oppia/automated-qa-reviewers -/extensions/rich_text_components/*/protractor.js @oppia/automated-qa-reviewers -/extensions/rich_text_components/protractor.js @oppia/automated-qa-reviewers -/extensions/objects/protractor.js @oppia/automated-qa-reviewers +/core/tests/ @oppia/automated-qa-reviewers +/assets/ @oppia/data-and-stability-reviewers +/data/ @oppia/data-and-stability-reviewers +/core/templates/karma.module.ts @oppia/lace-frontend-reviewers +/core/tests/webdriverio_utils/ @oppia/automated-qa-reviewers +/core/tests/webdriverio_desktop/ @oppia/automated-qa-reviewers +/core/tests/webdriverio/ @oppia/automated-qa-reviewers +/core/tests/wdio.conf.js @oppia/automated-qa-reviewers +/extensions/interactions/*/webdriverio.js @oppia/automated-qa-reviewers +/extensions/interactions/webdriverio.js @oppia/automated-qa-reviewers +/extensions/rich_text_components/*/webdriverio.js @oppia/automated-qa-reviewers +/extensions/rich_text_components/webdriverio.js @oppia/automated-qa-reviewers +/extensions/objects/webdriverio.js @oppia/automated-qa-reviewers /scripts/backend_test_shards.json @oppia/automated-qa-reviewers /scripts/backend_tests_incomplete_coverage.txt @oppia/automated-qa-reviewers /.github/actions @oppia/automated-qa-reviewers # Python 3 Migration project. 
-/core/domain/auth*.py @vojtechjelinek -/core/domain/caching*.py @vojtechjelinek -/core/domain/taskqueue*.py @vojtechjelinek -/core/templates/services/auth.service*.ts @vojtechjelinek -/core/templates/services/auth-backend-api.service*.ts @vojtechjelinek +/core/domain/auth*.py @oppia/data-and-stability-reviewers +/core/domain/caching*.py @oppia/data-and-stability-reviewers +/core/domain/taskqueue*.py @oppia/data-and-stability-reviewers +/core/templates/services/auth.service*.ts @oppia/data-and-stability-reviewers +/core/templates/services/auth-backend-api.service*.ts @oppia/data-and-stability-reviewers # Critical files. @@ -714,28 +728,39 @@ # # On modifying this list make sure to keep the CODEOWNER_IMPORTANT_PATHS list # in scripts/linters/codeowner_linter.py in sync with the modifications. -/core/templates/services/svg-sanitizer.service.ts @seanlip -/scripts/linters/warranted_angular_security_bypasses.py @seanlip -/core/controllers/access_validators*.py @seanlip -/core/controllers/acl_decorators*.py @seanlip -/core/controllers/base*.py @seanlip -/core/domain/html*.py @seanlip -/core/domain/rights_manager*.py @seanlip -/core/domain/role_services*.py @seanlip -/core/storage/ @seanlip -/core/domain/user*.py @seanlip -/requirements.txt @vojtechjelinek -/requirements.in @vojtechjelinek -/dependencies.json @vojtechjelinek -/package.json @vojtechjelinek -/yarn.lock @vojtechjelinek -/scripts/install_third_party_libs.py @DubeySandeep -/.github/ @DubeySandeep -/.github/CODEOWNERS @DubeySandeep -/.github/stale.yml @vojtechjelinek -/.github/workflows/ @DubeySandeep @oppia/automated-qa-reviewers +/requirements.txt @oppia/dependency-reviewers +/requirements.in @oppia/dependency-reviewers +/requirements_dev.txt @oppia/dependency-reviewers +/requirements_dev.in @oppia/dependency-reviewers +/dependencies.json @oppia/dependency-reviewers +/package.json @oppia/dependency-reviewers +/yarn.lock @oppia/dependency-reviewers +/scripts/install_third_party_libs.py 
@oppia/dev-workflow-reviewers +/.github/ @oppia/dev-workflow-reviewers +/.github/CODEOWNERS @oppia/dev-workflow-reviewers +/.github/stale.yml @oppia/dev-workflow-reviewers +/.github/workflows/ @oppia/dev-workflow-reviewers @oppia/automated-qa-reviewers # Files needed by the Android team. -# TODO(#14285): Re-add @BenHenning for the following 3. -/core/controllers/android_e2e_config*.py @seanlip -/core/android_validation_constants*.py @seanlip -/extensions/interactions/rule_templates.json @seanlip +/core/controllers/android_e2e_config*.py @oppia/web-android-compatibility-reviewers +/core/android_validation_constants*.py @oppia/web-android-compatibility-reviewers +/extensions/interactions/rule_templates.json @oppia/web-android-compatibility-reviewers +# Files owned by core reviewers. +/core/templates/services/svg-sanitizer.service.ts @oppia/core-reviewers +/scripts/linters/warranted_angular_security_bypasses.py @oppia/core-reviewers +/core/controllers/access_validators*.py @oppia/core-reviewers +/core/controllers/acl_decorators*.py @oppia/core-reviewers +/core/controllers/base*.py @oppia/core-reviewers +/core/domain/html*.py @oppia/core-reviewers +/core/domain/rights_manager*.py @oppia/core-reviewers +/core/domain/role_services*.py @oppia/core-reviewers +/core/storage/ @oppia/core-reviewers +/core/domain/user*.py @oppia/core-reviewers +/AUTHORS @oppia/core-reviewers +/CONTRIBUTORS @oppia/core-reviewers +/LICENSE @oppia/core-reviewers +/NOTICE @oppia/core-reviewers +/core/templates/pages/terms-page/terms-page.component.html @oppia/core-reviewers +/core/templates/pages/privacy-page/privacy-page.component.html @oppia/core-reviewers +/core/templates/pages/license-page/license-page.component.html @oppia/core-reviewers +/core/domain/takeout_*.py @oppia/core-reviewers +/core/domain/wipeout_*.py @oppia/core-reviewers diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index a6bd54a4cc36..dc255245dcbb 100644 --- a/.github/CONTRIBUTING.md +++ 
b/.github/CONTRIBUTING.md @@ -1,6 +1,6 @@ Thanks for your interest in contributing to the Oppia project, and making it easier for students to learn online in an effective and enjoyable way! -There are lots of ways to help out, from joining our team of lesson creators to fixing a bug in the [codebase](https://github.com/oppia/oppia/). Don't worry if you're new to "open source", or if you're still a student -- many of our contributors are, too, and we'd be happy to provide mentorship and support if this is your first time contributing to an open source project. The main thing is that you care deeply about helping people around the world to learn things better, and that you're responsible and reliable. +There are lots of ways to help out and become an Oppia contributor, from joining our team of lesson creators to fixing a bug in the [codebase](https://github.com/oppia/oppia/). Don't worry if you're new to "open source", or if you're still a student -- many of our contributors are, too, and we'd be happy to provide mentorship and support if this is your first time contributing to an open source project. The main thing is that you care deeply about helping people around the world to learn things better, and that you're responsible and reliable. To make it easier to get started, we've catalogued some of the different ways to help out. Please feel free to take a look through them, and see if any interest you: @@ -10,6 +10,8 @@ To make it easier to get started, we've catalogued some of the different ways to * [Voice artists](https://github.com/oppia/oppia/wiki/Instructions-for-voice-artists) * [Designers and artists](https://github.com/oppia/oppia/wiki/Contributing-to-Oppia%27s-design) +If you are interested in working on Oppia's Android app, you should also take a look at the [oppia/oppia-android repository](https://github.com/oppia/oppia-android). 
+ If you run into any problems, you can check out the [user documentation](http://oppia.github.io/) or post to our [developer mailing list](https://groups.google.com/forum/?fromgroups#!forum/oppia-dev). Feel free to drop into our [Gitter chat channel](https://gitter.im/oppia/oppia-chat) to say hello, too :) Finally, if, after reading the above links, you'd like to help, but aren't sure how -- don't worry! The Oppia project is very multi-faceted, and we'd be glad to help you find something to do that matches your interests and skills. Just fill out [this form](https://forms.gle/jEytndtgdsx7BrnV6) to let us know what you'd like to help with, or write to us at [volunteer@oppia.org](mailto:volunteer@oppia.org) and tell us a bit about yourself and what you'd like to do. We'll do our best to help you get started! diff --git a/.github/DISCUSSION_TEMPLATE/gsoc-2023-proposal-reviews.yml b/.github/DISCUSSION_TEMPLATE/gsoc-2023-proposal-reviews.yml new file mode 100644 index 000000000000..a282f7011b89 --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/gsoc-2023-proposal-reviews.yml @@ -0,0 +1,33 @@ +title: "GSoC’23 Proposal Review | {{Project Name}} ({{Your Name}})" +body: +- type: input + attributes: + label: Google doc link to Proposal + description: "Please paste the google doc link to the proposal here." + validations: + required: true +- type: checkboxes + id: sharing + attributes: + label: I confirm that I set the sharing options of the document to “anyone with the link can comment”. + options: + - label: "Yes" + required: true +- type: dropdown + id: review-section + attributes: + label: Which section should the reviewer take a look at? + options: + - "3.1: WHAT" + - "3.2: HOW" + validations: + required: true +- type: textarea + id: additional + attributes: + label: Additional information + description: | + Add any other information about the proposal here. + Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 
+ validations: + required: false diff --git a/.github/DISCUSSION_TEMPLATE/q-a-installation.yml b/.github/DISCUSSION_TEMPLATE/q-a-installation.yml new file mode 100644 index 000000000000..4f2233fcf03a --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/q-a-installation.yml @@ -0,0 +1,47 @@ +body: +- type: input + attributes: + label: Operating System + description: "What operating system (OS) are you using?" + validations: + required: true +- type: input + attributes: + label: RAM size + description: "How much RAM does your computer have?" + validations: + required: true +- type: textarea + id: steps + attributes: + label: Which step are you stuck on? + description: | + If you encountered this error while following a wiki page, provide a link to the page and specify which step failed. + Otherwise, list what steps caused the error. + These should be detailed enough for someone else to follow them. + validations: + required: true +- type: textarea + id: error + attributes: + label: Error Log + description: "Please copy and paste the error log." + render: bash + validations: + required: true +- type: textarea + id: approaches + attributes: + label: Approaches already used to resolve the issue + description: "What approaches have you tried to resolve the above issue?" + validations: + required: false +- type: textarea + id: additional + attributes: + label: Additional information + description: | + Add any other context about the problem here. + Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 8106b7ee98a2..000000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve ---- - - -**Describe the bug** -A clear and concise description of what the bug is.
- -**To Reproduce** -Steps to reproduce the behavior: - 1. Go to '...' - 2. Click on '....' - 3. Scroll down to '....' - 4. See error - -**Observed behavior** -A clear and concise description of what you have observed. - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Desktop (please complete the following information; delete this section if the issue does not arise on desktop):** - - OS: [e.g. iOS] - - Browser [e.g. chrome, safari] - - Browser-version [e.g. 22] - -**Smartphone (please complete the following information; delete this section if the issue does not arise on smartphones):** - - Device: [e.g. iPhone6] - - OS: [e.g. iOS8.1] - - Browser [e.g. stock browser, safari] - - Browser-version [e.g. 22] - -**Additional context** -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug_report_form.yml b/.github/ISSUE_TEMPLATE/bug_report_form.yml new file mode 100644 index 000000000000..96e606821396 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report_form.yml @@ -0,0 +1,90 @@ +name: Bug report +description: Create a report to help us improve +title: "[BUG]: " +labels: [triage needed, bug] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to report a bug in the Oppia project. + - type: markdown + attributes: + value: | + Before filing a new issue, **please do a quick search** to check that it hasn't already been filed on the issue tracker. You can do this by going to [this link](https://github.com/oppia/oppia/issues) and typing some words related to the issue in the search box next to the "New issue" button. + - type: textarea + attributes: + label: Describe the bug + description: A concise description of what issue you're experiencing. **Example:** "When I click on the 'Sign in' button, I expect to be redirected to the login page, but instead I get a 404 error." 
+ validations: + required: true + - type: textarea + attributes: + label: Steps To Reproduce + description: Steps to reproduce the behavior. **Example:** "1. Go to the library page. 2. Click on the 'Sign in' button. 3. See error." + validations: + required: false + - type: textarea + attributes: + label: Expected Behavior + description: A clear and concise description of what you expected to happen. **Example:** "I expect to be redirected to the login page." + validations: + required: false + - type: textarea + attributes: + label: Screenshots/Videos + description: | + If applicable, add screenshots or videos to help explain your problem. + + **Tip:** You can attach images or log files by clicking this area to highlight it and then dragging files in. + validations: + required: false + - type: dropdown + attributes: + label: What device are you using? + description: Please select the device you're using. + multiple: true + options: + - Desktop + - Mobile + - type: dropdown + attributes: + label: Operating System + description: Please select the operating system you're using. **Example:** "Windows". If you're using multiple Operating systems, please select all that apply. + multiple: true + options: + - Windows + - MacOS + - Linux + - Android + - IOS + - Other + - type: dropdown + attributes: + label: What browsers are you seeing the problem on? + description: Please select the browser you're using. If you're using multiple browsers, please select all that apply. + multiple: true + options: + - Chrome + - Firefox + - Safari + - Edge + - Opera + - Internet Explorer + - Other + validations: + required: false + - type: input + attributes: + label: Browser version + description: Please enter the version of the browser you're using. **Example:** "90.0.4430.212". You can find this by clicking on the three dots in the top right corner of your browser and selecting "Help" or "About". If you're using multiple browsers, please enter the version for each browser. 
**Example:** "90.0.4430.212 (Chrome), 88.1.1 (Firefox)" + validations: + required: false + - type: textarea + attributes: + label: Additional context + description: | + Add any other context about the problem here. + + Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000000..23870468a61f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: true +contact_links: + - name: Oppia Community Support + url: https://github.com/oppia/oppia/discussions + about: Please ask and answer questions here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index e0c9c7fae6a9..000000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -**Describe the solution you'd like** -A clear and concise description of what you want to happen. -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. -**Additional context** -Add any other context or screenshots about the feature request here. 
diff --git a/.github/ISSUE_TEMPLATE/feature_request_form.yml b/.github/ISSUE_TEMPLATE/feature_request_form.yml new file mode 100644 index 000000000000..330cbc69252b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request_form.yml @@ -0,0 +1,44 @@ +name: Feature request +description: Suggest an idea for this project +title: "[Feature Request]: " +labels: ["triage needed", "enhancement"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to suggest a feature for the Oppia project. + - type: markdown + attributes: + value: | + Before filing a new issue, **please do a quick search** to check that it hasn't already been filed on the issue tracker. You can do this by going to [this link](https://github.com/oppia/oppia/issues) and typing some words related to the issue in the search box next to the "New issue" button. + - type: textarea + id: problem + attributes: + label: Is your feature request related to a problem? Please describe. + description: A clear and concise description of what the problem is. **Example:** "I'm always frustrated when [...]" + validations: + required: true + - type: textarea + id: solution + attributes: + label: Describe the solution you'd like + description: A clear and concise description of what you want to happen. **Example:** "I would like to have [...]" + validations: + required: true + - type: textarea + id: alternatives + attributes: + label: Describe alternatives you've considered + description: A clear and concise description of any alternative solutions or features you've considered. **Example:** "I have considered [...]" + validations: + required: false + - type: textarea + id: additional-context + attributes: + label: Additional context + description: | + Add any other context or screenshots about the feature request here. + + Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
+ validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/server_error_template.md b/.github/ISSUE_TEMPLATE/server_error_template.md index 9b9a95a4b3b0..ef89288d22fb 100644 --- a/.github/ISSUE_TEMPLATE/server_error_template.md +++ b/.github/ISSUE_TEMPLATE/server_error_template.md @@ -1,6 +1,7 @@ --- name: Server error about: Report a production bug from the server logs +labels: triage needed, server errors --- + +#### Proof of changes on desktop with slow/throttled network + + + +#### Proof of changes on mobile phone + + + +#### Proof of changes in Arabic language + + ## PR Pointers -- Make sure to follow the [instructions for making a code change](https://github.com/oppia/oppia/wiki/Contributing-code-to-Oppia#instructions-for-making-a-code-change). -- Oppiabot will notify you when you don't add a PR_CHANGELOG label. If you are unable to do so, please @-mention a code owner (who will be in the Reviewers list), or ask on [Gitter](https://gitter.im/oppia/oppia-chat). -- For what code owners will expect, see the [Code Owner's wiki page](https://github.com/oppia/oppia/wiki/Oppia%27s-code-owners-and-checks-to-be-carried-out-by-developers). +- Make sure to follow the [instructions for making a code change](https://github.com/oppia/oppia/wiki/Make-a-pull-request). +- If you need a review or an answer to a question, and don't have permissions to assign people, **leave a comment** like the following: "{{Question/comment}} @{{reviewer_username}} PTAL". Oppiabot will help assign that person for you. +- For what code owners will expect, see the [Code Owner's wiki page](https://github.com/oppia/oppia/wiki/Oppia's-code-owners-and-checks-to-be-carried-out-by-developers). - Make sure your PR follows conventions in the [style guide](https://github.com/oppia/oppia/wiki/Coding-style-guide), otherwise this will lead to review delays. - Never force push. If you do, your PR will be closed. 
-- Oppiabot can assign anyone for review/help if you leave a comment like the following: "{{Question/comment}} @{{reviewer_username}} PTAL" -- Some of the e2e tests are flaky, and can fail for reasons unrelated to your PR. We are working on fixing this, but in the meantime, if you need to restart the tests, please check the ["If your build fails" wiki page](https://github.com/oppia/oppia/wiki/If-your-build-fails). +- Some of the e2e tests are flaky, and can fail for reasons unrelated to your PR. We are working on fixing this, but in the meantime, if you need to restart the tests, please check the ["If your build fails" wiki page](https://github.com/oppia/oppia/wiki/If-CI-checks-fail-on-your-PR). diff --git a/.github/README.md b/.github/README.md index e9942f1f19c3..e4611f8873b0 100644 --- a/.github/README.md +++ b/.github/README.md @@ -1,4 +1,4 @@ -# [Oppia](https://www.oppia.org) [![Actions](https://github.com/oppia/oppia/workflows/End-to-End%20tests/badge.svg?branch=develop)](https://github.com/oppia/oppia/actions?query=branch%3Adevelop+workflow%3A%22End-to-End+tests%22) [![CircleCI](https://circleci.com/gh/oppia/oppia/tree/develop.svg?style=svg)](https://circleci.com/gh/oppia/oppia/tree/develop) [![Join the chat at https://gitter.im/oppia/oppia-chat](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/oppia/oppia-chat?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +# [Oppia](https://www.oppia.org) [![Actions](https://github.com/oppia/oppia/workflows/End-to-End%20tests/badge.svg?branch=develop)](https://github.com/oppia/oppia/actions?query=branch%3Adevelop+workflow%3A%22End-to-End+tests%22) Oppia is an online learning tool that enables anyone to easily create and share interactive activities (called 'explorations'). These activities simulate a one-on-one conversation with a tutor, making it possible for students to learn by doing while getting feedback. 
@@ -9,7 +9,8 @@ Oppia is written using Python and AngularJS, and is built on top of Google App E * [Oppia.org community site](https://www.oppia.org) * [User Documentation](https://oppia.github.io/) * [Contributors' wiki](https://github.com/oppia/oppia/wiki) - * [Developer mailing list](http://groups.google.com/group/oppia-dev) + * [GitHub Discussions](https://github.com/oppia/oppia/discussions) + * [Developer announcements](http://groups.google.com/group/oppia-dev) * [File an issue](https://github.com/oppia/oppia/issues/new/choose)

@@ -45,9 +46,15 @@ The Oppia code is released under the [Apache v2 license](https://github.com/oppi ## Keeping in touch - * [Blog](https://medium.com/oppia-org) - * [Discussion forum](http://groups.google.com/group/oppia) + * [Discussion forum](https://github.com/oppia/oppia/discussions) * [Announcements mailing list](http://groups.google.com/group/oppia-announce) - * Social media: [YouTube](https://www.youtube.com/channel/UC5c1G7BNDCfv1rczcBp9FPw), [FB](https://www.facebook.com/oppiaorg), [Twitter](https://twitter.com/oppiaorg) -We also have public chat rooms on Gitter: [https://gitter.im/oppia/oppia-chat](https://gitter.im/oppia/oppia-chat). Drop by and say hello! +## Social Media +[][twitter] [][LinkedIn] [][Facebook] [][medium] [][oppia-org-youtube] [][dev-youtube] + +[twitter]: https://twitter.com/oppiaorg +[linkedIn]: https://www.linkedin.com/company/oppia-org/ +[medium]: https://medium.com/@oppia.org +[facebook]: https://www.facebook.com/oppiaorg/ +[oppia-org-youtube]: https://www.youtube.com/channel/UC5c1G7BNDCfv1rczcBp9FPw +[dev-youtube]: https://www.youtube.com/channel/UCsrAX-oeqm0-NIQzQrdiUkQ diff --git a/.github/SECURITY.md b/.github/SECURITY.md new file mode 100644 index 000000000000..54409cfa6148 --- /dev/null +++ b/.github/SECURITY.md @@ -0,0 +1,129 @@ +# Oppia Vulnerability Disclosure Process + +This vulnerability disclosure process describes how we accept and respond to security vulnerabilities from both Oppia developers and others. Our process follows 4 steps: Report, Investigate, Remediate, and Disclose. + +## Report + +Reporters should email [security@oppia.org](mailto:security@oppia.org) or open a [GitHub Security Vulnerability Report](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability) with: + +* A description of the problem. +* Steps we can follow to reproduce the problem. +* Affected versions. +* If known, mitigations for the problem. 
+ +We will respond within 3 days of the reporter's submission to acknowledge receipt of their report. Here is a template acknowledgement message: + +``` +Hi $REPORTER, + +Thank you for reporting this problem to us. We are currently investigating and will reach out sometime in the next 7 days once we have decided how to move forward or if we have any questions. + +Thanks, + +$OPPIA_SECURITY_TEAM_MEMBER +``` + +## Investigate + +Immediately upon receiving a report of a security problem, a member of Oppia’s security team (the tech leads for now) will assemble a vulnerability response team (VRT). This team should: + +* Include an Oppia tech lead. +* Include developers (most likely 1-2) with expertise in the part of the app affected by the problem. +* Include as few developers as possible to avoid premature publication of the problem. + +The tech lead will designate one VRT member as the VRT lead responsible for driving our response. The VRT lead will immediately (ideally within 24 hours) investigate the report and classify it as: + +* **Won’t fix**: The app is working as intended, the bug is not important enough for us to spend resources fixing, or the requested feature is not something we plan to implement. + +* **Bug**: The problem identified is legitimate, but it is not a security issue. It will be resolved through our normal bug-fixing process. + +* **Feature request**: The report identifies a new feature that should be implemented. It will be handled through our normal feature-development process. + +* **Low-severity vulnerability**: The report identifies a security vulnerability that does not meet the high-severity criteria. It will be resolved through our normal bug-fixing process. A "security vulnerability" is unintended behavior with security implications. This is distinct from a feature request, which refers to cases where the code behaves as intended, but the reporter disagrees with that intention. 
+ + For example, suppose we improperly sanitized user-provided data at the models layer such that user-provided code could be executed, but validation checks at the controller layer prevented an attacker from actually exploiting the vulnerability. This would be a security vulnerability because we do not intend for the models layer to allow user-provided code to execute, but it would be low-severity because the controllers layer would prevent exploitation. + +* **High-severity vulnerability**: The report identifies an exploitable security vulnerability that, if exploited, could result in any of the following: + + * (Confidentiality) Unauthorized access to any sensitive data that shouldn't be made public. Here, "sensitive data" generally refers to both private user data, as well as information that could be used to gain access to private user data; if in doubt, consult the data owners. + * (Integrity) Unauthorized edits to any data. + * (Availability) Degraded system performance of the platform for users. + +Note that while the VRT contains multiple individuals, it’s ultimately expected that the VRT lead drives the effort. This should be in collaboration with VRT members, but in cases of urgent vulnerabilities the VRT lead can operate authoritatively to mitigate or remediate the issue (i.e. they do not need VRT consensus or review, but they should leverage VRT team members as a source for information and help). + +The VRT lead will notify the reporter of the classification and the reasoning behind the VRT’s decision within 7 days of the acknowledgement message. This notification should include links to any issues that were opened as a result of the report. For problems classified as bugs, feature requests, or low-severity vulnerabilities, issues will be opened and assigned to the relevant team. + +The rest of this document describes how we handle high-severity vulnerabilities. 
+ +## Remediate + +### Create a Coordination Channel + +If the problem is confirmed as a high-severity vulnerability, the VRT will open a [GitHub security advisory](https://docs.github.com/en/code-security/repository-security-advisories/about-github-security-advisories-for-repositories) and, if both the VRT and reporter agree, add the reporter to the advisory so we can collaborate on it. We will coordinate work on the vulnerability via: + +* **The GitHub security advisory.** These advisories will let us collaborate in private, and they are appropriate in cases where disclosing the vulnerability prior to remediating it could put our users or developers at risk. +* **(Optionally) An additional GitHub issue and pull request.** This will immediately disclose the vulnerability, and we will take this approach when immediate disclosure poses little risk to our users and developers. For example, when the vulnerability is already publicly known. Unlike security advisories, CI checks can run on these PRs. + +Note that we will create a GitHub security advisory even if we choose to collaborate on a fix using an open issue or PR because we’ll want to publish the advisory when disclosing the vulnerability. + +### Request a CVE + +A CVE should be requested for all security vulnerabilities. Since we create a GitHub Security Advisory, we can [get a CVE from GitHub](https://docs.github.com/en/code-security/repository-security-advisories/about-github-security-advisories-for-repositories#cve-identification-numbers). As a backup, we can [get a CVE from MITRE](https://cveform.mitre.org/). + +### Develop a Patch + +Regardless of which approach we take, the VRT will prioritize developing a patch to fix (or at least mitigate) the vulnerability. If the vulnerability is easily exploitable, mitigation will take priority over all other work. Mitigation should be completed within 7 days of the report being classified as a high-severity vulnerability. 
Once mitigated, additional remediation steps can be handled through our usual bug-fixing process. + +## Disclose + +We generally follow a 90-day disclosure timeframe, meaning that we ask that reporters give us 90 days to fix vulnerabilities before publicly disclosing them. 90 days should be viewed as an upper bound, and we aim to remediate vulnerabilities as quickly as possible. In all cases, the disclosure timeline will be explicitly coordinated with the reporter, and we prefer to publicly disclose the vulnerability simultaneously with the reporter. Our disclosure will include credit for the reporter if they so wish. + +In rare cases, it may be appropriate to delay public disclosure even after the patch has been published and deployed. However, since our source code is public, we must assume that attackers will quickly reverse-engineer the vulnerability from our patch, so we will err on the side of disclosing early. + +Our public disclosure should take the form of a published GitHub Security Advisory. Here is a template: + +``` +# Security advisory for $CVEID + +## Summary + + + +* **CVE**: $CVE-NUMBER +* **Affected versions**: $VERSIONS +* **Reporter**: $NAME $AFFILIATION + +## Severity + + +## Proof of Concept + + +## Remediation and Mitigation + + +## Further Analysis + + +## Timeline + +* Date reported: $REPORT_DATE + +* Date fixed: $FIX_DATE + +* Date disclosed: $DISCLOSURE_DATE +``` + +## References + +This document was developed with the help of the [OSS Vulnerability Guide](https://github.com/ossf/oss-vulnerability-guide) and the [Secure Software Development Fundamentals course](https://github.com/ossf/secure-sw-dev-fundamentals/blob/main/secure_software_development_fundamentals.md) by the [Open Source Security Foundation](https://openssf.org/). 
diff --git a/.github/actions/check-if-pr-is-low-risk/action.yml b/.github/actions/check-if-pr-is-low-risk/action.yml deleted file mode 100644 index 826d403fedf4..000000000000 --- a/.github/actions/check-if-pr-is-low-risk/action.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: 'Check if PR is low-risk' -description: 'This action checks whether a PR is low-risk, which means we can skip some CI checks.' -outputs: - is-low-risk: - description: '0 if PR is low risk, 1 otherwise.' - value: ${{ steps.check-risk.outputs.is-low-risk }} -runs: - using: 'composite' - steps: - - id: check-risk - run: | - python -m scripts.check_if_pr_is_low_risk ${{ github.event.pull_request.html_url }} && true - echo "::set-output name=is-low-risk::$?" - shell: bash diff --git a/.github/actions/install-oppia-dependencies/action.yml b/.github/actions/install-oppia-dependencies/action.yml index 7dfdcad37d81..5524273b40e6 100644 --- a/.github/actions/install-oppia-dependencies/action.yml +++ b/.github/actions/install-oppia-dependencies/action.yml @@ -6,6 +6,11 @@ runs: - name: Setup python by installing wheel run: pip install wheel==0.35.0 shell: bash + - name: Install python 2 for GAE + run: | + sudo apt update + sudo apt install python2 -y + shell: bash - name: Check Yarn Cache run: | # Move outside repository root to ignore the configs in diff --git a/.github/codeql/codeql-config.yml b/.github/codeql/codeql-config.yml new file mode 100644 index 000000000000..f2532951ba6a --- /dev/null +++ b/.github/codeql/codeql-config.yml @@ -0,0 +1,22 @@ +name: "CodeQL Security Analysis" + +paths-ignore: + - core/tests/data + # TODO(#16763): The use of SHA-1 in this file causes CodeQL to throw the error + # "Use of a broken or weak cryptographic hashing algorithm on sensitive data." + # The results of the function using SHA-1 are stored in some places, but the + # function is not used for enciphering and preimage attacks are not a big risk + # for SHA-1. 
We can migrate to a stronger cryptographic algorithm, replacing + # all SHA-1, but that would require changes to the existing data we have. + - core/utils.py + - scripts/release_scripts/update_configs.py + # TODO(#17117): These files cause an "Incomplete multi-character sanitization" + # error to be thrown by CodeQL (cases enumerated in the issue). If we define a + # central method for this stripping, we can remove these files. + - core/templates/pages/exploration-player-page/services/audio-translation-manager.service.ts + - core/templates/pages/exploration-editor-page/changes-in-human-readable-form/changes-in-human-readable-form.component.spec.ts + - core/templates/components/ck-editor-helpers/ck-editor-copy-content.service.ts + - core/templates/filters/format-rte-preview.pipe.ts + - core/templates/domain/objects/NumberWithUnitsObjectFactory.ts + - extensions/rich_text_components/rte-output-display.component.spec.ts + - core/tests/webdriverio_utils/forms.js diff --git a/.github/workflows/backend_associated_test_file_check.yml b/.github/workflows/backend_associated_test_file_check.yml new file mode 100644 index 000000000000..8e96a01ed65f --- /dev/null +++ b/.github/workflows/backend_associated_test_file_check.yml @@ -0,0 +1,47 @@ +name: Backend associated test file check +on: + push: + branches: + - develop + - release-* + pull_request: + branches: + - develop + - release-* + +jobs: + run_tests: + name: Run backend associated test file checks + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-22.04] + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + with: + python-version: '3.8.15' + architecture: 'x64' + - uses: ./.github/actions/merge + - name: Cache node modules and third_party/static + uses: actions/cache@v3 + env: + cache-name: cache-node-modules + with: + path: | + /home/runner/work/oppia/yarn_cache + /home/runner/work/oppia/oppia/third_party/static + key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ 
hashFiles('yarn.lock', 'dependencies.json') }} + restore-keys: | + ${{ runner.os }}-build-${{ env.cache-name }}- + ${{ runner.os }}-build- + ${{ runner.os }}- + - uses: ./.github/actions/install-oppia-dependencies + - name: Run backend associated test file check + run: PYTHONIOENCODING=utf-8 python -m scripts.check_backend_associated_test_file + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "Some backend files lack an associated test file." + webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} diff --git a/.github/workflows/backend_tests.yml b/.github/workflows/backend_tests.yml index 08466cb3cdb9..c830a9480725 100644 --- a/.github/workflows/backend_tests.yml +++ b/.github/workflows/backend_tests.yml @@ -15,21 +15,23 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] - shard: [1, 2, 3, 4] + os: [ubuntu-22.04] + shard: [1, 2, 3, 4, 5] steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.8.15' architecture: 'x64' - - name: Cache node modules - uses: actions/cache@v2 + - name: Cache node modules and third_party/static + uses: actions/cache@v3 env: cache-name: cache-node-modules with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} + path: | + /home/runner/work/oppia/yarn_cache + /home/runner/work/oppia/oppia/third_party/static + key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock', 'dependencies.json') }} restore-keys: | ${{ runner.os }}-build-${{ env.cache-name }}- ${{ runner.os }}-build- @@ -38,31 +40,38 @@ jobs: if: startsWith(github.head_ref, 'update-changelog-for-release') == false - name: 
Install packages for optimized coverage if: startsWith(github.head_ref, 'update-changelog-for-release') == false - run: sudo apt-get install -y python-dev gcc + run: sudo apt-get install -y python-dev-is-python3 gcc - name: Install coverage and configparser if: startsWith(github.head_ref, 'update-changelog-for-release') == false run: pip install coverage configparser - name: Run backend test shard if: startsWith(github.head_ref, 'update-changelog-for-release') == false run: PYTHONIOENCODING=utf-8 python -m scripts.run_backend_tests --generate_coverage_report --ignore_coverage --exclude_load_tests --test_shard ${{ matrix.shard }} + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "A backend test failed on the upstream develop branch." + webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} - name: Upload coverage report if: startsWith(github.head_ref, 'update-changelog-for-release') == false - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: ${{ format('backend_test_coverage_shard_{0}', matrix.shard) }} path: .coverage + retention-days: 1 check_combined_coverage: name: Check combined backend test coverage needs: run_tests - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.8.15' architecture: 'x64' - name: Cache node modules - uses: actions/cache@v2 + uses: actions/cache@v3 env: cache-name: cache-node-modules with: @@ -80,33 +89,39 @@ jobs: run: pip install coverage - name: Download coverage report for shard 1 if: startsWith(github.head_ref, 'update-changelog-for-release') == false - uses: actions/download-artifact@v2 + uses: 
actions/download-artifact@v3 with: name: backend_test_coverage_shard_1 path: coverage/coverage_1 - name: Download coverage report for shard 2 if: startsWith(github.head_ref, 'update-changelog-for-release') == false - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: backend_test_coverage_shard_2 path: coverage/coverage_2 - name: Download coverage report for shard 3 if: startsWith(github.head_ref, 'update-changelog-for-release') == false - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: backend_test_coverage_shard_3 path: coverage/coverage_3 - name: Download coverage report for shard 4 if: startsWith(github.head_ref, 'update-changelog-for-release') == false - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: backend_test_coverage_shard_4 path: coverage/coverage_4 + - name: Download coverage report for shard 5 + if: startsWith(github.head_ref, 'update-changelog-for-release') == false + uses: actions/download-artifact@v3 + with: + name: backend_test_coverage_shard_5 + path: coverage/coverage_5 - name: Move coverage reports from artifact folders to coverage folder if: startsWith(github.head_ref, 'update-changelog-for-release') == false shell: bash run: | - for i in {1..4}; do cp coverage/coverage_$i/.coverage coverage/.coverage.$i; done + for i in {1..5}; do cp coverage/coverage_$i/.coverage coverage/.coverage.$i; done - name: Combine coverage reports if: startsWith(github.head_ref, 'update-changelog-for-release') == false shell: bash @@ -114,4 +129,10 @@ jobs: - name: Check coverage if: startsWith(github.head_ref, 'update-changelog-for-release') == false shell: bash - run: coverage report --fail-under 100 -m + run: PYTHONIOENCODING=utf-8 python -m scripts.check_overall_backend_test_coverage + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 
'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "Backend coverage checks failed on the upstream develop branch." + webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 000000000000..70d2a9d0d578 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,40 @@ +# Generated by GitHub. +name: "CodeQL" + +on: + push: + branches: ["develop"] + pull_request: + # The branches below must be a subset of the branches above + branches: ["develop"] + schedule: + - cron: "20 13 * * 2" + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: ["javascript", "python"] + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - uses: ./.github/actions/merge + + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + config-file: ./.github/codeql/codeql-config.yml + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/develop_commit_notification.yml b/.github/workflows/develop_commit_notification.yml new file mode 100644 index 000000000000..d7f4da073c41 --- /dev/null +++ b/.github/workflows/develop_commit_notification.yml @@ -0,0 +1,25 @@ +name: Broadcast 'push to develop' event to our release-scripts repo +on: + push: + branches: + - develop + +jobs: + send_notification: + name: Send notification + runs-on: ubuntu-22.04 + if: github.repository == 'oppia/oppia' + steps: + - name: Generate token + id: generate_token + uses: tibdex/github-app-token@b62528385c34dbc9f38e5f4225ac829252d1ea92 + with: + app_id: ${{ secrets.NOTIFICATIONS_TOKEN_GENERATOR_GH_APP_ID }} + private_key: ${{ secrets.NOTIFICATIONS_TOKEN_GENERATOR_GH_APP_PRIVATE_KEY }} + - name: Repository 
Dispatch + uses: peter-evans/repository-dispatch@87c5425cae5ba8b5bc7da27674076c78588babf3 + with: + token: ${{ steps.generate_token.outputs.token }} + repository: oppia/release-scripts + event-type: develop-commit + client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' diff --git a/.github/workflows/e2e_additional_editor_and_player.yml b/.github/workflows/e2e_additional_editor_and_player.yml deleted file mode 100644 index 5ed40d94f1aa..000000000000 --- a/.github/workflows/e2e_additional_editor_and_player.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* - pull_request: - branches: - - develop - - release-* - -jobs: - e2e_additional_editor_and_player: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - ${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - - id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run Additional Editor E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="additionalEditorFeatures" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run 
Additional Editor Modals E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="additionalEditorFeaturesModals" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Additional Player E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="additionalPlayerFeatures" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/e2e_creator_learner_dashboard_and_editor_tabs.yml b/.github/workflows/e2e_creator_learner_dashboard_and_editor_tabs.yml deleted file mode 100644 index a21c990bbc19..000000000000 --- a/.github/workflows/e2e_creator_learner_dashboard_and_editor_tabs.yml +++ /dev/null @@ -1,74 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* - pull_request: - branches: - - develop - - release-* - -jobs: - e2e_creator_learner_dashboard_and_editor_tabs: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - 
${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - - id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run Creator Dashboard E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="creatorDashboard" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Improvements Tab E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="explorationImprovementsTab" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Feedback Tab E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="explorationFeedbackTab" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Learner Dashboard E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="learnerDashboard" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/e2e_history_statistics_tabs_and_extensions.yml 
b/.github/workflows/e2e_history_statistics_tabs_and_extensions.yml deleted file mode 100644 index 38a3905dbb81..000000000000 --- a/.github/workflows/e2e_history_statistics_tabs_and_extensions.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* - pull_request: - branches: - - develop - - release-* - -jobs: - e2e_history_statistics_tabs_and_extensions: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - ${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - - id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run History Tab E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="explorationHistoryTab" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Statistics Tab E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="explorationStatisticsTab" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Extensions E2E Test - if: ${{ 
steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="extensions" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/e2e_learner_flow_skill_editor_and_embedding.yml b/.github/workflows/e2e_learner_flow_skill_editor_and_embedding.yml deleted file mode 100644 index 3b1133aa67b4..000000000000 --- a/.github/workflows/e2e_learner_flow_skill_editor_and_embedding.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* - pull_request: - branches: - - develop - - release-* - -jobs: - e2e_learner_flow_skill_editor_and_embedding: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - ${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - - id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ 
steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run Learner Flow E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="learner" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Skill Editor E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="skillEditor" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Embedding E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="embedding" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/e2e_miscellaneous_tests.yml b/.github/workflows/e2e_miscellaneous_tests.yml deleted file mode 100644 index 41208b4b9580..000000000000 --- a/.github/workflows/e2e_miscellaneous_tests.yml +++ /dev/null @@ -1,89 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* - pull_request: - branches: - - develop - - release-* - -jobs: - e2e_miscellaneous_tests: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: 
/home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - ${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - - id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run e2e File Upload Features Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="fileUploadFeatures" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e Play Voiceovers Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="playVoiceovers" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e File Upload Extensions Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="fileUploadExtensions" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e Contributor Dashboard Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="contributorDashboard" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e Blog Dashboard Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build 
--suite="blogDashboard" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e Publication Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="publication" --skip-install --skip-build --server_log_level="info" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e Feature Gating Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --suite="featureGating" - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/e2e_other_tests.yml b/.github/workflows/e2e_other_tests.yml deleted file mode 100644 index 0064bb998e3e..000000000000 --- a/.github/workflows/e2e_other_tests.yml +++ /dev/null @@ -1,79 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* - pull_request: - branches: - - develop - - release-* - -jobs: - e2e_other_tests: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - ${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - 
- id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run e2e Collections Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="collections" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e Accessibility Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="accessibility" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e Classroom Page Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="classroomPageFileUploadFeatures" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e Navigation Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --prod_env --skip-install --skip-build --suite="navigation" - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e Admin Page Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --prod_env --skip-install --skip-build --suite="adminPage" - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory 
Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/e2e_tests.yml b/.github/workflows/e2e_tests.yml new file mode 100644 index 000000000000..baff22ee80c8 --- /dev/null +++ b/.github/workflows/e2e_tests.yml @@ -0,0 +1,190 @@ +name: End-to-End tests +on: + push: + branches: + - develop + - release-* + pull_request: + branches: + - develop + - release-* + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-22.04] + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + with: + python-version: '3.8.15' + architecture: 'x64' + cache: 'pip' + cache-dependency-path: | + requirements.in + requirements.txt + requirements_dev.in + requirements_dev.txt + - uses: ./.github/actions/merge + - name: Cache node modules and third_party/static + uses: actions/cache@v3 + env: + cache-name: cache-node-modules + with: + path: | + /home/runner/work/oppia/yarn_cache + /home/runner/work/oppia/oppia/third_party/static + key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock', 'dependencies.json') }} + restore-keys: | + ${{ runner.os }}-build-${{ env.cache-name }}- + ${{ runner.os }}-build- + ${{ runner.os }}- + - uses: ./.github/actions/install-oppia-dependencies + - name: Install chrome + run: python -m scripts.install_chrome_for_ci + - name: Install ffmpeg + run: sudo apt install ffmpeg + - name: Build Webpack + run: python -m scripts.build --prod_env + - name: Zip build files + # We avoid using ../ or absolute paths because unzip treats these as + # security issues and will refuse to follow them. 
+ run: | + zip -rqy build_files.zip oppia/third_party oppia_tools oppia/build oppia/webpack_bundles oppia/proto_files oppia/app.yaml oppia/assets/hashes.json oppia/proto_files oppia/extensions/classifiers/proto/* oppia/backend_prod_files oppia/dist + working-directory: /home/runner/work/oppia + - name: Upload build files artifact + uses: actions/upload-artifact@v3 + with: + name: build_files + path: /home/runner/work/oppia/build_files.zip + retention-days: 1 + e2e_test: + needs: build + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-22.04] + suite: + - accessibility + - additionalEditorFeatures + - additionalEditorFeaturesModals + - additionalPlayerFeatures + - adminPage + - blogDashboard + - blog + - checkpointFeatures + - classroomPage + - classroomPageFileUploadFeatures + - collections + - contributorDashboard + - coreEditorAndPlayerFeatures + - creatorDashboard + - diagnosticTestPage + - embedding + - explorationFeedbackTab + - explorationImprovementsTab + - explorationHistoryTab + - explorationStatisticsTab + - explorationTranslationTab + - extensions + - featureGating + - fileUploadExtensions + - fileUploadFeatures + - learner + - learnerDashboard + - library + - navigation + - playVoiceovers + - preferences + - profileFeatures + - profileMenu + - publication + - skillEditor + - subscriptions + - topicsAndSkillsDashboard + - topicAndStoryEditor + - topicAndStoryEditorFileUploadFeatures + - topicAndStoryViewer + - users + - wipeout + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + with: + python-version: '3.8.15' + architecture: 'x64' + cache: 'pip' + cache-dependency-path: | + requirements.in + requirements.txt + requirements_dev.in + requirements_dev.txt + - uses: ./.github/actions/merge + - name: Cache node modules + uses: actions/cache@v3 + env: + cache-name: cache-node-modules + with: + path: /home/runner/work/oppia/yarn_cache + key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} + 
restore-keys: | + ${{ runner.os }}-build-${{ env.cache-name }}- + ${{ runner.os }}-build- + ${{ runner.os }}- + - name: Download build files artifact + uses: actions/download-artifact@v3 + with: + name: build_files + path: /home/runner/work/oppia/ + - name: Unzip build files + run: | + ls -la + unzip build_files.zip + rm build_files.zip + echo "Files in ./:" + ls -la . + echo "Files in oppia_tools:" + ls -la oppia_tools + echo "Files in oppia:" + ls -la oppia + echo "Files in build:" + ls -la oppia/build + echo "Files in third_party:" + ls -la oppia/third_party + working-directory: /home/runner/work/oppia + shell: bash + - uses: ./.github/actions/install-oppia-dependencies + - name: Install chrome + run: python -m scripts.install_chrome_for_ci + - name: Install ffmpeg + run: sudo apt install ffmpeg + - name: Run E2E Test ${{ matrix.suite }} + run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite=${{ matrix.suite }} --prod_env + env: + VIDEO_RECORDING_IS_ENABLED: 0 + - name: Uploading webdriverio-video as Artifacts + if: ${{ failure() }} + uses: actions/upload-artifact@v3 + with: + name: webdriverio-video + path: /home/runner/work/oppia/webdriverio-video + - name: Uploading webdriverio screenshots as Artifacts + if: ${{ failure() }} + uses: actions/upload-artifact@v3 + with: + name: webdriverio-screenshots + path: /home/runner/work/oppia/webdriverio-screenshots + - name: Uploading webpack bundles as an artifact + if: ${{ failure() }} + uses: actions/upload-artifact@v3 + with: + name: webpack-bundles + path: /home/runner/work/oppia/oppia/build + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "An E2E test failed on the upstream develop branch." 
+ webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} diff --git a/.github/workflows/e2e_topic_tests.yml b/.github/workflows/e2e_topic_tests.yml deleted file mode 100644 index b6c229b111b0..000000000000 --- a/.github/workflows/e2e_topic_tests.yml +++ /dev/null @@ -1,74 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* - pull_request: - branches: - - develop - - release-* - -jobs: - e2e_topic_tests: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - ${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - - id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run Topics and Skills Dashboard E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="topicsAndSkillsDashboard" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Topic and Story Editor E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="topicAndStoryEditor" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: 
Run Topic and Story Editor File Upload E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="topicAndStoryEditorFileUploadFeatures" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Topic and Story Viewer E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="topicAndStoryViewer" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/e2e_translation_classroom_and_core_features.yml b/.github/workflows/e2e_translation_classroom_and_core_features.yml deleted file mode 100644 index 61333e95fa55..000000000000 --- a/.github/workflows/e2e_translation_classroom_and_core_features.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* - pull_request: - branches: - - develop - - release-* - -jobs: - e2e_translation_classroom_and_core_features: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ 
runner.os }}-build- - ${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - - id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run Exploration Translation Tab E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="explorationTranslationTab" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Classroom Page E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="classroomPage" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run Core Editor And Player E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-build --skip-install --suite="coreEditorAndPlayerFeatures" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/e2e_user_features_and_library.yml b/.github/workflows/e2e_user_features_and_library.yml deleted file mode 100644 index 11ad0a59ab2b..000000000000 --- a/.github/workflows/e2e_user_features_and_library.yml +++ /dev/null @@ -1,74 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* 
- pull_request: - branches: - - develop - - release-* - -jobs: - e2e_user_features_and_library: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - ${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - - id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run Library E2E Test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --suite="library" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e preferences test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="preferences" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e subscriptions test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="subscriptions" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e wipeout test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 
0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="wipeout" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/e2e_user_profile.yml b/.github/workflows/e2e_user_profile.yml deleted file mode 100644 index 72889baddf63..000000000000 --- a/.github/workflows/e2e_user_profile.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: End-to-End tests -on: - push: - branches: - - develop - - release-* - pull_request: - branches: - - develop - - release-* - -jobs: - e2e_user_profile: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-18.04] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: '3.7' - architecture: 'x64' - - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - ${{ runner.os }}- - - uses: ./.github/actions/install-oppia-dependencies - - id: check-risk - name: Check if PR is low-risk - uses: ./.github/actions/check-if-pr-is-low-risk - - name: Install chrome - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: python -m scripts.install_chrome_for_ci - - name: Install ffmpeg - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: sudo apt install ffmpeg - - name: Run e2e users test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m 
scripts.run_e2e_tests --suite="users" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e profileMenu test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="profileMenu" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Run e2e profileFeatures test - if: ${{ steps.check-risk.outputs.is-low-risk != 0 }} - run: xvfb-run -a --server-args="-screen 0, 1285x1000x24" python -m scripts.run_e2e_tests --skip-install --skip-build --suite="profileFeatures" --prod_env - env: - VIDEO_RECORDING_IS_ENABLED: 0 - - name: Uploading protractor-video as Artifacts - if: ${{ steps.check-risk.outputs.is-low-risk != 0 && always() }} - uses: actions/upload-artifact@v2 - with: - name: protractor-video - path: /home/runner/work/oppia/protractor-video - - name: Checking Memory Usage - if: ${{ always() }} - run: cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes diff --git a/.github/workflows/eslint_tests.yml b/.github/workflows/eslint_tests.yml index 036b5fae5ffe..9c5bffd95aac 100644 --- a/.github/workflows/eslint_tests.yml +++ b/.github/workflows/eslint_tests.yml @@ -15,21 +15,23 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-22.04] steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.8.15' architecture: 'x64' - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 + - name: Cache node modules and third_party/static + uses: actions/cache@v3 env: cache-name: cache-node-modules with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} + path: | + /home/runner/work/oppia/yarn_cache + /home/runner/work/oppia/oppia/third_party/static + key: ${{ runner.os }}-build-${{ 
env.cache-name }}-${{ hashFiles('yarn.lock', 'dependencies.json') }} restore-keys: | ${{ runner.os }}-build-${{ env.cache-name }}- ${{ runner.os }}-build- @@ -38,3 +40,9 @@ jobs: - name: Run ESLint Tests if: startsWith(github.head_ref, 'update-changelog-for-release') == false run: PYTHONIOENCODING=utf-8 python -m scripts.run_custom_eslint_tests + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "ESLint checks failed on the upstream develop branch." + webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} diff --git a/.github/workflows/frontend_tests.yml b/.github/workflows/frontend_tests.yml index e8a066a92a45..1408fd3205f2 100644 --- a/.github/workflows/frontend_tests.yml +++ b/.github/workflows/frontend_tests.yml @@ -10,25 +10,57 @@ on: - release-* jobs: - frontend: + generate-job-strategy-matrix: + runs-on: ubuntu-22.04 + outputs: + job-strategy-matrix: ${{ steps.generate.outputs.job-strategy-matrix }} + steps: + - name: Generate job strategy matrix + id: generate + env: + standard-test-runs: 2 + flakes-test-runs: 40 + run: | + if ${{ github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + then + JOB_STRATEGY_MATRIX=$(node -e "let r=[]; for(let i = 1; i <= ${{ env.flakes-test-runs }}; i++) { r.push(i) }; console.log(JSON.stringify(r));") + echo "::set-output name=job-strategy-matrix::$JOB_STRATEGY_MATRIX" + else + JOB_STRATEGY_MATRIX=$(node -e "let r=[]; for(let i = 1; i <= ${{ env.standard-test-runs }}; i++) { r.push(i) }; console.log(JSON.stringify(r));") + echo "::set-output name=job-strategy-matrix::$JOB_STRATEGY_MATRIX" + fi + frontend-karma-tests: + needs: generate-job-strategy-matrix runs-on: ${{ matrix.os }} strategy: + max-parallel: 25 matrix: - os: [ubuntu-18.04] + os: [ubuntu-22.04] + 
num_runs: ${{ fromJson(needs.generate-job-strategy-matrix.outputs.job-strategy-matrix) }} + fail-fast: false steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - name: Describe filesystem + run: | + pwd + ls /home/runner/work + ls /home/runner/work/oppia + ls /home/runner/work/oppia/oppia + echo $GITHUB_WORKSPACE + - uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.8.15' architecture: 'x64' - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 + - name: Cache node modules and third_party/static + uses: actions/cache@v3 env: cache-name: cache-node-modules with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} + path: | + /home/runner/work/oppia/yarn_cache + /home/runner/work/oppia/oppia/third_party/static + key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock', 'dependencies.json') }} restore-keys: | ${{ runner.os }}-build-${{ env.cache-name }}- ${{ runner.os }}-build- @@ -36,10 +68,21 @@ jobs: - uses: ./.github/actions/install-oppia-dependencies - name: Suppress ENOSPC error from chokidar file watcher. See https://stackoverflow.com/a/32600959. 
run: echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p - - name: Run frontend tests + - name: Run frontend tests ${{ matrix.num_runs }} run: PYTHONIOENCODING=utf-8 python -m scripts.run_frontend_tests --run_minified_tests --skip_install --check_coverage + - name: Uploading frontend coverage reports as an artifact + if: ${{ failure() }} + uses: actions/upload-artifact@v3 + with: + name: frontend-coverage-${{ matrix.num_runs }} + path: /home/runner/work/oppia/karma_coverage_reports + report-failure: + needs: frontend-karma-tests + runs-on: ubuntu-22.04 + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + steps: + - uses: actions/checkout@v3 - name: Report failure if failed on oppia/oppia develop branch - if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} uses: ./.github/actions/send-webhook-notification with: message: "A frontend test failed on the upstream develop branch." 
diff --git a/.github/workflows/lighthouse_accessibility.yml b/.github/workflows/lighthouse_accessibility.yml index f3d771dd9a42..73b0e73ded47 100644 --- a/.github/workflows/lighthouse_accessibility.yml +++ b/.github/workflows/lighthouse_accessibility.yml @@ -14,30 +14,58 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-22.04] shard: [1, 2] steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v1 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.8.15' architecture: 'x64' - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 - env: - cache-name: cache-node-modules - with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} - restore-keys: | - ${{ runner.os }}-build-${{ env.cache-name }}- - ${{ runner.os }}-build- - ${{ runner.os }}- + # Caching is disabled to avoid poisoning our cache with the + # changed lighthouse file. + # + # - name: Cache node modules + # uses: actions/cache@v3 + # env: + # cache-name: cache-node-modules + # with: + # path: /home/runner/work/oppia/yarn_cache + # key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} + # restore-keys: | + # ${{ runner.os }}-build-${{ env.cache-name }}- + # ${{ runner.os }}-build- + # ${{ runner.os }}- - uses: ./.github/actions/install-oppia-dependencies + - run: | + # Replace the 1.5 second timeout in lighthouse for retrieving + # sourcemaps with a 10 second timeout. This timeout is not + # configurable through Lighthouse's exposed API, so we have + # resorted to patching the code instead. 
+ set -e + FILEPATH=$GITHUB_WORKSPACE/node_modules/lighthouse/lighthouse-core/gather/gatherers/source-maps.js + SEARCH_TARGET="1500" + REPLACEMENT="10000" + occurrences=$(grep -c $SEARCH_TARGET $FILEPATH) + echo "Found $occurrences occurrences of '$SEARCH_TARGET'" + # Assert that there is only one match for the target string. + # If there are ever zero or multiple matches, the lighthouse + # code has changed and this logic needs to be reviewed. + [[ $occurrences == "1" ]] + # Replace the target string with the replacement string. + # Note: double quotes are required so that the shell expands + # $SEARCH_TARGET and $REPLACEMENT inside the sed script. + sed -i "s/$SEARCH_TARGET/$REPLACEMENT/" $FILEPATH + echo "Replaced '$SEARCH_TARGET' with '$REPLACEMENT' in $FILEPATH" + shell: bash - name: Install chrome if: startsWith(github.head_ref, 'update-changelog-for-release') == false run: python -m scripts.install_chrome_for_ci - name: Run Lighthouse accessibility checks shard if: startsWith(github.head_ref, 'update-changelog-for-release') == false run: python -m scripts.run_lighthouse_tests --mode accessibility --shard ${{ matrix.shard }} + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "A Lighthouse test failed on the upstream develop branch." 
+ webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} diff --git a/.github/workflows/lighthouse_performance.yml b/.github/workflows/lighthouse_performance.yml index 262d4d260daa..1d3203c204c0 100644 --- a/.github/workflows/lighthouse_performance.yml +++ b/.github/workflows/lighthouse_performance.yml @@ -14,17 +14,17 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-22.04] shard: [1, 2] steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v1 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.8.15' architecture: 'x64' - uses: ./.github/actions/merge - name: Cache node modules - uses: actions/cache@v2 + uses: actions/cache@v3 env: cache-name: cache-node-modules with: @@ -41,3 +41,9 @@ jobs: - name: Run Lighthouse performance checks shard if: startsWith(github.head_ref, 'update-changelog-for-release') == false run: python -m scripts.run_lighthouse_tests --mode performance --shard ${{ matrix.shard }} + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "A Lighthouse test failed on the upstream develop branch." 
+ webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 899adf9e5904..7ca30d7b04c9 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,22 +14,30 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] - shard: ['1', 'other'] + os: [ubuntu-22.04] + shard: ['other'] steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.8.15' architecture: 'x64' + cache: 'pip' + cache-dependency-path: | + requirements.in + requirements.txt + requirements_dev.in + requirements_dev.txt - uses: ./.github/actions/merge - - name: Cache node modules - uses: actions/cache@v2 + - name: Cache node modules and third_party/static + uses: actions/cache@v3 env: cache-name: cache-node-modules with: - path: /home/runner/work/oppia/yarn_cache - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock') }} + path: | + /home/runner/work/oppia/yarn_cache + /home/runner/work/oppia/oppia/third_party/static + key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock', 'dependencies.json') }} restore-keys: | ${{ runner.os }}-build-${{ env.cache-name }}- ${{ runner.os }}-build- @@ -41,3 +49,9 @@ jobs: - name: Run Lint Checks if: startsWith(github.head_ref, 'update-changelog-for-release') == false run: PYTHONIOENCODING=utf-8 python -m scripts.linters.pre_commit_linter --shard ${{ matrix.shard }} --verbose + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "Lint checks failed on the upstream develop branch." 
+ webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} diff --git a/.github/workflows/oppiabot.yml b/.github/workflows/oppiabot.yml index 48745bf96595..cf5e488dcfae 100644 --- a/.github/workflows/oppiabot.yml +++ b/.github/workflows/oppiabot.yml @@ -13,7 +13,7 @@ jobs: oppiabot: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: ./.github/actions/merge - name: Github Actions from Oppiabot uses: oppia/oppiabot@1.4.0 diff --git a/.github/workflows/pending-review-notification.yml b/.github/workflows/pending-review-notification.yml index 142fd633723f..91e437b94f69 100644 --- a/.github/workflows/pending-review-notification.yml +++ b/.github/workflows/pending-review-notification.yml @@ -1,8 +1,8 @@ name: Send pending review notifications to reviewer on github-discussion on: schedule: - # Every Tuesday and Thursday at 00:00 UTC. - - cron: '0 0 * * 2,5' + # Every Tuesday and Thursday at 16:00 UTC. + - cron: '0 16 * * 2,4' # GitHub doesn't provide assurance that the scheduled jobs will run on time # (see https://github.community/t/no-assurance-on-scheduled-jobs/133753). 
# So, we add the workflow_dispatch event here to allow triggering this @@ -15,12 +15,12 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-22.04] steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.8.15' architecture: 'x64' - uses: DubeySandeep/pending-review-notification@v1 with: diff --git a/.github/workflows/python_type_checks.yml b/.github/workflows/python_type_checks.yml index be3d0e702ad0..d7a863a31029 100644 --- a/.github/workflows/python_type_checks.yml +++ b/.github/workflows/python_type_checks.yml @@ -15,13 +15,17 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04] + os: [ubuntu-22.04] steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 with: - python-version: '3.7' + python-version: '3.8.15' architecture: 'x64' + - name: Install python 2 for GAE + run: | + sudo apt update + sudo apt install python2 -y - uses: ./.github/actions/merge - name: Install Third Party Dependencies if: startsWith(github.head_ref, 'update-changelog-for-release') == false @@ -29,3 +33,9 @@ jobs: - name: Run Mypy type checks if: startsWith(github.head_ref, 'update-changelog-for-release') == false run: PYTHONIOENCODING=utf-8 python -m scripts.run_mypy_checks --skip-install --install-globally + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "Python type checks failed on the upstream develop branch." 
+ webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} diff --git a/.github/workflows/revert-web-wiki-updates.yml b/.github/workflows/revert-web-wiki-updates.yml new file mode 100644 index 000000000000..81a08c81eaee --- /dev/null +++ b/.github/workflows/revert-web-wiki-updates.yml @@ -0,0 +1,46 @@ +name: Revert Wiki Changes Made Through the Web Interface +on: + gollum + +jobs: + revert: + runs-on: ubuntu-22.04 + steps: + - name: Retrieve authentication token + id: get-token + # SHA1 hash of the release-v0.0.1 commit. + uses: oppia/get-github-app-token@8c3b19db0cdcd0f7fded7dd71e5e0429bf72df1a + with: + app_id: ${{ secrets.OPPIA_WIKI_SYNCHRONIZER_APP_ID }} + private_key: ${{ secrets.OPPIA_WIKI_SYNCHRONIZER_APP_PRIVATE_KEY }} + - uses: actions/checkout@v3 + with: + repository: ${{github.repository_owner}}/oppia.wiki + token: ${{ steps.get-token.outputs.token }} + fetch-depth: 0 + - name: Add remote + run: git remote add source https://github.com/${{github.repository_owner}}/oppia-web-developer-docs.git + - name: Fetch remote + run: git fetch source + - name: Check for differences + id: check-diff + run: echo "::set-output name=diff::$(git log source/develop..origin/master)" + - name: Revert commits + if: ${{ steps.check-diff.outputs.diff != '' }} + run: | + git config user.name oppia-wiki-synchronizer[bot] + # Email has the form id+name@users.noreply.github.com. The ID + # can be retrieved from + # https://api.github.com/users/oppia-wiki-synchronizer[bot]. 
+ git config user.email 102317631+oppia-wiki-synchronizer[bot]@users.noreply.github.com + git revert --no-commit source/develop..origin/master + msg_file=$(mktemp) + echo "Reverting the following changes made through the web interface:\n" > $msg_file + git log source/develop..origin/master | while read line; do echo " ${line}"; done >> $msg_file + git commit -F $msg_file + - name: Push to deployment repository + if: ${{ steps.check-diff.outputs.diff != '' }} + run: git push source master:develop + - name: Push to source repository + if: ${{ steps.check-diff.outputs.diff != '' }} + run: git push diff --git a/.github/workflows/typescript_and_e2e_coverage.yml b/.github/workflows/typescript_and_e2e_coverage.yml new file mode 100644 index 000000000000..2d5585c8c165 --- /dev/null +++ b/.github/workflows/typescript_and_e2e_coverage.yml @@ -0,0 +1,52 @@ +name: End-to-End coverage and Typescript test +on: + push: + branches: + - develop + - release-* + pull_request: + branches: + - develop + - release-* + +jobs: + e2e_coverage_and_typescript_tests: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-22.04] + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + with: + python-version: '3.8.15' + architecture: 'x64' + - uses: ./.github/actions/merge + - name: Cache node modules and third_party/static + uses: actions/cache@v3 + env: + cache-name: cache-node-modules + with: + path: | + /home/runner/work/oppia/yarn_cache + /home/runner/work/oppia/oppia/third_party/static + key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('yarn.lock', 'dependencies.json') }} + restore-keys: | + ${{ runner.os }}-build-${{ env.cache-name }}- + ${{ runner.os }}-build- + ${{ runner.os }}- + - uses: ./.github/actions/install-oppia-dependencies + - name: Install chrome + run: python -m scripts.install_chrome_for_ci + - name: Check that all e2e test files are captured in wdio.conf.js + run: python -m scripts.check_e2e_tests_are_captured_in_ci + - name: Run 
typescript tests + run: python -m scripts.typescript_checks + - name: Run typescript tests in strict mode + run: python -m scripts.typescript_checks --strict_checks + - name: Report failure if failed on oppia/oppia develop branch + if: ${{ failure() && github.event_name == 'push' && github.repository == 'oppia/oppia' && github.ref == 'refs/heads/develop'}} + uses: ./.github/actions/send-webhook-notification + with: + message: "A typescript test failed on the upstream develop branch." + webhook-url: ${{ secrets.BUILD_FAILURE_ROOM_WEBHOOK_URL }} diff --git a/.gitignore b/.gitignore index 7e08b7a96f79..9e67b269c21d 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,10 @@ core/templates/prod/* webpack_bundles/* core/tests/.browserstack.env node_modules/* +# .angular is temp folder used by angular to put logs and other artefacts. +.angular/* +# dist is our angular build folder output. +dist/* .coverage* !.coveragerc coverage.xml @@ -48,6 +52,8 @@ yarn-error.log .lighthouseci .direnv/* .envrc +portserver.socket +temp-tsconfig-strict.json # Oppia uses cache slugs for various resources and we need separate resource # directories for dev and prod. 
Resource directories for prod are generated @@ -63,4 +69,3 @@ dump.rdb ui-debug.log firebase-debug.log debug.log -.python-version diff --git a/.isort.cfg b/.isort.cfg index 6997ecbad861..3c04d0e79abe 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -1,6 +1,6 @@ [settings] force_single_line=true -single_line_exclusions=typing +single_line_exclusions=typing,typing_extensions force_sort_within_sections=true ignore_whitespace=true known_third_party=apache_beam,backports.functools_lru_cache,browsermobproxy,cloudstorage,contextlib2,elasticsearch,firebase_admin,google.api_core,google.appengine,google.cloud,google.protobuf,mapreduce,mock,mutagen,pipeline,pkg_resources,psutil,pylatexenc,pylint,requests,requests_mock,selenium,six,skulpt,typing,webapp2,webapp2_extras,webtest,yaml diff --git a/.lighthouserc-1.js b/.lighthouserc-1.js index 85c7c349ba7c..3be1863c499c 100644 --- a/.lighthouserc-1.js +++ b/.lighthouserc-1.js @@ -38,7 +38,7 @@ module.exports = { }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/blog-dashboard$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': baseConfig['basePerformanceAssertions'] }, { 'matchingUrlPattern': 'http://[^/]+/about$', @@ -77,12 +77,15 @@ module.exports = { 'assertions': { // The YouTube embed on donate page loads images in jpg format, thus // we need to allow one image. - 'uses-webp-images': [ + 'modern-image-formats': [ 'error', {'maxLength': 1, 'strategy': 'pessimistic'} ], // The YouTube embed on donate page uses passive listeners. 
'uses-passive-event-listeners': ['error', {'minScore': 0}], - 'deprecations': ['error', {'minScore': 1}] + 'uses-rel-preload': ['error', {'minScore': 1}], + 'deprecations': ['error', {'minScore': 1}], + 'redirects': ['error', {'minScore': 1}], + 'uses-responsive-images': ['error', {'minScore': 0.8}] } }, { diff --git a/.lighthouserc-2.js b/.lighthouserc-2.js index 6a0cddeac71c..089f028fb8c3 100644 --- a/.lighthouserc-2.js +++ b/.lighthouserc-2.js @@ -33,17 +33,18 @@ module.exports = { { 'matchingUrlPattern': 'http://[^/]+/learner-dashboard$', 'assertions': { - 'uses-webp-images': [ + 'modern-image-formats': [ 'error', {'maxLength': 0, 'strategy': 'pessimistic'} ], // We need to use passive event listeners on this page so that // the page works correctly. 'uses-passive-event-listeners': ['error', {'minScore': 0}], - // Sign up redirects logged in user to learner dashboard page. + // Sign up redirects logged-in user to learner dashboard page. // Learner dashboard Page cannot be preloaded. 'uses-rel-preload': ['error', {'minScore': 0}], 'deprecations': ['error', {'minScore': 1}], - 'redirects': ['error', {'minScore': 0}] + 'redirects': ['error', {'minScore': 0}], + 'uses-responsive-images': ['error', {'minScore': 0.8}] } }, { @@ -86,14 +87,17 @@ module.exports = { 'matchingUrlPattern': 'http://[^/]+/create/.*$', 'assertions': { // TODO(#13465): Change this maxLength to 0 once images are migrated. - 'uses-webp-images': [ + 'modern-image-formats': [ 'error', {'maxLength': 3, 'strategy': 'pessimistic'} ], // We need to use passive event listeners on this page so that // the page works correctly. 'uses-passive-event-listeners': ['error', {'minScore': 0}], // MIDI library uses some deprecated API. 
- 'deprecations': ['error', {'minScore': 0}] + 'deprecations': ['error', {'minScore': 0}], + 'uses-rel-preload': ['error', {'minScore': 1}], + 'redirects': ['error', {'minScore': 1}], + 'uses-responsive-images': ['error', {'minScore': 1}] } }, { diff --git a/.lighthouserc-accessibility-1.js b/.lighthouserc-accessibility-1.js index c91a1bc03599..7ae1cfb3b82c 100644 --- a/.lighthouserc-accessibility-1.js +++ b/.lighthouserc-accessibility-1.js @@ -33,7 +33,9 @@ module.exports = { }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/about$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/admin$', @@ -41,28 +43,38 @@ module.exports = { }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/blog-dashboard$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/community-library$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/contact$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/contributor-dashboard$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/creator-dashboard$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/delete-account$', 'assertions': { - 'categories:accessibility': ['error', {'minScore': 0.99}] + 
'categories:accessibility': ['error', {'minScore': 0.98}] } }, { @@ -77,15 +89,21 @@ module.exports = { }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/get-started$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/learner-dashboard$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/moderator$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } } ] }, diff --git a/.lighthouserc-accessibility-2.js b/.lighthouserc-accessibility-2.js index e678bfcd8d3a..166e12202b50 100644 --- a/.lighthouserc-accessibility-2.js +++ b/.lighthouserc-accessibility-2.js @@ -35,11 +35,15 @@ module.exports = { }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/privacy-policy$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/profile/username1$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/signup?return_url=%2F$', @@ -51,11 +55,15 @@ module.exports = { }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/topics-and-skills-dashboard$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.9}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/terms$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.98}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/thanks$', @@ -69,7 
+77,9 @@ module.exports = { }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/create/.*$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.91}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/collection_editor/create/.*$', @@ -80,17 +90,19 @@ module.exports = { { 'matchingUrlPattern': '^http://127.0.0.1:8181/topic_editor/.*$', 'assertions': { - 'categories:accessibility': ['error', {'minScore': 0.86}] + 'categories:accessibility': ['error', {'minScore': 0.84}] } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/skill_editor/.*$', - 'assertions': baseConfig['baseAccessibilityAssertions'] + 'assertions': { + 'categories:accessibility': ['error', {'minScore': 0.91}] + } }, { 'matchingUrlPattern': '^http://127.0.0.1:8181/story_editor/.*$', 'assertions': { - 'categories:accessibility': ['error', {'minScore': 0.86}] + 'categories:accessibility': ['error', {'minScore': 0.84}] } }, ] diff --git a/.lighthouserc-base.js b/.lighthouserc-base.js index 0fba8acb5799..25d86ca01037 100644 --- a/.lighthouserc-base.js +++ b/.lighthouserc-base.js @@ -62,25 +62,20 @@ module.exports = { // Performance category. 'first-contentful-paint': [ 'warn', {'maxNumericValue': 1230000}], 'first-meaningful-paint': ['warn', {'maxNumericValue': 1280000}], - 'first-cpu-idle': ['warn', {'maxNumericValue': 1460000}], 'speed-index': ['warn', {'maxNumericValue': 1230000}], 'interactive': ['warn', {'maxNumericValue': 1540000}], 'max-potential-fid': ['warn', {'maxNumericValue': 130000}], - 'uses-responsive-images': ['error', {'minScore': 1}], 'uses-optimized-images': ['error', {'minScore': 1}], 'uses-rel-preconnect': ['error', {'minScore': 0.5}], 'efficient-animated-content': ['error',{'minScore': 1}], 'offscreen-images': ['error', {'minScore': 0.45}], 'time-to-first-byte': ['off', {}], // Best practices category. 
- 'appcache-manifest': ['error', {'minScore': 1}], 'errors-in-console': ['error', {'minScore': 1}], 'no-document-write': ['error', {'minScore': 1}], - 'external-anchors-use-rel-noopener': ['error', {'minScore': 1}], 'geolocation-on-start': ['error', {'minScore': 1}], 'doctype': ['error', {'minScore': 1}], 'no-vulnerable-libraries': ['off', {'minScore': 1}], - 'js-libraries': ['error', {'minScore': 1}], 'notification-on-start': ['error', {'minScore': 1}], 'password-inputs-can-be-pasted-into': ['error', {'minScore': 1}], 'image-aspect-ratio': ['error', {'minScore': 1}], @@ -89,13 +84,14 @@ module.exports = { } }, basePerformanceAssertions: { - 'uses-webp-images': [ + 'modern-image-formats': [ 'error', {'maxLength': 0, 'strategy': 'pessimistic'} ], 'uses-passive-event-listeners': ['error', {'minScore': 1}], 'uses-rel-preload': ['error', {'minScore': 1}], 'deprecations': ['error', {'minScore': 1}], - 'redirects': ['error', {'minScore': 1}] + 'redirects': ['error', {'minScore': 1}], + 'uses-responsive-images': ['error', {'minScore': 0.8}], }, baseAccessibilityAssertions: { 'categories:accessibility': ['error', {'minScore': 1}] diff --git a/.pylintrc b/.pylintrc index e3f010faa4f6..b5b2e66ac477 100644 --- a/.pylintrc +++ b/.pylintrc @@ -2,9 +2,6 @@ # # pylint core --rcfile=.pylintrc -i y -[GENERAL] -init-hook='import sys; sys.path.append("../oppia_tools/google_appengine_1.9.67/google_appengine")' - [MASTER] init-hook='import sys; sys.path.append(".")' @@ -65,26 +62,58 @@ indent-string=' ' allowed-comment-prefixes=int,str,float,bool,v forbidden-imports= - core.controllers:core.platform|core.storage, - core.domain:core.controllers, - core.storage:core.domain + *core.controllers*: + import core.platform*| + import core.storage*, + *core.domain*: + import core.controllers*, + *core.storage*: + import core.domain*, + *core.domain.*_domain: + from core.domain* import *_service*| + from core.domain* import *_cleaner| + from core.domain* import *_registry| + from core.domain* 
import *_fetchers| + from core.domain* import *_manager| + from core.platform import models disallowed-functions-and-replacements-str= - assertEquals=>self.assertEqual, - now=>datetime.datetime.utcnow, - beam.CombineValues=>beam.CombinePerKey + assertEquals=>self.assertEqual, + now=>datetime.datetime.utcnow, + beam.CombineValues=>beam.CombinePerKey disallowed-functions-and-replacements-regex= - \.put_async\(=>put, - \.put_multi_async\(=>put_multi + \.put_async\(=>put, + \.put_multi_async\(=>put_multi [SIMILARITIES] +allowed-type-ignore-error-codes= + attr-defined, + union-attr, + call-overload, + override, + return, + assignment, + list-item, + dict-item, + typeddict-item, + func-returns-value, + misc, + arg-type, + type-arg, + no-untyped-def, + no-untyped-call, + no-any-return, + call-arg, + index + ignore-imports=yes [MESSAGES CONTROL] disable=abstract-method, + ungrouped-imports, arguments-differ, broad-except, consider-using-ternary, @@ -102,6 +131,8 @@ disable=abstract-method, not-context-manager, redefined-variable-type, redundant-returns-doc, +# TODO(#12912): Python interpreter migration to 3.8.15 causing failing lint check. Fix it later. + redundant-u-string-prefix, too-many-arguments, too-many-boolean-expressions, too-many-branches, @@ -115,23 +146,18 @@ disable=abstract-method, # TODO(#12912): Remove these after the Python 3 migration. arg-name-for-non-keyword-arg, arguments-renamed, - consider-using-dict-items, consider-using-with, - cyclic-import, - deprecated-method, import-outside-toplevel, - missing-raises-doc, - no-else-break, - no-else-continue, - no-else-raise, non-explicit-keyword-args, not-an-iterable, - raise-missing-from, - super-with-arguments, unnecessary-pass, - useless-object-inheritance, consider-using-f-string, - consider-using-in +# TODO(#14322): Reinstate this. 
+ missing-type-doc, + # Pylint considers imports to be cyclic even when the cycle is + # broken by putting imports inside functions so they aren't + # executed upon module import. + cyclic-import, [REPORTS] diff --git a/.rtlcssrc b/.rtlcssrc new file mode 100644 index 000000000000..24e1f3dfedb8 --- /dev/null +++ b/.rtlcssrc @@ -0,0 +1,15 @@ +{ + "options": { + "autoRename": false, + "autoRenameStrict": false, + "blacklist":{}, + "clean": true, + "greedy": false, + "processUrls": false, + "stringMap":[], + "useCalc": false, + "aliases":{} + }, + "plugins": [ ], + "map": false +} diff --git a/.stylelintrc b/.stylelintrc index b0bc71f47bcc..a649f2c74de5 100644 --- a/.stylelintrc +++ b/.stylelintrc @@ -1,6 +1,5 @@ { - "extends": "stylelint-config-recommended", - "processors": ["@mapbox/stylelint-processor-arbitrary-tags"], + "extends": ["stylelint-config-recommended", "stylelint-config-html"], "plugins": [ "stylelint-order" ], @@ -19,10 +18,22 @@ "block-opening-brace-newline-after": "always-multi-line", "block-opening-brace-space-after": "always-single-line", "block-opening-brace-space-before": "always", + "color-hex-case": "lower", "color-hex-length": "short", "declaration-colon-space-after": "always", "declaration-colon-space-before": "never", + "no-descending-specificity": null, "order/properties-alphabetical-order": true, "length-zero-no-unit": true - } + }, + + "overrides": [ + { + "files": ["**/*.css"], + "rules": { + "indentation": 2, + "no-descending-specificity": true + } + } + ] } diff --git a/.yarnrc b/.yarnrc index 503a21b7da38..f19f9bd4e3eb 100644 --- a/.yarnrc +++ b/.yarnrc @@ -1,2 +1,2 @@ -yarn-path "../oppia_tools/yarn-1.22.10/bin/yarn" +yarn-path "../oppia_tools/yarn-1.22.15/bin/yarn" cache-folder "../yarn_cache" diff --git a/AUTHORS b/AUTHORS index ac06586421fd..37e1081fc4f5 100644 --- a/AUTHORS +++ b/AUTHORS @@ -11,8 +11,10 @@ Aadya Mishra Aaron Zuspan Aashish Gaba +Aashish Khubchandani Aashish Singh Aasif Faizal +Aayush Kumar Singh Abeer Khan Abhay 
Garg Abhay Gupta @@ -22,16 +24,22 @@ Abhijit Suresh Abhimanyu Thakre Abhishek Arya Abhishek Kumar +Abhishek Sultaniya Abhishek Uniyal Abhith Krishna Acash Mkj +Adam Halim Adarsh Kumar Aditya Dubey <74500dubey@gmail.com> Aditya Jain +Aditya Narayanm Aditya Sharma Adrija Acharyya Aishwary Saxena +Ajay Gurjar Ajo John +Akash Gupta +Akhilesh Ratnakumar Akshath Kaushal Akshay Anand Akshay Nandwana @@ -39,11 +47,15 @@ Alex Gower Alex Lee Alexandra Wu Allan Zhou +Allison Goldstein Alluri Harshit Varma +Aman Singh Jolly Amanda Rodriguez Amey Kudari +Amit Panwar Amulya Kumar Ana Francisca Bernardo +Ananth Raghav Andrew Low Andrey Mironyuk Anggoro Dewanto @@ -52,15 +64,22 @@ Ankita Saxena Anmol Mittal Anmol Shukla Anshul Hudda +Anshuman Maurya Anthony Alridge Anthony Zheng +Anthony Zhu Anubhav Sinha Anumeha Agrawal +Anurag Singh Anurag Thakur +Anurag Vats +Anurag Verma Apurv Bajaj Apurv Botle Archi Aggarwal Areesha Tariq +Ariel Cohen +Arjun Thakur <21bme057@nith.ac.in> Arkadyuti Bandyopadhyay Arnesh Agrawal Arpan Banerjee @@ -73,28 +92,36 @@ Ashish Verma Ashmeet Singh Ashutosh Chauhan Ashutosh Singla +Ashwath V A Assem Yeskabyl Aubrey Wells Aung Hein Oo Austin Choi +Ava Gizoni Avijit Gupta <526avijit@gmail.com> +Ayush Anand +Ayush Jain +Ayush Jha Ayush Kumar <2580ayush2580@gmail.com> Ayush Nandi Baiba Skujevska Barnabas Makonda Ben Targan Benjamin Beeshma +Bhavuk Jain Bill Morrisson BJ Voth Bolaji Fatade Boyd Y. 
Ching Brenton Briggs Brian Lin +Brijmohan Siyag Cathleen Huang Charisse De Torres Chase Albert Chen Shenyue Chin Zhan Xiong +Chirag Baid Chris Skalnik Christopher Tao Cihan Bebek @@ -102,6 +129,7 @@ Connie Chow Corey Hunter Céline Deknop Darin Nguyen +Darshan AbhayKumar Dawson Eliasen Debanshu Bhaumik Deepam Banerjee @@ -109,20 +137,28 @@ Deepank Agarwal Denis Samokhvalov Devi Sandeep Dharmesh Poddar +Dhruv Rishishwar Diana Chen +Dipto Chakrabarty Divyadeep Singh +Divyansh Khetan Domenico Vitarella Dong Wook Brian Chung Edward Allison Eesha Arif +Emil Brynielsson Emily Glue +Eric L'Heureux Eric Lou Eric Yang +Eshaan Aggarwal Estelle Lee Fang You Farees Hussain Felicity Zhao +Francois Gonothi Toure Frederik Creemers +Fumiya Goto Gabriel Fuentes Gagan Suneja Gangavarapu Praneeth @@ -130,7 +166,9 @@ Gautam Verma Geet Choudhary Geo Jolly Google Inc. +Gopi Vaibhav Grace Guo +Gustav Stappe Renner Hadyn Fitzgerald Hamlet Villa Hamza Chandad @@ -157,6 +195,7 @@ Ishan Singh Jackson Wu Jacob Davis Jacob Li Peng Cheng +Jaideep Sharma Jakub Osika James James John Jamie Lau @@ -165,16 +204,24 @@ Jared Silver Jasmine Rider Jasper Deng Jaswinder Singh +Jay Vivarekar Jayasanka Madhawa +Jayprahash Sharma Jaysinh Shukla +Jeevesh Garg Jenna Mandel Jerry Chen Jerry Lau +Jessica Li Jiazhi Chen Jim Zhan +Joel Lau +Jogendra Singh +Jogendra Singh John Glennon John Karasinski John Prince Mesape +Jonathan D Lake Jonathan Slaton Jordan Cockles Jordan Stapinski @@ -186,10 +233,15 @@ Joydeep Mukherjee Justin Du Jérôme K.N. 
Anantha Nandanan +Kaicheng Han Kajol Kumari +Karen Honorio Karen Rustad +Karina Zhang +Karishma Vanwari Kartikey Pandey Kashif Jamal Soofi +Kashish Bhandula Kate Perkins Kathryn Patterson Kefeh Collins @@ -197,6 +249,7 @@ Kenneth Ho Kerry Wang Keshav Bathla Keshav Gupta +Kevin Choong Kevin Conner Kevin Lee Kevin Thomas @@ -208,19 +261,27 @@ Konstantinos Kagkelidis Krishita Jain Krishna Rao Kristin Anthony +Kshitij Patil Kumari Shalini Kunal Garg Kyriaki Velliniati Lakshay Angrish Lara Davies Laura Kinkead +Leah Goldberg +Lev Bernstein Leyla Tuon Cao Linn Hallonqvist +Lontsi Jordan Lorrany Azevedo Lucklita Theng Luis Ulloa +Luiz D. M. Mainart Mahendra Suthar +Mai Elshiashi Mamat Rahmat +Manan Rathi +Manish Roy Manoj Mohan Marcel Schmittfull Mariana Zangrossi @@ -228,6 +289,8 @@ Mark Cabanero Mark Halpin Martin Smithurst Matt Higgins +Matthew Sumpter +Mattias Erlingson Maurício Meneghini Fauth Md Shahbaz Alam Meet Vyas @@ -241,6 +304,7 @@ Min Tan Mohammad Shahebaz Mohammad Zaman Mohit Balwani +Mohit Gupta Mohit Gupta Mohit Musaddi <96mohitm@gmail.com> Mohith Khatri @@ -261,6 +325,7 @@ Nikhil Handa Nikhil Nair Nikhil Prakash Nikhil Sangwan +Nils Johansson Nimalen Sivapalan Nisarg Chaudhari Nischaya Sharma @@ -276,8 +341,11 @@ Owen Parry Ozan Filiz Paloma Oliveira Pankaj Dahiya +Pankaj Prajapati +Pankaj Singh Parth Bhoiwala Parul Priyedarshani +Patel Muhammad Patrycja Praczyk Pawan Rai Pawel Borkar @@ -285,13 +353,18 @@ Philip Hayes Phillip Moulton Piyush Agrawal Pranav Siddharth S +Pranshu Srivastava Prasanna Patil Pratik Katte Prayush Dawda +Princika Rai +Priyansh Mehta <21bece080@iiitdmj.ac.in> +Prottoy Chakraborty Pulkit Aggarwal Pulkit Gera Purhan Purvi Misal +Qinghao Yang Radesh Kumar Rafay Ghafoor Rafał Kaszuba @@ -305,6 +378,7 @@ Rajat Talesra Rajendra Kadam Rajitha Warusavitarana Rakshit Kumar +Ramon Valdivia Raymond Tso Rebekah Houser Reshu Kumari @@ -314,6 +388,7 @@ Richard Cho Rijuta Singh Rishabh Rawat Rishav Chakraborty +Rishi Kejriwal Ritik Kumar Rizky 
Riyaldhi Robert Moreno Carrillo @@ -328,13 +403,16 @@ Sachin Gopal Saeed Jassani Safwan Mansuri Sagar Manohar +Sahil Jhangar Sajen Sarvajith Sajna Kadalikat +Sakshi Jain Samara Trilling Samriddhi Mishra Sandeep Dubey Sandeep Patel Sanjana Konte +Sanjay Saju Jacob Sankranti Joshi Santos Hernandez Sanyam Khurana @@ -344,24 +422,32 @@ Satmeet Ubhi Satwik Kansal Satyam Bhalla Satyam Yadav +Saurabh Balke +Saurabh Jamadagni Saurav Pratihar Savitha K Jayasankar Scott Brenner Scott Junner Scott Roberts Sean Anthony Riordan +Sean Zhong Sebastian Zangaro Seth Beckman Seth Saloni Shafqat Dulal +Shan Jiang Sharif Shaker +Shiori Nozawa Shiqi Wu Shiva Krishna Yadav <14311a05r1@sreenidhi.edu.in> +Shivam Chaudhary Shivam Jha <20bcs206@iiitdmj.ac.in> Shivan Trivedi Shivansh Bajaj Shivansh Dhiman Shivansh Rakesh +Shivkant Chauhan +Shobhan Srivastava Shouvik Roy Shruti Grover Shruti Satish @@ -369,18 +455,22 @@ Shubha Gupta Shubha Rajan Shubham Bansal Shubham Korde +Shubham Thakur +Shun Nagasaki Shuta Suzuki Siddhant Khandelwal Siddhant Srivastav Siddharth Batra Siddharth Mehta Simran Mahindrakar +Sougata Das Souhit Dey Soumyajyoti Dey Soumyo Dey Sourab Jha Sourav Badami Sourav Singh +Sreelaya Vuyyuru Sreenivasulu Giritheja Srijan Reddy Srikanth Kadaba @@ -388,6 +478,9 @@ Srikar Ch Stefanie Muroya Lei Stephen Hannon Steve Jiang +Subhash Kovela +SUBHASH THENUA +Subin Duresh Subodh Verma Sudhanva MG Sudipta Gyan Prakash Pradhan @@ -398,8 +491,11 @@ Taiwo Adetona Tanishq Gupta Tanmay Mathur Tarashish Mishra +Taylor Murray +Ted Tong Li Teddy Marchildon Tezuesh Varshney +Tham Wan Jun Tia Jin Tianqi Wu Timothy Cyrus @@ -407,6 +503,7 @@ Tonatiuh Garcia Tony Afula Tony Jiang Tracy Homer +Tran Quang Khai < vpeopleonatank@gmail.com> Travis Shafer Truong Kim Tuguldur Baigalmaa @@ -414,9 +511,12 @@ Tushar Mohan Ujjwal Gulecha Umesh Singla Utkarsh Dixit +Vaibhav Tripathi +Valeron Toscano Varazdat Manukyan Varun Tandon Vasu Tomar +Vedika Chandra Vibhor Agarwal Viet Tran Quoc Hoang Vijay Patel @@ 
-428,22 +528,30 @@ Vishal Desai Vishal Gupta Vishal Teotia Vishnu M +Vishnu Nithin Reddy Vojtěch Jelínek Vuyisile Ndlovu Wiktor Idzikowski Will Li +Winnie Xinyu Wu Y. Budhachandra Singh Yang Lu Yash Jipkate Yash Ladha <201551061@iiitvadodara.ac.in> Yash Santosh Kandalkar +Yavik Kapadia Yi Yan +Yijia Gao Yiming Pan Yogesh Sharma Yousef Hamza +Yuecheng Cao Yuliang +Yuri Pennafort Lemos Zach Puller Zach Wiebesiek Zachery Vekovius +Zaid Ismail +Zhan Liang Zoe Madden-Wood diff --git a/CHANGELOG b/CHANGELOG index 490aa4b3f521..3341f0788dc9 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,4 +1,1339 @@ -This file contains a summary of changes to the Oppia code base. +This file contains a summary of changes to the Oppia codebase up +to release 3.2.9. All the changes after that can be found in the commit +log of the develop branch: https://github.com/oppia/oppia/commits/develop + +v3.2.9 (31 Dec 2022) +------------------------ +Release team +* Fix package build (#16773) +* Fix constants to access files in correct folder (#16733) +* Edit .gcloudignore (#16655) +* Add new dispatch workflow (#16194) +* Use Cloud Secret (#16142) + +Learner/Creator Pages +* Fix #16590: trigger 'change' event when manually removing a widget from ckeditor (#16746) +* Fixed iframe exploration player (#16711) +* Fix donate issues (#16650) +* GA analytics updates (#16644) +* Introduce new Donate page (#16605) +* Fixed mailchimp web tag (#16524) +* Learner group view: Mobile view Improvements (#16496) +* Fixes an e2e flake in AdditionalEditorFeatures (#16495) +* Learner Group learners' progress in stories view review update (#16491) +* Allowing Uploading Images Larger than 1KB for blog (#16479) +* Learner Group UI improvements (#16468) +* Fix part of #15753: Fix curated exploration frontend validation checks (#16428) +* Fix part of #15753: End exploration interaction warning bug (#16389) +* Fix part of #15753: Fix frontend validation for numeric input interaction (#16388) +* Fix part of #15753: RTE link 
frontend validations (#16371) +* Fixes #16313 and Adds back M1.3 And M1.4 (Blog Integration) (#16323) +* FIX: #16119 Added a copy tooltip + corrected the save progress card indentation and padding (#16321) +* Fixes general issues in Blog Dashboard and #16272 -- Copy Of PR 16123 (#16299) +* Fix issue #16180 : Alignment of the header (#16293) +* Fix part of #15968 : Replace material icons with FA icons. (#16269) +* Fix: #15968 Replaced Material Icons with Font Awesome (#16267) +* Fix part of #15968: Material icons to Font Awesome icons in 3 files (#16249) +* Milestone 2.3: Implements Blog Author Page and feature flag to enable Blog Homepage feature. (#16243) +* Fix issue #15537 : Allow search execution by pressing the Enter key on the classroom page search bar (#16242) +* Milestone 2.1, 2.2, 2.3 (Learner group MVP): Learner Related Views of learner group (#16240) +* Fix part of #15968: Replaced material icons to font awesome in 3 files. (#16239) +* Fix part of #15968: Migrate files from Material Icon to Awesome Font Icon (#16233) +* Fix part of #15968: Migrate 3 files from Material Icons to Font Awesome Icons (#16230) +* Fix button copy in teach/about/splash pages (#16229) +* Removed solution dependence on hints (Helping learners when they get stuck). (#16228) +* Fix part of #15968: Replaces Material icons with Font Awesome icons in "add answer group", "add audio translation", "exploration save and publish buttons" pages. (#16223) +* Add mailchimp android list support (#16217) +* material-icons change to font-awesome icons. (#16206) +* Fix #14085: Added validation to restrict usage of unsupported math functions. 
(#16189) +* Fix part of #15968: Material-icons change to Font-awesome-icons (#16182) +* Add pt-br translations and few more CTAs for android beta launch page (#16161) +* Introduce GA4 events (#16158) +* Fix: #15157 Fixed window resizing events that hides the buttons and content of different modals in Skill Editor (#16147) +* Fixes general issues in Blog Dashboard (#16123) +* Fix #16078: Aligned play button (#16113) +* Introduce initial android landing page (#16071) +* Topic dependency graph input in the classroom (#16063) +* Fix #16015: Corrects the placeholder text (#16050) +* Fixed delete topic button is partially clickable (#16043) +* Milestone 1.3 and 1.4: Adds blog homepage and post page frontend (#16040) +* Fixed the end of the exploration dialog box not seems to be clickable (#16036) +* Fix part of #15968: Replaces Material icons with Font Awesome icons in some "skill editor" and "story editor" pages. (#16023) +* Fix part of #15753: Hide default feedback for multiple choice input when all choices addressed (#16014) +* Fix: #15994 added bootstrap class for close button (#16007) +* Fix part of #15753: Fix frontend validation for editing hint (#16000) +* Fix part of #15753: Fix frontend validation for image caption length (#15999) +* Fix #15985 and #16116: Save progress button behaviour (#15991) +* Class name change in profile-page.component.js file to avoid class name collision (#15990) +* FIX: #16119 Added a copy tooltip + corrected the save progress card indentation and padding (#15989) +* Fix part of #15753: Fix validation for outcome labelled as correct (#15984) +* Fix part of #15753: Fix frontend validation for numeric input (#15983) +* Fix #15892: Handle CD bugs (#15977) +* Fix #15875: Fixes a bug that was preventing displaying / tagging of misconceptions (#15966) +* UI for the Classroom Admin Page (#15953) +* Fix #15756 : adjusted the carousel arrow position (#15939) +* M1.4 & M1.5 (Helping learners when they get stuck): Add support for preventing 
learners from being directed back more than 3 cards in the lesson) (#15938) +* Replace material icons (#15926) +* Fix #15880 and #15924 (#15925) +* (Celebrating learners' accomplishments) Milestones 2.2 & 2.3: Add a congratulatory checkpoint message modal which is linked to the lesson info modal (#15916) +* Milestone 1.2: Adding Storage, Domain and Service layer for Blog Post Search functionality (#15914) +* Fix #15749 Display the topic name on top of the subtopic name on smaller screens (#15900) +* [Backend]: Adds the domain class for the classroom model (#15865) +* Fix #13397: Milestone 1.1 - Change 'published_on' to None if Blog Post is Draft (#15862) +* (Celebrating learners' accomplishments) Milestone 2.1: Add a utility service for checkpoint celebration (#15849) +* Milestone 1.5 (Learner group MVP): Add facilitator views of the learner group (#15840) +* Check for Validation Errors Before Reverting an Exploration (#15824) +* Functionality to add diagnostic test skills (#15742) +* Milestone 1.2 & Milestone 1.3 (Helping learners when they get stuck): Add the frontend changes for the new dest_if_really_stuck component and create new dotted links. (#15698) +* Fix #15528: Allow long options in item selection interaction to wrap (#15671) +* Fix remaining RTL issues (#15653) +* Fix part of N/A: Frontend validation for curated explorations (#15584) + +Dev Workflow +* Update Python to 3.8.15 (#16730) +* Fix #16028 and #12647: Cache pip dependencies and make E2E tests use shared webpack build (#16629) +* Fix #16542: Changes required for improved deploy process (#16616) +* FIx #16446: Avoid installing pylint-quotes from GitHub fork (#16474) +* Modify issue forms to make it easier to use (#16452) +* Replace yuicompressor with rcssmin (#16403) +* Change prereq library to new version and remove `typing_extensions` from some remaining places. 
(#16367) +* Fix part of #14219: Add tests for topic domain (#16304) +* Fix Issue forms (#16253) +* Modify frontend test job name (#16244) +* Introduce Issue forms for non-technical users (#16221) +* Drop obsolete todos for issue 14419 from the codebase. (#16074) +* Fix #15413: ViewDestroyedError Frontend flake and other flakes also present. (#16030) +* Fix #16010: Remove index.yaml order deduplication (#16011) +* Print overall backend coverage report (#15949) + +Miscellaneous +* Updates to GA methods (#16719) +* Add Amharic (አማርኛ) to supported languages (#16624) +* Upload profile picture error message 15793 (#16191) +* Fix part of #15942: Beam job generate missing stats models (#16090) +* Update authors and changelog for v3.2.8 (#15981) +* Fix part of #14419, changing the Enum class from screaming SNAKECASE to PascalCase. (#15931) +* Milestone 1.3 (Improving the lesson creation experience): Create the beam job. (#15681) +* Introduce GA4 alongside UA analytics (#16038) + +Angular Migration +* Fix #14368: Fix for rte output display showing residual text nodes. (#16700) +* Fix Part of #9749: Removal of deadcode i.e. ngjoyride && ui.sortable (#16412) +* Fix part of #15753 && #14119: validation bug fix in exploration tags editor in setting tab. (#16391) +* Fix part of #9749: Migration of ParamChangesEditorComponent from angularJs to Angular. (#16335) +* Fix part of #9749: migration of translation-tab from AngularJs to Angular. (#16295) +* Fix Part of #9749: Migration of Audio translation tab components, state-translation-editor. 
(#16284) +* Fix Part of #9749: Migration of Exploration-editor-tab, exploration-save-and-publish-buttons, exploration-save-prompt-modal (#16252) +* Fix of #16195: Highlight is not working properly in "Exploration Overview" when on "Translations" fixed, State-graph not showing warning fixed (#16209) +* Fix part Of #9749: Migrates Subtopic-Editor-Page to Angular (#16203) +* Fix part of #9749: Migrates Topic-Questions tab to Angular (#16138) +* Fix Part Of #9749: Migrates Topic Editor Navbar to Angular (#16107) +* Fix Part Of #9749: Migrates Entity-Creation Service to Angular (#16081) +* Fix #16058: Fraction rule input is not connected properly in the answer group input form (#16072) +* Fix part of #9749: Migration of improvements-tab, statistics-tab, feedback-tab in Angular (#16060) +* Fix Part of #9749: Migrates Skill-Editor to Angular (#16059) +* Migrate exploration player page to angular router. (#15965) +* Fix part of #9749: Migration of setting-tab (#15933) +* Fix Part of #10700: Refactor Object Factories (#15854) +* Remove instances of `$.ajax` (#15833) + +Bug fixes +* Fix #16588 Warning shown to User on Merging Skills of same topic (#16671) +* Fix part of #15753: Better frontend emptiness check for HTML content (#16429) +* Fix ckeditor RTE to HTML conversion (#16271) +* Fix #16079: World map preview not visible (#16248) +* Fix #16591: fixed the console error caused by a unhandled function (#16660) +* Small update to conversion function 52 to 53 regarding math RTE tag (#16614) +* Edited suggestion batch job to fix the translation of type list. 
(#16483) +* Remove unnecessary parts (#16400) +* Adds diagnostic test current topic status model (#16399) +* Fix #16330: added warnings for errors while adding the voice artists (#16396) +* Updated UI for the diagnostic player page (#16392) +* Fix of #16356: Context service flake (#16377) +* Adds diagnostic test topic tracker model (#16350) +* Fix issue #16283: Buttons overlap (#16345) +* Fix of Frontend Flake: cannot read properties of undefined (reading 'split'). (#16337) +* Revert "FIX: #16119 Added a copy tooltip + corrected the save progress card indentation and padding" (#16315) +* Revert "Milestone 1.3 and 1.4: Adds blog homepage and post page frontend" (#16314) +* Adds Diagnostic player page directory structure (#16291) +* Fix part of N/A: Added the button to fix the commit commands (#16290) +* Revert "Fixes general issues in Blog Dashboard" (#16273) +* E2E Flake: Error: Add Interaction button is not visible (#16237) +* Fix #15368: Broken UI for Volunteer Page (#16235) +* Fix export policy docstring for BaseCommitLogEntryModel to use the correct rationale. (#16196) +* Align search bar (#16190) +* Put android page behind a feature flag; Make more text translatable (#16108) +* topic and story viewer flake (#15941) +* Backend apis for the classroom admin (#15932) +* Controller and service changes for classroom (#15909) +* Add a test to detect breakages in the error reporting flow (#15902) +* Adds init file in storage/classroom directory. (#15894) +* FixPart of 9749: `?` removed + unknown removed + added some comments (#15859) +* Fix topicAndStoryEditor flake: create chapter button is not clickable. (#15853) +* Fix #15772: set correct image context in question player; fix decoding of foreign chars in SVGs (#15835) + +Uncategorized + + +Contributor Pages +* Fix #16536: Filter out obsolete translation suggestions in review contributor dashboard (#16578) +* Removed `voiceover application` from codebase. 
(#16343) +* Fix #13919: Include review errors in review message (#16306) +* Fix 16088 - [Contributor dashboard] Nit Change in Help Text for Hindi… (#16268) +* (Contributor Recognition Infrastructure) Milestone 2.1: Add new frontend endpoints to call the ContributorStatsSummariesHandler (#16153) +* Fix part of #16020: Fixed issue with lazy loading on CD (#16103) +* Fix part of #13055: Handle translation cards that only consist of digits. (#16067) +* (Contributor Recognition Infrastructure) Milestone 1.6: Implement controller layer for the project (#16044) +* Fix part of #16020: Fix UI issues on the Contributor Dashboard (#16025) +* (Contributor Recognition Infrastructure) Milestone 1.5: Write asynchronous tasks to send new rank notifying emails (#15960) +* (Making Contributor Dashboard UI Responsive) Milestone 2.2: Implement the new UI for translation submission modal (#15956) +* (Contributor Recognition Infrastructure) Milestone 1.4: Add required email services to notify contributors about levels they achieved (#15940) +* (Contributor Recognition Infrastructure) Milestone 1.3: Add required suggestion services to handle contributor stats (#15917) +* Fixed UI issues on the Contributor Dashboard raised by the translation team (#15868) +* Fix #15514: Add "contributor dashboard debug" flag to Oppia local dev server start script (#15857) +* Fix #11905, #15892: Navigate to initial page after closing suggestion review modal (#15851) +* (Contributor Recognition Infrastructure) Milestone 1.1: Introduce storage models for contribution stats (#15821) +* Fix #11686: Add filtering to the language selector drop-down in the Contributor Dashboard (#15809) +* (Making Contributor Dashboard UI Responsive) Milestone 2.1: Implement the new UI for question submission modal (#15784) +* Fix #15386: Improve experience for untranslatable cards (#15717) + +Data handling +* Remaining changes for the #15861 (#16525) +* Adds validation for rte tags in translation html for translation suggestion 
(#16481) +* Move some backend validations to execute in strict mode only. (#16457) +* Fix #11471: Flush cache before any job is run (#16439) +* Do not log unavailable Beam job as exception (#16425) +* Fixes part of #15942: Trigger stats model update during exp migration (#16352) +* Fix #13162: Added schemas to StateCompleteEventHandler, LeaveForRefresherExpEventHandler, and SuggestionEmailHandler. (#16148) +* Fix part of #13162: Added schemas to SubTopicViewerPage of subtopic_viewer.py (#16124) +* Fix #16031: Flush cache after job run (#16109) +* Fix part of #13820: Adds backend validation for RTE content and translations of exploration state (#16065) +* Fix part of #13162: Added schemas to TopicViewerPage of topic_viewer.py (#16054) +* Introduce audit migration jobs (#15928) +* Added schema to QuestionsListHandler of questions_list.py (#15872) +* Conversion function for exploration validation checks (#15861) +* Add exploration migration job (#15837) +* Fix part of #13821: Backend validation for general state interaction. (#15797) +* Fix part of #13822: Backend validation for general state (#15796) + +Automated QA Team +* Fix #16411: Upgrade Actions in GitHub Workflows (#16485) +* E2E Flake: Fixes Conversation not visible and English in exploration language is not visible (#16451) +* Increase max space for karma to fix "Browser Disconnected" issue (#16434) +* E2E Flake: All Exploration summary tile is taking too long to appear (#16426) +* Fix coverage flake in save-version-mismatch-modal.component.ts (#16414) +* E2E Flake: Error First RTE Element not visible (#16320) +* Fix exploration-editor-page coverage flake (#16280) +* Milestone 2.2 (Improve line and branch coverage for the frontend and the backend): Increased coverage in some backend files (#16231) +* E2E Flake: Mat Tab Toggle options take too long to appear. 
(#16225) +* Fix part of #14219: Adds test coverage for value generators (#16198) +* Milestone 2.1 (Improve line and branch coverage for the frontend and the backend): Increased coverage in some backend files (#16193) +* Fixes #16152: Updates the test to make sure temporary files are deleted (#16188) +* Fix #16168: Fix Context service flake in stats-reporting.service.spec.ts (#16177) +* Flake check frontend tests on merge into develop (#16174) +* Add tags to incoming issues (#16144) +* E2E Flake: Expected ' ' to equal {{some string}}. (#16139) +* Milestone 2.5, 2.6, 2.7 (Migrate away from protractor): Migrated few test suite from protractor to webdriverIO. (#16106) +* Fixing ffmpeg video reporter for webdriverio test suites. (#16080) +* Bug fix in `run_backend_test.py` (#16056) +* Fix a case of `First RTE element is not visible` (#16034) +* Fix a flake in the question editor player. (#16032) +* Milestone 2.3 and 2.4 (Migrate away from protractor): Migrated few test suite from protractor to webdriverIO. (#15969) +* Milestone 1.6 (Improve line and branch coverage for the frontend and the backend): Added associated test file for some scripts (#15964) +* (Celebrating learners' accomplishments) Milestone 2.4: Add E2E tests (#15946) +* Milestone 1.5 (Improve line and branch coverage for the frontend and the backend): Added associated test file for some scripts (#15919) +* Fix #13824: Clarify backend test shard error messages (#15908) +* Alert when E2E or Lighthouse tests fail on develop (#15903) +* Fixes fade in flake for statecontent editor. (#15893) +* Milestone 2.1 and 2.2 (Migrate away from protractor): Migrated few test suite from protractor to webdriverIO. 
(#15882) +* Moving from Ubuntu 18.04 to 22.04 in Github actions (#15879) +* Fix "Failed: undefined" Flake (#15867) +* Milestone 1.4 (Improve line and branch coverage for the frontend and the backend): Added associated test file for some scripts (#15700) +* (Making the Contributor Dashboard UI Responsive) Milestone 2.3: Adding mobile e2e tests for the Contributor Dashboard and changing the default 'collapsed' status of the cards in the state-editor (#15536) + +Server Errors +* Fix #16008: Add custom elements polyfill (#16404) +* Fix #16005: Add polyfill for Object.entries (#16185) +* Fixes several server errors (#15967) + +Typing +* Added Mypy type annotations to the `core/controller` directory's files. (#16342) +* Fix part of #13341: Added Mypy type annotations to the `script directory`'s pending files. (#16226) +* Fix part of #13341: Added Mypy type annotations to the `script directory`'s root level files. (#16169) +* Added Mypy type annotations to the `extensions` directory. (#16083) +* Fixes some assert issues. (#15980) +* Fixing ChangeDomain typing issue in the backend type annotations. (#15958) +* Fix part of #10474: Make typescript checks strict left in PR #15799 & #15620 (#15912) +* Added linters for exceptional types in backend type annotations (#15905) +* Added Mypy type annotations ~ miscellaneous (#15870) +* FixPart of #9749:?. removed from html files (#15869) +* Added Mypy type annotations to `core/test` (#15864) +* Fix part of #14033: Added Mypy type annotations to some files of domain folder. -- M1.10 (#15827) +* Fix part of #14033: Added Mypy type annotations to some files of domain folder. -- M1.9 (#15801) +* Fix part of #14033: Added Mypy type annotations to some files of domain folder. 
-- M1.7 (#15693) + +Speed improvement +* Fix part of #15968: Replace material icons with FA icons (#16227) + +Translations +* Routine update of translations (#16082) + +Python Migration +* Fix part of #14419: PARAM_NAMES, VALID_MODEL_NAMES, and FILTER_FIELD_NAMES refactored to ParamNames, ValidModelNames, and FilterFieldNames. Values changed to UPPERCASE (#15951) +* Fix #15567: Python Interpreter Migration from 3.7.10 to 3.8.12 (#15508) + + +v3.2.8 (03 Sep 2022) +------------------------ +Learner/Creator Pages +* Milestone 1.4 (Learner group MVP): Add facilitator dashboard and learner group creation journey (#15709) +* Merge the Lesson information modal on the top-navbar with the one on the footer (#15781) +* Fix #15892: Handle CD bugs (#15977) +* Fix #15883: fix html select component logic (#15974) +* Fix #15875: Fixes a bug that was preventing displaying / tagging of misconceptions (#15966) +* Fix #15885: fix image region interaction drag offset to account for scroll location (#15934) +* Fix release issues (#15907) +* Fix skill delete issue (#15881) +* Fix #13397: Milestone 1.1 - Change 'published_on' to None if Blog Post is Draft (#15862) +* Fix #15730: the category language button was incorrectly placed. I needed to adjust the CSS (#15843) +* Fix issue #14815: change padding in oppia-classroom-view-container, the search container (#15842) +* Fix part of #15757: Fix graph and image click interaction (#15808) +* (Celebrating learners' accomplishments) Make changes based on Web PMs' feedback (#15780) +* Adding Comment To Explain- Why Blog Post Models are not Versioned (#15775) +* Fix #15745 and #15744: Fix UI issues on learner-facing pages. 
(#15773) +* Fix #14974: Text no longer floats outside the answering box (#15766) +* Fix #15684: Resolve topic and skill editor dashboard UI issue on smaller screen sizes (#15741) +* Fixes backend flake introduced by #15626 (#15731) +* Fix #15480: Orientation of donate page not correct (#15727) +* Milestone 1.8 (Improving the lesson creation experience): Make changes in the controller layer and also create the backend api service. (#15689) +* Milestone 1.1 (Helping learners when they get Stuck): State migration for the field `dest_if_really_stuck` (#15644) +* (Celebrating learners' accomplishments) Milestone 1.3: Add milestone message and post chapter recommendations (#15634) +* Fix issue with 'Cancel Button' on customization modal when using the 'Copy Tool' (#15629) +* Milestone 1.2 (Learner group MVP): Add learner group related handlers and services (#15626) +* Milestones 1.4, 1.5, 1.6 and 1.7 (Improving the lesson creation experience): Allow users to see changes in the exploration metadata. (#15615) +* [Backend]: Adds classroom model (#15810) +* Update topic model and its related domain methods for accommodating diagnostic test skills. 
(#15694) +* Fix #15739: Link author list items to profile page in the lesson info modal (#15798) + +Angular Migration +* Fix #15874: Console error on trying to tag misconception to a question on skill editor page (#15906) +* Fix part of #9749: contributorDashboard ui fix (#15823) +* Fix part of #9749: Migrate contributor dashboard page, state graph visualization, version diff visualization and question directives (#15768) +* Fix Part of #9749: rule-editor, router.service, skill-editor-routing.service, answer-group-editor and other services/components (Question-creation service removed dead code) (#15674) +* Fix part of #9749: Migrates angular html bind (#15568) +* Implement application wide error handler (#15783) +* Fix major breakage on develop caused by #15674 (#15723) + +Contributor Pages +* Fix #11905, #15892: Navigate to initial page after closing suggestion review modal (#15851) +* Fix #15746: Don't set active topic to 'ALL' in contributor dashboard page. (#15841) +* Changed color of pagination buttons on the modals on the contributor dashboard. (#15839) +* (Making Contributor Dashboard UI Responsive) Milestone 1.4: Implement the new UI for question review modal (#15703) +* (Making Contributor Dashboard UI Responsive) Milestone 1.3: Implement the new UI for translation review modal (#15670) +* (Making Contributor Dashboard UI Responsive) Milestone 1.2: Implement the mobile UI for list item components (#15627) + + +Automated QA Team +* Avoid Collisions of Backend Coverage Filenames (#15847) +* Milestone 1.5 (Migrate away from protractor): Migrated few test suite from protractor to webdriverIO. (#15836) +* Revert "Milestone 1.5 (Migrate away from protractor): Migrated few test suite from protractor to webdriverIO." 
(#15830) +* Fix E2E flake: Trying to fix "Cannot read property 'getDirection' of undefined" flake in state content editor (#15819) +* Fix #14280: Try to fix blog page flake: "Cannot read property 'getDirection' of undefined" (#15818) +* Replace JS `fetch` with Angular `http` (#15816) +* Fix E2E Metrics Reporting and Reruns (#15815) +* Remove unnecessary state-editor-content code in e2e test. (#15806) +* Upload frontend coverage reports and webpack bundles as artifacts for debugging (#15791) +* Milestone 1.6 and 1.7 (Migrate away from protractor): Migrated few test suite from protractor to webdriverIO. (#15790) +* Add a waitFor to try and address "successful report" flake. (#15789) +* Fix 'ExplorationTitleInput is not visible' flake in the exploration editor. (#15788) +* Make pre_push_hook_test.py assertRaisesRegex stricter (#15786) +* Fix #13357: Rerun Downloads in scripts/ (#15782) +* Fix bug in check_backend_associated_test_file script (#15774) +* Fix #15728, #15729, #15720, #15721 frontend flakes introduced by #15674 (#15734) +* Milestone 1.5 (Migrate away from protractor): Migrated few test suite from protractor to webdriverIO. (#15726) +* (Celebrating learners' accomplishments) Milestone 1.4: Add E2E tests (#15725) +* Milestone 1.4 (Migrate away from protractor): Migrated few test suite from protractor to webdriverIO. (#15701) +* Milestone 1.3 (Improve line and branch coverage for the frontend and the backend): Updated run_backend_test script to calculate per-file branch coverage (#15697) +* Milestone 1.3 (Migrate away from protractor): Migrated few test suite from protractor to webdriverIO. (#15655) +* Milestone 1.1 (Improve line and branch coverage for the frontend and the backend): Checks to ensure that every backend file has an associated test file (#15625) +* Install Python Dev Dependencies to Virtual Environment (#15602) + +Typing +* Fixed customization arg rename migration issue. 
(#15844) +* Fix part of #10474: Make typescript checks strict for few more files (#15799) +* Fix #15716: MyPy's failing tests on develop. (#15771) +* Fix part of #14033: Added Mypy type annotations to some files of domain folder. -- M1.8 (#15762) +* Address pending review comments of #15475 (#15716) +* Added Mypy type annotations to the files of `core/jobs/batch_jobs` directory. (#15711) +* Added Mypy type annotations to the files of `core/jobs/transforms/validation` directory. (#15687) +* Fix part of #14033: Added Mypy type annotations to some files of domain folder. -- M1.6 (#15663) +* Fix part of #10474: Make typescript checks strict for few more files (#15620) +* #14033: Adding mypy type annotations to stats_domain.py (#15479) + +Data handling +* Add exploration migration job (#15837) + +Translations +* Update a string for item selection input (#15829) +* Routine update of translations. (#15777) + +Dev Workflow +* Fix part of #12912: Enable pylint 'super-with-arguments' (#15812) +* Fix #15654: Remove e2e classname used for styling in core/templates/css/oppia.css (#15811) +* Update CODEOWNERS to use teams (#15787) +* Attempt to suppress ev_epollex_linux error (#15776) +* Fix #15152, #15153, #15224, #14625: Move generation of RTL CSS into webpack (#15576) +* Increase memory limit for the webpack build process (#15943) +* Add hash in filenames of CSS files generated by webpack (#15863) + +Python Migration +* Fix part of #12912: Reenable `consider-using-dict-items` pylint rule (#15769) + +Bug fixes +* Fix CSS scoping (#15724) + + +v3.2.7 (05 Aug 2022) +------------------------ +Learner/Creator Pages +* Fix #15743 and #15685: UI issues with the search bar on the Math classroom page (#15778) +* Fix #15745 and #15744: Fix UI issues on learner-facing pages. 
(#15773) +* Fix #15147: filter box toggle issue on /topics-and-skill-dashboard (#15673) +* Milestone 1.3 (Learner group MVP): Add frontend models and backend service api for Learner groups (#15656) +* Fix #15650 and #15591: Update progress nav when state content changes; Update z-index for modals that need to be stacked (#15651) +* Fix #15150: Learner Dashboard fix for no scroll bar present to check the progress of community lessons if more than 3 lessons are present. (#15616) +* (Celebrating learners' accomplishments) Milestone 1.2: Modify the ratings-and-recommendations component and make sign up section dismissable (#15611) +* Fix part of #13764: Frontend validation for at least one explanation for Medium Rubric (#15609) +* (Celebrating learners' accomplishments) Milestone 1.1: Add end chapter check-mark and confetti (#15589) +* Fix #14199: Fixed math OSK UI for iOS devices. (#15585) +* Milestone 1.1 (Learner group MVP): Add learner group related models and domains (#15583) +* Milestone 1.1 (Improving the lesson creation experience): Create the new domain objects (#15582) +* Fix #15511: Add a Uploading animation when user clicks 'Use This Image' (#15564) +* Fix #15540: Misplaced icon in EndExploration interaction (#15558) +* Fix #15507: The mobile UI for the answer selection modal is offset to the left (#15520) +* Backend changes for Logged-out experience (User Checkpoints Project) (#15422) +* Fix part of #14702: Add the stale tab detection functionality for topic editor page (#15278) +* Fix #15739: Link author list items to profile page in the lesson info modal (#15798) +* Fix editor issues: schema-based-list-editor not populating the choices; editing images not working (#15704) +* Learner checkpointing: Follow up changes (#15547) +* Improved validation and minor fixes to clear the erroring deferred job queue (#15491) +* Frontend changes for logged-out experience (User checkpoints project) (#15482) + +Bug fixes +* Fix CSS scoping (#15724) + +Miscellaneous +* 
Rearrange import order to fix typing_extensions error when deploying (#15691) +* Add gtag.js to the app (#15683) +* Milestone 1.2 (Improving the lesson creation experience): Create the new storage model and implement its lifecycle (#15607) + +Automated QA Team +* Fix e2e CI error (Failed to load resource: the server responded with a status of 405 ()) (#15688) +* Fix #15676: Fixes the flake in ImageWithRegionsEditorComponent (#15678) +* Fix part of #14219: Increase Per-File Backend Test Coverage for tasks.py (#15566) +* Fix part of #14219: Increase Per-File Backend Test Coverage (#15356) +* Milestone 1.2 (Migrate away from protractor): Setup webdriverio and migrate few tests suite from protractor to webdriverIO (#15614) + +Dev Workflow +* Changes in logged-out User Checkpoints backend (Follow-up to #15422) (#15667) +* Fix Part of #10700: Refactor Object Factories (#15652) +* Add new flag "download_combined_frontend_file" to frontend test execution file (#15635) +* Fix part of #8668: Document url-interpolation.service.ts with jsdoc (#15628) +* Codeowner changes for LaCE team (#15619) +* Fix #15152, #15153, #15224, #14625: Move generation of RTL CSS into webpack (#15576) +* Remove import from utils (#15557) +* Fix codemirror (#15555) +* Purge python utils (#15441) +* Revert "Fix part of #14702: Add the stale tab detection functionality for topic editor page" (#15593) + +Release team +* Fix #15381: Update CODEOWNERS to add Nithesh (#15661) + +Data handling +* Allow bigger JobRunResult (#15658) +* Fix Part of #13162: Add schema for UsernameCheckHandler and ReaderFeedbackHandler (#15605) +* Fix Part of #13162: Add schema for RecommendationsHandler (#15597) +* Fix part of #13822: Added backend validation for sub-topic url fragment (#15500) +* Fix part of #13822: Added backend validation for story_is_published to be boolean (#15417) + +Contributor Pages +* Fix #15648: Remove limit from in-review translation suggestion query (#15649) +* (Making Contributor Dashboard UI 
Responsive) Milestone 1.1: Implement the mobile UI for dashboard tabs and navigation (#15594) +* Fix #15570: Fix change detection issue in question editor modal. (#15573) +* Fix #12425: Change Drag and Drop sort input so it displays the answer HTML instead of the contentID (#15496) +* Fix #15385: Added functionality to persist translator language on [CD - Translate Text]. (#15460) + +Typing +* Fix part of #14033: Added Mypy type annotations to some files of domain folder. -- M1.5 (#15640) +* Fix part of #14033: Added Mypy type annotations to some files of `domain` folder. -- M1.4 (#15618) +* Fix #13500: Data passed to platform storage service is always in bytes (#15598) +* Fix part of #14033: Added Mypy type annotations to some files of `domain` folder. -- M1.3 (#15596) +* Fix part of #14033: Added Mypy type annotations to some files of `domain` folder. -- M1.2 (#15580) +* Fix part of #10474: Enable typescript strict mode for all the newly added files and number of errors along with errors log. (#15578) +* Improve splash page loading speed : Remove material icons (#15549) +* Added Mypy type annotations to `validation_decorators.py` and `job_result_transforms.py` (#15501) +* Fix part of #10474: Cover ExtractImageFilenamesFromModelService with strict checks (#15495) +* Fix part of #14033: Added Mypy type annotations to `param_domain` and `story_domain` (#15483) +* Fix part of #14033: Type annotate html_cleaner.py (#15435) + +Angular Migration +* Fix part of 9749: Migration of training-data.service.ts, training-panel, exploration-warnings.service.ts, parameter-metadata.service.ts and more. 
(#15604) +* Fix Part of #9749: Migrate Outcome editor (#15438) + +Speed improvement +* Improve loading speed of splash and math page (#15477) + + +v3.2.6 (03 Jul 2022) +------------------------ +Contributor Pages +* Fix #15552 and part of #15639: Fix contributor dashboard (#15666) +* Fix #15648: Remove limit from in-review translation suggestion query (#15649) +* Fix #15431: Update skill opportunity progress percentage denominator to 10 (#15450) +* Fix #15321: Show review message for accepted suggestions (#15405) +* Fix part of #15182: Organize translations in translation review page by lesson (#15295) + +Speed improvement +* Improve loading speed of splash and math page (#15477) +* Fix images on splash (#15459) + +Bug fixes +* Fix #15638: Fix broken page and broken FA icons (#15641) +* Fix pre-commit linter and release scripts (#15608) + +Learner/Creator Pages +* Fix #15369: Users can now use the keyboard to navigate the Learner Dashboard (#15556) +* Fix #15538, #15541, #15534, #15527: Fix console error when previewing a skill; fix invalid profile URL in the history tab; fix controller schema validation error (#15553) +* Fix edits allowed banner showing when the page hasn't loaded (#15510) +* Fix svg sanitization logic to handle mixed camelCase attrs correctly (#15492) +* Fix #15370: Can report an exploration from the keyboard (#15488) +* Enable translations for the topic name in the "Master Skills for..." 
text on the practice tab (#15481) +* Fix part of #15444: Add delay to fix issue with dropdowns on the exploration-settings-page (#15474) +* Prevent page from reloading when attempting to navigate to the learner dashboard while currently on the learner dashboard (#15448) +* Fix part of #14702: Add the stale tab and unsaved changes detection functionality for story editor page (#15443) +* Fix #15246: Prerequisite skills from other topics now load properly in the chapter editor (#15442) +* Fix part of #14702: Add the stale tab and unsaved changes detection functionality for skill editor page (#15440) +* Fix #15376: Make classroom page responsive (#15418) +* Add admin utility to rollback explorations (#15397) +* Fix #15375: Make splash page's benefits subsection responsive on wider screens (#15391) +* Fix part of #13822: Add validation for Curated Lesson Category (#15342) +* Fix part of #14219: Increase interaction_registry test coverage to 100% (#15339) +* Fix #14261: Voiceover of multiple choice components (#15315) +* Fix part of #15134: Add a border to the language selector button (#15305) +* Fix #15263: Make titles on exploration cards readable (#15297) +* Fix #14085: Deprecate math interactions' unsupported rules and update cust arg name. 
(#15271) +* Introduce backend changes for Logged in user checkpoints (#15213) +* Fix #15526 and #15523: Console errors when ENTER key is used in a numeric input field and Unable to enter numbers in formats using a comma as a decimal separator (#15575) + +Dev Workflow +* Remove import from utils (#15557) +* Fix codemirror (#15555) +* Fix how we revert wiki changes from web interface (#15494) +* Fix #15489: Upgrade pip-tools to v6.6.2 for pip 22.1.1 support (#15490) +* Upgrade pip-tools to v6.6.1 for pip 22.1 support (#15465) +* Revert updates to the wiki made through the web interface (#15446) +* Purge python utils (#15441) +* Fix #15425: Increase max_old_space_size for webpack (#15437) +* Upgrade Beam to 2.38.0 (#15400) +* Run clean up before build and ensure webpack_bundles is non-empty (#15317) +* Fix bug in hanging indent checker (#15226) +* Fix secret names for revert-web-wiki-updates workflow (#15467) +* Make instructions for getting help clearer in the PR template. (#15355) + +Typing +* Fix part of #14033: Added Mypy type annotations to `fs_services` and `rating_services` (#15424) +* Fix part of #14033: Type annotate user_query_services.py (#15395) +* Add Mypy type annotations to pending files of storage models. (#15390) +* Add schema for params dict in reader controller (#15388) +* Add Mypy type annotations to pending files of jobs/types directory. 
(#15373) +* Fix part of #10474: Strict checks for skill-selector.component.ts (#15310) +* Fix part of #13341: Type annotate typescript_checks (#15289) +* Fix part of #14033: Added MyPy type annotation to the file role_services.py (#15286) +* Fix part of #13341: Type annotate common.py (#15258) +* Fix part of #10474: Make typescript checks strict for few more files (#15257) +* Type annotate classroom_services (#15241) +* Fix part of #13341: Type annotate regenerate_requirements.py, run_frontend_tests.py and run_custom_eslint_tests.py (#15208) +* Fix part of #10474: Make typescript checks strict for few more files (#15089) + +Release team +* Fix deployment issue on MacOS (#15454) + +Translations +* Routine update of translations (#15449) +* Fix part of #14960: Page title translations - 2 (#15445) +* Fix part of #14960: Page title translations - 1 (#15304) + +Automated QA Team +* Relax Lighthouse uses-responsive-images assertion (#15434) +* Extend failure notifications to backend, lint, and type checks (#15416) +* Fix Context Service Flake in Translation Tab Component (#15398) +* Fix part of #14219 :Increased coverage to 100% for user.gae_models_test (#15393) +* Fix frontend time flake and other miscellaneous fixes (#15389) +* Fix part of #14219: Increase role_services test coverage to 100% (#15384) +* Extend lighthouse timeout for retrieving sourcemaps (#15379) +* Fix part of #14219: Raise backend test coverage to 100% for core.constants.py (#15292) +* Fix part of #14219: install_third_party_libs.py (#15290) +* Fix part of #14219: Increase core.jobs.job_test_utils_test to 100% test coverage (#15272) +* Fix part of #14219: Increasing backend test coverage of core.utils_test to 100% (#15161) + +Data handling +* Fix part of #14419: Changed the format of REPORT_TYPE class (#15296) +* Fix part of #14419: Changed the format of PARAM_NAMES class (#15291) +* Fix part of #13822: Backend validation of skill medium rubrics explanations. 
(#15235) +* Fix part of #14537: Refactor fs_domain (#15221) +* Fix part of #13822: Validation for rubric explanations (#15173) +* Add job to populate mailchimp database (#15120) +* Fix part of #13341: Type annotate build.py, check_e2e_tests_are_captured_in_ci.py, check_frontend_test_coverage.py (#15104) +* Refactor param domain file (#15048) +* Fix part of #13162: Add Schema for AnswerSubmittedEventHandler (#14494) +* Improve validation and minor fixes to clear the erroring deferred job queue (#15491) + +Angular Migration +* Fix part of #9749: Migrate Schema Based Editors (#14776) + + +v3.2.5 (04 May 2022) +------------------------ +Contributor Pages +* Fix #15321: Show review message for accepted suggestions (#15405) +* Fix #14715 Stop update requests when no changes are detected (#15053) +* Support legacy translation command in the translation suggestion review edit flow (#15333) + +Learner/Creator Pages +* fix few learner exp quality issues and add 'more' button for feedback updates in the learner dashboard (#15273) +* Fix #15253: update skill editor when state changes; Add discard button in SVG editor (#15254) +* fix change detection issue in skill review editor (#15247) +* Update index.yaml and fix change detection issues in skill / topic editor (#15242) +* Fix #15234: Fixed issue with slider (#15237) +* Fix #15023, #15037: Hide learner suggestions in the feedback tab and show correct open feedback count; Fix errors in skill questions tab (#15220) +* Fixed some UI issues in sidenav and topnav (#15218) +* Removed unnecessary calls to analytics service (#15154) +* Fix part of #14702: Add favicon and staleness detection services and add some functions in local storage service. (#15100) +* Fix #11560: Add "Lesson Info" Modal. (#15065) +* Goals tab based on learner dashboard doc (#14987) +* Fix part of #14085: Added MatchesUpToTrivialManipulations rule for math interactions. 
(#14977) +* Add logs to debug server error in rte parser (#14897) +* Fix stacking issue with "tag misconception modal" (#15267) + +Dev Workflow +* Upgrade Beam to 2.38.0 (#15400) +* Use group in codeowners and remove Hardik (#15276) +* Fail noisily if a process crashes. (#15250) +* Upgrade frontend and backend libs (#15243) +* Update comments of non-null assertion. (#15236) +* Fix #15225: Skip pylint cyclic import check (#15229) +* Transfer code ownership from @DubeySandeep (#15219) +* Use our fork of pylint-quotes (#15216) +* Fix #13991: Fix flaky build tests (#15187) +* Update PR template (#15054) +* Fix part of #7450: Replaced private methods in SuggestionServicesUnitTests (#15001) +* Revert "Fix part of #10474: Make typescript checks strict for some files." (#15252) +* fix race condition causing fe test to be flaky (#15230) + +Release team +* Add step to inform server errors team before deploying release (#15383) +* Update branch protection rule prompt (#15281) +* Update maintenance mode to return status code as 200 instead of 503 (#15210) + +Data handling +* Fix part of #15311: Fix takeout in app feedback and add more granular debugging (#15359) +* Fix part of #13822: Add backend validation check for question state schema version. (#15264) +* Fix part of #13341: Type annotate create_expression_parser.py, create_topological_sort_of_all_services.py, install_chrome_for_ci.py (#15186) +* Fix part of #13341: Type annotate clean.py, run_tests.py, scripts_test_utils.py (#15181) +* Fix part of #14351: Enforce normalized data (#15044) +* Fix part of #13822: Added beam jobs for skill misconception ID validation (#15039) +* Fix part of #14085: Added audit job for math interactions. 
(#14952) +* Fix part of #14537: Refactor improvements domain (#14866) + +Speed improvement +* Improve speed of splash and learn/math (#15275) +* Improve speed of splash and learn/math (#15142) +* Revert "Improve speed of splash and learn/math" (#15223) + +Typing +* Removing all annotated files from core/jobs. (#15228) +* Fix part of #14033: Added Mypy type annotations to some files. (#15119) +* Fix part of #14033: Type annotate translatable_object_registry (#15118) +* Fix part of #10474: Make typescript checks strict for few more files (#15085) +* Fix part of of #14033: Type annotate email_services file (#15062) +* Fix part of #14033: Added Mypy type annotations to `user_domain.py` (#15057) +* Fix part of #10474: Make typescript checks strict for some files. (#15049) +* Fix part of #14033: Added Mypy type annotations to `collection_domain.py` (#14958) + +Angular Migration +* Fix part of #11329: oppia-interactive-music-notes-input.directive (remove addClass) (#15151) +* Fix Part of #9749: Migrates Music-Notes Input Interaction to Angular (#14841) +* Fix Part of #9749: Migrate End exploration interaction with separate backend-api-service. (#14766) + + +v3.2.4 (29 Mar 2022) +------------------------ +Learner/Creator Pages +* Fix #15023, #15037: Hide learner suggestions in the feedback tab and show correct open feedback count; Fix errors in skill questions tab (#15220) +* Fix #15188: Fix Drag and Drop interaction short response html so it displays _html instead of _contentId (#15211) +* Fixes 15159, 15156, 15163, 15180, 14752 (#15192) +* Fix #15166: Add hacky translation support for breadcrumbs on classroom related pages (#15167) +* Removed unnecessary calls to analytics service (#15154) +* Fix #15101: Question submission image issue (#15131) +* Fix #15123: Show classroom promo in navbar regardless of user state (#15125) +* Fixes scroll issues (#15114) +* Fix base64 parsing issue in svg editor upload functionality (#15109) +* fixes broken UI on lessons tab. 
(#15068) +* Fix #15024: fixes feedback's reload behaviour. (#15067) +* Fix #14944: Copy text in translation mode (#15061) +* Fix #14855: Edits navbar to ensure learning view does not break (#15045) +* Fix part of #14219: Make test coverage in question_fetchers 100% (#15040) +* Fixes #15005: Topic prerequisite skill checking is broken and prevents topic being published. (#15025) +* Fix part of #14702: Add entity editor browser tabs info domain object (#15019) +* Fix #14982, Fix part of #14960: Fixing search bar's language and categories drop-down and repetition of search results. (#14991) +* Fix part of #14960: UI improvements on exploration and classroom pages (#14979) +* Fix part of #14960: Updated UI of preferred languages chip (#14978) +* Fix part of #14960: Open chapter from the topic page in the same tab instead of opening in a new tab. (#14976) +* Fix part of #14960: Remove extra margin from beneath the lesson completion card (#14964) +* Fix part of #13822: Backend validation for non-duplicate exploration rights user IDs (#14962) +* Added new menu item `about oppia foundation` (#14961) +* Fix part of #14829: Added RTL formatting for remaining files. (#14934) +* Fix #14845: Problems related to skill editor component in the exploration editor (#14933) +* Fix #14921: Made Image-editor component and edit-profile-picture component responsive (#14932) +* Check uniqueness of topic name without considering leading and trailing spaces (#14925) +* Fixes UI Inconsistencies in the Review Player (#14919) +* Add some debug info to track #14708 if it occurs again (#14915) +* Learner dashboard fixes based on learner dashboard doc (#14898) +* Fix #14793: Strip invalid tags and attribute from SVGs instead of showing a warning. (#14876) +* Fixes The order of In-progress lessons. (#14868) +* Minor UI enhancements in Goals Tab on Learner Dashboard. 
(#14863) +* Fix #14712: Added warning checks for author recommended exploration ids (#14853) +* Change hover behaviour of cards on the learner's dashboard (#14852) +* Minor UI improvements in Navbar (#14837) +* Minor UI changes in side-navbar. (#14836) +* Fix part of #13764: set minimum number of questions for Topic until practice tab can be displayed (#14799) +* Added new empty lessons tab and fixes feedback tab's title position. (#14768) +* Fix #14788: Results page now doesn't show skills with no questions (#14762) +* Fix #14634: Add checks to validate explorations before adding them as chapters (#14747) +* Switch default values of autogenerated text-to-speech and correctness feedback for explorations (#14746) +* Fix #13010: Allow learners to enter decimals in formats other than English (#14744) +* Fix #14646: Make display of concept card link inline. (#14731) +* Fixes creator dashboard button's screen position, added hover effect on main menu and removed extra unit spaces. (#14721) +* Fix #14667: Adds I18N keys for preference page (#14685) +* Fix #14621: Change concept-card modal's z-index so it stacks correctly over other modals (#14670) +* Fix #14432: Shows previous subtopic card when at last subtopic of the topic. (#14660) +* Fixes Learner Dashboard Issues (#14654) +* Fixes #14636: Bug on collection player page on mobile devices (#14637) +* Fix part of #14219: Increasing backend test coverage of core.domain.story_fetchers_test to 100% (#14632) +* Changing the login overlay to have the new UI (#14607) +* Align lesson titles on collection page so they don't overlap with the path line (#14591) +* Fixes practice tab's start button behaviour (#14583) +* Fix #14431: Resolves all navbar related fixes for rtl languages (#14559) +* Change layout of exploration and collection cards on community-library for mobile UI (#14527) +* Fix #13566: Updates language names in voiceover dropdown and other fixes. (#14523) +* Implemented new UI changes in lessons page (mobile view). 
(#14512) +* Redesign Search bar and Fix #14447: Change dropdown buttons' z-index so they don't appear over the sidebar (#14486) +* Add RTL support to the major viewer pages (#14375) +* Fix #13737: Voiceover does not repeat when user submits answer before voiceover has finished (#14367) +* Fix part of #14085: Making math interactions consistent with the android definitions. (#14197) +* Fix part of #13764: Story notes and skill rubric explanation validation. (#14159) +* Registers translatable fields in Exploration, Question & State based on new translation architecture. (#14534) +* Fix base64 to unicode decoding issue (#15002) +* Fix part of #13764: For curated lessons outcome refresher_exploration_id should be None (#14908) +* Fix 14787: Fixes topic editor tab when wrong URL fragment is entered in `Topic URL Fragment` (#14903) +* Fix #15011: Replace choppy animation and introduce fade in out as a short term fix (#15081) +* Fix part of #11329: conversation-skin.directive (remove: addclass and removeclass) (#15066) +* Fixed different behavior of navbar items. (#14862) +* Skip regenerating summary with new contributor if exp is not found (#14846) +* Fixes #14704: Uploading of exploration in yaml format. (#14800) + +Speed improvement +* Improve speed (#15116) + +Release team +* Fix path for app yaml file (#15107) +* Allow option to choose default browser (#15076) +* Add CORS header to app.yaml (#15043) +* Remove usage of credits form for changelog generation (#14911) +* Re-add rtl css to develop (#14907) +* Update authors and changelog for v3.2.3 (#14904) +* Allow multiple remote urls to be used for release (#14894) +* Fix cloud sdk version and add a check to avoid this issue in future (#14616) +* Fix #14354, #14328 and #57 (on release-scripts): Multiple Release script fixes (#14380) + +Translations +* Routine update of translations. 
(#15092) +* Fix #14998: Extended hacky translations for next recommended chapter name translation (#14999) +* Fix #14947: Mcq options translations not showing up (#14948) +* Fix #14881: Extended hacky translations for navbar topic titles (#14882) +* Routine update of translations. (#14789) +* Implementation of Hacky translations for all classroom-related pages (#14631) +* Adds Base structure for the new translation architecture (#14515) + +Contributor Pages +* Fix part of #14355 Allow filtering reviewable translation suggestions by topic [Frontend] (#15074) +* Fix #14826: Implement pagination in Contributor Dashboard Contributions tab. (#15072) +* Fix part of #14355 Allow filtering reviewable translation suggestions by topic [Backend] (#15070) +* Fix #14358: Slow load times for translation opportunities (#14784) +* Fix #14206: Unable to remove question contribution rights in admin dashboard (#14577) +* Fixed length of filter by topics container (#14561) +* Fix #11761: Add a dropdown to replace the sidebar in contributor's dashboard when the screen becomes too narrow (#14342) + +Data handling +* Fix part of #13341: Type annotate third_party_size_check.py, start.py, setup_gae.py (#15069) +* Fix part of #13822: Add backend validation check for story description. (#15038) +* Fix part of #14419: ANDROID_TEXT_SIZE and ANDROID_NETWORK_TYPE enums refactored (#15015) +* Fix #10383: fix nerdamer typing (#14963) +* Fix Part of #13162: Adds schema for handler UserInfoHandler class (#14922) +* Upgrade lighthouse (#14917) +* Replace validation method with object class (#14901) +* Fix part of #13162: Added SiteLanguageHandler schema (#14886) +* Fix part of #13162: Added Schema for ExportAccountHandler (#14880) +* Fix part of #13162: Added Urlhandler schema with required changes (#14879) +* Topic domain refactor (#14844) +* Fix part of #14537: Refactor feedback domain file. 
(#14802) +* Fix part of #14537: Refactor wipeout domain (#14764) +* Fix #14751: Allow int in float isinstance checks (#14758) +* Fix Part of #13162: Adds schema for handler FetchSkillsHandler class (#14749) +* Fix part of #13822: Exploration title should have max length of 36 (#14748) +* Refactor beam_job_domain file (#14742) +* Fix #14705: Allow any param dict, add more files to GCloud ignore file, fix MyPy bug by adding stubs (#14724) +* Fix #14022: Tidy-up admin related pages and remove remainders of MapReduce jobs (#14663) +* Fix part of #13162: Added Schema for EditableSkillDataHandler (#14623) +* Fix Part of #13162: Added Schema for SignUpPage (#14614) +* Fix part of #14555: Implement a schema type to validate variable no of keys in dict (#14571) +* Fix part of #14383: Refactor comma_separated_*_ids (#14563) +* Fix part of #14537: Introduce lint checks for importing services inside domain files (#14553) +* Fix part of #13162: Added schema for ExplorationEmbedPage (#14481) +* Job to extract email, collection id of users who created collection (#14461) +* Fix part of #13162: Setup schemas on topic editor (#14356) +* Skill opportunity job (#14324) + +Miscellaneous +* Fix tests/methods that refer to private functions in Python inside core.domain.exp_services_test.py (#15060) +* Fix #14827: Clarifies the error messages for NumericInputInteraction (#14918) +* Upgrade libs (#15127) +* Revert "Improve speed" (#15121) +* Transfer codeowner files from Eric to Kevin (#14753) +* Update README file to add social media (#14745) +* Update year to 2022 (#14700) +* Fix frontend test flake caused by #14342 (#14638) +* Fix #14524: Frontend testing (#14619) +* Revert #14470 (#14613) + +Angular Migration +* exploration-improvements.service.ts from AngularJS to Angular (#15051) +* dead code (#15028) +* Fixed part of #9749: Migrated concept-card-editor and skill-preview modal (#14939) +* Fix Part of #10700: Refactor Object Factories (#14888) +* Fix Part of #9749: 
state-interaction-editor from AngularJS to Angular (#14873) +* Fix part of #9749: Migrated outcome-destination-editor (#14865) +* Fix Part of #9749: Migrate State content editor (#14838) +* Fix part of 9749: customize-interaction-modal.component migrated from AngularJS to Angular (#14798) +* Fix part of #9749: Migrated state-solution-editor component (#14790) +* Fix Part of #9749: Migrate and Redesign DragAndDrop interaction (#14774) +* Fix part of #9749: Migrated add-or-update-solution modal component (#14771) +* Migrate ExplorationStatesService and GraphDataService to Angular 2+ (#14701) +* Fix part of #9749: Migrate whole NumberWithUnits interaction directive. (#14681) +* Fix part of #9749: Migrated ques-misconception-editor (#14672) +* Fixes Part of #9749: Migrate Whole RatioExpressionInput directive interaction (#14661) +* Fix part of #9749: Migrate Solution-editor & solution-explanation-editor to angular (#14656) +* Fix Part of #9749: Migrates Interaction Pencil Code Editor to Angular (#14624) +* Fix Part of #9749: Migrate whole ItemSelectionInput directive inside interaction. (#14593) +* Fix part of #9749: Migrated reviews-materials-editor component (#14590) +* Fix part of #9749: Migrated skill-misconception-editor directive (#14588) +* Fix part of issue #9749: State-hints-editor, hint-editor, response-header component from angularJS to angular (#14576) +* Fix part of #9749: Migrated select-skill-difficulty-modal, question-editor-save modal, confirm-question-exit-modal etc. 
(#14565) +* Fix part of #9749: Migrate translation-suggestion-review-modal (#14546) +* Fix Part of #9749: Migrate change subtopic assignment modal (#14493) +* Fix part of #14187: Migrate contribution-and-review-service to angular and fully cover it with frontend tests (#14471) +* Fix part of 9749: Migrate help-modal.controller (#14406) +* Fix part of #9749: Migrate conversation skin, conversation skin embed and exploration player page directives (#14087) + +Typing +* Fix part of #10474: Make typescript checks strict for few more files. (#15046) +* Fix part of #14033: Added MyPy type annotation to the file image_services.py (#15032) +* Fix part of #10474: Make typescript checks strict for few more files (#15027) +* Fix part of #14033: Added Mypy type annotations to some files. (#15008) +* Fix part of #10474: Make typescript checks strict for few more files (#14990) +* Fix part of #10474: Make typescript checks strict for subtopic_viewer/*.ts folder (#14989) +* Fix part of #10474: Make typescript checks strict for few files (#14988) +* Fix part of #14033: Added Mypy type annotations to `activity_services.py` (#14986) +* Fix part of #10474: Make typescript checks strict for few files. (#14983) +* Fix part of #14033: Type annotate user_query_domain (#14931) +* Fix part of #10474: Make typescript checks strict for skills-list.component ans spec (#14871) +* Fix Part of #13015: Removing use of `unknown` as a type in Interactions (#14775) +* Added strict tests for more files (#14664) +* Fix part of #10474 : Make typescript checks strict for `core/templates/components/skill-selector/merge-skill-modal.component.ts` (#14644) +* Fix part of #10474 : Make typescript checks strict for `core/templates/domain/editor/undo_redo/base-undo-redo.service.ts` (#14633) +* Fix part of #10474: Make typescript checks strict for ExplorationObjectFactory (#14600) +* Fix part of #14033: Type annotate Topic domain (#14595) +* Fix part of #14033: Added Mypy type annotations to some files. 
(#14469) + +Developer UX +* Upgrade elasticsearch and limit its memory usage (#15033) +* Fix part of #7450: Replace testing of private methods in BuildTests (#15020) +* Add error message to debug flaky test (#14970) +* Fix Part of #11496: Add lint check for browser.switchTo().activeElement() method (#14959) +* Upgrade frontend and backend libs (#14902) +* Upgrade to fixed version of pip-tools (#14860) +* Catch errors at the time of javascript compilation (#14849) +* Remove codeowner for the pylint files (#14824) +* Fix pip compile (#14813) +* Fix part of #12912: Enables 'missing-raises-doc' and 'cyclic-import' pylint checks (#14772) +* Upgrade frontend libs (#14737) +* Fix part of #12912: Enables raise-missing-from pylint check (#14719) +* Upgrade libraries (#14642) +* Update PULL_REQUEST_TEMPLATE.md (#14629) +* Move import under third party exec code (#14606) +* Fix part of 14219: Increasing backend test coverage of core.storage.classifier.gae_models_test to 100% (#14573) +* Fix part of #14419: SERVER_MODES refactored to ServerModes and values changed to UPPERCASE (#14556) +* Fix part of #12912: Enables `consider-using-in` pylint check (#14520) +* Fix part of #12912: Changes deprecated python methods (#14506) +* Fix parts of #12912: Enables `no-else-continue` and `no-else-raise` pylint checks (#14504) +* Fix part of #10616: Added eslint check "lines-between-class-members" (#14496) + +Automated QA Team +* Fix part of #14219: Increasing backend test coverage of core.domain.playthrough_issue_registry_test to 100% (#15026) +* Fix part of #14219: Add backend tests to achieve 100% per-file backend coverage with classifier_domain_test (#14996) +* Fix part of #14219: Increasing backend test coverage of core.domain.topic_fetchers_test to 100% (#14981) +* Fix part of #14187: Added frontend coverage for `oppia-footer.component.ts` (#14973) +* Fix part of #14219: Increasing backend test coverage of core.domain.story_services_test to 100% (#14942) +* Fix part of #14187: Add 
frontend test for audio-preloader.service.ts (#14927) +* Fix part of #14187: Add frontend test for story-node.model.ts (#14923) +* Fixes part of #14219: 100% test coverage for skill_domain and suggestion_services (#14920) +* Fixed part of #14219: 100% per-file backend test coverage for acl_decorators.py (#14892) +* Fix part of #14219: Increasing backend test coverage of core.domain.topic_services_test to 100% (#14891) +* Fix part of #14219: Increasing backend test coverage of core.domain.summary_services_test to 100% (#14890) +* Add more tests to SVG editor (#14884) +* Fix part of #14219: Increasing backend test coverage of core.domain.exp_fetchers_test to 100% (#14883) +* Fix part of #14219: Increasing backend test coverage of core.domain.blog_services_test to 100% (#14854) +* Fix part of #14219: Increasing backend test coverage of core.domain.skill_fetchers_test to 100% (#14835) +* Fix part of #14219: Increasing backend test coverage of core.controllers.blog_homepage_test to 100% (#14834) +* Resolving Add to 'play later' list icon taking too long to load flake in learner dashboard (#14822) +* Fix "Wait timed out after 10001ms" flake in wipeout suite. 
(#14818) +* Fix part of #14219: Increasing backend test coverage of core.domain.exp_services_test to 100% (#14816) +* Fix part of #14219: Increase backend coverage for `classifer_services.py` to 100% (#14812) +* Fix part of #14219: Increasing backend test coverage of `core.domain.skill_services_test` to 100% (#14801) +* Fix part of #14187: Add remaining frontend tests for some files for 100% coverage (#14796) +* Fix part of #14187: Add fronted tests for `normalize-whitespace-punctuation-and-case.pipe.ts` (#14773) +* fixed part of #14219: 100% per-file backend test coverage for question_domain.py (#14759) +* Fix part of #14219: Increasing backend test coverage of core.domain.feedback_services_test to 100% (#14756) +* Fix part of #14187: Cover some files with frontend tests (#14729) +* Fix part of #14187: Add remaining frontend tests for `truncate-and-capitalize.pipe.ts` (#14699) +* Fix part of #14187: Adds frontend tests for `subtopic.model.ts` (#14696) +* fixed part of #14219: 100% per-file backend test coverage for event_services.py (#14682) +* Fix part of #14219: Increasing backend test coverage of `core.storage.auth.gae_models_test` to 100% (#14665) +* Fix part of #14219: Increasing backend test coverage of `core.storage.question.gae_models_test` and `core.jobs.job_utils_test` to 100% (#14662) +* Fix part of #14187: Add remaining frontend tests for three files (#14650) +* Fixed part of #14219: Increased backend test coverage of `core.storage.opportunity.gae_models_test`, `core.controllers.story_editor_test`, `core.controllers.skill_editor_test` and `core.storage.statistics.gae_models_test` to 100% (#14641) +* Fix parts of #14219: Increasing backend test coverage of `core.storage.recommendations.gae_models_test` and `core.storage.subtopic.gae_models_test` to 100% (#14610) +* Fix part of #14219: Increasing backend test coverage of `core.domain.email_manager_test`, `core.domain.action_registry_test` and `core.domain.config_domain_test` to 100% (#14601) +* Fix part 
of #14219: 100% per-file backend test coverage for editor.py (#14594) +* Fix part of #14219: Increasing backend test coverage of `core.storage.config.gae_models_test`, `core.domain.html_validation_service_test` and `core.domain.stats_domain_test` to 100% (#14589) +* Fix part of #14219: Increasing backend test coverage of `core.storage.story.gae_models_test` to 100% (#14586) +* Fix part of #14219: Increasing backend test coverage of `core.storage.blog.gae_models_test` to 100% (#14585) +* Fix part of #14219: Increasing backend test coverage of `core.storage.audit.gae_models_test` to 100% (#14584) +* Fix part of #14219: Increasing backend test coverage of `core.domain.learner_goals_services_test` and `core.domain.recommendations_services_test` to 100% (#14582) +* Fix part of #14219: Increasing backend test coverage of `core.domain.param_domain_test` to 100% (#14581) +* Fix part of #14219: Increasing backend test coverage of core.storage.improvements.gae_models_test to 100% (#14580) +* Fix part of 14219: Increasing backend test coverage of `core.domain.rights_domain_test` to 100% (#14578) +* Fixes part of 14219: Increasing backend test coverage of `core.domain.taskqueue_services_test` to 100% (#14564) +* Fix part of 14219: Increasing backend test coverage of `core.domain.activity_domain_test` to 100% (#14558) +* Fix part of #14219: Increasing backend test coverage of `core.platform.storage.cloud_storage_emulator_test` to 100% (#14548) +* Fix part of #14219: Increasing backend test coverage of core.controllers.resources_test to 100% (#14541) +* Fix part of #14219: Increase backend test coverage of core.controllers.profile_test to 100% (#14529) +* Add support for E2E rerun overrides (#14508) +* Enable screenshots on the develop branch of oppia/oppia (#14473) +* Fix #12219: Migrate typescript tests to Github actions and filter circle ci tests to run only on develop branch (#14439) + +Bug fixes +* Fixed #15009: Scroll to the top while navigating to the static pages. 
(#15010) +* Fix part of #14960: Center aligning this text with their images. (#14993) +* Fix failing mypy checks of `core.domain.rights_domain_test` (#14649) +* Fix shared component error for: Migrate delete-hint-modal.template, delete-interaction-modal.template, delete-last-hint-modal.template, delete-solution-modal.template, follow #14454 PR: (#14522) + +Server Errors +* Fix #13624: Limit number of characters that can be entered in the skill description text field (#14940) +* Include additional data when error is thrown to better understand issue #14709 should it arise again (#14938) +* Fix #14711: Unsubscribe from observables to prevent detectChanges calls after component destruction (#14926) +* Fix #14710: Fixes `newWidgetSelector` being assigned null. (#14905) +* Investigating server error described in issue #14707 (#14887) +* Fix #14714: Modified conditional checks for adding content ids (#14867) +* Fixes #14713: ngModel:numfmt error in the editor page (#14850) + +Python Migration +* Fix part of #13956: Migrate function python_utils.open_file (#14728) +* Fix part of #13956: Replaced python_utils.divide with "//" python3 operator (#14657) +* Fix part of #13956: Replaced python_utils.parse_query_string with urllib.parse.parse_qs (#14640) +* Fix part of #13956: python_utils.url_unsplit->urllib.unsplit (#14338) +* Fix part of #13956: Replaced python_utils.ZIP() with zip() (#14279) +* Fix part of #13956: Remove `yaml_from_dict` from python_utils.py (#13970) + + +v3.2.3 (10 Feb 2022) +------------------------ +Learner/Creator Pages +* Fix #14693: Allows changing thumbnail background color. 
(#14783) +* Fix #14426: Resolved Multiple Choice Options Shuffling Between Attempts (#14777) +* Fix #14694: Stack skill name label and radio button container properly and add text explaining score calculation on the practice session results page (#14716) +* Fix #14683: Learner dashboard minor UI fixes for mobile view (#14688) +* Fix #14667: Adds I18N keys for preference page (#14685) +* Fixes unnecessary underlining on hover and focus on the top navigation bar (#14608) +* Changing the login overlay to have the new UI (#14607) +* Fix #14431: Resolves all navbar related fixes for rtl languages (#14559) +* Fix #14084: Allow resize image to 200%; Surface exp role editor error (#14557) +* Fix #14397 : Contributors page not found (#14540) +* Fix #14530: Change question-editor-container's position so it moves up when screen is scrolled (#14535) +* Change layout of exploration and collection cards on community-library for mobile UI (#14527) +* Fix #13680 Remove voice artists' role in the description text (#14463) +* Fix #14188: Fix the Score Circle on the results page so it animates properly (#14452) +* Implemented new navbar : Home navbar menu item (#14438) +* Fix #11898: Align social-media icons (#14436) +* Fix #14389: Increase HTML length validation check limits (#14391) +* Fix #14255 Added extra space at bottom of the story editor page (#14381) +* Implemented new UI for the 'Get Involved' Tab of the top navbar and fixes issue with keyboard navigation (#14345) +* Fix part of #14085: Making math interactions consistent with the android definitions. 
(#14197) + +Data handling +* Fix #14751: Allow int in float isinstance checks (#14758) +* Fix translation stats job (#14554) +* Fix part of #13162: Add backend test and schema to validate StoryUrlFragmentHandler class arguments (#14489) +* Fix part of #14351: Extended Schema validator (#14465) +* Fix part of #13162: Add schema for ReviewableSuggestionsHandler (#14407) +* Add skill and story migration jobs (#14361) +* Fix Part of #13162: Add argument schema for ProfileHandler, SignupHandler classes (#14257) + +Contributor Pages +* Fix #14669: Translation modal and other UI fixes in Contributor Dashboard Page (#14680) +* Fix #14519: Contributor profile page UI fixes (#14562) +* Fix #14230: Strip whitespace when comparing translation content html and exploration content html (#14396) + +Developer UX +* Fix parts of #12912: Enables `useless-object-inheritance` (#14551) +* Fix part of #12912: Enables no-else-break pylint check (#14474) +* Upgrade libraries (#14430) +* Updates cron time for pending review notification (#14364) +* Upgrade libraries (#14236) + +Python Migration +* Fix part of #14419: Refactor ValidationModes class and values (#14592) +* Fix part of #13956: remove python_utils.create_enum (#14301) +* Script for auto extending index.yaml (#14293) + +Bug fixes +* Remove submodule (#14568) (#14569) +* Fixed #14510: Fixed working of delete answer group modal component (#14516) +* FE Flake in Router Service (#14318) +* Fixes few server errors and addresses some follow-up comments from #14352 (#14518) + +Translations +* Routine update of translations. (#14789) +* Implementation of Hacky translations for the all classroom related pages (#14631) +* Routine update of translations. 
(#14570) + +Automated QA Team +* Fix part of #14219: Increasing backend test coverage of `core.controllers.library_test` to 100% (#14545) +* Fix part of #14219: Increasing backend test coverage of `core.controllers.questions_list_test` to 100% (#14538) +* Fix part of #14219: Increasing Backend Test Coverage of `extensions.rich_text_components.components_test` to 100% (#14528) +* Fix #14525: Fix incorrect error of no backend tests (#14526) +* Fix part of #14219: Write tests to fix 100% per file coverage for collection_domain.py (#14499) +* Fix part of #14219: 100% per-file backend test coverage for base.py (#14495) +* Fix part of #14219: Add a test to complete gae_models coverage (#14466) +* Fix part of #14219: Add tests to completely cover scripts/common.py (#14442) +* Fix part of #14187: Cover current-interaction and language-util services fully (#14434) +* Add e2e test to prevent supplemental cards regression (#14320) +* Add frontend test for schema-based-html-viewer.directive.ts (#14205) + +Typing +* Added strict checking for more files (#14539) +* Fix part of #10474 : Make typescript checks strict for base-components/base.module.ts (#14513) +* Fix part of #10474 : Make typescript checks strict for editable-topic-backend-api.service.ts (#14459) +* Fix part of #14033: Type annotate improvements and feedback domain files (#14445) +* Fix part of #14033: Type annotate Classifier and Classroom domain (#14369) +* Fix part of #10474: Make typescript checks strict for core/templates/components/graph-services/graph-layout.service.ts file (#14311) +* Fix #14231: Added required strict typescript checks to import question-player-state-service (#14274) + +Angular Migration +* Fix #14514: Remove dead code for voiceover tab on contributor dashboard page. (#14517) +* Fix #14502 & Fix part of #9749: use one modal for save-pending-changes-modal. 
(#14509) +* Fix #14501 & Fix Part of #9749: skill-prerequisite-skills-editor solved data not showing issue and moved it from angularJs to angular (#14505) +* Fixed part of #9749: Migrated skill-rubrics-editor component (#14500) +* Fixed part of #9749: Migrated Add-Worked Editor directive to component (#14490) +* Fix part of issue #9749: migrate forms-templates from AngularJS to Angular (#14488) +* Fixed part of #9749: Migrated misconception editor directive (#14487) +* Fix part of #9749: Migrated Add-worked-example and delete worked example modal (#14477) +* Fix part of #9749: Migrate save-pending-changes-modal.template (#14476) +* Fix part of #8472: Migrates exploration correctness feedback, exploration param changes, exploration param specs services. (#14475) +* Fix part of #9749: Migrate to skill-editor-save-modal.component (#14470) +* Fixed part of #9749: Migrated Add Misconception modal (#14467) +* Fix Part of #9749: Migrate delete-hint-modal.template, delete-interaction-modal.template, delete-last-hint-modal.template, delete-solution-modal.template into component (#14454) +* Fix part of #9749: Migrates topic-editor-send-mail, topic-editor-save, delete story modal (#14451) +* Fixed part of #9749: Migrated preview set parameter modal (#14435) +* Fix part of 9749: Migrate delete-answer-group-modal.template (#14429) +* Fixed part of #9749: Migrated revert-exploration-modal component (#14421) +* Fix part of issue #9749: migrate translation-tab TranslationTabBusyModal, DeleteAudioTranslationModal from angularjs to angular (#14411) +* Fix part of issue #9749: migrate welcome translation modal from angularjs to angular (#14410) +* Fixed part of #9749: Migrated add-hint-modal component (#14401) +* Fix part of issue #9749: migrate preview-summary-tile-modal to angular component and some services (#14388) +* Fix a part of #14278 : Exploration rights Service small important change in code (#14387) +* Fixed part of #8472: Migrated confirm-state-modal component (#14377) +* 
Fixed part of #8472: Migrated topic-editor-save-modal to Angular component (#14363) +* Migrate and encapsulate logic from conversation skin directive to ratings-and-recommendations component. (#14049) +* Fix Part of #9749: Migrates Creator-Dashboard-Page to Angular (#13906) + +Release team +* Add notes to the release coordinator page (#14472) +* Fix problems in pre push hook (#14457) + +Miscellaneous +* Fix #13162: Add schema and introduce new approach for saving multiple images in suggestion handler. (#14063) + + +v3.2.2 (05 Jan 2022) +------------------------ +Learner/Creator Pages +* Fix breaking of navbar throughout all screen sizes (#14498) +* Fixes an issue where multiple calls where being made to the backend (#14460) +* Fix #14390: Concept cards have line breaks before and above them (#14443) +* Implemented new navbar : Home navbar menu item (#14438) +* Fix #14404: Fixes sidebar freezing behaviour (#14420) +* Fix #14011: Made misaligned subtopic tiles responsive (#14392) +* Fix #14304: Removed navbar links linking pages to new tabs (#14346) +* Implemented new UI for the 'Get Involved' Tab of the top navbar and fixes issue with keyboard navigation (#14345) +* Fix #14305: Change Foundation Page Title (#14329) +* Implemented the new UI for the Learn (formerly Classroom) Tab of the navbar and fixes #13667 (#14294) +* Adding a “Create exploration” button to the top bar of creator-dashboard (#14292) +* Fix part of #13764: page title fragment for web should have min-length 5 characters (#14264) +* Implemented new UI changes in side Navigation bar (#14258) +* Implemented donate nav item according to the new navbar view (#14232) +* Fix - Hints visible on results page, #13248: Adding href to topic viewer page urls (#14224) +* Fix issue #12165 overflow of skill description (#14203) +* Implemented the new UI for the results page (#14193) +* Add translation keys for interaction specific constant translations (#14158) +* Fix #14330: Pass isLearnAgainButton input to the 
progress-nav component (#14371) +* Fix mobile usability issues (#14198) +* Added confirmation before leaving practice tab to prevent accidental session close. (#14128) + +Contributor Pages +* Add languages: Hausa, Igbo, Yoruba (#14378) +* Fix #14300 When the language is changed on the contributor dashboard, the view doesn't update for a while. (#14357) +* Fix #14339: Unblock translation review process from server error raised while fetching translations to review (#14341) +* Fix #14234 Unable to accept/reject a suggestions during reviews. (#14283) +* Fix #14265: Make translation language selector to show selected language correctly (#14268) + +Data handling +* Add email job to registry (#14393) +* Add debugging to suggestions stats job (#14350) +* Fix part of #13162: Adds schemas for TopicsAndSkillsDashboardPageDataHandler, TopicAssignmentsHandler, SkillsDashboardPageDataHandler (#14323) +* Remove Brian for codeowners (#14319) +* Fix part of #13162: Added schema for the SkillRightsHandler and SkillEditorPage class in skill_editor.py (#14251) +* Fix part of #13162: Add schemas to validate ExplorationStartEventHandler and ExplorationActualStartEventHandler class arguments (#14222) +* Wipeout changes (#14221) +* Fix #13974: Remove unneeded constants (#14213) +* Fix #14103: Shifts handler schema constants to a separate file (#14212) +* Fix Part of #13162: Add argument schema for ExplorationCompleteEventHandler, ExplorationMaybeLeaveHandler, SolutionHitEventHandler classes (#14208) +* Fix part of #13162: Adds schema for the PromoBarHandler in resources.py (#14171) + +Bug fixes +* Fix popperJS (#14386) + +Python Migration +* Fixes part of #13956: python_utils.urllib_unquote (#14374) +* Fix part of #13935: Remove python_utils.url_parse function (#14336) +* Fixed part of #13956: Remove python_utils.zip_longest function (#14317) +* Fix part of #13956: Using Python3 urlrequest (#14303) +* Fix part of #13956: remove python_utils.string_io() and python_utils.url_open() 
(#14299) +* Fixed part of #13956: Remove python_utils.PRINT function (#14284) +* Introduce helper transforms for jobs (#14120) +* Fix part of #11314: Change "offset" from str to int, cursor to offset and address remaining TODO's (#14172) + +Angular Migration +* Fix part of issue #9749: migrate setting-tab modals (delete, unpublish, reassign, ... ) to Angular Component (#14359) +* Fix part of issue #9749: migrate create new subtopic modal to angular component (#14347) +* Fix part of #8016: Migration of Exploration rights Service from AngularJS to Angular (#14278) +* Fix part of #8016: Make Http Calls of question-suggestion-review-modal.controller.ts and translation-suggestion-review-modal.controller.ts through service component. (#14246) +* Fix #9749: migrate Create feedback thread modal to angular component (#14269) +* Fixes part of #9749: Migrate few instances of angular-html-bind (#14263) +* Fix part of #8016: Make HTTP Calls of setting-tab.component.ts through service component (#14153) +* Fix Part of #9749: Migrates Collection-Player-Page to Angular (#12729) + +Translations +* Routine update of translations. (#14349) + +Developer UX +* Update CONTRIBUTING.md to be the same as our wiki (#14334) +* Fix part of #10616: Add eslint rule 'member-delimiter-style' (#14244) +* Fix part of #10616: Add eslint rule to ensure consistent type definitions (#14162) +* Fix some errors and omissions in the lint script. 
(#14321) + +Miscellaneous +* Temporarily remove @BenHenning from codeowners (#14286) +* Remove myself from oppia-web CODEOWNERS (#14216) +* Use legacy cust args for v47 to v48 schema migration (#14352) +* Fix #14126: Pass the normalized values for url path elements to the handler (#14127) + +Automated QA Team +* Fix part of #13467: Fixes frontend coverage issue with `save-validation-fail-modal.component.ts` (#14275) +* Fix part of #14187: Cover some files with frontend tests (#14242) +* Disable GPU for Selenium to try and fix renderer timeout (#14220) +* Fix part of #14187: Write frontend tests for some components (#14179) +* Fix #13952: Per-file backend coverage tests (#14166) +* Fix part of #10798: Fixed end-to-end tests for workflow.js (#14124) + +Typing +* Fix part of #10474: Cover translate-text.service.ts and music-phrase-player.service.ts with strict checks. (#14201) +* Fix part of #8015: pretest-question-backend-api.service returns domain object instead of dict (#14192) +* Fix part of #14033: Added type annotations to wipeout_domain (#14163) +* Fix part of #10474: Typescript Strict Check for skill-backend-api.service.ts (#14137) +* Fix part of #14033: Type annotate opportunity domain (#14123) +* Fix part of #10474: Cover Top-Navigation-Bar component with strict checks (#14106) +* Fix part of #14033: Add Mypy type annotations to domain files (#14058) +* Fix part of #13687: Convert dicts to domain objects as a part of normalization through schema validation. 
(#13678) + + +v3.2.1 (27 Nov 2021) +------------------------ +Learner/Creator Pages +* Fix #14227: Disable "Add Thumbnail" button when user has not uploaded new thumbnail (#14297) +* Fix #14235: Enable to submit question suggestions with image region interations (#14289) +* Fix #14228 and #14253: Set classroom filter choices in skills tab and fix change detection/propagation issue in the supplemental card component (#14270) +* Fix #10315, #14200: Fixes white spacing issues in RTE (#14233) +* Fix submitting answers using 'enter key' in Interactions. (#14195) +* Add same padding on both top and bottom of drag and drop answer fields. (#14191) +* Fix #14009: adds animation to topic navbar and fixes background banner. (#14184) +* Fix part of #12858: Add Dutch, Slovak and Ukrainian to the list of site-supported languages. (#14183) +* Fix #14055: Disables making changes to topic if user has no topic rights. (#14139) +* Fix skill edit editor lag issue (#14129) +* Fix part of #13764: Add frontend validation checks for exploration tags (#14119) +* Learner dashboard fixes (#14105) +* Learner dashboard fixes (#14101) +* Fix part of #13764: Add validation checks for Outcome, Hint and Solution Explanation (#14035) +* Fix #14007: Removed extra dropdown icon from admin page navbar (#14008) +* Fix part of #13380: Fixed exploration player rtl formatting issues (#13980) +* Scale and center SVG in the editor (#14178) +* Fix #12452 and #13358: Guppy Configuration Service (#14138) +* Fix part of #13764: State content should be a valid HTML string with at most 10,000 characters (#13918) +* Fix part of #13603: Validation for Continue, Text Input, Multiple Choice Interaction (#13838) + +Angular Migration +* Fix #14225: Move modal creation logic from service to component for remove from play later playlist modal (#14291) +* Fix part of #9749: Migrate few instances of angular-html-bind (#14263) +* Migrate confirm discard change modal template (#14167) +* Fix part of issue #9749: migrate 
exploration-publish-modal to angular component (#14152) +* Fix part of #8016: Make Http Calls of history-tab.component.ts through service component (#14111) +* Fix #14052: Introduce i18n service for encapsulating all translation logic (#14099) +* Fix part of #9749: Migrates editor reloading modal to angular (#14089) +* Fix part of issue #9749: migrate post-publish-modal to angular component (#14079) +* Fix part of #8472: Migrate question player state service (#14074) +* Fix part of #9749: Migrates welcome modal to angular (#13890) +* Fix part of #9749: Migrates Score-Ring, Concept-Card, Completion-Graph to Angular (#13837) +* Fix part of #9749: Migrate tutor card directive (#12892) +* Fix part of #9749: Migrate Progress nav directive (#12864) + +Contributor Pages +* Fix #14234 Unable to accept/reject a suggestions during reviews. (#14283) +* Add Azerbaijani language to contributor dashboard. (#14273) +* Fix #14265: Make translation language selector to show selected language correctly (#14268) +* Fix translation stats job to work with sets (#14254) +* Fix #14189: Make translations editable for reviewers (#14194) +* Fix part of #14072, 14027: Surface submit translation suggestion errors and admin dashboard cleanup (#14151) +* Fix #13440: Make it clearer what language is being translated (#14104) +* Fix #14004 Handle frontend display of skill opportunities correctly (#14097) +* Fix part of #12013: Update exploration changed UI to make old content less ambiguous (#14047) +* Fix #13833: Disables 'Translate' button when all translations are in review (#13972) +* Fix #12181: [Contributor Dashboard] Allow filtering of translation opportunities by topic (#13539) + +Bug fixes +* Fix lang param not working due to mulitple lang cookies. (#14266) +* Fix tasks on production (#14176) +* Fix multiple production issues (#14164) +* Address comments from PR 14101 (#14142) + +Data handling +* Add voiceover admin role to admin user (#14252) +* Update note for pre-deletion page. 
(#14181) +* Fix part of #13162: Add argument schema for ReleaseCoordinatorPage, JobsHandler, JobOutputHandler, MemoryCacheHandler classes. (#14175) +* Fix part of #10474: Made typescript checks strict for display-solutio… (#14051) +* Fix part of #13162: Add schema for PretestHandler, StorePlaythroughHandler, and StatsEventsHandler. (#14046) +* Fix #12297: Use Firebase Session Cookie for PlatformFeatureService.sa… (#14002) +* Fix part of #13162: Add schema for handlers in story_editor and story_viewer (#13914) +* Fix part of #13162: Add argument schema for LearnerIncompleteActivityHandler, RatingHandler and FlagExplorationHandler classes (#14131) +* Add schema for subscribe, unsubscribe handlers. (#13995) + +Python Migration +* Introduce Cloud Logging (#14185) +* Refactor future imports and MyPy typing (#14132) +* Update setup.py version on new release (#14062) +* Fix part of #13956 : Handle ExitStack without python_utils (#14037) +* Fix small Python 3 migration issues (#14005) +* Fix part of #13956: python_utils.with_metaclass (#13998) +* Fix #13956: Migrate python_utils.redirect_stdout to Python 3 function (#13997) +* Fix #13956: Remove the use of python_utils.OBJECT (#13996) +* Fix part of #13956: Remove python_utils.nullcontext (#13993) +* Fix part of #13956 and fix #11547: Remove python_utils.reraise_exception (#13992) +* Give jobs a standard module and naming convention (#13989) +* Fix part of #13956: Remove python_utils.url_split (#13987) +* Fix part of #13956: Remove python_utils.RANGE variable (#13983) +* Fix part of #13956: python utils.url_encode-->urllib.parse.urlencode (#13978) +* Fix part of #13956: Removed the ROUND variable and its references (#13976) +* Fix part of #13956: Replace python_utils.MAP, python_utils.NEXT with map, next (#13969) +* Fix part of #13956: python_utils.url_quote->urllib.parse.quote (#13964) +* Fix part of #13935: Handle input without python_utils (#13955) +* Remove enforcing python_utils function usage from pylint (#13954) +* 
Fix part of #13935: Handle str and bytes without python_utils (#13882) + +Miscellaneous +* Fix #14116: Fix typo in core/controllers/admin.py (#14117) +* Fix part of #8668: Document suggestion-modal.service.ts file (#14125) + +Typing +* Fix part of #14033: Type annotate value_generators_domain (#14180) +* Fix part of #10474: Cover profile-page-backend-api.service with strict checks (#14140) +* Fix part of #10474: Fixing typescript strict checks issues for StoryObjectFactory.ts (#14134) +* Fix part of #14033: Type annotate rights_domain (#14108) +* Fix part of #14033: Type annotate learner progress domain files (#14093) +* Fix part of #10474: Made typescript checks strict for exploration-foo… (#14045) +* Fix #10474: cover autogenerated audio player service (#14043) +* Fix part of #10474: Enable strict mode in subtopic.model.ts (#14039) +* Fix part of #14033: Type annotate activity_domain file (#14034) +* Fix part of #10616: Introduced typescript-eslint/method-signature-style rule (#13922) +* Fix part of #10474: Cover more files with strict checks (#13876) +* Fix part of #10474: Made typescript checks strict for filtered-choices-field.component.ts and spec (#13819) +* Fix part of #14033: Mypy type annotations added to takeout_domain and translation_domian (#14149) +* Introduce MyPy reviewers (#14135) +* Change codeowner for mypy script and add directories to mypy denylist (#14032) + +Release team +* Fix issues in release (#14174) + +Developer UX +* Fix #14094: Reinstate codeownership of learner analytics files. 
(#14160) +* Fix `--save_datastore` flag in start command (#14133) +* Fix #14080: Pip index global config bug (#14112) +* Add code to help with the Python 2 unavailable issue (#14076) +* Fix part of #10616: Introduce @typesctript-eslint rules that extend eslint rules (#14029) +* Make output from scripts/typescript_checks.py pretty (#14025) +* Fix part of #13956: python_utils.url_retrieve->urllib.request.urlretrieve (#13990) +* Adds quote before the message to fix no message issue (#13984) +* Fix part for #13956: Removes "python utils.url_unquote_plus" (#13981) +* Fix part of #10616: Introduced typescript-eslint/consistent-type-assertions rule in the codebase (#13942) +* Fix #13872: Brave browser support added for development server (#13873) +* Fix part of #10474: Oppia-Response-Code-Repl with strict checks (#13768) +* Add "pending review notification" workflow (#14100) +* Change pending review notification workflow config to run on workflow_dispatch event (#14177) +* Fix part of #13467: Coverage flake in edit-profile-picture-modal.component (#14088) + +Automated QA Team +* Quote webhook url in action (#14145) +* Fix part of #10798: Fix end to end tests for SkillEditorPage.js (#14114) +* Fix #14090: Tighten rerun policies based on failure rates (#14092) +* Consolidate Backend Tests (2nd PR) (#14081) +* Fix #10798: End-to-End Tests with action.js and waitFor.js in TopicsAndSkillsDashboardPage.js (#14077) +* Fix #14057: Change List Service [Coverage-Flake] (#14068) +* Fix error message when user needs to set chromedriver version (#14053) +* Resolve `getValidationIssues` E2E flake (#14038) +* Fix part of #10798: Fixing End-to-End Tests with action.js and waitFor.js for forms.js (#13924) + +Translations +* Routine update of translations (#14065) +* Routine update of translations (#14182) + v3.2.0 (28 Oct 2021) ------------------------ diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 7a160c1cb76b..9f309ae3e65a 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -23,8 +23,10 @@ 
Aadya Mishra Aaron Zuspan Aashish Gaba +Aashish Khubchandani Aashish Singh Aasif Faizal +Aayush Kumar Singh Abeer Khan Abhay Garg Abhay Gupta @@ -34,19 +36,25 @@ Abhijit Suresh Abhimanyu Thakre Abhishek Arya Abhishek Kumar +Abhishek Sultaniya Abhishek Uniyal Abhith Krishna Abraham Mgowano Abraham Mgowano Acash Mkj +Adam Halim Adarsh Kumar Aditya Dubey <74500dubey@gmail.com> Aditya Jain +Aditya Narayanm Aditya Sharma Adrija Acharyya Aishwary Saxena +Ajay Gurjar Ajay Sharma Ajo John +Akash Gupta +Akhilesh Ratnakumar Akshath Kaushal Akshay Anand Akshay Nandwana @@ -54,12 +62,16 @@ Alex Gower Alex Lee Alexandra Wu Allan Zhou +Allison Goldstein Alluri Harshit Varma +Aman Singh Jolly Amanda Rodriguez Amey Kudari Amit Deutsch +Amit Panwar Amulya Kumar Ana Francisca Bernardo +Ananth Raghav Andrew Low Andrey Mironyuk Angela Park @@ -69,15 +81,22 @@ Ankita Saxena Anmol Mittal Anmol Shukla Anshul Hudda +Anshuman Maurya Anthony Alridge Anthony Zheng +Anthony Zhu Anubhav Sinha Anumeha Agrawal +Anurag Singh Anurag Thakur +Anurag Vats +Anurag Verma Apurv Bajaj Apurv Botle Archi Aggarwal Areesha Tariq +Ariel Cohen +Arjun Thakur <21bme057@nith.ac.in> Arkadyuti Bandyopadhyay Arnesh Agrawal Arpan Banerjee @@ -90,11 +109,16 @@ Ashish Verma Ashmeet Singh Ashutosh Chauhan Ashutosh Singla +Ashwath V A Assem Yeskabyl Aubrey Wells Aung Hein Oo Austin Choi +Ava Gizoni Avijit Gupta <526avijit@gmail.com> +Ayush Anand +Ayush Jain +Ayush Jha Ayush Kumar <2580ayush2580@gmail.com> Ayush Nandi Baiba Skujevska @@ -102,6 +126,7 @@ Barnabas Makonda Ben Henning Ben Targan Benjamin Beeshma +Bhavuk Jain Bill Morrisson BJ Voth Bolaji Fatade @@ -109,11 +134,13 @@ Boyd Y. 
Ching Brenton Briggs Brian Lin Brian Rodriguez +Brijmohan Siyag Cathleen Huang Charisse De Torres Chase Albert Chen Shenyue Chin Zhan Xiong +Chirag Baid Chris Skalnik Christopher Tao Cihan Bebek @@ -121,6 +148,7 @@ Connie Chow Corey Hunter Céline Deknop Darin Nguyen +Darshan AbhayKumar David Cheng Dawson Eliasen Debanshu Bhaumik @@ -129,29 +157,39 @@ Deepank Agarwal Denis Samokhvalov Devi Sandeep Dharmesh Poddar +Dhruv Rishishwar Diana Chen +Dipto Chakrabarty Divyadeep Singh +Divyansh Khetan Domenico Vitarella Dong Wook Brian Chung < tigerdw@umich.edu> Edward Allison Eesha Arif Elizabeth Kemp +Emil Brynielsson Emily Glue +Eric L'Heureux Eric Lou Eric Yang +Eshaan Aggarwal Estelle Lee Fang You Farees Hussain Felicity Zhao Florin Balint +Francois Gonothi Toure Frederik Creemers +Fumiya Goto Gabriel Fuentes Gagan Suneja Gangavarapu Praneeth Gautam Verma Geet Choudhary Geo Jolly +Gopi Vaibhav Grace Guo +Gustav Stappe Renner Hadyn Fitzgerald Hamlet Villa Hamza Chandad @@ -179,6 +217,8 @@ Jackson Wu Jacob Davis Jacob Li Peng Cheng Jacque Li +Jaideep Sharma +Jairo Honorio Jakub Osika James James John James Xu @@ -188,18 +228,26 @@ Jared Silver Jasmine Rider Jasper Deng Jaswinder Singh +Jay Vivarekar Jayasanka Madhawa +Jayprahash Sharma Jaysinh Shukla +Jeevesh Garg Jenna Mandel Jeremy Emerson Jerry Chen Jerry Lau +Jessica Li Jian Fu Jiazhi Chen Jim Zhan +Joel Lau +Jogendra Singh +Jogendra Singh John Glennon John Karasinski John Prince Mesape +Jonathan D Lake Jonathan Slaton Jordan Cockles Jordan Stapinski @@ -212,10 +260,15 @@ Juan Saba Justin Du Jérôme K.N. 
Anantha Nandanan +Kaicheng Han Kajol Kumari +Karen Honorio Karen Rustad +Karina Zhang +Karishma Vanwari Kartikey Pandey Kashif Jamal Soofi +Kashish Bhandula Kate Perkins Kathryn Patterson Kayla Hardie @@ -224,6 +277,7 @@ Kenneth Ho Kerry Wang Keshav Bathla Keshav Gupta +Kevin Choong Kevin Conner Kevin Lee Kevin Thomas @@ -236,23 +290,31 @@ Konstantinos Kagkelidis Krishita Jain Krishna Rao Kristin Anthony +Kshitij Patil Kumari Shalini Kunal Garg Kyriaki Velliniati Lakshay Angrish Lara Davies Laura Kinkead +Leah Goldberg +Lev Bernstein Leyla Tuon Cao Linn Hallonqvist +Lontsi Jordan Lorrany Azevedo Lucklita Theng Luis Ulloa +Luiz D. M. Mainart Lunrong Chen Madhav Sainanee Madiyar Aitbayev Mahendra Suthar +Mai Elshiashi Mamat Rahmat +Manan Rathi Manas Tungare +Manish Roy Manoj Mohan Marcel Schmittfull Mariana Zangrossi @@ -260,6 +322,8 @@ Mark Cabanero Mark Halpin Martin Smithurst Matt Higgins +Matthew Sumpter +Mattias Erlingson Maurício Meneghini Fauth Md Shahbaz Alam Meet Vyas @@ -275,6 +339,7 @@ Min Tan Mohammad Shahebaz Mohammad Zaman Mohit Balwani +Mohit Gupta Mohit Gupta Mohit Musaddi <96mohitm@gmail.com> Mohith Khatri @@ -295,6 +360,7 @@ Nikhil Handa Nikhil Nair Nikhil Prakash Nikhil Sangwan +Nils Johansson Nimalen Sivapalan Nisarg Chaudhari Nischaya Sharma @@ -310,8 +376,11 @@ Owen Parry Ozan Filiz Paloma Oliveira Pankaj Dahiya +Pankaj Prajapati +Pankaj Singh Parth Bhoiwala Parul Priyedarshani +Patel Muhammad Patrycja Praczyk Pawan Rai Pawel Borkar @@ -321,13 +390,18 @@ Phillip Moulton Piyush Agrawal Prakash Subedi Pranav Siddharth S +Pranshu Srivastava Prasanna Patil Pratik Katte Prayush Dawda +Princika Rai +Priyansh Mehta <21bece080@iiitdmj.ac.in> +Prottoy Chakraborty Pulkit Aggarwal Pulkit Gera Purhan Purvi Misal +Qinghao Yang Radesh Kumar Rafay Ghafoor Rafał Kaszuba @@ -342,6 +416,8 @@ Rajendra Kadam Rajitha Warusavitarana Rakshit Kumar Ramin Izadpanah +Ramon Valdivia +Ravi Dalal Raymond Tso Rebekah Houser Reinaldo Aguiar @@ -352,6 +428,7 @@ Richard Cho 
Rijuta Singh Rishabh Rawat Rishav Chakraborty +Rishi Kejriwal Ritik Kumar Rizky Riyaldhi Robert Moreno Carrillo @@ -367,14 +444,17 @@ Saeed Jassani Safwan Mansuri Sagang Wee Sagar Manohar +Sahil Jhangar Sajal Asati Sajen Sarvajith Sajna Kadalikat +Sakshi Jain Samara Trilling Samriddhi Mishra Sandeep Dubey Sandeep Patel Sanjana Konte +Sanjay Saju Jacob Sankranti Joshi Santos Hernandez Sanyam Khurana @@ -384,6 +464,8 @@ Satmeet Ubhi Satwik Kansal Satyam Bhalla Satyam Yadav +Saurabh Balke +Saurabh Jamadagni Saurav Pratihar Savitha K Jayasankar Scott Brenner @@ -391,20 +473,26 @@ Scott Junner Scott Roberts Sean Anthony Riordan Sean Lip +Sean Zhong Sebastian Zangaro Seth Beckman Seth Saloni Shafqat Dulal +Shan Jiang Shantanu Bhowmik Sharif Shaker +Shiori Nozawa Shiqi Wu Shitong Shou Shiva Krishna Yadav <14311a05r1@sreenidhi.edu.in> +Shivam Chaudhary Shivam Jha <20bcs206@iiitdmj.ac.in> Shivan Trivedi Shivansh Bajaj Shivansh Dhiman Shivansh Rakesh +Shivkant Chauhan +Shobhan Srivastava Shouvik Roy Shruti Grover Shruti Satish @@ -412,18 +500,22 @@ Shubha Gupta Shubha Rajan Shubham Bansal Shubham Korde +Shubham Thakur +Shun Nagasaki Shuta Suzuki Siddhant Khandelwal Siddhant Srivastav Siddharth Batra Siddharth Mehta Simran Mahindrakar +Sougata Das Souhit Dey Soumyajyoti Dey Soumyo Dey Sourab Jha Sourav Badami Sourav Singh +Sreelaya Vuyyuru Sreenivasulu Giritheja Srijan Reddy Srikanth Kadaba @@ -433,18 +525,25 @@ Stephanie Federwisch Stephen Chiang Stephen Hannon Steve Jiang +Subhash Kovela +SUBHASH THENUA +Subin Duresh Subodh Verma Sudhanva MG Sudipta Gyan Prakash Pradhan Sujay Dey Sumit Paroothi Surya Siriki +Swetha Sekhar Taiwo Adetona Tanishq Gupta Tanmay Mathur Tarashish Mishra +Taylor Murray +Ted Tong Li Teddy Marchildon Tezuesh Varshney +Tham Wan Jun Theo Lipeles Tia Jin Tianqi Wu @@ -453,6 +552,7 @@ Tonatiuh Garcia Tony Afula Tony Jiang Tracy Homer +Tran Quang Khai < vpeopleonatank@gmail.com> Travis Shafer Truong Kim Tuguldur Baigalmaa @@ -461,9 +561,12 @@ Tyler 
Ishikawa Ujjwal Gulecha Umesh Singla Utkarsh Dixit +Vaibhav Tripathi +Valeron Toscano Varazdat Manukyan Varun Tandon Vasu Tomar +Vedika Chandra Vibhor Agarwal Viet Tran Quoc Hoang Vijay Patel @@ -477,11 +580,13 @@ Vishal Gupta Vishal Joisar Vishal Teotia Vishnu M +Vishnu Nithin Reddy Vojtěch Jelínek Vuyisile Ndlovu Wiktor Idzikowski Will Li Wilson Hong +Winnie Xinyu Wu Xuân (Sean) Lương Y. Budhachandra Singh @@ -490,14 +595,20 @@ Yang Lu Yash Jipkate Yash Ladha <201551061@iiitvadodara.ac.in> Yash Santosh Kandalkar +Yavik Kapadia Yi Yan +Yijia Gao Yiming Pan Yogesh Sharma Yousef Hamza Yuan Gu +Yuecheng Cao Yuliang +Yuri Pennafort Lemos Zach Puller Zach Wiebesiek Zachery Vekovius +Zaid Ismail +Zhan Liang Zhu Chu Zoe Madden-Wood diff --git a/MANIFEST.in b/MANIFEST.in index ef8b0661a08d..035d697f0321 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,9 +3,7 @@ # in production. Note that this package is generated # by running 'python setup.py sdist' in build.py. -include assets/constants.ts -include assets/release_constants.json -include assets/rich_text_components_definitions.ts +graft assets include extensions/interactions/html_field_types_to_rule_specs.json include extensions/interactions/legacy_interaction_specs_by_state_version/*.json include extensions/interactions/rule_templates.json diff --git a/NOTICE b/NOTICE index d095fe2d8f54..85b68f3a8fa3 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ Oppia -Copyright 2014-2021 The Oppia Authors +Copyright 2014-2022 The Oppia Authors This product includes software developed at The Oppia Foundation (https://www.oppiafoundation.org/). diff --git a/angular-template-style-url-replacer.webpack-loader.js b/angular-template-style-url-replacer.webpack-loader.js new file mode 100644 index 000000000000..23ba52203417 --- /dev/null +++ b/angular-template-style-url-replacer.webpack-loader.js @@ -0,0 +1,105 @@ +// Copyright 2021 The Oppia Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS-IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @fileoverview This is a webpack loader that replaces templateUrl: './html' + * with template: require('./html'). This is needed for our webpack based + * compilation and not the angular compiler. Angular compiler parses the html + * and converts it to js instructions. For the style urls, the angular compiler + * uses styleUrls while webpack uses imports. Hence, currently we put the + * stylesheet as import and as a styleUrl in the component. Once we have moved + * away from separate rtl css files, we will remove the import statements and + * just keep styleUrls. Until then, for webpack, we need remove styleUrls for + * webpack compilation. + */ + +/** + * The regexes are trying to find the templateUrl from the component decorator + * Eg: + * @Component({ + * selector: 'oppia-base-content', + * templateUrl: './base-content.component.html', + * styleUrls: ['./base-content.component.css'] + * }) + * + * From the above we need to get './base-content.component.html' and + * ['./base-content.component.css']. 
+ * + * After modifications, it will look like: + * @Component({ + * selector: 'oppia-base-content', + * template: require('./base-content.component.html'), + * styleUrls: [] + * }) + * Templates can be found using the regex: + * templateUrl[any number of spaces]:[any number of spaces] + * [any of '"` that starts a string in javascript] + * [match all characters between the quotes][End quotes '"`] + * [any number of spaces] + * [ + * ends with a comma or a closing curly bracket depending or wether there are + * more items in the decorator or not + * ] + */ +const TEMPLATE_URL_REGEX = /templateUrl\s*:(\s*['"`](.*?)['"`]\s*([,}]))/gm; +const STYLES_URL_REGEX = /styleUrls *:(\s*\[[^\]]*?\])/g; +const VALUE_REGEX = /(['`"])((?:[^\\]\\\1|.)*?)\1/g; + +/** + * This function is only used for templateUrl modifications. From a string this + * function extracts the first value inside quotes ('"`). + * Example: For a string like: "templateUrl: './base-content.component.html'," + * The VALUE_REGEX will match "'./base-content.component.html'" and the first + * group is the quote ("'") and the second group is + * ""./base-content.component.html" + * @param {string} str + * @returns Relative url + */ +const replaceStringsWithRequiresStatement = (str) => { + return str.replace(VALUE_REGEX, function(_, __, url) { + return "require('" + url + "')"; + }); +}; + +// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types +const loader = (sourceString) => { + // https://webpack.js.org/api/loaders/#thiscacheable + // Cacheable is an interface provided by webpack and is used to speed up + // the build by caching results. 
+ this.cacheable && this.cacheable(); + /** + * The templateURL regex will match something like: + * "templateUrl: './base-content.component.html'," + */ + const newSourceString = sourceString.replace( + TEMPLATE_URL_REGEX, (_, url) => { + return 'template:' + replaceStringsWithRequiresStatement(url); + } + ).replace(STYLES_URL_REGEX, () => { + /** + * For the style urls, the angular compiler + * uses styleUrls while webpack uses imports. Hence, currently we put the + * stylesheet as import and as a styleUrl in the component. Once we have + * moved away from separate rtl css files, we will remove the import + * statements and just keep styleUrls. Until then, for webpack, we need + * remove styleUrls property for webpack compilation. + */ + return 'styleUrl: []'; + }); + + return newSourceString; +}; + + +module.exports = loader; diff --git a/angular.json b/angular.json new file mode 100644 index 000000000000..f35e89db6fd4 --- /dev/null +++ b/angular.json @@ -0,0 +1,133 @@ +{ + "$schema": "./node_modules/@angular/cli/lib/config/schema.json", + "version": 1, + "newProjectRoot": "projects", + "projects": { + "oppia-angular": { + "projectType": "application", + "schematics": { + "@schematics/angular:component": { + "style": "scss" + } + }, + "root": "", + "sourceRoot": "src", + "prefix": "app", + "architect": { + "build": { + "builder": "@angular-devkit/build-angular:browser", + "options": { + "allowedCommonJsDependencies": ["path", "stream", "zlib"], + "outputPath": "dist/oppia-angular", + "index": "src/index.html", + "main": "src/main.ts", + "polyfills": "core/templates/Polyfills.ts", + "tsConfig": "tsconfig.json", + "assets": [ + "src/favicon.ico", + "src/assets" + ], + "styles": [ + "core/templates/css/oppia.css", + "core/templates/css/oppia-material.css" + ], + "scripts": [] + }, + "configurations": { + "production": { + "fileReplacements": [ + { + "replace": "src/environments/environment.ts", + "with": "src/environments/environment.prod.ts" + } + ], + "index": { 
+ "input": "src/index.prod.html", + "output": "index.html" + }, + "baseHref": "/dist/oppia-angular-prod/", + "outputPath": "dist/oppia-angular-prod", + "aot": true, + "optimization": true, + "outputHashing": "all", + "sourceMap": false, + "namedChunks": false, + "extractLicenses": true, + "vendorChunk": false, + "buildOptimizer": true, + "budgets": [ + { + "type": "initial", + "maximumWarning": "500kb", + "maximumError": "2mb" + }, + { + "type": "anyComponentStyle", + "maximumWarning": "6kb", + "maximumError": "28kb" + } + ] + } + } + }, + "serve": { + "builder": "@angular-devkit/build-angular:dev-server", + "options": { + "browserTarget": "oppia-angular:build" + }, + "configurations": { + "production": { + "browserTarget": "oppia-angular:build:production" + } + } + }, + "extract-i18n": { + "builder": "@angular-devkit/build-angular:extract-i18n", + "options": { + "browserTarget": "oppia-angular:build" + } + }, + "test": { + "builder": "@angular-devkit/build-angular:karma", + "options": { + "main": "src/test.ts", + "polyfills": "core/templates/Polyfills.ts", + "tsConfig": "tsconfig.spec.json", + "karmaConfig": "karma.conf.js", + "assets": [ + "src/favicon.ico", + "assets" + ], + "styles": [ + ], + "scripts": [] + } + }, + "lint": { + "builder": "@angular-devkit/build-angular:tslint", + "options": { + "tsConfig": [ + "tsconfig.json" + ], + "exclude": [ + "**/node_modules/**" + ] + } + }, + "e2e": { + "builder": "@angular-devkit/build-angular:protractor", + "options": { + "protractorConfig": "e2e/protractor.conf.js", + "devServerTarget": "oppia-angular:serve" + }, + "configurations": { + "production": { + "devServerTarget": "oppia-angular:serve:production" + } + } + } + } + } + }, + "defaultProject": "oppia-angular" +} diff --git a/app_dev.yaml b/app_dev.yaml index ad5ee53fe930..7ace518e2eb6 100644 --- a/app_dev.yaml +++ b/app_dev.yaml @@ -1,4 +1,4 @@ -runtime: python37 +runtime: python38 instance_class: F2 # The "version" line is added here so that MR jobs can run 
locally (see issue # #6534 on oppia/oppia). @@ -37,9 +37,9 @@ handlers: static_dir: third_party/static secure: always expiration: "90d" -- url: /security.txt - static_files: assets/security.txt - upload: assets/security.txt +- url: /.well-known/security.txt + static_files: assets/.well-known/security.txt + upload: assets/.well-known/security.txt secure: always # DEVELOPMENT STATIC @@ -47,23 +47,26 @@ handlers: static_dir: webpack_bundles secure: always expiration: "0" +# /dist is the build output folder for angular. +- url: /dist + static_dir: dist/ + secure: always + expiration: "0" - url: /assets static_dir: assets secure: always + http_headers: + # This is replaced by a specific origin when doing a deployment. + Access-Control-Allow-Origin: "*" expiration: "0" -# Serve js scripts and css files under core/templates. -# This regex allows us to recursively serve js scripts. +# Serve CSS files under core/templates. +# This regex allows us to recursively serve CSS files. # "\1" inserts text captured by the capture group in the URL pattern. - url: /templates/(.*\.(css))$ static_files: core/templates/\1 upload: core/templates/(.*\.(css))$ secure: always expiration: "0" -- url: /templates/(.*\.(js))$ - static_files: local_compiled_js/core/templates/\1 - upload: local_compiled_js/core/templates/(.*\.(js))$ - secure: always - expiration: "0" - url: /templates/(.*\.(html))$ static_files: core/templates/\1 upload: core/templates/(.*\.(html))$ @@ -72,26 +75,24 @@ handlers: - url: /third_party/generated static_dir: third_party/generated secure: always + http_headers: + # This is replaced by a specific origin when doing a deployment. 
+ Access-Control-Allow-Origin: "*" expiration: "0" - url: /extensions/interactions/(.*)/static/(.*\.(css|png)) static_files: extensions/interactions/\1/static/\2 upload: extensions/interactions/(.*)/static/(.*\.(css|png)) secure: always expiration: "0" -- url: /extensions/interactions/(.*)/static/(.js) - static_files: extensions/interactions/\1/static/\2 - upload: local_compiled_js/extensions/interactions/(.*)/static/(.js) - secure: always - expiration: "0" - url: /extensions/(interactions|rich_text_components)/(.*)/directives/(.*\.(html)) static_files: extensions/\1/\2/directives/\3 upload: extensions/(interactions|rich_text_components)/(.*)/directives/(.*\.(html)) secure: always expiration: "0" -# Serve js scripts for gadgets, interactions, rich_text_components, custom ckeditor plugins -# and objects under extensions in dev mode. This regex allows us to recursively serve js -# scripts under the three specified directories. "\1" and "\2" insert capture -# groups from the url pattern. +# Serve PNG images for interactions, rich_text_components, objects, classifiers, +# and custom ckeditor plugins under extensions in dev mode. This regex allows +# us to recursively serve PNG images scripts under the five specified +# directories. "\1" and "\2" insert capture groups from the url pattern. 
- url: /extensions/(interactions|rich_text_components|objects|classifiers|ckeditor_plugins)/(.*\.(png))$ static_files: extensions/\1/\2 upload: extensions/(interactions|rich_text_components|objects|classifiers|ckeditor_plugins)/(.*\.(png))$ diff --git a/assets/.well-known/security.txt b/assets/.well-known/security.txt new file mode 100644 index 000000000000..a6c4cf8d129a --- /dev/null +++ b/assets/.well-known/security.txt @@ -0,0 +1,5 @@ +Contact: security@oppia.org +Preferred-Languages: en +Canonical: https://www.oppia.org/.well-known/security.txt +Acknowledgements: https://www.oppia.org/about#credits +Policy: https://github.com/oppia/oppia/blob/develop/.github/SECURITY.md diff --git a/assets/analytics-constants.json b/assets/analytics-constants.json new file mode 100644 index 000000000000..ab201b532ba3 --- /dev/null +++ b/assets/analytics-constants.json @@ -0,0 +1,6 @@ +{ + "CAN_SEND_ANALYTICS_EVENTS": false, + "GA_ANALYTICS_ID": "", + "UA_ANALYTICS_ID": "", + "SITE_NAME_FOR_ANALYTICS": "" +} diff --git a/assets/audio/end_chapter_celebratory_tadaa.mp3 b/assets/audio/end_chapter_celebratory_tadaa.mp3 new file mode 100644 index 000000000000..424ea8fa1aa0 Binary files /dev/null and b/assets/audio/end_chapter_celebratory_tadaa.mp3 differ diff --git a/assets/constants.ts b/assets/constants.ts index 89bb30cfedad..4893e05da05c 100644 --- a/assets/constants.ts +++ b/assets/constants.ts @@ -3,7 +3,7 @@ // "eslint disable next line" for each of them. /* eslint-disable oppia/no-multiline-disable */ /* eslint-disable quote-props */ -/* eslint-disable quotes */ +/* eslint-disable @typescript-eslint/quotes */ /* Don't modify anything outside the {} brackets. * Insides of the {} brackets should be formatted as a JSON object. * JSON rules: @@ -18,14 +18,13 @@ */ export default { - // Whether to allow custom event reporting to Google Analytics. - // Mock gtag function is used when disabled. 
- "CAN_SEND_ANALYTICS_EVENTS": false, - // The term 'staging' is used instead of the classroom url fragment field // in the URL for topics that are not yet attached to a classroom. "CLASSROOM_URL_FRAGMENT_FOR_UNATTACHED_TOPICS": "staging", + // Acceptable URL schemes for links. + "ACCEPTABLE_SCHEMES": ["https", ""], + // The default classroom URL fragment to use when the provided classroom URL // fragment in the controller is invalid. "DEFAULT_CLASSROOM_URL_FRAGMENT": "math", @@ -45,6 +44,7 @@ export default { "DISABLED_EXPLORATION_IDS": ["5"], "TESTING_CONSTANT": "test", "LIBRARY_TILE_WIDTH_PX": 208, + "LIBRARY_MOBILE_TILE_WIDTH_PX": 350, "DASHBOARD_TYPE_CREATOR": "creator", "DASHBOARD_TYPE_LEARNER": "learner", "DEFAULT_COLOR": "#a33f40", @@ -59,6 +59,7 @@ export default { "ASSIGNED": "Assigned", "UNASSIGNED": "Unassigned" }, + "TOPIC_MINIMUM_QUESTIONS_TO_PRACTICE": 10, "TOPIC_SKILL_DASHBOARD_SORT_OPTIONS": { "IncreasingCreatedOn": "Newly Created", "DecreasingCreatedOn": "Oldest Created", @@ -72,10 +73,207 @@ export default { "History", "Mathematics", "Medicine", "Music", "Physics", "Programming", "Reading", "Statistics"], + // These classrooms are shown in the add learner group syllabus filters. + "SEARCH_DROPDOWN_CLASSROOMS": ["math"], + + // These types are shown in the add learner group syllabus filters. + "SEARCH_DROPDOWN_TYPES": ["Skill", "Story"], + // The default language code for an exploration. "DEFAULT_LANGUAGE_CODE": "en", - "RTL_LANGUAGE_CODES": ["ar"], + // Hacky translation keys for classroom, topic, skill, exploration + // and subtopic names and descriptions. Needs to be updated whenever + // any new class, topic, skill, exploration or subtopic is added or a + // previous one is deleted. + // TODO(#14645): Remove these keys once translation service is extended. 
+ "HACKY_TRANSLATION_KEYS": [ + "I18N_CLASSROOM_MATH_TITLE", + "I18N_TOPIC_iX9kYCjnouWN_TITLE", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE", + "I18N_TOPIC_qW12maD4hiA8_TITLE", + "I18N_TOPIC_dLmjjMDbCcrf_TITLE", + "I18N_TOPIC_0abdeaJhmfPm_TITLE", + "I18N_TOPIC_5g0nxGUmx5J5_TITLE", + "I18N_TOPIC_iX9kYCjnouWN_DESCRIPTION", + "I18N_TOPIC_sWBXKH4PZcK6_DESCRIPTION", + "I18N_TOPIC_C4fqwrvqWpRm_DESCRIPTION", + "I18N_TOPIC_qW12maD4hiA8_DESCRIPTION", + "I18N_TOPIC_dLmjjMDbCcrf_DESCRIPTION", + "I18N_TOPIC_0abdeaJhmfPm_DESCRIPTION", + "I18N_TOPIC_5g0nxGUmx5J5_DESCRIPTION", + "I18N_EXPLORATION_K645IfRNzpKy_TITLE", + "I18N_EXPLORATION_K645IfRNzpKy_DESCRIPTION", + "I18N_EXPLORATION_Knvx24p24qPO_TITLE", + "I18N_EXPLORATION_Knvx24p24qPO_DESCRIPTION", + "I18N_EXPLORATION_aAkDKVDR53cG_TITLE", + "I18N_EXPLORATION_aAkDKVDR53cG_DESCRIPTION", + "I18N_EXPLORATION_avwshGklKLJE_TITLE", + "I18N_EXPLORATION_avwshGklKLJE_DESCRIPTION", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE", + "I18N_EXPLORATION_OKxYhsWONHZV_DESCRIPTION", + "I18N_EXPLORATION_BJd7yHIxpqkq_TITLE", + "I18N_EXPLORATION_BJd7yHIxpqkq_DESCRIPTION", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE", + "I18N_EXPLORATION_W0xq3jW5GzDF_DESCRIPTION", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE", + "I18N_EXPLORATION_53Ka3mQ6ra5A_DESCRIPTION", + "I18N_EXPLORATION_VKXd8qHsxLml_TITLE", + "I18N_EXPLORATION_VKXd8qHsxLml_DESCRIPTION", + "I18N_EXPLORATION_PsfDKdhd6Esz_TITLE", + "I18N_EXPLORATION_PsfDKdhd6Esz_DESCRIPTION", + "I18N_EXPLORATION_9DITEN8BUEHw_TITLE", + "I18N_EXPLORATION_9DITEN8BUEHw_DESCRIPTION", + "I18N_EXPLORATION_R7WpsSfmDQPV_TITLE", + "I18N_EXPLORATION_R7WpsSfmDQPV_DESCRIPTION", + "I18N_EXPLORATION_zIBYaqfDJrJC_TITLE", + "I18N_EXPLORATION_zIBYaqfDJrJC_DESCRIPTION", + "I18N_EXPLORATION_1904tpP0CYwY_TITLE", + "I18N_EXPLORATION_1904tpP0CYwY_DESCRIPTION", + "I18N_EXPLORATION_cQDibOXQbpi7_TITLE", + "I18N_EXPLORATION_cQDibOXQbpi7_DESCRIPTION", + "I18N_EXPLORATION_MRJeVrKafW6G_TITLE", + 
"I18N_EXPLORATION_MRJeVrKafW6G_DESCRIPTION", + "I18N_EXPLORATION_hNOP3TwRJhsz_TITLE", + "I18N_EXPLORATION_hNOP3TwRJhsz_DESCRIPTION", + "I18N_EXPLORATION_zTg2hzTz37jP_TITLE", + "I18N_EXPLORATION_zTg2hzTz37jP_DESCRIPTION", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE", + "I18N_EXPLORATION_8HTzQQUPiK5i_DESCRIPTION", + "I18N_EXPLORATION_40a3vjmZ7Fwu_TITLE", + "I18N_EXPLORATION_40a3vjmZ7Fwu_DESCRIPTION", + "I18N_EXPLORATION_WulCxGAmGE61_TITLE", + "I18N_EXPLORATION_WulCxGAmGE61_DESCRIPTION", + "I18N_EXPLORATION_lOU0XPC2BnE9_TITLE", + "I18N_EXPLORATION_lOU0XPC2BnE9_DESCRIPTION", + "I18N_EXPLORATION_wE9pyaC5np3n_TITLE", + "I18N_EXPLORATION_wE9pyaC5np3n_DESCRIPTION", + "I18N_EXPLORATION_umPkwp0L1M0-_TITLE", + "I18N_EXPLORATION_umPkwp0L1M0-_DESCRIPTION", + "I18N_EXPLORATION_MjZzEVOG47_1_TITLE", + "I18N_EXPLORATION_MjZzEVOG47_1_DESCRIPTION", + "I18N_EXPLORATION_9trAQhj6uUC2_TITLE", + "I18N_EXPLORATION_9trAQhj6uUC2_DESCRIPTION", + "I18N_EXPLORATION_rfX8jNkPnA-1_TITLE", + "I18N_EXPLORATION_rfX8jNkPnA-1_DESCRIPTION", + "I18N_EXPLORATION_0FBWxCE5egOw_TITLE", + "I18N_EXPLORATION_0FBWxCE5egOw_DESCRIPTION", + "I18N_EXPLORATION_670bU6d9JGBh_TITLE", + "I18N_EXPLORATION_670bU6d9JGBh_DESCRIPTION", + "I18N_EXPLORATION_aHikhPlxYgOH_TITLE", + "I18N_EXPLORATION_aHikhPlxYgOH_DESCRIPTION", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION", + "I18N_EXPLORATION_zW39GLG_BdN2_TITLE", + "I18N_EXPLORATION_zW39GLG_BdN2_DESCRIPTION", + "I18N_EXPLORATION_Xa3B_io-2WI5_TITLE", + "I18N_EXPLORATION_Xa3B_io-2WI5_DESCRIPTION", + "I18N_EXPLORATION_6Q6IyIDkjpYC_TITLE", + "I18N_EXPLORATION_6Q6IyIDkjpYC_DESCRIPTION", + "I18N_EXPLORATION_osw1m5Q3jK41_TITLE", + "I18N_EXPLORATION_osw1m5Q3jK41_DESCRIPTION", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE", + "I18N_EXPLORATION_2mzzFVDLuAj8_DESCRIPTION", + "I18N_EXPLORATION_5NWuolNcwH6e_TITLE", + "I18N_EXPLORATION_5NWuolNcwH6e_DESCRIPTION", + "I18N_EXPLORATION_k2bQ7z5XHNbK_TITLE", + "I18N_EXPLORATION_k2bQ7z5XHNbK_DESCRIPTION", + 
"I18N_EXPLORATION_tIoSb3HZFN6e_TITLE", + "I18N_EXPLORATION_tIoSb3HZFN6e_DESCRIPTION", + "I18N_EXPLORATION_nLmUS6lbmvnl_TITLE", + "I18N_EXPLORATION_nLmUS6lbmvnl_DESCRIPTION", + "I18N_EXPLORATION_Vgde5_ZVqrq5_TITLE", + "I18N_EXPLORATION_Vgde5_ZVqrq5_DESCRIPTION", + "I18N_EXPLORATION_RvopsvVdIb0J_TITLE", + "I18N_EXPLORATION_RvopsvVdIb0J_DESCRIPTION", + "I18N_EXPLORATION_zVbqxwck0KaC_TITLE", + "I18N_EXPLORATION_zVbqxwck0KaC_DESCRIPTION", + "I18N_EXPLORATION_rDJojPOc0KgJ_TITLE", + "I18N_EXPLORATION_rDJojPOc0KgJ_DESCRIPTION", + "I18N_EXPLORATION_kYSrbNDCv5sH_TITLE", + "I18N_EXPLORATION_kYSrbNDCv5sH_DESCRIPTION", + "I18N_EXPLORATION_K89Hgj2qRSzw_TITLE", + "I18N_EXPLORATION_K89Hgj2qRSzw_DESCRIPTION", + "I18N_EXPLORATION_lNpxiuqufPiw_TITLE", + "I18N_EXPLORATION_lNpxiuqufPiw_DESCRIPTION", + "I18N_EXPLORATION_Jbgc3MlRiY07_TITLE", + "I18N_EXPLORATION_Jbgc3MlRiY07_DESCRIPTION", + "I18N_EXPLORATION_rwN3YPG9XWZa_TITLE", + "I18N_EXPLORATION_rwN3YPG9XWZa_DESCRIPTION", + "I18N_EXPLORATION_nTMZwH7i0DdW_TITLE", + "I18N_EXPLORATION_nTMZwH7i0DdW_DESCRIPTION", + "I18N_EXPLORATION_IrbGLTicm0BI_TITLE", + "I18N_EXPLORATION_IrbGLTicm0BI_DESCRIPTION", + "I18N_EXPLORATION_v8fonNnX4Ub1_TITLE", + "I18N_EXPLORATION_v8fonNnX4Ub1_DESCRIPTION", + "I18N_EXPLORATION_ibeLZqbbjbKF_TITLE", + "I18N_EXPLORATION_ibeLZqbbjbKF_DESCRIPTION", + "I18N_EXPLORATION_BDIln52yGfeH_TITLE", + "I18N_EXPLORATION_BDIln52yGfeH_DESCRIPTION", + "I18N_EXPLORATION_SR1IKIdLxnm1_TITLE", + "I18N_EXPLORATION_SR1IKIdLxnm1_DESCRIPTION", + "I18N_EXPLORATION_m1nvGABWeUoh_TITLE", + "I18N_EXPLORATION_m1nvGABWeUoh_DESCRIPTION", + "I18N_EXPLORATION_zNb0Bh27QtJ4_TITLE", + "I18N_EXPLORATION_zNb0Bh27QtJ4_DESCRIPTION", + "I18N_EXPLORATION_5I4srORrwjt2_TITLE", + "I18N_EXPLORATION_5I4srORrwjt2_DESCRIPTION", + "I18N_EXPLORATION_aqJ07xrTFNLF_TITLE", + "I18N_EXPLORATION_aqJ07xrTFNLF_DESCRIPTION", + "I18N_EXPLORATION_0X0KC9DXWwra_TITLE", + "I18N_EXPLORATION_0X0KC9DXWwra_DESCRIPTION", + "I18N_STORY_RRVMHsZ5Mobh_TITLE", + 
"I18N_STORY_RRVMHsZ5Mobh_DESCRIPTION", + "I18N_STORY_Qu6THxP29tOy_TITLE", + "I18N_STORY_Qu6THxP29tOy_DESCRIPTION", + "I18N_STORY_vfJDB3JAdwIx_TITLE", + "I18N_STORY_vfJDB3JAdwIx_DESCRIPTION", + "I18N_STORY_rqnxwceQyFnv_TITLE", + "I18N_STORY_rqnxwceQyFnv_DESCRIPTION", + "I18N_STORY_3M5VBajMccXO_TITLE", + "I18N_STORY_3M5VBajMccXO_DESCRIPTION", + "I18N_STORY_JhiDkq01dqgC_TITLE", + "I18N_STORY_JhiDkq01dqgC_DESCRIPTION", + "I18N_STORY_ialKSV0VYV0B_TITLE", + "I18N_STORY_ialKSV0VYV0B_DESCRIPTION", + "I18N_SUBTOPIC_iX9kYCjnouWN_place-names-and-values_TITLE", + "I18N_SUBTOPIC_iX9kYCjnouWN_naming-numbers_TITLE", + "I18N_SUBTOPIC_iX9kYCjnouWN_comparing-numbers_TITLE", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE", + "I18N_SUBTOPIC_sWBXKH4PZcK6_adding-numbers_TITLE", + "I18N_SUBTOPIC_sWBXKH4PZcK6_subtracting-numbers_TITLE", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE", + "I18N_SUBTOPIC_sWBXKH4PZcK6_estimation_TITLE", + "I18N_SUBTOPIC_sWBXKH4PZcK6_sequences _TITLE", + "I18N_SUBTOPIC_C4fqwrvqWpRm_basic-concepts_TITLE", + "I18N_SUBTOPIC_C4fqwrvqWpRm_memorizing-expressions_TITLE", + "I18N_SUBTOPIC_C4fqwrvqWpRm_multiplication-techniques_TITLE", + "I18N_SUBTOPIC_C4fqwrvqWpRm_rules-to-simplify_TITLE", + "I18N_SUBTOPIC_qW12maD4hiA8_basic-concepts_TITLE", + "I18N_SUBTOPIC_qW12maD4hiA8_techniques-of-division_TITLE", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE", + "I18N_SUBTOPIC_dLmjjMDbCcrf_order-of-operations_TITLE", + "I18N_SUBTOPIC_dLmjjMDbCcrf_variables_TITLE", + "I18N_SUBTOPIC_dLmjjMDbCcrf_modelling-scenarios_TITLE", + "I18N_SUBTOPIC_dLmjjMDbCcrf_problem-solving_TITLE", + "I18N_SUBTOPIC_dLmjjMDbCcrf_algebraic-expressions_TITLE", + "I18N_SUBTOPIC_dLmjjMDbCcrf_solving-equations_TITLE", + "I18N_SUBTOPIC_0abdeaJhmfPm_what-is-a-fraction_TITLE", + "I18N_SUBTOPIC_0abdeaJhmfPm_fractions-of-a-group_TITLE", + "I18N_SUBTOPIC_0abdeaJhmfPm_equivalent-fractions_TITLE", + "I18N_SUBTOPIC_0abdeaJhmfPm_mixed-numbers_TITLE", + 
"I18N_SUBTOPIC_0abdeaJhmfPm_number-line_TITLE", + "I18N_SUBTOPIC_0abdeaJhmfPm_comparing-fractions_TITLE", + "I18N_SUBTOPIC_0abdeaJhmfPm_adding-fractions_TITLE", + "I18N_SUBTOPIC_0abdeaJhmfPm_subtracting-fractions_TITLE", + "I18N_SUBTOPIC_0abdeaJhmfPm_multiplying-fractions_TITLE", + "I18N_SUBTOPIC_0abdeaJhmfPm_dividing-fractions_TITLE", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE", + "I18N_SUBTOPIC_5g0nxGUmx5J5_equivalent-ratios_TITLE", + "I18N_SUBTOPIC_5g0nxGUmx5J5_calculations-with-ratios_TITLE", + "I18N_SUBTOPIC_5g0nxGUmx5J5_combining-ratios_TITLE" + ], "ALLOWED_THUMBNAIL_BG_COLORS": { "chapter": ["#F8BF74", "#D68F78", "#8EBBB6", "#B3D8F1"], @@ -85,6 +283,7 @@ export default { }, "ALLOWED_IMAGE_FORMATS": ["svg", "png", "jpeg", "jpg", "gif"], + "MAX_ALLOWED_IMAGE_SIZE_IN_KB_FOR_BLOG": 1024, "TASK_TYPE_HIGH_BOUNCE_RATE": "high_bounce_rate", "TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP": "ineffective_feedback_loop", @@ -99,18 +298,30 @@ export default { "TASK_TARGET_TYPE_STATE": "state", + // Filters for adding new syllabus items to learner groups. + "DEFAULT_ADD_SYLLABUS_FILTER": "All", + "LEARNER_GROUP_ADD_STORY_FILTER": "Story", + "LEARNER_GROUP_ADD_SKILL_FILTER": "Skill", + // Roles in exploration. "ROLE_OWNER": "owner", "ROLE_EDITOR": "editor", "ROLE_VOICE_ARTIST": "voice artist", "ROLE_VIEWER": "viewer", + // The supported tags for the mailing list subscriptions. + "MAILING_LIST_ANDROID_TAG": "Android", + "MAILING_LIST_WEB_TAG": "Web", // Regex to validate the format of Math rich-text component SVGs. If this is // changed in the future, the existing filenames on the server should be // handled as well. // eslint-disable-next-line max-len "MATH_SVG_FILENAME_REGEX": "mathImg_[a-z0-9_]+_height_[0-9d]+_width_[0-9d]+_vertical_[0-9d]+.(svg)$", + // This regex validates whether a given string is in the format of YYYY-MM-DD + // format. 
+ "DATE_REGEX": "^[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}$", + // The SVG tag-specific attribute allowlist is based on the list of tags and // and attributes specified in this project: // https://github.com/cure53/DOMPurify @@ -4747,288 +4958,389 @@ export default { "Welcome": "#992a2b" }, + "INVALID_RTE_COMPONENTS_FOR_BLOG_POST_EDITOR": ["tabs", "skillreview"], + // This is linked to VALID_RTE_COMPONENTS in android_validation_constants. - "VALID_RTE_COMPONENTS_FOR_ANDROID": ["image", "link", "math", "skillreview"], + "VALID_RTE_COMPONENTS_FOR_ANDROID": ["image", "math", "skillreview"], // This is linked to SUPPORTED_LANGUAGES in android_validation_constants. "SUPPORTED_CONTENT_LANGUAGES_FOR_ANDROID": [{ "code": "en", - "description": "English" + "description": "English", + "decimal_separator": "." }], // List of supported content languages in which we can create explorations or // other entities. Each description has a parenthetical part that may be // stripped out to give a shorter description. + // The decimal separators were derived from https://en.wikipedia.org/w/index.php?title=Decimal_separator&section=9#Usage_worldwide. "SUPPORTED_CONTENT_LANGUAGES": [{ "code": "en", "description": "English", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "ar", "description": "العربية (Arabic)", - "direction": "rtl" + "direction": "rtl", + "decimal_separator": "," }, { "code": "sq", "description": "shqip (Albanian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," + }, { + "code": "am", + "description": "አማርኛ (Amharic)", + "direction": "ltr", + "decimal_separator": "." 
}, { "code": "az", "description": "Azeri (Azerbaijani)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "bg", "description": "български (Bulgarian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "bn", "description": "বাংলা (Bangla)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "ca", "description": "català (Catalan)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "zh", "description": "中文 (Chinese)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "hr", "description": "hrvatski (Croatian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "cs", "description": "čeština (Czech)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "da", "description": "dansk (Danish)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "nl", "description": "Nederlands (Dutch)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "fat", "description": "Fanti", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "tl", "description": "Filipino (Filipino)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "fi", "description": "suomi (Finnish)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "fr", "description": "français (French)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "lg", "description": "Luganda (Ganda)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." 
}, { "code": "de", "description": "Deutsch (German)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "el", "description": "ελληνικά (Greek)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," + }, { + "code": "ha", + "description": "Halshen Hausa (Hausa)", + "direction": "ltr", + "decimal_separator": "." }, { "code": "he", "description": "עברית (Hebrew)", - "direction": "rtl" + "direction": "rtl", + "decimal_separator": "." }, { "code": "hi", "description": "हिन्दी (Hindi)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "hi-en", "description": "Hinglish", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "hu", "description": "magyar (Hungarian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "id", "description": "Bahasa Indonesia (Indonesian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," + }, { + "code": "ig", + "description": "Ásụ̀sụ́ Ìgbò (Igbo)", + "direction": "ltr", + "decimal_separator": "." }, { "code": "it", "description": "italiano (Italian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "ja", "description": "日本語 (Japanese)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "kab", "description": "Taqbaylit (Kabyle)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "ko", "description": "한국어 (Korean)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "lv", "description": "latviešu (Latvian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "lt", "description": "lietuvių (Lithuanian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "mr", "description": "मराठी (Marathi)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." 
}, { "code": "no", "description": "Norsk (Norwegian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "fa", "description": "فارسی (Persian)", - "direction": "rtl" + "direction": "rtl", + "decimal_separator": "," + }, { + "code": "pcm", + "description": "Naijá (Nigerian Pidgin)", + "direction": "ltr", + "decimal_separator": "," }, { "code": "pl", "description": "polszczyzna (Polish)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "prs", "description": "دری (Dari)", - "direction": "rtl" + "direction": "rtl", + "decimal_separator": "," }, { "code": "pt", "description": "português (Portuguese)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "ro", "description": "română (Romanian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "ru", "description": "pусский (Russian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "sr", "description": "cрпски (Serbian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "sk", "description": "slovenčina (Slovak)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "sl", "description": "slovenščina (Slovenian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "es", "description": "español (Spanish)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "sw", "description": "kiswahili (Swahili)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "sv", "description": "svenska (Swedish)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "ta", "description": "தமிழ் (Tamil)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "code": "th", "description": "ภาษาไทย (Thai)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." 
}, { "code": "tr", "description": "Türkçe (Turkish)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "uk", "description": "yкраїнська (Ukrainian)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "code": "ur", "description": "اُردُو (Urdu)", - "direction": "rtl" + "direction": "rtl", + "decimal_separator": "," }, { "code": "vi", "description": "Tiếng Việt (Vietnamese)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," + }, { + "code": "yo", + "description": "Èdè Yoùbá (Yoruba)", + "direction": "ltr", + "decimal_separator": "." }], // NOTE TO DEVELOPERS: While adding another language, please ensure that the // languages are roughly in order of how much support we have for them in // terms of lesson content translations. // List of supported site languages in which the platform is offered. + // The decimal separators were derived from https://en.wikipedia.org/w/index.php?title=Decimal_separator&section=9#Usage_worldwide. "SUPPORTED_SITE_LANGUAGES": [{ "id": "en", "text": "English", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "id": "pt-br", "text": "Português (Brasil)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "id": "ar", "text": "العربية", - "direction": "rtl" + "direction": "rtl", + "decimal_separator": "," }, { "id": "hi", "text": "हिन्दी", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "id": "es", "text": "Español", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "id": "bn", "text": "বাংলা", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." 
}, { "id": "fr", - "text": "français (French)", - "direction": "ltr" + "text": "français", + "direction": "ltr", + "decimal_separator": "," }, { "id": "id", "text": "Bahasa Indonesia", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," + }, { + "id": "pcm", + "text": "Naijá (Nigerian Pidgin)", + "direction": "ltr", + "decimal_separator": "," }, { "id": "uk", - "text": "украї́нська мо́ва (Ukrainian)", - "direction": "ltr" + "text": "украї́нська мо́ва", + "direction": "ltr", + "decimal_separator": "," }, { "id": "sk", - "text": "slovenčina (Slovak)", - "direction": "ltr" + "text": "slovenčina", + "direction": "ltr", + "decimal_separator": "," }, { "id": "nl", - "text": "Nederlands (Dutch)", - "direction": "ltr" + "text": "Nederlands", + "direction": "ltr", + "decimal_separator": "," }, { "id": "kab", "text": "Taqbaylit (Kabyle)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "id": "vi", "text": "Tiếng Việt", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "," }, { "id": "tr", - "text": "Türkçe (Turkish)", - "direction": "ltr" + "text": "Türkçe", + "direction": "ltr", + "decimal_separator": "," }, { "id": "zh-hans", "text": "中文(简体)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." }, { "id": "zh-hant", "text": "中文(繁體)", - "direction": "ltr" + "direction": "ltr", + "decimal_separator": "." 
}], // List of supported audio languages in which we have audio and translations @@ -5042,127 +5354,137 @@ export default { "direction": "ltr" }, { "id": "ak", - "description": "Akan", + "description": "Ákán (Akan)", "relatedLanguages": ["ak"], "direction": "ltr" }, { "id": "sq", - "description": "Albanian", + "description": "shqip (Albanian)", "relatedLanguages": ["sq"], "direction": "ltr" + }, { + "id": "am", + "description": "አማርኛ (Amharic)", + "relatedLanguages": ["am"], + "direction": "ltr" }, { "id": "ar", - "description": "Arabic", + "description": "العربية (Arabic)", "relatedLanguages": ["ar"], "direction": "rtl" }, { "id": "az", - "description": "Azerbaijani", + "description": "Azeri (Azerbaijani)", "relatedLanguages": ["az"], "direction": "ltr" }, { "id": "bg", - "description": "Bulgarian", + "description": "български (Bulgarian)", "relatedLanguages": ["bg"], "direction": "ltr" }, { "id": "bn", - "description": "Bangla", + "description": "বাংলা (Bangla)", "relatedLanguages": ["bn"], "direction": "ltr" }, { "id": "ms", - "description": "Bahasa Melayu", + "description": "بهاس ملايو(Bahasa Melayu)", "relatedLanguages": ["ms"], "direction": "ltr" }, { "id": "ca", - "description": "Catalan", + "description": "català (Catalan)", "relatedLanguages": ["ca"], "direction": "ltr" }, { "id": "zh", - "description": "Chinese", + "description": "中文 (Chinese)", "relatedLanguages": ["zh"], "direction": "ltr" }, { "id": "hr", - "description": "Croatian", + "description": "hrvatski (Croatian)", "relatedLanguages": ["hr"], "direction": "ltr" }, { "id": "cs", - "description": "Czech", + "description": "čeština (Czech)", "relatedLanguages": ["cs"], "direction": "ltr" }, { "id": "da", - "description": "Danish", + "description": "dansk (Danish)", "relatedLanguages": ["da"], "direction": "ltr" }, { "id": "prs", - "description": "Dari", + "description": "دری (Dari)", "relatedLanguages": ["prs"], "direction": "rtl" }, { "id": "nl", - "description": "Dutch", + "description": 
"Nederlands (Dutch)", "relatedLanguages": ["nl"], "direction": "ltr" }, { "id": "ee", - "description": "Ewe", + "description": "Eʋegbe (Ewe)", "relatedLanguages": ["ee"], "direction": "ltr" }, { "id": "fat", - "description": "Fanti", + "description": "Fante (Fanti)", "relatedLanguages": ["ak", "fat"], "direction": "ltr" }, { "id": "tl", - "description": "Filipino", + "description": "Filipino (Filipino)", "relatedLanguages": ["tl"], "direction": "ltr" }, { "id": "fi", - "description": "Finnish", + "description": "suomi (Finnish)", "relatedLanguages": ["fi"], "direction": "ltr" }, { "id": "fr", - "description": "French", + "description": "français (French)", "relatedLanguages": ["fr"], "direction": "ltr" }, { "id": "lg", - "description": "Ganda", + "description": "Luganda (Ganda)", "relatedLanguages": ["lg"], "direction": "ltr" }, { "id": "de", - "description": "German", + "description": "Deutsch (German)", "relatedLanguages": ["de"], "direction": "ltr" }, { "id": "el", - "description": "Greek", + "description": "ελληνικά (Greek)", "relatedLanguages": ["el"], "direction": "ltr" }, { "id": "gaa", - "description": "Ga", + "description": "Gã (Ga)", "relatedLanguages": ["gaa"], "direction": "ltr" + }, { + "id": "ha", + "description": "Halshen Hausa (Hausa)", + "relatedLanguages": ["ha"], + "direction": "ltr" }, { "id": "he", - "description": "Hebrew", + "description": "עברית (Hebrew)", "relatedLanguages": ["he"], "direction": "rtl" }, { "id": "hi", - "description": "Hindi", + "description": "हिन्दी (Hindi)", "relatedLanguages": ["hi"], "direction": "ltr" }, { @@ -5172,149 +5494,164 @@ export default { "direction": "ltr" }, { "id": "hu", - "description": "Hungarian", + "description": "magyar (Hungarian)", "relatedLanguages": ["hu"], "direction": "ltr" }, { "id": "id", - "description": "Indonesian", + "description": "Bahasa Indonesia (Indonesian)", "relatedLanguages": ["id"], "direction": "ltr" + }, { + "id": "ig", + "description": "Ásụ̀sụ́ Ìgbò (Igbo)", + 
"relatedLanguages": ["igbo"], + "direction": "ltr" }, { "id": "it", - "description": "Italian", + "description": "italiano (Italian)", "relatedLanguages": ["it"], "direction": "ltr" }, { "id": "ja", - "description": "Japanese", + "description": "日本語 (Japanese)", "relatedLanguages": ["ja"], "direction": "ltr" }, { "id": "kab", - "description": "Kabyle", + "description": "Taqbaylit (Kabyle)", "relatedLanguages": ["kab"], "direction": "ltr" }, { "id": "ko", - "description": "Korean", + "description": "한국어 (Korean)", "relatedLanguages": ["ko"], "direction": "ltr" }, { "id": "lv", - "description": "Latvian", + "description": "latviešu (Latvian)", "relatedLanguages": ["lv"], "direction": "ltr" }, { "id": "lt", - "description": "Lithuanian", + "description": "lietuvių (Lithuanian)", "relatedLanguages": ["lt"], "direction": "ltr" }, { "id": "mr", - "description": "Marathi", + "description": "मराठी (Marathi)", "relatedLanguages": ["mr"], "direction": "ltr" }, { "id": "no", - "description": "Norwegian", + "description": "Norsk (Norwegian)", "relatedLanguages": ["no"], "direction": "ltr" }, { "id": "fa", - "description": "Persian", + "description": "فارسی (Persian)", "relatedLanguages": ["fa"], "direction": "rtl" + }, { + "id": "pcm", + "description": "Naijá (Nigerian Pidgin)", + "relatedLanguages": ["pcm"], + "direction": "ltr" }, { "id": "pl", - "description": "Polish", + "description": "polszczyzna (Polish)", "relatedLanguages": ["pl"], "direction": "ltr" }, { "id": "pt", - "description": "Portuguese", + "description": "português (Portuguese)", "relatedLanguages": ["pt"], "direction": "ltr" }, { "id": "ps", - "description": "Pashto", + "description": "پښتو (Pashto)", "relatedLanguages": ["ps"], "direction": "rtl" }, { "id": "ro", - "description": "Romanian", + "description": "română (Romanian)", "relatedLanguages": ["ro"], "direction": "ltr" }, { "id": "ru", - "description": "Russian", + "description": "pусский (Russian)", "relatedLanguages": ["ru"], "direction": "ltr" }, 
{ "id": "sr", - "description": "Serbian", + "description": "cрпски (Serbian)", "relatedLanguages": ["sr"], "direction": "ltr" }, { "id": "sk", - "description": "Slovak", + "description": "slovenčina (Slovak)", "relatedLanguages": ["sk"], "direction": "ltr" }, { "id": "sl", - "description": "Slovenian", + "description": "slovenščina (Slovenian)", "relatedLanguages": ["sl"], "direction": "ltr" }, { "id": "es", - "description": "Spanish", + "description": "español (Spanish)", "relatedLanguages": ["es"], "direction": "ltr" }, { "id": "sw", - "description": "Swahili", + "description": "kiswahili (Swahili)", "relatedLanguages": ["sw"], "direction": "ltr" }, { "id": "sv", - "description": "Swedish", + "description": "svenska (Swedish)", "relatedLanguages": ["sv"], "direction": "ltr" }, { "id": "ta", - "description": "Tamil", + "description": "தமிழ் (Tamil)", "relatedLanguages": ["ta"], "direction": "ltr" }, { "id": "te", - "description": "Telugu", + "description": "తెలుగు (Telugu)", "relatedLanguages": ["te"], "direction": "ltr" }, { "id": "th", - "description": "Thai", + "description": "ภาษาไทย (Thai)", "relatedLanguages": ["th"], "direction": "ltr" }, { "id": "tr", - "description": "Turkish", + "description": "Türkçe (Turkish)", "relatedLanguages": ["tr"], "direction": "ltr" }, { "id": "uk", - "description": "Ukrainian", + "description": "yкраїнська (Ukrainian)", "relatedLanguages": ["uk"], "direction": "ltr" }, { "id": "ur", - "description": "Urdu", + "description": "اُردُو (Urdu)", "relatedLanguages": ["ur"], "direction": "rtl" }, { "id": "vi", - "description": "Vietnamese", + "description": "Tiếng Việt (Vietnamese)", "relatedLanguages": ["vi"], "direction": "ltr" + }, { + "id": "yo", + "description": "Èdè Yoùbá (Yoruba)", + "relatedLanguages": ["yo"], + "direction": "ltr" }], "AUTOGENERATED_AUDIO_LANGUAGES": [{ @@ -5373,11 +5710,13 @@ export default { // eslint-disable-next-line max-len "Use respectful pronouns (like “आप” instead of “तुम/तू ”) and a corresponding 
respectful tone like “करिये, करेंगे”.", // eslint-disable-next-line max-len - "Use the same voice (active or passive) as in the original English text.", + "Feel free to change the voice and order of phrases to make the text readable.", // eslint-disable-next-line max-len "Preserve punctuation and bolding. If the original content has bold text, make sure it is bold in Hindi as well. If there are bullet points, double quotes, etc., make sure that the translated content also has bullet points and double quotes.", // eslint-disable-next-line max-len - "If the original card has “components” (such as pictures, links, and equations), these need to be added to the translated content. You can use the “Copy tool” for this -- click on the Copy tool and then click on the component you want to carry over. Also, double-click on the image and translate the alt text (and caption, if any)." + "If the original card has “components” (such as pictures, links, and equations), these need to be added to the translated content. You can use the “Copy tool” for this -- click on the Copy tool and then click on the component you want to carry over. Also, double-click on the image and translate the alt text (and caption, if any).", + // eslint-disable-next-line max-len + "Refer to Glossary - https://docs.google.com/spreadsheets/d/13NMEnYqLZuMbeX1Z6XXG-femHkKNAN8KwjhaC67EkxI/edit#gid=0" ], // Spanish. 
"es": [ @@ -5419,6 +5758,8 @@ export default { "LIST": "list" }, + "EMAIL_REGEX": "^[^\\s@]+@[^\\s@]+\\.[^\\s@]+", + "ALLOWED_QUESTION_INTERACTION_CATEGORIES": [{ "name": "Commonly Used", "interaction_ids": [ @@ -5509,6 +5850,9 @@ export default { ] }], + "MIN_CHOICES_IN_MULTIPLE_CHOICE_INPUT_CURATED_EXP": 4, + "MIN_CHOICES_IN_MULTIPLE_CHOICE_INPUT_REGULAR_EXP": 2, + "CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION": "translation", "CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER": "voiceover", "CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION": "question", @@ -5517,6 +5861,8 @@ export default { "translation", "voiceover", "question", "submit_question" ], + "SUGGESTIONS_SORT_KEY_DATE": "Date", + "ACTION_REMOVE_ALL_REVIEW_RIGHTS": "all", "ACTION_REMOVE_SPECIFIC_CONTRIBUTION_RIGHTS": "specific", "USER_FILTER_CRITERION_USERNAME": "username", @@ -5552,7 +5898,7 @@ export default { "\\u001b", "\\u001c", "\\u001d", "\\u001e", "\\u001f" ], - "DEFAULT_SKILL_DIFFICULTY": 0.3, + "DEFAULT_SKILL_DIFFICULTY": 0.6, "INLINE_RTE_COMPONENTS": ["link", "math", "skillreview"], @@ -5578,18 +5924,29 @@ export default { "MAX_SKILLS_PER_QUESTION": 3, - "MAX_QUESTIONS_PER_SKILL": 50, + "MAX_QUESTIONS_PER_SKILL": 10, "NUM_EXPLORATIONS_PER_REVIEW_TEST": 3, "NUM_QUESTIONS_PER_PAGE": 10, + "MIN_QUESTION_COUNT_FOR_A_DIAGNOSTIC_TEST_SKILL": 3, + "BULK_EMAIL_SERVICE_SIGNUP_URL": "", // The default number of opportunities to show on the contributor dashboard // page. "OPPORTUNITIES_PAGE_SIZE": 10, + // The breakpoint for mobile view for contributor dashboard in px. + // This value must be the same as the one specified in + // opportunities-list-item.component.html. + "OPPORTUNITIES_LIST_ITEM_MOBILE_BREAKPOINT": 700, + + // Represents the string value indicating "All topics" in the Contributor + // Dashboard topic selector. + "TOPIC_SENTINEL_NAME_ALL": "All", + // The following character limit constraints follow from // android_validation_constants.py. Both have to be kept in sync. 
@@ -5597,6 +5954,7 @@ export default { // classroom in the classroom page URL. E.g. in /learn/math/..., // 'math' is the 'classroom URL fragment'. "MAX_CHARS_IN_CLASSROOM_URL_FRAGMENT": 20, + "MAX_CHARS_IN_CLASSROOM_NAME": 39, "MAX_CHARS_IN_TOPIC_NAME": 39, "MAX_CHARS_IN_ABBREV_TOPIC_NAME": 12, // This represents the maximum number of characters in the URL fragment for @@ -5611,9 +5969,10 @@ export default { "MAX_CHARS_IN_EXPLORATION_TITLE": 36, "MAX_CHARS_IN_CHAPTER_DESCRIPTION": 152, "MAX_CHARS_IN_MISCONCEPTION_NAME": 100, - "MAX_CHARS_IN_BLOG_POST_TITLE": 40, + "MAX_CHARS_IN_BLOG_POST_TITLE": 65, "MIN_CHARS_IN_BLOG_POST_TITLE": 5, "MAX_CHARS_IN_BLOG_POST_SUMMARY": 300, + "MAX_CHARS_IN_LEARNER_GROUP_TITLE": 36, "STORY_ID_LENGTH": 12, // This represents the maximum number of characters in the URL fragment for // story in the story page URL. E.g. @@ -5625,14 +5984,18 @@ export default { // in /learn/math/fractions/revision/place-values, 'place-values' is the // 'subtopic URL fragment'. "MAX_CHARS_IN_SUBTOPIC_URL_FRAGMENT": 25, - // This represents the maximum number of characters in the URL fragment for - // the blog post. - "MAX_CHARS_IN_BLOG_POST_URL_FRAGMENT": 65, + // This is same as base_models.ID_Length. + "BLOG_POST_ID_LENGTH": 12, // The recommended length for meta tag contents. Search engines will truncate // results greater than this limit. "MAX_CHARS_IN_META_TAG_CONTENT": 160, "MIN_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB": 5, "MAX_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB": 50, + // The maximum number of questions can exceed this by at most 3 + // (i.e., 18 questions) in some special cases when the user has attempted 14 + // questions and another topic is tested for more accurate results. For all + // other cases, 15 questions is the upper limit. 
+ "MAX_ALLOWED_QUESTIONS_IN_THE_DIAGNOSTIC_TEST": 15, "NEW_STATE_TEMPLATE": { "classifier_model_id": null, @@ -5647,6 +6010,7 @@ export default { "answer_groups": [], "default_outcome": { "dest": "Introduction", + "dest_if_really_stuck": null, "feedback": { "content_id": "default_outcome", "html": "" @@ -5660,28 +6024,14 @@ export default { "hints": [], "solution": null }, - "next_content_id_index": 0, "param_changes": [], "recorded_voiceovers": { - "voiceovers_mapping": { - "content": {}, - "default_outcome": {} - } + "voiceovers_mapping": {} }, "solicit_answer_details": false, - "card_is_checkpoint": false, - "written_translations": { - "translations_mapping": { - "content": {}, - "default_outcome": {} - } - } + "card_is_checkpoint": false }, - // Data required for Google Analytics. - "ANALYTICS_ID": "", - "SITE_NAME_FOR_ANALYTICS": "", - // Data required for Firebase authentication. // // NOTE TO RELEASE COORDINATORS: Please change these to the production values, @@ -5716,8 +6066,12 @@ export default { // A regular expression for allowed entity id's. "ENTITY_ID_REGEX": "^[a-zA-Z0-9-_]{1,12}$", + // A regular expression for allowed learner group IDs. + "LEARNER_GROUP_ID_REGEX": "^[a-zA-Z]{1,12}$", + // A regular expression for allowed characters in Title field for Blog Post. - "VALID_BLOG_POST_TITLE_REGEX": "^[a-zA-Z0-9][a-zA-Z0-9 ]+(-[a-zA-Z0-9]+)*$", + // eslint-disable-next-line max-len + "VALID_BLOG_POST_TITLE_REGEX": "^[a-zA-Z0-9(&!,'/)][a-zA-Z0-9(&!,'/) ]+([-:][ a-zA-Z0-9(&!,'/)]+)*$", // A regular expression for allowed characters in URL fragment for Blog Post. "VALID_URL_BLOG_FRAGMENT_REGEX": "^[a-z0-9]+(-[a-z0-9]+)*$", @@ -5729,6 +6083,9 @@ export default { // A regular expression for valid skill misconception id. "VALID_SKILL_MISCONCEPTION_ID_REGEX": "[A-Za-z0-9]{12}-[0-9]+", + // A regular expression for allowed characters in author name field for Author + // details Model. 
+ "VALID_AUTHOR_NAME_REGEX": "^[a-zA-Z0-9][a-zA-Z0-9 ]+(-[a-zA-Z0-9]+)*$", // Invalid names for parameters used in expressions. "INVALID_PARAMETER_NAMES": [ "answer", "choices", "abs", "all", "and", "any", "else", @@ -5784,8 +6141,8 @@ export default { "upsilon", "phi", "chi", "psi", "omega", "Gamma", "Delta", "Theta", "Lambda", "Xi", "Pi", "Sigma", "Phi", "Psi", "Omega"], - // Allowed letters in the OSK. - "VALID_CUSTOM_OSK_LETTERS": [ + // Valid allowed letters for math lessons. + "VALID_ALLOWED_VARIABLES": [ "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", @@ -5804,6 +6161,9 @@ export default { "arcsin", "arccos", "arctan", "sinh", "cosh", "tanh" ], + // Supported functions for math interactions. + "SUPPORTED_FUNCTION_NAMES": ["sqrt", "abs"], + "OSK_MAIN_TAB": "mainTab", "OSK_FUNCTIONS_TAB": "functionsTab", "OSK_LETTERS_TAB": "lettersTab", @@ -5840,9 +6200,11 @@ export default { // eslint-disable-next-line max-len "DEFAULT_TWITTER_SHARE_MESSAGE_EDITOR": "Check out this interactive lesson on Oppia - a free platform for teaching and learning!", + // eslint-disable-next-line max-len + "DEFUALT_BLOG_POST_SHARE_TWITTER_TEXT": "Check out this new blog post on Oppia!", + "OPPORTUNITY_TYPE_SKILL": "skill", "OPPORTUNITY_TYPE_TRANSLATION": "translation", - "OPPORTUNITY_TYPE_VOICEOVER": "voiceover", // The bucket name is set to app_default_bucket which is used to store files // in GCS when local development server is running. This should be changed @@ -5851,15 +6213,21 @@ export default { "ENABLE_EXP_FEEDBACK_FOR_LOGGED_OUT_USERS": true, - // Link to open when the Oppia avatar is clicked on any page. - "OPPIA_AVATAR_LINK_URL": null, - // Maximum allowed length of a username. "MAX_USERNAME_LENGTH": 30, + // Maximum allowed length of a blog post author's name. 
+ "MAX_AUTHOR_NAME_LENGTH": 35, + + // Maximum allowed characters in a blog post author's bio. + "MAX_CHARS_IN_AUTHOR_BIO": 250, + // Maximum allowed length of a state name. "MAX_STATE_NAME_LENGTH": 50, + // Maximum allowed length of unique progress url ID. + "MAX_PROGRESS_URL_ID_LENGTH": 6, + "PLATFORM_PARAMETER_ALLOWED_BROWSER_TYPES": [ "Chrome", "Edge", "Safari", "Firefox", "Others"], "PLATFORM_PARAMETER_ALLOWED_PLATFORM_TYPES": ["Web", "Android", "Backend"], @@ -5979,7 +6347,7 @@ export default { "PAGES_REGISTERED_WITH_FRONTEND": { "ABOUT": { "ROUTE": "about", - "TITLE": "About | Oppia", + "TITLE": "I18N_ABOUT_PAGE_TITLE", "META": [ { "PROPERTY_TYPE": "itemprop", @@ -5997,34 +6365,48 @@ export default { }, "ABOUT_FOUNDATION": { "ROUTE": "about-foundation", - "TITLE": "About foundation | Oppia", + "TITLE": "About the Oppia Foundation | Oppia", "META": [] }, - "CLASSROOM": { - "ROUTE": "learn/:classroom_url_fragment", - "TITLE": "Oppia", + "EXPLORATION_PLAYER": { + "ROUTE": "explore/:exploration_id", + "TITLE": "", // Some routes contain url fragments, as syntax for url fragments are // different for angular router and backend. They have to be registered // manually in the backend. Please use angular router syntax here. "MANUALLY_REGISTERED_WITH_BACKEND": true, + "META": [] + }, + "EXPLORATION_PLAYER_EMBED": { + "ROUTE": "embed/exploration/:exploration_id", + "TITLE": "", + // Some routes contain url fragments, as syntax for url fragments are + // different for angular router and backend. They have to be registered + // manually in the backend. Please use angular router syntax here. + "MANUALLY_REGISTERED_WITH_BACKEND": true, + "META": [] + }, + "ANDROID": { + "ROUTE": "android", + "TITLE": "Android | Oppia", "META": [ { "PROPERTY_TYPE": "itemprop", "PROPERTY_VALUE": "description", // eslint-disable-next-line max-len - "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. 
Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it" + "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it." }, { "PROPERTY_TYPE": "itemprop", "PROPERTY_VALUE": "og:description", // eslint-disable-next-line max-len - "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it" + "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it." } ] }, "CONTACT": { "ROUTE": "contact", - "TITLE": "Contact | Oppia", + "TITLE": "I18N_CONTACT_PAGE_TITLE", "META": [ { "PROPERTY_TYPE": "itemprop", @@ -6060,7 +6442,7 @@ export default { }, "GET_STARTED": { "ROUTE": "get-started", - "TITLE": "Get Started | Oppia", + "TITLE": "I18N_GET_STARTED_PAGE_TITLE", "META": [ { "PROPERTY_TYPE": "itemprop", @@ -6076,7 +6458,7 @@ export default { }, "LICENSE": { "ROUTE": "license", - "TITLE": "License Page | Oppia", + "TITLE": "I18N_LICENSE_PAGE_TITLE", "META": [ { "PROPERTY_TYPE": "itemprop", @@ -6092,12 +6474,12 @@ export default { }, "LOGIN": { "ROUTE": "login", - "TITLE": "Sign in | Oppia", + "TITLE": "I18N_LOGIN_PAGE_TITLE", "META": [] }, "LOGOUT": { "ROUTE": "logout", - "TITLE": "Logout | Oppia", + "TITLE": "I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE", "META": [] }, "PARTNERSHIPS": { @@ -6107,7 +6489,7 @@ export default { }, "PLAYBOOK": { "ROUTE": "creator-guidelines", - "TITLE": "Creator Guidelines | Oppia", + "TITLE": "I18N_PLAYBOOK_PAGE_TITLE", "META": [ { 
"PROPERTY_TYPE": "itemprop", @@ -6125,12 +6507,12 @@ export default { }, "PRIVACY": { "ROUTE": "privacy-policy", - "TITLE": "Privacy Policy | Oppia", + "TITLE": "I18N_PRIVACY_POLICY_PAGE_TITLE", "META": [] }, "SIGNUP": { "ROUTE": "signup", - "TITLE": "Join the community - Oppia", + "TITLE": "I18N_SIGNUP_PAGE_TITLE", "MANUALLY_REGISTERED_WITH_BACKEND": true, "META": [ { @@ -6147,7 +6529,7 @@ export default { }, "TEACH": { "ROUTE": "teach", - "TITLE": "Guide to Oppia for Parents and Teachers | Oppia", + "TITLE": "I18N_TEACH_PAGE_TITLE", "META": [ { "PROPERTY_TYPE": "itemprop", @@ -6165,7 +6547,7 @@ export default { }, "TERMS": { "ROUTE": "terms", - "TITLE": "Terms of Use | Oppia", + "TITLE": "I18N_TERMS_PAGE_TITLE", "META": [ { "PROPERTY_TYPE": "itemprop", @@ -6183,7 +6565,7 @@ export default { }, "THANKS": { "ROUTE": "thanks", - "TITLE": "Thanks | Oppia", + "TITLE": "I18N_THANKS_PAGE_TITLE", "META": [ { "PROPERTY_TYPE": "itemprop", @@ -6199,7 +6581,7 @@ export default { }, "DELETE_ACCOUNT": { "ROUTE": "delete-account", - "TITLE": "Delete Account | Oppia", + "TITLE": "I18N_DELETE_ACCOUNT_PAGE_TITLE", "META": [] }, "LIBRARY_INDEX": { @@ -6276,12 +6658,12 @@ export default { }, "PENDING_ACCOUNT_DELETION": { "ROUTE": "pending-account-deletion", - "TITLE": "Pending Account Deletion | Oppia", + "TITLE": "I18N_PENDING_ACCOUNT_DELETION_PAGE_TITLE", "META": [] }, "PREFERENCES": { "ROUTE": "preferences", - "TITLE": "Preferences | Oppia", + "TITLE": "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE", "META": [ { "PROPERTY_TYPE": "itemprop", @@ -6297,7 +6679,7 @@ export default { }, "PROFILE": { "ROUTE": "profile/:username_fragment", - "TITLE": "Profile | Oppia", + "TITLE": "I18N_PROFILE_PAGE_TITLE", // Some routes contain url fragments, as syntax for url fragments are // different for angular router and backend. They have to be registered // manually in the backend. Please use angular router syntax here. 
@@ -6306,7 +6688,7 @@ export default { }, "RELEASE_COORDINATOR_PAGE": { "ROUTE": "release-coordinator", - "TITLE": "Oppia Release Coordinator Panel", + "TITLE": "I18N_RELEASE_COORDINATOR_PAGE_TITLE", "META": [ { "PROPERTY_TYPE": "itemprop", @@ -6322,38 +6704,148 @@ export default { } ] }, - "SPLASH": { - "ROUTE": "", - "TITLE": "Oppia | Free, Online and Interactive Lessons for Anyone", + "STORY_VIEWER": { + // eslint-disable-next-line max-len + "ROUTE": "learn/:classroom_url_fragment/:topic_url_fragment/story/:story_url_fragment", + "TITLE": "Oppia", + // Some routes contain url fragments, as syntax for url fragments are + // different for angular router and backend. They have to be registered + // manually in the backend. Please use angular router syntax here. + "MANUALLY_REGISTERED_WITH_BACKEND": true, + "META": [] + }, + "VOLUNTEER": { + "ROUTE": "volunteer", + "TITLE": "Volunteer | Oppia", + "META": [] + }, + "CLASSROOM": { + "ROUTE": "learn/:classroom_url_fragment", + "TITLE": "Oppia", + "LIGHTWEIGHT": true, + // Some routes contain url fragments, as syntax for url fragments are + // different for angular router and backend. They have to be registered + // manually in the backend. Please use angular router syntax here. + "MANUALLY_REGISTERED_WITH_BACKEND": true, "META": [ { "PROPERTY_TYPE": "itemprop", "PROPERTY_VALUE": "description", // eslint-disable-next-line max-len - "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it." + "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. 
Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it" }, { "PROPERTY_TYPE": "itemprop", "PROPERTY_VALUE": "og:description", // eslint-disable-next-line max-len - "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it." + "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it" } ] }, - "STORY_VIEWER": { - // eslint-disable-next-line max-len - "ROUTE": "learn/:classroom_url_fragment/:topic_url_fragment/story/:story_url_fragment", - "TITLE": "Oppia", + "BLOG_HOMEPAGE": { + "ROUTE": "blog", + "TITLE": "I18N_BLOG_HOME_PAGE_TITLE", + "META": [ + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "description", + // eslint-disable-next-line max-len + "CONTENT": "Read the latest on what's new and exciting with Oppia." + }, + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "og:description", + // eslint-disable-next-line max-len + "CONTENT": "Read the latest on what's new and exciting with Oppia." + } + ] + }, + "BLOG_HOMEPAGE_SEARCH": { + "ROUTE": "blog/search/find", + "TITLE": "I18N_BLOG_HOME_PAGE_TITLE", + "META": [ + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "description", + // eslint-disable-next-line max-len + "CONTENT": "Read the latest on what's new and exciting with Oppia." + }, + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "og:description", + // eslint-disable-next-line max-len + "CONTENT": "Read the latest on what's new and exciting with Oppia." 
+ } + ] + }, + "BLOG_AUTHOR_PROFILE_PAGE": { + "ROUTE": "blog/author/:author_username", + "TITLE": "I18N_BLOG_AUTHOR_PROFILE_PAGE_TITLE", + "MANUALLY_REGISTERED_WITH_BACKEND": true, + "META": [ + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "description", + // eslint-disable-next-line max-len + "CONTENT": "Read the latest on what's new and exciting with Oppia." + }, + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "og:description", + // eslint-disable-next-line max-len + "CONTENT": "Read the latest on what's new and exciting with Oppia." + } + ] + }, + "BLOG_POST_PAGE": { + "ROUTE": "blog/:blog_post_url_fragment", + "TITLE": "I18N_BLOG_POST_PAGE_TITLE", // Some routes contain url fragments, as syntax for url fragments are // different for angular router and backend. They have to be registered // manually in the backend. Please use angular router syntax here. "MANUALLY_REGISTERED_WITH_BACKEND": true, - "META": [] + "META": [ + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "description", + // eslint-disable-next-line max-len + "CONTENT": "Read the latest on what's new and exciting with Oppia." + }, + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "og:description", + // eslint-disable-next-line max-len + "CONTENT": "Read the latest on what's new and exciting with Oppia." + } + ] }, - "VOLUNTEER": { - "ROUTE": "volunteer", - "TITLE": "Volunteer | Oppia", + "LEARNER_GROUP_VIEWER": { + "ROUTE": "learner-group/:learner_group_id", + "TITLE": "I18N_LEARNER_GROUP_PAGE_TITLE", + // Some routes contain url fragments, as syntax for url fragments are + // different for angular router and backend. They have to be registered + // manually in the backend. Please use angular router syntax here. 
+ "MANUALLY_REGISTERED_WITH_BACKEND": true, "META": [] + }, + "SPLASH": { + "ROUTE": "", + "TITLE": "Oppia | Free, Online and Interactive Lessons for Anyone", + "LIGHTWEIGHT": true, + "META": [ + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "description", + // eslint-disable-next-line max-len + "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it." + }, + { + "PROPERTY_TYPE": "itemprop", + "PROPERTY_VALUE": "og:description", + // eslint-disable-next-line max-len + "CONTENT": "With Oppia, you can access free lessons on math, physics, statistics, chemistry, music, history and more from anywhere in the world. Oppia is a nonprofit with the mission of providing high-quality education to those who lack access to it." + } + ] } }, @@ -6416,5 +6908,17 @@ export default { "EMULATOR_MODE": true, "ASSET_TYPE_AUDIO": "audio", "ASSET_TYPE_IMAGE": "image", - "ASSET_TYPE_THUMBNAIL": "thumbnail" + "ASSET_TYPE_THUMBNAIL": "thumbnail", + + "FAVICON_ALERT_PATH": "/assets/images/favicon_alert/favicon_alert.ico", + + "METADATA_PROPERTIES": [ + "title", "category", "objective", "language_code", "tags", "blurb", + "author_notes", "states_schema_version", "init_state_name", "param_specs", + "param_changes", "auto_tts_enabled", "correctness_feedback_enabled", + "edits_allowed" + ], + "NON_METADATA_PROPERTIES": ["id", "states", "next_content_id_index"], + "CONTRIBUTOR_CERTIFICATE_WIDTH": 1493, + "CONTRIBUTOR_CERTIFICATE_HEIGHT": 1313 } as const; diff --git a/assets/i18n/ab.json b/assets/i18n/ab.json index 18c678ee8c85..259ae8e4aafd 100644 --- a/assets/i18n/ab.json +++ b/assets/i18n/ab.json @@ -4,7 +4,7 @@ "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "Ацхыраара ҟаҵатәуп", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4_HEADING": "Адиректор", "I18N_ABOUT_PAGE_TABS_CREDITS": "Аԥҵаҩцәа", - 
"I18N_ABOUT_PAGE_TITLE": "Ҳара иҳазкны - Oppia", + "I18N_ABOUT_PAGE_TITLE": "Ҳара | Oppia", "I18N_ACTION_BROWSE_LESSONS": "Ирыхәаԥштәуп ҳара ҳаурокқәа", "I18N_ACTION_CREATE_LESSON": "Иаԥышәҵа аурок", "I18N_CONTACT_PAGE_PARAGRAPH_15_HEADING": "Ашәарҭадара", @@ -131,7 +131,6 @@ "I18N_SIDEBAR_BLOG": "Аблог", "I18N_SIDEBAR_CONTACT_US": "Аконтактқәа", "I18N_SIDEBAR_FORUM": "Афорум", - "I18N_SIDEBAR_GET_STARTED": "Иалагатәуп", "I18N_SIDEBAR_LIBRARY_LINK": "Абиблиотека", "I18N_SIGNUP_CC_TITLE": "Creative Commons Алицензиа", "I18N_SIGNUP_CLOSE_BUTTON": "Иарктәуп", diff --git a/assets/i18n/ar.json b/assets/i18n/ar.json index 485e4c08fbd0..f7b0c7918547 100644 --- a/assets/i18n/ar.json +++ b/assets/i18n/ar.json @@ -1,7 +1,8 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "حول مؤسسة أوبيا", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "أنشئ استكشافًا", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "عن موضوع يهمك.", - "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "كسب التغذية الراجعة", + "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "كسب نصائح", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK_TEXT": "لتحسين استكشافك.", "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "حول أوبيا", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_1": "مهمة أوبيا هي مساعدة أي شخص على تعلم أي شيء يريده بطريقة فعالة وممتعة.", @@ -14,7 +15,7 @@ "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_8": "سواء كنت معلما من الروضة إلى الصف الثاني عشر، أو طالب دراسات عليا، أو شخصا متحمسا لموضوع معين ويريد مشاركة معرفته، ترحب أوبيا بك، انضم إلى المجتمع وابدأ في الاستكشاف معنا.", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "انشر & شارك", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE_TEXT": "إنشاءاتك مع المجتمع.", - "I18N_ABOUT_PAGE_AUDIO_SUBTITLES_FEATURE": "الترجمات الصوتية", + "I18N_ABOUT_PAGE_AUDIO_SUBTITLES_FEATURE": "الترجمات(مكتوبة) للصوت", "I18N_ABOUT_PAGE_BREADCRUMB": "حول", "I18N_ABOUT_PAGE_CREATE_LESSON_CONTENT": "باستخدام نظام انشاء المحتوى الخاص بأوبيا,يمكنك بسهولة انشاء الدروس وتخصيصهابما يهمك.", "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "تشكرات", @@ -64,17 +65,66 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": 
"دليل للمعلم", "I18N_ACTION_TIPS_FOR_PARENTS": "نصائح للوالدين وأولياء الأمور", "I18N_ACTION_VISIT_CLASSROOM": "زيارة غرفة الصف", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "عنوان البريد الإلكتروني", + "I18N_ATTRIBUTION_HTML_STEP_ONE": "انسخ و الصق الHTML", + "I18N_ATTRIBUTION_HTML_STEP_TWO": "تأكد من ظهور الرابط كـ \"<[linkText]>\"", + "I18N_ATTRIBUTION_HTML_TITLE": "الرِّيَاضِيَّات لأوبيا ، لِمُحَاوَلَة الْحِلّ .", + "I18N_ATTRIBUTION_PRINT_STEP_ONE": "انسخ والصق الرصيد", + "I18N_ATTRIBUTION_PRINT_STEP_TWO": "إرفاق نسخة من \"<[link]>\"", + "I18N_ATTRIBUTION_PRINT_TITLE": "السمة في الطباعة", + "I18N_ATTRIBUTION_TITLE": "كيف تنسب هذا الدرس للمشاركة أو إعادة الاستخدام", + "I18N_BLOG_CARD_PREVIEW_CONTEXT": "الرِّيَاضِيَّات لأوبيا ، لِمُحَاوَلَة الْحِلّ .", + "I18N_BLOG_CARD_PREVIEW_HEADING": "معاينة بطاقة المدونة", + "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "إنشاء منشور مدونة جديد", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "يبدوا أنك لم تنشأ أي قصة بعد !", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "منشور جديد", + "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "المسودات", + "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "تم النشر", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "مدونة", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "وسوم", + "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "اضف صورة للواجهة", + "I18N_BLOG_POST_EDITOR_BODY_HEADING": "جسم", + "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "إلغاء", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "حذف", + "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "تغيير صورة الواجهة", + "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "آخر حفظ في", + "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "نشر", + "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "تم", + "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "حفظ كمسودة", + "I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "عرض مسبق", + "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "الوسوم", + "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_PREFIX": "حدود", + "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "لا يزال من الممكن إضافة المزيد من 
الوسوم.", + "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "صورة الواجهة", + "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "عنوان", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "الوسوم", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "اختر ملف أو اسحبه هنا", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "خطأ: لا يمكن قراءة ملف الصورة.", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "إضافة صورة للواجهة", + "I18N_BLOG_POST_UNTITLED_HEADING": "غير مسمى", + "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "محتوى هذه البطاقة طويل جدا، يرجى عدم تجاوز 4500 حرف للتسجيل.", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "هذه البطاقة طويلة جدا، وقد يفقد الطلاب اهتمامهم; فكر في تقصيرها أو تقسيمها إلى بطاقتين.", + "I18N_CHAPTER_COMPLETION": "مبروك على اتمام هاذه الوحدة !", "I18N_CLASSROOM_CALLOUT_BUTTON": "استكشف", "I18N_CLASSROOM_CALLOUT_HEADING_1": "أسس الرياضيات", "I18N_CLASSROOM_CALLOUT_HEADING_2": "تقديم: غرفة دراسة Oppia", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "تحقق من الدورة الأولى في صفوف أوبيا الجديدة تماما! خطط دروس مثبتة، تم إنشاؤها من قبل المعلمين، حتى تتمكن من إتقان المواد الأكاديمية الهامة.", + "I18N_CLASSROOM_MATH_TITLE": "رياضيات", "I18N_CLASSROOM_PAGE_COMING_SOON": "قريباً", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "تفاصيل المساق", "I18N_CLASSROOM_PAGE_HEADING": "صفحة الفصول الدراسية في أوبيا", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "استكشف المزيد من الدروس التي أعدها المجتمع", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "التصفح من خلال مكتبتنا العامة", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "المواضيع المغطاة", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "بدأ", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "التالي", + "I18N_COLLECTION_PLAYER_PAGE_FINISHED": "لقد أكملت هذه المجموعة! 
لا تتردد بإعادة أي من البحوث أسفله.", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "حم فوق الرمز لعرض الاستكشاف.", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "لم تتم إضافة أي استكشاف إلى هذه المجموعة.", + "I18N_COMING_SOON": " قريبا!", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "مجموعة", + "I18N_COMPLETED_STORY": "<[story]> كاملة.", + "I18N_COMPLETE_CHAPTER": "أكمل فصلاً في <[topicName]>", "I18N_CONTACT_PAGE_BREADCRUMB": "اتصل", "I18N_CONTACT_PAGE_HEADING": "شارك!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "شكرا لاهتمامك بمساعدة مشروع أوبيا!", @@ -103,7 +153,12 @@ "I18N_CONTACT_PAGE_PARAGRAPH_8": "لذلك، إذا كنت ترغب في إنشاء دروس مجانية وفعالة للطلاب في جميع أنحاء العالم، فقد وصلت إلى المكان الصحيح. نحن نشجعك على الاطلاع على دروس منشئي المحتوى والدروس الحالية، وبدء إنشاء الدرس الخاص بك، بالإضافة إلى ذلك، إذا كنت ترغب في التأكد من أن دروسك لها تأثير كبير، فالرجاء مراعاة تطبيق برنامج درِّس مع أوبيا; حيث نساعدك في إنشاء واختبار، وتحسين استكشافاتك لتحقيق التأثير الأمثل.", "I18N_CONTACT_PAGE_PARAGRAPH_9": "تحب استكشافًا موجودًا، ولكن وجدت شيئا يمكن أن يكون أفضل؟ يمكنك اقتراح تغييرات على أي استكشاف مباشرة من صفحة الاستكشاف، ما عليك سوى النقر على رمز القلم الرصاص في الركن العلوي الأيسر، ومشاركة ما تعتقد أنه يمكن تحسينه. منشئ الدرس سوف يتلقي اقتراحاتك وتُتَاح له الفرصة لدمجها في الاستكشاف، هذه طريقة قيمة بشكل لا يُصدَّق للمساهمة، خاصة إذا كنت تستطيع وضع اقتراحاتك خارج تجارب الطلاب الذين يمرحون من خلال الاستكشاف.", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "تحسين الاستكشافات الحالية", + "I18N_CONTACT_PAGE_TITLE": "الاتصال | أوبيا", "I18N_CONTINUE_REGISTRATION": "متابعة التسجيل", + "I18N_COOKIE_BANNER_ACKNOWLEDGE": "موافق", + "I18N_COOKIE_BANNER_EXPLANATION": "يستخدم هذا الموقع ملفات تعريف الارتباط والتقنيات المماثلة لدعم الوظائف الأساسية والحفاظ على أمان الموقع وتحليل حركة المرور على موقعنا. 
تعرف على مزيد من المعلومات في .", + "I18N_CORRECT_FEEDBACK": "صحيح!", + "I18N_CREATE_ACCOUNT": "أنشئ حسابًا", "I18N_CREATE_ACTIVITY_QUESTION": "ماذا تريد أن تنشئ؟", "I18N_CREATE_ACTIVITY_TITLE": "إنشاء نشاط", "I18N_CREATE_COLLECTION": "إنشاء مجموعة", @@ -127,11 +182,14 @@ "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TITLE": "العنوان", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TOTAL_PLAYS": "مجموع عدد مرات التشغيل", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_UNRESOLVED_ANSWERS": "إجابات لم يتم حلها", + "I18N_DASHBOARD_LESSONS": "دروس", "I18N_DASHBOARD_OPEN_FEEDBACK": "فتح التغذية الراجعة", + "I18N_DASHBOARD_SKILL_PROFICIENCY": "إتقان المهارة", "I18N_DASHBOARD_STATS_AVERAGE_RATING": "متوسط التقييم", "I18N_DASHBOARD_STATS_OPEN_FEEDBACK": "فتح التغذية الراجعة", "I18N_DASHBOARD_STATS_TOTAL_PLAYS": "مجموع عدد مرات التشغيل", "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "مشتركون", + "I18N_DASHBOARD_STORIES": "قصص", "I18N_DASHBOARD_SUBSCRIBERS": "مشتركون", "I18N_DASHBOARD_SUGGESTIONS": "اقتراحات", "I18N_DASHBOARD_TABLE_HEADING_EXPLORATION": "استكشاف", @@ -153,16 +211,22 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "الالتزامات المقدمة للمستكشف العام والمجوعات التي لها مالكون آخرون", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_3": "الالتزامات التي أجريت على الموضوعات,والقصص,والمهارات والأسئلة", "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "لتأكيد الحذف ، يرجى إدخال اسم المستخدم في الحقل أدناه والضغط على الزر 'حذف حسابي'. لا يمكن التراجع عن هذا الإجراء. ", - "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "سيؤدي هذا الإجراء إلى حذف حساب المستخدم هذا وكذلك جميع البيانات الخاصة المرتبطة بهذا الحساب. ستصبح البيانات العامة بالفعل مجهولة المصدر، بحيث لا يمكن ربطها بهذا الحساب. قد يتم نقل ملكية بعض هذه البيانات إلى المجتمع.", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "سيؤدي هذا الإجراء إلى حذف حساب المستخدم هذا وكذلك جميع البيانات الخاصة المرتبطة بهذا الحساب. ستكون البيانات العامة بالفعل مجهولة المصدر بحيث لا يمكن ربطها بهذا الحساب ، باستثناء البيانات الاحتياطية (التي يتم تخزينها لمدة 6 أشهر). 
قد لا تنطبق بعض الفئات المذكورة أدناه على حسابك.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "نظرة عامة", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "ملخص البيانات التي سيتم حذفها", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "ملخص البيانات التي سيتم تحويلها بيانات مجهلة", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "بالإضافة إلى ذلك ، سيتم نقل الاستكشافات والمجموعات المنشورة التي ليس لها مالكين آخرين إلى ملكية المجتمع.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "لو كانت لديك أي أسئلة بخصوص عملية إزالة الحسابات، فمن فضلك أرسل بريدا إلكترونيا إلى privacy@oppia.org.", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "هذا سوف يأخذك الى الصفحة التي يمكنك من خلالها حذف حساب أوبيا", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "حذف الحساب | أوبيا", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "سحب صورة إلى هذه المنطقة", "I18N_DIRECTIVES_UPLOAD_A_FILE": "رفع ملف", "I18N_DONATE_PAGE_BREADCRUMB": "تبرع", + "I18N_DONATE_PAGE_IMAGE_TITLE": "هديتك الكريمة تمول :", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "اقرأ مدونتنا", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "عنوان البريد الإلكتروني", + "I18N_DONATE_PAGE_TITLE": "تبرع ل مؤسسة اوبيا", + "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "استمع من مجتمع أوبيا", "I18N_ERROR_DISABLED_EXPLORATION": "تعطيل الاستكشاف", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "عذرا، لكن التنقيب الذي نقرت علىه معطل حاليا؛ الرجاء معاودة المحاولة في وقت لاحق.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "تعطيل الاستكشاف - أوبيا", @@ -179,6 +243,129 @@ "I18N_ERROR_PAGE_TITLE_401": "خطأ 401 - أوبيا", "I18N_ERROR_PAGE_TITLE_404": "خطأ 404 - أوبيا", "I18N_ERROR_PAGE_TITLE_500": "خطأ 500 - أوبيا", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "هل انت مستعد للمزيد من الكاب كيك ؟ قم بهذا الاختبار السريع للتأكيد من مكتسباتك !", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE": "تساوي الكسور (مراجعة)", + "I18N_EXPLORATION_0FBWxCE5egOw_DESCRIPTION": "هل ممكن ان يكون كسر متنكر على شكل آخر ؟ لنرى ما سيحدث لماثيو عند التقائه بكرمب للمرة الثانية", + "I18N_EXPLORATION_0FBWxCE5egOw_TITLE": "تكافؤ 
كسرين", + "I18N_EXPLORATION_0X0KC9DXWwra_DESCRIPTION": "يحتفل الجميع بعيد ميلاد سمير في منزل كمال. أضاف كمال المزيد من المرح بانشائه لعبة رياضيات لآفا و سمير. انظر ان كنت تستطيع حل الأسئلة!", + "I18N_EXPLORATION_0X0KC9DXWwra_TITLE": "مراجعة:مهارات حل المشكلات", + "I18N_EXPLORATION_1904tpP0CYwY_DESCRIPTION": "حان الوقت لكي تزرع آريا الخضر!واصل مسار بستنك حيث تساعد بالزراعة و ابدأ بحفظ جدول الضرب.", + "I18N_EXPLORATION_1904tpP0CYwY_TITLE": "تعبيرات من رقم واحد من 1 الى 5", + "I18N_EXPLORATION_2mzzFVDLuAj8_DESCRIPTION": "إنضم لجامي و عمه في تعلمهم النسب و كيفية استعمالها!", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "ما هي النسبة؟", + "I18N_EXPLORATION_40a3vjmZ7Fwu_DESCRIPTION": "نينا و أمها تزوران صديقتهم، التي تمتلك متجر صغير لبيع الفواكه. \nإنضم لنينا و هي تستعمل عملية القسمة لتساعد صديقتهم في كشكها!", + "I18N_EXPLORATION_40a3vjmZ7Fwu_TITLE": "تذكير و حالات خاصة", + "I18N_EXPLORATION_53Ka3mQ6ra5A_DESCRIPTION": "مايا, عمر و مليك يذهبون للمتجر لاقتناء المزيد من المكونات و سيحتاجون لجمع أعداد أكبر. أنظر كيف يمكنك مساعدتهم!", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE": "جمع أعداد أكبر", + "I18N_EXPLORATION_5I4srORrwjt2_DESCRIPTION": "في متجر الوجبات الخفيفة, يقول كمال أن عليهم التصرف بذكاء في طريقة استعمالهم لمبلغ المال المحدد لديهم. ساعد افا و سمير لإيجاد الوجبات التي يستطعون شراءها!", + "I18N_EXPLORATION_5I4srORrwjt2_TITLE": "التناسب والأسلوب الوحدوي", + "I18N_EXPLORATION_5NWuolNcwH6e_DESCRIPTION": "جايمس يحاول تحضير عصائره بنفسه... لكنها ليست شهية جدا. ما هو الخطأ الذي إرتكبه ؟ إبدأ الدرس لاكتشاف ذالك !", + "I18N_EXPLORATION_5NWuolNcwH6e_TITLE": "أهمية الترتيب", + "I18N_EXPLORATION_670bU6d9JGBh_DESCRIPTION": "ساعد ماثيو لحل مشكلة زبون السيد بايكر و هو يتعلم عن الأعداد الكسرية و المستقيم المدرج. إبدأ الدرس لمساعدته!", + "I18N_EXPLORATION_670bU6d9JGBh_TITLE": "الأعداد الكسرية وخط الأعداد 1", + "I18N_EXPLORATION_6Q6IyIDkjpYC_DESCRIPTION": "للسيد بايكر طلبية كبيرة و مهمة و يحتاج لمساعدة ماثيو لاقتناء المقادير. 
هل يمكنك أن تجد ما يحتاجون اليه من خلال الكسور ؟", + "I18N_EXPLORATION_6Q6IyIDkjpYC_TITLE": "طرح الكسور", + "I18N_EXPLORATION_8HTzQQUPiK5i_DESCRIPTION": "إنضم لنينا و أمها في جولتهم إلى المتجر. ساعدهم بإستعمال عملية القسمة لإيجاد عدد الأكياس التي تحتاجانها لمشترياتهما !", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE": "ما هي القسمة ؟", + "I18N_EXPLORATION_9DITEN8BUEHw_DESCRIPTION": "تعلم كيفية تقييم المعادلات الجبرية التي تتضمن عمليات الجمع و الطرح.", + "I18N_EXPLORATION_9DITEN8BUEHw_TITLE": "جمع و طرح عدة أرقام", + "I18N_EXPLORATION_9trAQhj6uUC2_DESCRIPTION": "يمكن استعمال الكسور للتعبير عن أجزاء كعك.لكن هل يمكن استعمالها للتعبير عن مجموعة من الأشياء؟ العب هذا الدرس لتكتشف ذلك!", + "I18N_EXPLORATION_9trAQhj6uUC2_TITLE": "كسور مجموعة", + "I18N_EXPLORATION_BDIln52yGfeH_DESCRIPTION": "عندما يصلان إلى مدينة الملاهي ، يرغب آفا وسمير في قضاء وقت ممتع ، لكن كمال يقول إنهما بحاجة لمعرفة ما إذا كان لديهما ما يكفي من المال. ساعدهم في الرياضيات!", + "I18N_EXPLORATION_BDIln52yGfeH_TITLE": "تبسيط المعادلات", + "I18N_EXPLORATION_BJd7yHIxpqkq_DESCRIPTION": "ساعد أبطالنا الثلاث لتحضير البيزا، في تعلمهم كيفية الجمع من الصفر و التعبير عن الاعداد الناقصة في \" معلومات الجمع \".", + "I18N_EXPLORATION_BJd7yHIxpqkq_TITLE": "أساسيات الجمع", + "I18N_EXPLORATION_IrbGLTicm0BI_DESCRIPTION": "بينما ينتظران إيفا و كمال السيدة بليم، لنرى إذا تعلمت تقنيات الحل لمشاكل في الحياة اليومية!", + "I18N_EXPLORATION_IrbGLTicm0BI_TITLE": "مراجعة: حل مشاكل الحياة اليومية", + "I18N_EXPLORATION_Jbgc3MlRiY07_DESCRIPTION": "بعد تعلم كل هذه المهارات الجديده، إيفا تريد اكتشاف كيفية استعمالها. انضم لايفا و هي تطبق مهاراتها الجديده في حل المشاكل اليوميه!", + "I18N_EXPLORATION_Jbgc3MlRiY07_TITLE": "التعبير عن مشاكل الحياة اليومية", + "I18N_EXPLORATION_K645IfRNzpKy_DESCRIPTION": "يتعلم خايمي القيمة المكانية لكل رقم في عدد كبير.", + "I18N_EXPLORATION_K645IfRNzpKy_TITLE": "ما هي القيمة المكانية", + "I18N_EXPLORATION_K89Hgj2qRSzw_DESCRIPTION": "كمال يكشف عن التقنيات التي استعملها لإيجاد ساعة الاستيقاظ بسرعة. 
تريد أن ترى كيف فعل ذلك؟ العب الدرس لترى!", + "I18N_EXPLORATION_K89Hgj2qRSzw_TITLE": "قانون النشر", + "I18N_EXPLORATION_Knvx24p24qPO_DESCRIPTION": "يفهم خايمي قيمة نقاطه في لعبة الورق.", + "I18N_EXPLORATION_Knvx24p24qPO_TITLE": "إيجاد قيم الرقم", + "I18N_EXPLORATION_MRJeVrKafW6G_DESCRIPTION": "حديقة أريا أصبحت نجاح كبير! كل اسبوع خلال الصيف، ينبت المزيد و المزيد من الخضر و الفواكه. ساعدها لحساب كم انبتت من خضار.", + "I18N_EXPLORATION_MRJeVrKafW6G_TITLE": "الضرب في مضاعفات العشرة", + "I18N_EXPLORATION_MjZzEVOG47_1_DESCRIPTION": "تعلمنا أن \"مقام\" الكسر هو عدد الأجزاء المتساوية في الكل. لكن لماذا يجب أن تكون الأجزاء هي نفسها؟ هيا نكتشف!", + "I18N_EXPLORATION_MjZzEVOG47_1_TITLE": "معنى \"الأجزاء المتساوية\"", + "I18N_EXPLORATION_OKxYhsWONHZV_DESCRIPTION": "انضم إلى مايا وعمر حيث يتعلمان كيف يمكن \"تجميع\" الأرقام أو \"إضافتها\" لإنشاء رقم جديد!", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "ما هو الجمع ؟", + "I18N_EXPLORATION_PsfDKdhd6Esz_DESCRIPTION": "يبدو أن مايا وعمر ومالك قد خسروا المال بسبب المكونات الفاسدة. باستخدام الطرح ، هل يمكنك مساعدتهم في معرفة كيفية حساب ذلك؟", + "I18N_EXPLORATION_PsfDKdhd6Esz_TITLE": "طرح أعداد كبيرة ، الجزء 2", + "I18N_EXPLORATION_R7WpsSfmDQPV_DESCRIPTION": "جنبًا إلى جنب مع آريا ، لنتعلم ما هو الضرب وكيف نكتب التعابير معه وكيف نستخدمه لحل المشكلات في حي آريا!", + "I18N_EXPLORATION_R7WpsSfmDQPV_TITLE": "أجزاء من تعبيرات الضرب", + "I18N_EXPLORATION_RvopsvVdIb0J_DESCRIPTION": "حان الوقت ليبيع جيمس عصيره الجديد! أقام كشكًا مع العم بيري. هل يمكنهم معرفة مقدار المال الذي يجب أن يحصل عليه كل منهم؟", + "I18N_EXPLORATION_RvopsvVdIb0J_TITLE": "ربط النسب بالأرقام الفعلية", + "I18N_EXPLORATION_SR1IKIdLxnm1_DESCRIPTION": "شعرت آفا بالملل من لعب ألعاب الملاهي ، لذلك ابتكر كمال لعبة رياضيات ممتعة. 
هل تستطيع التغلب على مباراة كمال؟ انقر فوق هذا الدرس لمعرفة ذلك!", + "I18N_EXPLORATION_SR1IKIdLxnm1_TITLE": "مراجعة: المتغيرات", + "I18N_EXPLORATION_STATE_PREVIOUSLY_COMPLETED": "لقد أجبت على هذا السؤال في جلسة سابقة.", + "I18N_EXPLORATION_VKXd8qHsxLml_DESCRIPTION": "لاحظت مايا وعمر ومالك أن بعض مكوناتها قد فسدت. هل يمكنك مساعدتهم في معرفة مقدار ما تبقى لديهم باستخدام الطرح؟", + "I18N_EXPLORATION_VKXd8qHsxLml_TITLE": "طرح أعداد كبيرة ، الجزء 1", + "I18N_EXPLORATION_Vgde5_ZVqrq5_DESCRIPTION": "اكتشف جيمس الشكل الذي يريد أن تبدو عليه وصفة العصائر الخاصة به ، لكنه يواجه مشكلة في دمج جميع الأجزاء معًا. هل يمكنك مساعدته في هذا؟", + "I18N_EXPLORATION_Vgde5_ZVqrq5_TITLE": "الجمع بين النسب", + "I18N_EXPLORATION_W0xq3jW5GzDF_DESCRIPTION": "يحدث شيء غير متوقع عندما تحاول مايا وعمر ومالك صنع بيتزا ثانية.", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "ما هو الطرح ؟", + "I18N_EXPLORATION_WulCxGAmGE61_DESCRIPTION": "نينا زارت منزل ساندرا. انضم لها لاستعمال القسمة، من أجل مساعدة ساندرا في مشاكل أصعب. كتحويل جميع الفواكه للصناديق !", + "I18N_EXPLORATION_WulCxGAmGE61_TITLE": "القسمة على مضاعفات العشرة", + "I18N_EXPLORATION_WwqLmeQEn9NK_DESCRIPTION": "جامي يواصل تعلم المزيد من التقنيات لتدوير الأعداد.", + "I18N_EXPLORATION_WwqLmeQEn9NK_TITLE": "تدوير الاعداد، جزء 2", + "I18N_EXPLORATION_Xa3B_io-2WI5_DESCRIPTION": "انضم لماثيو في مساعدة السيد بيكر لتصليح العطل، في حين تتعلم كيف تجمع الكسور.", + "I18N_EXPLORATION_Xa3B_io-2WI5_TITLE": "جمع الكسور", + "I18N_EXPLORATION_aAkDKVDR53cG_DESCRIPTION": "جامي يتعلم إذا كان عدد أصغر أو أكبر من عدد آخر.", + "I18N_EXPLORATION_aAkDKVDR53cG_TITLE": "مقارنة الأعداد", + "I18N_EXPLORATION_aHikhPlxYgOH_DESCRIPTION": "انضم إلى ماثيو حيث يتعلم كيف أن الأرقام المختلطة هي مجرد كسور عادية مقنعة.", + "I18N_EXPLORATION_aHikhPlxYgOH_TITLE": "الأعداد الكسرية وخط الأعداد 2", + "I18N_EXPLORATION_aqJ07xrTFNLF_DESCRIPTION": "بعد استخدام الطريقة الأحادية للعثور على وجبة خفيفة يجب على آفا شراؤها ، جاء دور سمير باستخدام طريقة جديدة. 
انضم إلى سمير واعثر على ما تحصل عليه من وجبة خفيفة!", + "I18N_EXPLORATION_aqJ07xrTFNLF_TITLE": "حل المسائل بطريقة العلب", + "I18N_EXPLORATION_avwshGklKLJE_DESCRIPTION": "يتعلم خايمي كيفية تبسيط الرقم دون إجراء الكثير من التغييرات على قيمته.", + "I18N_EXPLORATION_avwshGklKLJE_TITLE": "تدوير الأعداد، جزء 1", + "I18N_EXPLORATION_cQDibOXQbpi7_DESCRIPTION": "أريا مستعدة لزراعة خضروات أكبر في حديقتها! ساعدها في زرعها وسقيها مع حفظ المزيد من المضاعفات معها.", + "I18N_EXPLORATION_cQDibOXQbpi7_TITLE": "تعبيرات من رقم واحد من 5 الى 9", + "I18N_EXPLORATION_hNOP3TwRJhsz_DESCRIPTION": "أريا تبدأ المدرسة مرة أخرى! تريد حديقة كبيرة للأطفال في مدرستها. ساعدها في التخطيط لها مع عمر باستخدام الضرب بأعداد أكبر.", + "I18N_EXPLORATION_hNOP3TwRJhsz_TITLE": "الضرب متعدد الأرقام ، الجزء الأول", + "I18N_EXPLORATION_ibeLZqbbjbKF_DESCRIPTION": "في محطة القطار ، تجد آفا وكمال أنه لا يوجد قطار! كمال يجد خطأ في الحسابات. هل ستساعدهم في العثور عند وصول القطار؟", + "I18N_EXPLORATION_ibeLZqbbjbKF_TITLE": "إدخال قيم المتغيرات", + "I18N_EXPLORATION_k2bQ7z5XHNbK_DESCRIPTION": "هل من الممكن أن تعني نسبتان مختلفتان نفس الشيء؟ اكتشف الأمر مع جيمس و العم بيري أثناء تجربة وصفة جديدة لعصائر الشوكولاتة.", + "I18N_EXPLORATION_k2bQ7z5XHNbK_TITLE": "النسب المعادلة", + "I18N_EXPLORATION_kYSrbNDCv5sH_DESCRIPTION": "تريد آفا تحقيق أقصى استفادة من عيد ميلاد سمير ، لذا تبدأ في التخطيط ليومها. ساعدها في استخدام الاختصارات لتقييم التعبيرات لمعرفة الأشياء!", + "I18N_EXPLORATION_kYSrbNDCv5sH_TITLE": "القوانين التبادلية والنقابية", + "I18N_EXPLORATION_lNpxiuqufPiw_DESCRIPTION": "ستحتاج آفا قريبًا إلى تطبيق مهاراتها على بعض مشاكل العالم الحقيقي. 
هل ستتمكن من المساعدة؟ جرب هذا الدرس لترى ما إذا كنت تتقن التعبيرات!", + "I18N_EXPLORATION_lNpxiuqufPiw_TITLE": "خلاصة: العمل مع التعبيرات", + "I18N_EXPLORATION_lOU0XPC2BnE9_DESCRIPTION": "انضم إلى نينا لأنها تساعد ساندرا في صنع عصير الفاكهة لكشكها ، باستخدام أسلوب جديد في التقسيم!", + "I18N_EXPLORATION_lOU0XPC2BnE9_TITLE": "القسمة المطولة ، القسمات أحادية الرقم", + "I18N_EXPLORATION_m1nvGABWeUoh_DESCRIPTION": "تنتهي آفا وسمير من لعب الألعاب والذهاب إلى المتجر لاستخدام التذاكر الخاصة بهما. هناك ، وجدوا آلة غامضة! انقر فوق الدرس التالي لمعرفة ذلك!", + "I18N_EXPLORATION_m1nvGABWeUoh_TITLE": "ما هو المعدل / المتوسط؟", + "I18N_EXPLORATION_nLmUS6lbmvnl_DESCRIPTION": "هل يستطيع جيمس معرفة ما إذا كان العصير أكثر \"حليبيًا\" أو \"زبادي\" بمجرد النظر إلى الوصفة ، بدلاً من الحاجة إلى صنع كل سموذي يدويًا؟", + "I18N_EXPLORATION_nLmUS6lbmvnl_TITLE": "مقارنة النسب", + "I18N_EXPLORATION_nTMZwH7i0DdW_DESCRIPTION": "إيفا و كمال يذهبون إلى محطة القطار. يلتقون بالسيدة بليم، طباخه، و يساعدونها لاستعمال المعادلات من أجل مدخولها، تكاليفها و أرباحها.", + "I18N_EXPLORATION_nTMZwH7i0DdW_TITLE": "من مشكله خطية إلى معادلات رياضية", + "I18N_EXPLORATION_osw1m5Q3jK41_DESCRIPTION": "حان وقت الكعك من جديد ! إنتهز هذه الفرصة للتأكد من قدراتك التي تعلمتها من الدروس السابقة !", + "I18N_EXPLORATION_osw1m5Q3jK41_TITLE": "عمليات على الكسور ( مراجعة )", + "I18N_EXPLORATION_rDJojPOc0KgJ_DESCRIPTION": "إيفا و كمال يشتريان هدايا لعيد ميلاد إبن خالهما! 
إنضم إليهما حيث يبحثان كيف يحسبون الأسعار من خلال تقييم المعادلات.", + "I18N_EXPLORATION_rDJojPOc0KgJ_TITLE": "تقييم التعبيرات - ترتيب العمليات", + "I18N_EXPLORATION_rfX8jNkPnA-1_DESCRIPTION": "هل يمكنك مساعدة ماثيو ليفوز ببعض الكعك ؟ خذ هذا الامتحان السريع لترى كم تتذكر حول الكسور.", + "I18N_EXPLORATION_rfX8jNkPnA-1_TITLE": "التعبير عن الكسور ( مراجعة )", + "I18N_EXPLORATION_rwN3YPG9XWZa_DESCRIPTION": "حيث يستمتعان بالمثلجات، يحاول كل من إيفا و كمال الإجابة عن أسئلة إيفا عن زيارتهم القادمة لمدينة الملاهي!", + "I18N_EXPLORATION_rwN3YPG9XWZa_TITLE": "حل المشاكل الخطية", + "I18N_EXPLORATION_tIoSb3HZFN6e_DESCRIPTION": "جايمس يتعلم كيفية تبسيط نسبة إلى شكلها المبسط، ليجعل عملياته سهلة.", + "I18N_EXPLORATION_tIoSb3HZFN6e_TITLE": "كتابة النسب بالطريقة المبسطة", + "I18N_EXPLORATION_umPkwp0L1M0-_DESCRIPTION": "إنضم إلى ماثيو عند إلتقائه بالسيد بايكر لأول مرة و تعلمه عن الكسور. ما هو الكسر ؟ إلعب الدرس لتكتشف المزيد!", + "I18N_EXPLORATION_umPkwp0L1M0-_TITLE": "ما هو الكسر ؟", + "I18N_EXPLORATION_v8fonNnX4Ub1_DESCRIPTION": "إيفا و كمال يواصلون مساعدة السيدة بليم حول مطبخها، لكن هناك بعض المجاهيل في المعادلات. هل ستستطيع إيفا المساعدة ؟", + "I18N_EXPLORATION_v8fonNnX4Ub1_TITLE": "صياغة معادلات بمجاهيل", + "I18N_EXPLORATION_wE9pyaC5np3n_DESCRIPTION": "نينا و ساندرا تشاركان بمسابقة. إنضم لنينا و هي تستعمل مهارات عملية القسمة لبيع أكثر كمية ممكنة من الفاكهة و العصير, لتفوز بالجائزة الكبرى !", + "I18N_EXPLORATION_wE9pyaC5np3n_TITLE": "تقسيم متعدد الأرقام", + "I18N_EXPLORATION_zIBYaqfDJrJC_DESCRIPTION": "استمر في مغامرة البستنة الخاصة بك مع آريا وهي تزرع الفاكهة وتتعلم وتمارس الضرب مع عمر!", + "I18N_EXPLORATION_zIBYaqfDJrJC_TITLE": "ما تعني عملية الضرب", + "I18N_EXPLORATION_zNb0Bh27QtJ4_DESCRIPTION": "في مطعم الوجبات الخفيفة ، يفحص كمال جيوبه ولا يجد محفظته. بدون المحفظة ، لا يمكنهم الحصول على أي وجبات خفيفة! 
هل يمكنك المساعدة في العثور على محفظة كمال؟", + "I18N_EXPLORATION_zNb0Bh27QtJ4_TITLE": "المتوالية العددية", + "I18N_EXPLORATION_zTg2hzTz37jP_DESCRIPTION": "بعد الكثير من التخطيط ، حصلت أريا على صديقاتها لمساعدتها في زراعة الحديقة لمدرستها! استخدم مهاراتك لمساعدتهم على زرع حديقة مذهلة!", + "I18N_EXPLORATION_zTg2hzTz37jP_TITLE": "الضرب متعدد الأرقام ، الجزء 2", + "I18N_EXPLORATION_zVbqxwck0KaC_DESCRIPTION": "جيمس والعم بيري مدعوون لعمل عصائر لحفلة الجيران. هل يمكن أن تكون هذه بداية صعودهم إلى الشهرة كصناع عصير؟", + "I18N_EXPLORATION_zVbqxwck0KaC_TITLE": "العلاقات النسبية", + "I18N_EXPLORATION_zW39GLG_BdN2_DESCRIPTION": "بينما يتعلم ماثيو كيفية مقارنة الكسور من حيث الحجم ، يقع حادث في المخبز ، وينزعج السيد بيكر. دعونا نرى ما حدث!", + "I18N_EXPLORATION_zW39GLG_BdN2_TITLE": "مقارنة الكسور", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "مجهول", "I18N_FOOTER_ABOUT": "حول", "I18N_FOOTER_ABOUT_ALL_CAPS": "عن أوبيا", @@ -202,6 +389,7 @@ "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "الرجاء إدخال الرقم الذي هو على الأقل <[minValue]>.", "I18N_FORMS_TYPE_NUMBER_AT_MOST": "الرجاء إدخال الرقم الذي هو على الأكثر <[maxValue]>.", "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "يُرجَى إدخال رقم عشري صالح.", + "I18N_GENERATE_ATTRIBUTION": "توليد الإسناد", "I18N_GET_STARTED_PAGE_BREADCRUMB": "ابدأ", "I18N_GET_STARTED_PAGE_HEADING": "ابدأ!", "I18N_GET_STARTED_PAGE_PARAGRAPH_1": "إنشاء استكشاف سهل ومجاني. 
تبادل المعرفة مع الطلاب في جميع أنحاء العالم، واحصل على ردود الفعل التي يمكن استخدامها لتحسين فعالية استكشافك.", @@ -222,15 +410,33 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_9": "عندما يعبر المتعلمون من خلال استكشافك، فإنه يمكن أن يرسلوا لك ردود الفعل لتنبيهك إلى مشاكل أو لتبادل الأفكار لجعله أفضل.", "I18N_GET_STARTED_PAGE_PARAGRAPH_9_HEADING": "تحسين استكشافك", "I18N_GET_STARTED_PAGE_TITLE": "ابدأ", + "I18N_GOAL_LIMIT": "حد <[limit]> من الأهداف", + "I18N_GOT_IT": "فهمت.", "I18N_HEADING_VOLUNTEER": "متطوع", + "I18N_HINT_NEED_HELP": "تحتاج مساعدة؟ عرض تلميح لهذه المشكلة!", + "I18N_HINT_TITLE": "تلميح", + "I18N_INTERACTIONS_ALGEBRAIC_EXPR_INSTRUCTION": "أكتب معادلة هنا.", + "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "اكتب التعليمات البرمجية في المحرر", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "انتقل إلى محرر التعليمات البرمجية", + "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "اسحب واسقط العناصر", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "من فضلك لا تضع 0 في المقام", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "أدخل كسر عشري في الصيغة «ب/م» أو رقم مختلط في الصيغة «ع ب/م».", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "أدخل كسر عشري في الصيغة ب/م.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS": "من فضلك قم باستخدام أرقام، أو مسافات، أو الرمز (/)", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS_LENGTH": "لا يجب أن يحتوي أي رقم في الكسر العشري على أكثر من 7 أرقام.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_FORMAT": "من فضلك أدخل كسر عشري صالح (5/3 أو 1 2/3 على سبيل المثال)", + "I18N_INTERACTIONS_FRACTIONS_NON_EMPTY": "الرجاء إدخال قيمة كسر غير فارغة.", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "الرجاء إدخال إجابتك في صورة كسر (على سبيل المثال ، 5/3 بدلاً من 1 2/3).", + "I18N_INTERACTIONS_FRACTIONS_PROPER_FRACTION": "الرجاء إدخال إجابة بجزء كسري \"مناسب\" (على سبيل المثال ، 1 2/3 بدلاً من 5/3).", + "I18N_INTERACTIONS_FRACTIONS_SIMPLEST_FORM": "الرجاء إدخال إجابة في أبسط صورة (على سبيل المثال ، 1/3 بدلاً من 2/6).", 
"I18N_INTERACTIONS_GRAPH_ADD_EDGE": "إضافة حافة", "I18N_INTERACTIONS_GRAPH_ADD_NODE": "إضافة عقدة", "I18N_INTERACTIONS_GRAPH_DELETE": "حذف", "I18N_INTERACTIONS_GRAPH_EDGE_FINAL_HELPTEXT": "اضغط على قمة الهدف لإنشاء حافة (انقر على نفس القمة لإلغاء إنشاء الحافة).", "I18N_INTERACTIONS_GRAPH_EDGE_INITIAL_HELPTEXT": "اضغط على قمة الهدف الأولية لحافة للإنشاء.", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "الرسم البياني غير صالح!", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "قم بإنشاء رسم بياني", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "عرض الرسم البياني", "I18N_INTERACTIONS_GRAPH_MOVE": "نقل", "I18N_INTERACTIONS_GRAPH_MOVE_FINAL_HELPTEXT": "انقر فوق أي نقطة لنقل قمة الهدف إلى تلك النقطة.", "I18N_INTERACTIONS_GRAPH_MOVE_INITIAL_HELPTEXT": "اضغط على قمة الهدف للتحرك.", @@ -241,42 +447,99 @@ "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "و<[vertices]> قمم", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "تحديث التسمية", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "تحديث الوزن", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "اضغط على الصورة", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[حدد صورة للعرض]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "يمكنك أن تختار المزيد من الخيارات", "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{من فضلك اختر خيارا واحدا أو أكثر.} other{من فضلك اختر # أو المزيد من الخيارات.}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{لا أكثر من 1 خيار يمكن اختيارها.} other{لا أكثر من # خيار يمكن اختيارها.}}", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "انقر على الخريطة", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "اعرض الخريطة", + "I18N_INTERACTIONS_MATH_EQ_INSTRUCTION": "أكتب معادلة هنا", "I18N_INTERACTIONS_MUSIC_CLEAR": "مسح", + "I18N_INTERACTIONS_MUSIC_INSTRUCTION": "اسحب الملاحظات إلى الموظفين لتشكيل تسلسل", + "I18N_INTERACTIONS_MUSIC_NARROW_INSTRUCTION": "عرض فريق الموسيقى", "I18N_INTERACTIONS_MUSIC_PLAY": "تشغيل", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "تشغيل تسلسل 
الهدف", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "الرجاء إدخال عملة صالحة (على سبيل المثال ، $5 أو Rs 5)", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY_FORMAT": "يرجى كتابة وحدات العملة في البداية", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_UNIT_CHARS": "يرجى التأكد من أن الوحدة تحتوي فقط على أرقام وحروف أبجدية و (،) ، * ، ^ ، / ، -", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_VALUE": "يرجى التأكد من أن القيمة إما كسر أو رقم", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "تنسيقات الوحدات المحتملة", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_DECIMAL": "في الغالب فاصلة عشرية واحدة يجب ان تظهر", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_EXPONENT": "في الغالب علامة اس واحدة يجب ان تكون موجودة", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_MINUS": "غالبا علامة 1 سالب (-) يجب ان تظهر", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_COMMA": "يمكن ان تحتوي الاجابة غالبا على 15 رقما (0 - 9)ما عدا الرموز (, او- )", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_DOT": "يمكن ان تحتوي الاجابة غالبا على 15 رقما (0 - 9)ما عدا الرموز (, او- )", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "الاجابة يجب ان تكون رقما فعالا", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "الاجابة يجب ان تكون اكبر من او تساوي صفرا.", + "I18N_INTERACTIONS_NUMERIC_INPUT_MINUS_AT_BEGINNING": "السالب (-) علامة مسموحة فقط في البداية", + "I18N_INTERACTIONS_NUMERIC_INPUT_NO_TRAILING_DECIMAL": "الكسور العشرية غير مسموحة", "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "هل تريد بالتأكيد إعادة تعيين الرمز؟", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "إلغاء", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "التأكيد مطلوب", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "إعادة تعيين الرمز", + "I18N_INTERACTIONS_PENCILCODE_INSTRUCTION": "قم بتحرير التعليمات البرمجية. 
انقر فوق \"تشغيل\" للتحقق من ذلك!", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "إظهار محرر التعليمات البرمجية", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "من فضلك أدخل نسبة صحيحة (1:2 أو 1:2:3 على سبيل المثال)", + "I18N_INTERACTIONS_RATIO_INCLUDES_ZERO": "لا يمكن أن تحتوي النسب على 0 كعنصر.", + "I18N_INTERACTIONS_RATIO_INVALID_CHARS": "من فضلك قم بكتابة نسبة تتكون من أرقام مفصولة بنقطتين رأسيتين (1:2 أو 1:2:3 على سبيل المثال).", + "I18N_INTERACTIONS_RATIO_INVALID_COLONS": "إجابتك تحتوي على نقطتين رأسيتين (:) متتاليتين.", + "I18N_INTERACTIONS_RATIO_INVALID_FORMAT": "من فضلك أدخل نسبة صحيحة (1:2 أو 1:2:3 على سبيل المثال)", + "I18N_INTERACTIONS_RATIO_NON_INTEGER_ELEMENTS": "بالنسبة لهذا السؤال ، يجب أن يكون كل عنصر في النسبة عددًا صحيحًا (وليس كسرًا أو عشريًا).", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "أضف عنصرا", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "عفوا، يبدو أن لديك مجموعة بها تكرارات!", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(إضافة عنصر واحد في كل سطر.)", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "لا توجد إجابة معطاة.", "I18N_INTERACTIONS_SUBMIT": "إرسال", + "I18N_INTERACTIONS_TERMS_LIMIT": "المنشئ حدد عدد المصطلحات في الإجابة ليكون <[termsCount]>", + "I18N_INVALID_TAGS_AND_ATTRIBUTES_ALERT": "تمت إزالة بعض العلامات والسمات غير الصالحة من الصورة التي تم تحميلها. 
إذا بدت صورتك مشوهة ، يرجى \" target=\"_blank\">اخبارنا ، ثم حاول تحميل SVG مختلف.", "I18N_LANGUAGE_FOOTER_VIEW_IN": "أظهر أوبيا ب:", + "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "مساء الخير", + "I18N_LEARNER_DASHBOARD_ALL": "الكل", + "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "تغيير الأهداف", + "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "نحاس", + "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "دروس من طرف المجتمع", + "I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "الأهداف المحققة", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "مكتمل", "I18N_LEARNER_DASHBOARD_COMPLETED_TO_INCOMPLETE_COLLECTIONS": "تم نقل <[numberMoved]> من المجموعات التي أكملتها إلى قسم \"قيد التقدم\" حيث تمت إضافة استكشافات جديدة إليها!", + "I18N_LEARNER_DASHBOARD_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "واصل من حيث توقفت", + "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "الأهداف الحالية", "I18N_LEARNER_DASHBOARD_EMPTY_COLLECTION_PLAYLIST": "يبدو أنه لا توجد أية مجموعات في قائمة \"تشغيل لاحقا\"; توجه للمكتبة وابنِ قائمة التشغيل المنسقة الخاصة بك!", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_COLLECTIONS": "يبدو أنك لم تكمل أية مجموعات بعد. توجه إلى المكتبة لبدء مجموعة جديدة ومثيرة!", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_EXPLORATIONS": "يبدو أنك لم تكمل أية استكشافات بعد. 
توجه إلى المكتبة لبدء مجموعة جديدة ومثيرة!", + "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_GOALS_SECTION": "أكمل هدفًا من أعلى وشاهد تقدمك هنا عند اكتماله!", + "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_STORIES_SECTION": "توجه إلى الفصل الدراسي لإكمال قصة جديدة مثيرة!", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "ابدأ التعلم عن طريق  ", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_HEADING": "ابدأ", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_SET_A_GOAL": "تحديد هدف!", + "I18N_LEARNER_DASHBOARD_EMPTY_CURRENT_GOALS_SECTION": "ابدأ التعلم بتحديد هدف من التالي!", "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "يبدو أنه لا توجد أية استكشافات في قائمة \"تشغيل لاحقا\"; توجه للمكتبة وابنِ قائمة التشغيل المنسقة الخاصة بك!", "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "ليس لديك أي سلاسل ملاحظات نشطة حتى الآن. تساعد ملاحظاتك في تحسين جودة دروسنا. يمكنك القيام بذلك عن طريق بدء أي من دروسنا وإرسال ملاحظاتك القيمة!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "يبدو أنه ليست لديك مجموعات مكتملة جزئيا في الوقت الحالي; توجه إلى المكتبة لبدء مجموعة جديدة ومثيرة!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "يبدو أنه ليست لديك أي استكشافات كاملة جزئيا في الوقت الحالي; توجه إلى المكتبة لبدء استكشاف جديد ومثير!", + "I18N_LEARNER_DASHBOARD_EMPTY_LEARN_SOMETHING_NEW_SECTION": "يبدو أنك وصلت إلى حد اختيار الهدف. توجه إلى المكتبة واستكشف المزيد من الاستكشافات.", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "ابدأ بـ  ", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "يسمح تحديد هدف لاوبيا بمنحك توصيات أفضل في لوحة القيادة الخاصة بك والتي تساهم في رحلة التعلم الخاصة بك.", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "تحديد الهدف! 
", "I18N_LEARNER_DASHBOARD_EMPTY_SUBSCRIPTIONS": "يبدو أنك لم تشترك في أي منشئ محتوى حتى الآن; توجه إلى المكتبة لاكتشاف منشئي محتوى جدد واستكشافاتهم الرائعة!", + "I18N_LEARNER_DASHBOARD_EMPTY_SUGGESTED_FOR_YOU_SECTION": "لقد أكملت كل دروس الموضوع لدينا! لا تتردد في مراجعة \"استكشافاتنا الأخرى\" على صفحة دروس المجتمع ", + "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "مساء الخير", "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "آخر تشغيل", "I18N_LEARNER_DASHBOARD_FEEDBACK_SECTION": "تحديثات التغذية الراجعة", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "الرد", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_STATUS_CHANGE_MESSAGE": "تم تغيير الحالة إلى '<[threadStatus]>'", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_WARNING": "تجنب مشاركة أية معلومات شخصية نظرا لأن هذه المناقشة قابلة للعرض بشكل عام.", + "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "أهداف", + "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "ذهب", + "I18N_LEARNER_DASHBOARD_HOME_SECTION": "الصفحة الرئيسية", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "غير مكتمل", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "في تقدم", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "يبدو أنك لم تجرب أيًا من استكشافاتنا حتى الآن.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "فلنبدأ في هذه الرحلة المثيرة!", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "مجموعاتك", + "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "تعلم شيء جديد", + "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "صباح الخير", + "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "محتوى قصة جديدة متوفر", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COLLECTIONS_FROM_PLAYLIST": "{numberNonexistent, plural, one{1 من المجموعات في قائمة 'تشغيل لاحقا' لم تعد متوفرة. نأسف للإزعاج} other{# من المجموعات في قائمة 'تشغيل لاحقا' لم تعد متوفرة. نأسف للإزعاج}}", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_COLLECTIONS": "{numberNonexistent, plural, one{1 من المجموعات التي أكملتها لم تعد متوفرة. نأسف للإزعاج} other{# من المجموعات التي أكملتها لم تعد متوفرة. 
نأسف للإزعاج}}", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_EXPLORATIONS": "{numberNonexistent, plural, one{1 من الاستكشافات التي أكملتها لم تعد متوفرة. نأسف للإزعاج} other{# من الاستكشافات التي أكملتها لم تعد متوفرة. نأسف للإزعاج}}", @@ -284,8 +547,10 @@ "I18N_LEARNER_DASHBOARD_NONEXISTENT_INCOMPLETE_COLLECTIONS": "{numberNonexistent, plural, one{1 من المجموعات قيد التقدم لم تعد متوفرة. نأسف للإزعاج} other{# من المجموعات قيد التقدم لم تعد متوفرة. نأسف للإزعاج}}", "I18N_LEARNER_DASHBOARD_NONEXISTENT_INCOMPLETE_EXPLORATIONS": "{numberNonexistent, plural, one{1 من الاستكشافات في التقدم لم تعد متاحة. نأسف للإزعاج} other{# من الاستكشافات في التقدم لم تعد متاحة. نأسف للإزعاج}}", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_COLLECTION": "يبدو أنك لم تبدأ أي مجموعات بعد; توجه إلى المكتبة لبدء مجموعة جديدة ومثيرة!", - "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": "يبدو أنك لم تبدأ أي استكشافات بعد; توجه إلى المكتبة لبدء استكشاف جديد ومثير!", + "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": "يبدو أنك لم تبدأ أي استكشافات بعد. 
توجه إلى المكتبة لبدء استكشاف جديد ومثير!", + "I18N_LEARNER_DASHBOARD_PAGE_TITLE": "لوحة معلومات المتعلم | أوبيا", "I18N_LEARNER_DASHBOARD_PLAYLIST_SECTION": "تشغيل لاحقا", + "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "التقدم", "I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE": "اسحب وأعد ترتيب الأنشطة بالترتيب الذي تريد أن تشغلهم به!", "I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE_MOBILE": "اسحب وأعد ترتيب الأنشطة بالترتيب الذي تريد أن تشغلهم به!", "I18N_LEARNER_DASHBOARD_REMOVE_ACTIVITY_MODAL_BODY": "هل تريد بالتأكيد إزالة '<[entityTitle]>' من قائمة '<[sectionNameI18nId]>'؟", @@ -294,14 +559,25 @@ "I18N_LEARNER_DASHBOARD_RETURN_TO_FEEDBACK_THREADS_MESSAGE": "ارجع إلى قائمة الرسائل", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "أرسل", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "جارٍ الإرسال...", + "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "الفضة", + "I18N_LEARNER_DASHBOARD_SKILLS": "المهارات", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "تقدم المهارات", + "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "القصص المكتملة", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "الاشتراكات", + "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "التقدم:", "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "الحالي:", "I18N_LEARNER_DASHBOARD_SUGGESTION_DESCRIPTION": "وصف موجز للتغييرات:", "I18N_LEARNER_DASHBOARD_SUGGESTION_NO_CURRENT_STATE": "!عفوا! 
لم تعد هذه الحالة موجودة", "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "مقترح:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "اقتراح", "I18N_LEARNER_DASHBOARD_TOOLTIP": "تُعَد المجموعات استكشافات متعددة ذات صلة يُقصَد إكمالها بالتسلسل.", + "I18N_LEARNER_DASHBOARD_VIEW": "عرض", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "عرض الاقتراح", + "I18N_LEARNT_TOPIC": "تعلمت <[topicName]>", + "I18N_LEARN_TOPIC": "تعلم <[topicName]>", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "مؤلفو الدرس", + "I18N_LESSON_INFO_HEADER": "معلومات الدرس", + "I18N_LESSON_INFO_TOOLTIP_MESSAGE": "لقد وصلت إلى نقطة تفتيش. عمل عظيم! اعرض تقدمك ومعلومات الدرس الأخرى هنا.", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "لقد أكملت هذا", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "تمت إضافتها إلى قائمة التشغيل", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "أضف إلى قائمة التشغيل", @@ -375,13 +651,20 @@ "I18N_LICENSE_PAGE_LICENSE_HEADING": "الترخيص", "I18N_LICENSE_PAGE_PARAGRAPH_1": "جميع المحتويات في استكشافات أوبيا مرخصة بموجب المشاع الإبداعي النسبة-المشاركة بالمثل 4.0 مع التنازل عن شرط الإسناد (على وجه التحديد، القسمين 3 (أ) (1) و3 (أ) (2))، إذا أعدت استخدام محتوى من هذا الموقع، فنحن نشجعك على تضمين رابط إلى صفحة الاستكشاف ذات الصلة، ولكن لا تحتاج إلى ذلك.", "I18N_LICENSE_PAGE_PARAGRAPH_2": "برنامج تشغيل أوبيا مفتوح المصدر وكوده، ومفرج عنه بموجب رخصة أباتشي 2.0.", + "I18N_LICENSE_PAGE_TITLE": "صفحة الترخيص | أوبيا", "I18N_LICENSE_TERMS_HEADING": "شروط الترخيص", + "I18N_LOGIN_PAGE_TITLE": "تسجيل الدخول | أوبيا", "I18N_LOGOUT_LOADING": "تسجيل الخروج", + "I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "تسجيل الخروج | أوبيا", "I18N_LOGOUT_PAGE_TITLE": "تسجيل الخروج", + "I18N_MATH_COURSE_DETAILS": "تقوم دورة أسس الرياضيات المنسقة من اوبيا بتعليم اللبنات الأساسية للرياضيات ، وتغطي المفاهيم الأساسية مثل الجمع والضرب والكسور. بمجرد أن تتقن هذه المفاهيم الأساسية ، يمكنك الانتقال إلى دروس أكثر تقدمًا! 
يعتمد كل موضوع على الموضوع السابق ، بحيث يمكنك البدء من البداية وإكمال الدروس من أي مستوى مهارة ، أو الغوص مباشرة إذا كنت بحاجة إلى مساعدة في موضوع معين.", + "I18N_MATH_TOPICS_COVERED": "ابدأ من الأساسيات بموضوعنا الأول ، ضع القيم. أو ، إذا كنت تريد التركيز على موضوع معين ، فانتقل إلى أي موضوع وتعمق فيه!", "I18N_MODAL_CANCEL_BUTTON": "إلغاء", "I18N_MODAL_CONTINUE_BUTTON": "استمرار", "I18N_NEXT_LESSON": "الدرس التالي", + "I18N_NO": "لا", "I18N_ONE_SUBSCRIBER_TEXT": "لديك مشترك واحد.", + "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "شراكات", "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "في انتظار حذف الحساب", "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "الحساب للحذف", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1": "حسابك مدرج في الجدول للحذف، وسيتم حذفه خلال 24 ساعة. سيتم إخطارك بواسطة البريد الإلكتروني بعد اكتمال عملية الحذف.", @@ -430,6 +713,7 @@ "I18N_PLAYER_GIVE_UP": "ستستسلم؟", "I18N_PLAYER_GIVE_UP_TOOLTIP": "انقر هنا للحصول على الجواب.", "I18N_PLAYER_HINT": "تلميح", + "I18N_PLAYER_HINTS": "تلميحات", "I18N_PLAYER_HINTS_EXHAUSTED": "آسف، أنا بعيد عن التلميحات!", "I18N_PLAYER_HINT_IS_AVAILABLE": "انقر هنا للحصول على تلميح!", "I18N_PLAYER_HINT_NEED_A_HINT": "تحتاج مساعدة؟", @@ -479,6 +763,7 @@ "I18N_PLAYER_UNRATED": "غير مقيَّم", "I18N_PLAYER_VIEWS_TOOLTIP": "المشاهدات", "I18N_PRACTICE_SESSION_PAGE_BREADCRUMB": "قسم(جلسة) التدريب", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "لغة الصوت", "I18N_PREFERENCES_BIO": "السيرة الذاتية", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "هذا الحقل اختياري. 
أي شيء تكتبه هنا عام وللعرض في العالم.", "I18N_PREFERENCES_BREADCRUMB": "تفضيلات", @@ -491,9 +776,15 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_FEEDBACK_NEWS": "تلقي رسائل بريد إلكتروني عندما يرسل لك شخص ما تغذية راجعة عن الاستكشاف", "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "تلقي الأخبار والتحديثات حول الموقع", "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "تلقي رسائل إلكترونية عندما يشترك منشئ محتوى في عملية نشر جديدة", + "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "لم نتمكن من إضافتك إلى قائمتنا البريدية تلقائيًا. يرجى زيارة الرابط التالي للتسجيل في قائمتنا البريدية:", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "تصدير الحساب", + "I18N_PREFERENCES_EXPORT_ACCOUNT_INFO_TEXT": "سيؤدي هذا إلى تنزيل بيانات حساب اوبيا كملف نصي بتنسيق JSON.", + "I18N_PREFERENCES_EXPORT_ACCOUNT_WARNING_TEXT": "من فضلك لا تترك هذه الصفحة. يتم حاليًا تحميل بياناتك وسيتم تنزيلها كملف نصي بتنسيق JSON عند الانتهاء. إذا حدث خطأ ما ، يرجى الاتصال", "I18N_PREFERENCES_HEADING": "تفضيلات", "I18N_PREFERENCES_HEADING_SUBTEXT": "أي تغييرات تجريها على هذه الصفحة سيتم حفظها تلقائيا.", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "لم تشترك في أي منشئ محتوى حتى الآن.لا تتردد للاشتراك في مؤلفك المفضل عم طريق النقر على زو \"اشتراك\" على صفحة المؤلف.عنطريق الاشتراك سيتم اعلامك عن طريق البريد الاكتروني عندما يقوم المؤلف بنشر درساً جديداً.", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "التأثير", + "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE": "تفضيلات | أوبيا", "I18N_PREFERENCES_PAGE_TITLE": "تغيير تفضيلات ملفك الشخصي - أوبيا", "I18N_PREFERENCES_PICTURE": "الصورة", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "لغة الصوت المفضلة", @@ -503,6 +794,7 @@ "I18N_PREFERENCES_PREFERRED_DASHBOARD_EXPLAIN": "هذه لوحة التحكم التي سيتم عرضها بشكل تلقائي عند تسجيل الدخول.", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "لغات الاستكشاف المفضلة", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "سيتم اختيار هذه اللغات بشكل افتراضي عند البحث عن استكشافات في المعرض.", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": 
"تحديد اللغات المفضلة.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "لغة الموقع المفضلة", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "هذه هي اللغة التي سيتم عرض الموقع بها.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "لغة الموقع المفضلة", @@ -510,18 +802,24 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "اسحب للقص وتغيير الحجم:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "خطأ: لا يمكن قراءة ملف الصورة.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "رفع الصورة الشخصية", + "I18N_PREFERENCES_SEARCH_LABEL": "بحث", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "تحديد اللغات المفضلة...", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "لغة الموقع", "I18N_PREFERENCES_SUBJECT_INTERESTS": "الاهتمامات", + "I18N_PREFERENCES_SUBJECT_INTERESTS_ERROR_TEXT": "يجب أن تكون الاهتمامات الموضوعية فريدة وفي أحرف صغيرة.", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "على سبيل المثال: الرياضيات، علم الحاسوب، الفن، ...", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "إضافة مواضيع اهتمامات جديدة (باستخدام الحروف الصغيرة والمسافات)...", + "I18N_PREFERENCES_SUBJECT_INTERESTS_LABEL": "اهتمامات جديدة بالموضوع", "I18N_PREFERENCES_SUBJECT_INTERESTS_PLACEHOLDER": "أدخل مواضيع اهتمامات...", "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "منشئو المحتوى الذين اشتركت فيهم", "I18N_PREFERENCES_USERNAME": "اسم المستخدم", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "غير محدد بعد", + "I18N_PRIVACY_POLICY_PAGE_TITLE": "سياسة الخصوصية | أوبيا", "I18N_PROFILE_NO_EXPLORATIONS": "لم ينشئ أو يعدل هذا المستخدم أية عمليات استكشافية بعد.", + "I18N_PROFILE_PAGE_TITLE": "الملف الشخصي | أوبيا", "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "اكتشف المزيد عن درجاتك", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "لوحة بياناتي", - "I18N_QUESTION_PLAYER_NEW_SESSION": "دورة جديدة", + "I18N_QUESTION_PLAYER_NEW_SESSION": "إعادة التشغيل", "I18N_QUESTION_PLAYER_RETRY_TEST": "إعادة محاولة الاختبار", "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "العودة إلى القصة", "I18N_QUESTION_PLAYER_REVIEW_LOWEST_SCORED_SKILL": "مراجعة 
المهارات الأقل درجة", @@ -529,21 +827,51 @@ "I18N_QUESTION_PLAYER_SKILL_DESCRIPTIONS": "أوصاف المهارة", "I18N_QUESTION_PLAYER_TEST_FAILED": "فشل الاختبار؛ تُرجَى مراجعة المهارات والمحاولة مرة أخرى", "I18N_QUESTION_PLAYER_TEST_PASSED": "اكتمل الاختبار. أحسنت!", + "I18N_REFRESHER_EXPLORATION_MODAL_BODY": "يبدوا انك تواجه صعوبات مع هذه المعادلة. هل تريد محاولة جولة قصيرة حول الدرس للمراجعه، و العودة للحل بعد ذالك ؟", + "I18N_REFRESHER_EXPLORATION_MODAL_TITLE": "هل تريد المراجعة ؟", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "جلسة التسجيل منتهية الصلاحية", "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "عذرا، انتهت صلاحية جلسة التسجيل الخاصة بك، الرجاء النقر فوق \"متابعة التسجيل\" لإعادة تشغيل العملية.", + "I18N_RESET_CODE": "إعادة التعليمات البرمجية", + "I18N_RESTART_EXPLORATION_BUTTON": "أعِد الدرس", "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "فحص المراجعة.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_1": "إذا كان لديك حساب، سيُحفظ تقدّمك في التعلّم تلقائيًا.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "لديك حساب بالفعل؟", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_3": "استخدم الرابط أدناه لحفظ التقدم لمدة 72 ساعة.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_5": "اكتب أو انسخ الرابط أدناه", + "I18N_SAVE_PROGRESS": "قم بتسجيل الدخول أو التسجيل لحفظ تقدمك واللعب خلال الدرس التالي.", + "I18N_SAVE_PROGRESS_TEXT": "احفظ التقدّم", + "I18N_SHARE_LESSON": "شارك هذا الدرس", + "I18N_SHOW_LESS": "اعرض أقل", + "I18N_SHOW_MORE": "أظهر المزيد", "I18N_SHOW_SOLUTION_BUTTON": "أظهر الحل", "I18N_SIDEBAR_ABOUT_LINK": "حول", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "حول أوبيا", "I18N_SIDEBAR_BLOG": "مدونة", "I18N_SIDEBAR_CLASSROOM": "غرفة الدراسة", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "الرياضيات الأساسية", "I18N_SIDEBAR_CONTACT_US": "اتصل بنا", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "نحن هنا للإجابة على أي سؤال لديك.", "I18N_SIDEBAR_DONATE": "تبرع", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "مساعدتك تساهم في تقديم تعليم ذو جودة للجميع.", "I18N_SIDEBAR_FORUM": "المنتدى", - "I18N_SIDEBAR_GET_STARTED": 
"ابدأ", + "I18N_SIDEBAR_GET_INVOLVED": "شارك", + "I18N_SIDEBAR_HOME": "الصفحة الرئيسية", + "I18N_SIDEBAR_LEARN": "تعلم", "I18N_SIDEBAR_LIBRARY_LINK": "المكتبة", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "أساسيات الرياضيات", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "دروس للمبتدئين من أجل مساعدتك بالبدأ في الرياضيات", "I18N_SIDEBAR_OPPIA_FOUNDATION": "مؤسسة أوبيا", + "I18N_SIDEBAR_PARTNERSHIPS": "شراكات", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "قدم تعليم جيد للطلاب في منطقتك.", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "الجمع و الطرح", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "مكتبة المجتمع", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "موارد إضافية قدمها المجتمع.", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "الضرب", + "I18N_SIDEBAR_SUBMENU_PLACE_VALUES": "القيمة المكانية", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "تصفح كل الدروس", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "التعليم مع أوبيا", + "I18N_SIDEBAR_VOLUNTEER": "متطوع", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "إنضم لمجموعتنا العالمية لإنشاء و تحسين الدروس.", "I18N_SIGNIN_LOADING": "تسجيل الدخول", "I18N_SIGNIN_PAGE_TITLE": "تسجيل الدخول", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "من خلال تحديد المربع الموجود على يمين هذا النص، فإنك تقر وتوافق وتقبل الالتزام ببنود <[sitename]>شروط الاستخدام، الموجودةهنا.", @@ -569,6 +897,7 @@ "I18N_SIGNUP_LOADING": "جارٍ التحميل...", "I18N_SIGNUP_PAGE_TITLE": "انضم للمجتمع - أوبيا", "I18N_SIGNUP_REGISTRATION": "تسجيل", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "لا تسألني مرة أخرى", "I18N_SIGNUP_SEND_ME_NEWS": "أرسل لي الأخبار والتحديثات حول الموقع", "I18N_SIGNUP_SITE_DESCRIPTION": "<[sitename]> هو مشاع مفتوح من مصادر التعلم. 
جميع المواد قابلة لإعادة الاستخدام والمشاركة بحرية.", "I18N_SIGNUP_SITE_OBJECTIVE": "<[sitename]> موجود لتشجيع الإبداع والتحسين المستمر لمجموعة من المصادر التعليمية عالية الجودة التي هي متاحة بحرية لأي شخص.", @@ -578,6 +907,8 @@ "I18N_SIGNUP_WHY_LICENSE": "لماذا CC-BY-SA؟", "I18N_SOLICIT_ANSWER_DETAILS_FEEDBACK": "حسنا، دعنا الآن نعود إلى إجابتك.", "I18N_SOLICIT_ANSWER_DETAILS_QUESTION": "هل يمكن أن توضح لماذا اخترت هذه الإجابة؟", + "I18N_SOLUTION_EXPLANATION_TITLE": "الشرح:", + "I18N_SOLUTION_TITLE": "الحل", "I18N_SPLASH_BENEFITS_ONE": "التعلم الذاتي", "I18N_SPLASH_BENEFITS_THREE": "سهولة تتبع الدروس", "I18N_SPLASH_BENEFITS_TITLE": "فوائدنا", @@ -613,13 +944,64 @@ "I18N_SPLASH_TESTIMONIAL_3": "«تعلمت النسب في المدرسة إلا أنني مع أوبيا تعرفت على الكثير من الأمور الجديدة مثل النسب الثلاثية والنسب المركبة»", "I18N_SPLASH_TESTIMONIAL_4": "«لقد تعلمت الكثير من الرياضيات الجديدة وكان الأمر يسيرًا للغاية»", "I18N_SPLASH_THIRD_EXPLORATION_DESCRIPTION": "يتيح لك أوبيا إنشاء ومشاركة الاستكشافات عن مجموعة واسعة من الموضوعات، وتقتصر فقط على خيالك.", - "I18N_SPLASH_TITLE": "التفكير خارج الكتب.", + "I18N_SPLASH_TITLE": "تعليم مجاني للجميع", "I18N_SPLASH_VOLUNTEERS_CONTENT": "لا مشكلة من أنت،يمكنك ايجاد الانتماء في أوبيا. نحن دائماً بحاجة الى المزيد من الأشخص لتطوير الدروس عن طريق اقتراحات الأشئلة، المساهمة في الرسومات أو ترجمة الدروس.", "I18N_SPLASH_VOLUNTEERS_TITLE": "يديرها المجتمع", "I18N_START_HERE": "اضغط هنا للبدء!", + "I18N_STORY_3M5VBajMccXO_DESCRIPTION": "في هذه القصة، ستنضم لماثيو في زيارته لمتجر الحلويات لاقتناء كعك. \nلسوء الحظ، لايملك المبلغ الكافي لكعك كامل.لذلك، قام السيد بيكر بمساعدته بتجزأت الكعك الذي اختاره لقطع صغيرة يمكنه شرائها. ماذا حصل بعدها ؟ شغل الدرس لتكتشف ذالك!", + "I18N_STORY_3M5VBajMccXO_TITLE": "ماثيو يزور متجر الحلويات", + "I18N_STORY_JhiDkq01dqgC_DESCRIPTION": "إنضم لإيفا و والدها في رحلتهم لحديقة التسلي. 
ساعدهم بمعرفتك حول المعادلات لحل المشاكل التي يواجهونها!", + "I18N_STORY_JhiDkq01dqgC_TITLE": "يوم في حديقة التسلي", + "I18N_STORY_Qu6THxP29tOy_DESCRIPTION": "تعلم كيف تجمع و تطرح مع مايا، عمر و جدهم، خلال تحضيرهم للبيزا مع بعض !", + "I18N_STORY_Qu6THxP29tOy_TITLE": "مايا، عمر و مالك يحضرون البيزا!", + "I18N_STORY_RRVMHsZ5Mobh_DESCRIPTION": "في هذه القصة، ستتابع جامي و أخته نيس في تعلمهم كيفية تمثيل و قراءة قيمة عدد.", + "I18N_STORY_RRVMHsZ5Mobh_TITLE": "مغامرات جامي في الأقواس", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - مكتمل!", + "I18N_STORY_ialKSV0VYV0B_DESCRIPTION": "إلتقي بجامي و عمه خلال إكتشافهم كيف يستعملون النسب لتحضير مشروبات لذيذة!", + "I18N_STORY_ialKSV0VYV0B_TITLE": "مغامرات جامي مع المشروبات", + "I18N_STORY_rqnxwceQyFnv_DESCRIPTION": "إنضم لنينا خلال إستعمالها تقنيات عملية القسمة لمساعدة أمها و ساندرا في السوق!", + "I18N_STORY_rqnxwceQyFnv_TITLE": "نينا تزور السوق", + "I18N_STORY_vfJDB3JAdwIx_DESCRIPTION": "إنضم لأريا و أبوها عمر في استعمالهم لعملية الضرب من أجل غرس البذور في حديقتهم!", + "I18N_STORY_vfJDB3JAdwIx_TITLE": "اريا تريد أن تزرع حديقة", "I18N_SUBSCRIBE_BUTTON_TEXT": "اشترك", + "I18N_SUBTOPIC_0abdeaJhmfPm_adding-fractions_TITLE": "جمع الكسور", + "I18N_SUBTOPIC_0abdeaJhmfPm_comparing-fractions_TITLE": "مقارنة الكسور", + "I18N_SUBTOPIC_0abdeaJhmfPm_dividing-fractions_TITLE": "قسمة الكسور", + "I18N_SUBTOPIC_0abdeaJhmfPm_equivalent-fractions_TITLE": "تكافؤ الكسور", + "I18N_SUBTOPIC_0abdeaJhmfPm_fractions-of-a-group_TITLE": "كسور مجموعة", + "I18N_SUBTOPIC_0abdeaJhmfPm_mixed-numbers_TITLE": "أعداد كسرية", + "I18N_SUBTOPIC_0abdeaJhmfPm_multiplying-fractions_TITLE": "ضرب كسرين أو أكثر", + "I18N_SUBTOPIC_0abdeaJhmfPm_number-line_TITLE": "خط الأعداد", + "I18N_SUBTOPIC_0abdeaJhmfPm_subtracting-fractions_TITLE": "طرح كسرين أو أكثر", + "I18N_SUBTOPIC_0abdeaJhmfPm_what-is-a-fraction_TITLE": "ما هو الكسر؟", + "I18N_SUBTOPIC_5g0nxGUmx5J5_calculations-with-ratios_TITLE": "حسابات بالنسب", + "I18N_SUBTOPIC_5g0nxGUmx5J5_combining-ratios_TITLE": 
"الجمع بين النسب", + "I18N_SUBTOPIC_5g0nxGUmx5J5_equivalent-ratios_TITLE": "النسب المعادلة", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "ما هي النسبة؟", + "I18N_SUBTOPIC_C4fqwrvqWpRm_basic-concepts_TITLE": "مبادئ عملية الضرب", + "I18N_SUBTOPIC_C4fqwrvqWpRm_memorizing-expressions_TITLE": "حفظ جدول الضرب", + "I18N_SUBTOPIC_C4fqwrvqWpRm_multiplication-techniques_TITLE": "تقنيات عملية الضرب", + "I18N_SUBTOPIC_C4fqwrvqWpRm_rules-to-simplify_TITLE": "قواعد لتبسيط عملية الضرب", "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "المهارة التالية", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "المهارة السابقة:", + "I18N_SUBTOPIC_dLmjjMDbCcrf_algebraic-expressions_TITLE": "تبسيط المعدلات الجبرية", + "I18N_SUBTOPIC_dLmjjMDbCcrf_modelling-scenarios_TITLE": "تمثيل مواقف الحياة الحقيقة باستعمال المعدلات", + "I18N_SUBTOPIC_dLmjjMDbCcrf_order-of-operations_TITLE": "ترتيب العمليات", + "I18N_SUBTOPIC_dLmjjMDbCcrf_problem-solving_TITLE": "إستراتيجيات حل المشاكل", + "I18N_SUBTOPIC_dLmjjMDbCcrf_solving-equations_TITLE": "معالجة وحل المعادلات", + "I18N_SUBTOPIC_dLmjjMDbCcrf_variables_TITLE": "التعبير عن المجاهيل بالمتغيرات", + "I18N_SUBTOPIC_iX9kYCjnouWN_comparing-numbers_TITLE": "مقارنة اعداد", + "I18N_SUBTOPIC_iX9kYCjnouWN_naming-numbers_TITLE": "تسمية الأرقام", + "I18N_SUBTOPIC_iX9kYCjnouWN_place-names-and-values_TITLE": "أسماء الأماكن وقيمها", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE": "تدوير الأعداد", + "I18N_SUBTOPIC_qW12maD4hiA8_basic-concepts_TITLE": "مبادئ القسمة", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "حل المسائل", + "I18N_SUBTOPIC_qW12maD4hiA8_techniques-of-division_TITLE": "تقنيات القسمة", + "I18N_SUBTOPIC_sWBXKH4PZcK6_adding-numbers_TITLE": "جمع الاعداد", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE": "العلاقه بين الجمع و الطرح", + "I18N_SUBTOPIC_sWBXKH4PZcK6_estimation_TITLE": "تقييم", + "I18N_SUBTOPIC_sWBXKH4PZcK6_sequences _TITLE": "المتتاليات", + "I18N_SUBTOPIC_sWBXKH4PZcK6_subtracting-numbers_TITLE": "طرح الاعداد", 
"I18N_TEACH_BENEFITS_ONE": "فعال،جودة عالية للتعليم لجميع الأعمار", "I18N_TEACH_BENEFITS_THREE": "دائماً مجاني وسهل للاستخدام", "I18N_TEACH_BENEFITS_TITLE": "فوائدنا", @@ -634,6 +1016,7 @@ "I18N_TEACH_PAGE_LIBRARY_CONTENT": "المعلمون واشخاص آخرين من المجتمع حول العالم يستخدمون نظام أوبيا الانشائي كطريقة لانشاء الدروس ومشاركتها.يمكنك ايجاد 20.000 درس ل 17 موضوع مختلف في مكتبة الاستكشاف الخاصة بنا,وربما يمكنها الهامك لانشاء مكتبتك الخاصة.", "I18N_TEACH_PAGE_LIBRARY_TITLE": "استكشف الدروس التي تم تجهيزها من المجتمع", "I18N_TEACH_PAGE_SIX_TITLE": " ابدأ بالتعلم اليوم", + "I18N_TEACH_PAGE_TITLE": "دليل أوبيا لأولياء الأمور والمعلمين | أوبيا", "I18N_TEACH_STUDENT_DETAILS_1": "ريا سوجاني", "I18N_TEACH_STUDENT_DETAILS_2": "ولاء عوض", "I18N_TEACH_STUDENT_DETAILS_3": "هيمانشو تانيجا،كوروكوشسترا،الهند", @@ -641,52 +1024,98 @@ "I18N_TEACH_TESTIMONIAL_1": "أنا ممتن لأن أتيحت لي الفرصة لتعليم الأطفال الهنود المحرومين وسد الفجوات في فهمهم لمفاهيم الرياضيات النقدية. كانت مشاهدة زيادة ثقة هؤلاء الطلاب أثناء تعلمهم تستحق الساعات الإضافية.", "I18N_TEACH_TESTIMONIAL_2": "أوبيا هي الأولى من نوعها! تساعد الطلاب على تعلم كل ما يحتاجون إليه حول موضوع معين بطريقة جذابة وجذابة ؛ كما يشجعهم على استخدام الأجهزة الذكية لمصلحتهم.", "I18N_TEACH_TESTIMONIAL_3": "لم أتوقع أبدًا أن يتعلم الطلاب التكنولوجيا وأن يحضروا دروسًا في الرياضيات بهذه السرعة. إنه أول تعرض لهم للتكنولوجيا الذكية وكانوا يكافحون حقًا للتعامل معها في البداية. الآن ، أشعر بسعادة بالغة لرؤيتهم يقومون بدروس أوبيا حتى قبل أن أدخل الفصل!", + "I18N_TERMS_PAGE_TITLE": "شروط الاستخدام | أوبيا", "I18N_THANKS_PAGE_BREADCRUMB": "شكر", + "I18N_THANKS_PAGE_TITLE": "شكرا | أوبيا", + "I18N_TIME_FOR_BREAK_BODY_1": "يبدو أنك تجيب بسرعه كبيرة، هل تعبت ؟", + "I18N_TIME_FOR_BREAK_BODY_2": "اذا، خذ استراحة! 
يمكنك العودة لاحقا.", + "I18N_TIME_FOR_BREAK_FOOTER": "أنا مستعد لمواصلة الدرس", + "I18N_TIME_FOR_BREAK_TITLE": "وقت استراحة؟", + "I18N_TOPIC_0abdeaJhmfPm_DESCRIPTION": "غالبا ما ستحتاج للتكلم عن أجزاء من شيء : في وصفة قد تستحق نصف كأس من الدقيق، أو ستدفق جزء من قارورة الحليب. في هذا الموضوع، ستتعلم كيف تستعمل الكسور للفهم و التعبير عن مواقف كهذه.", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "الكسور", + "I18N_TOPIC_5g0nxGUmx5J5_DESCRIPTION": "تعد النسب مفيدة في حساب كمية المكونات التي يجب استخدامها إذا كان لديك وصفة لأربعة أشخاص ولكنك تريد الطهي لشخصين. في هذا الموضوع ، ستتعلم كيفية استخدام النسب لمقارنة حجم شيء بآخر بسهولة.", + "I18N_TOPIC_5g0nxGUmx5J5_TITLE": "النسب والاستدلال النسبي", + "I18N_TOPIC_C4fqwrvqWpRm_DESCRIPTION": "إذا اشتريت 60 علبة تحتوي كل منها على خمسة كعكات. كم من كعكة لديك ؟ في هذا الموضوع ستتعلم كيف تستعمل عملية الضرب لتحل مشاكل كهذه ( بدون أن تستعمل الجمع عدة مرات!)", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "الضرب", + "I18N_TOPIC_LEARN": "تعلم", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{درس واحد} other{# دروس}}", + "I18N_TOPIC_TITLE": "موضوع", "I18N_TOPIC_VIEWER_CHAPTER": "فصل", "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{فصل واحد} other{# فصول}}", + "I18N_TOPIC_VIEWER_COMING_SOON": "قريبًا!", + "I18N_TOPIC_VIEWER_COMING_SOON_LESSONS": "عد مرة أخرى عندما تكون الدروس متوفرة حول هذا الموضوع.", + "I18N_TOPIC_VIEWER_COMING_SOON_PRACTICE": "عد لاحقا عندما تصبح التطبيقات متوفرة لهذا الموضوع.", "I18N_TOPIC_VIEWER_DESCRIPTION": "الوصف", "I18N_TOPIC_VIEWER_LESSON": "الدروس", "I18N_TOPIC_VIEWER_LESSONS": "الدروس", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "عد لاحقا عندما تصبح الدروس متوفرة حول هذا الموضوع.", "I18N_TOPIC_VIEWER_MASTER_SKILLS": "تمرّس مهارات متعلقة بالموضوع <[topicName]>", + "I18N_TOPIC_VIEWER_NO_QUESTION_WARNING": "حاليا لا توجد أسئلة بعد ، حول هذه الوحدة.", "I18N_TOPIC_VIEWER_PRACTICE": "الممارسة", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_TAG": "(بيتا)", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "عد لاحقا عندما تصبح 
الأسئلة متواجدة حول هذا الموضوع.", "I18N_TOPIC_VIEWER_REVISION": "مراجعة", - "I18N_TOPIC_VIEWER_SELECT_SKILLS": "حدد المهارة لتمرين<[topicName]> معرفتك عليها", + "I18N_TOPIC_VIEWER_SELECT_SKILLS": "حدد المهارة لتمرين<[topicName]> معرفتك عليها.", "I18N_TOPIC_VIEWER_SKILL": "المهارات", "I18N_TOPIC_VIEWER_SKILLS": "المهارات", "I18N_TOPIC_VIEWER_START_PRACTICE": "ابدأ", "I18N_TOPIC_VIEWER_STORIES": "القصص", + "I18N_TOPIC_VIEWER_STORIES_HEAD": "قصص يمكنك لعبها", "I18N_TOPIC_VIEWER_STORY": "القصة", "I18N_TOPIC_VIEWER_STUDY_SKILLS": "ادرس مهارات متعلقة بالموضوع <[topicName]>", "I18N_TOPIC_VIEWER_STUDY_SKILLS_SUBTITLE": "استخدم بطاقات المراجعة التالية كي تساعدك في دراسة مهارات متعلقة بالموضوع <[topicName]>.", "I18N_TOPIC_VIEWER_VIEW_ALL": "عرض الكل", "I18N_TOPIC_VIEWER_VIEW_LESS": "أظهر أقل", + "I18N_TOPIC_dLmjjMDbCcrf_DESCRIPTION": "غالبا ما ستواجه مشاكل بإعداد مجهولة -- مثال، إذا إشتريت منتج خلال التخفيضات و تريد أن تجد الثمن الأساسي. في هذا الموضوع ستتعلم كيف تحل هذه المشاكل بالمعادلات.", + "I18N_TOPIC_dLmjjMDbCcrf_TITLE": "العبارات الجبرية و المعادلات", + "I18N_TOPIC_iX9kYCjnouWN_DESCRIPTION": "هل تعلم أنه يمكن التعبير عن جميع الأعداد الممكنة للأشياء باستخدام عشرة أرقام فقط (0،1،2،3 ، ... 
، 9)؟ في هذا الموضوع ، سوف نتعلم كيف يمكننا استخدام قيم الخانات للقيام بذلك ، ونرى لماذا \"5\" لها قيمة مختلفة في \"25\" و \"2506\".", + "I18N_TOPIC_iX9kYCjnouWN_TITLE": "القيمة المكانية", + "I18N_TOPIC_qW12maD4hiA8_DESCRIPTION": "إذا كنت تمتلك إثنين و ثلاثون حبة طماطم لتشاركها مع أربعة أشخاص، كم من حبة طماطم سيحصل عليها كل شخص ؟ في هذا الموضوع سنتعلم كيف نستعمل عملية القسمة لتقسيم شيء إلى أجزاء.", + "I18N_TOPIC_qW12maD4hiA8_TITLE": "القسمة", + "I18N_TOPIC_sWBXKH4PZcK6_DESCRIPTION": "إذا كنت تملك أربعة بيضات و أعطاك صديقك 37 حبة إضافية، كم سيصبح لديك من بيضة ؟ ماذا لو فقدت ثمانية ؟ في هذا الموضوع، ستتعلم كيف تحل هذه المشاكل بإستعمال أساسيات الجمع و الطرح.", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "الجمع و الطرح", "I18N_TOPNAV_ABOUT": "حول", "I18N_TOPNAV_ABOUT_OPPIA": "عن أوبيا", "I18N_TOPNAV_ADMIN_PAGE": "صفحة الإداري", "I18N_TOPNAV_BLOG": "مدونة", - "I18N_TOPNAV_CLASSROOM": "غرفة الدراسة", - "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "الرياضيات الأساسية", + "I18N_TOPNAV_BLOG_DASHBOARD": "لوحة تحكم المدونة", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "أساسيات الرياضيات", "I18N_TOPNAV_CONTACT_US": "اتصل بنا", + "I18N_TOPNAV_CONTACT_US_DESCRIPTION": "نحن هنا للمساعدة، إذا كان لديك أي سؤال.", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "لوحة المساهم", "I18N_TOPNAV_CREATOR_DASHBOARD": "لوحة تحكم منشئي المحتوى", "I18N_TOPNAV_DONATE": "تبرع", + "I18N_TOPNAV_DONATE_DESCRIPTION": "مساعدتك تساهم في تقديم تعليم ذو جودة عالية للجميع.", "I18N_TOPNAV_FORUM": "المنتدى", "I18N_TOPNAV_GET_INVOLVED": "شارك", "I18N_TOPNAV_GET_STARTED": "ابدأ", + "I18N_TOPNAV_HOME": "الرئيسية", + "I18N_TOPNAV_LEARN": "تعلم", "I18N_TOPNAV_LEARNER_DASHBOARD": "لوحة المتعلم", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "دروس للمبتدئين لمساعدتك بالبدأ في الرياضيات.", + "I18N_TOPNAV_LEARN_HEADING": "طرق لتعلم المزيد", + "I18N_TOPNAV_LEARN_LINK_1": "تصفح كل الدروس", + "I18N_TOPNAV_LEARN_LINK_2": "واصل التعلم", "I18N_TOPNAV_LIBRARY": "المكتبة", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "المزيد من المصادر المحضرة من طرف المجموعة لمساعدتك في 
تعلم المزيد.", "I18N_TOPNAV_LOGOUT": "خروج", "I18N_TOPNAV_MODERATOR_PAGE": "صفحة منسق الحوارات", "I18N_TOPNAV_OPPIA_FOUNDATION": "مؤسسة أوبيا", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "مشاركة دليل التشغيل", - "I18N_TOPNAV_PARTNERSHIPS": "الشراكات", + "I18N_TOPNAV_PARTNERSHIPS": "المدارس والمنظمات", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "اشترك و احضر اوبيا لمدرستك، مجتمعك أو منطقتك.", "I18N_TOPNAV_PREFERENCES": "التفضيلات", "I18N_TOPNAV_SIGN_IN": "تسجيل الدخول", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "الدخول عن طريق جوجل", "I18N_TOPNAV_TEACH_WITH_OPPIA": "التعليم مع أوبيا", "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "لوحة معلومات المواضيع والمهارات", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "إنضم إلى مجموعتنا العالمية لإنشاء و تطوير دروس.", "I18N_TOTAL_SUBSCRIBERS_TEXT": "لديك ما مجموعه <[totalSubscribers]> مشترك.", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "إلغاء الاشتراك", - "I18N_WORKED_EXAMPLE": "مثال على العمل" + "I18N_VIEW_ALL_TOPICS": "عرض كافة مواضيع <[classroomName]>", + "I18N_VOLUNTEER_PAGE_BREADCRUMB": "متطوع", + "I18N_WARNING_MODAL_DESCRIPTION": "هذا سيظهر الحل. 
هل أنت متأكد ؟", + "I18N_WARNING_MODAL_TITLE": "تحذير!", + "I18N_WORKED_EXAMPLE": "مثال على العمل", + "I18N_YES": "نعم" } diff --git a/assets/i18n/ast.json b/assets/i18n/ast.json index 46f1d948da03..1ab63c51c6d3 100644 --- a/assets/i18n/ast.json +++ b/assets/i18n/ast.json @@ -289,7 +289,6 @@ "I18N_SIDEBAR_CONTACT_US": "Comunica con nós", "I18N_SIDEBAR_DONATE": "Donativos", "I18N_SIDEBAR_FORUM": "Foru", - "I18N_SIDEBAR_GET_STARTED": "Primeros pasos", "I18N_SIDEBAR_LIBRARY_LINK": "Biblioteca", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Enseñar con Oppia", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Al marcar la caxella de la izquierda d'esti testu, reconoces, alcuerdes y aceutes respetar les Condiciones d'usu de <[sitename]>, que s'alcuentren equí.", diff --git a/assets/i18n/bn.json b/assets/i18n/bn.json index 6a8be09813e4..05912c9e8296 100644 --- a/assets/i18n/bn.json +++ b/assets/i18n/bn.json @@ -1,44 +1,154 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "ওপিয়া ফাউন্ডেশন সম্পর্কে", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "ওপিয়া ফাউন্ডেশন সম্পর্কে | ওপিয়া", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "একটি অন্বেষণ তৈরি করুন", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "একটি বিষয় সম্পর্কে আপনি যার তদারক করেন।", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "প্রতিক্রিয়া পান", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK_TEXT": "আপনার অন্বেষণ উন্নতি করতে।", - "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "Oppia সম্পর্কে", + "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "ওপিয়া সম্পর্কে", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_1": "ওপিয়ার লক্ষ্য হল যে কাউকে একটি কার্যকর এবং উপভোগ্য উপায়ে তারা যা কিছু শিখতে চান তা শিখাতে সহায়তা করা।", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_2": "সারা বিশ্বের শিক্ষাবিদদের সাহায্যে বিনামূল্যে, উচ্চ-মানের, প্রদর্শনযোগ্যভাবে কার্যকর পাঠের একটি সমাহার তৈরি করে, ওপিয়া শিক্ষার্থীদের মানসম্পন্ন শিক্ষা প্রদানের লক্ষ্য রাখে— তারা যেখানেই থাকুক বা কোন ঐতিহ্যগত সম্পদে তাদের প্রবেশাধিকার থাকুক না কেন।", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_3": "এখনও অবধি, শিক্ষাবিদগণ <[numberOfExplorations]>টিরও 
বেশি পাঠ তৈরি করেছেন, যেগুলো এখানে অন্বেষণ নামে পরিচিত। এবং সেগুলো বিশ্বব্যাপী প্রায় <[numberofStudentsServed]> জন ছাত্রকে পরিবেশন করা হচ্ছে।", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_4": "অন্বেষণ ছাত্রদের ভিডিও, ছবি এবং খোলামেলা প্রশ্নের মাধ্যমে মজাদার এবং সৃজনশীল উপায়ে শিখতে সাহায্য করে। এবং যেহেতু ছাত্রদের প্রায়ই একই রকমের ভুল ধারণা থাকে, তাই ওপিয়া শিক্ষকদের এই ভুল ধারণাগুলিকে সরাসরি অনুসন্ধানের মধ্যে সমাধান করার ক্ষমতা প্রদান করে, তাদের ক্ষমতায়ন করে এক সময়ে একাধিক ছাত্রদের লক্ষ্যযুক্ত প্রতিক্রিয়া প্রদান করতে।", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_5": "আপনি যদি ওপিয়ার সাথে শিখতে আগ্রহী একজন ছাত্র হন, আপনি আমাদের অন্বেষণ ব্রাউজ করে আপনার শেখার দুঃসাহসিক কাজ শুরু করতে পারেন৷", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_6": "আপনি যদি সারা বিশ্বের ছাত্রদের জীবনকে প্রভাবিত করতে আগ্রহী একজন শিক্ষক হন, তাহলে আপনি আমাদের ওপিয়াতে শেখান প্রোগ্রামে যোগদানের জন্য আবেদন করতে পারেন, যার লক্ষ্য হল যে বিষয়গুলি ছাত্রদের জন্য কঠিন মনে হয় তার জন্য পাঠ প্রদান করা।", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_7": "ওপিয়াতে শিক্ষাদানের মাধ্যমে, আপনি সারা বিশ্বের শিক্ষার্থীদের শিক্ষার উন্নতি করতে সাহায্য করার পাশাপাশি যোগাযোগ ও সহমর্মিতায় আপনার দক্ষতা উন্নত করতে পারেন। অথবা, আপনি যদি এখনও শেখানোর জন্য প্রস্তুত না হন, তাহলেও আপনি পাঠের বিষয়ে প্রতিক্রিয়া শেয়ার করতে পারেন যাতে সেগুলিকে অন্য ছাত্রদের জন্য আরও ভালো করা যায়!", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "প্রকাশ করুন & বণ্টন করুন", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE_TEXT": "সম্প্রদায়ের সাথে আপনার সৃষ্টিসমূহ।", + "I18N_ABOUT_PAGE_AUDIO_SUBTITLES_FEATURE": "অডিও সাবটাইটেল", + "I18N_ABOUT_PAGE_BREADCRUMB": "সম্পর্কে", + "I18N_ABOUT_PAGE_CREATE_LESSON_CONTENT": "ওপিয়ার বিষয়বস্তু তৈরিকরণ পদ্ধতির সাহায্যে, আপনি যে বিষয়ে আগ্রহী সেই বিষয়ে সহজেই পাঠ তৈরি এবং সংশোধন করতে পারেন।", "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "কৃতিত্ব", + "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT": "ওপিয়ার অবদানকারীগণ সারা বিশ্বে ছড়িয়ে রয়েছেন — আমাদের মধ্যে অনেকেই ছাত্র, সাম্প্রতিক ছাত্র ও শিক্ষক। আমরা নিম্নলিখিত অবদানকারীদের ধন্যবাদ জানাতে চাই যারা এই মঞ্চটি 
তৈরি করতে সাহায্য করেছেন৷ আপনি যদি সাহায্য করতে চান তাহলে জেনে নিন কিভাবে জড়িত হবেন!", + "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT_BOTTOM": "ওপিয়া উন্নয়ন দল <[listOfNames]>র প্রতিক্রিয়া, ধারণা, সাহায্য এবং পরামর্শের জন্যেও কৃতজ্ঞ।", + "I18N_ABOUT_PAGE_CREDITS_THANK_TRANSLATEWIKI": "ক্রাউডসোর্সড অনুবাদ প্রদান করার জন্য আমরা ট্রান্সলেটউইকি.নেটকে ধন্যবাদ জানাতে চাই।", + "I18N_ABOUT_PAGE_EASILY_CREATE_LESSON": "সহজেই পাঠ তৈরি করুন", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "সম্প্রদায়ের তৈরি পাঠ অন্বেষণ করুন৷", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "দান করুন", "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "জড়িত হোন", "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "ওপিয়া ফাউন্ডেশন", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_1": "ওপিয়া ওয়েবসাইট এবং উৎস কোড ওপিয়া ফাউন্ডেশন কতৃক সমর্থিত, একটি কর-মুক্ত ৫০১(গ)(৩) অলাভজনক সংস্থা যা ক্যালিফোর্নিয়া রাজ্যে নিবন্ধিত।", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4_HEADING": "পরিচালক", "I18N_ABOUT_PAGE_HEADING": "ওপিয়া: সবার জন্য শিক্ষা", + "I18N_ABOUT_PAGE_LANGUAGE_FEATURE": "স্থানীয় উপভাষায় অনুবাদ", "I18N_ABOUT_PAGE_LEARN_BUTTON": "আমি শিখতে চাই", + "I18N_ABOUT_PAGE_LEARN_FROM": "ওপিয়ার তত্ত্বাবধনে পাঠ থেকে শিখুন", + "I18N_ABOUT_PAGE_LESSON_FEATURE": "গল্প ভিত্তিক পাঠ", + "I18N_ABOUT_PAGE_MOBILE_FEATURE": "মোবাইল-বান্ধব নেভিগেশন", + "I18N_ABOUT_PAGE_OUR_FEATURES": "আমাদের বৈশিষ্ট্য", + "I18N_ABOUT_PAGE_OUR_OUTCOMES": "আমাদের ফলাফল", "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "আপনি আজ কী করতে চান?", "I18N_ABOUT_PAGE_TABS_ABOUT": "সম্পর্কে", "I18N_ABOUT_PAGE_TABS_CREDITS": "কৃতিত্ব", "I18N_ABOUT_PAGE_TABS_FOUNDATION": "ফাউন্ডেশন", "I18N_ABOUT_PAGE_TEACH_BUTTON": "আমি শেখাতে চাই", "I18N_ABOUT_PAGE_TITLE": "সম্পর্কে | Oppia", - "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Oppia দিয়ে শিখানোর জন্য আবেদন করুন", + "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "ওপিয়া দিয়ে শুরু করুন", + "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "ওপিয়ার মাধ্যমে শিখানোর জন্য আবেদন করুন", "I18N_ACTION_BROWSE_EXPLORATIONS": "আমাদের অন্বেষণে ব্রাউজ করুন", 
"I18N_ACTION_BROWSE_LESSONS": "আমাদের পাঠ ব্রাউজ করুন", + "I18N_ACTION_BROWSE_LIBRARY": "গ্রন্থাগার দেখুন", "I18N_ACTION_CREATE_EXPLORATION": "একটি অন্বেষণ তৈরি করুন", "I18N_ACTION_CREATE_LESSON": "আপনার নিজস্ব পাঠ তৈরি করুন", + "I18N_ACTION_CREATE_LESSON_BUTTON": "পাঠ তৈরি করুন", + "I18N_ACTION_EXPLORE_LESSONS": "পাঠ অন্বেষণ", + "I18N_ACTION_GUIDE_FOR_TEACHERS": "শিক্ষকদের জন্য নির্দেশনা", + "I18N_ACTION_TIPS_FOR_PARENTS": "পিতামাতা এবং অভিভাবকদের জন্য পরামর্শ", + "I18N_ACTION_VISIT_CLASSROOM": "শ্রেণিকক্ষে যান", + "I18N_ADD_SYLLABUS_DESCRIPTION_TEXT": "আপনার পাঠ্যক্রমে দক্ষতা বা গল্প যোগ করুন যাতে সেগুলি আপনার ছাত্রদের কাছে স্বয়ংক্রিয়ভাবে পাঠানো যায়।", + "I18N_ADD_SYLLABUS_SEARCH_PLACEHOLDER": "অনুসন্ধান করুন যেমন:- গল্প, পদার্থবিদ্যা, বাংলা", + "I18N_ATTRIBUTION_HTML_STEP_ONE": "HTML অনুলিপি করে প্রতিলেপন করুন", + "I18N_ATTRIBUTION_HTML_STEP_TWO": "নিশ্চিত করুন যে সংযোগটি \"<[linkText]>\" হিসাবে প্রদর্শিত হচ্ছে", + "I18N_BLOG_CARD_PREVIEW_CONTEXT": "এইভাবে ব্লগ কার্ড প্রধান পাতায় এবং আপনার লেখক প্রোফাইলে প্রদর্শিত হবে।", + "I18N_BLOG_CARD_PREVIEW_HEADING": "ব্লগ পত্রের প্রাকদর্শন", + "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "নতুন ব্লগ পোস্ট তৈরি করুন", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "মনে হচ্ছে আপনি এখনও কোন গল্প তৈরি করেননি!", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "নতুন পোস্ট", + "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "খসড়া", + "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "প্রকাশিত", + "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "সংক্ষেপচিত্র যোগ করুন", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "মূলাংশ", + "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "বাতিল", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "মুছুন", + "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "সংক্ষেপচিত্র সম্পাদনা করুন", + "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "সর্বশেষ সংরক্ষিত", + "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "প্রকাশ করুন", + "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "সম্পন্ন", + "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "খসড়া হিসেবে সংরক্ষণ করুন", + 
"I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "প্রাকদর্শন", + "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "ট্যাগসমূহ", + "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "আরও ট্যাগ যোগ করা যেতে পারে।", + "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "সংক্ষেপচিত্র", "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "শিরোনাম", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "একটি ফাইল চয়ন করুন বা সেটি এখানে টেনে আনুন৷", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "ত্রুটি: চিত্রের ফাইল পড়া যায়নি।", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "একটি সংক্ষেপচিত্র যোগ করুন", + "I18N_BLOG_POST_UNTITLED_HEADING": "শিরোনামহীন", + "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "এই পত্রের বিষয়বস্তু খুব দীর্ঘ। সংরক্ষণ করতে অনুগ্রহ করে ৪৫০০ অক্ষরের নিচে রাখুন।", + "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "এই পত্রটি বেশ দীর্ঘ, এবং শিক্ষার্থীরা আগ্রহ হারিয়ে ফেলতে পারে। এটি সংক্ষিপ্ত করার বিবেচনা করুন, বা এটিকে দুটি পত্রে বিভক্ত করুন।", + "I18N_CHAPTER_COMPLETION": "অধ্যায় সম্পূর্ণ করার জন্য অভিনন্দন!", + "I18N_CLASSROOM_CALLOUT_BUTTON": "অন্বেষণ করুন", + "I18N_CLASSROOM_CALLOUT_HEADING_1": "গণিতের ভিত্তি", + "I18N_CLASSROOM_CALLOUT_HEADING_2": "পরিচিতি: ওপিয়া শ্রেণিকক্ষ", + "I18N_CLASSROOM_MATH_TITLE": "গণিত", + "I18N_CLASSROOM_PAGE_COMING_SOON": "শীঘ্রই আসছে", + "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "কোর্সের বিবরণ", + "I18N_CLASSROOM_PAGE_HEADING": "ওপিয়া শ্রেণিকক্ষ", + "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "সম্প্রদায়ের তৈরি আরও পাঠ অন্বেষণ করুন৷", + "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "আমাদের সম্প্রদায়ের গ্রন্থাগারে সন্ধান চালান", + "I18N_CLASSROOM_PAGE_TITLE": "ওপিয়ার মাধ্যমে <[classroomName]> শিখুন | ওপিয়া", + "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "অন্তর্ভুক্ত প্রসঙ্গসমূহ", + "I18N_COLLECTION_EDITOR_PAGE_TITLE": "<[collectionTitle]> - ওপিয়া সম্পাদক", + "I18N_COLLECTION_EDITOR_UNTITLED_COLLECTION_PAGE_TITLE": "শিরোনামহীন - ওপিয়া সম্পাদক", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "শুরু করুন", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "চালিয়ে যান", + 
"I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "এই সংগ্রহে কোনও অন্বেষণ যোগ করা হয়নি।", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "<[collectionTitle]> - ওপিয়া", + "I18N_COMING_SOON": "শীঘ্রই আসছে!", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "সংগ্রহ", + "I18N_COMPLETED_STORY": "'<[story]>' সম্পন্ন হয়েছে", + "I18N_COMPLETE_CHAPTER": "'<[topicName]>'-এ একটি অধ্যায় সম্পূর্ণ করুন", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_3": "একটি নিখুঁত প্রারম্ভ! এরমই বজায় রাখুন!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_1": "আপনি একটি চেকপয়েন্ট সম্পূর্ণ করেছেন! সাবাশ!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_1": "আপনি অর্ধেক পথ পেরিয়েই গেছেন, আপনি কিছু সময়ের মধ্যে সম্পন্ন করবেন!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_2": "আপনি অর্ধেকটা সম্পন্ন করেছেন, চমৎকার কাজ!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_3": "দারুন! আপনি ইতোমধ্যে পাঠের অর্ধেকটা সম্পন্ন করেছেন! আশ্চর্যজনক কাজ!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_1": "আর মাত্র একটি বাকি, উহু!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_2": "চলুন! আর মাত্র একটা বাকি!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_3": "আপনি দুর্দান্ত করছেন, আর মাত্র একটি বাকি!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_1": "আপনি ভাল অগ্রগতি করছেন! চালিয়ে যান!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_1": "আপনি প্রায় পৌঁছে গেছেন! এরকমই বজায় রাখুন!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_2": "আপনি প্রায় শেষ পর্যন্ত পৌঁছে গেছেন! চালিয়ে যান!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_3": "চমৎকার কাজ! 
আপনি প্রায় শেষ রেখায়!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "হুররে!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "অসাধারণ!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "সাবাশ!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "দারুণ কাজ!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "সাবাশ!", + "I18N_CONTACT_PAGE_BREADCRUMB": "যোগাযোগ", "I18N_CONTACT_PAGE_HEADING": "জড়িত হোন!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Oppia প্রকল্পে সাহায্য করার জন্য আপনার আগ্রহের জন্য ধন্যবাদ!", "I18N_CONTACT_PAGE_PARAGRAPH_10_HEADING": "উন্নতি ও রক্ষণাবেক্ষণ বজায় রাখা", "I18N_CONTACT_PAGE_PARAGRAPH_11_HEADING": "দান", + "I18N_CONTACT_PAGE_PARAGRAPH_12": "আপনি যদি এই প্রকল্পে অনুদান দিতে চান, অনুগ্রহ করে www.oppia.org/donate--এ গিয়ে মুক্তহাতে দান করুন। আপনার সাহায্যের জন্য ধন্যবাদ!", "I18N_CONTACT_PAGE_PARAGRAPH_13_HEADING": "প্রেস", + "I18N_CONTACT_PAGE_PARAGRAPH_14": "আমাদের পাঠগুলিকে আরও বৃহত্তর শ্রোতাদের কাছে নিয়ে যেতে, অথবা শিক্ষার্থীদের কাছে যে বিষয়গুলিকে কঠিন মনে হয় সেগুলির উপর অন্বেষণের একটি সংগ্রহ তৈরি করতে আপনি যদি ওপিয়ার সাথে অংশীদারিত্ব করতে আগ্রহী হন, অনুগ্রহ করে partnerships@oppia.org ঠিকানায় ইমেল করুন৷", "I18N_CONTACT_PAGE_PARAGRAPH_14_HEADING": "অংশীদারিত্ব", "I18N_CONTACT_PAGE_PARAGRAPH_15_HEADING": "নিরাপত্তা", "I18N_CONTACT_PAGE_PARAGRAPH_2_HEADING": "আমরা সকলেই স্বেচ্ছাসেবক", - "I18N_CONTACT_PAGE_PARAGRAPH_3_HEADING": "কিভাবে ওপিয়া অন্যান্য শেখার প্ল্যাটফর্ম থেকে ভিন্ন", + "I18N_CONTACT_PAGE_PARAGRAPH_3_HEADING": "কীভাবে ওপিয়া অন্যান্য শিক্ষা মঞ্চ থেকে ভিন্ন", + "I18N_CONTACT_PAGE_PARAGRAPH_4": "এটি একটি বৃহৎ প্রয়াস, তাই আপনার যে কোন ধরনের সহায়তাকে স্বাগত! 
আপনি কোথায় বাস করেন, আপনি কোন ভাষায় কথা বলেন, বা আপনার বয়স কত বা অল্প বয়সী কি না তা বিবেচ্য নয় — এটি একটি সম্প্রদায়ভিত্তিক প্রকল্প, এবং যতক্ষণ আপনি সাহায্য করতে ইচ্ছুক, ততক্ষণে আমরা আপনাকে পাশে চাই। নিম্নোক্ত উপায়গুলো অনুসরণ করে ঝাঁপিয়ে পড়ুন:", "I18N_CONTACT_PAGE_PARAGRAPH_4_HEADING": "যেভাবে আপনি সাহায্য করতে পারেন", + "I18N_CONTACT_PAGE_PARAGRAPH_5_HEADING": "বিদ্যমান অন্বেষণ পরীক্ষা করা হচ্ছে", "I18N_CONTACT_PAGE_PARAGRAPH_7_HEADING": "নতুন অন্বেষণ তৈরি করা", + "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "বিদ্যমান অন্বেষণ উন্নত করা হচ্ছে", + "I18N_CONTACT_PAGE_TITLE": "যোগাযোগ | ওপিয়া", + "I18N_CONTINUE_REGISTRATION": "নিবন্ধন চালিয়া যান", + "I18N_COOKIE_BANNER_ACKNOWLEDGE": "ঠিক আছে", + "I18N_COOKIE_BANNER_EXPLANATION": "এই ওয়েবসাইটটি মূল কার্যকারিতা সমর্থন করতে, সাইটটিকে সুরক্ষিত রাখতে এবং আমাদের ওয়েবসাইটের ট্রাফিক বিশ্লেষণ করতে কুকিজ ও অনুরূপ প্রযুক্তি ব্যবহার করে। আরও জানতে আমাদের গোপনীয়তা নীতি দেখুন।", "I18N_CORRECT_FEEDBACK": "সঠিক!", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "আপনার গোষ্ঠীর লিঙ্ক", + "I18N_CREATE_ACCOUNT": "অ্যাকাউন্ট তৈরি করুন", "I18N_CREATE_ACTIVITY_QUESTION": "আপনি কি তৈরি করতে চান?", "I18N_CREATE_ACTIVITY_TITLE": "একটি কার্যকলাপ তৈরি করুন", "I18N_CREATE_COLLECTION": "সংগ্রহ তৈরি করুন", @@ -47,11 +157,13 @@ "I18N_CREATE_EXPLORATION_QUESTION": "আপনি একটি অন্বেষণ তৈরি করতে চান?", "I18N_CREATE_EXPLORATION_TITLE": "অন্বেষণ তৈরি করুন", "I18N_CREATE_EXPLORATION_UPLOAD": "আপলোড করুন", + "I18N_CREATE_LEARNER_GROUP": "গোষ্ঠী তৈরি করুন", + "I18N_CREATE_LEARNER_GROUP_PAGE_TITLE": "শিক্ষার্থী গোষ্ঠী তৈরি করুন | ওপিয়া", "I18N_CREATE_NO_THANKS": "না, ধন্যবাদ", "I18N_CREATE_YES_PLEASE": "হ্যাঁ, দয়া করে!", "I18N_CREATOR_IMPACT": "প্রভাব", "I18N_DASHBOARD_COLLECTIONS": "সংগ্রহ", - "I18N_DASHBOARD_CREATOR_DASHBOARD": "নির্মাতার ড্যাশবোর্ড", + "I18N_DASHBOARD_CREATOR_DASHBOARD": "নির্মাতার যতিফলক", "I18N_DASHBOARD_EXPLORATIONS": "অন্বেষণ", "I18N_DASHBOARD_EXPLORATIONS_EMPTY_MESSAGE": "মনে হচ্ছে আপনি এখনো কোনও অনুসন্ধান করেননি। চল শুরু 
করি!", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY": "বাছাই করুন", @@ -62,19 +174,79 @@ "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TITLE": "শিরোনাম", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TOTAL_PLAYS": "মোট নাটকগুলি", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_UNRESOLVED_ANSWERS": "অমীমাংসিত উত্তরসমূহ", + "I18N_DASHBOARD_LESSONS": "পাঠ", "I18N_DASHBOARD_OPEN_FEEDBACK": "প্রতিক্রিয়া খুলুন", + "I18N_DASHBOARD_SKILL_PROFICIENCY": "দক্ষতা কুশল", "I18N_DASHBOARD_STATS_AVERAGE_RATING": "গড় মূল্যায়ন", "I18N_DASHBOARD_STATS_OPEN_FEEDBACK": "উন্মুক্ত প্রতিক্রিয়া", "I18N_DASHBOARD_STATS_TOTAL_PLAYS": "মোট নাটকগুলি", "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "গ্রাহক", + "I18N_DASHBOARD_STORIES": "গল্পসমূহ", "I18N_DASHBOARD_SUBSCRIBERS": "গ্রাহক", "I18N_DASHBOARD_SUGGESTIONS": "পরামর্শসমূহ", "I18N_DASHBOARD_TABLE_HEADING_EXPLORATION": "অন্বেষণ", "I18N_DASHBOARD_TABLE_HEADING_LAST_UPDATED": "সর্বশেষ হালনাগাদ", "I18N_DASHBOARD_TABLE_HEADING_RATING": "মূল্যায়ন", "I18N_DASHBOARD_TABLE_HEADING_UNRESOLVED_ANSWERS": "অমীমাংসিত উত্তরসমূহ", + "I18N_DASHBOARD_TOPICS_AND_SKILLS_DASHBOARD": "প্রসঙ্গ ও দক্ষতার যতিফলক", + "I18N_DELETE_ACCOUNT_PAGE_BREADCRUMB": "অ্যাকাউন্ট মুছুন", + "I18N_DELETE_ACCOUNT_PAGE_BUTTON": "আমার অ্যাকাউন্ট মুছুন", + "I18N_DELETE_ACCOUNT_PAGE_HEADING": "অ্যাকাউন্ট মুছুন", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_1": "ব্যবহারকারী সেটিংস ও ইমেল পছন্দ", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_2": "ব্যক্তিগত অন্বেষণ এবং সংগ্রহ", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_3": "পাঠের অগ্রগতি", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_4": "ব্যবহারকারীর তৈরি অন্বেষণ ও সংগ্রহ সংক্রান্ত পরিসংখ্যান", + "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "মুছে ফেলা নিশ্চিত করতে, অনুগ্রহ করে নীচের স্থানটিতে আপনার ব্যবহারকারী নাম লিখুন ও 'আমার অ্যাকাউন্ট মুছুন' বোতামটি টিপুন। পরে এটি ফেরৎ পওয়া সম্ভব নয়", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "এর ফলে এই ব্যবহারকারী অ্যাকাউন্ট এবং এই অ্যাকাউন্টের সাথে সম্পর্কিত সমস্ত ব্যক্তিগত উপাত্ত মুছে ফেলা হবে৷ ইতিমধ্যেই সর্বজনীন উপাত্ত বেনামি করা হবে যাতে এটি ব্যাক-আপ উপাত্ত (যা ৬ মাসের 
জন্য সংরক্ষণ করা হয়) ব্যতীত এই অ্যাকাউন্টের সাথে যুক্ত করা যাবে না। নীচে উল্লিখিত কিছু বিষয়শ্রেণী আপনার অ্যাকাউন্টে প্রযোজ্য নাও হতে পারে।", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "একনজরে", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "নিম্নোক্ত প্রকারের উপাত্ত মুছে ফেলা হবে:", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "নিম্নোক্ত প্রকারের উপাত্ত বেনামি করা হবে:", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "এছাড়াও, প্রকাশিত অন্বেষণ এবং সংগ্রহ যেগুলির অন্য কোনও মালিক নেই সেগুলি সম্প্রদায়ের মালিকানায় স্থানান্তরিত হবে৷", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "অ্যাকাউন্ট অপসারণ প্রক্রিয়া সম্পর্কে আপনার কোনো প্রশ্ন বা উদ্বেগ থাকলে, অনুগ্রহ করে privacy@oppia.org- ঠিকানায় একটি ইমেল পাঠান।", + "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "এটি আপনাকে সেই পাতায় নিয়ে যাবে যেখানে আপনি আপনার ওপিয়া অ্যাকাউন্ট মুছে ফেলতে পারবেন।", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "অ্যাকাউন্ট মুছুন | ওপিয়া", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "চিত্র এই এলাকায় টেনে আনুন", "I18N_DIRECTIVES_UPLOAD_A_FILE": "একটি চিত্র আপলোড করুন", + "I18N_DONATE_PAGE_BREADCRUMB": "দান করুন", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "দান করুন | কিছু ইতিবাচক প্রভাব তৈরি করুন | ওপিয়া", + "I18N_DONATE_PAGE_BUDGET_HEADING": "আপনার টাকা কোথায় যায়?", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "রক্ষণাবেক্ষণ", + "I18N_DONATE_PAGE_FAQ_ANSWER_10": "Oppia সম্পর্কে সাধারণ প্রশ্নের জন্য, অনুগ্রহ করে contact@oppia.org-এ যোগাযোগ করুন।", + "I18N_DONATE_PAGE_FAQ_HEADING_TEXT": "প্রায়শই জিজ্ঞাসিত প্রশ্নসমূহ", + "I18N_DONATE_PAGE_FAQ_QUESTION_1": "ওপিয়া কি?", + "I18N_DONATE_PAGE_FAQ_QUESTION_2": "কেন ওপিয়া বিদ্যমান?", + "I18N_DONATE_PAGE_FAQ_QUESTION_3": "ওপিয়া কীভাবে প্রভাব পরিমাপ করছে এবং প্ল্যাটফর্মটি এখন পর্যন্ত কী করেছে?", + "I18N_DONATE_PAGE_FAQ_QUESTION_6": "আমি কিভাবে চেকের মাধ্যমে দেব?", + "I18N_DONATE_PAGE_FAQ_QUESTION_9": "আমি একজন কর্পোরেট অংশীদার হতে আগ্রহী হলে কি এমন কেউ আছে যার সাথে আমি কথা বলতে পারি?", + "I18N_DONATE_PAGE_HEADING_2": "উচ্চ মানের এবং আকর্ষক শিক্ষা।", + 
"I18N_DONATE_PAGE_IMAGE_TITLE": "আপনার উদার উপহার তহবিল:", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_1": "ভারতের খানপুর থেকে", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "ভারত থেকে", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "আমাদের ব্লগ পড়ুন", + "I18N_DONATE_PAGE_STATISTIC_2": "আমাদের ভার্চুয়াল লাইব্রেরিতে পাঠ", + "I18N_DONATE_PAGE_STATISTIC_3": "র‍্যান্ডমাইজড ট্রায়াল সম্পন্ন হয়েছে, আরও কিছু আসবে", + "I18N_DONATE_PAGE_STATISTIC_4": "সারা বিশ্বের স্বেচ্ছাসেবক", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_HEADING": "সাবস্ক্রাইব করার জন্য ধন্যবাদ!", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_1": "আপনি শীঘ্রই আপনার ইনবক্সে হালনাগাদ পেতে শুরু করবেন৷ আমরা স্প্যাম না করার প্রতিশ্রুতি দিই, এবং আপনি যেকোনো সময় সদস্যতা ত্যাগ করতে পারেন।", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_2": "আমাদের সম্প্রদায়ের (আপনি সহ!) সহায়তা এবং সমর্থনে, ওপিয়া সারা বিশ্বে সবচেয়ে কম-সম্পদহীন শিক্ষার্থীদের পরিষেবা দিয়ে চলেছে এবং চালিয়ে যাচ্ছে।", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "ইমেইল ঠিকানা", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_NAME_LABEL_TEXT": "নাম (ঐচ্ছিক)", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_SUBSCRIBE_LABEL_TEXT": "এখনই সাবস্ক্রাইব করুন", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_1": "আজই আমাদের সাথে যোগদান করুন!", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "দান করার জন্য ধন্যবাদ!", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_3": "আপনার যদি কোনো প্রশ্ন থাকে তবে, যে কোনো সময় যোগাযোগ করুন।", + "I18N_DONATE_PAGE_TITLE": "ওপিয়া ফাউন্ডেশনে দান করুন", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "একটি ভিডিও দেখুন", + "I18N_EMPTY_LEARNER_GROUPS_MESSAGE": "এখনও আপনার কোনও গোষ্ঠী নেই", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "আপনি সবেমাত্র আপনার ১ম অধ্যায় শেষ করেছেন!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_2": "আপনি সবেমাত্র আপনার ৫ম অধ্যায় শেষ করেছেন!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_3": "আপনি সবেমাত্র আপনার ১০তম অধ্যায় সম্পূর্ণ করেছেন!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_4": "আপনি সবেমাত্র আপনার ২৫তম অধ্যায় সম্পূর্ণ করেছেন!", + 
"I18N_END_CHAPTER_MILESTONE_MESSAGE_5": "আপনি সবেমাত্র আপনার ৫০তম অধ্যায় সম্পূর্ণ করেছেন!", + "I18N_END_CHAPTER_NEXT_CHAPTER_TEXT": "পরবর্তী পাঠে!", + "I18N_END_CHAPTER_PRACTICE_SESSION_TEXT": "আপনার নতুন অর্জিত দক্ষতা অনুশীলন করুন!", + "I18N_END_CHAPTER_REVISION_TAB_TEXT": "আপনি এ পর্যন্ত যা শিখেছেন তা আরেকবার দেখে নিন!", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "আপনি পরবর্তী জিনিস কী করতে পারেন তা এখানে!", "I18N_ERROR_DISABLED_EXPLORATION": "অন্বেষণ নিষ্ক্রিয় আছে", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "দুঃখিত, কিন্তু যে অন্বেষণে আপনি ক্লিক করেছেন তা বর্তমানে নিষ্ক্রিয় আছে। অনুগ্রহ করে একটু পরে আবার চেষ্টা করুন।", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "অন্বেষণ নিষ্ক্রিয় আছে - Oppia", @@ -86,14 +258,23 @@ "I18N_ERROR_MESSAGE_401": "আপনি সেখানে যেতে পারবেন না। দ্রুত করুন, শিক্ষক আসার আগেই ফিরে যান!", "I18N_ERROR_MESSAGE_404": "দুঃখিত, আমরা খুঁজেছি এবং খুঁজেছি কিন্তু আমরা সেই পাতাটি খুঁজে পাই নি।", "I18N_ERROR_MESSAGE_500": "কিছু একটায় ভয়ঙ্করভাবে ভুল হয়েছে। কিন্তু এটা আপনার দোষ নয়। একটি অভ্যন্তরীণ ত্রুটি ঘটেছে।", - "I18N_ERROR_NEXT_STEPS": "এই মূহুর্তে সম্ভবত সবচেয়ে ভালো কাজ করা হল \">প্রধান পাতায় ফেরত যাওয়া। তবে, যদি এই সমস্যা পুনরাবৃত্ত হয়, আপনি কি মনে করেন এটা হওয়ার কথা নয়, দয়া করে আমাদের \" target=\"_blank\">সমস্যা অনুসরণে এটির কথা জানান। এটি জন্য দুঃখিত।", - "I18N_ERROR_PAGE_TITLE_400": "ত্রুটি ৪০০ - Oppia", - "I18N_ERROR_PAGE_TITLE_401": "ত্রুটি ৪০১ - Oppia", - "I18N_ERROR_PAGE_TITLE_404": "ত্রুটি ৪০৪ - Oppia", - "I18N_ERROR_PAGE_TITLE_500": "ত্রুটি ৫০০ - Oppia", + "I18N_ERROR_NEXT_STEPS": "এই মূহুর্তে সম্ভবত সবচেয়ে ভালো যা করতে পারেন তা হলো \">প্রধান পাতায় ফেরত যাওয়া। তবে, যদি এই সমস্যার পুনরাবৃত্তি হয়, এবং আপনার মনে হয় সেটা হওয়া উচিত নয়, তবে দয়া করে আমাদের \" target=\"_blank\">সমস্যা চিহ্নিতকারী পাতায় জানান। এটি জন্য দুঃখিত।", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "ত্রুটি <[statusCode]> | ওপিয়া", + "I18N_ERROR_PAGE_TITLE": "ত্রুটি <[statusCode]> - ওপিয়া", + "I18N_ERROR_PAGE_TITLE_400": "ত্রুটি ৪০০ - 
ওপিয়া", + "I18N_ERROR_PAGE_TITLE_401": "ত্রুটি ৪০১ - ওপিয়া", + "I18N_ERROR_PAGE_TITLE_404": "ত্রুটি ৪০৪ - ওপিয়া", + "I18N_ERROR_PAGE_TITLE_500": "ত্রুটি ৫০০ - ওপিয়া", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "অনুপাত কী?", + "I18N_EXPLORATION_PLAYER_PAGE_TITLE": "<[explorationTitle]> - ওপিয়া", + "I18N_EXPLORATION_STATE_PREVIOUSLY_COMPLETED": "আপনি আগের পর্বে এই প্রশ্নের উত্তর দিয়েছেন।", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "বিয়োগ কী?", + "I18N_FACILITATOR_DASHBOARD_PAGE_TITLE": "সুবিধাদানকীর যতিফলক | ওপিয়া", + "I18N_FEEDBACK_INSTRUCTION": "প্রতিক্রিয়া বার্তা সর্বাধিক <[count]> অক্ষরের হওয়া উচিত।", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "নামহীন", "I18N_FOOTER_ABOUT": "সম্পর্কে", - "I18N_FOOTER_ABOUT_ALL_CAPS": "OPPIA সম্পর্কে", + "I18N_FOOTER_ABOUT_ALL_CAPS": "ওপিয়া সম্পর্কে", + "I18N_FOOTER_ANDROID_APP": "অ্যান্ড্রয়েড অ্যাপ", "I18N_FOOTER_AUTHOR_PROFILES": "লেখকের প্রোফাইল", "I18N_FOOTER_BROWSE_LIBRARY": "গ্রন্থাগার ব্রাউজ করুন", "I18N_FOOTER_CONTACT_US": "যোগাযোগ", @@ -106,13 +287,16 @@ "I18N_FOOTER_GET_STARTED": "শুরু করুন", "I18N_FOOTER_OPPIA_FOUNDATION": "Oppia ফাউন্ডেশন", "I18N_FOOTER_PRIVACY_POLICY": "গোপনীয়তার নীতি", - "I18N_FOOTER_TEACH": "Oppia দিয়ে শিখান", + "I18N_FOOTER_TEACH": "ওপিয়ার মাধ্যমে শেখান", "I18N_FOOTER_TEACH_LEARN_ALL_CAPS": "শিখান/শিখুন", + "I18N_FOOTER_TEACH_PAGE": "অভিভাবক/শিক্ষকদের জন্য", "I18N_FOOTER_TERMS_OF_SERVICE": "পরিষেবার শর্তাদি", "I18N_FORMS_TYPE_NUMBER": "একটি নম্বর লিখুন", "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "দয়া করে একটি সংখ্যা লিখুন যা <[minValue]> থেকে ছোট নয়।", "I18N_FORMS_TYPE_NUMBER_AT_MOST": "দয়া করে একটি সংখ্যা লিখুন যা <[maxValue]> থেকে বড় নয়।", "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "দয়া করে একটি বৈধ দশমিক নম্বর লিখুন।", + "I18N_GENERATE_ATTRIBUTION": "কৃতিত্ব তৈরি করুন", + "I18N_GET_STARTED_PAGE_BREADCRUMB": "শুরু করুন", "I18N_GET_STARTED_PAGE_HEADING": "শুরু করুন!", "I18N_GET_STARTED_PAGE_PARAGRAPH_11_HEADING": "জড়িত হোন", "I18N_GET_STARTED_PAGE_PARAGRAPH_2_HEADING": "একটি প্রসঙ্গ চয়ন 
করুন", @@ -121,13 +305,22 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_8_HEADING": "আপনার অন্বেষণ শেয়ার করুন", "I18N_GET_STARTED_PAGE_PARAGRAPH_9_HEADING": "আপনার অন্বেষণের উন্নতি করুন", "I18N_GET_STARTED_PAGE_TITLE": "শুরু করুন", + "I18N_GOT_IT": "বুঝেছি", "I18N_HEADING_VOLUNTEER": "সেচ্ছাসেবী", + "I18N_HINT_NEED_HELP": "সাহায্য দরকার? এই সমস্যা সমাধানের আভাস দেখুন!", + "I18N_HINT_TITLE": "আভাস", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "ক/খ হিসেবে অনুপাত দিন।", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "প্রান্ত যোগ করুন", "I18N_INTERACTIONS_GRAPH_ADD_NODE": "সংযোগস্থল যোগ করুন", "I18N_INTERACTIONS_GRAPH_DELETE": "অপসারণ", - "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "অবৈধ গ্রাফ!", + "I18N_INTERACTIONS_GRAPH_EDGE_FINAL_HELPTEXT": "প্রান্ত তৈরি করতে লক্ষ্য শীর্ষে আলতো চাপুন (প্রান্ত নির্মাণ বাতিল করতে একই শীর্ষে ক্লিক করুন)।", + "I18N_INTERACTIONS_GRAPH_EDGE_INITIAL_HELPTEXT": "তৈরি করতে প্রান্তের প্রাথমিক শীর্ষবিন্দুতে টোকা দিন।", + "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "অবৈধ লেখচিত্র!", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "একটি লেখচিত্র তৈরি করুন", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "লেখচিত্র দেখুন", "I18N_INTERACTIONS_GRAPH_MOVE": "স্থানান্তর", + "I18N_INTERACTIONS_GRAPH_MOVE_FINAL_HELPTEXT": "শীর্ষবিন্দুকে সেই বিন্দুতে সরাতে যেকোন বিন্দুতে টোকা দিন।", + "I18N_INTERACTIONS_GRAPH_MOVE_INITIAL_HELPTEXT": "সরানোর জন্য শীর্ষবিন্দুতে টোকা দিন।", "I18N_INTERACTIONS_GRAPH_RESET_BUTTON": "পুন:স্থাপন করুন", "I18N_INTERACTIONS_GRAPH_RESPONSE_EDGE": "<[edges]>টি প্রান্ত", "I18N_INTERACTIONS_GRAPH_RESPONSE_EDGES": "<[edges]>টি প্রান্ত", @@ -135,45 +328,93 @@ "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "ও <[vertices]>টি শীর্ষবিন্দু", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "লেবেল হালনাগাদ করুন", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "ওজন হালনাগাদ করুন", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "ছবিতে ক্লিক করুন", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[প্রদর্শন করার জন্য একটি চিত্র নির্বাচন করুন]", - 
"I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{দয়া করে অন্তত একটি বিকল্প নির্বাচন করুন।} other{দয়া করে অন্তত #টি বিকল্প নির্বাচন করুন।}}", + "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "আপনি আরও পছন্দ নির্বাচন করতে পারেন।", + "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{দয়া করে সমস্ত সঠিক বিকল্প নির্বাচন করুন।} other{দয়া করে # বা ততধিক বিকল্প নির্বাচন করুন।}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{১টির বেশী বিকল্প নির্বাচন করা যাবে না।} other{#টির বেশী বিকল্প নির্বাচন করা যাবে না।}}", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "মানচিত্রে ক্লিক করুন", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "মানচিত্র দেখুন", + "I18N_INTERACTIONS_MATH_EQ_INSTRUCTION": "এখানে একটি সমীকরণ লিখুন।", "I18N_INTERACTIONS_MUSIC_CLEAR": "পরিষ্কার করুন", "I18N_INTERACTIONS_MUSIC_PLAY": "চালান", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "লক্ষ্য অনুক্রম চালান", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "অনুগ্রহ করে একটি বৈধ মুদ্রা লিখুন (যেমন, $৫ বা ₹৫)", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY_FORMAT": "অনুগ্রহ করে শুরুতে মুদ্রার একক লিখুন", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "সম্ভাব্য এককের বিন্যাসন", "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "আপনি কি নিশ্চিত যে আপনি আপনার কোড পুনঃস্থাপন করতে চান?", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "বাতিল", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "অনুমোদন প্রয়োজন", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "কোড পুনঃস্থাপন করুন", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "অনুগ্রহ করে একটি বৈধ অনুপাত লিখুন (যেমন, ১:২ বা ১:২:৩)।", + "I18N_INTERACTIONS_RATIO_INVALID_FORMAT": "অনুগ্রহ করে একটি বৈধ অনুপাত লিখুন (যেমন, ১:২ বা ১:২:৩)।", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "উপাদান যোগ করুন", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "ওহো, এটা দেখে মনে হচ্ছে আপনার সেটে সদৃশ্য রয়েছে!", 
"I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(লাইন প্রতি একটি উপাদান যোগ করুন।)", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "কোন উত্তর দেয়া হয়নি।", "I18N_INTERACTIONS_SUBMIT": "জমা দিন", - "I18N_LANGUAGE_FOOTER_VIEW_IN": "এতে Oppia দেখুন:", + "I18N_LANGUAGE_FOOTER_VIEW_IN": "এতে ওপিয়া দেখুন:", + "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "শুভ অপরাহ্ন", + "I18N_LEARNER_DASHBOARD_ALL": "সব", + "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "লক্ষ্য সম্পাদনা করুন", "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "ব্রোঞ্জ", + "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "সম্প্রদায় পাঠ", + "I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "লক্ষ্য পূরণ", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "সম্পন্ন হয়েছে", + "I18N_LEARNER_DASHBOARD_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "আপনি যেখানে ছেড়েছিলেন সেখান থেকে শুরু করুন", + "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "বর্তমান লক্ষ্য", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_HEADING": "শুরু করুন", "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "শুভসন্ধ্যা", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "উত্তর দিন", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_STATUS_CHANGE_MESSAGE": "অবস্থা '<[threadStatus]>'-এ পরিবর্তিত", + "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "লক্ষ্যসমূহ", "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "স্বর্ণ", + "I18N_LEARNER_DASHBOARD_HOME_SECTION": "নীড়", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "অসম্পূর্ণ", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "চলমান", + "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "নতুন কিছু শিখুন", "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "শুভ সকাল", "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "নতুন গল্পের সামগ্রী উপলব্ধ", + "I18N_LEARNER_DASHBOARD_PAGE_TITLE": "শিক্ষার্থীর যতিফলক | ওপিয়া", "I18N_LEARNER_DASHBOARD_PLAYLIST_SECTION": "পরে চালান", - "I18N_LEARNER_DASHBOARD_REMOVE_ACTIVITY_MODAL_HEADER": "'<[sectionNameI18nId]>' তালিকা থেকে সরাবেন?", - "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "সরান", + 
"I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "অগ্রগতি", + "I18N_LEARNER_DASHBOARD_REMOVE_ACTIVITY_MODAL_HEADER": "'<[sectionNameI18nId]>' তালিকা থেকে অপসারণ করবেন?", + "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "অপসারণ করুন", + "I18N_LEARNER_DASHBOARD_RETURN_TO_FEEDBACK_THREADS_MESSAGE": "বার্তা তালিকায় ফিরে যান", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "পাঠান", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "পাঠানো হচ্ছে...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "রৌপ্য", "I18N_LEARNER_DASHBOARD_SKILLS": "দক্ষতা", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "দক্ষতার অগ্রগতি", + "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "সম্পন্ন হওয়া গল্পসমূহ", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "সদস্যতা", - "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "অগ্রগতি", + "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "অগ্রগতি:", "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "বর্তমান:", + "I18N_LEARNER_DASHBOARD_SUGGESTION_DESCRIPTION": "পরিবর্তনের সংক্ষিপ্ত বিবরণ:", "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "পরামর্শকৃত:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "পরামর্শ", + "I18N_LEARNER_DASHBOARD_VIEW": "দেখুন", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "পরামর্শ দেখুন", + "I18N_LEARNER_GROUPS_SECTION_TITLE": "আপনার শিক্ষার্থী গোষ্ঠী", + "I18N_LEARNER_GROUP_ADD_GROUP_DETAILS": "গোষ্ঠীর বিবরণ যোগ করুন", + "I18N_LEARNER_GROUP_ADD_SYLLABUS_ITEMS": "পাঠ্যক্রম সামগ্রী যোগ করুন", + "I18N_LEARNER_GROUP_ADD_TO_SYLLABUS": "পাঠক্রম যোগ করুন", + "I18N_LEARNER_GROUP_CREATED_TITLE": "আপনার গোষ্ঠী <[groupName]> তৈরি করা হয়েছে।", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "পরবর্তী", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "পূর্ববর্তী ধাপ", + "I18N_LEARNER_GROUP_DETAILS_GROUP_DESCRIPTION": "গোষ্ঠীর বর্ণনা (২-৪ পংক্তিতে গোষ্ঠীর উদ্দেশ্য বর্ণনা করুন)", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "গোষ্ঠী শিরোনাম", + "I18N_LEARNER_GROUP_INVITE_LIST_TEXT": "আমন্ত্রণ তালিকা", + 
"I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "সংযোজিত", + "I18N_LEARNER_GROUP_MINIMUM_SYLLABUS_ITEMS_INFO": "একটি গোষ্ঠী তৈরি করার জন্য আপনাকে কমপক্ষে একটি পাঠ্যক্রম সামগ্রী (দক্ষতা/গল্প) যোগ করতে হবে।", + "I18N_LEARNER_GROUP_NO_ITEMS_ADDED": "আপনি এখনও নতুন সামগ্রী যোগ করেননি।", + "I18N_LEARNER_GROUP_NO_RESULTS_FOUND": "কোনও ফলাফল পাওয়া যায়নি।", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "বিস্তারিত দেখুন", + "I18N_LEARNT_TOPIC": "<[topicName]> শিখেছি", + "I18N_LEARN_TOPIC": "<[topicName]> শিখুন", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "পাঠের লেখক", + "I18N_LESSON_INFO_HEADER": "পাঠের তথ্য", + "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "আপনি এটি সম্পূর্ণ করেছেন", "I18N_LIBRARY_ALL_CATEGORIES": "সব বিষয়শ্রেণী", "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "সব বিষয়শ্রেণী নির্বাচিত হয়েছে", "I18N_LIBRARY_ALL_LANGUAGES": "সকল ভাষা", @@ -229,31 +470,53 @@ "I18N_LIBRARY_MAIN_HEADER": "কল্পনা করুন আপনি আজ কি শিখতে পারেন...", "I18N_LIBRARY_N/A": "প্র/ন", "I18N_LIBRARY_NO_EXPLORATIONS": "ওহো, দেখানোর জন্য কোন অন্বেষণ নেই।", - "I18N_LIBRARY_NO_EXPLORATION_FOR_QUERY": "ওহো, এটা দেখে মনে হচ্ছে আপনার অনুসন্ধান কোন অন্বেষণের সাথে মেলেনি।", + "I18N_LIBRARY_NO_EXPLORATION_FOR_QUERY": "ওহো, বোধহয় আপনার অনুসন্ধান কোন অন্বেষণের সাথে মেলেনি।", "I18N_LIBRARY_NO_EXPLORATION_GROUPS": "প্রদর্শনের জন্য এখানে আর কোন অন্বেষণ উপলব্ধ নেই।", "I18N_LIBRARY_NO_OBJECTIVE": "কোন উদ্দেশ্য নির্দিষ্ট করা হয়নি।", "I18N_LIBRARY_N_CATEGORIES": "{categoriesCount, plural, =1{১টি বিষয়শ্রেণী} other{#টি বিষয়শ্রেণী}}", "I18N_LIBRARY_N_LANGUAGES": "{languagesCount, plural, =1{১টি ভাষা} other{#টি ভাষা}}", - "I18N_LIBRARY_PAGE_TITLE": "গ্রন্থাগার অন্বেষণ - Oppia", - "I18N_LIBRARY_RATINGS_TOOLTIP": "টি মূল্যায়ন", + "I18N_LIBRARY_PAGE_BROWSE_MODE_TITLE": "ওপিয়া - থেকে শেখার জন্য অন্বেষণ খুঁজুন", + "I18N_LIBRARY_PAGE_TITLE": "সম্প্রদায়ের গ্রন্থাগার পাঠ | ওপিয়া", + "I18N_LIBRARY_RATINGS_TOOLTIP": "মূল্যায়নসমূহ", "I18N_LIBRARY_SEARCH_PLACEHOLDER": "আপনি কোন ব্যাপারে উৎসুক?", - "I18N_LIBRARY_SUB_HEADER": "আমাদের 
অন্বেষণ ব্রাউজ করার দ্বারা আপনার দুঃসাহসিক কাজ শুরু করুন।", + "I18N_LIBRARY_SUB_HEADER": "সম্প্রদায় কর্তৃক তৈরিকৃত পাঠের সম্পূর্ণ গুচ্ছ দেখুন", "I18N_LIBRARY_VIEWS_TOOLTIP": "টি দর্শন", "I18N_LIBRARY_VIEW_ALL": "সব দেখুন", "I18N_LICENSE_PAGE_LICENSE_HEADING": "লাইসেন্স", + "I18N_LICENSE_PAGE_PARAGRAPH_1": "ওপিয়ার পাঠের সমস্ত বিষয়বস্তু সিসি-বাই-এসএ ৪.০-এর অধীনে লাইসেন্সপ্রাপ্ত।", + "I18N_LICENSE_PAGE_PARAGRAPH_2": "ওপিয়ার চালক সফটওয়্যারটি উন্মুক্ত উৎসের, এবং এর কোড একটি Apache 2.0 লাইসেন্সের অধীনে প্রকাশিত।", + "I18N_LICENSE_PAGE_TITLE": "লাইসেন্স পাতা | ওপিয়া", + "I18N_LICENSE_TERMS_HEADING": "লাইসেন্সের শর্তাবলী", + "I18N_LOGIN_PAGE_TITLE": "প্রবেশ করুন | ওপিয়া", + "I18N_LOGOUT_LOADING": "প্রস্থান", + "I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "প্রস্থান | ওপিয়া", + "I18N_LOGOUT_PAGE_TITLE": "প্রস্থান", "I18N_MODAL_CANCEL_BUTTON": "বাতিল", "I18N_MODAL_CONTINUE_BUTTON": "অগ্রসর হোন", + "I18N_NEXT_LESSON": "পরবর্তী পাঠ", + "I18N_NO": "না", "I18N_ONE_SUBSCRIBER_TEXT": "আপনার ১ জন গ্রাহক রয়েছে।", + "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "অংশীদারিত্ব", + "I18N_PARTNERSHIPS_PAGE_TITLE": "অংশীদারিত্ব | ওপিয়া", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "মুছে ফেলা হবে এমন অ্যাকাউন্ট", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2_HEADING": "মুছে ফেলার বিবরণ", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_HEADING": "সম্প্রদায়ের নির্দেশিকা", + "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_TEXT": "এই নির্দেশিকাগুলির বিষয়ে আপনার যদি কোনও স্পষ্টীকরণের প্রয়োজন হয়, অনুগ্রহ করে আমাদের ফোরামে নির্দ্বিধায় জিজ্ঞাসা করুন৷", + "I18N_PLAYBOOK_HEADING": "নির্মাতার নির্দেশিকা", + "I18N_PLAYBOOK_PAGE_TITLE": "নির্মাতার নির্দেশিকা | ওপিয়া", "I18N_PLAYBOOK_PUBLICATION_POLICY_HEADING": "প্রকাশনা নীতি", "I18N_PLAYBOOK_TAB_PARTICIPATION_PLAYBOOK": "অংশগ্রহণ নীতিমালা", - "I18N_PLAYER_AUDIO_EXPAND_TEXT": "অডিও", + "I18N_PLAYER_AUDIO_EXPAND_TEXT": "পাঠ শুনুন", "I18N_PLAYER_AUDIO_LANGUAGE": "ভাষা", "I18N_PLAYER_AUDIO_LOADING_AUDIO": "অডিও লোড হচ্ছে...", + "I18N_PLAYER_AUDIO_MIGHT_NOT_MATCH_TEXT": 
"অডিও সম্পূর্ণরূপে পাঠ্যের সাথে নাও মিলতে পারে", "I18N_PLAYER_AUDIO_NOT_AVAILABLE_IN": "<[languageDescription]> ভাষায় উপলব্ধ নয়", "I18N_PLAYER_AUDIO_TRANSLATION_SETTINGS": "অডিও অনুবাদ সেটিংস", "I18N_PLAYER_BACK": "পিছনে", "I18N_PLAYER_BACK_TO_COLLECTION": "সংগ্রহে ফিরুন", + "I18N_PLAYER_BANDWIDTH_USAGE_WARNING_MODAL_BODY": "এই অডিও অনুবাদে <[fileSizeMB]>এমবি <[languageDescription]>-এর অডিও রয়েছে। ডাউনলোড চালিয়ে যেতে চান?", + "I18N_PLAYER_BANDWIDTH_USAGE_WARNING_MODAL_DOWNLOAD_ALL_AUDIO": "এই অন্বেষণে সমস্ত <[languageDescription]> অডিও ডাউনলোড করুন (<[fileSizeMB]>এমবি)", + "I18N_PLAYER_BANDWIDTH_USAGE_WARNING_MODAL_TITLE": "ব্যান্ডউইথ ব্যবহারের সতর্কতা", "I18N_PLAYER_CARD_NUMBER_TOOLTIP": "কার্ড #", "I18N_PLAYER_COMMUNITY_EDITABLE_TOOLTIP": "সম্প্রদায়ের সম্পাদনাযোগ্য", "I18N_PLAYER_CONTINUE_BUTTON": "অব্যাহত রাখুন", @@ -263,19 +526,27 @@ "I18N_PLAYER_EMBED_TOOLTIP": "এম্বেড", "I18N_PLAYER_FEEDBACK_TOOLTIP": "প্রতিক্রিয়া", "I18N_PLAYER_FORWARD": "অগ্রবর্তী", - "I18N_PLAYER_HINT": "ইঙ্গিত", - "I18N_PLAYER_HINT_IS_AVAILABLE": "একটি ইঙ্গিতের জন্য এখানে ক্লিক করুন!", - "I18N_PLAYER_HINT_NEED_A_HINT": "ইঙ্গিত দরকার?", + "I18N_PLAYER_GIVE_UP": "ছেড়ে দেবেন?", + "I18N_PLAYER_GIVE_UP_TOOLTIP": "উত্তরের জন্য এখানে ক্লিক করুন।", + "I18N_PLAYER_HINT": "আভাস", + "I18N_PLAYER_HINTS": "আভাস", + "I18N_PLAYER_HINTS_EXHAUSTED": "দুঃখিত, আমি আর আভাস দিতে পারছি না!", + "I18N_PLAYER_HINT_IS_AVAILABLE": "একটি আভাস পেতে এখানে ক্লিক করুন!", + "I18N_PLAYER_HINT_NEED_A_HINT": "আভাস দরকার?", + "I18N_PLAYER_HINT_NOT_AVAILABLE": "আভাস চাওয়ার পূর্বে একটু চিন্তা করার চেষ্টা করুন!", + "I18N_PLAYER_HINT_REQUEST_STRING_1": "আমি একটি আভাস চাই।", + "I18N_PLAYER_HINT_REQUEST_STRING_2": "আমি একটু আটকে আছি, কোন আভাস?", "I18N_PLAYER_HINT_REQUEST_STRING_3": "আমার কিছু সমস্যা হচ্ছে।", "I18N_PLAYER_INFO_TOOLTIP": "তথ্য", "I18N_PLAYER_IS_PRIVATE": "এই অনুসন্ধানটি ব্যক্তিগত।", "I18N_PLAYER_LAST_UPDATED_TOOLTIP": "সর্বশেষ হালনাগাদ", "I18N_PLAYER_LEARN_AGAIN_BUTTON": "আবার শিখুন", - 
"I18N_PLAYER_LEAVE_FEEDBACK": "লেখকের জন্য প্রতিক্রিয়া দিন...", + "I18N_PLAYER_LEAVE_FEEDBACK": "লেখকদের জন্য প্রতিক্রিয়া দিন। (জমা দেওয়া হলে, এতে আপনি বর্তমানে যে পত্রটির অন্বেষণে রয়েছেন তার একটি তথ্যসূত্রও এতে অন্তর্ভুক্ত থাকবে।)", "I18N_PLAYER_LOADING": "লোড হচ্ছে...", "I18N_PLAYER_NEXT_LESSON": "পরবর্তী পাঠ", "I18N_PLAYER_NO_OBJECTIVE": "কোন উদ্দেশ্য নির্দিষ্ট করা হয়নি।", "I18N_PLAYER_NO_TAGS": "কোন ট্যাগ নির্দিষ্ট করা হয়নি।", + "I18N_PLAYER_PLAY_EXPLORATION": "অন্বেষণ চালান", "I18N_PLAYER_PLUS_TAGS": "<[additionalTagNumber]>+টির বেশী ট্যাগ", "I18N_PLAYER_PREVIOUS_RESPONSES": "পূর্ববর্তী প্রতিক্রিয়া (<[previousResponses]>)", "I18N_PLAYER_RATE_EXPLORATION": "নতুন কিছু শিখেছেন? আপনি কিভাবে এই অন্বেষণ মূল্যায়ন করবেন?", @@ -297,6 +568,7 @@ "I18N_PLAYER_RETURN_TO_EDITOR": "সম্পাদকে ফেরত যান", "I18N_PLAYER_RETURN_TO_LIBRARY": "গ্রন্থাগারে ফিরুন", "I18N_PLAYER_RETURN_TO_PARENT": "প্রধান পাঠে ফিরে যান", + "I18N_PLAYER_RETURN_TO_STORY": "গল্পে ফেরত যান", "I18N_PLAYER_SHARE_EXPLORATION": "এই অন্বেষণ উপভোগ করছেন? 
আপনার বন্ধুদের এটির কথা জানান!", "I18N_PLAYER_SHARE_THIS_COLLECTION": "এই সংগ্রহ শেয়ার করুন", "I18N_PLAYER_SHARE_THIS_EXPLORATION": "এই অন্বেষণ ভাগ করুন", @@ -306,25 +578,36 @@ "I18N_PLAYER_THANK_FEEDBACK": "আপনার প্রতিক্রিয়ার জন্য আপনাকে ধন্যবাদ!", "I18N_PLAYER_UNRATED": "অমূল্যায়িত", "I18N_PLAYER_VIEWS_TOOLTIP": "টি দর্শন", + "I18N_PRACTICE_SESSION_PAGE_BREADCRUMB": "অনুশীলন পর্ব", + "I18N_PRACTICE_SESSION_PAGE_TITLE": "অনুশীলন পর্ব: <[topicName]> - ওপিয়া", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "অডিওর ভাষা", "I18N_PREFERENCES_BIO": "জীবনী", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "এই ক্ষেত্রটি ঐচ্ছিক। আপনি এখানে কিছু লিখলে তা সার্বজনীন ও বিশ্ব-দর্শনযোগ্য হবে।", "I18N_PREFERENCES_BREADCRUMB": "পছন্দসমূহ", "I18N_PREFERENCES_CANCEL_BUTTON": "বাতিল", "I18N_PREFERENCES_CHANGE_PICTURE": "প্রোফাইল চিত্র পরিবর্তন করুন", "I18N_PREFERENCES_EMAIL": "ইমেইল", + "I18N_PREFERENCES_EMAIL_CLARIFICATION": "আপনি কখন ওপিয়ার ইমেল পেতে চান অনুগ্রহ করে নীচে নির্দেশ করুন৷ এই জাতীয় প্রতিটি ইমেলে থাকা সদস্যতা ত্যাগ করার নির্দেশাবলী অনুসরণ করে অথবা আপনি সর্বদা এই পাতায় আপনার পছন্দসমূহ পরিবর্তন করতে পারেন।", "I18N_PREFERENCES_EMAIL_EXPLAIN": "শুধু নিয়ন্ত্রণকারী এবং সাইট প্রশাসকগণ আপনার ইমেইল ঠিকানা দেখতে পারেন।", "I18N_PREFERENCES_EMAIL_RECEIVE_EDIT_RIGHTS_NEWS": "ইমেইল পান যখন কেউ আপনাকে একটি অন্বেষণ সম্পাদনা করার অধিকার দেয়", "I18N_PREFERENCES_EMAIL_RECEIVE_FEEDBACK_NEWS": "ইমেইল পান যখন কেউ একটি অন্বেষণে আপনাকে প্রতিক্রিয়া পাঠায়", "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "সাইটটি সম্পর্কে সংবাদ ও হালনাগাদ গ্রহণ করুন", "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "ইমেইল গ্রহণ করুন যখন আপনার সদস্যতা নেয়া একজন একটি অন্বেষণ প্রকাশ করেন", + "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "আমরা আপনাকে স্বয়ংক্রিয়ভাবে আমাদের মেইলিং তালিকায় যুক্ত করতে পারিনি। আমাদের মেইলিং লিস্টে সাইন আপ করতে অনুগ্রহ করে নিচের লিঙ্কে যান:", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "অ্যাকাউন্ট রপ্তানি করুন", "I18N_PREFERENCES_HEADING": "পছন্দসমূহ", "I18N_PREFERENCES_HEADING_SUBTEXT": "এই পাতায় কোন পরিবর্তন 
করলে তা স্বয়ংক্রিয়ভাবে সংরক্ষিত হবে।", - "I18N_PREFERENCES_PAGE_TITLE": "আপনার প্রোফাইলের পছন্দসমূহ পরিবর্তন করুন - Oppia", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "প্রভাব", + "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE": "পছন্দসমূহ | ওপিয়া", + "I18N_PREFERENCES_PAGE_TITLE": "আপনার প্রোফাইলের পছন্দসমূহ পরিবর্তন করুন - ওপিয়া", "I18N_PREFERENCES_PICTURE": "চিত্র", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "পছন্দের অডিও ভাষা", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE_PLACEHOLDER": "পছন্দের অডিও ভাষা", + "I18N_PREFERENCES_PREFERRED_DASHBOARD": "পছন্দের যতিফলক", + "I18N_PREFERENCES_PREFERRED_DASHBOARD_EXPLAIN": "এই যতিফলকটি প্রবেশ করার সময় পূর্বনির্ধারিতভাবে দেখাবে।", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "পছন্দকৃত অন্বেষণের ভাষা", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "এইসব ভাষা পূর্বনির্ধারিতভাবে নির্বাচিত করা হবে যখন আপনি অন্বেষণের জন্য গ্যালারি অনুসন্ধান করেন।", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": "পছন্দের ভাষা নির্বাচন করুন।", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "পছন্দনীয় সাইটের ভাষায়", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "এটি হচ্ছে সেই ভাষা যাতে সাইটটি দেখাচ্ছে।", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "পছন্দনীয় সাইটের ভাষায়", @@ -332,25 +615,70 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "কাটতে ও পুনঃমাপ করতে টেনে আনুন:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "ত্রুটি: চিত্রের ফাইল পড়া যায়নি।", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "প্রোফাইল ছবি আপলোড করুন", + "I18N_PREFERENCES_SEARCH_LABEL": "অনুসন্ধান", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "পছন্দের ভাষা নির্বাচন করুন...", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "সাইটের ভাষা", "I18N_PREFERENCES_SUBJECT_INTERESTS": "আগ্রহের বিষয়", + "I18N_PREFERENCES_SUBJECT_INTERESTS_ERROR_TEXT": "বিষয় আগ্রহ অনন্য এবং ছোট হাতের হতে হবে।", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "উদাঃ গণিত, কম্পিউটার বিজ্ঞান, শিল্প, ...", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": 
"নতুন আগ্রহের বিষয় যোগ করুন (অক্ষর ও শূন্যস্থান ব্যবহার করে)...", + "I18N_PREFERENCES_SUBJECT_INTERESTS_LABEL": "নতুন আগ্রহের বিষয়", "I18N_PREFERENCES_SUBJECT_INTERESTS_PLACEHOLDER": "আগ্রহের বিষয় লিখুন...", + "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "আপনি সদস্যতা গ্রহণ করেছেন এমন নির্মাতাগণ", "I18N_PREFERENCES_USERNAME": "ব্যবহারকারী নাম", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "এখনো নির্বাচন করা হয়নি", - "I18N_SIDEBAR_ABOUT_LINK": "ওপিয়া সম্পর্কে", + "I18N_PRIVACY_POLICY_PAGE_TITLE": "গোপনীয়তা নীতি | ওপিয়া", + "I18N_PROFILE_NO_EXPLORATIONS": "এই ব্যবহারকারী এখনও কোনো অন্বেষণ তৈরি বা সম্পাদনা করেননি।", + "I18N_PROFILE_PAGE_TITLE": "পরিলেখ | ওপিয়া", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "আপনি কি চালিয়ে যেতে চান?", + "I18N_PROGRESS_REMINDER_RESTART_LESSON": "না, প্রথম থেকে পুনরায় শুরু করুন", + "I18N_PROGRESS_REMINDER_RESUME_LESSON": "হ্যাঁ, পাঠ আবার শুরু করুন", + "I18N_QUESTION_PLAYER_MY_DASHBOARD": "আমার যতিফলক", + "I18N_QUESTION_PLAYER_RETRY_TEST": "পরীক্ষা পুনরায় চেষ্টা করুন", + "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "গল্পে ফেরত যান", + "I18N_QUESTION_PLAYER_SKILL_DESCRIPTIONS": "দক্ষতার বিবরণ", + "I18N_QUESTION_PLAYER_TEST_FAILED": "পর্ব ব্যর্থ হয়েছে৷ দক্ষতা পর্যালোচনা করুন এবং আবার চেষ্টা করুন", + "I18N_QUESTION_PLAYER_TEST_PASSED": "পর্ব সম্পূর্ণ হয়েছে। সাবাশ!", + "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "নিবন্ধন পর্বের মেয়াদ শেষ", + "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "দুঃখিত, আপনার নিবন্ধন পর্ব মেয়াদ শেষ হয়ে গেছে। প্রক্রিয়াটি পুনরায় চালু করতে অনুগ্রহ করে \"নিবন্ধন চালিয়ে যান\"-এ ক্লিক করুন।", + "I18N_RESTART_EXPLORATION_BUTTON": "পাঠ পুনরায় শুরু করুন", + "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "পরীক্ষা পর্যালোচনা করুন", + "I18N_REVIEW_TEST_PAGE_TITLE": "পরীক্ষা পর্যালোচনা: <[storyName]> - ওপিয়া", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_1": "আপনার একটি অ্যাকাউন্ট থাকলে আপনার শেখার অগ্রগতি স্বয়ংক্রিয়ভাবে সংরক্ষিত হবে।", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "ইতিমধ্যে একটি অ্যাকাউন্ট আছে?", + 
"I18N_SAVE_EXPLORATION_PROGRESS_TEXT_3": "৭২ ঘন্টার জন্য অগ্রগতি সংরক্ষণ করতে নীচের সংযোগটি ব্যবহার করুন।", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_5": "নীচের লিঙ্কটি লিখুন বা অনুলিপি করুন", + "I18N_SAVE_PROGRESS_TEXT": "অগ্রগতি সংরক্ষণ করুন", + "I18N_SHARE_LESSON": "এই পাঠ শেয়ার করুন", + "I18N_SHOW_LESS": "কম দেখান", + "I18N_SHOW_MORE": "আরও দেখান", + "I18N_SHOW_SOLUTION_BUTTON": "সমাধান দেখান", + "I18N_SIDEBAR_ABOUT_LINK": "আমাদের সম্পর্কে", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "ওপিয়া ফাউন্ডেশন সম্পর্কে", "I18N_SIDEBAR_BLOG": "ব্লগ", "I18N_SIDEBAR_CLASSROOM": "শ্রেণিকক্ষ", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "প্রাথমিক গণিত", "I18N_SIDEBAR_CONTACT_US": "যোগাযোগ", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "আপনার কোন প্রশ্ন থাকলে সাহায্য করতে আমরা এখানেই রয়েছি।", "I18N_SIDEBAR_DONATE": "দান করুন", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "আপনার অবদান সকলকে মানসম্মত শিক্ষা প্রদানে সহায়তা করে।", "I18N_SIDEBAR_FORUM": "ফোরাম", - "I18N_SIDEBAR_GET_STARTED": "শুরু করুন", + "I18N_SIDEBAR_GET_INVOLVED": "জড়িত হোন", + "I18N_SIDEBAR_HOME": "প্রধান পাতা", + "I18N_SIDEBAR_LEARN": "শিখুন", "I18N_SIDEBAR_LIBRARY_LINK": "পাঠাগার", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "গণিত শেখা আরম্ভ করতে সহায়ক ও অনভিজ্ঞ-বান্ধব পাঠ।", "I18N_SIDEBAR_OPPIA_FOUNDATION": "ওপিয়া ফাউন্ডেশন", - "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Oppia দিয়ে শিখুন", + "I18N_SIDEBAR_PARTNERSHIPS": "অংশীদারিত্ব", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "আপনার অঞ্চলের শিক্ষার্থীদের জন্য মানসম্পন্ন শিক্ষা নিয়ে আসুন।", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "যোগ এবং বিয়োগ", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "সম্প্রদায়ের পাঠাগার", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "সম্প্রদায় কর্তৃক তৈরি অতিরিক্ত সম্পদ।", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "গুণ", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "সমস্ত পাঠ দেখুন", + "I18N_SIDEBAR_TEACH_WITH_OPPIA": "ওপিয়া দিয়ে শেখান", "I18N_SIDEBAR_VOLUNTEER": "সেচ্ছাসেবী", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "পাঠ তৈরি এবং 
উন্নত করতে আমাদের বিশ্বব্যাপী দলে যোগ দিন।", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "এই লেখার বাঁদিকে উপস্থিত বাক্সে টিক দেয়ার মাধ্যমে, আপনি এখানে থাকা <[sitename]> ব্যবহারের শর্তাবলী স্বীকার, একমত, এবং আবদ্ধ হতে গ্রহণ করছেন।", "I18N_SIGNUP_BUTTON_SUBMIT": "জমা দিন ও অবদান রাখা শুরু করুন", "I18N_SIGNUP_CC_TITLE": "ক্রিয়েটিভ কমন্স লাইসেন্স", @@ -365,6 +693,7 @@ "I18N_SIGNUP_ERROR_USERNAME_NOT_AVAILABLE": "এই ব্যবহারকারী নাম উপলবদ্ধ নয়।", "I18N_SIGNUP_ERROR_USERNAME_ONLY_ALPHANUM": "ব্যবহারকারী নামে শুধুমাত্র আলফানিউমেরিক অক্ষর থাকতে পারবে।", "I18N_SIGNUP_ERROR_USERNAME_TAKEN": "দুঃখিত, এই ব্যবহারকারী নাম ইতিমধ্যে নিয়ে নেয়া হয়েছে।", + "I18N_SIGNUP_ERROR_USERNAME_TOO_LONG": "ব্যবহারকারী নামে সর্বাধিক <[maxUsernameLength]> অক্ষর থাকতে পারবে।", "I18N_SIGNUP_ERROR_USERNAME_WITH_ADMIN": "'admin' যুক্ত ব্যবহারকারী নাম সংরক্ষিত।", "I18N_SIGNUP_ERROR_USERNAME_WITH_SPACES": "দয়া করে নিশ্চিত করুন যে আপনার ব্যবহারকারী নামে কোন শূন্যস্থান নেই।", "I18N_SIGNUP_FIELD_REQUIRED": "এই ক্ষেত্রেটি প্রয়োজনীয়।", @@ -373,6 +702,7 @@ "I18N_SIGNUP_LOADING": "লোড হচ্ছে", "I18N_SIGNUP_PAGE_TITLE": "সম্প্রদায়ে যোগ দিন - Oppia", "I18N_SIGNUP_REGISTRATION": "নিবন্ধন", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "পরে আর জিজ্ঞেস করবেন না", "I18N_SIGNUP_SEND_ME_NEWS": "আমাকে এই সাইট সম্পর্কে সংবাদ ও হালনাগাদ পাঠান", "I18N_SIGNUP_SITE_DESCRIPTION": "<[sitename]> হচ্ছে শিক্ষণীয় সম্পদের একটি উন্মুক্ত কমন্স। এতে থাকা সব উপাদান অবাধে পুনঃব্যবহার এবং ভাগ করা যাবে।", "I18N_SIGNUP_SITE_OBJECTIVE": "<[sitename]> উচ্চ মানের শিক্ষা সম্পদের একটি সেট সৃষ্টি এবং ক্রমাগত উন্নতি লালনপালনের জন্য বিদ্যমান যাতে তা অবাধে সবার নিকট উপলব্ধ হয়।", @@ -380,44 +710,157 @@ "I18N_SIGNUP_USERNAME": "ব্যবহারকারী নাম", "I18N_SIGNUP_USERNAME_EXPLANATION": "আপনার ব্যবহারকারী নাম আপনার অবদান পাশে প্রদর্শিত হবে।", "I18N_SIGNUP_WHY_LICENSE": "কেন সিসি-বাই-এসএ?", + "I18N_SOLICIT_ANSWER_DETAILS_FEEDBACK": "ঠিক আছে, এখন আপনার উত্তরে ফিরে যাওয়া যাক।", + "I18N_SOLUTION_EXPLANATION_TITLE": "ব্যাখ্যা:", "I18N_SOLUTION_TITLE": 
"সমাধান", + "I18N_SPLASH_BENEFITS_ONE": "ব্যক্তিগতকৃত শিক্ষা", + "I18N_SPLASH_BENEFITS_THREE": "সহজ-বোধ্য পাঠসমূহ", + "I18N_SPLASH_BENEFITS_TITLE": "আমাদের সুবিধা", + "I18N_SPLASH_BENEFITS_TWO": "গল্প-ভিত্তিক পাঠ", "I18N_SPLASH_FIRST_EXPLORATION_DESCRIPTION": "Oppia-এর পাঠ, যা অন্বেষণ নামেও পরিচিত, ব্যবহারকারীদের করার দ্বারা শেখায় যা স্থির ভিডিও ও লেখার থেকেও ভালো অভিজ্ঞতা প্রদান করে।", + "I18N_SPLASH_FOR_STUDENTS": "ছাত্রদের জন্য", + "I18N_SPLASH_FOR_TEACHERS": "শিক্ষকদের জন্য", + "I18N_SPLASH_FOR_VOLUNTEERS": "স্বেচ্ছাসেবকদের জন্য", + "I18N_SPLASH_ICON_ONE_TEXT": "১ মিলিয়ন+ ব্যবহারকারী", + "I18N_SPLASH_ICON_TWO_TEXT": "<[languageCount]>+ ভাষায় উপলভ্য", "I18N_SPLASH_JAVASCRIPT_ERROR_DESCRIPTION": "Oppia ইন্টারেক্টিভ কার্যক্রমে পূর্ণ মুক্ত, উন্মুক্ত উৎসের শেখার প্ল্যাটফর্ম যা 'অন্বেষণ' নামে পরিচিত। দুঃখের বিষয় হল, Oppia সঠিকভাবে কাজ করার জন্য আপনার ওয়েব ব্রাউজারে জাভাস্ক্রিপ্ট সক্রিয় করতে হবে এবং আপনার ওয়েব ব্রাউজারে জাভাস্ক্রিপ্ট অক্ষম করা আছে। জাভাস্ক্রিপ্ট সক্রিয় করতে আপনার সাহায্যের প্রয়োজন হলে, \">এখানে ক্লিক করুন।", "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "আপনাকে ধন্যবাদ।", "I18N_SPLASH_JAVASCRIPT_ERROR_TITLE": "আমাদের আপনার ব্রাউজারে জাভাস্ক্রিপ্ট প্রয়োজন", - "I18N_SPLASH_PAGE_TITLE": "Oppia: শিখান, শিখুন, অন্বেষণ করুন", + "I18N_SPLASH_LEARN_MORE": "আরও পড়ুন", + "I18N_SPLASH_PAGE_TITLE": "ওপিয়া | সবার জন্য বিনামূল্যে, অনলাইন ও মিথস্ক্রিয় পাঠ", "I18N_SPLASH_SECOND_EXPLORATION_DESCRIPTION": "অন্বেষণ তৈরি করা সহজ। তাঁদের খুব সহজেই প্রতিটি ছাত্রের প্রতিক্রিয়া এবং বিশ্বজুড়ে আপনার শিক্ষার্থীর অভিজ্ঞতার প্রবণতার উপর ভিত্তি করে অভিযোজিত করা যায়।", "I18N_SPLASH_SITE_FEEDBACK": "সাইট প্রতিক্রিয়া", - "I18N_SPLASH_SUBTITLE": "Oppia ইন্টারেক্টিভ পাঠ তৈরি করা সহজ করে তোলে যা শেখায় ও আকর্ষণীয়।", + "I18N_SPLASH_START_CONTRIBUTING": "অবদান রাখা শুরু করুন", + "I18N_SPLASH_START_LEARNING": "শেখা শুরু করো", + "I18N_SPLASH_START_TEACHING": "শিক্ষকতা শুরু করুন", + "I18N_SPLASH_STUDENTS_TITLE": "মজাদার এবং কার্যকরী শিক্ষা", + "I18N_SPLASH_STUDENT_DETAILS_1": "- 
মীরা, ছাত্রী, ফিলিস্তিন", + "I18N_SPLASH_STUDENT_DETAILS_2": "- ধীরাজ, ছাত্র, ভারত", + "I18N_SPLASH_STUDENT_DETAILS_3": "- সামা, ছাত্র, ফিলিস্তিন", + "I18N_SPLASH_STUDENT_DETAILS_4": "- গৌরব, ছাত্র, ভারত", + "I18N_SPLASH_SUBTITLE": "সকলের জন্য আকর্ষণীয় এবং কার্যকর মানসম্পন্ন শিক্ষা", + "I18N_SPLASH_TEACHERS_TITLE": "সহজেই আপনার জ্ঞান ভাগ করে নিন", + "I18N_SPLASH_TESTIMONIAL_2": "\"প্রশ্নগুলি সমাধান করার সময় আমি সত্যিই মজা পেয়েছি কারণ সেগুলিতে অনেক রঙিন নকশা এবং চিত্র ছিল। ছবিগুলি বিষয়গুলিকেও বোঝা সহজ করে তুলেছে!”", + "I18N_SPLASH_TESTIMONIAL_3": "\"আমি বিদ্যালয়ে অনুপাত শিখেছিলাম কিন্তু ওপিয়ার মাধ্যমে আমি অনেক নতুন জিনিসের সাথে পরিচিত হয়েছি যেমন, তিনটি সংখ্যার অনুপাত এবং সমন্বিত অনুপাত\"", + "I18N_SPLASH_TESTIMONIAL_4": "\"আমি অনেক নতুন গণিত শিখেছি এবং এটি খুব সহজ ছিল\"", "I18N_SPLASH_THIRD_EXPLORATION_DESCRIPTION": "Oppia আপনাকে বিষয়ের একটি বিস্তৃত পরিসরের উপর অন্বেষণ তৈরিভাগ করতে দেয়, যা শুধুমাত্র আপনার কল্পনা দ্বারা সীমাবদ্ধ।", - "I18N_SPLASH_TITLE": "বইয়ের বাইরে চিন্তা করুন।", + "I18N_SPLASH_TITLE": "সবার জন্য বিনামূল্যে শিক্ষা", + "I18N_SPLASH_VOLUNTEERS_CONTENT": "আপনি যেই হোন না কেন, ওপিয়াতে আপনার জন্যও জায়গা রয়েছে। প্রশ্ন প্রস্তাব করে, গ্রাফিক্সে অবদান রেখে বা পাঠ অনুবাদ করে পাঠের উন্নতি করতে আমাদের সবসময় আরও বেশি লোকের প্রয়োজন।", + "I18N_SPLASH_VOLUNTEERS_TITLE": "সম্প্রদায় কতৃক চালিত", "I18N_START_HERE": "শুরু করতে এখানে ক্লিক করুন!", + "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - সম্পন্ন!", + "I18N_STORY_VIEWER_PAGE_TITLE": "<[topicName]> | <[storyTitle]> শিখুন | ওপিয়া", "I18N_SUBSCRIBE_BUTTON_TEXT": "সদস্যতা নিন", - "I18N_TEACH_PAGE_HEADING": "বিশ্বজুড়ে শিক্ষার্থীদের সহায়তা করুন", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "অনুপাত কী?", + "I18N_SUBTOPIC_C4fqwrvqWpRm_basic-concepts_TITLE": "গুণের মৌলিক ধারণা", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "পরবর্তী দক্ষতা:", + "I18N_SUBTOPIC_VIEWER_PAGE_TITLE": "<[subtopicTitle]> পর্যালোচনা করুন | ওপিয়া", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "পূর্ববর্তী দক্ষতা:", + 
"I18N_SUBTOPIC_dLmjjMDbCcrf_algebraic-expressions_TITLE": "বীজগাণিতিক রাশি সরলীকরণ", + "I18N_SUBTOPIC_dLmjjMDbCcrf_problem-solving_TITLE": "সমস্যা সমাধানের কৌশল", + "I18N_SUBTOPIC_qW12maD4hiA8_basic-concepts_TITLE": "ভাগের মৌলিক ধারণা", + "I18N_SYLLABUS_SKILL_TITLE": "দক্ষতা", + "I18N_SYLLABUS_STORY_TITLE": "গল্প", + "I18N_TEACH_BENEFITS_ONE": "সব বয়সের জন্য কার্যকর, উচ্চ-মানের শিক্ষা", + "I18N_TEACH_BENEFITS_THREE": "সর্বদা বিনামূল্যে এবং সহজসাধ্য", + "I18N_TEACH_BENEFITS_TITLE": "আমাদের সুবিধা", + "I18N_TEACH_BENEFITS_TWO": "মজাদার, গল্প-ভিত্তিক পাঠ", + "I18N_TEACH_PAGE_ACTION_START_LEARNING": "শেখা শুরু করুন", + "I18N_TEACH_PAGE_CLASSROOM_BUTTON": "শ্রেণিকক্ষে যান", + "I18N_TEACH_PAGE_HEADING": "পিতামাতা, শিক্ষক ও অভিভাবকদের জন্য ওপিয়া", + "I18N_TEACH_PAGE_LIBRARY_BUTTON": "গ্রন্থাগার সন্ধান করুন", + "I18N_TEACH_PAGE_SIX_TITLE": "আজই শেখা শুরু করুন", + "I18N_TEACH_PAGE_TITLE": "পিতামাতা এবং শিক্ষকদের জন্য ওপিয়ার নির্দেশিকা | ওপিয়া", + "I18N_TEACH_STUDENT_DETAILS_1": "রিয়া সোগানি", + "I18N_TEACH_STUDENT_DETAILS_2": "ওয়ালা আওয়াদ", + "I18N_TEACH_STUDENT_DETAILS_3": "হিমাংশু তানেজা, কুরুক্ষেত্র, ভারত", + "I18N_TEACH_STUDENT_DETAILS_4": "ইয়ামামা, সুবিধাদানকারী, ফিলিস্তিন", + "I18N_TEACH_TESTIMONIAL_1": "“আমি কৃতজ্ঞ যে আমি সুবিধাবঞ্চিত ভারতীয় শিশুদের শিক্ষিত করার সুযোগ পেয়েছি এবং তাদের গণিতের সমালোচনামূলক ধারণাগুলির বোঝার শূন্যতা পূরণ করতে পেরেছি। এই ছাত্রদের আত্মবিশ্বাস বাড়তে দেখে তারা শিখেছে অতিরিক্ত সময়ের মূল্য কী।”", + "I18N_TEACH_TESTIMONIAL_2": "“ওপিয়াই এই ক্ষেত্রে প্রথম! 
যা শিক্ষার্থীদের আকর্ষণীয় ও মনোরম উপায়ে কোনও নির্দিষ্ট বিষয় সম্পর্কে তাদের প্রয়োজনীয় সমস্ত কিছু শিখতে সহায়তা করে; এটি তাদের নিজেদের ভালোর জন্য স্মার্ট ডিভাইস ব্যবহার করতে উৎসাহিত করে।\"", + "I18N_TEACH_TESTIMONIAL_3": "“আমি কখনই আশা করিনি যে শিক্ষার্থীরা এত দ্রুত প্রযুক্তি শিখবে ও গণিত পাঠ করবে। এটি স্মার্টটেকের সাথে তাদের প্রথম পরিচয় এবং তারা প্রথমে সেগুলো চালাতে গিয়ে সত্যিই অসুবিধার সম্মুখীন হয়েছিল। এখন, আমি শ্রেণিকক্ষে ঢোকার আগেই তাদের ওপিয়া পাঠ করতে দেখে খুব আনন্দিত বোধ করছি!”", + "I18N_TERMS_PAGE_TITLE": "ব্যবহারের শর্তাবলী | ওপিয়া", + "I18N_THANKS_PAGE_BREADCRUMB": "ধন্যবাদ", + "I18N_THANKS_PAGE_TITLE": "ধন্যবাদ | ওপিয়া", + "I18N_TIME_FOR_BREAK_BODY_1": "আপনি খুব দ্রুত উত্তর জমা করছেন বলে মনে হচ্ছে। আপনার কি ক্লান্তি অনুভব হচ্ছে?", + "I18N_TIME_FOR_BREAK_BODY_2": "যদি তাই হয়, তবে একটি বিরতি নেওয়ার কথা বিবেচনা করুন! আপনি পরে ফিরে আসতে পারেন।", + "I18N_TIME_FOR_BREAK_FOOTER": "আমি পাঠ চালিয়ে যেতে প্রস্তুত", + "I18N_TIME_FOR_BREAK_TITLE": "বিরতি চান?", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "ভগ্নাংশ", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "গুণ", + "I18N_TOPIC_LANDING_PAGE_TITLE": "<[topicTitle]> | <[topicTagline]> | ওপিয়া", + "I18N_TOPIC_LEARN": "শিখুন", + "I18N_TOPIC_TITLE": "প্রসঙ্গ", + "I18N_TOPIC_VIEWER_CHAPTER": "অধ্যায়", + "I18N_TOPIC_VIEWER_COMING_SOON": "শীঘ্রই আসছে!", + "I18N_TOPIC_VIEWER_COMING_SOON_LESSONS": "এই প্রসঙ্গের জন্য পাঠ উপলব্ধ হলে পরে ফিরে আসুন।", + "I18N_TOPIC_VIEWER_COMING_SOON_PRACTICE": "এই প্রসঙ্গের উপর অনুশীলন প্রশ্ন উপলব্ধ হলে পরে ফিরে আসুন।", "I18N_TOPIC_VIEWER_DESCRIPTION": "বিবরণ", + "I18N_TOPIC_VIEWER_LESSON": "পাঠ", + "I18N_TOPIC_VIEWER_LESSONS": "পাঠসমূহ", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "এই প্রসঙ্গের জন্য পাঠ উপলব্ধ হলে পরে ফিরে আসুন।", + "I18N_TOPIC_VIEWER_MASTER_SKILLS": "<[topicName]> প্রসঙ্গের দক্ষতা আয়ত্ত করুন", + "I18N_TOPIC_VIEWER_PAGE_TITLE": "<[topicName]> | <[pageTitleFragment]> | ওপিয়া", "I18N_TOPIC_VIEWER_PRACTICE": "অনুশীলন করুন", + 
"I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_MESSAGE": "অনুশীলন বৈশিষ্ট্যটি এখনও বিটাতে সংস্করণে রয়েছে এবং শুধুমাত্র ইংরেজিতে উপলব্ধ। আপনি কি চালিয়ে যেতে চান?", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_TITLE": "অনুশীলনের ভাষা নিশ্চিত করুন", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_TAG": "(বেটা)", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "এই প্রসঙ্গের উপর অনুশীলন প্রশ্ন উপলব্ধ হলে পরে ফিরে আসুন।", + "I18N_TOPIC_VIEWER_REVISION": "সংশোধন", + "I18N_TOPIC_VIEWER_SKILL": "দক্ষতা", + "I18N_TOPIC_VIEWER_SKILLS": "দক্ষতা", "I18N_TOPIC_VIEWER_START_PRACTICE": "শুরু করুন", + "I18N_TOPIC_VIEWER_STORIES": "গল্পসমূহ", "I18N_TOPIC_VIEWER_STORY": "গল্প", + "I18N_TOPIC_VIEWER_STUDY_SKILLS": "<[topicName]> প্রসঙ্গের দক্ষতা অধ্যয়ন করুন", + "I18N_TOPIC_VIEWER_VIEW_ALL": "সব দেখুন", + "I18N_TOPIC_VIEWER_VIEW_LESS": "কম দেখুন", + "I18N_TOPIC_iX9kYCjnouWN_TITLE": "মান বসান", + "I18N_TOPIC_qW12maD4hiA8_TITLE": "ভাগ", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "যোগ ও বিয়োগ", "I18N_TOPNAV_ABOUT": "সম্পর্কে", "I18N_TOPNAV_ABOUT_OPPIA": "Oppia সম্পর্কে", "I18N_TOPNAV_ADMIN_PAGE": "প্রশাসকের পাতা", + "I18N_TOPNAV_ANDROID_APP_HEADING": "অ্যান্ড্রয়েড অ্যাপ", "I18N_TOPNAV_BLOG": "ব্লগ", + "I18N_TOPNAV_BLOG_DASHBOARD": "ব্লগ যতিফলক", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "প্রাথমিক গণিত", "I18N_TOPNAV_CONTACT_US": "যোগাযোগ", + "I18N_TOPNAV_CONTACT_US_DESCRIPTION": "আপনার কোন প্রশ্ন থাকলে সাহায্য করতে আমরা এখানেই রয়েছি।", + "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "অবদানকারীর যতিফলক", "I18N_TOPNAV_CREATOR_DASHBOARD": "ড্যাশবোর্ড তৈরি করুন", "I18N_TOPNAV_DONATE": "দান করুন", + "I18N_TOPNAV_DONATE_DESCRIPTION": "আপনার অবদান সকলকে মানসম্মত শিক্ষা প্রদানে সহায়তা করে।", + "I18N_TOPNAV_FACILITATOR_DASHBOARD": "সুবিধাদনকারীর যতিফলক", "I18N_TOPNAV_FORUM": "ফোরাম", "I18N_TOPNAV_GET_INVOLVED": "জড়িত হোন", "I18N_TOPNAV_GET_STARTED": "শুরু করুন", - "I18N_TOPNAV_LIBRARY": "পাঠাগার", + "I18N_TOPNAV_HOME": "নীড়", + "I18N_TOPNAV_LEARN": "শিখুন", + "I18N_TOPNAV_LEARNER_DASHBOARD": 
"শিক্ষার্থীর যতিফলক", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "গণিত শেখা আরম্ভ করতে সহায়ক ও অনভিজ্ঞ-বান্ধব পাঠ।", + "I18N_TOPNAV_LEARN_HEADING": "আরও শেখার উপায়", + "I18N_TOPNAV_LEARN_LINK_1": "সমস্ত পাঠ দেখুন", + "I18N_TOPNAV_LEARN_LINK_2": "পড়া চালিয়ে যান", + "I18N_TOPNAV_LIBRARY": "সম্প্রদায় পাঠাগার", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "আপনাকে আরও জানতে সাহায্য করার জন্য সম্প্রদায়ের তৈরি অতিরিক্ত বিষয়বস্তু।", "I18N_TOPNAV_LOGOUT": "প্রস্থান", "I18N_TOPNAV_MODERATOR_PAGE": "নিয়ন্ত্রণকারী পাতা", "I18N_TOPNAV_OPPIA_FOUNDATION": "Oppia ফাউন্ডেশন", - "I18N_TOPNAV_PARTNERSHIPS": "অংশীদারিত্ব", + "I18N_TOPNAV_PARTNERSHIPS": "বিদ্যালয় ও সংস্থাসমূহ", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "অংশীদার হন এবং ওপিয়াকে আপনার বিদ্যালয়, সম্প্রদায় বা এলাকায় নিয়ে আসুন।", "I18N_TOPNAV_PREFERENCES": "পছন্দসমূহ", "I18N_TOPNAV_SIGN_IN": "প্রবেশ", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "গুগল দিয়ে প্রবেশ করুন", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Oppia দিয়ে শিখান", + "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "প্রসঙ্গ ও দক্ষতার যতিফলক", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "পাঠ তৈরি এবং উন্নত করতে আমাদের বিশ্বব্যাপী দলে যোগ দিন।", "I18N_TOTAL_SUBSCRIBERS_TEXT": "আপনার মোট <[totalSubscribers]> জন গ্রাহক রয়েছে।", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "সদস্যতা ত্যাগ", - "I18N_WORKED_EXAMPLE": "কাজের উদাহরণ" + "I18N_VIEW_ALL_TOPICS": "<[classroomName]>-এর সমস্ত প্রসঙ্গ দেখুন", + "I18N_VOLUNTEER_PAGE_BREADCRUMB": "স্বেচ্ছাসেবক", + "I18N_VOLUNTEER_PAGE_TITLE": "স্বেচ্ছাসেবক | ওপিয়া", + "I18N_WARNING_MODAL_DESCRIPTION": "এটি সম্পূর্ণ সমাধান দেখাবে। আপনি কি নিশ্চিত?", + "I18N_WARNING_MODAL_TITLE": "সতর্কীকরণ!", + "I18N_WORKED_EXAMPLE": "কাজের উদাহরণ", + "I18N_YES": "হ্যাঁ" } diff --git a/assets/i18n/br.json b/assets/i18n/br.json index 33e9e32050ef..625358166321 100644 --- a/assets/i18n/br.json +++ b/assets/i18n/br.json @@ -314,7 +314,6 @@ "I18N_SIDEBAR_CONTACT_US": "Mont e darempred ganeomp", "I18N_SIDEBAR_DONATE": "Ober un donezon", "I18N_SIDEBAR_FORUM": "Forom", - 
"I18N_SIDEBAR_GET_STARTED": "Kregiñ", "I18N_SIDEBAR_LIBRARY_LINK": "Levraoueg", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Kelenn gant Oppia", "I18N_SIGNUP_CC_TITLE": "Aotre-implijout Creative Commons", @@ -357,6 +356,7 @@ "I18N_TOPNAV_FORUM": "Forom", "I18N_TOPNAV_GET_INVOLVED": "Kemer perzh", "I18N_TOPNAV_GET_STARTED": "Kregiñ", + "I18N_TOPNAV_HOME": "Degemer", "I18N_TOPNAV_LEARNER_DASHBOARD": "Taolenn an deskard", "I18N_TOPNAV_LIBRARY": "Levraoueg", "I18N_TOPNAV_LOGOUT": "Digevreañ", diff --git a/assets/i18n/bs.json b/assets/i18n/bs.json index 6ea3b747c539..116025abc5bf 100644 --- a/assets/i18n/bs.json +++ b/assets/i18n/bs.json @@ -185,7 +185,6 @@ "I18N_SIDEBAR_CONTACT_US": "Kontaktirajte nas", "I18N_SIDEBAR_DONATE": "Doniraj", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Započnite", "I18N_SIDEBAR_LIBRARY_LINK": "Biblioteka", "I18N_SIGNUP_BUTTON_SUBMIT": "Pošalji i počni doprinositi", "I18N_SIGNUP_CLOSE_BUTTON": "Zatvori", diff --git a/assets/i18n/ca.json b/assets/i18n/ca.json index c9a0d782deea..52939701f6bf 100644 --- a/assets/i18n/ca.json +++ b/assets/i18n/ca.json @@ -157,8 +157,9 @@ "I18N_PREFERENCES_EMAIL": "Adreça electrònica", "I18N_PREFERENCES_HEADING": "Preferències", "I18N_PREFERENCES_USERNAME": "Nom d’usuari", - "I18N_QUESTION_PLAYER_NEW_SESSION": "Sessió nova", - "I18N_SIDEBAR_ABOUT_LINK": "Quant a", + "I18N_QUESTION_PLAYER_NEW_SESSION": "Repetir", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "Copiat!", + "I18N_SIDEBAR_ABOUT_LINK": "Sobre nosaltres", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM": "Classes", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Matemàtiques Bàsiques", @@ -186,7 +187,7 @@ "I18N_TOPNAV_FORUM": "Fòrum", "I18N_TOPNAV_GET_INVOLVED": "Impliqueu-vos-hi", "I18N_TOPNAV_GET_STARTED": "Introducció", - "I18N_TOPNAV_LIBRARY": "Biblioteca", + "I18N_TOPNAV_LIBRARY": "Biblioteca de la comunitat", "I18N_TOPNAV_LOGOUT": "Finalitza la sessió", "I18N_TOPNAV_MODERATOR_PAGE": "Pàgina de moderació", "I18N_TOPNAV_PREFERENCES": "Preferències", diff 
--git a/assets/i18n/da.json b/assets/i18n/da.json index dd107ff8a9c7..1808916dba8d 100644 --- a/assets/i18n/da.json +++ b/assets/i18n/da.json @@ -156,6 +156,7 @@ "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Mål", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "Igangværende", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Lad os komme i gang med denne spændende rejse!", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "Dine grupper", "I18N_LEARNER_DASHBOARD_PLAYLIST_SECTION": "Afspil senere", "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "Fremskridt", "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "Fjern", @@ -167,6 +168,7 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Foreslået:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Forslag:", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Vis forslag", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "Beskrivelse", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Du har fuldført dette", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Allerede tilføjet afspilningslisten", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "Tilføj til afspilningsliste", @@ -280,7 +282,6 @@ "I18N_SIDEBAR_CONTACT_US": "Kontakt os", "I18N_SIDEBAR_DONATE": "Doner", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Kom igang", "I18N_SIDEBAR_LIBRARY_LINK": "Bibliotek", "I18N_SIDEBAR_PARTNERSHIPS": "Partnerskaber", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Undervis med Oppia", @@ -296,6 +297,7 @@ "I18N_SIGNUP_USERNAME": "Brugernavn", "I18N_SIGNUP_WHY_LICENSE": "Hvorfor CC-BY-SA?", "I18N_SOLUTION_EXPLANATION_TITLE": "Forklaring:", + "I18N_SOLUTION_NEED_HELP": "Vil du se den komplette løsning?", "I18N_SPLASH_ICON_ONE_TEXT": "1 million+ brugere", "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "Tak.", "I18N_SPLASH_LEARN_MORE": "Lær mere", diff --git a/assets/i18n/de.json b/assets/i18n/de.json index 332d8992c2d6..19a4559c58eb 100644 --- a/assets/i18n/de.json +++ b/assets/i18n/de.json @@ -1,4 +1,6 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Über die 
Oppia-Stiftung", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "Über die Oppia-Stiftung | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Eine Erforschung erstellen", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "über ein Thema, das dich interessiert.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Rückmeldungen verdienen", @@ -20,6 +22,8 @@ "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT": "Oppias Autoren kommen von der ganzen Welt – viele von uns sind Studenten, ehemalige Studenten und Lehrer. Wir möchten den folgenden Autoren danken, die geholfen haben, die Plattform zu erstellen. Falls du auch helfen möchtest, hier sind Informationen, wie du mitmachen kannst!", "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT_BOTTOM": "Das Oppia-Entwicklungsteam ist auch dankbar für Rückmeldungen, Ideen, Hilfe und Vorschläge von <[listOfNames]>.", "I18N_ABOUT_PAGE_CREDITS_THANK_TRANSLATEWIKI": "Wir möchten auch translatewiki.net danken für das Bereitstellen von Übersetzungen.", + "I18N_ABOUT_PAGE_EASILY_CREATE_LESSON": "Erstelle ganz einfach Lektionen", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "Entdecke Lektionen, die von der Gemeinschaft erstellt wurden", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "Spenden", "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "Mitmachen", "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "Die Oppia Foundation", @@ -29,41 +33,101 @@ "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4": "Die Direktoren der Foundation sind Ben Henning, Jacob Davis und Sean Lip. Die Satzungen und Protokolle der Foundation sind zum Lesen verfügbar. 
Falls du die Foundation kontaktieren möchtest, sende bitte eine E-Mail an: admin@oppia.org.", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4_HEADING": "Direktoren", "I18N_ABOUT_PAGE_HEADING": "Oppia: Bildung für alle", + "I18N_ABOUT_PAGE_LANGUAGE_FEATURE": "Übersetzung in lokale Dialekte", "I18N_ABOUT_PAGE_LEARN_BUTTON": "Ich möchte lernen", + "I18N_ABOUT_PAGE_LESSON_FEATURE": "Geschichtenbasierte Lektionen", + "I18N_ABOUT_PAGE_MOBILE_FEATURE": "Mobilfreundliche Navigation", "I18N_ABOUT_PAGE_OUR_FEATURES": "Unsere Features", "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "Was willst du heute machen?", "I18N_ABOUT_PAGE_TABS_ABOUT": "Über", "I18N_ABOUT_PAGE_TABS_CREDITS": "Danksagungen", "I18N_ABOUT_PAGE_TABS_FOUNDATION": "Foundation", "I18N_ABOUT_PAGE_TEACH_BUTTON": "Ich möchte unterrichten", - "I18N_ABOUT_PAGE_TITLE": "Über uns – Oppia", + "I18N_ABOUT_PAGE_TITLE": "Über | Oppia", + "I18N_ABOUT_PAGE_WIFI_FEATURE": "Niedrige Bandbreite erforderlich", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Für „Teach with Oppia“ bewerben", "I18N_ACTION_BROWSE_EXPLORATIONS": "Unsere Erforschungen durchsuchen", "I18N_ACTION_BROWSE_LESSONS": "Lektionen durchsuchen", + "I18N_ACTION_BROWSE_LIBRARY": "Bibliothek durchsuchen", "I18N_ACTION_CREATE_EXPLORATION": "Eine Erforschung erstellen", "I18N_ACTION_CREATE_LESSON": "Eigene Lektion erstellen", - "I18N_ACTION_EXPLORE_LESSONS": "Entdecke Stunden", + "I18N_ACTION_CREATE_LESSON_BUTTON": "Lektion erstellen", + "I18N_ACTION_EXPLORE_LESSONS": "Lektionen erkunden", "I18N_ACTION_GUIDE_FOR_TEACHERS": "Anleitung für Lehrer", + "I18N_ACTION_TIPS_FOR_PARENTS": "Tipps für Eltern und Erziehungsberechtigte", + "I18N_ACTION_VISIT_CLASSROOM": "Klassenraum besuchen", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Abbrechen", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "Fertig", + "I18N_ADD_SYLLABUS_SEARCH_PLACEHOLDER": "Suche z.B. 
Geschichte, Physik, Englisch", + "I18N_ANDROID_PAGE_CONSENT_CHECKBOX_LABEL": "Du bestätigst, dass du über 18 Jahre alt bist oder dass du die Zustimmung deines gesetzlichen Elternteils oder Erziehungsberechtigten hast.", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "E-Mail-Adresse", + "I18N_ANDROID_PAGE_FEATURES_SECTION_HEADER": "Bildung für jeden.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_2": "Spiele Lektionen auch ohne Internetverbindung.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_1": "Lerne durch fesselnde Geschichten", + "I18N_ANDROID_PAGE_FEATURE_TEXT_2": "Lerne jederzeit und überall", + "I18N_ANDROID_PAGE_FEATURE_TEXT_3": "Lerne in deiner Sprache", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Name", + "I18N_ANDROID_PAGE_SUPPORT_TEXT": "Wir unterstützen alle Android-Versionen ab Lollipop (Android 5).", + "I18N_ANDROID_PAGE_TITLE": "Android | Oppia", + "I18N_ANDROID_PAGE_UPDATES_MAIN_TEXT": "Melden Sie sich an, um Updates zur Android-App von Oppia zu erhalten", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "Benachrichtige mich", + "I18N_ANDROID_PAGE_UPDATES_SUBTEXT": "Wir versprechen, keinen Spam zu versenden, und Sie erhalten nur gelegentlich E-Mails. Sie können sich jederzeit abmelden.", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_2": "Bitte überprüfe deine Rechtschreibung.", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_BREADCRUMB": "Autorenprofile", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TITLE": "Blog | Autor | Oppia", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TOTAL_POSTS_DISPLAY": "<[totalNumber]> Beiträge", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_EXPLAIN_TEXT": "Dies ist eine kleine Beschreibung über dich. 
Alles, was du hier schreibst, ist öffentlich und für die ganze Welt einsehbar.", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_HEADING": "Bio", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "Name und Bio bearbeiten", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Name", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "Speichern", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Entwürfe", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Veröffentlicht", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Blog", + "I18N_BLOG_HOME_PAGE_TITLE": "Oppia-Blog | Oppia", + "I18N_BLOG_HOME_PAGE_WELCOME_HEADING": "Willkommen im Oppia-Blog!", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Abbrechen", "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Löschen", "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "Veröffentlichen", "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "Fertig", + "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "Als Entwurf speichern", "I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "Vorschau", "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "Tags", "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "Vorschaubild", "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "Titel", + "I18N_BLOG_POST_THUMBNAIL_ALLOWED_EXTENSIONS_PREFIX": "Erlaubte Bilddateiendungen:", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "Wähle eine Datei aus oder ziehe sie hierher", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Fehler: Die Bilddatei konnte nicht gelesen werden.", "I18N_BLOG_POST_UNTITLED_HEADING": "Ohne Titel", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Diese Karte ist ziemlich lang. Studenten könnten die Interesse verlieren. 
Ziehe in Erwägung, sie zu kürzen oder in zwei Karten aufzuteilen.", "I18N_CLASSROOM_CALLOUT_BUTTON": "Entdecken", + "I18N_CLASSROOM_CALLOUT_HEADING_1": "Mathematische Grundlagen", "I18N_CLASSROOM_CALLOUT_HEADING_2": "Einleitung: Der Oppia-Klassenraum", + "I18N_CLASSROOM_MATH_TITLE": "Mathematik", "I18N_CLASSROOM_PAGE_COMING_SOON": "Bald verfügbar", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Kursdetails", "I18N_CLASSROOM_PAGE_HEADING": "Der Oppia-Klassenraum", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Weitere Stunden entdecken, die von der Community erstellt wurden", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Durchsuche unsere Community-Bibliothek", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "Abgedeckte Themen", + "I18N_COLLECTION_EDITOR_PAGE_TITLE": "<[collectionTitle]> - Oppia-Editor", + "I18N_COLLECTION_EDITOR_UNTITLED_COLLECTION_PAGE_TITLE": "Unbenannt - Oppia-Editor", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Weiter", + "I18N_COMING_SOON": "Bald verfügbar!", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "SAMMLUNG", + "I18N_COMPLETED_STORY": "'<[story]>' abgeschlossen", + "I18N_CONCEPT_CARD_NEED_HELP": "Brauchst du Hilfe? Wirf einen Blick auf die Konzeptkarte.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_1": "Du hast gerade den ersten Kontrollpunkt abgeschlossen! Guter Start!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_3": "Ein perfekter Start! Weiter so!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_1": "Du hast einen Kontrollpunkt abgeschlossen! Gut gemacht!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_3": "Gute Arbeit! Du hast gerade einen Kontrollpunkt abgeschlossen!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_2": "Toll! 
Du hast gerade deinen zweiten Kontrollpunkt abgeschlossen!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "Hurra!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "Hervorragend!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_3": "Kontrollpunkt!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "Gut gemacht!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "Gute Arbeit!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "Gut erledigt!", "I18N_CONTACT_PAGE_BREADCRUMB": "Kontakt", "I18N_CONTACT_PAGE_HEADING": "Mach mit!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Danke für dein Interesse an einer Mithilfe am Oppia-Projekt!", @@ -92,9 +156,12 @@ "I18N_CONTACT_PAGE_PARAGRAPH_8": "Wenn du freie effektive Lektionen für Studenten auf der ganzen Welt erstellen möchtest, bist du am richtigen Ort. Wir ermutigen dich, unsere Ersteller-Tutorials und vorhandene Lektionen anzusehen und deine eigene Lektion zu erstellen. Wenn du zusätzlich sicherstellen möchtest, dass deine Lektionen eine große Wirkung haben, ziehe bitte in Erwägung, dich bei unserem Programm „Mit Oppia unterrichten“ zu bewerben, wo wir dir beim Erstellen, Testen und Verbessern deiner Erforschungen helfen für eine optimale Wirkung.", "I18N_CONTACT_PAGE_PARAGRAPH_9": "Magst du eine vorhandene Erforschung, du hast jedoch etwas gefunden, was besser sein könnte? Du kannst auf jeder Erforschungsseite direkt Änderungen vorschlagen. Klicke einfach auf das Stiftsymbol in der obigen rechten Ecke und teile das, was verbessert werden könnte. Der Ersteller der Lektion erhält deine Vorschläge und hat die Möglichkeit, sie in die Erforschung einzuarbeiten. 
Dies ist ein unglaublich wertvoller Weg zum Beitragen, vor allem wenn deine Vorschläge aus Erfahrungen von Studenten basieren, die sich durch die Erforschung spielen.", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "Vorhandene Erforschungen verbessern", + "I18N_CONTACT_PAGE_TITLE": "Kontakt | Oppia", "I18N_CONTINUE_REGISTRATION": "Mit der Registrierung fortfahren", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "OK", "I18N_CORRECT_FEEDBACK": "Richtig!", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "Dein Gruppenlink", + "I18N_CREATE_ACCOUNT": "Benutzerkonto erstellen", "I18N_CREATE_ACTIVITY_QUESTION": "Was möchtest du erstellen?", "I18N_CREATE_ACTIVITY_TITLE": "Eine Aktivität erstellen", "I18N_CREATE_COLLECTION": "Sammlung erstellen", @@ -103,6 +170,7 @@ "I18N_CREATE_EXPLORATION_QUESTION": "Möchtest du eine Erforschung erstellen?", "I18N_CREATE_EXPLORATION_TITLE": "Eine Erforschung erstellen", "I18N_CREATE_EXPLORATION_UPLOAD": "Hochladen", + "I18N_CREATE_LEARNER_GROUP": "Gruppe erstellen", "I18N_CREATE_NO_THANKS": "Nein, danke.", "I18N_CREATE_YES_PLEASE": "Ja, bitte!", "I18N_CREATOR_IMPACT": "Auswirkung", @@ -118,6 +186,7 @@ "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TITLE": "Titel", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TOTAL_PLAYS": "Insgesamt gespielt", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_UNRESOLVED_ANSWERS": "Unerledigte Antworten", + "I18N_DASHBOARD_LESSONS": "Lektionen", "I18N_DASHBOARD_OPEN_FEEDBACK": "Rückmeldung eröffnen", "I18N_DASHBOARD_STATS_AVERAGE_RATING": "Durchschnittliche Bewertung", "I18N_DASHBOARD_STATS_OPEN_FEEDBACK": "Offene Rückmeldungen", @@ -141,10 +210,19 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_2_1": "Themen und Vorschläge", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Übersicht", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "Dies bringt dich zu einer Seite, wo du dein Oppia-Konto löschen kanst.", + "I18N_DELETE_LEARNER_GROUP": "Gruppe löschen", + "I18N_DEST_IF_STUCK_INFO_TOOLTIP": "Du kannst jetzt eine neue Karte angeben, auf der du die Lernenden durch die in der Frage 
verwendeten Konzepte führen kannst, wenn sie wirklich nicht weiterkommen!", + "I18N_DIAGNOSTIC_TEST_START_BUTTON": "Test starten", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Ein Bild in diesem Bereich ablegen", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Eine Datei hochladen", "I18N_DONATE_PAGE_BREADCRUMB": "Spenden", - "I18N_DONATE_PAGE_TITLE": "Spende für die
Oppia Foundation", + "I18N_DONATE_PAGE_TITLE": "Spende für die Oppia Foundation", + "I18N_EMPTY_LEARNER_GROUPS_MESSAGE": "Du hast noch keine Gruppe", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "Du hast gerade dein erstes Kapitel abgeschlossen!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_2": "Du hast gerade dein 5. Kapitel abgeschlossen!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_3": "Du hast gerade dein 10. Kapitel abgeschlossen!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_4": "Du hast gerade dein 25. Kapitel abgeschlossen!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_5": "Du hast gerade dein 50. Kapitel abgeschlossen!", "I18N_ERROR_DISABLED_EXPLORATION": "Deaktivierte Erforschung", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Leider ist die angeklickte Erforschung derzeit deaktiviert. Bitte später erneut versuchen.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Deaktivierte Erforschung – Oppia", @@ -161,9 +239,22 @@ "I18N_ERROR_PAGE_TITLE_401": "Fehler 401 – Oppia", "I18N_ERROR_PAGE_TITLE_404": "Fehler 404 – Oppia", "I18N_ERROR_PAGE_TITLE_500": "Fehler 500 – Oppia", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE": "Große Zahlen addieren", + "I18N_EXPLORATION_Jbgc3MlRiY07_DESCRIPTION": "Nachdem sie so viel Neues gelernt hat, will Ava nun herausfinden, was sie mit ihrem Wissen anfangen kann. Begleite Ava, wenn sie ihr neues Wissen bei Problemen aus dem wirklichen Leben anwendet!", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "Was bedeutet Addition?", + "I18N_EXPLORATION_PLAYER_PAGE_TITLE": "<[explorationTitle]> - Oppia", + "I18N_EXPLORATION_SR1IKIdLxnm1_TITLE": "Wiederholung: Variablen", + "I18N_EXPLORATION_VKXd8qHsxLml_TITLE": "Große Zahlen subtrahieren, Teil 1", + "I18N_EXPLORATION_Vgde5_ZVqrq5_DESCRIPTION": "James weiß nun, wie sein eigenes Smoothie-Rezept aussehen soll, aber er hat Schwierigkeiten, alle Zutaten zu kombinieren. 
Kannst du ihm dabei helfen?", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "Was ist Subtraktion?", + "I18N_EXPLORATION_avwshGklKLJE_TITLE": "Zahlen runden, Teil 1", + "I18N_EXPLORATION_lOU0XPC2BnE9_DESCRIPTION": "Begleite Nina, wenn sie Sandra hilft, Fruchtsaft für ihren Verkaufsstand zu machen, und dabei eine neue Divisionstechnik benutzt!", + "I18N_EXPLORATION_umPkwp0L1M0-_DESCRIPTION": "Begleite Matthew, wenn er Mr. Baker zum ersten Mal trifft und etwas über Brüche lernt. Spiel die Stunde durch, um mehr zu erfahren!", + "I18N_EXPLORATION_zIBYaqfDJrJC_TITLE": "Was Multiplikation bedeutet", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anonym", "I18N_FOOTER_ABOUT": "Über", "I18N_FOOTER_ABOUT_ALL_CAPS": "ÜBER OPPIA", + "I18N_FOOTER_ANDROID_APP": "Android-App", "I18N_FOOTER_AUTHOR_PROFILES": "Autorenprofile", "I18N_FOOTER_BROWSE_LIBRARY": "Die Bibliothek durchsuchen", "I18N_FOOTER_CONTACT_US": "Kontaktiere uns", @@ -225,6 +316,7 @@ "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "und <[vertices]> Eckpunkte", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Bezeichnung aktualisieren", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Gewicht aktualisieren", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Klicke auf das Bild", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Wähle ein Bild zur Anzeige aus]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Du kannst mehr Auswahlen treffen.", "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Bitte treffe mindestens eine Auswahl.} other{Bitte treffe mindestens # Auswahlen.}}", @@ -232,11 +324,14 @@ "I18N_INTERACTIONS_MUSIC_CLEAR": "Löschen", "I18N_INTERACTIONS_MUSIC_PLAY": "Spielen", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Zielsequenz spielen", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "Bitte gib eine gültige Währung ein (z. B. 
$5 oder Rs 5)", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY_FORMAT": "Bitte schreibe Währungseinheiten am Anfang", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "Mögliche Einheitenformate", "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "Bist du sicher, dass du deinen Code zurücksetzen möchtest?", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Abbrechen", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Bestätigung erforderlich", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Code zurücksetzen", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Code-Editor anzeigen", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Objekt hinzufügen", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Huch! Es scheint, als ob dein Satz Duplikate hat!", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(Ein Objekt pro Zeile hinzufügen.)", @@ -244,8 +339,11 @@ "I18N_INTERACTIONS_SUBMIT": "Absenden", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Oppia ansehen auf:", "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Guten Nachmittag", - "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "ziele bearbeiten", + "I18N_LEARNER_DASHBOARD_ALL": "Alle", + "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Ziele bearbeiten", "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Bronze", + "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Gemeinschaftslektionen", + "I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "Abgeschlossene Ziele", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "Vervollständigt", "I18N_LEARNER_DASHBOARD_COMPLETED_TO_INCOMPLETE_COLLECTIONS": "<[numberMoved]> der vervollständigten Sammlungen wurden in den Abschnitt „In Bearbeitung“ verschoben, da neue Erforschungen zu ihnen hinzugefügt wurden!", "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Aktuelle Ziele", @@ -269,9 +367,11 @@ "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Ziele", "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "Gold", "I18N_LEARNER_DASHBOARD_HOME_SECTION": "Startseite", + 
"I18N_LEARNER_DASHBOARD_INCOMPLETE": "Unvollständig", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "In Bearbeitung", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "Es sieht so aus, als ob du noch keine unserer Erforschungen ausprobiert hast.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Lass uns bei dieser aufregenden Reise anfangen!", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_DECLINE_INVITATION": "Ablehnen", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Lerne etwas Neues.", "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Guten Morgen", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COLLECTIONS_FROM_PLAYLIST": "{numberNonexistent, plural, one{Eine der Sammlungen in deiner Liste „Später spielen“ ist nicht mehr verfügbar. Das tut uns leid.} other{# der Sammlungen in deiner Liste „Später spielen“ sind nicht mehr verfügbar. Das tut uns leid.}}", @@ -302,6 +402,12 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Vorschlag", "I18N_LEARNER_DASHBOARD_TOOLTIP": "Sammlungen sind mehrere ähnliche Erforschungen, die zur Vervollständigung in einer Reihenfolge beabsichtigt sind.", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Vorschlag ansehen", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "Beschreibung", + "I18N_LEARNER_GROUP_PREFERENCES_MODAL_SAVE_BUTTON": "Speichern", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_OPTION_FALSE": "Nein, vielleicht später", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "AUTOREN VON LEKTIONEN", + "I18N_LESSON_INFO_HEADER": "Lektionsinfo", + "I18N_LESSON_INFO_TOOLTIP_MESSAGE": "Du hast einen Kontrollpunkt erreicht. Gut gemacht! 
Siehe dir hier deinen Fortschritt und weitere Lektionsinformationen an.", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Du hast dies vervollständigt", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Bereits zur Spielliste hinzugefügt", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "Zur 'Später abspielen'-Liste hinzufügen", @@ -375,11 +481,16 @@ "I18N_LICENSE_PAGE_LICENSE_HEADING": "Lizenz", "I18N_LICENSE_PAGE_PARAGRAPH_1": "Alle Inhalte in den Oppia-Erforschungen sind lizenziert unter CC-BY-SA 4.0 mit einem Verzicht auf eine Namensnennungserfordernis (insbesonders die Abschnitte 3(a)(1) und 3(a)(2)). Falls du Inhalte von dieser Website weiterverwendest, ermutigen wir dich, einen Link auf die relevante Erforschungsseite zu platzieren, der jedoch nicht erforderlich ist.", "I18N_LICENSE_PAGE_PARAGRAPH_2": "Die Software, die Oppia betreibt, ist „Open Source“ und ihr Code wurde unter einer Apache-2.0-Lizenz veröffentlicht.", + "I18N_LOGIN_PAGE_TITLE": "Anmelden | Oppia", + "I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "Abmelden | Oppia", + "I18N_LOGOUT_PAGE_TITLE": "Abmelden", "I18N_MODAL_CANCEL_BUTTON": "Abbrechen", "I18N_MODAL_CONTINUE_BUTTON": "Fortfahren", "I18N_NEXT_LESSON": "Nächste Lektion", + "I18N_NO": "Nein", "I18N_ONE_SUBSCRIBER_TEXT": "Du hast einen Abonnenten.", "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Partnerschaften", + "I18N_PARTNERSHIPS_PAGE_TITLE": "Partnerschaften | Oppia", "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "Löschung des Kontos ausstehend", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_1": "Jeder ist willkommen zum Spielen und Geben von Rückmeldungen zu veröffentlichten Erforschungen. Mit der Hilfe von Jedem können wir ständig die Lektionen auf der Website verbessern und sie so effektiv wie möglich machen.", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_2": "Verwende ein gutes Urteilsvermögen, wenn du Erforschungen veröffentlichst. 
Erforschungen sollten einen bedeutsamen lehrreichen Wert haben und keine Werbung sowie keinen Spam, Vandalismus und/oder Missbrauch enthalten.", @@ -399,7 +510,7 @@ "I18N_PLAYBOOK_PUBLICATION_POLICY_PARAGRAPH_1": "Jeder Besitzer einer privaten Erforschung kann sie jederzeit veröffentlichen. Allerdings kann die Veröffentlichung minderwertiger Erforschungen durch die Moderatoren mit einer Rückmeldung für Verbesserungen wieder zurückgenommen werden.", "I18N_PLAYBOOK_PUBLICATION_POLICY_PARAGRAPH_2": "Erforschungen sind zur ständigen Verbesserung gedacht und solche, die seit langer Zeit nicht bearbeitet wurden, gelten als „verwaist“. In diesem Fall kann die Eigentümerschaft im Allgemeinen an die Oppia-Gemeinschaft übergehen (im Ermessen der Moderatoren), so dass die Erforschung weiterhin verbessert werden kann.", "I18N_PLAYBOOK_TAB_PARTICIPATION_PLAYBOOK": "Teilnahme-Spielbuch", - "I18N_PLAYER_AUDIO_EXPAND_TEXT": "Audio", + "I18N_PLAYER_AUDIO_EXPAND_TEXT": "Lektion anhören", "I18N_PLAYER_AUDIO_LANGUAGE": "Sprache", "I18N_PLAYER_AUDIO_LOADING_AUDIO": "Lade Audio …", "I18N_PLAYER_AUDIO_MIGHT_NOT_MATCH_TEXT": "Audio könnte nicht vollständig dem Text entsprechen", @@ -470,6 +581,7 @@ "I18N_PLAYER_THANK_FEEDBACK": "Vielen Dank für die Rückmeldung!", "I18N_PLAYER_UNRATED": "Unbewertet", "I18N_PLAYER_VIEWS_TOOLTIP": "Aufrufe", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "Audiosprache", "I18N_PREFERENCES_BIO": "Biografie", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "Dieses Feld ist optional. 
Alles, was du hier reinschreibst, ist öffentlich und weltweit sichtbar.", "I18N_PREFERENCES_BREADCRUMB": "Einstellungen", @@ -484,6 +596,7 @@ "I18N_PREFERENCES_HEADING": "Einstellungen", "I18N_PREFERENCES_HEADING_SUBTEXT": "Jede Änderung, die du auf dieser Seite durchführst, wird automatisch gespeichert.", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "Du hast noch keine Ersteller abonniert.", + "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE": "Einstellungen | Oppia", "I18N_PREFERENCES_PAGE_TITLE": "Deine Profileinstellungen ändern – Oppia", "I18N_PREFERENCES_PICTURE": "Bild", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "Bevorzugte Audiosprache", @@ -499,6 +612,7 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Ziehen zum Zuschneiden und Anpassen der Größe:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Fehler: Die Bilddatei konnte nicht gelesen werden.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Profilbild hochladen", + "I18N_PREFERENCES_SEARCH_LABEL": "Suchen", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Bevorzugte Sprachen auswählen …", "I18N_PREFERENCES_SUBJECT_INTERESTS": "Themeninteressen", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "Zum Beispiel: Mathematik, Informatik, Kunst …", @@ -511,21 +625,34 @@ "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Meine Übersichtsseite", "I18N_QUESTION_PLAYER_NEW_SESSION": "Neue Sitzung", "I18N_QUESTION_PLAYER_SCORE": "Punktzahl", + "I18N_REDIRECTION_TO_STUCK_STATE_MESSAGE": "Es scheint, als ob du hier ein wenig feststeckst. Lass uns die Konzepte auf einem kurzen Wiederholungsweg durchgehen.", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "Registrierungssitzung abgelaufen", "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "Deine Registrierungssitzung ist leider abgelaufen. 
Klicke bitte auf „Mit der Registrierung fortfahren“, um den Vorgang erneut zu starten.", + "I18N_SAVE_BUTTON_ALERT_TOOLTIP": "Der Fortschritt kann nicht gespeichert werden, wenn du den ersten Kontrollpunkt nicht erreicht hast.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "Hast du bereits ein Benutzerkonto?", + "I18N_SAVE_PROGRESS_TEXT": "Fortschritt speichern", + "I18N_SHARE_LESSON": "Lektion teilen", + "I18N_SHOW_LESS": "Weniger anzeigen", + "I18N_SHOW_MORE": "Mehr anzeigen", "I18N_SHOW_SOLUTION_BUTTON": "Lösung anzeigen", - "I18N_SIDEBAR_ABOUT_LINK": "Über Oppia", + "I18N_SIDEBAR_ABOUT_LINK": "Über uns", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "Über die Oppia-Stiftung", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM": "Klassenraum", + "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Grundlegende Mathematik", "I18N_SIDEBAR_CONTACT_US": "Kontaktiere uns", "I18N_SIDEBAR_DONATE": "Spenden", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Anfangen", + "I18N_SIDEBAR_HOME": "Startseite", "I18N_SIDEBAR_LIBRARY_LINK": "Bibliothek", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "Mathematische Grundlagen", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Die Oppia Foundation", "I18N_SIDEBAR_PARTNERSHIPS": "Partnerschaften", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "Addition und Subtraktion", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "Gemeinschaftsbibliothek", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Mit Oppia unterrichten", "I18N_SIDEBAR_VOLUNTEER": "Freiwilliger", + "I18N_SIGNIN_PAGE_TITLE": "Anmelden", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Durch das Anklicken des Kontrollkästchens auf der linken Seite dieses Textes akzeptierst du die Nutzungsbedingungen von <[sitename]>, die hier gefunden werden können.", "I18N_SIGNUP_BUTTON_SUBMIT": "Registrieren und mit Beiträgen beginnen", "I18N_SIGNUP_CC_TITLE": "Creative-Commons-Lizenz", @@ -540,6 +667,7 @@ "I18N_SIGNUP_ERROR_USERNAME_NOT_AVAILABLE": "Dieser Benutzername ist nicht verfügbar.", "I18N_SIGNUP_ERROR_USERNAME_ONLY_ALPHANUM": 
"Benutzernamen können nur alphanumerische Zeichen enthalten.", "I18N_SIGNUP_ERROR_USERNAME_TAKEN": "Dieser Benutzername ist leider bereits vergeben.", + "I18N_SIGNUP_ERROR_USERNAME_TOO_LONG": "Ein Benutzername darf höchstens <[maxUsernameLength]> Zeichen haben.", "I18N_SIGNUP_ERROR_USERNAME_WITH_ADMIN": "Benutzernamen, die das Wort „admin“ enthalten, sind reserviert.", "I18N_SIGNUP_ERROR_USERNAME_WITH_SPACES": "Bitte stelle sicher, dass dein Benutzername keine Leerzeichen enthält.", "I18N_SIGNUP_FIELD_REQUIRED": "Dieses Feld ist erforderlich.", @@ -555,7 +683,9 @@ "I18N_SIGNUP_USERNAME": "Benutzername", "I18N_SIGNUP_USERNAME_EXPLANATION": "Dein Benutzername wird neben deinen Beiträgen angezeigt.", "I18N_SIGNUP_WHY_LICENSE": "Warum CC-BY-SA?", + "I18N_SKILL_LEVEL_BEGINNER": "Anfänger", "I18N_SOLUTION_EXPLANATION_TITLE": "Erklärung:", + "I18N_SOLUTION_NEED_HELP": "Möchtest du die komplette Lösung sehen?", "I18N_SOLUTION_TITLE": "Lösung", "I18N_SPLASH_BENEFITS_TITLE": "Unsere Vorteile", "I18N_SPLASH_FIRST_EXPLORATION_DESCRIPTION": "Die Lektionen von Oppia – auch bekannt als Erforschungen – bieten umfassendere Erfahrungen als statische Videos oder Texte. Sie helfen Benutzern beim Lernen durch Handeln.", @@ -563,11 +693,12 @@ "I18N_SPLASH_FOR_TEACHERS": "Für Lehrer", "I18N_SPLASH_FOR_VOLUNTEERS": "Für Freiwillige", "I18N_SPLASH_ICON_ONE_TEXT": "1 Million+ Benutzer", + "I18N_SPLASH_ICON_TWO_TEXT": "Verfügbar in <[languageCount]>+ Sprachen", "I18N_SPLASH_JAVASCRIPT_ERROR_DESCRIPTION": "Oppia ist eine freie Lernplattform auf Open-Source-Basis voller interaktiver Aktivitäten namens „Erforschungen“. Leider erfordert Oppia aktiviertes JavaScript in deinem Webbrowser, um ordnungsgemäß zu funktionieren. Bei deinem Webbrowser ist JavaScript jedoch deaktiviert. 
Falls du bei der Aktivierung von JavaScript Hilfe benötigst, \">klicke hier.", "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "Vielen Dank.", "I18N_SPLASH_JAVASCRIPT_ERROR_TITLE": "Wir benötigen JavaScript in deinem Browser", "I18N_SPLASH_LEARN_MORE": "Weitere Informationen", - "I18N_SPLASH_PAGE_TITLE": "Oppia: Unterrichten, Lernen, Entdecken", + "I18N_SPLASH_PAGE_TITLE": "Oppia | Kostenlose, Online- und interaktive Lektionen für jeden", "I18N_SPLASH_SECOND_EXPLORATION_DESCRIPTION": "Erforschungen sind einfach zu erstellen. Sie sind angepasst basierend auf einzelnen Rückmeldungen von Studenten und Trends in der Erfahrung unserer Lernenden auf der ganzen Welt.", "I18N_SPLASH_SITE_FEEDBACK": "Website-Rückmeldungen", "I18N_SPLASH_STUDENT_DETAILS_1": "- Mira, Studentin, Palästina", @@ -576,46 +707,75 @@ "I18N_SPLASH_STUDENT_DETAILS_4": "- Gaurav, Student, Indien", "I18N_SPLASH_SUBTITLE": "Oppia macht es leicht, interaktive Lektionen zum Unterrichten und Engagieren zu erstellen.", "I18N_SPLASH_THIRD_EXPLORATION_DESCRIPTION": "Mit Oppia kannst du Erforschungen zu einer großen Auswahl von Themen erstellen und teilen, nur beschränkt durch deine Vorstellungen.", - "I18N_SPLASH_TITLE": "Denke außerhalb der Bücher.", + "I18N_SPLASH_TITLE": "Kostenlose Bildung für alle", "I18N_START_HERE": "Hier klicken zum Anfangen!", + "I18N_STORY_Qu6THxP29tOy_TITLE": "Maya, Omar und Malik backen eine Pizza!", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - Abgeschlossen!", + "I18N_STORY_ialKSV0VYV0B_TITLE": "James' Smoothie-Abenteuer", + "I18N_STORY_rqnxwceQyFnv_TITLE": "Nina besucht den Markt", "I18N_SUBSCRIBE_BUTTON_TEXT": "Abonnieren", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE": "Zahlen runden", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "Problemlösung", + "I18N_SUBTOPIC_sWBXKH4PZcK6_adding-numbers_TITLE": "Zahlen addieren", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE": "Das Verhältnis von Addition und Subtraktion", + 
"I18N_SUBTOPIC_sWBXKH4PZcK6_subtracting-numbers_TITLE": "Zahlen subtrahieren", "I18N_TEACH_BENEFITS_TITLE": "Unsere Vorteile", + "I18N_TEACH_PAGE_CLASSROOM_BUTTON": "KLASSENRAUM BESUCHEN", "I18N_TEACH_PAGE_HEADING": "Oppia für Eltern, Lehrer und Betreuer", + "I18N_TEACH_PAGE_LIBRARY_BUTTON": "BIBLIOTHEK DURCHSUCHEN", + "I18N_TEACH_STUDENT_DETAILS_2": "Wala Awad", + "I18N_TERMS_PAGE_TITLE": "Nutzungsbedingungen | Oppia", "I18N_THANKS_PAGE_BREADCRUMB": "Dankeschön", + "I18N_TIME_FOR_BREAK_BODY_1": "Du scheinst sehr schnell Antworten einzureichen. Fängst du an, müde zu werden?", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "Multiplikation", + "I18N_TOPIC_TITLE": "Thema", "I18N_TOPIC_VIEWER_CHAPTER": "Kapitel", "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{ein Kapitel} other{# Kapitel}}", + "I18N_TOPIC_VIEWER_COMING_SOON": "Bald verfügbar!", "I18N_TOPIC_VIEWER_DESCRIPTION": "Beschreibung", + "I18N_TOPIC_VIEWER_LESSON": "Lektion", + "I18N_TOPIC_VIEWER_LESSONS": "Lektionen", "I18N_TOPIC_VIEWER_PRACTICE": "Übung", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_TITLE": "Übungssprache bestätigen", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_TAG": "(Beta)", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "Komme später wieder, wenn Übungsfragen zu diesem Thema verfügbar sind.", "I18N_TOPIC_VIEWER_START_PRACTICE": "Starten", "I18N_TOPIC_VIEWER_STORY": "Geschichte", "I18N_TOPIC_VIEWER_VIEW_ALL": "Alles anzeigen", "I18N_TOPIC_VIEWER_VIEW_LESS": "Weniger anzeigen", + "I18N_TOPIC_qW12maD4hiA8_DESCRIPTION": "Wenn du 32 Tomaten für vier Personen hättest, wie viele Tomaten würde jede Person bekommen? 
Hier lernst du, wie du mithilfe der Division etwas aufteilen kannst.", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "Addition und Subtraktion", "I18N_TOPNAV_ABOUT": "Über", "I18N_TOPNAV_ABOUT_OPPIA": "Über Oppia", "I18N_TOPNAV_ADMIN_PAGE": "Verwaltungsseite", + "I18N_TOPNAV_ANDROID_APP_HEADING": "Android-App", "I18N_TOPNAV_BLOG": "Blog", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Grundlegende Mathematik", "I18N_TOPNAV_CONTACT_US": "Kontaktiere uns", "I18N_TOPNAV_CREATOR_DASHBOARD": "Ersteller-Verwaltungsseite", "I18N_TOPNAV_DONATE": "Spenden", "I18N_TOPNAV_FORUM": "Forum", "I18N_TOPNAV_GET_INVOLVED": "Anfangen", "I18N_TOPNAV_GET_STARTED": "Anfangen", + "I18N_TOPNAV_LEARN": "Lernen", "I18N_TOPNAV_LEARNER_DASHBOARD": "Verwaltungsseite des Lernenden", - "I18N_TOPNAV_LIBRARY": "Bibliothek", + "I18N_TOPNAV_LIBRARY": "Gemeinschaftsbibliothek", "I18N_TOPNAV_LOGOUT": "Abmelden", "I18N_TOPNAV_MODERATOR_PAGE": "Moderatorseite", "I18N_TOPNAV_OPPIA_FOUNDATION": "Die Oppia Foundation", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Teilnahmespielbuch", - "I18N_TOPNAV_PARTNERSHIPS": "Partnerschaften", + "I18N_TOPNAV_PARTNERSHIPS": "Schulen und Organisationen", "I18N_TOPNAV_PREFERENCES": "Einstellungen", "I18N_TOPNAV_SIGN_IN": "Anmelden", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Mit Google anmelden", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Mit Oppia unterrichten", "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "Verwaltungsseite für Themen und Fähigkeiten", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "Probiere es noch heute aus!", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "Schließe dich unserem globalen Team an, um Lektionen zu erstellen und zu verbessern.", "I18N_TOTAL_SUBSCRIBERS_TEXT": "Du hast insgesamt <[totalSubscribers]> Abonnenten.", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Abmelden", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Freiwilliger", "I18N_WARNING_MODAL_TITLE": "Warnung!", - "I18N_WORKED_EXAMPLE": "Gelungenes Beispiel" + "I18N_WORKED_EXAMPLE": "Gelungenes Beispiel", + "I18N_YES": "Ja" } diff --git a/assets/i18n/diq.json 
b/assets/i18n/diq.json index 22c9bdca94b0..e9daf87ff975 100644 --- a/assets/i18n/diq.json +++ b/assets/i18n/diq.json @@ -24,6 +24,8 @@ "I18N_ACTION_BROWSE_LESSONS": "Çım berz Dersanê ma", "I18N_ACTION_CREATE_EXPLORATION": "Keşf Vıraz", "I18N_ACTION_CREATE_LESSON": "Dersê xo Vıraze", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Name", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Bestere", "I18N_CLASSROOM_CALLOUT_BUTTON": "Keşıf", "I18N_CLASSROOM_CALLOUT_HEADING_1": "Temelê Matematiki", "I18N_CLASSROOM_CALLOUT_HEADING_2": "Şınasnayış: Banê Oppia", @@ -66,8 +68,8 @@ "I18N_DASHBOARD_OPEN_FEEDBACK": "Peyd rışyen akerê", "I18N_DASHBOARD_STATS_OPEN_FEEDBACK": "Peyd rışyen akerê", "I18N_DASHBOARD_STATS_TOTAL_PLAYS": "Kay pêro", - "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "Aboney", - "I18N_DASHBOARD_SUBSCRIBERS": "Aboney", + "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "Aboneyi", + "I18N_DASHBOARD_SUBSCRIBERS": "Aboneyi", "I18N_DASHBOARD_SUGGESTIONS": "Wesıbneyay", "I18N_DASHBOARD_TABLE_HEADING_EXPLORATION": "Keşıfi", "I18N_DASHBOARD_TABLE_HEADING_LAST_UPDATED": "Rocanekerdışo Peyên", @@ -96,7 +98,7 @@ "I18N_ERROR_PAGE_TITLE_404": "Xırab 404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "Xırab 500 - Oppia", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anonim", - "I18N_FOOTER_ABOUT": "Heqa cı dı", + "I18N_FOOTER_ABOUT": "Heqa cı de", "I18N_FOOTER_ABOUT_ALL_CAPS": "Heqa OPPIA dı", "I18N_FOOTER_AUTHOR_PROFILES": "Profilê nuştekari", "I18N_FOOTER_BROWSE_LIBRARY": "Çım berz Kıtabxane", @@ -134,7 +136,7 @@ "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Etiketi rocane kı", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Herayin rocane kı", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Asenaye rrsimi bıweçinê]", - "I18N_INTERACTIONS_MUSIC_CLEAR": "Pak kı", + "I18N_INTERACTIONS_MUSIC_CLEAR": "Pak ke", "I18N_INTERACTIONS_MUSIC_PLAY": "Bıcıne", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Rêza Hedefi Bıcıne", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "Muhtemel formatê unitey", @@ -147,6 +149,7 @@ 
"I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "cewab nêdeya.", "I18N_INTERACTIONS_SUBMIT": "Bırışe", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Oppai'y ena zıwan dı bımotné:", + "I18N_LEARNER_DASHBOARD_ALL": "Pêro", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "Temam biyo", "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "Kaya peyên", "I18N_LEARNER_DASHBOARD_FEEDBACK_SECTION": "Rocanekerdışê peydrıştışi", @@ -163,6 +166,7 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_DESCRIPTION": "Kışm izahê vurnayışan:", "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Wesıbnaye:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Teklif", + "I18N_LEARNER_DASHBOARD_VIEW": "Bıvêne", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Wesıbnaya bıvênê", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Şıma no kerdo temam", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Hora debiyao lista kay kerdışi", @@ -227,12 +231,12 @@ "I18N_LIBRARY_RATINGS_TOOLTIP": "Nısbeti", "I18N_LIBRARY_SEARCH_PLACEHOLDER": "Şıma yê kamci mewzuy meraq kenê?", "I18N_LIBRARY_VIEWS_TOOLTIP": "Asayışi", - "I18N_LIBRARY_VIEW_ALL": "Pêro bımocne.", + "I18N_LIBRARY_VIEW_ALL": "Pêrune bıvêne", "I18N_LICENSE_PAGE_LICENSE_HEADING": "Lisans", "I18N_LICENSE_PAGE_PARAGRAPH_1": "Keşıfê zerrekê Oppia'y pero CC-BY-SA 4.0 bın de lisans biyo.", "I18N_MODAL_CANCEL_BUTTON": "Bıtexelne", "I18N_MODAL_CONTINUE_BUTTON": "Dewam ke", - "I18N_ONE_SUBSCRIBER_TEXT": "1 aboney tı est o.", + "I18N_ONE_SUBSCRIBER_TEXT": "1 aboneyê to esto.", "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "Hesabo kı Bıesteriyo", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2_HEADING": "Detayê esterıtışi", "I18N_PLAYER_AUDIO_EXPAND_TEXT": "Goş bıde derse", @@ -295,7 +299,6 @@ "I18N_SIDEBAR_CONTACT_US": "İrtıbati", "I18N_SIDEBAR_DONATE": "Bexş", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Dest pêke", "I18N_SIDEBAR_LIBRARY_LINK": "Kıtıbxane", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Weqfê Oppia", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Oppia ya bomos", @@ -324,13 +327,13 
@@ "I18N_TOPNAV_ABOUT_OPPIA": "Oppia heqe", "I18N_TOPNAV_ADMIN_PAGE": "ripel admin", "I18N_TOPNAV_BLOG": "Blog", - "I18N_TOPNAV_CLASSROOM": "Sınıf", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Temel Matematik", "I18N_TOPNAV_CONTACT_US": "İrtıbat", "I18N_TOPNAV_CREATOR_DASHBOARD": "Dashboard vıraz", "I18N_TOPNAV_DONATE": "Bexş", "I18N_TOPNAV_FORUM": "Forum", "I18N_TOPNAV_GET_STARTED": "Dest pêke", + "I18N_TOPNAV_LEARN": "Sınıf", "I18N_TOPNAV_LIBRARY": "Kıtıbxane", "I18N_TOPNAV_LOGOUT": "Veciyayış", "I18N_TOPNAV_MODERATOR_PAGE": "ripel moderator", @@ -338,5 +341,5 @@ "I18N_TOPNAV_PREFERENCES": "Tercihi", "I18N_TOPNAV_SIGN_IN": "Cı kewe", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Oppia bomos", - "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Abone bevec" + "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Aboneyiye ra veciye" } diff --git a/assets/i18n/el.json b/assets/i18n/el.json index a2091333ca38..c2fa503357bc 100644 --- a/assets/i18n/el.json +++ b/assets/i18n/el.json @@ -149,6 +149,8 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Προτεινόμενο:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Πρόταση", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Προβολή πρότασης", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "Συντάκτες μαθήματος", + "I18N_LESSON_INFO_HEADER": "Πληροφορίες μαθήματος", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Το έχετε ήδη ολοκληρώσει", "I18N_LIBRARY_ALL_CATEGORIES": "Όλες οι Κατηγορίες", "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "Όλες οι κατηγορίες έχουν επιλεχθεί", @@ -279,7 +281,6 @@ "I18N_SIDEBAR_CONTACT_US": "Επικοινωνήστε μαζί μας", "I18N_SIDEBAR_DONATE": "Δωρεά", "I18N_SIDEBAR_FORUM": "Φόρουμ", - "I18N_SIDEBAR_GET_STARTED": "Ξεκινήστε", "I18N_SIDEBAR_LIBRARY_LINK": "Βιβλιοθήκη", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Ίδρυμα Oppia", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Διδάξτε με την Oppia", diff --git a/assets/i18n/en.json b/assets/i18n/en.json index 2fbe396106a9..417e7ab9bdf9 100644 --- a/assets/i18n/en.json +++ b/assets/i18n/en.json @@ -1,5 +1,6 @@ { - 
"I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "About foundation", + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "About the Oppia Foundation", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "About the Oppia Foundation | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Create an Exploration", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "about a topic you care about.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Earn feedback", @@ -26,7 +27,7 @@ "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "Explore Lessons Made by the Community", "I18N_ABOUT_PAGE_EXPLORE_LESSONS_CONTENT": "Educators and community members around the world use Oppia's lesson creation platform as a way to create and share lessons. You can find over 20,000 lessons for 17 different subjects in our Exploration library, and maybe you'll be inspired to create your own!", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "Donate", - "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON" : "Get Involved", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "Get Involved", "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "The Oppia Foundation", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_1": "The Oppia website and source code are supported by the Oppia Foundation, a tax-exempt 501(c)(3) non-profit organization registered in the State of California.", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_2": "The Foundation relies on the generous support of contributors and donors from around the world to work toward its mission of empowering anyone to learn anything they want in an enjoyable and effective way.", @@ -47,13 +48,14 @@ "I18N_ABOUT_PAGE_SECTION_ONE_CONTENT": "Oppia provides a novel and engaging approach to online learning that is specially designed to meet the unique needs of under-resourced learners around the world.", "I18N_ABOUT_PAGE_SECTION_SEVEN_TITLE": "Get Started with Curated Tips", "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "What would you like to do today?", - "I18N_ABOUT_PAGE_TABS_ABOUT" : "About", + "I18N_ABOUT_PAGE_TABS_ABOUT": "About", 
"I18N_ABOUT_PAGE_TABS_CREDITS": "Credits", "I18N_ABOUT_PAGE_TABS_FOUNDATION": "Foundation", "I18N_ABOUT_PAGE_TEACH_BUTTON": "I want to teach", "I18N_ABOUT_PAGE_TITLE": "About | Oppia", "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "Get Started with Oppia", "I18N_ABOUT_PAGE_WIFI_FEATURE": "Low Bandwidth Required", + "I18N_ACTION_ACCESS_ANDROID_APP": "Access the Android app", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Apply to Teach With Oppia", "I18N_ACTION_BROWSE_EXPLORATIONS": "Browse our Explorations", "I18N_ACTION_BROWSE_LESSONS": "Browse our Lessons", @@ -65,6 +67,36 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "Guide for teachers", "I18N_ACTION_TIPS_FOR_PARENTS": "Tips for parents and guardians", "I18N_ACTION_VISIT_CLASSROOM": "Visit classroom", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Cancel", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "Done", + "I18N_ADD_NEW_SYLLABUS_ITEMS": "New Syllabus Items", + "I18N_ADD_SYLLABUS_DESCRIPTION_TEXT": "Add skills or stories to your syllabus to automatically send them to your learners.", + "I18N_ADD_SYLLABUS_SEARCH_PLACEHOLDER": "Search eg. 
Story, Physics, English", + "I18N_ANDROID_PAGE_AVAILABLE_FOR_DOWNLOAD_TEXT": "Start learning for free on Android today", + "I18N_ANDROID_PAGE_BETA_DESCRIPTION": "A beta version of Oppia's Android app is now free to download and use in English and Brazilian Portuguese.", + "I18N_ANDROID_PAGE_CONSENT_CHECKBOX_LABEL": "I confirm that I am over the age of 18, or that I have consent and approval from my legal parent or guardian.", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "Email address", + "I18N_ANDROID_PAGE_FEATURES_SECTION_HEADER": "Education for everyone.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_1": "Try our helpful hints to guide you along", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_2": "Keep learning even when you are offline.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_3": "The app is available in English and Brazilian Portuguese.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_4": "More languages will be added soon!", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_5": "Create and maintain up to 10 profiles on one device.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_1": "Learn through engaging stories", + "I18N_ANDROID_PAGE_FEATURE_TEXT_2": "Learn anytime, anywhere", + "I18N_ANDROID_PAGE_FEATURE_TEXT_3": "Learn in your language", + "I18N_ANDROID_PAGE_FEATURE_TEXT_4": "Switch between learners", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Name (optional)", + "I18N_ANDROID_PAGE_SUPPORT_TEXT": "We support all Android versions going back to Lollipop (Android 5).", + "I18N_ANDROID_PAGE_TITLE": "Android | Oppia", + "I18N_ANDROID_PAGE_UPDATES_MAIN_TEXT": "Subscribe to receive updates on Oppia’s Android app", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "Subscribe", + "I18N_ANDROID_PAGE_UPDATES_SUBTEXT": "We promise not to send spam, and you will only receive the occasional email. 
You can unsubscribe at any time.", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_0": "You're headed in the right direction, but you need to recheck your spelling.", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_1": "You’re close to the right answer. Could you please correct your spelling?", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_2": "Please recheck your spelling.", + "I18N_ASSIGNED_STORIES_AND_SKILLS": "Assigned Stories and Skills", + "I18N_ASSIGNED_STORIES_AND_SKILLS_EMPTY_MESSAGE": "No stories or skills have been assigned to learners in this group.", "I18N_ATTRIBUTION_HTML_STEP_ONE": "Copy and paste the HTML", "I18N_ATTRIBUTION_HTML_STEP_TWO": "Make sure the link appears as \"<[linkText]>\"", "I18N_ATTRIBUTION_HTML_TITLE": "Attribute in HTML", @@ -72,13 +104,35 @@ "I18N_ATTRIBUTION_PRINT_STEP_TWO": "Attach a copy of the \"<[link]>\"", "I18N_ATTRIBUTION_PRINT_TITLE": "Attribute in Print", "I18N_ATTRIBUTION_TITLE": "How to attribute this lesson for sharing or reusing", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_BREADCRUMB": "Author Profile", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TITLE": "Blog | Author | Oppia", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TOTAL_POSTS_DISPLAY": "<[totalNumber]> posts", "I18N_BLOG_CARD_PREVIEW_CONTEXT": "This is how the blog card will appear on the Home Page and on your Author Profile.", "I18N_BLOG_CARD_PREVIEW_HEADING": "Blog Card Preview", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_EXPLAIN_TEXT": "Add a short description about you. Note that anything you write here will be shown to the public.", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_HEADING": "Bio", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_DESCRIPTION": "Before you can publish any blog posts, please add your author name and short biography. 
These will be shown on your blog author page and your author name will be shown on the posts you publish.", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "Add your Author Name and Biography", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Name", "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "Create New Blog Post", - "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "It looks like you have not created any story yet!", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "It looks like you have not created any blog posts yet!", "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "New Post", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "Save", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Drafts", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Published", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Blog", + "I18N_BLOG_HOME_PAGE_NO_RESULTS_FOUND": "Sorry, there are no blog posts to show.", + "I18N_BLOG_HOME_PAGE_OPPIA_DESCRIPTION": "Building a community to provide quality education for those who lack access to it.", + "I18N_BLOG_HOME_PAGE_POSTS_HEADING": "Latest Posts", + "I18N_BLOG_HOME_PAGE_POSTS_NUMBER_DISPLAY": "Displaying <[startingNumber]> - <[endingNumber]> of <[totalNumber]> posts", + "I18N_BLOG_HOME_PAGE_QUERY_SEARCH_HEADING": "Keywords", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "Tags", + "I18N_BLOG_HOME_PAGE_TAG_FILTER_HOLDER_TEXT": "Choose Tags", + "I18N_BLOG_HOME_PAGE_TITLE": "Oppia Blog | Oppia", + "I18N_BLOG_HOME_PAGE_WELCOME_HEADING": "Welcome to the Oppia Blog!", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_HEADING": "Showing Search Results", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_DISPLAY": "Displaying <[startingNumber]> - <[endingNumber]> of total search results.", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_OUT_OF_TOTAL_DISPLAY": "Displaying <[startingNumber]> - <[endingNumber]> of <[totalNumber]> posts.", "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Add Thumbnail Image", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Body", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Cancel", @@ -94,6 +148,15 @@ 
"I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "more tags can still be added.", "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "Thumbnail", "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "Title", + "I18N_BLOG_POST_EDITOR_TITLE_INVALID_CHARACTERS_ERROR": "Title field contains invalid characters. Only words (a-zA-Z0-9) separated by spaces, hyphens (-), ampersand (&) and colon (:) are allowed.", + "I18N_BLOG_POST_EDITOR_TITLE_IS_DUPLICATE_ERROR": "Blog Post with the given title already exists. Please provide a different title.", + "I18N_BLOG_POST_EDITOR_TITLE_MAX_LENGTH_ERROR": "Blog Post title should have at most <[maxChars]> characters.", + "I18N_BLOG_POST_EDITOR_TITLE_MIN_LENGTH_ERROR": "Blog Post title should have at least <[minChars]> characters.", + "I18N_BLOG_POST_PAGE_RECOMMENDATION_SECTON_HEADING": "Suggested For You.", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "Tags", + "I18N_BLOG_POST_PAGE_TITLE": "<[blogPostTitle]> | Blog | Oppia", + "I18N_BLOG_POST_THUMBNAIL_ALLOWED_EXTENSIONS_PREFIX": "Allowed image extensions: ", + "I18N_BLOG_POST_THUMBNAIL_ALLOWED_MAX_FILE_SIZE": "The maximum allowed image size is <[imageSize]> KB. Note that, sometimes, cropping might increase the image size.", "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "Choose a file or drag it here", "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Error: Could not read image file.", "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "Add a thumbnail", @@ -105,12 +168,57 @@ "I18N_CLASSROOM_CALLOUT_HEADING_1": "Math Foundations", "I18N_CLASSROOM_CALLOUT_HEADING_2": "Introducing: The Oppia Classroom", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Check out the first comprehensive course in the all-new Oppia Classroom! 
Curated lessons - reviewed by educators - so that you can master basic math skills in topics ranging from Place Values to Multiplication and Division.", + "I18N_CLASSROOM_MATH_TITLE": "Math", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_HEADING": "Already know some math?", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_TEXT": "Take a 10-15 question quiz to find out where to start.", + "I18N_CLASSROOM_PAGE_BEGIN_WITH_FIRST_TOPIC_BUTTON": "Begin with <[firstTopic]>", "I18N_CLASSROOM_PAGE_COMING_SOON": "Coming Soon", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Course Details", "I18N_CLASSROOM_PAGE_HEADING": "The Oppia Classroom", + "I18N_CLASSROOM_PAGE_NEW_TO_MATH_HEADING": "New to math?", + "I18N_CLASSROOM_PAGE_NEW_TO_MATH_TEXT": "Start from the basics with our first topic, <[firstTopic]>.", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Explore More Lessons Made by the Community", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Search through our Community Library", + "I18N_CLASSROOM_PAGE_TAKE_A_TEST_BUTTON": "Take a test", + "I18N_CLASSROOM_PAGE_TITLE": "Learn <[classroomName]> with Oppia | Oppia", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "Topics Covered", + "I18N_COLLECTION_EDITOR_PAGE_TITLE": "<[collectionTitle]> - Oppia Editor", + "I18N_COLLECTION_EDITOR_UNTITLED_COLLECTION_PAGE_TITLE": "Untitled - Oppia Editor", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "Begin", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Continue", + "I18N_COLLECTION_PLAYER_PAGE_FINISHED": "You have finished the collection! 
Feel free to replay any explorations below.", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "Hover over an icon to preview an exploration.", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "No Exploration has been added to this Collection.", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "<[collectionTitle]> - Oppia", + "I18N_COMING_SOON": "Coming Soon!", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "COLLECTION", + "I18N_COMPLETED_STORY": "Completed '<[story]>'", + "I18N_COMPLETE_CHAPTER": "Complete a chapter in '<[topicName]>'", + "I18N_CONCEPT_CARD_NEED_HELP": "Need help? Take a look at the concept card.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_1": "You just completed the first checkpoint! Good start!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_2": "Great work completing your first checkpoint! Keep it going!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_3": "A perfect start! Keep it up!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_1": "You completed a checkpoint! Good job!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_2": "Awesome, you completed a checkpoint! Keep going!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_3": "Nice work! You just completed a checkpoint!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_1": "You're halfway through, you'll be done in no time!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_2": "You just made it halfway, nice work!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_3": "Wow! You've already made it halfway through the lesson! Amazing work!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_1": "Just one more to go, woohoo!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_2": "Let's go! Just one more left!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_3": "You're doing great, just one more to go!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_1": "You're making good progress! Keep going!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_2": "Amazing! 
You just completed your second checkpoint!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_3": "One more checkpoint completed, you're doing great!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_1": "You're almost there! Keep it up!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_2": "You've almost made it to the end! Keep it going!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_3": "Nice work! You're almost at the finish line!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "Hurray!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "Awesome!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_3": "Checkpoint!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "Good job!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "Great work!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "Well done!", "I18N_CONTACT_PAGE_BREADCRUMB": "Contact", "I18N_CONTACT_PAGE_HEADING": "Get Involved!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Thanks for your interest in helping out with the Oppia project!", @@ -139,10 +247,13 @@ "I18N_CONTACT_PAGE_PARAGRAPH_8": "So, if you'd like to create free, effective lessons for students around the world, you've come to the right place. We encourage you to check out our creator tutorials and existing lessons, and start creating your own lesson. In addition, if you'd like to ensure that your lessons have a large impact, please consider applying to our Teach with Oppia program, where we will help you create, test, and improve your explorations for optimal impact.", "I18N_CONTACT_PAGE_PARAGRAPH_9": "Like an existing exploration, but found something that could be better? You can suggest changes to any exploration directly from the exploration's page. Simply click the pencil icon in the upper right hand corner, and share what you think could be improved. The lesson's creator will receive your suggestions and have the opportunity to merge them into the exploration. 
This is an incredibly valuable way to contribute, especially if you can base your suggestions off the experiences of students playing through the exploration.", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "Improving existing explorations", + "I18N_CONTACT_PAGE_TITLE": "Contact | Oppia", "I18N_CONTINUE_REGISTRATION": "Continue Registration", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "OK", "I18N_COOKIE_BANNER_EXPLANATION": "This website uses cookies and similar technologies to support core functionality, keep the site secure, and analyze our website traffic. Learn more in our Privacy Policy.", "I18N_CORRECT_FEEDBACK": "Correct!", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "Your group link", + "I18N_CREATE_ACCOUNT": "Create Account", "I18N_CREATE_ACTIVITY_QUESTION": "What do you want to create?", "I18N_CREATE_ACTIVITY_TITLE": "Create an Activity", "I18N_CREATE_COLLECTION": "Create Collection", @@ -151,6 +262,8 @@ "I18N_CREATE_EXPLORATION_QUESTION": "Do you want to create an exploration?", "I18N_CREATE_EXPLORATION_TITLE": "Create an Exploration", "I18N_CREATE_EXPLORATION_UPLOAD": "Upload", + "I18N_CREATE_LEARNER_GROUP": "Create Group", + "I18N_CREATE_LEARNER_GROUP_PAGE_TITLE": "Create Learner Group | Oppia", "I18N_CREATE_NO_THANKS": "No, Thanks", "I18N_CREATE_YES_PLEASE": "Yes, please!", "I18N_CREATOR_IMPACT": "Impact", @@ -159,7 +272,7 @@ "I18N_DASHBOARD_EXPLORATIONS": "Explorations", "I18N_DASHBOARD_EXPLORATIONS_EMPTY_MESSAGE": "It looks like you haven't created any explorations yet. 
Let's get started!", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY": "Sort By", - "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_AVERAGE_RATING" : "Average Rating", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_AVERAGE_RATING": "Average Rating", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_CATEGORY": "Category", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_UPDATED": "Last Updated", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_OPEN_FEEDBACK": "Open Feedback", @@ -202,14 +315,100 @@ "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "In addition, published explorations and collections that have no other owners will be transitioned to community ownership.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "If you have any questions or concerns about the account removal process, please send an email to privacy@oppia.org.", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "This will take you to a page where you can delete your Oppia account.", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "Delete Account | Oppia", + "I18N_DELETE_LEARNER_GROUP": "Delete Group", + "I18N_DELETE_LEARNER_GROUP_MODAL_BODY_TEXT": "Are you sure you want to delete <[groupName]> learner group?", + "I18N_DEST_IF_STUCK_INFO_TOOLTIP": "You can now specify a new card in which you can walk the learners through the concepts used in the question, if they get really stuck!", + "I18N_DIAGNOSTIC_TEST_CURRENT_PROGRESS": "Current Progress: <[progressPercentage]> %", + "I18N_DIAGNOSTIC_TEST_EXIT_TEST": "Exit Test", + "I18N_DIAGNOSTIC_TEST_HEADING": "Learner Diagnostic Test", + "I18N_DIAGNOSTIC_TEST_INTRO_TEXT_1": "Answer a few questions to help us recommend a few topics for you to get started with Math lessons.", + "I18N_DIAGNOSTIC_TEST_INTRO_TEXT_2": "Please note that you will not be able to change your answer after moving to the next question.", + "I18N_DIAGNOSTIC_TEST_RESULT_GO_TO_CLASSROOM_BUTTON_TEXT": "Go to classroom", + "I18N_DIAGNOSTIC_TEST_RESULT_HEADER_TEXT": "Test complete. 
Well done!", + "I18N_DIAGNOSTIC_TEST_RESULT_START_TOPIC": "Start <[topicName]>", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_1_FOR_NO_TOPIC": "Great job! It seems you already have a good understanding of the topics in the Math Classroom.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_2_FOR_NO_TOPIC": "Feel free to go through any of the lessons to review or improve upon what you know. We are constantly updating the Classroom with new lessons, so check back again.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_ONE_TOPIC": "Based on your answers, we recommend starting with this topic.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_TWO_TOPICS": "Based on your answers, we recommend starting with either one of these topics.", + "I18N_DIAGNOSTIC_TEST_START_BUTTON": "Start the test", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Drag an image into this area", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Upload a file", "I18N_DONATE_PAGE_BREADCRUMB": "Donate", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "Donate | Make a Positive Impact | Oppia", + "I18N_DONATE_PAGE_BUDGET_HEADING": "Where does your money go?", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_1": "Expanding Outreach", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_2": "Spreading the word", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "Maintenance", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_1": "Your donations strengthen our global partnerships and help us reach and support new learning communities.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_2": "Your donations help with our efforts in raising awareness of Oppia across the world.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_3": "Donations keep the Oppia platforms and servers running smoothly and reliably.", + "I18N_DONATE_PAGE_FAQ_ANSWER_1": "Oppia is a supplementary online educational platform that aims to address the needs of learners who lack access to quality education. 
We hope the platform and lessons will help make high-quality education accessible to as many learners as possible.", + "I18N_DONATE_PAGE_FAQ_ANSWER_10": "For general questions about Oppia, please reach out to contact@oppia.org.", + "I18N_DONATE_PAGE_FAQ_ANSWER_2": "Oppia exists to help improve equitable access to education. Many online platforms (e.g. college-level MOOC courses) favor learners who already have basic education. Those platforms also require that learners have internet connectivity, motivation to learn on their own, good knowledge of English and basic literacy etc. However, this is not true for students in underserved communities and regions, and can widen socioeconomic gaps. Oppia narrows that gap by including features specifically aimed at under-resourced communities.", + "I18N_DONATE_PAGE_FAQ_ANSWER_3": "Since its inception, the Oppia platform has served over 1.5 million learners worldwide. Oppia has run some small-scale studies, including randomized tests, to measure the effectiveness of lessons, and these have shown positive results. The team is currently working on providing more data to show the impact that the platform is having across the world, and will continue to conduct more studies.", + "I18N_DONATE_PAGE_FAQ_ANSWER_4_1": "Oppia's focus on access to education sets it apart from other online platforms. Many current providers use video lessons, which can be inaccessible to learners in areas without reliable internet connection. Oppia's lessons do not need much bandwidth, and encourage students to experiment and problem-solve.", + "I18N_DONATE_PAGE_FAQ_ANSWER_4_2": "Oppia also focuses on under-resourced regions, including South America, Sub-Saharan Africa, and South/Southeast Asia. 
By targeting and localising our lessons, educational materials, and platform to these regions, we ensure that our platform is accessible to those who need it and maximize our impact in those regions.", + "I18N_DONATE_PAGE_FAQ_ANSWER_5": "Yes, we are a 501(c)(3) tax-exempt organization and your donation is tax-deductible within the guidelines of U.S. law.", + "I18N_DONATE_PAGE_FAQ_ANSWER_6": "Please do not give by check, and consider donating by card or through Paypal.", + "I18N_DONATE_PAGE_FAQ_ANSWER_7": "Yes, but Oppia will not automatically file for such gifts. Please check with your employer if necessary.", + "I18N_DONATE_PAGE_FAQ_ANSWER_8": "We do not accept donations of stock or by wire transfer. Please consider donating by card or through Paypal.", + "I18N_DONATE_PAGE_FAQ_ANSWER_9": "Yes, reach out to: contact@oppia.org", + "I18N_DONATE_PAGE_FAQ_HEADING_TEXT": "Frequently Asked Questions", + "I18N_DONATE_PAGE_FAQ_QUESTION_1": "What is Oppia?", + "I18N_DONATE_PAGE_FAQ_QUESTION_10": "I would like to find out more about Oppia. 
Who can I reach out to?", + "I18N_DONATE_PAGE_FAQ_QUESTION_2": "Why does Oppia exist?", + "I18N_DONATE_PAGE_FAQ_QUESTION_3": "How is Oppia measuring impact and what has the platform accomplished so far?", + "I18N_DONATE_PAGE_FAQ_QUESTION_4": "What makes Oppia different from other online education platforms?", + "I18N_DONATE_PAGE_FAQ_QUESTION_5": "Is this donation tax deductible?", + "I18N_DONATE_PAGE_FAQ_QUESTION_6": "How do I give by check?", + "I18N_DONATE_PAGE_FAQ_QUESTION_7": "Do you accept employee matching gifts?", + "I18N_DONATE_PAGE_FAQ_QUESTION_8": "Can I donate stock or make my donation by wire transfer?", + "I18N_DONATE_PAGE_FAQ_QUESTION_9": "Is there someone I can talk to if I am interested in becoming a corporate partner?", + "I18N_DONATE_PAGE_HEADING_1": "Join us in ensuring access to", + "I18N_DONATE_PAGE_HEADING_2": "high quality and engaging education.", "I18N_DONATE_PAGE_IMAGE_TITLE": "Your generous gift funds:", - "I18N_DONATE_PAGE_TITLE": "Donate to the
Oppia Foundation", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_1": "From Khanpur, India", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_2": "From Palestine", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "From India", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_SECTION_HEADING": "What Our Learners are Saying", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_TEXT_1": "I liked learning the lessons on mobile because we get very good questions and if we get something wrong we are told how to correct it and we are not scared when we are doing this I really liked this app.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_TEXT_2": "I enjoyed playing the lesson a lot, I did not feel bored and I feel like I master the negative numbers now.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_TEXT_3": "I really had fun while solving the questions because they had many colorful shapes and images. The images made it easier to understand the topics as well!", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "Read our blog", + "I18N_DONATE_PAGE_STATISTIC_1": "Learners served worldwide.", + "I18N_DONATE_PAGE_STATISTIC_2": "Lessons in our virtual library", + "I18N_DONATE_PAGE_STATISTIC_3": "Randomized trial completed, with more to come", + "I18N_DONATE_PAGE_STATISTIC_4": "Volunteers from all over the globe", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_HEADING": "Thanks for subscribing!", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_1": "You will start receiving updates in your inbox soon. 
We promise not to spam, and you can unsubscribe at any time.", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_2": "With help and support from our community (including you!), Oppia has and continues to serve the most under-resourced learners around the world.", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "Email address", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_NAME_LABEL_TEXT": "Name (optional)", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_SUBSCRIBE_LABEL_TEXT": "Subscribe now", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_1": "Join us today!", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_2": "Become a part of the movement in bringing effective, engaging education to underserved learners around the world. Sign up for our newsletter to get updates and learn more about how you can get involved.", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "Thanks for donating!", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_1": "With your help and support, Oppia will be able to continue serving the most underserved learners around the world.", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_2": "Learn more about Oppia and the impact that your support will have", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_3": "If you have any questions, please reach out at anytime.", + "I18N_DONATE_PAGE_TITLE": "Donate to the Oppia Foundation", "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Hear from our Oppia community", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "In 2012, Oppia started with a simple idea: to improve the education of students around the world while improving the quality of teaching. 
This vision has since turned into an educational platform with over 11,000 explorations that have been used by more than 430,000 users worldwide.", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "Please donate to The Oppia Foundation, a registered 501(c)(3) nonprofit, and join us in bringing the joys of teaching and learning to people everywhere.", + "I18N_DONATE_PAGE_VISION_TEXT": "To date, Oppia has served more than 1.5 million learners around the world - many of whom come from the world's more underserved areas. Oppia relies on donations, and with just $11, you can help us continue our work and expand the reach of our platform, to expand equitable education for all.", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "Watch a video", + "I18N_EDIT_LEARNER_GROUP_PAGE_TITLE": "Edit Learner Group | Oppia", + "I18N_EMPTY_LEARNER_GROUPS_MESSAGE": "You don’t have any group yet", + "I18N_EMPTY_SOLUTION_MESSAGE": "Please provide the solution for the state.", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "You just completed your 1st chapter!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_2": "You just completed your 5th chapter!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_3": "You just completed your 10th chapter!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_4": "You just completed your 25th chapter!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_5": "You just completed your 50th chapter!", + "I18N_END_CHAPTER_MILESTONE_PROGRESS_MESSAGE": "{chaptersToGo, plural, one{Complete 1 more chapter to reach your next milestone!} other{Complete # more chapters to reach your next milestone!}}", + "I18N_END_CHAPTER_NEXT_CHAPTER_TEXT": "To the next lesson!", + "I18N_END_CHAPTER_PRACTICE_SESSION_TEXT": "Practice your newly acquired skills!", + "I18N_END_CHAPTER_REVISION_TAB_TEXT": "Revise what you've learnt so far!", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "Here's what you can do next!", "I18N_ERROR_DISABLED_EXPLORATION": "Disabled Exploration", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Sorry, but the exploration 
you clicked on is currently disabled. Please try again later.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Disabled Exploration - Oppia", @@ -222,13 +421,143 @@ "I18N_ERROR_MESSAGE_404": "Sorry, we looked and looked but we just couldn't find that page.", "I18N_ERROR_MESSAGE_500": "Something went horribly wrong. But it wasn't your fault. An internal error occurred.", "I18N_ERROR_NEXT_STEPS": "The best thing to do now is probably to return to the \">home page. However, if this issue recurs, and you think it shouldn't, please let us know on our \" target=\"_blank\">issue tracker. Sorry about this.", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "Error <[statusCode]> | Oppia", + "I18N_ERROR_PAGE_TITLE": "Error <[statusCode]> - Oppia", "I18N_ERROR_PAGE_TITLE_400": "Error 400 - Oppia", "I18N_ERROR_PAGE_TITLE_401": "Error 401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "Error 404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "Error 500 - Oppia", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "Ready for more cupcakes? Take this short quiz to check your understanding of what you've learned so far!", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE": "Equality of Fractions (Recap)", + "I18N_EXPLORATION_0FBWxCE5egOw_DESCRIPTION": "Is it possible for one fraction to be a different one in disguise? Let's see what happens when Matthew meets Crumb for the second time.", + "I18N_EXPLORATION_0FBWxCE5egOw_TITLE": "Equivalent Fractions", + "I18N_EXPLORATION_0X0KC9DXWwra_DESCRIPTION": "In Kamal's home, everyone celebrates Samir's birthday. Kamal adds some fun by making a math game for Ava and Samir. See if you can solve the questions!", + "I18N_EXPLORATION_0X0KC9DXWwra_TITLE": "Recap: Problem-Solving Skills", + "I18N_EXPLORATION_1904tpP0CYwY_DESCRIPTION": "It is time for Aria to start planting vegetables! 
Continue your gardening journey as you help her in the garden and start memorizing your multiples.", + "I18N_EXPLORATION_1904tpP0CYwY_TITLE": "Single Digit Expressions from 1-5", + "I18N_EXPLORATION_2mzzFVDLuAj8_DESCRIPTION": "Join James and his uncle as they learn about ratios and how to use them!", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "What Is a Ratio?", + "I18N_EXPLORATION_40a3vjmZ7Fwu_DESCRIPTION": "Nina and her mother bump into their friend, who also owns a fruit stall. Join Nina as she uses division to help their friend with the stall!", + "I18N_EXPLORATION_40a3vjmZ7Fwu_TITLE": "Remainders and Special Cases", + "I18N_EXPLORATION_53Ka3mQ6ra5A_DESCRIPTION": "Maya, Omar and Malik visit a supermarket to get more ingredients, and need to add larger numbers. See if you can help them!", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE": "Adding larger numbers", + "I18N_EXPLORATION_5I4srORrwjt2_DESCRIPTION": "At the snack bar, Kamal says they have to be smart in how they spend their limited amount of money. Help Ava and Samir find what snacks they can get!", + "I18N_EXPLORATION_5I4srORrwjt2_TITLE": "Proportionality and Unitary Method", + "I18N_EXPLORATION_5NWuolNcwH6e_DESCRIPTION": "James tries to make his own smoothies ... but they don't turn out so good. What mistake had he made? Play this lesson to find out!", + "I18N_EXPLORATION_5NWuolNcwH6e_TITLE": "The Importance of Order", + "I18N_EXPLORATION_670bU6d9JGBh_DESCRIPTION": "Help Matthew solve a problem for one of Mr. Baker's customers as he learns about mixed numbers and the number line. Play this lesson to get started!", + "I18N_EXPLORATION_670bU6d9JGBh_TITLE": "Mixed Numbers and the Number Line 1", + "I18N_EXPLORATION_6Q6IyIDkjpYC_DESCRIPTION": "Mr. Baker has a very large order coming in and needs Matthew's help in buying more ingredients. 
Can you figure out what they need by using fractions?", + "I18N_EXPLORATION_6Q6IyIDkjpYC_TITLE": "Subtracting Fractions", + "I18N_EXPLORATION_8HTzQQUPiK5i_DESCRIPTION": "Join Nina and her mother as they go to the market. Help them use division to figure out how many bags they need for their groceries!", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE": "What is Division?", + "I18N_EXPLORATION_9DITEN8BUEHw_DESCRIPTION": "Learn how to evaluate expressions involving multiple addition and subtraction operations.", + "I18N_EXPLORATION_9DITEN8BUEHw_TITLE": "Adding & subtracting several numbers", + "I18N_EXPLORATION_9trAQhj6uUC2_DESCRIPTION": "Fractions can be used to represent parts of a cake. But can they also be used to represent parts of groups of things? Play this lesson to find out!", + "I18N_EXPLORATION_9trAQhj6uUC2_TITLE": "Fractions of a Group", + "I18N_EXPLORATION_BDIln52yGfeH_DESCRIPTION": "When they get to the amusement park, Ava and Samir want to have fun, but Kamal says they need to see if they have enough money. Help them with the math!", + "I18N_EXPLORATION_BDIln52yGfeH_TITLE": "Simplifying Equations", + "I18N_EXPLORATION_BJd7yHIxpqkq_DESCRIPTION": "Help our three heroes make a better pizza, while learning how to add by zero and figure out missing numbers in an \"addition fact\".", + "I18N_EXPLORATION_BJd7yHIxpqkq_TITLE": "Basics of Addition", + "I18N_EXPLORATION_IrbGLTicm0BI_DESCRIPTION": "While Ava and Kamal are waiting for Mrs. Plum, let's see if you have learned how to apply different strategies to solve real world problems!", + "I18N_EXPLORATION_IrbGLTicm0BI_TITLE": "Recap: Solving Real-World Problems", + "I18N_EXPLORATION_Jbgc3MlRiY07_DESCRIPTION": "After learning all these new skills, Ava wants to find out what she can do with them. 
Join Ava in applying her new skills to solve real-world problems!", + "I18N_EXPLORATION_Jbgc3MlRiY07_TITLE": "Modelling Real-World Scenarios", + "I18N_EXPLORATION_K645IfRNzpKy_DESCRIPTION": "Jaime learns the place value of each digit in a big number.", + "I18N_EXPLORATION_K645IfRNzpKy_TITLE": "What are Place Values", + "I18N_EXPLORATION_K89Hgj2qRSzw_DESCRIPTION": "Kamal reveals the techniques he used to quickly figure out the time they need to wake up. Want to see how he does it? Play this lesson to find out!", + "I18N_EXPLORATION_K89Hgj2qRSzw_TITLE": "The Distributive Law", + "I18N_EXPLORATION_Knvx24p24qPO_DESCRIPTION": "Jaime understands the value of his arcade score.", + "I18N_EXPLORATION_Knvx24p24qPO_TITLE": "Finding the Values of a Number", + "I18N_EXPLORATION_MRJeVrKafW6G_DESCRIPTION": "Aria's garden is a huge success! Every week of the summer, more and more fruits and vegetables are growing. Help Aria count how many vegetables grew.", + "I18N_EXPLORATION_MRJeVrKafW6G_TITLE": "Multiplying by Powers of Ten", + "I18N_EXPLORATION_MjZzEVOG47_1_DESCRIPTION": "We learned that the \"denominator\" of a fraction is the number of equal parts in the whole. But why must the parts be the same? Let's find out!", + "I18N_EXPLORATION_MjZzEVOG47_1_TITLE": "The Meaning of \"Equal Parts\"", + "I18N_EXPLORATION_OKxYhsWONHZV_DESCRIPTION": "Join Maya and Omar as they learn how numbers can be \"put together\" or \"added\" to create a new number!", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "What is Addition?", + "I18N_EXPLORATION_PLAYER_PAGE_TITLE": "<[explorationTitle]> - Oppia", + "I18N_EXPLORATION_PsfDKdhd6Esz_DESCRIPTION": "Maya, Omar and Malik seem to have lost money due to the spoiled ingredients. 
Using subtraction, can you help them figure out how to account for this?", + "I18N_EXPLORATION_PsfDKdhd6Esz_TITLE": "Subtracting large numbers, Part 2", + "I18N_EXPLORATION_R7WpsSfmDQPV_DESCRIPTION": "Together with Aria, let's learn what multiplication is, how to write expressions with it, and how to use it to solve problems in Aria's neighborhood!", + "I18N_EXPLORATION_R7WpsSfmDQPV_TITLE": "Parts of Multiplication Expressions", + "I18N_EXPLORATION_RvopsvVdIb0J_DESCRIPTION": "It's time for James to sell his new smoothie! He sets up a stall with Uncle Berry. Can they figure out how much money each of them should get?", + "I18N_EXPLORATION_RvopsvVdIb0J_TITLE": "Tying Ratios to Actual Numbers", + "I18N_EXPLORATION_SR1IKIdLxnm1_DESCRIPTION": "Ava got bored from playing the amusement park games, so Kamal created a fun math game. Can you beat Kamal's game? Click on this lesson to find out!", + "I18N_EXPLORATION_SR1IKIdLxnm1_TITLE": "Recap: Variables", + "I18N_EXPLORATION_STARTING_FROM_BEGINNING": "Congratulations for completing this lesson! You will now start the lesson from beginning the next time you come back.", + "I18N_EXPLORATION_STATE_PREVIOUSLY_COMPLETED": "You answered this question in a previous session.", + "I18N_EXPLORATION_VKXd8qHsxLml_DESCRIPTION": "Maya, Omar and Malik notice that some of their ingredients have gone bad. Can you help them figure out how much they have left, using subtraction?", + "I18N_EXPLORATION_VKXd8qHsxLml_TITLE": "Subtracting large numbers, Part 1", + "I18N_EXPLORATION_Vgde5_ZVqrq5_DESCRIPTION": "James has figured out what he wants his own smoothie recipe to look like, but has trouble combining all the parts together. 
Can you help him with this?", + "I18N_EXPLORATION_Vgde5_ZVqrq5_TITLE": "Combining Ratios", + "I18N_EXPLORATION_W0xq3jW5GzDF_DESCRIPTION": "Something unexpected happens when Maya, Omar and Malik try to make a second pizza.", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "What is Subtraction?", + "I18N_EXPLORATION_WulCxGAmGE61_DESCRIPTION": "Nina visits Sandra's house. Join her as she uses division to help Sandra out with even trickier problems, like transferring all the fruits into crates!", + "I18N_EXPLORATION_WulCxGAmGE61_TITLE": "Division by Multiples of Ten", + "I18N_EXPLORATION_WwqLmeQEn9NK_DESCRIPTION": "Jamie continues to learn more techniques for rounding numbers.", + "I18N_EXPLORATION_WwqLmeQEn9NK_TITLE": "Rounding Numbers, Part 2", + "I18N_EXPLORATION_Xa3B_io-2WI5_DESCRIPTION": "Join Matthew as he helps Mr. Baker repair the damage, while learning how to add fractions.", + "I18N_EXPLORATION_Xa3B_io-2WI5_TITLE": "Adding Fractions", + "I18N_EXPLORATION_aAkDKVDR53cG_DESCRIPTION": "Jamie learns if a number is smaller or greater than another number.", + "I18N_EXPLORATION_aAkDKVDR53cG_TITLE": "Comparing Numbers", + "I18N_EXPLORATION_aHikhPlxYgOH_DESCRIPTION": "Join Matthew as he learns how mixed numbers are just ordinary fractions in disguise.", + "I18N_EXPLORATION_aHikhPlxYgOH_TITLE": "Mixed Numbers and the Number Line 2", + "I18N_EXPLORATION_aqJ07xrTFNLF_DESCRIPTION": "After using the unitary method to find which snack Ava should buy, it's Samir's turn by using a new method. Join Samir to find what snack to get!", + "I18N_EXPLORATION_aqJ07xrTFNLF_TITLE": "Solving Problems with Box Models", + "I18N_EXPLORATION_avwshGklKLJE_DESCRIPTION": "Jamie learns to simplify a number without making a lot of changes to its value.", + "I18N_EXPLORATION_avwshGklKLJE_TITLE": "Rounding Numbers, Part 1", + "I18N_EXPLORATION_cQDibOXQbpi7_DESCRIPTION": "Aria is ready to plant some bigger vegetables in her garden! 
Help her plant and water them while memorizing more multiples with her.", + "I18N_EXPLORATION_cQDibOXQbpi7_TITLE": "Single Digit Expressions from 5-9", + "I18N_EXPLORATION_hNOP3TwRJhsz_DESCRIPTION": "Aria is starting school again! She wants a big garden for the kids at her school. Help her plan it with Omar using multiplication with bigger numbers.", + "I18N_EXPLORATION_hNOP3TwRJhsz_TITLE": "Multi-Digit Multiplication, Part 1", + "I18N_EXPLORATION_ibeLZqbbjbKF_DESCRIPTION": "At the train station, Ava and Kamal find that there is no train! Kamal finds a mistake in the calculations. Will you help them find when the train arrives?", + "I18N_EXPLORATION_ibeLZqbbjbKF_TITLE": "Plugging in Values for Variables", + "I18N_EXPLORATION_k2bQ7z5XHNbK_DESCRIPTION": "Is it possible for two different ratios to mean the same thing? Find out with James and Uncle Berry as they try a new recipe for chocolate smoothies.", + "I18N_EXPLORATION_k2bQ7z5XHNbK_TITLE": "Equivalent Ratios", + "I18N_EXPLORATION_kYSrbNDCv5sH_DESCRIPTION": "Ava wants to make the most of Samir's birthday, so she begins to plan her day. Help her use shortcuts for evaluating expressions to figure things out!", + "I18N_EXPLORATION_kYSrbNDCv5sH_TITLE": "The Commutative and Associative Laws", + "I18N_EXPLORATION_lNpxiuqufPiw_DESCRIPTION": "Ava will soon need to apply her skills to some real-world problems. Will you be able to help? Try this lesson to see if you've mastered expressions!", + "I18N_EXPLORATION_lNpxiuqufPiw_TITLE": "Recap: Working With Expressions", + "I18N_EXPLORATION_lOU0XPC2BnE9_DESCRIPTION": "Join Nina as she helps Sandra make fruit juice for her stall, using a new technique of division!", + "I18N_EXPLORATION_lOU0XPC2BnE9_TITLE": "Long Division, Single-Digit Divisors", + "I18N_EXPLORATION_m1nvGABWeUoh_DESCRIPTION": "Ava and Samir finish playing games and go to the shop to use their tickets. There, they find a mysterious machine! 
Click the next lesson to find out!", + "I18N_EXPLORATION_m1nvGABWeUoh_TITLE": "What is an Average/Mean?", + "I18N_EXPLORATION_nLmUS6lbmvnl_DESCRIPTION": "Can James figure out whether a smoothie is more \"milky\" or \"yogurt-y\" just by looking at the recipe, rather than needing to make each smoothie manually?", + "I18N_EXPLORATION_nLmUS6lbmvnl_TITLE": "Comparing Ratios", + "I18N_EXPLORATION_nTMZwH7i0DdW_DESCRIPTION": "Ava and Kamal go to the train station. They see Mrs. Plum, a baker, and help her solve problems using expressions with revenue, cost, and profit.", + "I18N_EXPLORATION_nTMZwH7i0DdW_TITLE": "From Word Problems to Expressions", + "I18N_EXPLORATION_osw1m5Q3jK41_DESCRIPTION": "It's cupcake time again! Take this opportunity to make sure that you've understood the skills you've learned in the previous lessons!", + "I18N_EXPLORATION_osw1m5Q3jK41_TITLE": "Operations with Fractions (Recap)", + "I18N_EXPLORATION_rDJojPOc0KgJ_DESCRIPTION": "Ava and Kamal are buying presents for their cousin's birthday! Join them as they figure out how to calculate the prices by evaluating expressions.", + "I18N_EXPLORATION_rDJojPOc0KgJ_TITLE": "Evaluating Expressions - Order of Operations", + "I18N_EXPLORATION_rfX8jNkPnA-1_DESCRIPTION": "Can you help Matthew earn some cupcakes? Take this short quiz to see how much you remember about Fractions.", + "I18N_EXPLORATION_rfX8jNkPnA-1_TITLE": "Representing Fractions (Recap)", + "I18N_EXPLORATION_rwN3YPG9XWZa_DESCRIPTION": "While enjoying some ice cream, Ava and Kamal try to answer some questions that Ava has about their upcoming visit to the amusement park!", + "I18N_EXPLORATION_rwN3YPG9XWZa_TITLE": "Solving Word Problems", + "I18N_EXPLORATION_tIoSb3HZFN6e_DESCRIPTION": "James learns how to reduce a ratio to its simplest form, in order to make his calculations easier.", + "I18N_EXPLORATION_tIoSb3HZFN6e_TITLE": "Writing Ratios in Simplest Form", + "I18N_EXPLORATION_umPkwp0L1M0-_DESCRIPTION": "Join Matthew as he meets Mr. 
Baker for the first time and learns about fractions. What is a fraction? Play this lesson to find out more!", + "I18N_EXPLORATION_umPkwp0L1M0-_TITLE": "What is a Fraction?", + "I18N_EXPLORATION_v8fonNnX4Ub1_DESCRIPTION": "Ava and Kamal continue to help Mrs Plum with her baking business, but there are some unknowns in the expressions. Will Ava be able to help?", + "I18N_EXPLORATION_v8fonNnX4Ub1_TITLE": "Writing Expressions with Variables", + "I18N_EXPLORATION_wE9pyaC5np3n_DESCRIPTION": "Nina and Sandra enter a contest. Join Nina as she uses her division skills to sell as much fruit and juice as possible, to win the grand prize!", + "I18N_EXPLORATION_wE9pyaC5np3n_TITLE": "Multi-Digit Division", + "I18N_EXPLORATION_zIBYaqfDJrJC_DESCRIPTION": "Continue on your gardening adventure with Aria as she plants fruits, learns, and practices multiplication with Omar!", + "I18N_EXPLORATION_zIBYaqfDJrJC_TITLE": "What Multiplication Means", + "I18N_EXPLORATION_zNb0Bh27QtJ4_DESCRIPTION": "At the snack bar, Kamal checks his pockets and can't find his wallet. Without the wallet, they can't get any snacks! Can you help find Kamal's wallet?", + "I18N_EXPLORATION_zNb0Bh27QtJ4_TITLE": "Arithmetic Progressions", + "I18N_EXPLORATION_zTg2hzTz37jP_DESCRIPTION": "After lots of planning, Aria got her friends to help her plant the garden for her school! Use your skills to help them plant an amazing garden!", + "I18N_EXPLORATION_zTg2hzTz37jP_TITLE": "Multi-Digit Multiplication, Part 2", + "I18N_EXPLORATION_zVbqxwck0KaC_DESCRIPTION": "James and Uncle Berry are invited to make smoothies for their neighbour's party. Could this be the start of their rise to fame as smoothie makers?", + "I18N_EXPLORATION_zVbqxwck0KaC_TITLE": "Proportional Relationships", + "I18N_EXPLORATION_zW39GLG_BdN2_DESCRIPTION": "As Matthew learns how to compare fractions in terms of size, an accident happens at the bakery, and Mr. Baker gets annoyed. 
Let's see what happened!", + "I18N_EXPLORATION_zW39GLG_BdN2_TITLE": "Comparing Fractions", + "I18N_FACILITATOR_DASHBOARD_PAGE_TITLE": "Facilitator Dashboard | Oppia", + "I18N_FEEDBACK_INSTRUCTION": "Feedback message should be at most <[count]> characters.", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anonymous", "I18N_FOOTER_ABOUT": "About", "I18N_FOOTER_ABOUT_ALL_CAPS": "ABOUT OPPIA", + "I18N_FOOTER_ANDROID_APP": "Android App", "I18N_FOOTER_AUTHOR_PROFILES": "Author Profiles", "I18N_FOOTER_BROWSE_LIBRARY": "Browse the Library", "I18N_FOOTER_CONTACT_US": "Contact Us", @@ -270,6 +599,7 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_9": "When learners go through your exploration, they can send you feedback to alert you to problems or to share ideas for making it better.", "I18N_GET_STARTED_PAGE_PARAGRAPH_9_HEADING": "Improve Your Exploration", "I18N_GET_STARTED_PAGE_TITLE": "Get Started", + "I18N_GOAL_LIMIT": "Limit of <[limit]> goals", "I18N_GOT_IT": "Got It", "I18N_HEADING_VOLUNTEER": "Volunteer", "I18N_HINT_NEED_HELP": "Need help? 
View a hint for this problem!", @@ -278,8 +608,16 @@ "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "Type code in the editor", "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Go to code editor", "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Drag and drop items", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "Please do not put 0 in the denominator", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Enter a fraction in the form \"x/y\", or a mixed number in the form \"A x/y\".", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Enter a fraction in the form x/y.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS": "Please only use numerical digits, spaces or forward slashes (/)", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS_LENGTH": "None of the numbers in the fraction should have more than 7 digits", + "I18N_INTERACTIONS_FRACTIONS_INVALID_FORMAT": "Please enter a valid fraction (e.g., 5/3 or 1 2/3)", + "I18N_INTERACTIONS_FRACTIONS_NON_EMPTY": "Please enter a non-empty fraction value.", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "Please enter your answer as a fraction (e.g., 5/3 instead of 1 2/3).", + "I18N_INTERACTIONS_FRACTIONS_PROPER_FRACTION": "Please enter an answer with a \"proper\" fractional part (e.g., 1 2/3 instead of 5/3).", + "I18N_INTERACTIONS_FRACTIONS_SIMPLEST_FORM": "Please enter an answer in simplest form (e.g., 1/3 instead of 2/6).", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "Add Edge", "I18N_INTERACTIONS_GRAPH_ADD_NODE": "Add Node", "I18N_INTERACTIONS_GRAPH_DELETE": "Delete", @@ -301,7 +639,7 @@ "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Click on the image", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Select an image to display]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "You may select more choices.", - "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Please select one or more choices.} other{Please select # or more choices.}}", + "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, 
one{Please select all correct choices.} other{Please select # or more choices.}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{No more than 1 choice may be selected.} other{No more than # choices may be selected.}}", "I18N_INTERACTIONS_MAP_INSTRUCTION": "Click on the map", "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "View map", @@ -311,20 +649,44 @@ "I18N_INTERACTIONS_MUSIC_NARROW_INSTRUCTION": "Show music staff", "I18N_INTERACTIONS_MUSIC_PLAY": "Play", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Play Target Sequence", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "Please enter a valid currency (e.g., $5 or Rs 5)", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY_FORMAT": "Please write currency units at the beginning", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_UNIT_CHARS": "Please ensure that the unit only contains numbers, alphabets, (, ), *, ^, /, -", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_VALUE": "Please ensure that the value is either a fraction or a number", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "Possible unit formats", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_DECIMAL": "At most 1 decimal point should be present.", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_EXPONENT": "At most 1 exponent sign (e) should be present.", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_MINUS": "At most 1 minus (-) sign should be present.", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_COMMA": "The answer can contain at most 15 digits (0-9) excluding symbols (, or -).", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_DOT": "The answer can contain at most 15 digits (0-9) excluding symbols (. 
or -).", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "The answer should be a valid number.", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "The answer must be greater than or equal to zero.", + "I18N_INTERACTIONS_NUMERIC_INPUT_MINUS_AT_BEGINNING": "Minus (-) sign is only allowed in beginning.", + "I18N_INTERACTIONS_NUMERIC_INPUT_NO_INVALID_CHARS": "Only digits 0-9, '.', 'e', and '-' are allowed.", + "I18N_INTERACTIONS_NUMERIC_INPUT_NO_TRAILING_DECIMAL": "Trailing decimals are not allowed.", "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "Are you sure you want to reset your code?", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Cancel", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Confirmation Required", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Reset Code", "I18N_INTERACTIONS_PENCILCODE_INSTRUCTION": "Edit the code. Click 'Play' to check it!", "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Show code editor", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "Please enter a valid ratio (e.g. 1:2 or 1:2:3).", + "I18N_INTERACTIONS_RATIO_INCLUDES_ZERO": "Ratios cannot have 0 as an element.", + "I18N_INTERACTIONS_RATIO_INVALID_CHARS": "Please write a ratio that consists of digits separated by colons (e.g. 1:2 or 1:2:3).", + "I18N_INTERACTIONS_RATIO_INVALID_COLONS": "Your answer has multiple colons (:) next to each other.", + "I18N_INTERACTIONS_RATIO_INVALID_FORMAT": "Please enter a valid ratio (e.g. 
1:2 or 1:2:3).", + "I18N_INTERACTIONS_RATIO_NON_INTEGER_ELEMENTS": "For this question, each element in your ratio should be a whole number (not a fraction or a decimal).", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Add item", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Oops, it looks like your set has duplicates!", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(Add one item per line.)", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "No answer given.", "I18N_INTERACTIONS_SUBMIT": "Submit", + "I18N_INTERACTIONS_TERMS_LIMIT": "The creator has specified the number of terms in the answer to be <[termsCount]>", + "I18N_INVALID_TAGS_AND_ATTRIBUTES_ALERT": "Some invalid tags and attributes have been removed from the uploaded image. If your image looks distorted, please \" target=\"_blank\">let us know, then try uploading a different SVG.", + "I18N_JOIN_LEARNER_GROUP_BUTTON": "Join Group", "I18N_LANGUAGE_FOOTER_VIEW_IN": "View Oppia in:", "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Good Afternoon", + "I18N_LEARNER_DASHBOARD_ALL": "All", "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Edit Goals", "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Bronze", "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Community Lessons", @@ -333,6 +695,8 @@ "I18N_LEARNER_DASHBOARD_COMPLETED_TO_INCOMPLETE_COLLECTIONS": "<[numberMoved]> of the collections you completed have been moved to the 'in progress' section as new explorations have been added to them!", "I18N_LEARNER_DASHBOARD_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Continue where you left off", "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Current Goals", + "I18N_LEARNER_DASHBOARD_DECLINE_INVITATION_MODAL_BODY": "Are you sure you want to decline the invitation to <[groupName]>?", + "I18N_LEARNER_DASHBOARD_DECLINE_INVITATION_MODAL_HEADER": "Decline Invitation?", "I18N_LEARNER_DASHBOARD_EMPTY_COLLECTION_PLAYLIST": "It looks like there aren't any collections in your 'Play Later' list. 
Head over to the library and build your own curated playlist!", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_COLLECTIONS": "It looks like you haven't completed any collections yet. Head over to the library to start an exciting new collection!", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_EXPLORATIONS": "It looks like you haven't completed any explorations yet. Head over to the library to start an exciting new exploration!", @@ -346,6 +710,7 @@ "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "You don’t have any active feedback threads yet. Your feedback helps improve the quality of our lessons. You can do so by starting any of our lessons and submitting your valuable feedback!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "It looks like you have no partially-complete collections at the moment. Head over to the library to start an exciting new collection!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "It looks like you have no partially-complete explorations at the moment. Head over to the library to start an exciting new exploration!", + "I18N_LEARNER_DASHBOARD_EMPTY_LEARN_SOMETHING_NEW_SECTION": "It looks like you reached the goal selection limit. Head over to the library and explore more explorations.", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "Get started by ", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "Setting a goal allows Oppia to give you better recommendations in your dashboard that contribute to your learning journey.", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "setting a goal! 
", @@ -360,9 +725,16 @@ "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Goals", "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "Gold", "I18N_LEARNER_DASHBOARD_HOME_SECTION": "Home", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "Incomplete", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "In Progress", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "It looks like you haven't tried any of our explorations yet.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Let's get started on this exciting journey!", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION": "Learner Groups", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "Your Groups", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_INVITATIONS": "Your Invitations", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_NO_GROUPS": "You have not joined any learner groups yet.", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_DECLINE_INVITATION": "Decline", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_VIEW_PREFERENCES": "View Preferences", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Learn Something New", "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Good Morning", "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "New Story content available", @@ -374,6 +746,7 @@ "I18N_LEARNER_DASHBOARD_NONEXISTENT_INCOMPLETE_EXPLORATIONS": "{numberNonexistent, plural, one{1 of the explorations in progress is no longer available. We are sorry for the inconvenience} other{# of the explorations in progress are no longer available. We are sorry for the inconvenience}}", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_COLLECTION": "It looks like you haven't started any collections yet. Head over to the library to start an exciting new collection!", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": "It looks like you haven't started any explorations yet. 
Head over to the library to start an exciting new exploration!", + "I18N_LEARNER_DASHBOARD_PAGE_TITLE": "Learner Dashboard | Oppia", "I18N_LEARNER_DASHBOARD_PLAYLIST_SECTION": "Play Later", "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "Progress", "I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE": "Drag and rearrange the activities in the order in which you want to play them!", @@ -386,7 +759,7 @@ "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Sending...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Silver", "I18N_LEARNER_DASHBOARD_SKILLS": "Skills", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "Skill Proficiency", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Skill Progress", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Stories Completed", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Subscriptions", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Progress:", @@ -396,7 +769,86 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Suggested:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Suggestion", "I18N_LEARNER_DASHBOARD_TOOLTIP": "Collections are multiple related explorations that are intended to be completed in a sequence.", + "I18N_LEARNER_DASHBOARD_VIEW": "View", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "View Suggestion", + "I18N_LEARNER_GROUPS_SECTION_TITLE": "Your Learner Groups", + "I18N_LEARNER_GROUP_ADD_GROUP_DETAILS": "Add Group Details", + "I18N_LEARNER_GROUP_ADD_NEW_SYLLABUS_ITEMS": "Add New Syllabus Items", + "I18N_LEARNER_GROUP_ADD_SYLLABUS_ITEMS": "Add Syllabus Items", + "I18N_LEARNER_GROUP_ADD_TO_SYLLABUS": "Add to syllabus", + "I18N_LEARNER_GROUP_ASSIGNED_SKILLS": "{skillsCount, plural, =1{Assigned Skill} other{Assigned Skills}}", + "I18N_LEARNER_GROUP_ASSIGNED_STORIES": "{storiesCount, plural, =1{Assigned Story} other{Assigned Stories}}", + "I18N_LEARNER_GROUP_ASSIGNED_SYLLABUS_TAB": "Assigned Syllabus", + "I18N_LEARNER_GROUP_BACK_TO_ALL_LEARNERS_PROGRESS": "Back To All Learners' 
Progress", + "I18N_LEARNER_GROUP_BACK_TO_SYLLABUS": "Back To Syllabus", + "I18N_LEARNER_GROUP_CREATED_TITLE": "Your group <[groupName]> has been created.", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "Next", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "Previous Step", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "Description", + "I18N_LEARNER_GROUP_DETAILS_GROUP_DESCRIPTION": "Group Description (Describe the group objectives in 2-4 lines)", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "Group Title", + "I18N_LEARNER_GROUP_DETAILS_MODAL_DESCRIPTION": "Group Description", + "I18N_LEARNER_GROUP_DETAILS_TITLE": "Details", + "I18N_LEARNER_GROUP_FACILITATOR_LABEL_TEXT": "Facilitator", + "I18N_LEARNER_GROUP_GROUP_DETAILS_SECTION": "Group Details", + "I18N_LEARNER_GROUP_INVITATION_MODAL_HEADER": "Learner group invitation", + "I18N_LEARNER_GROUP_INVITE_LEARNERS": "Invite Learners", + "I18N_LEARNER_GROUP_INVITE_LEARNERS_BY_USERNAME": "Invite a learner with their username", + "I18N_LEARNER_GROUP_INVITE_LEARNERS_PLACEHOLDER_TEXT": "Add the learner's username and hit enter", + "I18N_LEARNER_GROUP_INVITE_LEARNER_BUTTON_TEXT": "Invite Learner", + "I18N_LEARNER_GROUP_INVITE_LIST_TEXT": "Invite List", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "Added", + "I18N_LEARNER_GROUP_ITEM_ALREADY_ADDED_TO_SYLLABUS": "Already In Syllabus", + "I18N_LEARNER_GROUP_JOINING_MESSAGE": "You are about to join \"<[groupName]>\"", + "I18N_LEARNER_GROUP_LEARNERS": "{learnersCount, plural, =1{LEARNER} other{LEARNERS}}", + "I18N_LEARNER_GROUP_LEARNERS_MODAL_TEXT": "Group Learners", + "I18N_LEARNER_GROUP_LEARNERS_PROGRESS_TAB": "Learners' Progress", + "I18N_LEARNER_GROUP_LEARNERS_SECTION": "{learnersCount, plural, =1{Learner} other{Learners}}", + "I18N_LEARNER_GROUP_MINIMUM_SYLLABUS_ITEMS_INFO": "You need to add at least one syllabus item(Skill/Story) in order to make a group.", + "I18N_LEARNER_GROUP_NO_INVITATIONS": "You do not have any pending invitations.", + 
"I18N_LEARNER_GROUP_NO_ITEMS_ADDED": "You have not added any new story or skill. Start by adding one!", + "I18N_LEARNER_GROUP_NO_LEARNERS_HAVE_JOINED": "There is no one in this group. How about inviting a learner?", + "I18N_LEARNER_GROUP_NO_LEARNERS_INVITED": "You have not invited any learners yet. Start by inviting one!", + "I18N_LEARNER_GROUP_NO_RESULTS_FOUND": "No results found.", + "I18N_LEARNER_GROUP_OVERVIEW_TAB": "Overview", + "I18N_LEARNER_GROUP_PAGE_TITLE": "Learner Group | Oppia", + "I18N_LEARNER_GROUP_PERMISSION_NOT_GIVEN": "Permission Not Given", + "I18N_LEARNER_GROUP_PREFERENCE": "Learner Group Preference", + "I18N_LEARNER_GROUP_PREFERENCES_MODAL_SAVE_BUTTON": "Save", + "I18N_LEARNER_GROUP_PREFERENCES_TAB": "Preferences", + "I18N_LEARNER_GROUP_PROGRESS_IN_STORIES_SECTION": "Progress in Stories", + "I18N_LEARNER_GROUP_PROGRESS_NO_LEARNERS": "There are no learners in this group. Please invite learners to see their progress.", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_INFO_TEXT": "As a learner, you can decide if facilitators are able to view your lesson progress and provide you with feedback. You can change this setting at any time.", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_INFO_TITLE": "Set sharing permissions:", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_OPTION_FALSE": "No, maybe later", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_OPTION_TRUE": "Yes, I would like to share my progress", + "I18N_LEARNER_GROUP_SEARCH_BY_USERNAME": "Search By Username", + "I18N_LEARNER_GROUP_SECTION_FEATURE_INFO_DESC": "Facilitators can invite you to join groups with other learners. Once you become a member of a group, you will be able to see the learner group syllabus. You can also choose to share your progress with the facilitator. 
You can change your sharing permissions at any time.", + "I18N_LEARNER_GROUP_SECTION_FEATURE_INFO_TITLE": "By joining a learner group, you can", + "I18N_LEARNER_GROUP_SHOWING_PROGRESS_FOR_LEARNER": "Showing progress for", + "I18N_LEARNER_GROUP_SKILLS_ANALYSIS_SECTION": "Skills Analysis", + "I18N_LEARNER_GROUP_SKILLS_MASTERED_SECTION": "Skills Mastered", + "I18N_LEARNER_GROUP_SKILLS_SECTION_PROGRESS_DESCRIPTION": "This section shows common skills which the learners of this group are struggling with.", + "I18N_LEARNER_GROUP_STORIES_SECTION_PROGRESS_DESCRIPTION": "This section shows common stories which the learners of this group have completed.", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "View Details", + "I18N_LEARNER_GROUP_SYLLABUS_COMPLETION": "complete", + "I18N_LEARNER_GROUP_SYLLABUS_ITEM_NOT_STARTED_YET": "Not started yet", + "I18N_LEARNER_GROUP_SYLLABUS_LESSONS": "lessons", + "I18N_LEARNER_GROUP_SYLLABUS_TAB": "Syllabus", + "I18N_LEARNER_GROUP_USER_STORIES_PROGRESS": "Progress in Assigned Stories", + "I18N_LEARNER_GROUP_VIEW_DETAILS": "View Details", + "I18N_LEARNER_GROUP_VIEW_OVERVIEW_SUMMARY_TITLE": "Explore your progress through your lessons:", + "I18N_LEARNER_GROUP_VIEW_PREFERENCES": "GROUP PREFERENCES", + "I18N_LEARNER_GROUP_WITHDRAW_INVITE": "Withdraw", + "I18N_LEARNT_TOPIC": "Learnt <[topicName]>", + "I18N_LEARN_TOPIC": "Learn <[topicName]>", + "I18N_LEAVE_LEARNER_GROUP": "LEAVE GROUP", + "I18N_LEAVE_LEARNER_GROUP_MODAL_BODY": "Are you sure you want to leave <[groupName]>?", + "I18N_LEAVE_LEARNER_GROUP_MODAL_BUTTON": "Leave", + "I18N_LEAVE_LEARNER_GROUP_MODAL_HEADER": "Leave Learner Group?", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "LESSON AUTHORS", + "I18N_LESSON_INFO_HEADER": "Lesson Info", + "I18N_LESSON_INFO_TOOLTIP_MESSAGE": "You've reached a checkpoint. Great job! 
View your progress and other lesson information here.", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "You have completed this", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Already added to playlist", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "Add to 'Play Later' list", @@ -445,9 +897,9 @@ "I18N_LIBRARY_GROUPS_FEATURED_ACTIVITIES": "Featured Activities", "I18N_LIBRARY_GROUPS_HUMANITIES": "Humanities", "I18N_LIBRARY_GROUPS_LANGUAGES": "Languages", - "I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS" : "Mathematics & Statistics", + "I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS": "Mathematics & Statistics", "I18N_LIBRARY_GROUPS_RECENTLY_PUBLISHED": "Recently Published", - "I18N_LIBRARY_GROUPS_SCIENCE" : "Science", + "I18N_LIBRARY_GROUPS_SCIENCE": "Science", "I18N_LIBRARY_GROUPS_SOCIAL_SCIENCE": "Social Science", "I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS": "Top-Rated Explorations", "I18N_LIBRARY_INCOMPLETE_ACTIVITY_ICON": "You're partway through this activity.", @@ -461,6 +913,7 @@ "I18N_LIBRARY_NO_OBJECTIVE": "No objective specified.", "I18N_LIBRARY_N_CATEGORIES": "{categoriesCount, plural, =1{1 Category} other{# Categories}}", "I18N_LIBRARY_N_LANGUAGES": "{languagesCount, plural, =1{1 Language} other{# Languages}}", + "I18N_LIBRARY_PAGE_BROWSE_MODE_TITLE": "Find explorations to learn from - Oppia", "I18N_LIBRARY_PAGE_TITLE": "Community Library Lessons | Oppia", "I18N_LIBRARY_RATINGS_TOOLTIP": "Ratings", "I18N_LIBRARY_SEARCH_PLACEHOLDER": "What are you curious about?", @@ -470,28 +923,37 @@ "I18N_LICENSE_PAGE_LICENSE_HEADING": "License", "I18N_LICENSE_PAGE_PARAGRAPH_1": "All content in Oppia's lessons is licensed under CC-BY-SA 4.0.", "I18N_LICENSE_PAGE_PARAGRAPH_2": "The software powering Oppia is open source, and its code is released under an Apache 2.0 license.", + "I18N_LICENSE_PAGE_TITLE": "License Page | Oppia", "I18N_LICENSE_TERMS_HEADING": "License Terms", + "I18N_LOGIN_PAGE_TITLE": "Sign in | Oppia", "I18N_LOGOUT_LOADING": "Logging out", + 
"I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "Logout | Oppia", "I18N_LOGOUT_PAGE_TITLE": "Logout", "I18N_MATH_COURSE_DETAILS": "Oppia’s curated math foundations course teaches the basic building blocks of math, covering essential concepts like addition, multiplication and fractions. Once you have mastered these basic concepts, you can move onto more advanced lessons! Each topic builds on the previous one, so you can start from the beginning and complete the lessons from any skill level, or just dive right in if you need help on a particular topic.", "I18N_MATH_TOPICS_COVERED": "Start from the basics with our first topic, Place Values. Or, if you want to brush up on a specific subject, jump to any topic and dive in!", "I18N_MODAL_CANCEL_BUTTON": "Cancel", "I18N_MODAL_CONTINUE_BUTTON": "Continue", + "I18N_MODAL_REMOVE_BUTTON": "Remove", "I18N_NEXT_LESSON": "Next Lesson", + "I18N_NO": "No", + "I18N_NO_RESULTS_FOUND_FOR_MATCHING_USERNAME": "No results found for matching username.", "I18N_ONE_SUBSCRIBER_TEXT": "You have 1 subscriber.", "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Partnerships", + "I18N_PARTNERSHIPS_PAGE_TITLE": "Partnerships | Oppia", "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "Pending Account Deletion", "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "Account To Be Deleted", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1": "Your account is scheduled for deletion, and will be deleted in around 24 hours. You will be notified by email after the deletion is completed.", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1_HEADING": "Deletion procedure in progress", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2": "This action will delete this user account and also all private data associated with this account. Data that is already public will be anonymized so that it cannot be associated with this account. 
The ownership of some of the already-public data might be transferred to the community.", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2_HEADING": "Deletion details", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_TITLE": "Pending Account Deletion | Oppia", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_1": "Everyone is welcome to play and give feedback on published explorations. With everyone's help, we can continuously improve the lessons on the site and make them as effective as possible. ", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_2": "Use good judgement when publishing explorations. Explorations should have significant educational value and cannot contain advertising, spam, vandalism and/or abuse.", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_3": "Be a good citizen. Creating multiple accounts, abusing the feedback systems, using explorations to trick users, or other similar antisocial behavior will not be tolerated and may result in account suspension.", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_HEADING": "Community Guidelines", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_TEXT": "If you need any clarification on these guidelines, please feel free to ask on our forum.", "I18N_PLAYBOOK_HEADING": "Creator Guidelines", + "I18N_PLAYBOOK_PAGE_TITLE": "Creator Guidelines | Oppia", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_HEADING": "Making Your Explorations Publishable", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_1": "Teach something meaningful - Present information that's new to the target audience — don't just test knowledge they're already assumed to have. Also, if you'd like to teach a topic covered by an existing exploration, consider submitting feedback for the current exploration to help improve it instead — it's easier!", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_2": "Teach more than a single factoid - When picking topics, try and pick either an involved, tricky concept that has nuances and depth, or a collection of related, interesting facts. 
It can also be nice to have a sense of progression and challenge, so that the learner gets the chance to apply a concept they've just learned to an unseen situation.", @@ -578,6 +1040,9 @@ "I18N_PLAYER_UNRATED": "Unrated", "I18N_PLAYER_VIEWS_TOOLTIP": "Views", "I18N_PRACTICE_SESSION_PAGE_BREADCRUMB": "Practice Session", + "I18N_PRACTICE_SESSION_PAGE_TITLE": "Practice Session: <[topicName]> - Oppia", + "I18N_PRACTICE_SESSION_START_BUTTON_TEXT": "Start Practicing", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "Audio language", "I18N_PREFERENCES_BIO": "Bio", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "This field is optional. Anything you write here is public and world-viewable.", "I18N_PREFERENCES_BREADCRUMB": "Preferences", @@ -591,9 +1056,14 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "Receive news and updates about the site", "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "Receive emails when a creator you've subscribed to publishes a new exploration", "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "We were not able to add you to our mailing list automatically. Please visit the following link to sign up to our mailing list: ", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "Export account", + "I18N_PREFERENCES_EXPORT_ACCOUNT_INFO_TEXT": "This will download your Oppia account data as a JSON formatted text file.", + "I18N_PREFERENCES_EXPORT_ACCOUNT_WARNING_TEXT": "Please do not leave this page. Your data is currently being loaded and will be downloaded as a JSON formatted text file upon completion. If something goes wrong, please contact", "I18N_PREFERENCES_HEADING": "Preferences", "I18N_PREFERENCES_HEADING_SUBTEXT": "Any changes that you make on this page will be auto-saved.", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "You've not subscribed to any creators yet. Feel free to subscribe to your favourite author by clicking on the 'subscribe' button in the author profile page. 
By subscribing to an author, you will be notified by e-mail when the author publishes a new lesson.", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "Impact", + "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE": "Preferences | Oppia", "I18N_PREFERENCES_PAGE_TITLE": "Change your profile preferences - Oppia", "I18N_PREFERENCES_PICTURE": "Picture", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "Preferred Audio Language", @@ -601,8 +1071,9 @@ "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE_PLACEHOLDER": "Preferred Audio Language", "I18N_PREFERENCES_PREFERRED_DASHBOARD": "Preferred Dashboard", "I18N_PREFERENCES_PREFERRED_DASHBOARD_EXPLAIN": "This is the dashboard that will be shown by default on login.", - "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "Preferred Exploration Languages", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "Preferred exploration languages", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "These languages will be selected by default when you search the gallery for explorations.", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": "Select preferred languages.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "Preferred Site Language", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "This is the language in which the site is shown.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "Preferred Site Language", @@ -610,15 +1081,25 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Drag to crop and resize:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Error: Could not read image file.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Upload Profile Picture", + "I18N_PREFERENCES_SEARCH_LABEL": "Search", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Select preferred languages...", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "Site language", "I18N_PREFERENCES_SUBJECT_INTERESTS": "Subject Interests", + "I18N_PREFERENCES_SUBJECT_INTERESTS_ERROR_TEXT": "Subject interests must be unique and in lowercase.", 
"I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "E.g.: mathematics, computer science, art, ...", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "Add a new subject interest (using lowercase letters and spaces)...", + "I18N_PREFERENCES_SUBJECT_INTERESTS_LABEL": "New Subject Interests", "I18N_PREFERENCES_SUBJECT_INTERESTS_PLACEHOLDER": "Enter subject interests...", "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "Creators you've subscribed to", "I18N_PREFERENCES_USERNAME": "Username", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Not yet selected", + "I18N_PRIVACY_POLICY_PAGE_TITLE": "Privacy Policy | Oppia", "I18N_PROFILE_NO_EXPLORATIONS": "This user hasn't created or edited any explorations yet.", + "I18N_PROFILE_PAGE_TITLE": "Profile | Oppia", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "Do you want to continue?", + "I18N_PROGRESS_REMINDER_MODAL_HEADER": "You have completed <[progress]> of", + "I18N_PROGRESS_REMINDER_RESTART_LESSON": "No, restart from the beginning", + "I18N_PROGRESS_REMINDER_RESUME_LESSON": "Yes, resume the lesson", "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Score Breakdown", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "My Dashboard", "I18N_QUESTION_PLAYER_NEW_SESSION": "Replay", @@ -629,26 +1110,57 @@ "I18N_QUESTION_PLAYER_SKILL_DESCRIPTIONS": "Skill Descriptions", "I18N_QUESTION_PLAYER_TEST_FAILED": "Session failed. Please review the skills and try again", "I18N_QUESTION_PLAYER_TEST_PASSED": "Session complete. Well done!", + "I18N_REDIRECTION_TO_STUCK_STATE_MESSAGE": "Seems like you are a little stuck here. Let's review the concepts via a short revision pathway.", + "I18N_REFRESHER_EXPLORATION_MODAL_BODY": "It looks like you're having some trouble with this question. 
Would you like to try a short exploration to refresh your memory, and return here after you've completed that?", + "I18N_REFRESHER_EXPLORATION_MODAL_TITLE": "Would you like a refresher?", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "Registration Session Expired", "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "Sorry, your registration session has expired. Please click \"Continue Registration\" to restart the process.", + "I18N_RELEASE_COORDINATOR_PAGE_TITLE": "Oppia Release Coordinator Panel", "I18N_RESET_CODE": "Reset Code", + "I18N_RESTART_EXPLORATION_BUTTON": "Restart lesson", "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Review Test", + "I18N_REVIEW_TEST_PAGE_TITLE": "Review Test: <[storyName]> - Oppia", + "I18N_SAVE_BUTTON_ALERT_TOOLTIP": "Progress cannot be saved if you haven’t reached the first checkpoint.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_1": "Your learning progress will be automatically saved if you have an account.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "Already have an account? 
", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_3": "Use the link below to save progress for 72 hours.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_5": "Write or copy the link below", "I18N_SAVE_PROGRESS": "Log in or sign up to save your progress and play through the next lesson.", + "I18N_SAVE_PROGRESS_BUTTON_TEXT": "Copy", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "Copied!", + "I18N_SAVE_PROGRESS_TEXT": "Save Progress", "I18N_SHARE_LESSON": "Share this lesson", + "I18N_SHOW_LESS": "Show Less", + "I18N_SHOW_MORE": "Show More", "I18N_SHOW_SOLUTION_BUTTON": "Show Solution", - "I18N_SIDEBAR_ABOUT_LINK": "About Oppia", + "I18N_SIDEBAR_ABOUT_LINK": "About Us", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "About Oppia Foundation", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM": "Classroom", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Basic Mathematics", "I18N_SIDEBAR_CONTACT_US": "Contact Us", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "We are here to help with any questions you have.", "I18N_SIDEBAR_DONATE": "Donate", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "Your contributions help provide quality education to all.", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Get Started", + "I18N_SIDEBAR_GET_INVOLVED": "Get Involved", + "I18N_SIDEBAR_HOME": "Home", + "I18N_SIDEBAR_LEARN": "Learn", "I18N_SIDEBAR_LIBRARY_LINK": "Library", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "Math Foundations", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "Beginner friendly lessons to help you get started in math.", "I18N_SIDEBAR_OPPIA_FOUNDATION": "The Oppia Foundation", "I18N_SIDEBAR_PARTNERSHIPS": "Partnerships", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "Bring quality education to students in your region.", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "Addition And Subtraction", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "Community Library", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "Additional resources made by the community.", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": 
"Multiplication", + "I18N_SIDEBAR_SUBMENU_PLACE_VALUES": "Place Values", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "See All Lessons", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Teach with Oppia", "I18N_SIDEBAR_VOLUNTEER": "Volunteer", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "Join our global team to create and improve lessons.", "I18N_SIGNIN_LOADING": "Signing in", "I18N_SIGNIN_PAGE_TITLE": "Sign in", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "By checking the box to the left of this text, you acknowledge, agree, and accept to be bound by the <[sitename]> Terms of Use, found here.", @@ -674,6 +1186,7 @@ "I18N_SIGNUP_LOADING": "Loading", "I18N_SIGNUP_PAGE_TITLE": "Join the community - Oppia", "I18N_SIGNUP_REGISTRATION": "Registration", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "Don't ask me again", "I18N_SIGNUP_SEND_ME_NEWS": "Send me news and updates about the site", "I18N_SIGNUP_SITE_DESCRIPTION": "<[sitename]> is an open commons of learning resources. All material on it is freely reusable and shareable.", "I18N_SIGNUP_SITE_OBJECTIVE": "<[sitename]> exists to foster the creation and continual improvement of a set of high-quality learning resources that are freely available to anyone.", @@ -681,9 +1194,14 @@ "I18N_SIGNUP_USERNAME": "Username", "I18N_SIGNUP_USERNAME_EXPLANATION": "Your username will be shown next to your contributions.", "I18N_SIGNUP_WHY_LICENSE": "Why CC-BY-SA?", + "I18N_SKILL_LEVEL_BEGINNER": "Beginner", + "I18N_SKILL_LEVEL_INTERMIDIATE": "Intermediate", + "I18N_SKILL_LEVEL_NEEDS_WORK": "Needs Work", + "I18N_SKILL_LEVEL_PROFICIENT": "Proficient", "I18N_SOLICIT_ANSWER_DETAILS_FEEDBACK": "Okay, now let's go back to your answer.", "I18N_SOLICIT_ANSWER_DETAILS_QUESTION": "Could you explain why you picked this answer?", "I18N_SOLUTION_EXPLANATION_TITLE": "Explanation:", + "I18N_SOLUTION_NEED_HELP": "Would you like to view the complete solution?", "I18N_SOLUTION_TITLE": "Solution", "I18N_SPLASH_BENEFITS_ONE": "Personalized Learning", "I18N_SPLASH_BENEFITS_THREE": 
"Easy-to-Follow Lessons", @@ -724,9 +1242,65 @@ "I18N_SPLASH_VOLUNTEERS_CONTENT": "No matter who you are, you can find a home at Oppia. We always need more people to improve lessons by suggesting questions, contributing graphics, or translating lessons.", "I18N_SPLASH_VOLUNTEERS_TITLE": "Run By the Community", "I18N_START_HERE": "Click here to start!", + "I18N_STORY_3M5VBajMccXO_DESCRIPTION": "In this story, we'll join Matthew as he visits a bakery to buy a cake. Unfortunately, he doesn't have enough money for a full cake. So, Mr. Baker helps him out by dividing Matthew's chosen cake into smaller pieces that he can afford. What happens next? Play the lessons to find out!", + "I18N_STORY_3M5VBajMccXO_TITLE": "Matthew Visits the Bakery", + "I18N_STORY_JhiDkq01dqgC_DESCRIPTION": "Join Ava and her father as they go to the amusement park. Help them by using your knowledge of expressions and equations to solve problems they face!", + "I18N_STORY_JhiDkq01dqgC_TITLE": "A Day at the Amusement Park", + "I18N_STORY_Qu6THxP29tOy_DESCRIPTION": "Learn how to add and subtract with Maya, Omar and their grandpa, as they make pizza together!", + "I18N_STORY_Qu6THxP29tOy_TITLE": "Maya, Omar and Malik make a pizza!", + "I18N_STORY_RRVMHsZ5Mobh_DESCRIPTION": "In this story, we'll follow Jaime and his sister Nic as they learn how to represent and read the value of a number.", + "I18N_STORY_RRVMHsZ5Mobh_TITLE": "Jaime's Adventures in the Arcade", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - Completed!", + "I18N_STORY_VIEWER_PAGE_TITLE": "Learn <[topicName]> | <[storyTitle]> | Oppia", + "I18N_STORY_ialKSV0VYV0B_DESCRIPTION": "Meet James and his uncle as they find out how they can use ratios to make delicious drinks!", + "I18N_STORY_ialKSV0VYV0B_TITLE": "James's Smoothie Adventures", + "I18N_STORY_rqnxwceQyFnv_DESCRIPTION": "Join Nina as she uses division techniques to help her Mom and Sandra at the market!", + "I18N_STORY_rqnxwceQyFnv_TITLE": "Nina Visits the Market", + 
"I18N_STORY_vfJDB3JAdwIx_DESCRIPTION": "Join Aria and her father Omar as they use multiplication techniques to plant seeds in their garden!", + "I18N_STORY_vfJDB3JAdwIx_TITLE": "Aria wants to plant a garden", + "I18N_STRUGGLING_WITH_SKILL": "<[username]> is struggling with this skill", "I18N_SUBSCRIBE_BUTTON_TEXT": "Subscribe", - "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Next Skill", + "I18N_SUBTOPIC_0abdeaJhmfPm_adding-fractions_TITLE": "Adding Fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_comparing-fractions_TITLE": "Comparing Fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_dividing-fractions_TITLE": "Dividing Fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_equivalent-fractions_TITLE": "Equivalent Fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_fractions-of-a-group_TITLE": "Fractions of a Group", + "I18N_SUBTOPIC_0abdeaJhmfPm_mixed-numbers_TITLE": "Mixed Numbers", + "I18N_SUBTOPIC_0abdeaJhmfPm_multiplying-fractions_TITLE": "Multiplying Fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_number-line_TITLE": "The Number Line", + "I18N_SUBTOPIC_0abdeaJhmfPm_subtracting-fractions_TITLE": "Subtracting Fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_what-is-a-fraction_TITLE": "What Is a Fraction?", + "I18N_SUBTOPIC_5g0nxGUmx5J5_calculations-with-ratios_TITLE": "Calculations with Ratios", + "I18N_SUBTOPIC_5g0nxGUmx5J5_combining-ratios_TITLE": "Combining Ratios", + "I18N_SUBTOPIC_5g0nxGUmx5J5_equivalent-ratios_TITLE": "Equivalent Ratios", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "What is a Ratio?", + "I18N_SUBTOPIC_C4fqwrvqWpRm_basic-concepts_TITLE": "Basic Concepts of Multiplication ", + "I18N_SUBTOPIC_C4fqwrvqWpRm_memorizing-expressions_TITLE": "Memorizing Multiplication Expressions", + "I18N_SUBTOPIC_C4fqwrvqWpRm_multiplication-techniques_TITLE": "Techniques of multiplication", + "I18N_SUBTOPIC_C4fqwrvqWpRm_rules-to-simplify_TITLE": "Rules to simplify multiplication", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Next Skill:", + "I18N_SUBTOPIC_VIEWER_PAGE_TITLE": "Review <[subtopicTitle]> | Oppia", + 
"I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "Previous Skill:", + "I18N_SUBTOPIC_dLmjjMDbCcrf_algebraic-expressions_TITLE": "Simplifying algebraic expressions", + "I18N_SUBTOPIC_dLmjjMDbCcrf_modelling-scenarios_TITLE": "Modelling real-world scenarios using equations", + "I18N_SUBTOPIC_dLmjjMDbCcrf_order-of-operations_TITLE": "Order of Operations", + "I18N_SUBTOPIC_dLmjjMDbCcrf_problem-solving_TITLE": "Problem-Solving Strategies", + "I18N_SUBTOPIC_dLmjjMDbCcrf_solving-equations_TITLE": "Manipulating and solving equations", + "I18N_SUBTOPIC_dLmjjMDbCcrf_variables_TITLE": "Representing Unknowns with Variables", + "I18N_SUBTOPIC_iX9kYCjnouWN_comparing-numbers_TITLE": "Comparing Numbers", + "I18N_SUBTOPIC_iX9kYCjnouWN_naming-numbers_TITLE": "Naming Numbers", + "I18N_SUBTOPIC_iX9kYCjnouWN_place-names-and-values_TITLE": "The Place Names and Their Values ", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE": "Rounding Numbers", + "I18N_SUBTOPIC_qW12maD4hiA8_basic-concepts_TITLE": "Basic concepts of Division", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "Problem Solving", + "I18N_SUBTOPIC_qW12maD4hiA8_techniques-of-division_TITLE": "Techniques of Division", + "I18N_SUBTOPIC_sWBXKH4PZcK6_adding-numbers_TITLE": "Adding Numbers", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE": "Relationship Between Addition and Subtraction", + "I18N_SUBTOPIC_sWBXKH4PZcK6_estimation_TITLE": "Estimation", + "I18N_SUBTOPIC_sWBXKH4PZcK6_sequences _TITLE": "Sequences ", + "I18N_SUBTOPIC_sWBXKH4PZcK6_subtracting-numbers_TITLE": "Subtracting Numbers", + "I18N_SYLLABUS_SKILL_TITLE": "Skill", + "I18N_SYLLABUS_STORY_TITLE": "Story", "I18N_TEACH_BENEFITS_ONE": "Effective, High-Quality Learning for All Ages", "I18N_TEACH_BENEFITS_THREE": "Always Free and Easy-to-use", "I18N_TEACH_BENEFITS_TITLE": "Our Benefits", @@ -735,12 +1309,13 @@ "I18N_TEACH_PAGE_CLASSROOM_BUTTON": "VISIT CLASSROOM", "I18N_TEACH_PAGE_CLASSROOM_CONTENT": "In the Classroom, you can find a set of lessons that the Oppia 
team has designed and tested to make sure they’re effective and fun for all learners. All of the lessons have been reviewed by teachers and experts, so you can feel assured that your students are getting an effective education, while learning at their own pace.", "I18N_TEACH_PAGE_CLASSROOM_TITLE": "Learn from Oppia’s Tested and Verified Lessons", - "I18N_TEACH_PAGE_CONTENT" : "Oppia is an engaging, new approach to online learning that specifically is made to ensure that everyone has access to quality education.", + "I18N_TEACH_PAGE_CONTENT": "Oppia is an engaging, new approach to online learning that specifically is made to ensure that everyone has access to quality education.", "I18N_TEACH_PAGE_HEADING": "Oppia for Parents, Teachers, and Guardians", "I18N_TEACH_PAGE_LIBRARY_BUTTON": "BROWSE LIBRARY", "I18N_TEACH_PAGE_LIBRARY_CONTENT": "Educators and community members around the world use Oppia’s lesson creation platform as a way to create and share lessons. You can find over 20,000 lessons for 17 different subjects in our Exploration Library, and maybe you’ll be inspired to create your own!", "I18N_TEACH_PAGE_LIBRARY_TITLE": "Explore Lessons Made By the Community", "I18N_TEACH_PAGE_SIX_TITLE": "Get Started Learning Today", + "I18N_TEACH_PAGE_TITLE": "Guide to Oppia for Parents and Teachers | Oppia", "I18N_TEACH_STUDENT_DETAILS_1": "Riya Sogani", "I18N_TEACH_STUDENT_DETAILS_2": "Wala Awad", "I18N_TEACH_STUDENT_DETAILS_3": "Himanshu Taneja, Kurukshetra, India", @@ -748,57 +1323,108 @@ "I18N_TEACH_TESTIMONIAL_1": "“I am grateful to have had the opportunity to educate underprivileged Indian children and bridge the gaps in their understanding of critical math concepts. Watching these students’ confidence increase as they learnt was worth the extra hours.”", "I18N_TEACH_TESTIMONIAL_2": "“Oppia is the first of its type! 
It assists students to learn all they need about a specific topic in an attractive and engaging way; it also encourages them to use smart devices for their own good.”", "I18N_TEACH_TESTIMONIAL_3": "“I never expected the students to learn tech and do maths lessons so fast. It's their first exposure to smarttech and they were really struggling to handle them at first. Now, I feel so elated to see them doing the Oppia lessons even before I enter the class!”", + "I18N_TERMS_PAGE_TITLE": "Terms of Use | Oppia", "I18N_THANKS_PAGE_BREADCRUMB": "Thanks", + "I18N_THANKS_PAGE_TITLE": "Thanks | Oppia", + "I18N_TIME_FOR_BREAK_BODY_1": "You seem to be submitting answers very quickly. Are you starting to get tired?", + "I18N_TIME_FOR_BREAK_BODY_2": "If so, consider taking a break! You can come back later.", + "I18N_TIME_FOR_BREAK_FOOTER": "I'm ready to continue the lesson", + "I18N_TIME_FOR_BREAK_TITLE": "Time for a break?", + "I18N_TOPIC_0abdeaJhmfPm_DESCRIPTION": "You'll often need to talk about parts of an object: a recipe might ask for half a cup of flour, or you might spill part of a bottle of milk. In this topic, you'll learn how to use fractions to understand and describe situations like these.", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "Fractions", + "I18N_TOPIC_5g0nxGUmx5J5_DESCRIPTION": "Ratios are useful for calculating how much ingredients to use if you have a recipe for four people but want to cook for two. In this topic, you'll learn how to use ratios to easily compare the size of one thing to another.", + "I18N_TOPIC_5g0nxGUmx5J5_TITLE": "Ratios and Proportional Reasoning", + "I18N_TOPIC_C4fqwrvqWpRm_DESCRIPTION": "If you bought 60 boxes of five cakes, how many cakes would you have in total? 
In this topic, you'll learn how to use multiplication to solve problems like this (without having to add lots of numbers together each time!).", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "Multiplication", + "I18N_TOPIC_LANDING_PAGE_TITLE": "<[topicTitle]> | <[topicTagline]> | Oppia", + "I18N_TOPIC_LEARN": "Learn", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 lesson} other{# lessons}}", + "I18N_TOPIC_TITLE": "Topic", "I18N_TOPIC_VIEWER_CHAPTER": "Chapter", "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{1 chapter} other{# chapters}}", + "I18N_TOPIC_VIEWER_COMING_SOON": "Coming Soon!", + "I18N_TOPIC_VIEWER_COMING_SOON_LESSONS": "Come back later when lessons are available for this topic.", + "I18N_TOPIC_VIEWER_COMING_SOON_PRACTICE": "Come back later when practice questions are available for this topic.", "I18N_TOPIC_VIEWER_DESCRIPTION": "Description", "I18N_TOPIC_VIEWER_LESSON": "Lesson", "I18N_TOPIC_VIEWER_LESSONS": "Lessons", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "Come back later when lessons are available for this topic.", "I18N_TOPIC_VIEWER_MASTER_SKILLS": "Master Skills for <[topicName]>", + "I18N_TOPIC_VIEWER_NO_QUESTION_WARNING": "There are no questions created yet for the selected subtopic(s).", + "I18N_TOPIC_VIEWER_PAGE_TITLE": "<[topicName]> | <[pageTitleFragment]> | Oppia", "I18N_TOPIC_VIEWER_PRACTICE": "Practice", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_MESSAGE": "The Practice feature is still in beta and is only available in English. 
Would you like to continue?", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_TITLE": "Confirm Practice Language", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_TAG": "(Beta)", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "Come back later when practice questions are available for this topic.", "I18N_TOPIC_VIEWER_REVISION": "Revision", "I18N_TOPIC_VIEWER_SELECT_SKILLS": "Select the skills from the <[topicName]> lessons you would like to practice.", "I18N_TOPIC_VIEWER_SKILL": "Skill", "I18N_TOPIC_VIEWER_SKILLS": "Skills", "I18N_TOPIC_VIEWER_START_PRACTICE": "Start", "I18N_TOPIC_VIEWER_STORIES": "Stories", + "I18N_TOPIC_VIEWER_STORIES_HEAD": "Stories you can play", "I18N_TOPIC_VIEWER_STORY": "Story", "I18N_TOPIC_VIEWER_STUDY_SKILLS": "Study Skills for <[topicName]>", "I18N_TOPIC_VIEWER_STUDY_SKILLS_SUBTITLE": "Use the following Review Cards to help you study skills about <[topicName]>.", "I18N_TOPIC_VIEWER_VIEW_ALL": "View All", "I18N_TOPIC_VIEWER_VIEW_LESS": "View Less", + "I18N_TOPIC_dLmjjMDbCcrf_DESCRIPTION": "You'll often need to solve problems with unknown numbers -- for example, if you've bought an item that is on sale and want to find out the original price. In this topic, you'll learn how to do this with equations, expressions, and formulae.", + "I18N_TOPIC_dLmjjMDbCcrf_TITLE": "Expressions and Equations", + "I18N_TOPIC_iX9kYCjnouWN_DESCRIPTION": "Did you know that all possible numbers of things can be expressed using just ten digits (0,1,2,3,...,9)? In this topic, we'll learn how we can use place values to do that, and see why \"5\" has a different value in \"25\" and \"2506\".", + "I18N_TOPIC_iX9kYCjnouWN_TITLE": "Place Values", + "I18N_TOPIC_qW12maD4hiA8_DESCRIPTION": "If you had thirty-two tomatoes to share among four people, how many tomatoes should each person get? 
In this topic, you'll learn to use division to work out how to split something into parts.", + "I18N_TOPIC_qW12maD4hiA8_TITLE": "Division", + "I18N_TOPIC_sWBXKH4PZcK6_DESCRIPTION": "If you had four eggs and your friend gave you 37 more, how many would you have in total? How about if you then lost eight? In this topic, you'll learn how to solve problems like these with the basic skills of addition and subtraction.", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "Addition and Subtraction", "I18N_TOPNAV_ABOUT": "About", "I18N_TOPNAV_ABOUT_OPPIA": "About Oppia", "I18N_TOPNAV_ADMIN_PAGE": "Admin Page", + "I18N_TOPNAV_ANDROID_APP_DESCRIPTION": "Oppia’s early Android app is now available in English and Brazilian Portuguese. Try it out and provide feedback!", + "I18N_TOPNAV_ANDROID_APP_HEADING": "Android App", "I18N_TOPNAV_BLOG": "Blog", "I18N_TOPNAV_BLOG_DASHBOARD": "Blog Dashboard", - "I18N_TOPNAV_CLASSROOM": "Classroom", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Basic Mathematics", "I18N_TOPNAV_CONTACT_US": "Contact Us", + "I18N_TOPNAV_CONTACT_US_DESCRIPTION": "We are here to help with any questions you have.", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Contributor Dashboard", "I18N_TOPNAV_CREATOR_DASHBOARD": "Creator Dashboard", "I18N_TOPNAV_DONATE": "Donate", "I18N_TOPNAV_DONATE_DESCRIPTION": "Your contributions help provide quality education to all.", + "I18N_TOPNAV_FACILITATOR_DASHBOARD": "Facilitator Dashboard", "I18N_TOPNAV_FORUM": "Forum", "I18N_TOPNAV_GET_INVOLVED": "Get Involved", "I18N_TOPNAV_GET_STARTED": "Get Started", + "I18N_TOPNAV_HOME": "Home", + "I18N_TOPNAV_LEARN": "Learn", "I18N_TOPNAV_LEARNER_DASHBOARD": "Learner Dashboard", - "I18N_TOPNAV_LIBRARY": "Library", + "I18N_TOPNAV_LEARNER_GROUP": "Learner Group", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "Foundational lessons to get started in Math.", + "I18N_TOPNAV_LEARN_HEADING": "Ways to learn more", + "I18N_TOPNAV_LEARN_LINK_1": "See All Lessons", + "I18N_TOPNAV_LEARN_LINK_2": "Continue Learning", + "I18N_TOPNAV_LIBRARY": 
"Community Library", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "Additional resources made by the community to help you learn more.", "I18N_TOPNAV_LOGOUT": "Logout", "I18N_TOPNAV_MODERATOR_PAGE": "Moderator Page", "I18N_TOPNAV_OPPIA_FOUNDATION": "The Oppia Foundation", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Participation Playbook", - "I18N_TOPNAV_PARTNERSHIPS": "Partnerships", + "I18N_TOPNAV_PARTNERSHIPS": "Schools and Organizations", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "Partner and bring Oppia to your school, community or area.", "I18N_TOPNAV_PREFERENCES": "Preferences", "I18N_TOPNAV_SIGN_IN": "Sign in", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Sign in with Google", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Teach with Oppia", "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "Topics and Skills Dashboard", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "Try it today!", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "Join our global team to create and improve lessons.", "I18N_TOTAL_SUBSCRIBERS_TEXT": "You have a total of <[totalSubscribers]> subscribers.", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Unsubscribe", + "I18N_VIEW_ALL_TOPICS": "View all <[classroomName]> topics", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Volunteer", + "I18N_VOLUNTEER_PAGE_TITLE": "Volunteer | Oppia", "I18N_WARNING_MODAL_DESCRIPTION": "This will show the full solution. Are you sure?", "I18N_WARNING_MODAL_TITLE": "Warning!", - "I18N_WORKED_EXAMPLE": "Worked Example" + "I18N_WORKED_EXAMPLE": "Worked Example", + "I18N_YES": "Yes" } diff --git a/assets/i18n/eo.json b/assets/i18n/eo.json index df83007758b9..61782a4a1bc8 100644 --- a/assets/i18n/eo.json +++ b/assets/i18n/eo.json @@ -32,6 +32,9 @@ "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Malnetoj", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Publikigita", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Ĉi tiu karto estas longega; eble tio malinteresas lernantojn. 
Konsideru mallongigon, aŭ disigon en du kartojn.", + "I18N_CLASSROOM_MATH_TITLE": "Matematiko", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Daŭrigi", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "KOLEKTO", "I18N_CONTACT_PAGE_HEADING": "Partoprenu!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Dankon pro via intereso pri helpado de la projekto Oppia!", "I18N_CONTACT_PAGE_PARAGRAPH_10_HEADING": "Plibonigi kaj mastrumi la retejon", @@ -355,6 +358,7 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Treni por stuci kaj regrandigi:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Eraro: Bilda dosiero ne povis legiĝi.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Alŝuti Profilan Bildon", + "I18N_PREFERENCES_SEARCH_LABEL": "Serĉi", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Elekti preferatajn lingvojn...", "I18N_PREFERENCES_SUBJECT_INTERESTS": "Interesaj Temoj", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "Ekz.: matematiko, komputiko, arto, ...", @@ -370,7 +374,6 @@ "I18N_SIDEBAR_CONTACT_US": "Kontakti Nin", "I18N_SIDEBAR_DONATE": "Donaci", "I18N_SIDEBAR_FORUM": "Forumo", - "I18N_SIDEBAR_GET_STARTED": "Komenci", "I18N_SIDEBAR_LIBRARY_LINK": "Biblioteko", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Fondaĵo Oppia", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Instrui per Oppia", diff --git a/assets/i18n/es.json b/assets/i18n/es.json index 64ed512edb1e..ad58fccdfea5 100644 --- a/assets/i18n/es.json +++ b/assets/i18n/es.json @@ -65,6 +65,9 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "Guía para maestros", "I18N_ACTION_TIPS_FOR_PARENTS": "Consejos para padres y guardianes", "I18N_ACTION_VISIT_CLASSROOM": "Visitar el aula", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "Dirección de correo electrónico", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Nombre", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "Notificarme", "I18N_ATTRIBUTION_HTML_STEP_ONE": "Copia y pega el HTML", "I18N_ATTRIBUTION_HTML_STEP_TWO": "Asegúrate de que el enlace aparece como \"<[linkText]>\"", "I18N_ATTRIBUTION_HTML_TITLE": "Atribuir en HTML", @@ 
-72,13 +75,20 @@ "I18N_ATTRIBUTION_PRINT_STEP_TWO": "Adjunte una copia del documento <[link]>", "I18N_ATTRIBUTION_PRINT_TITLE": "Atributo en la impresión", "I18N_ATTRIBUTION_TITLE": "Cómo atribuir esta lección para compartir o reutilizar", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_BREADCRUMB": "Perfil del autor", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TOTAL_POSTS_DISPLAY": "<[totalNumber]> publicaciones", "I18N_BLOG_CARD_PREVIEW_CONTEXT": "Esto es cómo la tarjeta de blog aparecerá en la página de inicio y en tu perfil de autor(a).", "I18N_BLOG_CARD_PREVIEW_HEADING": "Preestreno de tarjeta de blog", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_HEADING": "Biografía", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "Editar nombre y biografía", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Nombre", "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "Crear entrada de blog nueva", "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "¡Parece que no has creado cualquier cuento ya!", "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Nueva publicación", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "Guardar", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Borradores", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Publicado", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Blog", "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Añadir imagen miniatura", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Cuerpo", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Cancelar", @@ -202,14 +212,13 @@ "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "Además, exploraciones publicadas y colecciones que no tienen otros dueños se convertirán en propiedad comunitaria.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "Si tiene cualesquiera preguntas o preocupaciones sobre el proceso de eliminar la cuenta, por favor envía un correo electrónico a privacy@oppia.org.", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "Esto te tomará a una página donde puedes eliminar tu cuenta de Oppia.", + "I18N_DEST_IF_STUCK_INFO_TOOLTIP": "Ahora puede especificar una nueva tarjeta en la que puede guiar a los alumnos a 
través de los conceptos utilizados en la pregunta, ¡si realmente se atascan!", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Arrastra una imagen a esta área", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Subir un archivo", "I18N_DONATE_PAGE_BREADCRUMB": "Donar", "I18N_DONATE_PAGE_IMAGE_TITLE": "Su donación generosa financia", - "I18N_DONATE_PAGE_TITLE": "Donar a la
Fundación Oppia", + "I18N_DONATE_PAGE_TITLE": "Donar a la Fundación Oppia", "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Escuche a nuestra comunidad de Oppia", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "En 2012, Oppia comenzó con una idea simple: mejorar la educación de los estudiantes de todo el mundo mientras se mejora la calidad de la enseñanza. Desde entonces, esta visión se ha convertido en una plataforma educativa con más de 11.000 exploraciones que han sido utilizadas por más de 430.000 usuarios en todo el mundo.", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "Haga una donación a la Fundación Oppia, una organización 501(c)(3) registrada, y únase a nosotros para llevar la alegría de enseñar y aprender a personas de todo el mundo.", "I18N_ERROR_DISABLED_EXPLORATION": "Exploración deshabilitada", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Disculpa, pero la exploración que clickeaste está deshabilidada por ahora. Por favor, prueba de nuevo más tarde.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Exploración deshabilitada - Oppia", @@ -332,6 +341,7 @@ "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "Aún no tienes ningún hilo de comentarios activo. Sus comentarios ayudan a mejorar la calidad de nuestras lecciones. ¡Puede hacerlo iniciando cualquiera de nuestras lecciones y enviando sus valiosos comentarios!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "Parece que, por el momento, no tienes colecciones completadas parcialmente. Visita la biblioteca para comenzar una emocionante colección nueva.", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "Parece que no tienes exploraciones parcialmente completas de momento. Visita la biblioteca para comenzar una emocionante exploración nueva.", + "I18N_LEARNER_DASHBOARD_EMPTY_LEARN_SOMETHING_NEW_SECTION": "Parece lograste el límite de selección del objetivo. 
Dirígete a la biblioteca y explora más exploraciones.", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "Empiece por ", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "Establecer un objetivo permite que Oppia le brinde mejores recomendaciones en su panel de control que contribuyan a su viaje de aprendizaje.", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "establecer una meta! ", @@ -372,7 +382,7 @@ "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Enviando...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Plata", "I18N_LEARNER_DASHBOARD_SKILLS": "Habilidades", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "Competencia de Habilidad", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Progreso de habilidades", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Historias completadas", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Suscripciones", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Progreso:", @@ -383,6 +393,9 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Sugerencia", "I18N_LEARNER_DASHBOARD_TOOLTIP": "Colecciones son varias exploraciones relacionadas que se tiene la intención de completar en una secuencia.", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Ver sugerencia", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "Autores de la lección", + "I18N_LESSON_INFO_HEADER": "Información sobre la lección", + "I18N_LESSON_INFO_TOOLTIP_MESSAGE": "Has llegado a un punto de control. ¡Buen trabajo! 
Vea su progreso y otra información de la lección aquí.", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Has completado esto", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Ya añadido a la lista de reproducción", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "Añadir a la lista de «Jugar más tarde»", @@ -454,9 +467,7 @@ "I18N_LIBRARY_VIEWS_TOOLTIP": "Visitas", "I18N_LIBRARY_VIEW_ALL": "Ver todo", "I18N_LICENSE_PAGE_LICENSE_HEADING": "Licencia", - "I18N_LICENSE_PAGE_PARAGRAPH_1": "Todo el contenido de las lecciones de Oppia tiene licencia CC-BY-SA 4.0 .", "I18N_LICENSE_PAGE_PARAGRAPH_2": "El software que potencia a Oppia es de código abierto y su código se publica bajo una licencia Apache 2.0.", - "I18N_LICENSE_TERMS_HEADING": "Términos de licencia", "I18N_LOGOUT_LOADING": "Cerrando sesión", "I18N_LOGOUT_PAGE_TITLE": "Cerrar sesión", "I18N_MATH_COURSE_DETAILS": "El curso de fundamentos matemáticos seleccionado por Oppia enseña los componentes básicos de las matemáticas, cubriendo conceptos esenciales como suma, multiplicación y fracciones. Una vez que hayas dominado estos conceptos básicos, ¡puedes pasar a lecciones más avanzadas! 
Cada tema se basa en el anterior, por lo que puedes comenzar desde el principio y completar las lecciones desde cualquier nivel de habilidad, o simplemente sumergirte directamente si necesitas ayuda sobre un tema en particular.", @@ -607,7 +618,7 @@ "I18N_PROFILE_NO_EXPLORATIONS": "Este usuario aún no ha creado o editado ninguna exploración aún.", "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Aprender más sobre tu resultado", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Mi tablero", - "I18N_QUESTION_PLAYER_NEW_SESSION": "Sesión nueva", + "I18N_QUESTION_PLAYER_NEW_SESSION": "Responder", "I18N_QUESTION_PLAYER_RETRY_TEST": "Volver a intentar Prueba", "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "Regresa al Cuento", "I18N_QUESTION_PLAYER_REVIEW_LOWEST_SCORED_SKILL": "Revisar la habilidad con la puntuación más baja", @@ -622,13 +633,13 @@ "I18N_SHARE_LESSON": "Comparte esta lección", "I18N_SHOW_SOLUTION_BUTTON": "Mostrar solución", "I18N_SIDEBAR_ABOUT_LINK": "Acerca de Oppia", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "Sobre la Fundación Oppia", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM": "Aula", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Matemáticas básicas", "I18N_SIDEBAR_CONTACT_US": "Contáctanos", "I18N_SIDEBAR_DONATE": "Donar", "I18N_SIDEBAR_FORUM": "Foro", - "I18N_SIDEBAR_GET_STARTED": "Comenzar", "I18N_SIDEBAR_LIBRARY_LINK": "Biblioteca", "I18N_SIDEBAR_OPPIA_FOUNDATION": "La fundación Oppia", "I18N_SIDEBAR_PARTNERSHIPS": "Asociaciones", @@ -758,7 +769,6 @@ "I18N_TOPNAV_ADMIN_PAGE": "Página de administración", "I18N_TOPNAV_BLOG": "Blog", "I18N_TOPNAV_BLOG_DASHBOARD": "Panel de blog", - "I18N_TOPNAV_CLASSROOM": "Aula", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Matemáticas básicas", "I18N_TOPNAV_CONTACT_US": "Contáctanos", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Pantalla principal de colaboradores", @@ -767,6 +777,7 @@ "I18N_TOPNAV_FORUM": "Foro", "I18N_TOPNAV_GET_INVOLVED": "Involúcrate", "I18N_TOPNAV_GET_STARTED": "Comenzar", + "I18N_TOPNAV_LEARN": "Aula", 
"I18N_TOPNAV_LEARNER_DASHBOARD": "Pantalla principal de principiantes", "I18N_TOPNAV_LIBRARY": "Biblioteca", "I18N_TOPNAV_LOGOUT": "Salir", diff --git a/assets/i18n/eu.json b/assets/i18n/eu.json index 3197f2789af6..5b9eba95b40c 100644 --- a/assets/i18n/eu.json +++ b/assets/i18n/eu.json @@ -278,7 +278,6 @@ "I18N_SIDEBAR_CONTACT_US": "Jarri harremanetan gurekin", "I18N_SIDEBAR_DONATE": "Dohaintza egin", "I18N_SIDEBAR_FORUM": "Foroa", - "I18N_SIDEBAR_GET_STARTED": "Hasi", "I18N_SIDEBAR_LIBRARY_LINK": "Liburutegia", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Oppia Fundazioa", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Irakatsi Oppia erabiliz", diff --git a/assets/i18n/fa.json b/assets/i18n/fa.json index f392d14c5c68..1e46addcd6b5 100644 --- a/assets/i18n/fa.json +++ b/assets/i18n/fa.json @@ -23,6 +23,8 @@ "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "خطا: ناتوان در خواندن پروندهٔ تصویری", "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "افزودن یک بندانگشتی", "I18N_CLASSROOM_PAGE_COMING_SOON": "به‌زودی", + "I18N_COMPLETE_CHAPTER": "فصل را در <[topicName]> کامل کنید.", + "I18N_CREATE_ACCOUNT": "ایجاد حساب", "I18N_CREATE_ACTIVITY_QUESTION": "چه چیزی را میخواهید بسازید؟", "I18N_CREATE_ACTIVITY_TITLE": "ایجاد یک فعالیت", "I18N_CREATE_COLLECTION": "ایجاد مجموعه", @@ -72,16 +74,33 @@ "I18N_FORMS_TYPE_NUMBER_AT_MOST": "لطفا یک عدد وارد کنید که از <[maxValue]> بزرگتر باشد.", "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "لطفا یک عدد دهدهی معتبر وارد کنید.", "I18N_GET_STARTED_PAGE_TITLE": "شروع کنید", + "I18N_INTERACTIONS_ALGEBRAIC_EXPR_INSTRUCTION": "یک عبارت در اینجا وارد کنید.", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "رفتن به ویرایشگر کد", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "لطفا 0 را در مخرج قرار ندهید", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS_LENGTH": "هیچ یک از اعداد کسر نباید بیشتر از 7 رقم داشته باشند", + "I18N_INTERACTIONS_FRACTIONS_INVALID_FORMAT": "لطفا یک کسر معتبر وارد کنید (مانند 5/3 یا 1 2/3)", + "I18N_INTERACTIONS_FRACTIONS_PROPER_FRACTION": "لطفا پاسخ را با 
بخش کسری مناسب وارد کنید (مانند 1 2/3 به‌جای 5/3).", + "I18N_INTERACTIONS_FRACTIONS_SIMPLEST_FORM": "لطفاً پاسخ را به ساده‌ترین شیوه وارد کنید (مانند 1/3 به‌جای 2/6)", "I18N_INTERACTIONS_GRAPH_ADD_NODE": "افزودن یادداشت", "I18N_INTERACTIONS_GRAPH_DELETE": "حذف", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "نمودار نامعتبر است!", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "ایجاد یک نمودار", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "نمایش نمودار", "I18N_INTERACTIONS_GRAPH_MOVE": "انتقال", "I18N_INTERACTIONS_GRAPH_RESET_BUTTON": "بازنشانی", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "روزآمدسازی برچسب", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "به روزرسانی وزن", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "روی تصویر کلیک کنید", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[یک تصویر برای نمایش انتخاب کنید]", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "روی نقشه کلیک کنید", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "مشاهدهٔ نقشه", "I18N_INTERACTIONS_MUSIC_CLEAR": "پاک‌سازی", "I18N_INTERACTIONS_MUSIC_PLAY": "پخش", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "لطفا یک ارز معتبر وارد کنید (مانند 5$ یا 5 Rs)", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "نمایش ویرایشگر کد", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "لطفا یک نسبت معتبر وارد کنید (مانند 1:2 یا 1:2:3).", + "I18N_INTERACTIONS_RATIO_INCLUDES_ZERO": "نسبت‌ها نمی‌تواند 0 را به عنوان عنصر بپذیرد.", + "I18N_INTERACTIONS_RATIO_INVALID_COLONS": "پاسخ شما دارای دونقطه (:) در کنار هم است.", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "افزودن آیتم", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "اوه, بنظر می رسه شما موارد تکراری تنظیم کرده اید!", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(افزودن یک آیتم در هر خط.)", @@ -93,6 +112,10 @@ "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "نقره‌ای", "I18N_LEARNER_DASHBOARD_SKILLS": "مهارت‌ها", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "پیشرفت:", + "I18N_LEARNER_DASHBOARD_VIEW": "نمایش", + "I18N_LEARNT_TOPIC": "<[topicName]> آموخته شد", + 
"I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "مؤلفان درس", + "I18N_LESSON_INFO_HEADER": "اطلاعات درس", "I18N_LIBRARY_ALL_CATEGORIES": "همه دسته بندی ها", "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "همه دسته بندی ها انتخاب شده", "I18N_LIBRARY_ALL_LANGUAGES": "همهٔ زبان‌ها", @@ -188,11 +211,11 @@ "I18N_PLAYER_UNRATED": "بدون رتبه بندی", "I18N_PLAYER_VIEWS_TOOLTIP": "بازدیدها", "I18N_PREFERENCES_BIO": "شرح‌حال", - "I18N_PREFERENCES_BREADCRUMB": "تنظيمات", + "I18N_PREFERENCES_BREADCRUMB": "ترجیحات", "I18N_PREFERENCES_CANCEL_BUTTON": "لغو", "I18N_PREFERENCES_CHANGE_PICTURE": "تغییر تصویر پروفایل", "I18N_PREFERENCES_EMAIL": "رایانامه", - "I18N_PREFERENCES_HEADING": "تنظيمات", + "I18N_PREFERENCES_HEADING": "ترجیحات", "I18N_PREFERENCES_PICTURE": "تصویر", "I18N_PREFERENCES_PROFILE_PICTURE_ADD": "افزودن تصویر پروفایل", "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "کشیدن به چیدنگاه و تغییر اندازه:", @@ -201,37 +224,55 @@ "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "انتخاب زبان های مورد نظر ...", "I18N_PREFERENCES_USERNAME": "نام کاربری", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "هیچ داده‌ای انتخاب نشده", - "I18N_SIDEBAR_ABOUT_LINK": "درباره", + "I18N_RESET_CODE": "بازنشانی کد", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "از قبل حسابی دارید؟", + "I18N_SAVE_PROGRESS_TEXT": "ذخیره‌سازی پیشرفت", + "I18N_SHOW_LESS": "نشان دادن کمتر", + "I18N_SHOW_MORE": "نمایش بیشتر", + "I18N_SIDEBAR_ABOUT_LINK": "دربارهٔ ما", "I18N_SIDEBAR_BLOG": "بلاگ", "I18N_SIDEBAR_CONTACT_US": "تماس با ما", "I18N_SIDEBAR_DONATE": "کمک مالی", "I18N_SIDEBAR_FORUM": "انجمن", - "I18N_SIDEBAR_GET_STARTED": "شروع", + "I18N_SIDEBAR_HOME": "خانه", + "I18N_SIDEBAR_LEARN": "یادگیری", "I18N_SIDEBAR_LIBRARY_LINK": "کتابخانه", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "آموزش با Oppia", + "I18N_SIGNUP_EMAIL_PREFERENCES": "ترجیحات ایمیل", "I18N_SIGNUP_FIELD_REQUIRED": "این فیلد ضروری است.", "I18N_SIGNUP_LOADING": "در حال بارگیری...", "I18N_SIGNUP_PAGE_TITLE": "عضویت در جامعه - Oppia", "I18N_SIGNUP_REGISTRATION": "ثبت نام", + 
"I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "دوباره نپرس", "I18N_SIGNUP_USERNAME": "نام کاربری", "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "با تشکر!", "I18N_SPLASH_PAGE_TITLE": "Oppia: آموزش، یادگیری، کاوش", "I18N_SPLASH_SITE_FEEDBACK": "بازخورد های سایت", + "I18N_TIME_FOR_BREAK_FOOTER": "برای ادامه درس آماده‌ام", + "I18N_TIME_FOR_BREAK_TITLE": "وقت استراحت است؟", + "I18N_TOPIC_LEARN": "یادگیری", + "I18N_TOPIC_TITLE": "مبحث", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "بعداً زمانی که برای این موضوع درسی وجود داشت، برگردید.", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "بعداً زمانی که سوالات تمرینی برای این موضوع دردسترس است، برگردید.", "I18N_TOPNAV_ABOUT": "درباره", "I18N_TOPNAV_ABOUT_OPPIA": "درباره Oppia", "I18N_TOPNAV_ADMIN_PAGE": "صفحه مدیر", "I18N_TOPNAV_BLOG": "بلاگ", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "ریاضیات پایه", "I18N_TOPNAV_CONTACT_US": "تماس با ما", - "I18N_TOPNAV_CREATOR_DASHBOARD": "داشبورد سازنده", + "I18N_TOPNAV_CREATOR_DASHBOARD": "پیش‌خوان سازنده", "I18N_TOPNAV_DONATE": "کمک مالی", "I18N_TOPNAV_FORUM": "انجمن", "I18N_TOPNAV_GET_STARTED": "شروع", + "I18N_TOPNAV_HOME": "خانه", + "I18N_TOPNAV_LEARN": "یادگیری", "I18N_TOPNAV_LIBRARY": "کتابخانه", "I18N_TOPNAV_LOGOUT": "خروج", "I18N_TOPNAV_MODERATOR_PAGE": "صفحه ناظر", "I18N_TOPNAV_OPPIA_FOUNDATION": "بنیاد Oppia", - "I18N_TOPNAV_PREFERENCES": "تنظیمات", + "I18N_TOPNAV_PREFERENCES": "ترجیحات", "I18N_TOPNAV_SIGN_IN": "ورود", "I18N_TOPNAV_TEACH_WITH_OPPIA": "آموزش با Oppia", - "I18N_VOLUNTEER_PAGE_BREADCRUMB": "داوطلب" + "I18N_VOLUNTEER_PAGE_BREADCRUMB": "داوطلب", + "I18N_YES": "بله" } diff --git a/assets/i18n/fi.json b/assets/i18n/fi.json index 933165096c6f..6ced91d81827 100644 --- a/assets/i18n/fi.json +++ b/assets/i18n/fi.json @@ -1,5 +1,5 @@ { - "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Tietoja säätiöstä", + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Tietoja Oppia Foundationista", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Luo tutkimus", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "aiheesta josta välität.", 
"I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Ansaitse palautetta", @@ -46,6 +46,9 @@ "I18N_ACTION_EXPLORE_LESSONS": "Selaa oppitunteja", "I18N_ACTION_GUIDE_FOR_TEACHERS": "Opas opettajille", "I18N_ACTION_TIPS_FOR_PARENTS": "Vinkkejä vanhemmille ja huoltajille", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_0": "Olet menossa oikeaan suuntaan, mutta sinun on tarkistettava oikeinkirjoituksesi.", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_1": "Olet lähellä oikeaa vastausta. Voisitko korjata kirjoitusasuasi?", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_2": "Tarkista oikeinkirjoituksesi uudestaan.", "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Uusi julkaisu", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Luonnokset", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Julkaistu", @@ -72,6 +75,8 @@ "I18N_CLASSROOM_PAGE_HEADING": "Oppia-luokkahuone", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Tutki lisää yhteisön tekemiä oppitunteja", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Hae yhteisökirjastostamme", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Jatka", + "I18N_COMING_SOON": "Tulossa pian!", "I18N_CONTACT_PAGE_BREADCRUMB": "Yhteys", "I18N_CONTACT_PAGE_HEADING": "Osallistu!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Kiitos mielenkiinnostasi auttaa Oppia-projektissa!", @@ -100,6 +105,7 @@ "I18N_CONTACT_PAGE_PARAGRAPH_8": "Joten, jos haluat luoda ilmaisia, tehokkaita oppitunteja opiskelijoille ympäri maailmaa, olet tullut oikeaan paikkaan. Kehotamme sinua vilkaisemaan luomisoppaamme ja olemassaolevat opetuksemme, ja aloita oman oppituntisi luominen. Sen lisäksi, jos haluat varmistaa, että opetuksillasi on suuri vaikutus, harkitse hakea Teach with Oppia -ohjelmaamme, missä autamme sivua luomaan, testaamaan ja kehittämään tutkimuksiasi optimaaliseen vaikutukseen.", "I18N_CONTACT_PAGE_PARAGRAPH_9": "Pidät nykyisestä tutkimuksesta, mutta löysit jotain joka voisi olla paremmin? Voit ehdottaa muutoksia mihin tahansa tutkimukseen suoraan tutkimuksen sivulta. 
Yksinkertaisesti klikkaa lyijykynäkuvaketta oikeassa yläkulmassa, ja jaa mitä luulet voivasi parantaa. Oppitunnin luoja saa ehdotuksesi ja hänellä on mahdollisuus yhdistää ne tutkimukseen. Tämä on uskomattoman hyödyllinen tapa osallistua, etenkin jos voit perustaa ehdotuksesi opiskelijoiden kokemuksiin tutkimuksia suorittaessa.", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "Nykyisten tutkimusten parantaminen", + "I18N_CONTACT_PAGE_TITLE": "Yhteys | Oppia", "I18N_CONTINUE_REGISTRATION": "Jatka rekisteröitymistä", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "OK", "I18N_CORRECT_FEEDBACK": "Oikein!", @@ -149,6 +155,7 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_1_2": "Yksityiset tutkimukset ja kokoelmat", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "Käyttäjätilastot", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Yleiskatsaus", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "Poista käyttäjä | Oppia", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Vedä kuva tähän alueeseen", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Tallenna tiedosto", "I18N_DONATE_PAGE_BREADCRUMB": "Lahjoita", @@ -213,6 +220,9 @@ "I18N_GET_STARTED_PAGE_TITLE": "Aloita", "I18N_HEADING_VOLUNTEER": "Vapaaehtoinen", "I18N_HINT_TITLE": "Vihje", + "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "Kirjoita ohjelmakoodia editoriin", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Mene koodimuokkaimeen", + "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Raahaa ja pudota kohteita", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Anna murtoluku muodossa \"x/y\", tai sekaluku muodossa \"A x/y\".", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Anna murtoluku muodossa x/y.", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "liitä reuna", @@ -221,6 +231,8 @@ "I18N_INTERACTIONS_GRAPH_EDGE_FINAL_HELPTEXT": "Napauta kohdepistettä luodaksesi reunan (klikkaa samaa kohdepistettä peruuttaaksesi reunan luonnin).", "I18N_INTERACTIONS_GRAPH_EDGE_INITIAL_HELPTEXT": "Napauta reunan alkukohtaa luodaksesi.", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "Pätemätön kaavio", + 
"I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "Luo kaavio", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "Katso kaavio", "I18N_INTERACTIONS_GRAPH_MOVE": "Siirrä", "I18N_INTERACTIONS_GRAPH_MOVE_FINAL_HELPTEXT": "Napauta mitä tahansa kohtaa siirtääksesi kärjen siihen pisteeseen.", "I18N_INTERACTIONS_GRAPH_MOVE_INITIAL_HELPTEXT": "Napauta kärkeä siirtääksesi.", @@ -231,10 +243,14 @@ "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "ja <[vertices]> kärkipisteet", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Päivitä sivu", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Päivitä merkitys", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Klikkaa kuvaa", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Valitse näytettävä kuva]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Voit valita enemmän valintoja.", "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Valitse yksi tai useampi vaihtoehto.} other{Valitse # tai useampi vaihtoehto.}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{Ei enempää kuin 1 vaihtoehto voidaan valita.} other{Ei enempää kuin # vaihtoehtoa voidaan valita.}}", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "Klikkaa karttaa", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "Katso kartta", + "I18N_INTERACTIONS_MATH_EQ_INSTRUCTION": "Kirjoita yhtälö tähän.", "I18N_INTERACTIONS_MUSIC_CLEAR": "Tyhjennä", "I18N_INTERACTIONS_MUSIC_PLAY": "Toista", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Toista kohdejärjestys", @@ -243,6 +259,7 @@ "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Peruuta", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Vahvistus vaaditaan", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Nollaa koodi", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Näytä koodimuokkain", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Lisää kohde", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Oho, näyttää siltä, että erässäsi on kaksoiskappaleita!", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(Lisää 
yksi kohde per rivi.)", @@ -250,6 +267,7 @@ "I18N_INTERACTIONS_SUBMIT": "Lähetä", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Näytä Oppia kielellä:", "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Hyvää iltapäivää", + "I18N_LEARNER_DASHBOARD_ALL": "Kaikki", "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Muokkaa tavoitteita", "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Pronssi", "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Yhteisön oppitunnit", @@ -306,6 +324,7 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Ehdotettu:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Ehdotus", "I18N_LEARNER_DASHBOARD_TOOLTIP": "Kokoelmat ovat monia yhteenkuuluvia tutkimuksia, jotka on tarkoitus suorittaa järjestyksessä.", + "I18N_LEARNER_DASHBOARD_VIEW": "Näytä", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Näytä ehdotus", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Olet suorittanut tämän", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Jo lisätty soittolistaan", @@ -378,17 +397,23 @@ "I18N_LIBRARY_VIEWS_TOOLTIP": "Näyttökerrat", "I18N_LIBRARY_VIEW_ALL": "Näytä kaikki", "I18N_LICENSE_PAGE_LICENSE_HEADING": "Lisenssi", - "I18N_LICENSE_PAGE_PARAGRAPH_1": "Kaikki sisältö Oppian tutkimuksissa on lisensoitu CC-BY-SA 4.0 -lisenssillä.", + "I18N_LICENSE_PAGE_PARAGRAPH_1": "Kaikki Oppian oppituntien sisältö on lisensoitu CC-BY-SA 4.0 -lisenssillä.", "I18N_LICENSE_PAGE_PARAGRAPH_2": "Oppiaa ylläpitävä ohjelmisto on avointa lähdekoodia, ja sen koodi on julkaistu Apache 2.0 -lisenssin alla.", + "I18N_LICENSE_PAGE_TITLE": "Lisenssisivu | Oppia", + "I18N_LOGIN_PAGE_TITLE": "Kirjaudu sisään | Oppia", "I18N_LOGOUT_LOADING": "Kirjaudutaan ulos", + "I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "Kirjaudu ulos | Oppia", "I18N_LOGOUT_PAGE_TITLE": "Kirjaudu ulos", "I18N_MODAL_CANCEL_BUTTON": "Peruuta", "I18N_MODAL_CONTINUE_BUTTON": "Jatka", + "I18N_NO": "Ei", "I18N_ONE_SUBSCRIBER_TEXT": "Sinulla on 1 tilaaja.", "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Kumppanuudet", 
"I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "Poistettava tili", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2_HEADING": "Poiston tiedot", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_TITLE": "Odottaa käyttäjätilin poistamista | Oppia", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_HEADING": "Yhteisön ohjesäännöt", + "I18N_PLAYBOOK_PAGE_TITLE": "Tekijän ohjeet | Oppia", "I18N_PLAYBOOK_TAB_PARTICIPATION_PLAYBOOK": "Osanotto pelikirjaan", "I18N_PLAYER_AUDIO_EXPAND_TEXT": "Kuuntele oppituntia", "I18N_PLAYER_AUDIO_LANGUAGE": "Kieli", @@ -462,6 +487,7 @@ "I18N_PLAYER_THANK_FEEDBACK": "Kiitos palautteesta!", "I18N_PLAYER_UNRATED": "Luokittelematon", "I18N_PLAYER_VIEWS_TOOLTIP": "Näyttökerrat", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "Äänen kieli", "I18N_PREFERENCES_BIO": "Bio", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "Tämä kenttä on valinnainen. Kaikki, mitä kirjoitat täällä, on julkista ja maailmanlaajuisesti katseltavissa.", "I18N_PREFERENCES_BREADCRUMB": "Asetukset", @@ -473,9 +499,12 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_FEEDBACK_NEWS": "Vastaanota sähköposteja kun joku lähettää sinulle palautetta tutkimuksessa", "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "Vastaanota uutisia ja päivityksiä sivustosta", "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "Vastaanota sähköpostiviestejä, kun tilaamaasi tekijä julkaisee uuden tutkimuksen", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "Vie käyttäjä", "I18N_PREFERENCES_HEADING": "Asetukset", "I18N_PREFERENCES_HEADING_SUBTEXT": "Kaikki tällä sivulla tekemäsi muutokset tallennetaan automaattisesti.", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "Et ole tilannut yhtään luojaa vielä.", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "Vaikutus", + "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE": "Asetukset | Oppia", "I18N_PREFERENCES_PAGE_TITLE": "Muuta profiiliasetuksiasi - Oppia", "I18N_PREFERENCES_PICTURE": "Kuva", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "Ensisijainen äänikieli", @@ -491,7 +520,9 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Vedä rajataksesi 
kokoa:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Virhe: Kuvatiedostoa ei voitu lukea.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Lataa profiilikuva", + "I18N_PREFERENCES_SEARCH_LABEL": "Hae", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Valitse haluamasi kielet...", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "Sivuston kieli", "I18N_PREFERENCES_SUBJECT_INTERESTS": "Aihepiirit", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "Esim.: matematiikka, tietojenkäsittelytiede, taide, ...", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "Lisää uusi aihepiiri (käyttämällä pieniä kirjaimia ja välejä)...", @@ -499,24 +530,35 @@ "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "Luojat, joita olet tilannut", "I18N_PREFERENCES_USERNAME": "Käyttäjänimi", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Ei vielä valittu", + "I18N_PRIVACY_POLICY_PAGE_TITLE": "Tietosuojakäytäntö | Oppia", "I18N_PROFILE_NO_EXPLORATIONS": "Tämä käyttäjä ei ole vielä luonut tai muokannut tutkimuksia.", + "I18N_PROFILE_PAGE_TITLE": "Profiili | Oppia", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Oma ohjausnäkymä", "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "Palaa tarinaan", "I18N_QUESTION_PLAYER_SCORE": "Pisteet", + "I18N_RESET_CODE": "Nollaa koodi", + "I18N_SHOW_LESS": "Näytä vähemmän", + "I18N_SHOW_MORE": "Näytä lisää", "I18N_SHOW_SOLUTION_BUTTON": "Näytä ratkaisu", - "I18N_SIDEBAR_ABOUT_LINK": "Tietoja Oppiasta", + "I18N_SIDEBAR_ABOUT_LINK": "Tietoja meistä", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "Tietoja Oppia Foundationista", "I18N_SIDEBAR_BLOG": "Blogi", "I18N_SIDEBAR_CLASSROOM": "Luokkahuone", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Matematiikan perusta", "I18N_SIDEBAR_CONTACT_US": "Ota yhteyttä", "I18N_SIDEBAR_DONATE": "Lahjoita", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "Muokkauksesi auttavat tarjoamaan laadukasta koulutusta kaikille.", "I18N_SIDEBAR_FORUM": "Foorumi", - "I18N_SIDEBAR_GET_STARTED": "Aloittaminen", + "I18N_SIDEBAR_GET_INVOLVED": "Tule mukaan", + "I18N_SIDEBAR_HOME": "Koti", + 
"I18N_SIDEBAR_LEARN": "Lue", "I18N_SIDEBAR_LIBRARY_LINK": "Kirjasto", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Oppia Foundation", "I18N_SIDEBAR_PARTNERSHIPS": "Kumppanuudet", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "Katso kaikki oppitunnit", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Teach with Oppia", "I18N_SIDEBAR_VOLUNTEER": "Vapaaehtoinen", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "Liity meidän maailmanlaajuiseen tiimiin luomaan ja parantamaan oppitunteja.", "I18N_SIGNIN_LOADING": "Kirjaudutaan sisään", "I18N_SIGNIN_PAGE_TITLE": "Kirjaudu sisään", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Merkkaamalla tämän tekstin vasemmalla puolella olevan ruudun, tunnustat, myönnät ja hyväksyt sitoutuvasi <[sitename]>n käyttöehtoihin, jotka löytyvät täältä.", @@ -570,6 +612,7 @@ "I18N_SPLASH_START_CONTRIBUTING": "Aloita muokkaaminen", "I18N_SPLASH_START_LEARNING": "Aloita oppiminen", "I18N_SPLASH_START_TEACHING": "Aloita opettaminen", + "I18N_SPLASH_STUDENTS_TITLE": "Hauskaa ja tehokasta oppimista", "I18N_SPLASH_STUDENT_DETAILS_1": "- Mira, Opiskelija, Palestiina", "I18N_SPLASH_STUDENT_DETAILS_2": "- Dheeraj, Opiskelija, Intia", "I18N_SPLASH_STUDENT_DETAILS_3": "- Sama, Opiskelija, Palestiina", @@ -582,6 +625,8 @@ "I18N_START_HERE": "Klikkaa tästä aloittaaksesi!", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - Suoritettu!", "I18N_SUBSCRIBE_BUTTON_TEXT": "Tilaa", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Seuraava taito:", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "Edellinen taito:", "I18N_TEACH_BENEFITS_THREE": "Aina ilmainen ja helppokäyttöinen", "I18N_TEACH_BENEFITS_TITLE": "Etumme", "I18N_TEACH_BENEFITS_TWO": "Hauskoja, tarinapohjaisia ​​oppitunteja", @@ -590,9 +635,16 @@ "I18N_TEACH_PAGE_HEADING": "Oppia vanhemmille, opettajille ja huoltajille", "I18N_TEACH_PAGE_LIBRARY_BUTTON": "SELAA KIRJASTOA", "I18N_TEACH_PAGE_SIX_TITLE": "Aloita oppiminen tänään", + "I18N_TEACH_STUDENT_DETAILS_3": "Himanshu Taneja, Kurukshetra, Intia", + "I18N_TEACH_STUDENT_DETAILS_4": "Yamama, ohjaaja, 
Palestiina", + "I18N_TERMS_PAGE_TITLE": "Käyttöehdot | Oppia", "I18N_THANKS_PAGE_BREADCRUMB": "Kiitos", + "I18N_THANKS_PAGE_TITLE": "Kiitos | Oppia", + "I18N_TOPIC_LEARN": "Lue", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 oppitunti} other{# oppituntia}}", + "I18N_TOPIC_TITLE": "Aihe", "I18N_TOPIC_VIEWER_CHAPTER": "Kappale", + "I18N_TOPIC_VIEWER_COMING_SOON": "Tulossa pian!", "I18N_TOPIC_VIEWER_DESCRIPTION": "Kuvaus", "I18N_TOPIC_VIEWER_LESSON": "Oppitunti", "I18N_TOPIC_VIEWER_LESSONS": "Oppitunnit", @@ -609,7 +661,6 @@ "I18N_TOPNAV_ABOUT_OPPIA": "Tietoja Oppiasta", "I18N_TOPNAV_ADMIN_PAGE": "Ylläpitosivu", "I18N_TOPNAV_BLOG": "Blogi", - "I18N_TOPNAV_CLASSROOM": "Luokkahuone", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Matematiikan perusteet", "I18N_TOPNAV_CONTACT_US": "Ota yhteyttä", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Yhteisön hallintapaneeli", @@ -618,13 +669,18 @@ "I18N_TOPNAV_FORUM": "Foorumi", "I18N_TOPNAV_GET_INVOLVED": "Osallistu!", "I18N_TOPNAV_GET_STARTED": "Aloittaminen", + "I18N_TOPNAV_HOME": "Koti", + "I18N_TOPNAV_LEARN": "Lue", "I18N_TOPNAV_LEARNER_DASHBOARD": "Oppijan ohjausnäkymä", - "I18N_TOPNAV_LIBRARY": "Kirjasto", + "I18N_TOPNAV_LEARN_HEADING": "Tapoja lukea lisää", + "I18N_TOPNAV_LEARN_LINK_1": "Katso kaikki oppitunnit", + "I18N_TOPNAV_LEARN_LINK_2": "Jatka lukemista", + "I18N_TOPNAV_LIBRARY": "Yhteisön kirjasto", "I18N_TOPNAV_LOGOUT": "Kirjaudu ulos", "I18N_TOPNAV_MODERATOR_PAGE": "Moderaattorisivu", "I18N_TOPNAV_OPPIA_FOUNDATION": "Oppia Foundation", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Myötävaikutuksen pelikirja", - "I18N_TOPNAV_PARTNERSHIPS": "Kumppanuudet", + "I18N_TOPNAV_PARTNERSHIPS": "Koulut ja järjestöt", "I18N_TOPNAV_PREFERENCES": "Asetukset", "I18N_TOPNAV_SIGN_IN": "Kirjaudu sisään", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Kirjaudu sisään Googlella", @@ -634,5 +690,6 @@ "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Peruuta tilaus", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Vapaaehtoinen", "I18N_WARNING_MODAL_TITLE": "Varoitus!", - 
"I18N_WORKED_EXAMPLE": "Työstetty esimerkki" + "I18N_WORKED_EXAMPLE": "Työstetty esimerkki", + "I18N_YES": "Kyllä" } diff --git a/assets/i18n/fit.json b/assets/i18n/fit.json index 1f7c15e760cd..a5f66ec2e70f 100644 --- a/assets/i18n/fit.json +++ b/assets/i18n/fit.json @@ -78,7 +78,7 @@ "I18N_FOOTER_OPPIA_FOUNDATION": "Oppia Foundation", "I18N_FOOTER_TERMS_OF_SERVICE": "Käyttöehdot", "I18N_FORMS_TYPE_NUMBER": "Kirjoita numero", - "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "Ole hyvä ja syötä numero joka oon vähintään <[minValue]>.", + "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "Ole hyvä ja syötä numero joka oon vähintänsä <[minValue]>.", "I18N_FORMS_TYPE_NUMBER_AT_MOST": "Ole hyvä syötä numero joka oon enintään <[maxValue]>", "I18N_GET_STARTED_PAGE_TITLE": "Aloita", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Anna murtoluku muodossa \"x/y\", tai sekaluku muodossa \"A x/y\".", diff --git a/assets/i18n/fr.json b/assets/i18n/fr.json index 97f789f588e0..d810e4be0ddb 100644 --- a/assets/i18n/fr.json +++ b/assets/i18n/fr.json @@ -1,52 +1,53 @@ { - "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "À propos de la fondation", + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "À propos de la Fondation Oppia", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "À propos de la Fondation Oppia | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Créer une exploration", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "à propos d’un sujet auquel vous tenez.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Collecter des avis", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK_TEXT": "pour améliorer votre exploration.", - "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "À propos de Oppia", + "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "À propos d’Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_1": "La mission d’Oppia est d’aider chacun à apprendre ce qu’il souhaite d’une manière efficace et plaisante.", - "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_2": "En créant un ensemble de leçons libres de grande qualité et démontrées comme efficaces, avec l’aide d’éducateurs du monde entier, 
Oppia vise à fournir aux étudiants une éducation de qualité — quel que soit l’endroit où ils sont et les ressources traditionnelles auxquelles ils ont accès.", - "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_3": "Pour l’instant, les éducateurs ont créé plus de <[numberOfExplorations]> leçons, que nous appelons explorations. Et elles sont utilisées par pratiquement <[numberofStudentsServed]> étudiants du monde entier.", - "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_4": "Les explorations aident les étudiants à apprendre d’une manière amusante et créative en utilisant des vidéos, des images et des questions ouvertes. Et comme les étudiants ont souvent des idées fausses similaires, Oppia fournit aussi aux éducateurs la possibilité de les corriger directement dans les explorations, en leur permettant de fournir des avis ciblés à plusieurs étudiants en même temps.", - "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_5": "Si vous êtes un étudiant intéressé par l’apprentissage avec Oppia, vous pouvez commencer votre aventure d’apprentissage en navigant dans nos explorations.", - "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_6": "Si vous êtes un enseignant intéressé par l’impact sur la vie des étudiants du monde entier, vous pouvez demander à rejoindre notre programme Enseigner avec Oppia, qui vise à fournir des leçons sur des sujets que les étudiants trouvent en général difficiles.", - "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_7": "En enseignant avec Oppia, vous pouvez améliorer vos talents de communication et d’empathie, tout en aidant à améliorer l’éducation des élèves et étudiants du monde entier. 
Ou, si vous n’êtes pas encore prêt à enseigner, vous pouvez toujours partager votre avis sur les leçons pour les rendre meilleures pour les autres étudiants !", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_2": "En créant un ensemble de leçons libres de grande qualité et démontrées comme efficaces, avec l’aide d’éducateurs du monde entier, Oppia vise à fournir aux élèves une éducation de qualité — quel que soit l’endroit où ils sont et les ressources traditionnelles auxquelles ils ont accès.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_3": "Pour l’instant, les éducateurs ont créé plus de <[numberOfExplorations]> leçons, que nous appelons explorations. Et elles sont utilisées par pratiquement <[numberofStudentsServed]> élèves du monde entier.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_4": "Les explorations aident les élèves à apprendre d’une manière amusante et créative en utilisant des vidéos, des images et des questions ouvertes. Et comme les élèves ont souvent des idées fausses similaires, Oppia fournit aussi aux éducateurs la possibilité de les corriger directement dans les explorations, en leur permettant de fournir des avis ciblés à plusieurs élèves en même temps.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_5": "Si vous êtes un élève intéressé par l’apprentissage avec Oppia, vous pouvez commencer votre aventure d’apprentissage en naviguant dans nos explorations.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_6": "Si vous êtes un enseignant intéressé par l’impact sur la vie des élèves du monde entier, vous pouvez demander à rejoindre notre programme Enseigner avec Oppia, qui vise à fournir des leçons sur des sujets que les élèves trouvent en général difficiles.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_7": "En enseignant avec Oppia, vous pouvez améliorer vos talents de communication et d’empathie, tout en aidant à améliorer l’éducation des élèves et étudiants du monde entier. 
Autrement, si vous n’êtes pas encore prêt à enseigner, vous pouvez toujours partager votre avis sur les leçons pour les rendre meilleures pour les autres élèves !", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_8": "Que vous soyez un éducateur K-12, un étudiant diplômé ou un individu passionné sur un sujet particulier, ou que vous vouliez partager votre connaissance, Oppia vous accueille avec plaisir. Rejoignez la communauté et commencez à explorer avec nous.", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "Publier et partager", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE_TEXT": "vos créations avec la communauté.", "I18N_ABOUT_PAGE_AUDIO_SUBTITLES_FEATURE": "Sous-titres audio", "I18N_ABOUT_PAGE_BREADCRUMB": "À propos", - "I18N_ABOUT_PAGE_CREATE_LESSON_CONTENT": "Avec le système de création de contenu de Oppia, vous pouvez facilement créer et personnaliser les leçons sur les sujets qui vous passionnent.", + "I18N_ABOUT_PAGE_CREATE_LESSON_CONTENT": "Avec le système de création de contenu d’Oppia, vous pouvez facilement créer et personnaliser les leçons sur les sujets qui vous passionnent.", "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "Remerciements", "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT": "Les contributeurs d’Oppia viennent du monde entier — beaucoup sont des élèves ou étudiants, même nouveaux, et des enseignants. Nous voudrions remercier les contributeurs suivants qui ont aidé à bâtir la plateforme. 
Si vous souhaitez nous aider, voici comment vous pouvez vous impliquer !", "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT_BOTTOM": "L’équipe de développement d’Oppia est également très reconnaissante pour les avis, commentaires, idées, aides et suggestions proposés par <[listOfNames]>.", "I18N_ABOUT_PAGE_CREDITS_THANK_TRANSLATEWIKI": "Nous aimerions enfin remercier translatewiki.net pour avoir fourni les traductions participatives.", "I18N_ABOUT_PAGE_EASILY_CREATE_LESSON": "Créer facilement des leçons", - "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "Explorer les leçons faites par la communauté", - "I18N_ABOUT_PAGE_EXPLORE_LESSONS_CONTENT": "Les éducateurs et les membres de la communauté dans le monde entier utilisent la plateforme de création de leçons de Oppia comme moyen de créer et partager des leçons. Vous pouvez trouver plus de 20000 leçons sur 17 sujets différents dans notre bibliothèque d’Exploration, et peut-être serez-vous inspiré pour créer la vôtre !", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "Explorer les leçons réalisées par la communauté", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS_CONTENT": "Les éducateurs et les membres de la communauté dans le monde entier utilisent la plateforme de création de leçons d’Oppia comme moyen de créer et partager des leçons. 
Vous pouvez trouver plus de 20 000 leçons sur 17 sujets différents dans notre bibliothèque d’Exploration, et peut-être serez-vous inspiré pour créer la vôtre !", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "Donner", "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "S’impliquer", "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "La Fondation Oppia", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_1": "Le site web d’Oppia et son code source sont pris en charge par la Fondation Oppia, une organisation à but non lucratif, exemptée d’impôts aux États-Unis selon la clause 501(c)(3) de la législation fédérale américaine, enregistrée dans l’État de Californie.", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_2": "La Fondation compte sur le soutien généreux de contributeurs et donateurs du monde entier pour travailler à sa mission qui est de permettre à quiconque d’apprendre ce qu’il veut d'une manière agréable et efficace.", - "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_3": "Si vous voulez rejoindre les centaines d’autres individus soutenant ces efforts, veuillez vous informer sur donner à la Fondation Oppia ou s’impliquer sous d’autres manières.", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_3": "Si vous voulez rejoindre les centaines d’autres personnes soutenant ces efforts, veuillez vous informer sur donner à la Fondation Oppia ou s’impliquer sous d’autres manières.", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4": "Les directeurs de la Fondation sont Ben Henning, Jacob Davis et Sean Lip. Les statuts officiels et minutes des assemblées de la Fondation sont disponibles en lecture. 
Si vous voulez contacter la Fondation, veuillez écrire à : admin@oppia.org.", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4_HEADING": "Directeurs", "I18N_ABOUT_PAGE_HEADING": "Oppia : l’éducation pour tous", "I18N_ABOUT_PAGE_LANGUAGE_FEATURE": "Traduction vers les dialectes locaux", "I18N_ABOUT_PAGE_LEARN_BUTTON": "Je veux apprendre", - "I18N_ABOUT_PAGE_LEARN_FROM": "Apprendre via les leçons supervisées de Oppia", - "I18N_ABOUT_PAGE_LEARN_FROM_CONTENT": "Dans la salle de classe, vous pouvez trouver un ensemble de leçons que l’équipe Oppia a conçues et testées pour s’assurer qu’elles sont efficaces et amusantes pour tous les apprentis. Toutes les leçons ont été revues par des enseignants et des experts, afin que vous vous sentiez confiants que vos étudiants obtiennent une éducation efficace en apprenant à leur propre rythme.", - "I18N_ABOUT_PAGE_LESSON_FEATURE": "Leçons basées sur des histoires", + "I18N_ABOUT_PAGE_LEARN_FROM": "Apprendre à partir des leçons supervisées d’Oppia", + "I18N_ABOUT_PAGE_LEARN_FROM_CONTENT": "Dans la salle de classe, vous pouvez trouver un ensemble de leçons que l’équipe d’Oppia a conçues et testées pour s’assurer qu’elles sont efficaces et amusantes pour tous les apprenants. 
Toutes les leçons ont été revues par des enseignants et des experts, afin que vous vous sentiez confiants que vos élèves obtiennent une éducation efficace en apprenant à leur propre rythme.", + "I18N_ABOUT_PAGE_LESSON_FEATURE": "Leçons basées sur des récits", "I18N_ABOUT_PAGE_MOBILE_FEATURE": "Navigation adaptée au mobile", "I18N_ABOUT_PAGE_OUR_FEATURES": "Nos fonctionnalités", - "I18N_ABOUT_PAGE_OUR_FEATURES_CONTENT": "Construit par des éducateurs, des tuteurs et des apprentis du monde entier, nous travaillons dur pour nous assurer que cette plateforme et les leçons que nous créons sont intéressantes, efficaces et largement accessibles.", + "I18N_ABOUT_PAGE_OUR_FEATURES_CONTENT": "Construites par des éducateurs, des répétiteurs et des apprenants du monde entier, nous travaillons dur pour nous assurer que cette plateforme ainsi que les leçons que nous créons sont intéressantes, efficaces et largement accessibles.", "I18N_ABOUT_PAGE_OUR_OUTCOMES": "Nos résultats", "I18N_ABOUT_PAGE_OUR_OUTCOMES_CONTENT": "Nous faisons tout notre possible pour l’efficacité et l’excellence. 
C’est pourquoi nous menons continuellement des études utilisateur et des tests aléatoires, pour nous assurer que nos leçons sont à la hauteur de nos standards élevés.", "I18N_ABOUT_PAGE_SECTION_ONE_CONTENT": "Oppia fournit une approche nouvelle et intéressante pour l’apprentissage en ligne, qui est spécialement conçue pour répondre aux besoins uniques d’apprentis en manque de ressources, dans le monde entier.", "I18N_ABOUT_PAGE_SECTION_SEVEN_TITLE": "Commencer avec les conseils supervisés", - "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "Que voulez-vous faire aujourd’hui  ?", + "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "Que voulez-vous faire aujourd’hui ?", "I18N_ABOUT_PAGE_TABS_ABOUT": "À propos", "I18N_ABOUT_PAGE_TABS_CREDITS": "Crédits", "I18N_ABOUT_PAGE_TABS_FOUNDATION": "Fondation", @@ -54,6 +55,7 @@ "I18N_ABOUT_PAGE_TITLE": "À propos d’Oppia", "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "Commencer avec Oppia", "I18N_ABOUT_PAGE_WIFI_FEATURE": "Bande passante faible demandée", + "I18N_ACTION_ACCESS_ANDROID_APP": "Accéder à l’application Android", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Postuler pour enseigner avec Oppia", "I18N_ACTION_BROWSE_EXPLORATIONS": "Parcourir nos explorations", "I18N_ACTION_BROWSE_LESSONS": "Parcourir nos leçons", @@ -65,26 +67,78 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "Guide pour les enseignants", "I18N_ACTION_TIPS_FOR_PARENTS": "Conseils pour les parents et les gardiens", "I18N_ACTION_VISIT_CLASSROOM": "Visiter la salle de classe", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Annuler", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "Fait", + "I18N_ADD_NEW_SYLLABUS_ITEMS": "Nouveaux éléments du programme", + "I18N_ADD_SYLLABUS_DESCRIPTION_TEXT": "Ajoutez des compétences ou des récits à votre programme pour les envoyer automatiquement à vos élèves.", + "I18N_ADD_SYLLABUS_SEARCH_PLACEHOLDER": "Rechercher par ex. 
: histoire, physique, anglais", + "I18N_ANDROID_PAGE_AVAILABLE_FOR_DOWNLOAD_TEXT": "disponible pour le téléchargement.", + "I18N_ANDROID_PAGE_BETA_DESCRIPTION": "Une version bêta de l'application Android d'Oppia est désormais téléchargeable et utilisable gratuitement en anglais et portugais brésilien.", + "I18N_ANDROID_PAGE_CONSENT_CHECKBOX_LABEL": "Vous confirmez que vous avez plus de 18 ans ou que vous avez le consentement et l'approbation de votre parent ou tuteur légal.", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "Adresse de courriel", + "I18N_ANDROID_PAGE_FEATURES_SECTION_HEADER": "Éducation pour tout le monde.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_1": "Et essayez nos conseils utiles pour vous guider.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_2": "Jouez les leçons même sans connexion Internet.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_3": "L'application est disponible en anglais et portugais brésilien.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_4": "D’autres langues seront bientôt ajoutées !", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_5": "Créez et gérez jusqu’à 10 profils sur un seul appareil.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_1": "Apprenez à travers des récits captivants.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_2": "Apprenez n’importe quand, n’importe où.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_3": "Apprenez dans votre langue.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_4": "Basculer entre les élèves", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Nom", + "I18N_ANDROID_PAGE_SUPPORT_TEXT": "Nous prenons en charge toutes les versions d’Android depuis Lollipop (Android 5).", + "I18N_ANDROID_PAGE_TITLE": "Android | Oppia", + "I18N_ANDROID_PAGE_UPDATES_MAIN_TEXT": "Abonnez-vous pour recevoir des mises à jour sur l'application Android d'Oppia", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "Me notifier", + "I18N_ANDROID_PAGE_UPDATES_SUBTEXT": "Nous nous engageons à ne pas envoyer de spam et vous ne recevrez que des e-mails occasionnels. 
Vous pouvez vous désinscrire à tout moment.", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_0": "Vous allez dans la bonne direction, mais vous devez revérifier votre orthographe.", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_1": "Vous êtes proche de la bonne réponse. Pourriez-vous corriger votre orthographe s’il vous plait ?", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_2": "Merci de revérifier votre orthographe.", + "I18N_ASSIGNED_STORIES_AND_SKILLS": "Récits et compétences attribués", + "I18N_ASSIGNED_STORIES_AND_SKILLS_EMPTY_MESSAGE": "Aucun récit ni aucune compétence n’a été attribué aux élèves de ce groupe.", "I18N_ATTRIBUTION_HTML_STEP_ONE": "Copier et coller le HTML", "I18N_ATTRIBUTION_HTML_STEP_TWO": "S’assurer que le lien apparaît comme « <[linkText]> »", "I18N_ATTRIBUTION_HTML_TITLE": "Attribut dans HTML", "I18N_ATTRIBUTION_PRINT_STEP_ONE": "Copier et coller l’attribution", "I18N_ATTRIBUTION_PRINT_STEP_TWO": "Attacher une copie de « <[link]> »", "I18N_ATTRIBUTION_PRINT_TITLE": "Attribut dans l’impression", - "I18N_ATTRIBUTION_TITLE": "Comment attribuer cette leçon pour partage ou réutilisation", + "I18N_ATTRIBUTION_TITLE": "Comment attribuer cette leçon pour le partage ou la réutilisation", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_BREADCRUMB": "Profil de l’auteur", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TITLE": "Blogue | Auteur | Oppia", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TOTAL_POSTS_DISPLAY": "<[totalNumber]> billets", "I18N_BLOG_CARD_PREVIEW_CONTEXT": "Voici comment la carte de blog apparaîtra sur la page d’accueil et sur votre profil d’auteur.", - "I18N_BLOG_CARD_PREVIEW_HEADING": "Aperçu de la carte de blog", - "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "Créer une nouvelle note de blog", - "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "Il semble que vous n’avez pas encore créé d’histoire !", + "I18N_BLOG_CARD_PREVIEW_HEADING": "Aperçu de la carte de blogue", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_EXPLAIN_TEXT": "Ceci est une courte description à votre sujet. 
Tout ce que vous écrirez ici sera rendu public et visible de tout le monde.", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_HEADING": "Biographie", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_DESCRIPTION": "Le nom saisi ici apparaîtra en tant que nom de l’auteur sur les articles de blogue que vous rédigerez. La biographie sera affichée sur votre page de billet de blogue spécifique à l’auteur.", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "Modifier votre nom d’auteur et votre biographie", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Nom", + "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "Créer une nouvelle note de blogue", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "Il semble que vous n’ayez encore créé aucun billet de blogue !", "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Nouveau billet", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "Enregistrer", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Brouillons", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Publié", - "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Ajouter l’image vignette", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Blogue", + "I18N_BLOG_HOME_PAGE_NO_RESULTS_FOUND": "Désolé, il n’y a aucun article de blogue à afficher.", + "I18N_BLOG_HOME_PAGE_OPPIA_DESCRIPTION": "Construire une communauté pour offrir une éducation de qualité à ceux qui n’y ont pas accès.", + "I18N_BLOG_HOME_PAGE_POSTS_HEADING": "Derniers billets", + "I18N_BLOG_HOME_PAGE_POSTS_NUMBER_DISPLAY": "Affichage des billets <[startingNumber]> à <[endingNumber]> sur <[totalNumber]>", + "I18N_BLOG_HOME_PAGE_QUERY_SEARCH_HEADING": "Mots-clés", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "Mots-clés", + "I18N_BLOG_HOME_PAGE_TAG_FILTER_HOLDER_TEXT": "Choisissez les mots-clés", + "I18N_BLOG_HOME_PAGE_TITLE": "Blogue Oppia | Oppia", + "I18N_BLOG_HOME_PAGE_WELCOME_HEADING": "Bienvenue sur le blogue d’Oppia !", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_HEADING": "Affichage des résultats de la recherche", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_DISPLAY": "Affichage partiel des résultats 
de recherche de <[startingNumber]> à <[endingNumber]>.", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_OUT_OF_TOTAL_DISPLAY": "Affichage des billets de <[startingNumber]> à <[endingNumber]> sur <[totalNumber]>.", + "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Ajouter l’image de vignette", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Corps", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Annuler", "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Supprimer", - "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "Modifier l’image vignette", - "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "Dernière sauvegarde à", + "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "Modifier l’image de vignette", + "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "Dernier enregistrement à", "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "Publier", "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "Terminé", "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "Enregistrer comme brouillon", @@ -94,23 +148,75 @@ "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "d’autres balises peuvent encore être ajoutées.", "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "Vignette", "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "Titre", + "I18N_BLOG_POST_EDITOR_TITLE_MAX_LENGTH_ERROR": "Le titre du billet de blogue doit contenir au plus <[maxChars]> caractères.", + "I18N_BLOG_POST_EDITOR_TITLE_MIN_LENGTH_ERROR": "Le titre du billet de blogue doit comporter au moins <[minChars]> caractères.", + "I18N_BLOG_POST_PAGE_RECOMMENDATION_SECTON_HEADING": "Suggéré pour vous.", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "Mots-clés", + "I18N_BLOG_POST_PAGE_TITLE": "<[blogPostTitle]> | Blogue | Oppia", + "I18N_BLOG_POST_THUMBNAIL_ALLOWED_EXTENSIONS_PREFIX": "Extensions d’image autorisées :", + "I18N_BLOG_POST_THUMBNAIL_ALLOWED_MAX_FILE_SIZE": "La taille maximale d’image autorisée est de <[imageSize]> Kio. 
Notez que, parfois, le recadrage peut augmenter la taille de l’image.", "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "Choisir un fichier ou le tirer ici", - "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Erreur : Impossible de lire le fichier image.", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Erreur : impossible de lire le fichier image.", "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "Ajouter une vignette", "I18N_BLOG_POST_UNTITLED_HEADING": "Sans titre", - "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "Ce contenu de carte est trop long. Veuillez le garder en dessous de 4500 caractères pour l’enregistrer.", + "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "Ce contenu de carte est trop long. Veuillez le garder en dessous de 4 500 caractères pour l’enregistrer.", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Cette carte est très longue et les étudiants pourraient s'en désintéresser. Essayez de la réduire ou de la scinder en deux cartes.", - "I18N_CHAPTER_COMPLETION": "Félicitations pour avoir terminé ce chapitre !", + "I18N_CHAPTER_COMPLETION": "Félicitations pour avoir terminé ce chapitre !", "I18N_CLASSROOM_CALLOUT_BUTTON": "Explorer", "I18N_CLASSROOM_CALLOUT_HEADING_1": "Mathématiques élémentaires", "I18N_CLASSROOM_CALLOUT_HEADING_2": "Présentation : la salle de cours Oppia", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Regarder le premier cours complet dans le tout nouveau plan de cours approuvé de la salle de classe Oppia ! 
Des leçons supervisées — revues par des éducateurs — de manière que vous puissiez maîtriser les compétences mathématiques de base sur des sujets allant de placer des valeurs à la multiplication et à la division.", + "I18N_CLASSROOM_MATH_TITLE": "Mathématiques", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_HEADING": "Vous connaissez déjà un peu les mathématiques ?", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_TEXT": "Répondez à un quiz de 10 à 15 questions pour savoir où commencer.", + "I18N_CLASSROOM_PAGE_BEGIN_WITH_FIRST_TOPIC_BUTTON": "Commencez par <[firstTopic]>", "I18N_CLASSROOM_PAGE_COMING_SOON": "Disponible prochainement ", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Détails du cours", "I18N_CLASSROOM_PAGE_HEADING": "La salle de cours Oppia", + "I18N_CLASSROOM_PAGE_NEW_TO_MATH_HEADING": "Débutant en mathématiques ?", + "I18N_CLASSROOM_PAGE_NEW_TO_MATH_TEXT": "Commencez par les bases avec notre premier sujet, <[firstTopic]>.", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Explorer plus de leçons réalisées par la Communauté", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Parcourir notre bibliothèque communautaire", + "I18N_CLASSROOM_PAGE_TAKE_A_TEST_BUTTON": "Passer un test", + "I18N_CLASSROOM_PAGE_TITLE": "Apprendre <[classroomName]> avec Oppia | Oppia", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "Sujets couverts", + "I18N_COLLECTION_EDITOR_PAGE_TITLE": "<[collectionTitle]> – Éditeur Oppia", + "I18N_COLLECTION_EDITOR_UNTITLED_COLLECTION_PAGE_TITLE": "Sans titre – Éditeur Oppia", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "Début", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Continuer", + "I18N_COLLECTION_PLAYER_PAGE_FINISHED": "Vous avez terminé la collection ! 
Soyez libre de rejouer n’importe laquelle des explorations ci-dessous.", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "Survolez une icône pour prévisualiser une exploration.", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "Aucune exploration n’a été ajoutée à cette collection.", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "<[collectionTitle]> – Oppia", + "I18N_COMING_SOON": "Prochainement !", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "COLLECTION", + "I18N_COMPLETED_STORY": "« <[story]> » terminé", + "I18N_COMPLETE_CHAPTER": "Terminer un chapitre sur « <[topicName]> »", + "I18N_CONCEPT_CARD_NEED_HELP": "Besoin d’aide ? Jetez un œil à la carte conceptuelle.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_1": "Vous venez juste de terminer le premier jalon ! Bon début !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_2": "Bon travail en terminant votre premier jalon ! Poursuivez !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_3": "Un début parfait ! Poursuivez !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_1": "Vous avez terminé un jalon ! Bon travail !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_2": "Impressionnant, vous avez terminé un jalon ! Poursuivez !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_3": "Bon travail ! Vous venez de terminer un jalon !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_1": "Vous êtes à mi-parcours, vous aurez bientôt terminé !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_2": "Vous venez de passer la mi-parcours, beau travail !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_3": "Waouh ! Vous avez déjà fait la moitié de la leçon ! Travail impressionnant !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_1": "Juste un de plus à faire, waouh !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_2": "Allez-y ! 
Plus qu’un seul !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_3": "Vous avancez bien, plus qu’un à faire !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_1": "Vous avancez bien ! Poursuivez !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_2": "Impressionnant ! Vous venez de terminer votre deuxième jalon !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_3": "Un nouveau jalon terminé, vous travaillez bien !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_1": "Vous y êtes presque ! Poursuivez !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_2": "Vous avez presque terminé ! Poursuivez !", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_3": "Bon travail ! Vous êtes presque à l’arrivée !", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "Hourra !", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "Impressionnant !", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_3": "Jalon atteint !", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "Bon travail !", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "Bon travail !", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "Bien joué !", "I18N_CONTACT_PAGE_BREADCRUMB": "Contact", "I18N_CONTACT_PAGE_HEADING": "Impliquez-vous !", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Merci de votre intérêt pour l’aide que vous apportez au projet Oppia !", @@ -139,10 +245,13 @@ "I18N_CONTACT_PAGE_PARAGRAPH_8": "Donc, si vous voulez créer des leçons libres et efficaces pour les étudiants du monde entier, vous êtes au bon endroit. Nous vous encourageons à regarder nos tutoriels pour créateur et nos leçons existantes et à commencer à créer votre propre leçon. De plus, si vous voulez vous assurer que vos leçons auront un grand impact, envisagez de vous inscrire à notre programme Enseigner avec Oppia, où nous vous aiderons à créer, tester et améliorer vos explorations pour un impact optimal.", "I18N_CONTACT_PAGE_PARAGRAPH_9": "Vous aimez une exploration existante, mais vous avez trouvé quelque chose qui pourrait être mieux ? 
Vous pouvez suggérer des modifications dans une exploration directement depuis sa page. Cliquez simplement sur l’icône du stylo dans le coin en haut à droite et partagez ce que vous pensez pouvoir être amélioré. Le créateur de la leçon recevra vos propositions et aura la possibilité de les intégrer dans l’exploration. C’est un moyen très précieux de contribuer, surtout si vous pouvez baser vos suggestions sur les expériences des étudiants ayant travaillé avec l’exploration.", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "Améliorer des explorations existantes", + "I18N_CONTACT_PAGE_TITLE": "Contact | Oppia", "I18N_CONTINUE_REGISTRATION": "Continuer l’inscription", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "OK", - "I18N_COOKIE_BANNER_EXPLANATION": "Ce site web utilise des cookies et des technologies assimilées pour offrir sa fonctionnalité centrale, conserver le site sécurisé, et analyser le trafic de notre site web. En savoir plus dans notre politique de confidentialité.", + "I18N_COOKIE_BANNER_EXPLANATION": "Ce site web utilise des cookies et des technologies assimilées pour offrir sa fonctionnalité centrale, conserver le site sécurisé et analyser le trafic de notre site web. 
En savoir plus dans notre politique de confidentialité.", "I18N_CORRECT_FEEDBACK": "Correct !", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "Votre lien de groupe", + "I18N_CREATE_ACCOUNT": "Créer un compte", "I18N_CREATE_ACTIVITY_QUESTION": "Que voulez-vous créer ?", "I18N_CREATE_ACTIVITY_TITLE": "Créer une activité", "I18N_CREATE_COLLECTION": "Créer une collection", @@ -151,13 +260,15 @@ "I18N_CREATE_EXPLORATION_QUESTION": "Voulez-vous créer une exploration ?", "I18N_CREATE_EXPLORATION_TITLE": "Créer une exploration", "I18N_CREATE_EXPLORATION_UPLOAD": "Téléverser", + "I18N_CREATE_LEARNER_GROUP": "Créer un groupe", + "I18N_CREATE_LEARNER_GROUP_PAGE_TITLE": "Créer un groupe d’élèves | Oppia", "I18N_CREATE_NO_THANKS": "Non merci", "I18N_CREATE_YES_PLEASE": "Oui, SVP !", "I18N_CREATOR_IMPACT": "Impact", "I18N_DASHBOARD_COLLECTIONS": "Collections", "I18N_DASHBOARD_CREATOR_DASHBOARD": "Tableau de bord du créateur", "I18N_DASHBOARD_EXPLORATIONS": "Explorations", - "I18N_DASHBOARD_EXPLORATIONS_EMPTY_MESSAGE": "Il semble que vous n'avez pas encore créé d’exploration. Commençons !", + "I18N_DASHBOARD_EXPLORATIONS_EMPTY_MESSAGE": "Il semble que vous n’ayez encore créé aucune exploration. 
Commençons !", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY": "Trié par", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_AVERAGE_RATING": "Note moyenne", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_CATEGORY": "Catégorie", @@ -173,7 +284,7 @@ "I18N_DASHBOARD_STATS_OPEN_FEEDBACK": "Avis ouverts", "I18N_DASHBOARD_STATS_TOTAL_PLAYS": "Nombre d’exécutions", "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "Abonnés", - "I18N_DASHBOARD_STORIES": "Histoires", + "I18N_DASHBOARD_STORIES": "Récits", "I18N_DASHBOARD_SUBSCRIBERS": "Abonnés", "I18N_DASHBOARD_SUGGESTIONS": "Suggestions", "I18N_DASHBOARD_TABLE_HEADING_EXPLORATION": "Exploration", @@ -186,7 +297,7 @@ "I18N_DELETE_ACCOUNT_PAGE_BREADCRUMB": "Supprimer le compte", "I18N_DELETE_ACCOUNT_PAGE_BUTTON": "Supprimer mon compte", "I18N_DELETE_ACCOUNT_PAGE_HEADING": "Supprimer un compte", - "I18N_DELETE_ACCOUNT_PAGE_LIST_1_1": "Paramètres utilisateur et préférences de messagerie", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_1": "Paramètres utilisateur et préférences de courriel", "I18N_DELETE_ACCOUNT_PAGE_LIST_1_2": "Explorations et collections privées", "I18N_DELETE_ACCOUNT_PAGE_LIST_1_3": "Progrès de la leçon", "I18N_DELETE_ACCOUNT_PAGE_LIST_1_4": "Statistiques concernant les explorations et les collections créées par l’utilisateur", @@ -195,21 +306,107 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "Validations faites des explorations publiques et des collections qui ont d’autres propriétaires", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_3": "Validations faites aux sujets, récits, compétences et questions", "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "Afin de confirmer la suppression, veuillez entrer votre nom d’utilisateur dans le champ ci-dessous et appuyez sur le bouton « Supprimer mon compte ». Cette action ne peut pas être annulée.", - "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Cette action supprimera ce compte utilisateur ainsi que toutes les données privées associées à ce compte. 
Les données déjà publiques seront anonymisées, de manière à ne plus pouvoir être associées à ce compte. Certaines des catégories mentionnées ci-dessous pourraient ne pas d’appliquer à votre compte.", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Cette action supprimera ce compte utilisateur ainsi que toutes les données privées associées à ce compte. Les données déjà publiques seront anonymisées, de manière à ne plus pouvoir être associées à ce compte, sauf pour les données de sauvegarde (qui sont conservées durant six mois). Certaines des catégories mentionnées ci-dessous pourraient ne pas s’appliquer à votre compte.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Vue d’ensemble", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "Voici les types de données qui seront supprimés :", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "Voici les types de données qui seront anonymisés :", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "De plus, les explorations et les collections publiées qui n’ont pas d’autre propriétaire seront transférées vers la propriété de la communauté.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "Si vous avez des questions ou des inquiétudes sur le processus de suppression de compte, veuillez envoyer un courriel à privacy@oppia.org.", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "Cela vous conduira vers une page où vous pourrez supprimer votre compte Oppia.", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "Supprimer un compte | Oppia", + "I18N_DELETE_LEARNER_GROUP": "Supprimer le groupe", + "I18N_DELETE_LEARNER_GROUP_MODAL_BODY_TEXT": "Voulez-vous vraiment supprimer le groupe dʼélèves <[groupName]> ?", + "I18N_DEST_IF_STUCK_INFO_TOOLTIP": "Vous pouvez maintenant spécifier une nouvelle carte dans laquelle vous pouvez guider les élèves au travers des concepts utilisés dans la question, quand ils sont vraiment bloqués !", + "I18N_DIAGNOSTIC_TEST_CURRENT_PROGRESS": "Progression actuelle : <[progressPercentage]> %", + "I18N_DIAGNOSTIC_TEST_EXIT_TEST": "Quitter le test", + 
"I18N_DIAGNOSTIC_TEST_HEADING": "Test de diagnostic de l’élève", + "I18N_DIAGNOSTIC_TEST_INTRO_TEXT_1": "Répondez à quelques questions pour nous aider à vous recommander quelques sujets pour aborder les leçons de mathématiques.", + "I18N_DIAGNOSTIC_TEST_INTRO_TEXT_2": "Veuillez noter que vous ne pourrez pas modifier votre réponse après être passé à la question suivante.", + "I18N_DIAGNOSTIC_TEST_RESULT_GO_TO_CLASSROOM_BUTTON_TEXT": "Aller en classe", + "I18N_DIAGNOSTIC_TEST_RESULT_HEADER_TEXT": "Test terminé. Bien joué !", + "I18N_DIAGNOSTIC_TEST_RESULT_START_TOPIC": "Démarrer <[topicName]>", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_1_FOR_NO_TOPIC": "Bon travail ! Il semble que vous ayez déjà une bonne compréhension des sujets dans la classe de mathématiques.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_2_FOR_NO_TOPIC": "N’hésitez pas à suivre l’une des leçons pour revoir ou améliorer ce que vous savez. Nous mettons constamment à jour la salle de classe avec de nouvelles leçons, alors revenez plus tard.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_ONE_TOPIC": "D’après vos réponses, nous vous recommandons de commencer par ce sujet.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_TWO_TOPICS": "D’après vos réponses, nous vous recommandons de commencer par l’un ou l’autre de ces sujets.", + "I18N_DIAGNOSTIC_TEST_START_BUTTON": "Commencer le test", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Faire glisser une image dans cette zone", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Téléverser un fichier", "I18N_DONATE_PAGE_BREADCRUMB": "Faire un don", - "I18N_DONATE_PAGE_IMAGE_TITLE": "Votre don généreux finance :", - "I18N_DONATE_PAGE_TITLE": "Donner à la
Fondation Oppia", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "Faire un don | Avoir un impact positif | Oppia", + "I18N_DONATE_PAGE_BUDGET_HEADING": "Où va votre argent ?", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_1": "Étendre la sensibilisation", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_2": "Passer le mot", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "Maintenance", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_1": "Vos dons renforcent nos partenariats mondiaux et nous aident à atteindre et à soutenir de nouvelles communautés d’apprentissage.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_2": "Vos dons nous aident dans nos efforts pour faire connaître Oppia à travers le monde.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_3": "Les dons assurent le bon fonctionnement et la fiabilité des plateformes et des serveurs d’Oppia.", + "I18N_DONATE_PAGE_FAQ_ANSWER_1": "Oppia est une plateforme éducative en ligne complémentaire qui vise à répondre aux besoins des apprenants qui n’ont pas accès à une éducation de qualité. Nous espérons que la plateforme et les leçons contribueront à réaliser une éducation de haute qualité accessible à autant d’élèves que possible.", + "I18N_DONATE_PAGE_FAQ_ANSWER_10": "Pour des questions générales sur Oppia, veuillez contacter contact@oppia.org.", + "I18N_DONATE_PAGE_FAQ_ANSWER_2": "Oppia existe pour aider à améliorer l’accès équitable à l’éducation. De nombreuses plateformes en ligne (par exemple, les cours MOOC de niveau collège) favorisent les élèves qui ont déjà une formation de base. Ces plateformes exigent également que les élèves disposent d’une connexion Internet, d’une motivation pour apprendre par eux-mêmes, d’une bonne connaissance de l’anglais et de l’alphabétisation de base, etc. Cependant, cela n’est pas vrai pour les étudiants dans des communautés et des régions mal desservies et peut creuser les écarts socioéconomiques. 
Oppia réduit cet écart en incluant des fonctionnalités spécifiquement destinées aux communautés disposant de moins de ressources.", + "I18N_DONATE_PAGE_FAQ_ANSWER_3": "Depuis sa création, la plateforme Oppia a servi plus de 1,5 million d’élèves dans le monde. Oppia a mené des études à petite échelle, y compris des tests aléatoires, pour mesurer l’efficacité des leçons et celles-ci ont donné des résultats positifs. L’équipe travaille actuellement à fournir plus de données pour montrer l’impact de la plate-forme à travers le monde et continuera à mener d’autres études.", + "I18N_DONATE_PAGE_FAQ_ANSWER_4_1": "L’accent mis par Oppia sur l’accès à l’éducation le distingue des autres plateformes en ligne. De nombreux fournisseurs actuels utilisent des cours en vidéo qui peuvent être inaccessibles aux élèves dans des zones sans connexion Internet fiable. Les leçons d’Oppia ne nécessitent pas beaucoup de bande passante et encouragent les élèves à expérimenter et à résoudre des problèmes.", + "I18N_DONATE_PAGE_FAQ_ANSWER_4_2": "Oppia se concentre également sur les régions moins favorisées, notamment l’Amérique du Sud, l’Afrique subsaharienne et l’Asie du Sud ou du Sud-Est. En ciblant et en localisant nos cours, nos supports pédagogiques et notre plateforme dans ces régions, nous nous assurons que notre plateforme est accessible à ceux qui en ont besoin et maximisons notre impact dans ces régions.", + "I18N_DONATE_PAGE_FAQ_ANSWER_5": "Oui, nous sommes une organisation exonérée d’impôt selon l’article 501(c)(3) et votre don est déductible d’impôt conformément aux directives de la loi américaine.", + "I18N_DONATE_PAGE_FAQ_ANSWER_6": "Veuillez ne pas donner par chèque et envisager de faire un don par carte ou via Paypal.", + "I18N_DONATE_PAGE_FAQ_ANSWER_7": "Oui, mais Oppia n’enregistrera pas automatiquement de tels cadeaux. 
Veuillez vérifier auprès de votre employeur si nécessaire.", + "I18N_DONATE_PAGE_FAQ_ANSWER_8": "Nous n’acceptons pas les dons de titres financiers ni par virement bancaire. Veuillez songer à faire un don par carte ou via Paypal.", + "I18N_DONATE_PAGE_FAQ_ANSWER_9": "Oui, contactez : contact@oppia.org", + "I18N_DONATE_PAGE_FAQ_HEADING_TEXT": "Questions fréquemment posées", + "I18N_DONATE_PAGE_FAQ_QUESTION_1": "Qu’est-ce qu’Oppia ?", + "I18N_DONATE_PAGE_FAQ_QUESTION_10": "J’aimerais en savoir plus sur Oppia. À qui puis-je m’adresser ?", + "I18N_DONATE_PAGE_FAQ_QUESTION_2": "Pourquoi Oppia existe-t-il ?", + "I18N_DONATE_PAGE_FAQ_QUESTION_3": "Comment Oppia mesure-t-il l’impact et qu’est-ce que la plate-forme a accompli jusqu’à présent ?", + "I18N_DONATE_PAGE_FAQ_QUESTION_4": "Qu’est-ce qui différencie Oppia des autres plateformes d’éducation en ligne ?", + "I18N_DONATE_PAGE_FAQ_QUESTION_5": "Ce don est-il déductible des impôts ?", + "I18N_DONATE_PAGE_FAQ_QUESTION_6": "Comment donner par chèque ?", + "I18N_DONATE_PAGE_FAQ_QUESTION_7": "Acceptez-vous les cadeaux jumelés des employés ?", + "I18N_DONATE_PAGE_FAQ_QUESTION_8": "Puis-je donner des actions ou faire mon don par virement bancaire ?", + "I18N_DONATE_PAGE_FAQ_QUESTION_9": "Y a-t-il quelqu’un à qui je peux parler si je souhaite devenir une entreprise partenaire ?", + "I18N_DONATE_PAGE_HEADING_1": "Joignez-vous à nous pour assurer l’accès à", + "I18N_DONATE_PAGE_HEADING_2": "une éducation de qualité et engageante.", + "I18N_DONATE_PAGE_IMAGE_TITLE": "Votre généreux don finance :", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_1": "De Khanpur en Inde", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_2": "De Palestine", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "D’Inde", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_SECTION_HEADING": "Ce que disent nos élèves", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_TEXT_1": "J’ai aimé apprendre les leçons sur mobile parce que nous recevons de très bonnes questions et si nous nous 
trompons, on nous dit comment le corriger et nous n’avons pas peur quand nous le faisons. J’ai vraiment aimé cette application.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_TEXT_2": "J’ai beaucoup apprécié de suivre la leçon, je ne me suis pas ennuyé et je sens que je maîtrise maintenant les nombres négatifs.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_TEXT_3": "J’ai vraiment eu du plaisir en répondant aux questions parce qu’elles ont beaucoup de formes colorées et d’images. Les images facilitent aussi la compréhension des sujets !", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "Lire notre blogue", + "I18N_DONATE_PAGE_STATISTIC_1": "Élèves servis dans le monde entier.", + "I18N_DONATE_PAGE_STATISTIC_2": "Leçons dans notre bibliothèque virtuelle", + "I18N_DONATE_PAGE_STATISTIC_3": "Essai randomisé terminé, avec plus encore à venir", + "I18N_DONATE_PAGE_STATISTIC_4": "Bénévoles du monde entier", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_HEADING": "Merci pour votre abonnement !", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_1": "Vous commencerez bientôt à recevoir des mises à jour dans votre boîte de réception. Nous nous engageons à ne pas polluposter et vous pouvez vous désabonner à tout moment.", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_2": "Avec l’aide et le soutien de notre communauté (y compris vous-même !), Oppia a servi et continue de servir les élèves les moins favorisés du monde entier.", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "Adresse de courriel", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_NAME_LABEL_TEXT": "Nom (facultatif)", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_SUBSCRIBE_LABEL_TEXT": "Abonnez-vous maintenant", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_1": "Rejoignez-nous aujourd’hui !", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_2": "Faites partie du mouvement pour offrir une éducation efficace et attrayante aux élèves mal desservis du monde entier. 
Abonnez-vous à notre bulletin d’actualités pour recevoir des mises à jour et en savoir plus sur la façon dont vous pouvez vous impliquer.", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "Merci pour le don !", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_1": "Avec votre aide et votre soutien, Oppia pourra continuer à servir les élèves les moins bien desservis à travers le monde.", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_2": "En savoir plus sur Oppia et l’impact que votre soutien aura", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_3": "Si vous avez des questions, veuillez nous contacter à tout moment.", + "I18N_DONATE_PAGE_TITLE": "Donner à la Fondation Oppia", "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Écoutez notre communauté Oppia", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "En 2012? Oppia a commencé à partir d’une idée simple : améliorer l’éducation des étudiants du monde entier en améliorant la qualité de l’enseignement. Cette vision s’est depuis transformée en une plateforme d’éducation avec plus de 11000 explorations utilisées par plus de 430000 utilisateurs dans le monde entier.", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "Veuillez donner à la Fondation Oppia, une organisation à but non lucratif inscrite au registre 501(c)(3), et rejoignez-nous en apportant les joies de l’enseignement et de l’apprentissage aux gens partout.", + "I18N_DONATE_PAGE_VISION_TEXT": "À ce jour, Oppia a servi plus de 1,5 million d’élèves à travers le monde, dont beaucoup viennent des régions les moins bien desservies du monde. 
Oppia dépend des dons, et avec seulement 11 $ US, vous pouvez nous aider à poursuivre notre travail et à étendre la portée de notre plateforme, pour étendre l’éducation équitable pour tous.", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "Regarder une vidéo", + "I18N_EDIT_LEARNER_GROUP_PAGE_TITLE": "Modifier le groupe d’élèves | Oppia", + "I18N_EMPTY_LEARNER_GROUPS_MESSAGE": "Vous n’avez encore aucun groupe", + "I18N_EMPTY_SOLUTION_MESSAGE": "Veuillez fournir la solution pour l’état.", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "Vous venez de terminer votre 1er chapitre !", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_2": "Vous venez de terminer votre 5e chapitre !", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_3": "Vous venez de terminer votre 10e chapitre !", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_4": "Vous venez de terminer votre 25e chapitre !", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_5": "Vous venez de terminer votre 50e chapitre !", + "I18N_END_CHAPTER_MILESTONE_PROGRESS_MESSAGE": "Terminez {chaptersToGo, plural, one{un autre chapitre} other{# autres chapitres}} pour atteindre votre jalon suivant !", + "I18N_END_CHAPTER_NEXT_CHAPTER_TEXT": "À la leçon suivante !", + "I18N_END_CHAPTER_PRACTICE_SESSION_TEXT": "Pratiquez vos compétences nouvellement acquises !", + "I18N_END_CHAPTER_REVISION_TAB_TEXT": "Révisez ce que vous avez appris jusqu’à présent !", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "Voici ce que vous pouvez faire ensuite !", "I18N_ERROR_DISABLED_EXPLORATION": "Exploration désactivée", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Désolé, mais l’exploration sur laquelle vous avez cliqué est désactivée pour le moment. Veuillez réessayer ultérieurement.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Exploration désactivée – Oppia", @@ -222,13 +419,143 @@ "I18N_ERROR_MESSAGE_404": "Désolé, nous avons cherché et cherché encore, mais nous ne pouvons tout simplement pas trouver cette page.", "I18N_ERROR_MESSAGE_500": "Quelque chose s’est très mal passé. 
Mais ce n’était pas de votre faute car une erreur interne s’est produite.", "I18N_ERROR_NEXT_STEPS": "La meilleure chose à faire est probablement de revenir à la \">page d’accueil. Toutefois, si le problème se reproduit et que vous pensez que ce n’est pas normal, merci de nous le faire savoir sur notre \" target=\"_blank\">suivi de problèmes. Désolé de cela.", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "Erreur <[statusCode]> | Oppia", + "I18N_ERROR_PAGE_TITLE": "Erreur <[statusCode]> – Oppia", "I18N_ERROR_PAGE_TITLE_400": "Erreur 400 (requête erronée) – Oppia", "I18N_ERROR_PAGE_TITLE_401": "Erreur 401 (non autorisé) – Oppia", "I18N_ERROR_PAGE_TITLE_404": "Erreur 404 (non trouvé) – Oppia", "I18N_ERROR_PAGE_TITLE_500": "Erreur 500 (erreur interne du serveur) – Oppia", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "Vous êtes prêt pour plus de friandises ? Répondez à ce court questionnaire pour vérifier votre compréhension de ce que vous avez appris jusqu’ici !", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE": "Égalité des fractions (récapitulatif)", + "I18N_EXPLORATION_0FBWxCE5egOw_DESCRIPTION": "Est-il possible qu’une fraction soit déguisée en une fraction différente ? Voyons ce qui se passe quand Matthew rencontre Crumb une seconde fois.", + "I18N_EXPLORATION_0FBWxCE5egOw_TITLE": "Fractions équivalentes", + "I18N_EXPLORATION_0X0KC9DXWwra_DESCRIPTION": "Dans la maison de Kamal, chacun fête l’anniversaire de Samir. Kamal ajoute un peu d’amusement en proposant un jeu mathématique pour Ava et Samir. Voyez si vous pouvez résoudre les questions posées !", + "I18N_EXPLORATION_0X0KC9DXWwra_TITLE": "Récapitulatif : faculté de résolution de problème", + "I18N_EXPLORATION_1904tpP0CYwY_DESCRIPTION": "Il est temps pour Aria de commencer à planter des légumes ! 
Poursuivez votre aventure dans le jardinage en l’aidant dans le jardin et commencez à mémoriser vos multiples.", + "I18N_EXPLORATION_1904tpP0CYwY_TITLE": "Expressions à un chiffre entre 1 et 5", + "I18N_EXPLORATION_2mzzFVDLuAj8_DESCRIPTION": "Rejoignez James et son oncle lors de leur apprentissage des rapports et de la façon de les utiliser !", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "Qu’est-ce qu’un rapport ?", + "I18N_EXPLORATION_40a3vjmZ7Fwu_DESCRIPTION": "Nina et sa mère heurtent leur amie, qui possède également un étal de fruits. Rejoignez Nina quand elle utilise la division pour aider son amie avec son étal !", + "I18N_EXPLORATION_40a3vjmZ7Fwu_TITLE": "Rappels et cas spéciaux", + "I18N_EXPLORATION_53Ka3mQ6ra5A_DESCRIPTION": "Maya, Omar et Malik visitent un supermarché pour obtenir plus d’ingrédients et ont besoin d’additionner de plus grands nombres. Voyez si vous pouvez les aider !", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE": "Addition de plus grands nombres", + "I18N_EXPLORATION_5I4srORrwjt2_DESCRIPTION": "Au snack-bar, Kamal dit qu’il doit être intelligent sur la façon dont ils dépensent leur quantité limitée d’argent. Aidez Ava et Samir à trouver quels en-cas ils peuvent obtenir !", + "I18N_EXPLORATION_5I4srORrwjt2_TITLE": "Proportionnalité et méthode unitaire", + "I18N_EXPLORATION_5NWuolNcwH6e_DESCRIPTION": "James essaye de réaliser ses propres smoothies... mais ils s’avèrent ne pas être bien bons. Quelle erreur a-t-il commise ? Jouez cette leçon pour le découvrir !", + "I18N_EXPLORATION_5NWuolNcwH6e_TITLE": "L’importance de l’ordre", + "I18N_EXPLORATION_670bU6d9JGBh_DESCRIPTION": "Aidez Matthew à résoudre un problème pour un des clients de M. Baker qui apprend au sujet des nombres et de la ligne numérique. Jouez cette leçon pour démarrer !", + "I18N_EXPLORATION_670bU6d9JGBh_TITLE": "Nombres mélangés et la ligne numérique 1", + "I18N_EXPLORATION_6Q6IyIDkjpYC_DESCRIPTION": "M. 
Baker a reçu une très grosse commande et a besoin de l’aide de Matthew pour acheter plus d’ingrédients. Pouvez-vous déterminer ce dont ils ont besoin en utilisant des fractions ?", + "I18N_EXPLORATION_6Q6IyIDkjpYC_TITLE": "Soustraction des fractions", + "I18N_EXPLORATION_8HTzQQUPiK5i_DESCRIPTION": "Joignez-vous à Nina et sa mère quand elles vont au marché. Aidez-les à utiliser la division pour déterminer de combien de sacs elles ont besoin pour leurs courses !", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE": "Qu’est-ce que la division ?", + "I18N_EXPLORATION_9DITEN8BUEHw_DESCRIPTION": "Apprenez comment évaluer des expressions comprenant de multiples opérations d’addition et de soustraction.", + "I18N_EXPLORATION_9DITEN8BUEHw_TITLE": "Addition et soustraction de plusieurs nombres", + "I18N_EXPLORATION_9trAQhj6uUC2_DESCRIPTION": "Les fractions peuvent être utilisées pour représenter des parts d’un gâteau. Mais peuvent-elles également être utilisées pour représenter des parties de groupes de choses ? Suivez cette leçon pour le découvrir !", + "I18N_EXPLORATION_9trAQhj6uUC2_TITLE": "Fractions d’un groupe", + "I18N_EXPLORATION_BDIln52yGfeH_DESCRIPTION": "Alors qu’ils se rendent au parc de loisir, Ava et Samir veulent s’amuser mais Kamal déclare qu’ils ont besoin de voir s’ils ont assez d’argent. 
Aidez-les avec les mathématiques !", + "I18N_EXPLORATION_BDIln52yGfeH_TITLE": "Simplification d’équations", + "I18N_EXPLORATION_BJd7yHIxpqkq_DESCRIPTION": "Aidez nos trois héros à réaliser une meilleure pizza, tout en apprenant comment faire des additions depuis zéro et trouver des nombres manquants dans un « fait supplémentaire ».", + "I18N_EXPLORATION_BJd7yHIxpqkq_TITLE": "Bases de l’addition", + "I18N_EXPLORATION_IrbGLTicm0BI_DESCRIPTION": "Alors qu’Ava et Kamal attendent Mme Plum, voyons si vous avez appris comment appliquer différentes stratégies pour résoudre des problèmes du monde réel !", + "I18N_EXPLORATION_IrbGLTicm0BI_TITLE": "Récapitulatif : résolution de problèmes du monde réel", + "I18N_EXPLORATION_Jbgc3MlRiY07_DESCRIPTION": "Après avoir appris toutes ces nouvelles facultés, Ava veut découvrir ce qu’elle peut en faire. Rejoignez Ava pour mettre ses nouvelles facultés en application pour résoudre des problèmes du monde réel !", + "I18N_EXPLORATION_Jbgc3MlRiY07_TITLE": "Scénarii de modélisation du monde réel", + "I18N_EXPLORATION_K645IfRNzpKy_DESCRIPTION": "Jaime apprend la valeur placée de chaque chiffre dans un grand nombre.", + "I18N_EXPLORATION_K645IfRNzpKy_TITLE": "Que sont les valeurs positionnelles", + "I18N_EXPLORATION_K89Hgj2qRSzw_DESCRIPTION": "Kamal révèle les techniques qu’il a utilisées pour déterminer rapidement le temps dont il a besoin au réveil. Vous voulez savoir ce qu’il fait ? Jouez cette leçon pour le découvrir !", + "I18N_EXPLORATION_K89Hgj2qRSzw_TITLE": "La loi distributive", + "I18N_EXPLORATION_Knvx24p24qPO_DESCRIPTION": "Jaime comprend la valeur de son score de jeu d’arcade.", + "I18N_EXPLORATION_Knvx24p24qPO_TITLE": "Trouver les valeurs d’un nombre", + "I18N_EXPLORATION_MRJeVrKafW6G_DESCRIPTION": "Le jardin d’Aria est une grande réussite ! Chaque semaine de l’été, de plus en plus de fruits et légumes poussent. 
Aidez Aria à compter combien de légumes ont poussé.", + "I18N_EXPLORATION_MRJeVrKafW6G_TITLE": "Multiplication par des puissances de dix", + "I18N_EXPLORATION_MjZzEVOG47_1_DESCRIPTION": "Nous avons appris que le « dénominateur » d’une fraction est le nombre de parts égales dans la totalité. Mais pourquoi les parts doivent-elles être les mêmes ? Découvrons-le !", + "I18N_EXPLORATION_MjZzEVOG47_1_TITLE": "La signification des « parts égales »", + "I18N_EXPLORATION_OKxYhsWONHZV_DESCRIPTION": "Rejoignez Maya et Omar qui apprennent comment des nombres peuvent être « mis ensemble » ou « ajoutés » pour créer un nouveau nombre !", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "Qu’est-ce que l’addition ?", + "I18N_EXPLORATION_PLAYER_PAGE_TITLE": "<[explorationTitle]> – Oppia", + "I18N_EXPLORATION_PsfDKdhd6Esz_DESCRIPTION": "Maya, Omar et Malik semblent avoir perdu de l’argent à cause d’ingrédients abîmés. En utilisant la soustraction, pouvez-vous les aider à savoir comment en tenir compte ?", + "I18N_EXPLORATION_PsfDKdhd6Esz_TITLE": "Soustraction des grands nombres, 2e partie", + "I18N_EXPLORATION_R7WpsSfmDQPV_DESCRIPTION": "Ensemble avec Aria, apprenons ce qu’est la multiplication, comment écrire des expressions avec elle et comment l’utiliser pour résoudre des problèmes dans le voisinage d’Aria !", + "I18N_EXPLORATION_R7WpsSfmDQPV_TITLE": "Parties d’expressions de multiplication", + "I18N_EXPLORATION_RvopsvVdIb0J_DESCRIPTION": "Il est temps pour Jacques de vendre son nouveau smoothie ! Il installe un stand avec Oncle Berry. Peuvent-ils deviner combien d’argent chacun d’eux devrait obtenir ?", + "I18N_EXPLORATION_RvopsvVdIb0J_TITLE": "Relier les rapports aux nombres réels", + "I18N_EXPLORATION_SR1IKIdLxnm1_DESCRIPTION": "Ava est fatiguée de jouer aux jeux du parc de loisir, donc Kamal a créé un jeu de math amusant. Pouvez-vous battre Kamal à ce jeu ? 
Cliquez sur cette leçon pour le découvrir !", + "I18N_EXPLORATION_SR1IKIdLxnm1_TITLE": "Récapitulatif : variables", + "I18N_EXPLORATION_STARTING_FROM_BEGINNING": "Toutes nos félicitations pour avoir terminé cette leçon ! Vous reprendrez maintenant la leçon depuis le début si vous y revenez la prochaine fois.", + "I18N_EXPLORATION_STATE_PREVIOUSLY_COMPLETED": "Vous avez répondu à cette question lors d’une session précédente.", + "I18N_EXPLORATION_VKXd8qHsxLml_DESCRIPTION": "Maya, Omar et Malik ont remarqué que certains de leurs ingrédients se sont abîmés. Pouvez-vous les aider à trouver combien ils en ont perdu, en utilisant la soustraction ?", + "I18N_EXPLORATION_VKXd8qHsxLml_TITLE": "Soustraction des grands nombres, 1re partie", + "I18N_EXPLORATION_Vgde5_ZVqrq5_DESCRIPTION": "Jacques a trouvé à quoi il voulait que ressemble sa propre recette de smoothie, mais il a du mal à combiner toutes les parties ensemble. Pouvez-vous l’aider pour cela ?", + "I18N_EXPLORATION_Vgde5_ZVqrq5_TITLE": "Combinaison de rapports", + "I18N_EXPLORATION_W0xq3jW5GzDF_DESCRIPTION": "Quelque chose d’inattendu s’est produit lorsque Maya, Omar et Malik ont essayé de faire une seconde pizza.", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "Qu’est-ce que la soustraction ?", + "I18N_EXPLORATION_WulCxGAmGE61_DESCRIPTION": "Nina visite la maison de Sandra. Rejoignez-la alors qu’elle utilise la division pour aider Sandra avec un problème encore plus piégeux, comme transférer tous les fruits dans des caisses !", + "I18N_EXPLORATION_WulCxGAmGE61_TITLE": "Division par des multiples de dix", + "I18N_EXPLORATION_WwqLmeQEn9NK_DESCRIPTION": "Jamie continue d’apprendre davantage de techniques pour arrondir les nombres.", + "I18N_EXPLORATION_WwqLmeQEn9NK_TITLE": "Arrondi des nombres, 2e partie", + "I18N_EXPLORATION_Xa3B_io-2WI5_DESCRIPTION": "Rejoignez Matthieu alors qu’il aide M. 
Baker à réparer les dégâts, tout en apprenant comment ajouter des fractions.", + "I18N_EXPLORATION_Xa3B_io-2WI5_TITLE": "Addition des fractions", + "I18N_EXPLORATION_aAkDKVDR53cG_DESCRIPTION": "Jamie apprend si un nombre est plus petit ou plus grand qu’un autre nombre.", + "I18N_EXPLORATION_aAkDKVDR53cG_TITLE": "Comparaison de nombres", + "I18N_EXPLORATION_aHikhPlxYgOH_DESCRIPTION": "Rejoignez Matthew qui apprend comment des nombres mélangés sont juste des fractions ordinaires déguisées.", + "I18N_EXPLORATION_aHikhPlxYgOH_TITLE": "Nombres mélangés et la ligne des nombres, 2", + "I18N_EXPLORATION_aqJ07xrTFNLF_DESCRIPTION": "Après avoir utilisé la méthode unitaire pour trouver quel snack Ava devrait acheter, c’est au tour de Samir en utilisant une nouvelle méthode. Rejoignez Samir pour trouver quel snack il peut obtenir !", + "I18N_EXPLORATION_aqJ07xrTFNLF_TITLE": "Résolution de problème avec des modèles de boîtes", + "I18N_EXPLORATION_avwshGklKLJE_DESCRIPTION": "Jamie apprend à simplifier un nombre sans faire de gros changements à sa valeur.", + "I18N_EXPLORATION_avwshGklKLJE_TITLE": "Arrondi des nombres, 1re partie", + "I18N_EXPLORATION_cQDibOXQbpi7_DESCRIPTION": "Aria est prête à planter quelques légumes plus gros dans son jardin ! Aidez-la à les planter et les arroser tout en mémorisant avec elle quelques multiples.", + "I18N_EXPLORATION_cQDibOXQbpi7_TITLE": "Expressions à un seul chiffre entre 5 et 9", + "I18N_EXPLORATION_hNOP3TwRJhsz_DESCRIPTION": "Aria reprend l’école ! Elle veut un grand jardin pour les enfants de son école. Aidez-la à le planter avec Omar en utilisant la multiplication avec des nombres plus grands.", + "I18N_EXPLORATION_hNOP3TwRJhsz_TITLE": "Multiplication à plusieurs chiffres, 1re partie", + "I18N_EXPLORATION_ibeLZqbbjbKF_DESCRIPTION": "À la gare, Ava et Kamal ne trouvent aucun train ! Kamal découvre une erreur dans les calculs. 
Les aiderez-vous à trouver quand arrive le train ?", + "I18N_EXPLORATION_ibeLZqbbjbKF_TITLE": "Attribution de valeurs pour des variables", + "I18N_EXPLORATION_k2bQ7z5XHNbK_DESCRIPTION": "Est-il possible que deux rapports différents signifient la même chose ? Découvrez-le avec James et Oncle Berry qui essayent une nouvelle recette pour des smoothies au chocolat.", + "I18N_EXPLORATION_k2bQ7z5XHNbK_TITLE": "Rapports équivalents", + "I18N_EXPLORATION_kYSrbNDCv5sH_DESCRIPTION": "Ava veut organiser au mieux l’anniversaire de Samir, elle commence donc par planifier sa journée. Aidez-la à utiliser des raccourcis pour évaluer des expressions afin de déterminer le tout !", + "I18N_EXPLORATION_kYSrbNDCv5sH_TITLE": "Les lois commutatives et associatives", + "I18N_EXPLORATION_lNpxiuqufPiw_DESCRIPTION": "Ava aura bientôt besoin de mettre en application ses connaissances dans quelques problèmes du monde réel. Pourrez-vous l’aider ? Essayez cette leçon pour voir si vous avez maîtrisé les expressions !", + "I18N_EXPLORATION_lNpxiuqufPiw_TITLE": "Récapitulatif : travail avec les expressions", + "I18N_EXPLORATION_lOU0XPC2BnE9_DESCRIPTION": "Joignez-vous à Nina qui aide Sandra à réaliser des jus de fruit pour son étal, en utilisant une nouvelle technique de division !", + "I18N_EXPLORATION_lOU0XPC2BnE9_TITLE": "Division longue, diviseurs à un seul chiffre", + "I18N_EXPLORATION_m1nvGABWeUoh_DESCRIPTION": "Ava et Samir ont fini de jouer et veulent aller à la boutique pour utiliser leurs tickets. Là-bas, ils trouvent une machine mystérieuse ! 
Cliquez sur la leçon suivante pour la découvrir !", + "I18N_EXPLORATION_m1nvGABWeUoh_TITLE": "Qu’est-ce qu’une moyenne ?", + "I18N_EXPLORATION_nLmUS6lbmvnl_DESCRIPTION": "James peut-il déterminer si un smoothie est plus « lacté » ou plus « crémeux comme un yaourt », juste en consultant la recette, mais sans avoir besoin de réaliser manuellement chaque smoothie ?", + "I18N_EXPLORATION_nLmUS6lbmvnl_TITLE": "Comparaison de rapports", + "I18N_EXPLORATION_nTMZwH7i0DdW_DESCRIPTION": "Ava et Kamal vont à la gare ferroviaire. Ils voient Mme Plum, la boulangère, et veulent l’aider à résoudre des problèmes en utilisant des expressions avec les recettes ou dépenses et le bénéfice.", + "I18N_EXPLORATION_nTMZwH7i0DdW_TITLE": "Des problèmes du monde réel aux expressions", + "I18N_EXPLORATION_osw1m5Q3jK41_DESCRIPTION": "C’est enfin l’heure des petits gâteaux ! Profitez de l’occasion pour vous assurer que vous avez bien compris les connaissances apprises dans les leçons précédentes !", + "I18N_EXPLORATION_osw1m5Q3jK41_TITLE": "Opérations avec les fractions (récapitulatif)", + "I18N_EXPLORATION_rDJojPOc0KgJ_DESCRIPTION": "Ava et Kamal achètent des cadeaux pour l’anniversaire de leur cousin ! Joignez-vous à eux pour découvrir comment calculer les prix en évaluant des expressions.", + "I18N_EXPLORATION_rDJojPOc0KgJ_TITLE": "Évaluation d’expression – Ordre des opérations", + "I18N_EXPLORATION_rfX8jNkPnA-1_DESCRIPTION": "Pouvez-vous aider Matthew à gagner de bons gâteaux ? 
Répondez à ce court questionnaire pour voir ce dont vous vous souvenez au sujet des fractions.", + "I18N_EXPLORATION_rfX8jNkPnA-1_TITLE": "Représentation des fractions (récapitulatif)", + "I18N_EXPLORATION_rwN3YPG9XWZa_DESCRIPTION": "Tout en dégustant une bonne crème glacée, Ava et Kamal essayent de répondre à quelques questions posées par Ava concernant leur prochaine visite au parc de loisir !", + "I18N_EXPLORATION_rwN3YPG9XWZa_TITLE": "Résolution de problèmes du monde réel", + "I18N_EXPLORATION_tIoSb3HZFN6e_DESCRIPTION": "James apprend à réduire un rapport à sa forme la plus simple, afin de rendre plus faciles ses calculs.", + "I18N_EXPLORATION_tIoSb3HZFN6e_TITLE": "Écriture de rapports dans la forme la plus simple", + "I18N_EXPLORATION_umPkwp0L1M0-_DESCRIPTION": "Rejoignez Matthew qui rencontre M. Baker pour la première fois et apprend les fractions. Qu’est-ce qu’une fraction ? Jouez cette leçon pour en découvrir plus !", + "I18N_EXPLORATION_umPkwp0L1M0-_TITLE": "Qu’est-ce qu’une fraction ?", + "I18N_EXPLORATION_v8fonNnX4Ub1_DESCRIPTION": "Ava et Kamal continuent à aider Mme Plum dans son activité boulangère, mais il y a quelques inconnues dans les expressions. Ava pourra-t-elle l’aider ?", + "I18N_EXPLORATION_v8fonNnX4Ub1_TITLE": "Écriture d’expressions avec des variables", + "I18N_EXPLORATION_wE9pyaC5np3n_DESCRIPTION": "Nina et Sandra participent à un concours. Rejoignez Nina qui utilise ses connaissances sur la division afin de vendre le plus possible de fruits et de jus pour gagner le grand prix !", + "I18N_EXPLORATION_wE9pyaC5np3n_TITLE": "Division avec plusieurs chiffres", + "I18N_EXPLORATION_zIBYaqfDJrJC_DESCRIPTION": "Poursuivez vos aventures dans le jardinage avec Aria, qui cultive des fruits, apprend et pratique la multiplication avec Omar !", + "I18N_EXPLORATION_zIBYaqfDJrJC_TITLE": "Ce que signifie la multiplication", + "I18N_EXPLORATION_zNb0Bh27QtJ4_DESCRIPTION": "Au snack-bar, Kamal fouille ses poches et ne peut pas trouver son porte-monnaie. 
Sans le porte-monnaie, ils ne peuvent rien obtenir à manger ! Pouvez-vous aider Kamal à trouver son porte-monnaie ?", + "I18N_EXPLORATION_zNb0Bh27QtJ4_TITLE": "Progressions arithmétiques", + "I18N_EXPLORATION_zTg2hzTz37jP_DESCRIPTION": "Après s’y être pris bien en avance, Aria reçoit ses amies pour l’aider à cultiver le jardin pour son école ! Utilisez vos connaissances pour les aider à planter un jardin fantastique !", + "I18N_EXPLORATION_zTg2hzTz37jP_TITLE": "Multiplication à plusieurs chiffres, 2e partie", + "I18N_EXPLORATION_zVbqxwck0KaC_DESCRIPTION": "James et Oncle Berry sont invités à préparer des smoothies pour la fête de leur voisin. Cela sera-t-il le début de leur célébrité comme préparateurs de smoothies ?", + "I18N_EXPLORATION_zVbqxwck0KaC_TITLE": "Relations de proportionnalité", + "I18N_EXPLORATION_zW39GLG_BdN2_DESCRIPTION": "Alors que Matthew apprend comment comparer des fractions en fonction de leur taille, un accident survient à la boulangerie et M. Baker est ennuyé. Voyons ce qui s’est passé !", + "I18N_EXPLORATION_zW39GLG_BdN2_TITLE": "Comparaison de fractions", + "I18N_FACILITATOR_DASHBOARD_PAGE_TITLE": "Tableau de bord de l’animateur | Oppia", + "I18N_FEEDBACK_INSTRUCTION": "Le message de commentaire ne doit pas dépasser <[count]> caractères.", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anonyme", "I18N_FOOTER_ABOUT": "À propos", "I18N_FOOTER_ABOUT_ALL_CAPS": "À PROPOS D’OPPIA", + "I18N_FOOTER_ANDROID_APP": "Application Android", "I18N_FOOTER_AUTHOR_PROFILES": "Profils des auteurs", "I18N_FOOTER_BROWSE_LIBRARY": "Parcourir la bibliothèque", "I18N_FOOTER_CONTACT_US": "Nous contacter", @@ -245,13 +572,13 @@ "I18N_FOOTER_TEACH_LEARN_ALL_CAPS": "ENSEIGNER / APPRENDRE", "I18N_FOOTER_TEACH_PAGE": "Pour les parents ou enseignants", "I18N_FOOTER_TERMS_OF_SERVICE": "Conditions d’utilisation", - "I18N_FORMS_TYPE_NUMBER": "Entrer un nombre", + "I18N_FORMS_TYPE_NUMBER": "Saisissez un nombre", "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "Veuillez saisir un nombre 
supérieur ou égal à <[minValue]>.", "I18N_FORMS_TYPE_NUMBER_AT_MOST": "Veuillez saisir un nombre inférieur ou égal à <[maxValue]>.", "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "Veuillez saisir un nombre décimal valide.", "I18N_GENERATE_ATTRIBUTION": "Générer l’attribution", - "I18N_GET_STARTED_PAGE_BREADCRUMB": "Commencer", - "I18N_GET_STARTED_PAGE_HEADING": "C'est parti !", + "I18N_GET_STARTED_PAGE_BREADCRUMB": "Bien démarrer", + "I18N_GET_STARTED_PAGE_HEADING": "C’est parti !", "I18N_GET_STARTED_PAGE_PARAGRAPH_1": "Créer une exploration est facile et libre. Partagez votre connaissance avec des étudiants du monde entier, et obtenez des retours que vous pouvez utiliser pour améliorer l’efficacité de votre exploration.", "I18N_GET_STARTED_PAGE_PARAGRAPH_10": "De plus, tandis que des élèves utilisent votre exploration, vous pourrez voir les erreurs les plus courantes qu’ils font. Quelquefois, cela donne de nouvelles idées sur là où ils sont perturbés. Il est facile d’améliorer l’exploration avec des commentaires en plus, si vous pensez que d’autres élèves sont susceptibles de répéter les mêmes erreurs. Vous pouvez même envoyer l’élève à une autre étape ou « aller plus loin » en posant une autre question.", "I18N_GET_STARTED_PAGE_PARAGRAPH_11": "Pour vous impliquer dans le projet Oppia et nous aider à remplir notre mission vis-à-vis d’une éducation gratuite, universelle et de grande qualité, contactez-nous sur admin@oppia.org, ou découvrez d’autres moyens de vous impliquer avec notre communauté de bénévoles. Nous sommes impatients de vous entendre !", @@ -260,7 +587,7 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_2_HEADING": "Choisir un sujet", "I18N_GET_STARTED_PAGE_PARAGRAPH_3": "Quand vous aurez choisi un sujet, cliquez simplement sur « Créer » et connectez-vous avec votre compte Google. 
Si vous n’en avez pas, vous pouvez en créer un ici.", "I18N_GET_STARTED_PAGE_PARAGRAPH_3_HEADING": "Créez votre exploration", - "I18N_GET_STARTED_PAGE_PARAGRAPH_4": "Une exploration consiste en plusieurs étapes. Chaque étape peut être comprendre du texte (par ex. une explication écrite), des images et des vidéos. Chaque étape présente à l’étudiant une question, à laquelle il doit répondre afin de poursuivre. Cela peut être une question à choix multiple, nécessiter qu’il saisisse quelque chose, ou toute autre sorte d’interaction disponible.", + "I18N_GET_STARTED_PAGE_PARAGRAPH_4": "Une exploration consiste en plusieurs étapes. Chaque étape peut être comprendre du texte (par ex. une explication écrite), des images et des vidéos. Chaque étape présente à l’étudiant une question, à laquelle il doit répondre afin de poursuivre. Cela peut être une question à choix multiple, nécessiter qu’il saisisse quelque chose, ou bien toute autre sorte d’interaction disponible.", "I18N_GET_STARTED_PAGE_PARAGRAPH_5": "Une fois que l’élève a répondu à la question, Oppia lui donnera un retour et le laissera poursuivre à l’étape suivante. Pour voir comment les élèves expérimentent Oppia, essayez une de ces explorations :", "I18N_GET_STARTED_PAGE_PARAGRAPH_6": "Pour plus d’informations sur la manière de créer des explorations, voir notre documentation utilisateur.", "I18N_GET_STARTED_PAGE_PARAGRAPH_7": "Une fois que vous avez créé votre exploration et êtes prêt pour que des élèves la voient, cliquez sur le bouton « Publier » en haut de la page. 
Cela rendra votre exploration disponible pour des élèves du monde entier !", @@ -270,18 +597,33 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_9": "Quand les élèves avancent dans votre exploration, ils peuvent vous envoyer des remarques pour vous alerter sur les problèmes ou pour partager des idées d’amélioration.", "I18N_GET_STARTED_PAGE_PARAGRAPH_9_HEADING": "Améliorer votre exploration", "I18N_GET_STARTED_PAGE_TITLE": "Commencer", + "I18N_GOAL_LIMIT": "Limité à <[limit]> objectifs", + "I18N_GOT_IT": "C’est bon", + "I18N_HEADING_VOLUNTEER": "Bénévole", + "I18N_HINT_NEED_HELP": "Besoin d’aide ? Afficher un conseil pour ce problème !", + "I18N_HINT_TITLE": "Conseil", + "I18N_INTERACTIONS_ALGEBRAIC_EXPR_INSTRUCTION": "Taper une expression ici.", + "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "Taper le code dans l’éditeur", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Aller à l’éditeur de code", + "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Tirer et lâcher des éléments", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "Ne pas mettre à 0 le dénominateur", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Entrer une fraction au format « x/y » ou un nombre mixte sous la forme « A x/y ».", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Entrer une fraction sous la forme « x/y ».", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS": "Veuillez n’utiliser que des chiffres numériques, des espaces ou des barres obliques (/)", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS_LENGTH": "Aucun des nombres dans la fraction ne devrait comporter plus de 7 chiffres", + "I18N_INTERACTIONS_FRACTIONS_INVALID_FORMAT": "Veuillez saisir une fraction valide (par exemple, « 5/3 » ou « 1 2/3 »)", + "I18N_INTERACTIONS_FRACTIONS_NON_EMPTY": "Veuillez saisir une valeur de fraction non vide.", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "Veuillez saisir votre réponse sous forme de fraction (par exemple, « 5/3 » au lieu de « 1 2/3 »).", + "I18N_INTERACTIONS_FRACTIONS_PROPER_FRACTION": "Veuillez saisir 
une réponse avec une partie fractionnelle « propre » (par exemple, « 1 2/3 » au lieu de « 5/3 »).", + "I18N_INTERACTIONS_FRACTIONS_SIMPLEST_FORM": "Veuillez saisir une réponse dans sa forme la plus simple (par exemple, « 1/3 » au lieu de « 2/6 »).", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "Ajouter une arête", "I18N_INTERACTIONS_GRAPH_ADD_NODE": "Ajouter un nœud", "I18N_INTERACTIONS_GRAPH_DELETE": "Supprimer", "I18N_INTERACTIONS_GRAPH_EDGE_FINAL_HELPTEXT": "Cliquer sur le sommet cible pour créer l’arête (cliquer sur le même sommet pour annuler la création de l’arête).", "I18N_INTERACTIONS_GRAPH_EDGE_INITIAL_HELPTEXT": "Cliquer sur le sommet initial de l’arête à créer.", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "Graphe non valide !", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "Créer un graphe", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "Afficher le graphe", "I18N_INTERACTIONS_GRAPH_MOVE": "Déplacer", "I18N_INTERACTIONS_GRAPH_MOVE_FINAL_HELPTEXT": "Cliquer sur n’importe quel point pour déplacer l’arête vers ce point.", "I18N_INTERACTIONS_GRAPH_MOVE_INITIAL_HELPTEXT": "Cliquer sur l’arête à déplacer.", @@ -292,66 +634,108 @@ "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "et <[vertices]> sommets", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Mettre à jour le libellé", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Mettre à jour le poids", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Cliquer sur l’image", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Sélectionner une image à afficher]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Vous pouvez sélectionner plus de choix.", - "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Veuillez sélectionner au moins une option.} other{Veuillez sélectionner au moins # options.}}", + "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Veuillez sélectionner toutes les options correctes.} other{Veuillez sélectionner au moins # options.}}", 
"I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{Vous ne pouvez sélectionner qu’un seul choix.} other{Vous ne pouvez pas sélectionner plus de # choix.}}", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "Cliquer sur la carte", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "Afficher la carte", + "I18N_INTERACTIONS_MATH_EQ_INSTRUCTION": "Taper une équation ici.", "I18N_INTERACTIONS_MUSIC_CLEAR": "Effacer", + "I18N_INTERACTIONS_MUSIC_INSTRUCTION": "Tirer les notes vers l’équipe pour former une séquence", + "I18N_INTERACTIONS_MUSIC_NARROW_INSTRUCTION": "Afficher l’équipe de musique", "I18N_INTERACTIONS_MUSIC_PLAY": "Jouer", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Jouer la séquence cible", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "Veuillez saisir une devise monétaire valide (par exemple, « $5 » ou « Rs 5 »)", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY_FORMAT": "Veuillez écrire les unités de devises monétaire au début", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_UNIT_CHARS": "Veuillez vous assurer que l’unité ne contient que des chiffres, des lettres ou des symboles parmi « ( ) * ^ / - »", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_VALUE": "Veuillez vous assurer que la valeur est soit une fraction, soit un nombre simple.", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "Formats possibles des unités", - "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "Êtes-vous sûr de vouloir réinitialiser votre code ?", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_DECIMAL": "Au plus un séparateur de décimales (.) 
doit être présent.", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_EXPONENT": "Au plus un signe d’exposant (e) doit être présent.", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_MINUS": "Au plus un signe moins (-) doit être présent.", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_COMMA": "La réponse doit contenir au maximum 15 chiffres (de 0 à 9) sans compter les symboles (, . -).", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_DOT": "La réponse doit contenir au maximum 15 chiffres (de 0 à 9), sans compter les symboles (. ou -).", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "La réponse doit être un nombre valide.", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "La réponse doit être supérieure ou égale à zéro.", + "I18N_INTERACTIONS_NUMERIC_INPUT_MINUS_AT_BEGINNING": "Le signe moins (-) n’est permis qu’au début.", + "I18N_INTERACTIONS_NUMERIC_INPUT_NO_INVALID_CHARS": "Seuls les chiffres de « 0 » à « 9 », le point « . », la lettre latine « e » et le signe moins « - » sont autorisés.", + "I18N_INTERACTIONS_NUMERIC_INPUT_NO_TRAILING_DECIMAL": "Les décimales finales en excès ne sont pas acceptées.", + "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "Êtes-vous sûr(e) de vouloir réinitialiser votre code ?", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Annuler", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Confirmation demandée", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Effacer le code", + "I18N_INTERACTIONS_PENCILCODE_INSTRUCTION": "Modifier le code. 
Cliquer sur « Jouer » pour le vérifier !", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Afficher l’éditeur de code", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "Veuillez saisir un rapport valide (par exemple, « 1:2 » ou « 1:2:3 »).", + "I18N_INTERACTIONS_RATIO_INCLUDES_ZERO": "Les rapports ne doivent inclure aucun 0 comme élément.", + "I18N_INTERACTIONS_RATIO_INVALID_CHARS": "Veuillez écrire un rapport qui consiste en chiffres séparés par des deux-points (par exemple, « 1:2 » ou « 1:2:3 »).", + "I18N_INTERACTIONS_RATIO_INVALID_COLONS": "Votre réponse comprend plusieurs signes deux-points successifs.", + "I18N_INTERACTIONS_RATIO_INVALID_FORMAT": "Veuillez saisir un rapport valide (par exemple, « 1:2 » ou « 1:2:3 »).", + "I18N_INTERACTIONS_RATIO_NON_INTEGER_ELEMENTS": "Pour cette question, chaque élément dans votre rapport doit être un nombre entier (pas une fraction ni un nombre avec des décimales).", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Ajouter l’élément", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Oups, il semble que votre ensemble a des doublons !", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(Ajouter un élément par ligne.)", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Aucune réponse fournie.", "I18N_INTERACTIONS_SUBMIT": "Soumettre", + "I18N_INTERACTIONS_TERMS_LIMIT": "Le créateur a spécifié que le nombre de termes dans la réponse doit être <[termsCount]>", + "I18N_INVALID_TAGS_AND_ATTRIBUTES_ALERT": "Des balises et attributs non valides ont été supprimés de l’image téléversée. 
Si votre image semble endommagée, veuillez \" target=\"_blank\">nous le faire savoir, puis essayez de téléverser une image SVG différente.", + "I18N_JOIN_LEARNER_GROUP_BUTTON": "Rejoindre le groupe", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Afficher Oppia en :", "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Bon après-midi", + "I18N_LEARNER_DASHBOARD_ALL": "Tout", "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Modifier les objectifs", "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Bronze", "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Leçons de la communauté", - "I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "Buts atteints", + "I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "Objectifs atteints", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "Terminé", "I18N_LEARNER_DASHBOARD_COMPLETED_TO_INCOMPLETE_COLLECTIONS": "<[numberMoved]> des collections que vous avez terminées ont été déplacées vers la section « En cours » parce que de nouvelles explorations leur ont été ajoutées !", "I18N_LEARNER_DASHBOARD_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Continuer là où vous l’avez laissé", - "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Buts actuels", + "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Objectifs actuels", + "I18N_LEARNER_DASHBOARD_DECLINE_INVITATION_MODAL_BODY": "Voulez-vous vraiment refuser lʼinvitation à <[groupName]> ?", + "I18N_LEARNER_DASHBOARD_DECLINE_INVITATION_MODAL_HEADER": "Refuser lʼinvitation ?", "I18N_LEARNER_DASHBOARD_EMPTY_COLLECTION_PLAYLIST": "Il semble n’y avoir encore aucune collection dans votre liste « À jouer plus tard ». Allez dans la bibliothèque et construisez votre liste à jouer supervisée !", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_COLLECTIONS": "Il semblerait que vous n’ayez encore terminé aucune collection. Allez voir à la bibliothèque pour commencer une nouvelle collection excitante !", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_EXPLORATIONS": "Il semblerait que vous n'ayez encore terminé aucune exploration. 
Allez voir à la bibliothèque pour commencer une nouvelle exploration excitante !", - "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_GOALS_SECTION": "Atteindre un but par le haut et voir ici votre progrès quand il est achevé !", - "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_STORIES_SECTION": "Basculer vers la salle de classe pour terminer une nouvelle histoire excitante !", - "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Commencer à apprendre par ", + "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_GOALS_SECTION": "Atteignez un objectif ci-dessus et voyez ici votre progrès quand il est atteint !", + "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_STORIES_SECTION": "Basculer vers la salle de classe pour terminer un nouveau récit excitant !", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Commencer à apprendre en ", "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_HEADING": "Démarrer", - "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_SET_A_GOAL": "fixer un but !", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_SET_A_GOAL": "se fixant un objectif !", "I18N_LEARNER_DASHBOARD_EMPTY_CURRENT_GOALS_SECTION": "Commencer à apprendre en sélectionnant un but ci-dessous !", - "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "Il semble n’y avoir encore aucune exploration dans votre liste « À jouer plus tard ». Allez dans la bibliothèque et construisez votre liste à jouer supervisée !", - "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "Vous n'avez pas encore de fils de commentaires actifs. Vos commentaires contribuent à améliorer la qualité de nos cours. Vous pouvez le faire en commençant n'importe laquelle de nos leçons et en soumettant vos précieux commentaires !", + "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "Il semble n’y avoir encore aucune exploration dans votre liste « À jouer plus tard ». 
Allez dans la bibliothèque et construisez votre liste à jouer supervisée !", + "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "Vous n'avez encore aucun fil actif de commentaires. Vos commentaires contribuent à améliorer la qualité de nos cours. Vous pouvez le faire en commençant n’importe laquelle de nos leçons et en soumettant vos précieux commentaires !", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "Il semblerait que vous n’ayez pour l’instant aucune collection partiellement complétée. Allez dans la bibliothèque pour commencer une nouvelle collection excitante !", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "Il semble que vous n’avez pas d’exploration partiellement complétée pour l’instant. Allez dans la bibliothèque pour commencer une nouvelle exploration excitante !", + "I18N_LEARNER_DASHBOARD_EMPTY_LEARN_SOMETHING_NEW_SECTION": "Il semble que vous avez atteint la limite de sélection cible. Dirigez-vous vers la bibliothèque et explorez d’autres explorations.", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "Commencez par ", - "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "Fixer un objectif permet à Oppia de vous donner de meilleures recommandations dans votre tableau de bord qui contribuent à votre parcours d'apprentissage.", - "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "fixer un objectif! ", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "Fixer un objectif permet à Oppia de vous donner de meilleures recommandations dans votre tableau de bord qui contribuent à votre parcours d’apprentissage.", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "fixer un objectif ! ", + "I18N_LEARNER_DASHBOARD_EMPTY_SUBSCRIPTIONS": "Il semblerait que vous ne vous soyez encore abonné à aucun créateur. 
Allez voir à la bibliothèque pour découvrir les nouveaux créateurs ainsi que leur explorations intéressantes !", - "I18N_LEARNER_DASHBOARD_EMPTY_SUGGESTED_FOR_YOU_SECTION": "Bravo, vous avez terminé toutes les leçons de notre sujet ! Sentez-vous libre de regarder nos autres explorations sur notre page Leçons de la communauté", + "I18N_LEARNER_DASHBOARD_EMPTY_SUGGESTED_FOR_YOU_SECTION": "Bravo, vous avez terminé toutes les leçons de notre sujet ! Sentez-vous libre de regarder nos autres explorations sur notre page Leçons de la communauté.", "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "Bonne soirée", "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "Dernière exécutée", "I18N_LEARNER_DASHBOARD_FEEDBACK_SECTION": "Mises à jour des avis", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "Répondre", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_STATUS_CHANGE_MESSAGE": "État modifié en « <[threadStatus]> »", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_WARNING": "Éviter de partager des informations personnelles car cette discussion est visible publiquement.", - "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Buts", + "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Objectifs", "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "Or", "I18N_LEARNER_DASHBOARD_HOME_SECTION": "Accueil", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "Incomplet", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "En cours", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "Il semblerait que vous n’ayez pas encore essayé aucune de vos explorations.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Voyons comment commencer cet excitant voyage !", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION": "Groupes dʼélèves", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "Vos groupes", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_INVITATIONS": "Vos invitations", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_NO_GROUPS": "Vous nʼavez encore rejoint aucun groupe dʼélèves.", + 
"I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_DECLINE_INVITATION": "Refuser", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_VIEW_PREFERENCES": "Préférences dʼaffichage", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Apprendre quelque chose de nouveau", "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Bonjour", - "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "Nouveau contenu d’histoire disponible", + "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "Nouveau contenu de récit disponible", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COLLECTIONS_FROM_PLAYLIST": "{numberNonexistent, plural, one{Une des collections dans votre liste « À jouer plus tard » n’est plus disponible} other{# des collections dans votre liste « À jouer plus tard » ne sont plus disponibles}}. Nous sommes désolés de la gêne occasionnée.", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_COLLECTIONS": "{numberNonexistent, plural, one{Une des collections que vous avez terminées n’est plus disponible} other{# des collections que vous avez terminées ne sont plus disponibles}}. Nous sommes désolés de la gêne occasionnée.", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_EXPLORATIONS": "{numberNonexistent, plural, one{Une des explorations que vous avez terminées n’est plus disponible} other{# des explorations que vous avez terminées ne sont plus disponibles}}. Nous sommes désolés de la gêne occasionnée.", @@ -360,29 +744,109 @@ "I18N_LEARNER_DASHBOARD_NONEXISTENT_INCOMPLETE_EXPLORATIONS": "{numberNonexistent, plural, one{Une des explorations en cours n’est plus disponible} other{# des explorations en cours ne sont plus disponibles}}. Nous sommes désolés de la gêne occasionnée.", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_COLLECTION": "Il semblerait que vous n’ayez pas encore démarré de collection. Allez voir à la bibliothèque pour commencer une nouvelle collection excitante !", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": "Il semblerait que vous n’ayez encore démarré aucune exploration. 
Allez voir à la bibliothèque pour commencer une nouvelle exploration excitante !", + "I18N_LEARNER_DASHBOARD_PAGE_TITLE": "Tableau de bord de l’élève | Oppia", "I18N_LEARNER_DASHBOARD_PLAYLIST_SECTION": "À jouer plus tard", "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "Progrès", "I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE": "Faites glisser et réarrangez les activités dans l’ordre dans lequel vous voulez les jouer !", "I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE_MOBILE": "Pressez et faites glisser en maintenant pressé pour réarranger les activités dans l’ordre où vous souhaitez les jouer !", "I18N_LEARNER_DASHBOARD_REMOVE_ACTIVITY_MODAL_BODY": "Êtes-vous sûr de vouloir supprimer « <[entityTitle]> » de votre liste « <[sectionNameI18nId]> » ?", "I18N_LEARNER_DASHBOARD_REMOVE_ACTIVITY_MODAL_HEADER": "Supprimer de la liste « <[sectionNameI18nId]> » ?", - "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "Supprimer", + "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "Retirer", "I18N_LEARNER_DASHBOARD_RETURN_TO_FEEDBACK_THREADS_MESSAGE": "Revenir à la liste des messages", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "Envoyer", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Envoi...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Argent", "I18N_LEARNER_DASHBOARD_SKILLS": "Compétences", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "Maîtrise de la compétence", - "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Histoires terminées", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Progression des compétences", + "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Récits terminés", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Abonnements", - "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Progrès :", + "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Progrès :", "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "Actuel :", "I18N_LEARNER_DASHBOARD_SUGGESTION_DESCRIPTION": "Brève description des modifications :", 
"I18N_LEARNER_DASHBOARD_SUGGESTION_NO_CURRENT_STATE": "Oups ! Cet état n’existe plus !", "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Suggéré :", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Suggestion", "I18N_LEARNER_DASHBOARD_TOOLTIP": "Les collections sont des explorations multiples reliées, qui sont prévues pour être achevées en une fois.", + "I18N_LEARNER_DASHBOARD_VIEW": "Afficher", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Afficher la suggestion", + "I18N_LEARNER_GROUPS_SECTION_TITLE": "Vos groupes d’élèves", + "I18N_LEARNER_GROUP_ADD_GROUP_DETAILS": "Ajouter les détails du groupe", + "I18N_LEARNER_GROUP_ADD_NEW_SYLLABUS_ITEMS": "Ajouter de nouveaux éléments de programme", + "I18N_LEARNER_GROUP_ADD_SYLLABUS_ITEMS": "Ajouter des éléments de programme", + "I18N_LEARNER_GROUP_ADD_TO_SYLLABUS": "Ajouter au programme", + "I18N_LEARNER_GROUP_ASSIGNED_SKILLS": "{skillsCount, plural, =1{Compétence attribuée} other{Compétences attribuées}}", + "I18N_LEARNER_GROUP_ASSIGNED_STORIES": "{storiesCount, plural, =1{Récit attribué} other{Récits attribués}}", + "I18N_LEARNER_GROUP_ASSIGNED_SYLLABUS_TAB": "Programme affecté", + "I18N_LEARNER_GROUP_BACK_TO_ALL_LEARNERS_PROGRESS": "Retour aux progrès de tous les élèves", + "I18N_LEARNER_GROUP_BACK_TO_SYLLABUS": "Retour au programme", + "I18N_LEARNER_GROUP_CREATED_TITLE": "Votre groupe <[groupName]> a été créé.", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "Suivant", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "Étape précédente", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "Description", + "I18N_LEARNER_GROUP_DETAILS_GROUP_DESCRIPTION": "Description du groupe (décrivez les objectifs du groupe en 2 à 4 lignes)", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "Titre du groupe", + "I18N_LEARNER_GROUP_DETAILS_MODAL_DESCRIPTION": "Description du groupe", + "I18N_LEARNER_GROUP_DETAILS_TITLE": "Détails", + "I18N_LEARNER_GROUP_FACILITATOR_LABEL_TEXT": "Animateur", + 
"I18N_LEARNER_GROUP_GROUP_DETAILS_SECTION": "Détails du groupe", + "I18N_LEARNER_GROUP_INVITATION_MODAL_HEADER": "Invitation au groupe dʼélèves", + "I18N_LEARNER_GROUP_INVITE_LEARNERS": "Inviter des élèves", + "I18N_LEARNER_GROUP_INVITE_LEARNERS_BY_USERNAME": "Inviter des élèves par leur nom d’utilisateur", + "I18N_LEARNER_GROUP_INVITE_LEARNERS_PLACEHOLDER_TEXT": "Ajoutez le nom d’utilisateur de l’élève à inviter et appuyez sur Entrée", + "I18N_LEARNER_GROUP_INVITE_LEARNER_BUTTON_TEXT": "Inviter un élève", + "I18N_LEARNER_GROUP_INVITE_LIST_TEXT": "Liste d’invitation", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "Ajouté", + "I18N_LEARNER_GROUP_ITEM_ALREADY_ADDED_TO_SYLLABUS": "Déjà dans le programme", + "I18N_LEARNER_GROUP_JOINING_MESSAGE": "Vous êtes sur le point de rejoindre « <[groupName]> »", + "I18N_LEARNER_GROUP_LEARNERS": "{learnersCount, plural, =1{ÉLÈVE} other{ÉLÈVES}}", + "I18N_LEARNER_GROUP_LEARNERS_MODAL_TEXT": "Groupe dʼélèves", + "I18N_LEARNER_GROUP_LEARNERS_PROGRESS_TAB": "Progrès des élèves", + "I18N_LEARNER_GROUP_LEARNERS_SECTION": "{learnersCount, plural, =1{Élève} other{Élèves}}", + "I18N_LEARNER_GROUP_MINIMUM_SYLLABUS_ITEMS_INFO": "Vous devez ajouter au moins un élément du programme (compétence / récit) pour créer un groupe.", + "I18N_LEARNER_GROUP_NO_INVITATIONS": "Vous nʼavez aucune invitation en attente.", + "I18N_LEARNER_GROUP_NO_ITEMS_ADDED": "Vous n’avez encore ajouté aucun nouveau récit ni aucune compétence. Commencez par ajouter cet élément !", + "I18N_LEARNER_GROUP_NO_LEARNERS_HAVE_JOINED": "Il nʼy a personne dans ce groupe. Pourquoi ne pas inviter un élève ?", + "I18N_LEARNER_GROUP_NO_LEARNERS_INVITED": "Vous n’avez encore invité aucun élève. 
Commencez par en inviter un !", + "I18N_LEARNER_GROUP_NO_RESULTS_FOUND": "Aucun résultat trouvé.", + "I18N_LEARNER_GROUP_OVERVIEW_TAB": "Vue d’ensemble", + "I18N_LEARNER_GROUP_PAGE_TITLE": "Groupe d’élèves | Oppia", + "I18N_LEARNER_GROUP_PERMISSION_NOT_GIVEN": "Autorisation non accordée", + "I18N_LEARNER_GROUP_PREFERENCE": "Préférence du groupe d’élèves", + "I18N_LEARNER_GROUP_PREFERENCES_MODAL_SAVE_BUTTON": "Enregistrer", + "I18N_LEARNER_GROUP_PREFERENCES_TAB": "Préférences", + "I18N_LEARNER_GROUP_PROGRESS_IN_STORIES_SECTION": "Progrès dans les récits", + "I18N_LEARNER_GROUP_PROGRESS_NO_LEARNERS": "Il n’y a aucun élève dans ce groupe. Veuillez inviter les élèves à voir leurs progrès.", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_INFO_TEXT": "En tant quʼélève, vous pouvez décider si les animateurs sont en mesure de voir la progression de votre leçon et de vous fournir des commentaires. Vous pouvez modifier ce paramètre à tout moment.", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_INFO_TITLE": "Définir les autorisations de partage :", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_OPTION_FALSE": "Non, peut-être plus tard", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_OPTION_TRUE": "Oui, je souhaite partager mes progrès", + "I18N_LEARNER_GROUP_SEARCH_BY_USERNAME": "Rechercher par nom d’utilisateur", + "I18N_LEARNER_GROUP_SECTION_FEATURE_INFO_DESC": "Les animateurs peuvent vous inviter à rejoindre des groupes avec dʼautres élèves. Une fois que vous devenez membre dʼun groupe dʼélèves, vous pourrez voir le programme de ce groupe. Vous pouvez également choisir de partager vos progrès avec lʼanimateur. 
Vous pouvez modifier vos autorisations de partage à tout moment.", + "I18N_LEARNER_GROUP_SECTION_FEATURE_INFO_TITLE": "En rejoignant un groupe dʼélèves, vous pouvez", + "I18N_LEARNER_GROUP_SHOWING_PROGRESS_FOR_LEARNER": "Affichage des progrès pour", + "I18N_LEARNER_GROUP_SKILLS_ANALYSIS_SECTION": "Analyse des compétences", + "I18N_LEARNER_GROUP_SKILLS_MASTERED_SECTION": "Compétences maîtrisées", + "I18N_LEARNER_GROUP_SKILLS_SECTION_PROGRESS_DESCRIPTION": "Cette section montre les compétences communes avec lesquelles les élèves de ce groupe ont des difficultés.", + "I18N_LEARNER_GROUP_STORIES_SECTION_PROGRESS_DESCRIPTION": "Cette section montre des récits communs que les élèves de ce groupe ont complétés.", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "Afficher les détails", + "I18N_LEARNER_GROUP_SYLLABUS_COMPLETION": "terminé", + "I18N_LEARNER_GROUP_SYLLABUS_ITEM_NOT_STARTED_YET": "Pas encore commencé", + "I18N_LEARNER_GROUP_SYLLABUS_LESSONS": "leçons", + "I18N_LEARNER_GROUP_SYLLABUS_TAB": "Programme", + "I18N_LEARNER_GROUP_USER_STORIES_PROGRESS": "Progrès dans les récits affectés", + "I18N_LEARNER_GROUP_VIEW_DETAILS": "Afficher les détails", + "I18N_LEARNER_GROUP_VIEW_OVERVIEW_SUMMARY_TITLE": "Explorez vos progrès à travers vos leçons :", + "I18N_LEARNER_GROUP_VIEW_PREFERENCES": "PRÉFÉRENCES DE GROUPE", + "I18N_LEARNER_GROUP_WITHDRAW_INVITE": "Se désister", + "I18N_LEARNT_TOPIC": "<[topicName]> appris", + "I18N_LEARN_TOPIC": "Apprendre <[topicName]>", + "I18N_LEAVE_LEARNER_GROUP": "QUITTER LE GROUPE", + "I18N_LEAVE_LEARNER_GROUP_MODAL_BODY": "Voulez-vous vraiment quitter <[groupName]> ?", + "I18N_LEAVE_LEARNER_GROUP_MODAL_BUTTON": "Quitter", + "I18N_LEAVE_LEARNER_GROUP_MODAL_HEADER": "Quitter le groupe dʼélèves ?", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "AUTEURS DE LA LEÇON", + "I18N_LESSON_INFO_HEADER": "Infos sur la leçon", + "I18N_LESSON_INFO_TOOLTIP_MESSAGE": "Vous avez atteint une étape. Bon travail ! 
Voyez vos progrès et d’autres informations sur la leçon ici.", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Vous avez terminé ceci", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Déjà ajouté à la liste à jouer", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "Ajouter à la liste « À jouer plus tard »", @@ -401,7 +865,7 @@ "I18N_LIBRARY_CATEGORIES_ECONOMICS": "Économie", "I18N_LIBRARY_CATEGORIES_EDUCATION": "Éducation", "I18N_LIBRARY_CATEGORIES_ENGINEERING": "Ingénierie", - "I18N_LIBRARY_CATEGORIES_ENGLISH": "Anglais", + "I18N_LIBRARY_CATEGORIES_ENGLISH": "anglais", "I18N_LIBRARY_CATEGORIES_ENVIRONMENT": "Environnement", "I18N_LIBRARY_CATEGORIES_GEOGRAPHY": "Géographie", "I18N_LIBRARY_CATEGORIES_GOVERNMENT": "Gouvernement", @@ -447,6 +911,7 @@ "I18N_LIBRARY_NO_OBJECTIVE": "Aucun objectif spécifié.", "I18N_LIBRARY_N_CATEGORIES": "{categoriesCount, plural, =1{1 Catégorie} other{# Catégories}}", "I18N_LIBRARY_N_LANGUAGES": "{languagesCount, plural, =1{1 langue} other{# langues}}", + "I18N_LIBRARY_PAGE_BROWSE_MODE_TITLE": "Trouver des explorations d’apprentissage – Oppia", "I18N_LIBRARY_PAGE_TITLE": "Leçons de la bibliothèque communautaire | Oppia", "I18N_LIBRARY_RATINGS_TOOLTIP": "Évaluations", "I18N_LIBRARY_SEARCH_PLACEHOLDER": "Qu’est-ce qui suscite votre curiosité ?", @@ -456,35 +921,44 @@ "I18N_LICENSE_PAGE_LICENSE_HEADING": "Licence", "I18N_LICENSE_PAGE_PARAGRAPH_1": "Tout le contenu des leçons d’Oppia est sous licence CC BY-SA 4.0.", "I18N_LICENSE_PAGE_PARAGRAPH_2": "Le logiciel faisant tourner Oppia est développé en code source ouvert et son code source est à disposition sous la licence Apache 2.0.", + "I18N_LICENSE_PAGE_TITLE": "Page de licence | Oppia", "I18N_LICENSE_TERMS_HEADING": "Conditions de la licence", + "I18N_LOGIN_PAGE_TITLE": "S’inscrire | Oppia", "I18N_LOGOUT_LOADING": "Déconnexion", + "I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "Se déconnecter | Oppia", "I18N_LOGOUT_PAGE_TITLE": "Déconnexion", - "I18N_MATH_COURSE_DETAILS": "Le cours sur les fondements mathématiques 
supervisé de Oppia enseigne les rudiments de base des maths, couvrant les concepts essentiels comme l’addition, la multiplication et les fractions. Une fois que vous maîtriserez ces concepts de base, vous pourrez aller vers des leçons plus avancées ! Chaque sujet s’appuie sur le précédent, donc vous pouvez commencer au début et terminer les leçons de tout niveau, et simplement aller directement à un sujet particulier, si vous avez besoin d’aide.", + "I18N_MATH_COURSE_DETAILS": "Le cours sur les fondements mathématiques supervisé d’Oppia enseigne les rudiments de base des maths, couvrant les concepts essentiels comme l’addition, la multiplication et les fractions. Une fois que vous maîtriserez ces concepts de base, vous pourrez aller vers des leçons plus avancées ! Chaque sujet s’appuie sur le précédent, vous pouvez donc commencer depuis le début et terminer les leçons depuis tout niveau, ou juste vous y plonger directement si vous avez besoin d’aide sur un sujet particulier.", "I18N_MATH_TOPICS_COVERED": "Démarrer depuis les bases avec notre premier sujet, la notation positionnelle. Ou, si vous voulez vous remettre à niveau sur un sujet spécifique, allez-y directement !", "I18N_MODAL_CANCEL_BUTTON": "Annuler", - "I18N_MODAL_CONTINUE_BUTTON": "Poursuivre", + "I18N_MODAL_CONTINUE_BUTTON": "Continuer", + "I18N_MODAL_REMOVE_BUTTON": "Retirer", "I18N_NEXT_LESSON": "Leçon suivante", + "I18N_NO": "Non", + "I18N_NO_RESULTS_FOUND_FOR_MATCHING_USERNAME": "Aucun résultat trouvé pour le nom d’utilisateur correspondant.", "I18N_ONE_SUBSCRIBER_TEXT": "Vous avez 1 abonné.", "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Partenariats", + "I18N_PARTNERSHIPS_PAGE_TITLE": "Partenariats | Oppia", "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "Suppression de compte en cours", "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "Compte à supprimer", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1": "Votre compte est programmé pour être supprimé et sera supprimé dans les 24 heures. 
Vous serez notifié par courriel une fois la suppression terminée.", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1_HEADING": "Procédure de suppression en cours", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2": "Cette action supprimera ce compte utilisateur ainsi que toutes les données privées qui lui sont associées. Les données déjà publiques seront anonymisées, de manière à ne plus pouvoir les associer à ce compte. La propriété de certaines des données déjà publiques pourra être transférée à la communauté.", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2_HEADING": "Détails de la suppression", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_TITLE": "Suppression de compte en cours | Oppia", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_1": "chacun est invité à essayer et donner un avis sur les explorations publiées. Avec l’aide de chacun, nous pouvons sans cesse améliorer les leçons sur le site et les rendre aussi efficaces que possible.", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_2": "Utilisez un bon jugement en publiant des explorations. Les explorations doivent avoir une valeur éducative significative et ne peuvent contenir aucune publicité, ni aucun pourriel, vandalisme ou abus.", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_3": "Soyez un bon citoyen. 
Créer plusieurs comptes, abuser du système des avis, utiliser des explorations pour leurrer des utilisateurs ou tout autre comportement antisocial similaire ne sera pas toléré et pourrait entraîner une suspension du compte.", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_HEADING": "Lignes de conduite de la communauté", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_TEXT": "Si vous désirez des explications sur ces instructions, veuillez les demander sur notre forum.", "I18N_PLAYBOOK_HEADING": "Lignes de conduite du créateur", + "I18N_PLAYBOOK_PAGE_TITLE": "Lignes directrices du créateur | Oppia", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_HEADING": "Rendre vos explorations publiables", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_1": "Enseignez quelque chose de significatif : présentez les informations qui sont nouvelles pour l’audience cible — ne testez pas simplement la connaissance qu’ils sont supposés déjà avoir. Également, si vous voulez enseigner un sujet couvert par une exploration existante, pensez plutôt à soumettre un avis sur l’exploration en cours, pour aider à l’améliorer — c’est plus facile !", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_2": "Enseignez plus qu’un simple racontars : en choisissant des sujets, essayez de choisir et de prendre soit un concept complexe ou compliqué qui a des nuances et de la profondeur, soit une collection de faits liés intéressants. Il peut aussi être bien d’avoir un sens de la progression et du défi, afin que l’élève ait la chance d’appliquer un concept qu’il vient d’apprendre à une situation nouvelle.", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_3": "Donnez un retour instructif : ne dites pas simplement aux élèves s’ils ont juste ou faux. Expliquez le raisonnement ou allez plus profond pour essayer et les aider à comprendre leur erreur. 
Utilisez des rappels pour fournir des conseils pour des questions ouvertes et donnez aux élèves de nouveaux aperçus utiles.", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_4": "Relisez : assurez-vous que votre exploration est bien écrite et facile à lire. Elle doit être sans fautes d’orthographe ni erreurs factuelles et anomalies, car cela lui ferait perdre sa crédibilité aux yeux des élèves.", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_5": "Présentez-la avec précision : l’objectif d’apprentissage est montré aux élèves qui parcourent les explorations et doit refléter le contenu de votre exploration. L’exploration doit fournir tout le contenu promis dans l’objectif d’apprentissage et dans l’exploration elle-même.", - "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_6": "Restez intéressant : les meilleures explorations racontent une histoire et ont un retour d’expérience utile ! Elles donnent aux élèves une possibilité de raisonner sur des concepts, d’essayer de nouvelles connaissances et de recevoir un retour utile sur leur travail.", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_6": "Restez intéressant : les meilleures explorations donnent un récit et ont un retour d’expérience utile ! Elles donnent aux élèves une possibilité de raisonner sur des concepts, d’essayer de nouvelles connaissances et de recevoir un retour utile sur leur travail.", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_PARAGRAPH_1": "Quand une exploration est publiée, elle vous est attribuée et devient disponible pour quiconque veut la suivre. 
Pour que les étudiants en retirent un certain bénéfice, voici quelques recommandations que nous avons jugées utiles :", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_PARAGRAPH_2": "Pour plus d’aide sur la mise en œuvre d’un retour d’expérience utile et de bonnes explorations, consultez notre page Conseils de conception.", "I18N_PLAYBOOK_PUBLICATION_POLICY_HEADING": "Règles de publication", @@ -527,7 +1001,7 @@ "I18N_PLAYER_LAST_UPDATED_TOOLTIP": "Dernière mise à jour", "I18N_PLAYER_LEARN_AGAIN_BUTTON": "Apprendre de nouveau", "I18N_PLAYER_LEAVE_FEEDBACK": "Laisser un avis aux auteurs (une fois envoyé, ceci inclura également une référence à la carte à laquelle vous êtes en ce moment dans l’exploration).", - "I18N_PLAYER_LOADING": "Chargement...", + "I18N_PLAYER_LOADING": "Chargement en cours...", "I18N_PLAYER_NEXT_LESSON": "Leçon suivante", "I18N_PLAYER_NO_OBJECTIVE": "Aucun objectif n’a été spécifié.", "I18N_PLAYER_NO_TAGS": "Aucune balise n’a été spécifiée.", @@ -564,6 +1038,9 @@ "I18N_PLAYER_UNRATED": "Non évalué", "I18N_PLAYER_VIEWS_TOOLTIP": "Vues", "I18N_PRACTICE_SESSION_PAGE_BREADCRUMB": "Session pratique", + "I18N_PRACTICE_SESSION_PAGE_TITLE": "Session de mise en pratique : <[topicName]> ― Oppia", + "I18N_PRACTICE_SESSION_START_BUTTON_TEXT": "Commencer à pratiquer", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "Langue audio", "I18N_PREFERENCES_BIO": "Biographie", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "Ce champs est facultatif. 
Tout ce que vous écrirez là sera rendu public et visible de tout le monde.", "I18N_PREFERENCES_BREADCRUMB": "Préférences", @@ -576,10 +1053,15 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_FEEDBACK_NEWS": "Recevoir des courriels quand quelqu’un vous envoie un commentaire sur une exploration", "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "Recevoir des nouvelles et des mises à jour du site", "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "Recevoir des courriels lorsqu’un créateur auquel vous êtes abonné{{GENDER:||e}} publie une nouvelle exploration", - "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "Nous ne pouvons pas vous ajouter automatiquement à notre liste de diffusion. Veuillez visiter le lien suivant pour vous inscrire à celle-ci.", + "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "Nous n’avons pas pu vous ajouter automatiquement à notre liste de diffusion. Veuillez visiter le lien suivant pour vous inscrire à celle-ci :", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "Exporter le compte", + "I18N_PREFERENCES_EXPORT_ACCOUNT_INFO_TEXT": "Cela téléchargera les données de votre compte Oppia sous forme d’un fichier texte au format JSON.", + "I18N_PREFERENCES_EXPORT_ACCOUNT_WARNING_TEXT": "Veuillez ne pas quitter cette page. Vos données sont actuellement en cours de chargement et seront téléchargées sous forme de fichier texte au format JSON une fois ceci terminé. Si quelque chose se passe mal, veuillez contacter", "I18N_PREFERENCES_HEADING": "Préférences", "I18N_PREFERENCES_HEADING_SUBTEXT": "Toute modification que vous ferez sur cette page sera automatiquement enregistrée.", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "Vous ne vous êtes encore abonné à aucun créateur. Vous pouvez librement vous abonner à votre auteur favori en cliquant sur le bouton « s’abonner » dans la page de profil de l’auteur. 
En vous abonnant à un auteur, vous serez notifié{{GENDER:||e}} par courriel quand l’auteur publiera une nouvelle leçon.", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "Impact", + "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE": "Préférences | Oppia", "I18N_PREFERENCES_PAGE_TITLE": "Modifie les préférences dans votre profil – Oppia", "I18N_PREFERENCES_PICTURE": "Image", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "Langue audio préférée", @@ -589,6 +1071,7 @@ "I18N_PREFERENCES_PREFERRED_DASHBOARD_EXPLAIN": "Ceci est le tableau de bord qui sera affiché par défaut à la connexion.", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "Langues d’exploration préférées", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "Ces langues seront sélectionnées par défaut quand vous rechercherez des explorations dans la galerie.", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": "Sélectionner les langues préférées.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "Langue préférée du site", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "C’est la langue dans laquelle le site est affiché.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "Langue préférée du site", @@ -596,44 +1079,86 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Faire glisser pour recadrer et redimensionner :", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Erreur : impossible de lire le fichier image.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Téléverser l’image du profil", + "I18N_PREFERENCES_SEARCH_LABEL": "Rechercher", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Sélectionner les langues préférées...", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "Langue du site", "I18N_PREFERENCES_SUBJECT_INTERESTS": "Centres d’intérêt", + "I18N_PREFERENCES_SUBJECT_INTERESTS_ERROR_TEXT": "Les descriptions d’intérêts sur le sujet doivent être uniques et en minuscules.", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "Par ex. 
: mathématiques, informatique, art, etc.", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "Ajouter un nouveau centre d’intérêt (en utilisant des lettres minuscules et des espaces)...", + "I18N_PREFERENCES_SUBJECT_INTERESTS_LABEL": "Intérêts du nouveau sujet", "I18N_PREFERENCES_SUBJECT_INTERESTS_PLACEHOLDER": "Saisissez des centres d’intérêt...", "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "Créateurs auxquels vous vous êtes abonné{{GENDER:||e}}", "I18N_PREFERENCES_USERNAME": "Nom d’utilisateur", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Pas encore sélectionné", + "I18N_PRIVACY_POLICY_PAGE_TITLE": "Politique de confidentialité | Oppia", "I18N_PROFILE_NO_EXPLORATIONS": "Cet utilisateur n’a encore créé ou modifié aucune exploration.", - "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "En savoir plus sur votre score", + "I18N_PROFILE_PAGE_TITLE": "Profil | Oppia", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "Voulez-vous continuer ?", + "I18N_PROGRESS_REMINDER_MODAL_HEADER": "Vous avez terminé <[progress]> de", + "I18N_PROGRESS_REMINDER_RESTART_LESSON": "Non, recommencer depuis le début", + "I18N_PROGRESS_REMINDER_RESUME_LESSON": "Oui, reprendre la leçon", + "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Tableau des scores", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Mon tableau de bord", - "I18N_QUESTION_PLAYER_NEW_SESSION": "Nouvelle session", + "I18N_QUESTION_PLAYER_NEW_SESSION": "Rejouer", "I18N_QUESTION_PLAYER_RETRY_TEST": "Réessayer le test", - "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "Revenir à l’histoire", + "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "Revenir au récit", "I18N_QUESTION_PLAYER_REVIEW_LOWEST_SCORED_SKILL": "revoir la compétence la moins bien notée", "I18N_QUESTION_PLAYER_SCORE": "Score", "I18N_QUESTION_PLAYER_SKILL_DESCRIPTIONS": "Descriptions de compétence", - "I18N_QUESTION_PLAYER_TEST_FAILED": "Échec de la session. Veuillez revoir les compétences et réessayer", + "I18N_QUESTION_PLAYER_TEST_FAILED": "Échec de la session. 
Veuillez revoir les compétences et essayer à nouveau.", "I18N_QUESTION_PLAYER_TEST_PASSED": "Session terminée. Bien joué !", + "I18N_REDIRECTION_TO_STUCK_STATE_MESSAGE": "On dirait que vous êtes un peu coincé ici. Passons en revue les concepts via un court chemin de révision.", + "I18N_REFRESHER_EXPLORATION_MODAL_BODY": "Il semble que vous ayez des difficultés avec cette question. Voudriez-vous essayer une courte exploration pour vous rafraîchir la mémoire et revenir ici après l’avoir terminée ?", + "I18N_REFRESHER_EXPLORATION_MODAL_TITLE": "Voudriez-vous un rappel pour vous aider ?", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "Session d’inscription expirée", "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "Désolé mais votre session d’inscription a expiré. Veuillez cliquer sur « Continuer l’inscription » pour redémarrer le processus.", + "I18N_RELEASE_COORDINATOR_PAGE_TITLE": "Panneau du coordinateur de publication d’Oppia", + "I18N_RESET_CODE": "Réinitialiser le code", + "I18N_RESTART_EXPLORATION_BUTTON": "Redémarrer la leçon", "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Relire le test", + "I18N_REVIEW_TEST_PAGE_TITLE": "Test de passage en revue : <[storyName]> — Oppia", + "I18N_SAVE_BUTTON_ALERT_TOOLTIP": "La progression ne peut pas être enregistrée si vous n’avez pas atteint le premier point de contrôle.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_1": "Votre progression d’apprentissage sera automatiquement enregistrée si vous avez un compte.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "Vous avez déjà un compte ?", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_3": "Utilisez le lien ci-dessous pour enregistrer la progression pendant 72 heures.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_5": "Écrivez ou copiez le lien ci-dessous", "I18N_SAVE_PROGRESS": "Connectez-vous ou inscrivez-vous pour enregistrer votre progression et continuer jusqu’à la leçon suivante.", + "I18N_SAVE_PROGRESS_BUTTON_TEXT": "Copier", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "Copié !", + "I18N_SAVE_PROGRESS_TEXT": 
"Enregistrer la progression", "I18N_SHARE_LESSON": "Partager cette leçon", + "I18N_SHOW_LESS": "Afficher moins", + "I18N_SHOW_MORE": "Afficher plus", "I18N_SHOW_SOLUTION_BUTTON": "Afficher la solution", - "I18N_SIDEBAR_ABOUT_LINK": "À propos d’Oppia", + "I18N_SIDEBAR_ABOUT_LINK": "À notre sujet", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "À propos de la Fondation Oppia", "I18N_SIDEBAR_BLOG": "Blogue", "I18N_SIDEBAR_CLASSROOM": "Salle de classe", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Mathématiques de base", "I18N_SIDEBAR_CONTACT_US": "Nous contacter", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "Nous sommes là pour vous aider concernant les questions que vous avez.", "I18N_SIDEBAR_DONATE": "Faire un don", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "Vos contributions aident à fournir une éducation de qualité à tous.", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Bien démarrer", + "I18N_SIDEBAR_GET_INVOLVED": "S’impliquer", + "I18N_SIDEBAR_HOME": "Accueil", + "I18N_SIDEBAR_LEARN": "Apprendre", "I18N_SIDEBAR_LIBRARY_LINK": "Bibliothèque", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "Fondements mathématiques", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "Leçons accessibles aux débutants pour vous aider à démarrer en mathématiques.", "I18N_SIDEBAR_OPPIA_FOUNDATION": "La fondation Oppia", "I18N_SIDEBAR_PARTNERSHIPS": "Partenariats", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "Apporter une éducation de qualité aux étudiants dans votre région.", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "Addition et soustraction", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "Bibliothèque de la communauté", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "Ressources supplémentaires réalisées par la communauté.", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "Multiplication", + "I18N_SIDEBAR_SUBMENU_PLACE_VALUES": "Valeurs de l’emplacement", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "Voir toutes les leçons", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Enseigner avec Oppia", 
"I18N_SIDEBAR_VOLUNTEER": "Bénévole", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "Rejoindre notre équipe globale pour créer et améliorer les leçons.", "I18N_SIGNIN_LOADING": "Connexion", "I18N_SIGNIN_PAGE_TITLE": "Connexion", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "En cochant la case à gauche de ce texte, vous reconnaissez et acceptez d’être lié par les Conditions d’utilisation de <[sitename]>, trouvées ici.", @@ -646,7 +1171,7 @@ "I18N_SIGNUP_EMAIL_PREFERENCES": "Préférences de courriel", "I18N_SIGNUP_EMAIL_PREFERENCES_EXPLAIN": "Vous pouvez modifier ce paramétrage à n’importe quel moment depuis votre page des Préférences.", "I18N_SIGNUP_ERROR_MUST_AGREE_TO_TERMS": "Afin de modifier des explorations sur ce site, vous devez accepter les conditions du site.", - "I18N_SIGNUP_ERROR_NO_USERNAME": "Veuillez entrer un nom d’utilisateur.", + "I18N_SIGNUP_ERROR_NO_USERNAME": "Veuillez saisir un nom d’utilisateur.", "I18N_SIGNUP_ERROR_USERNAME_NOT_AVAILABLE": "Ce nom d’utilisateur n’est pas disponible.", "I18N_SIGNUP_ERROR_USERNAME_ONLY_ALPHANUM": "Les noms d’utilisateurs ne peuvent comporter que des caractères alphanumériques.", "I18N_SIGNUP_ERROR_USERNAME_TAKEN": "Désolé, ce nom d’utilisateur est déjà pris.", @@ -654,21 +1179,27 @@ "I18N_SIGNUP_ERROR_USERNAME_WITH_ADMIN": "Les noms d’utilisateur avec « admin » sont réservés.", "I18N_SIGNUP_ERROR_USERNAME_WITH_SPACES": "Veuillez vous assurer que votre nom d’utilisateur n’a aucune espace.", "I18N_SIGNUP_FIELD_REQUIRED": "Ce champ est obligatoire.", - "I18N_SIGNUP_LICENSE_NOTE": "Veuillez noter qu’en acceptant nos Conditions d’utilisation, vous acceptez que tout le contenu ou les contributions que vous ferez sur notre site sont et seront sous licence CC BY-SA v.4.0. Veuillez relire nos Conditions d’utilisation pour plus d’information sur l’accord de la licence. 
Pour plus d’information sur CC BY-SA, cliquez ici.", + "I18N_SIGNUP_LICENSE_NOTE": "Veuillez noter qu’en acceptant nos Conditions d’utilisation, vous acceptez que tout le contenu ou les contributions que vous ferez sur notre site sont et seront sous licence CC BY-SA v.4.0. Veuillez relire nos Conditions d’utilisation pour plus d’informations sur l’accord de licence. Pour plus d’informations sur CC BY-SA, cliquez ici.", "I18N_SIGNUP_LICENSE_OBJECTIVE": "Utiliser une licence <[licenselink]> permet au contenu des explorations d’être librement copié, réutilisé, réarrangé et redistribué. La condition principale est que si quelqu’un réarrange, transforme ou construit sur le matériau fourni, ils doivent aussi distribuer leur travail sous la même licence libre.", "I18N_SIGNUP_LOADING": "Chargement", "I18N_SIGNUP_PAGE_TITLE": "Rejoignez la communauté – Oppia", "I18N_SIGNUP_REGISTRATION": "Inscription", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "Ne plus me demander", "I18N_SIGNUP_SEND_ME_NEWS": "M’envoyer les nouvelles et les mises à jour du site", "I18N_SIGNUP_SITE_DESCRIPTION": "<[sitename]> est une base commune de ressources d’apprentissage. 
Toutes les matières publiées dessus sont librement réutilisables et partageables.", "I18N_SIGNUP_SITE_OBJECTIVE": "<[sitename]> existe pour encourager la création et l’amélioration continuelle d’un ensemble de ressources d’apprentissage de haute qualité qui sont librement disponibles pour tout le monde.", "I18N_SIGNUP_UPDATE_WARNING": "Veuillez noter que nous avons récemment mis à jour nos Conditions d’utilisation.", "I18N_SIGNUP_USERNAME": "Nom d’utilisateur", "I18N_SIGNUP_USERNAME_EXPLANATION": "Votre nom d’utilisateur sera affiché à côté de vos contributions.", - "I18N_SIGNUP_WHY_LICENSE": "Pourquoi CC-BY-SA ?", + "I18N_SIGNUP_WHY_LICENSE": "Pourquoi CC BY-SA ?", + "I18N_SKILL_LEVEL_BEGINNER": "Débutant", + "I18N_SKILL_LEVEL_INTERMIDIATE": "Intermédiaire", + "I18N_SKILL_LEVEL_NEEDS_WORK": "A besoin de travail", + "I18N_SKILL_LEVEL_PROFICIENT": "Compétent", "I18N_SOLICIT_ANSWER_DETAILS_FEEDBACK": "Ok, revenons maintenant à votre réponse.", "I18N_SOLICIT_ANSWER_DETAILS_QUESTION": "Pouvez-vous expliquer pourquoi vous avez sélectionné cette réponse ?", - "I18N_SOLUTION_EXPLANATION_TITLE": "Explication :", + "I18N_SOLUTION_EXPLANATION_TITLE": "Explication :", + "I18N_SOLUTION_NEED_HELP": "Souhaitez-vous voir la solution complète ?", "I18N_SOLUTION_TITLE": "Solution", "I18N_SPLASH_BENEFITS_ONE": "Apprentissage personnalisé", "I18N_SPLASH_BENEFITS_THREE": "Leçons faciles à suivre", @@ -709,13 +1240,69 @@ "I18N_SPLASH_VOLUNTEERS_CONTENT": "Peu importe qui vous êtes, vous trouverez un accueil chez Oppia. Nous avons toujours besoin de plus de monde pour améliorer les leçons en suggérant des questions, en contribuant aux graphismes ou en traduisant les leçons.", "I18N_SPLASH_VOLUNTEERS_TITLE": "Géré par la communauté", "I18N_START_HERE": "Cliquer ici pour commencer !", + "I18N_STORY_3M5VBajMccXO_DESCRIPTION": "Dans ce récit, nous rejoindrons Matthew qui visite une pâtisserie pour acheter un gâteau. Malheureusement, il n’a pas assez d’argent pour un gâteau entier. Donc M. 
Baker va l’aider en divisant le gâteau choisi par Matthew en plus petites parts qu’il peut s’offrir. Que se passe-t-il ensuite ? Jouez les leçons pour le découvrir !", + "I18N_STORY_3M5VBajMccXO_TITLE": "Matthew visite la pâtisserie", + "I18N_STORY_JhiDkq01dqgC_DESCRIPTION": "Rejoignez Ava et son père qui vont au parc de loisir. Aidez-les en utilisant votre connaissance des expressions et des équations afin de résoudre les problèmes auxquels ils sont confrontés !", + "I18N_STORY_JhiDkq01dqgC_TITLE": "Un jour au parc de loisir", + "I18N_STORY_Qu6THxP29tOy_DESCRIPTION": "Apprenez comment ajouter et soustraire avec Maya, Omar et leur grand-père, alors qu’ils préparent ensemble une pizza !", + "I18N_STORY_Qu6THxP29tOy_TITLE": "Maya, Omar et Malik préparent une pizza !", + "I18N_STORY_RRVMHsZ5Mobh_DESCRIPTION": "Dans ce récit, nous suivrons Jaime et sa sœur Nic alors qu’ils apprennent comment représenter et lire la valeur d’un nombre.", + "I18N_STORY_RRVMHsZ5Mobh_TITLE": "Les aventures de Jaime dans l’Arcade", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> ― Terminé !", + "I18N_STORY_VIEWER_PAGE_TITLE": "Apprendre <[topicName]> | <[storyTitle]> | Oppia", + "I18N_STORY_ialKSV0VYV0B_DESCRIPTION": "Faites connaissance avec Jacques et son oncle alors qu’ils cherchent comment ils peuvent utiliser les rapports pour réaliser de délicieuses boissons !", + "I18N_STORY_ialKSV0VYV0B_TITLE": "Aventures du smoothie de James", + "I18N_STORY_rqnxwceQyFnv_DESCRIPTION": "Rejoignez Nina alors qu’elle utilise les techniques de division pour aider sa maman et Sandra au marché !", + "I18N_STORY_rqnxwceQyFnv_TITLE": "Nina visite le marché", + "I18N_STORY_vfJDB3JAdwIx_DESCRIPTION": "Rejoignez Aria et son père Omar alors qu’ils utilisent les techniques de la multiplication pour semer des graines dans leur jardin !", + "I18N_STORY_vfJDB3JAdwIx_TITLE": "Aria veut cultiver un jardin", + "I18N_STRUGGLING_WITH_SKILL": "<[username]> a des difficultés avec cette compétence", 
"I18N_SUBSCRIBE_BUTTON_TEXT": "S’abonner", - "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Compétence suivante", + "I18N_SUBTOPIC_0abdeaJhmfPm_adding-fractions_TITLE": "Ajout de fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_comparing-fractions_TITLE": "Comparaison de fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_dividing-fractions_TITLE": "Division des fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_equivalent-fractions_TITLE": "Fractions équivalentes", + "I18N_SUBTOPIC_0abdeaJhmfPm_fractions-of-a-group_TITLE": "Fractions d’un groupe", + "I18N_SUBTOPIC_0abdeaJhmfPm_mixed-numbers_TITLE": "Nombres mélangés", + "I18N_SUBTOPIC_0abdeaJhmfPm_multiplying-fractions_TITLE": "Multiplication des fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_number-line_TITLE": "La ligne des nombres", + "I18N_SUBTOPIC_0abdeaJhmfPm_subtracting-fractions_TITLE": "Soustraction des fractions", + "I18N_SUBTOPIC_0abdeaJhmfPm_what-is-a-fraction_TITLE": "Qu’est-ce qu’une fraction ?", + "I18N_SUBTOPIC_5g0nxGUmx5J5_calculations-with-ratios_TITLE": "Calculs avec des rapports", + "I18N_SUBTOPIC_5g0nxGUmx5J5_combining-ratios_TITLE": "Combinaison de rapports", + "I18N_SUBTOPIC_5g0nxGUmx5J5_equivalent-ratios_TITLE": "Rapports équivalents", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "Qu’est-ce qu’un rapport ?", + "I18N_SUBTOPIC_C4fqwrvqWpRm_basic-concepts_TITLE": "Concepts de base de la multiplication", + "I18N_SUBTOPIC_C4fqwrvqWpRm_memorizing-expressions_TITLE": "Mémorisation des expressions de multiplication", + "I18N_SUBTOPIC_C4fqwrvqWpRm_multiplication-techniques_TITLE": "Techniques de multiplication", + "I18N_SUBTOPIC_C4fqwrvqWpRm_rules-to-simplify_TITLE": "Règles pour simplifier une multiplication", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Compétence suivante :", + "I18N_SUBTOPIC_VIEWER_PAGE_TITLE": "Passer en revue <[subtopicTitle]> | Oppia", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "Compétence précédente :", + "I18N_SUBTOPIC_dLmjjMDbCcrf_algebraic-expressions_TITLE": "Simplification des expressions algébriques", + 
"I18N_SUBTOPIC_dLmjjMDbCcrf_modelling-scenarios_TITLE": "Modélisation de scénarios du monde réel avec des équations", + "I18N_SUBTOPIC_dLmjjMDbCcrf_order-of-operations_TITLE": "Ordre des opérations", + "I18N_SUBTOPIC_dLmjjMDbCcrf_problem-solving_TITLE": "Stratégies de résolution de problèmes", + "I18N_SUBTOPIC_dLmjjMDbCcrf_solving-equations_TITLE": "Manipulation et résolution d’équations", + "I18N_SUBTOPIC_dLmjjMDbCcrf_variables_TITLE": "Représentation des inconnues avec des variables", + "I18N_SUBTOPIC_iX9kYCjnouWN_comparing-numbers_TITLE": "Comparaison de nombres", + "I18N_SUBTOPIC_iX9kYCjnouWN_naming-numbers_TITLE": "Nommage des nombres", + "I18N_SUBTOPIC_iX9kYCjnouWN_place-names-and-values_TITLE": "Noms et valeurs des positions", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE": "Arrondi des nombres", + "I18N_SUBTOPIC_qW12maD4hiA8_basic-concepts_TITLE": "Concepts de base de la division", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "Résolution de problèmes", + "I18N_SUBTOPIC_qW12maD4hiA8_techniques-of-division_TITLE": "Techniques de division", + "I18N_SUBTOPIC_sWBXKH4PZcK6_adding-numbers_TITLE": "Addition des nombres", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE": "Relations entre l’addition et la soustraction", + "I18N_SUBTOPIC_sWBXKH4PZcK6_estimation_TITLE": "Estimation", + "I18N_SUBTOPIC_sWBXKH4PZcK6_sequences _TITLE": "Séquences", + "I18N_SUBTOPIC_sWBXKH4PZcK6_subtracting-numbers_TITLE": "Soustraction des nombres", + "I18N_SYLLABUS_SKILL_TITLE": "Compétence", + "I18N_SYLLABUS_STORY_TITLE": "Récit", "I18N_TEACH_BENEFITS_ONE": "Apprentissage efficace et de haute qualité pour tous les âges", "I18N_TEACH_BENEFITS_THREE": "Toujours libre et facile à utiliser", "I18N_TEACH_BENEFITS_TITLE": "Nos avantages", - "I18N_TEACH_BENEFITS_TWO": "Leçons amusantes et basées sur des histoires", + "I18N_TEACH_BENEFITS_TWO": "Leçons amusantes et basées sur des récits", "I18N_TEACH_PAGE_ACTION_START_LEARNING": "Commencer à apprendre", 
"I18N_TEACH_PAGE_CLASSROOM_BUTTON": "VISITER LA SALLE DE CLASSE", "I18N_TEACH_PAGE_CLASSROOM_CONTENT": "Dans la salle de classe, vous pourrez trouver un ensemble de leçons que l’équipe Oppia a conçues et testées pour s’assurer qu’elles sont efficaces et amusantes pour tous les étudiants. Toutes les leçons ont été revues par des enseignants et des experts, donc vous pouvez être confiants que vos étudiants obtiendront une éducation efficace, tout en apprenant à leur rythme.", @@ -726,63 +1313,116 @@ "I18N_TEACH_PAGE_LIBRARY_CONTENT": "Les éducateurs et les membres de la communauté dans le monde entier utilisent la plateforme de création de leçons Oppia comme moyen de créer et partager des leçons. Vous pouvez trouver plus de 20000 leçons sur 17 sujets différents dans notre Bibliothèque d’exploration, et peut-être serez-vous inspiré pour créer la vôtre !", "I18N_TEACH_PAGE_LIBRARY_TITLE": "Explorer les leçons faites par la communauté", "I18N_TEACH_PAGE_SIX_TITLE": "Démarrer l’apprentissage aujourd’hui", + "I18N_TEACH_PAGE_TITLE": "Guide d’Oppia pour parents et enseignants | Oppia", "I18N_TEACH_STUDENT_DETAILS_1": "Riya Sogani", "I18N_TEACH_STUDENT_DETAILS_2": "Wala Awad", "I18N_TEACH_STUDENT_DETAILS_3": "Himanshu Taneja, Kurukshetra, Inde", - "I18N_TEACH_STUDENT_DETAILS_4": "Yamama, facilitateur, Palestine", + "I18N_TEACH_STUDENT_DETAILS_4": "Yamama, animateur, Palestine", "I18N_TEACH_TESTIMONIAL_1": "« Je suis reconnaissant d’avoir la possibilité d’éduquer les enfants indiens non privilégiés et de combler les manques dans leur compréhension des concepts mathématiques critiques. Suivre l’accroissement de la confiance de ces étudiants au cours de leur apprentissage valait bien les heures supplémentaires. »", "I18N_TEACH_TESTIMONIAL_2": "« Oppia est le premier de son genre ! 
Il aide les étudiants à apprendre tout ce dont ils ont besoin sur un sujet spécifique d’une manière attractive et captivante ; il les encourage aussi à utiliser des appareils intelligents pour leur bien. »", "I18N_TEACH_TESTIMONIAL_3": "« Je ne m’étais jamais attendu à ce que les étudiants apprennent les leçons de technologie et de mathématique aussi rapidement. C’est leur premier contact avec les technologies intelligentes, et ils ont eu du mal à les gérer au début. Maintenant, je sens une immense joie à les voir faire les leçons de Oppia avant même que j’entre dans la classe ! »", + "I18N_TERMS_PAGE_TITLE": "Conditions d’utilisation | Oppia", "I18N_THANKS_PAGE_BREADCRUMB": "Merci", + "I18N_THANKS_PAGE_TITLE": "Merci | Oppia", + "I18N_TIME_FOR_BREAK_BODY_1": "Vous semblez soumettre des réponses très vite. Commencez-vous à fatiguer ?", + "I18N_TIME_FOR_BREAK_BODY_2": "Si c’est le cas, songez à faire une pause ! Vous pouvez revenir plus tard.", + "I18N_TIME_FOR_BREAK_FOOTER": "Je suis prêt{{GENDER:||e}} à continuer la leçon", + "I18N_TIME_FOR_BREAK_TITLE": "C’est le moment d’une pause ?", + "I18N_TOPIC_0abdeaJhmfPm_DESCRIPTION": "Vous aurez souvent besoin de parler des parties d’un objet : une recette peut vous demander une demi-tasse de farine ou pourriez avoir à doser une partie d’une bouteille de lait. Dans ce sujet, vous apprendrez comment utiliser des fractions pour comprendre et décrire des situations comme celles-ci.", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "Fractions", + "I18N_TOPIC_5g0nxGUmx5J5_DESCRIPTION": "Les rapports sont utiles pour calculer les quantités d’ingrédients à utiliser si vous avez une recette pour quatre personnes mais que vous voulez cuisiner pour deux. 
Dans ce sujet, vous apprendrez comment utiliser des rapports pour facilement comparer la taille d’une chose par rapport à une autre.", + "I18N_TOPIC_5g0nxGUmx5J5_TITLE": "Rapports et raisonnement de proportionnalité", + "I18N_TOPIC_C4fqwrvqWpRm_DESCRIPTION": "Si vous aviez acheté 60 boîtes de cinq gâteaux, combien de gâteaux auriez-vous en tout ? Dans ce sujet, vous apprendrez comment utiliser la multiplication pour résoudre des problèmes comme celui-ci (sans avoir à additionner beaucoup de nombres à chaque fois !).", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "Multiplication", + "I18N_TOPIC_LANDING_PAGE_TITLE": "<[topicTitle]> | <[topicTagline]> | Oppia", + "I18N_TOPIC_LEARN": "Apprendre", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 leçon} other{# leçons}}", + "I18N_TOPIC_TITLE": "Sujet", "I18N_TOPIC_VIEWER_CHAPTER": "Chapitre", "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{1 chapitre} other{# chapitres}}", + "I18N_TOPIC_VIEWER_COMING_SOON": "Prochainement !", + "I18N_TOPIC_VIEWER_COMING_SOON_LESSONS": "Revenez plus tard quand des leçons seront disponibles pour ce sujet.", + "I18N_TOPIC_VIEWER_COMING_SOON_PRACTICE": "Revenez plus tard quand des questions de mise en pratique seront disponibles pour ce sujet.", "I18N_TOPIC_VIEWER_DESCRIPTION": "Description", "I18N_TOPIC_VIEWER_LESSON": "Leçon", "I18N_TOPIC_VIEWER_LESSONS": "Leçons", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "Revenez plus tard quand des leçons seront disponibles pour ce sujet.", "I18N_TOPIC_VIEWER_MASTER_SKILLS": "Compétences de maîtrise en <[topicName]>", + "I18N_TOPIC_VIEWER_NO_QUESTION_WARNING": "Il n’y a encore aucune question créée pour le (ou les) sous-sujet(s) sélectionné(s).", + "I18N_TOPIC_VIEWER_PAGE_TITLE": "<[topicName]> | <[pageTitleFragment]> | Oppia", "I18N_TOPIC_VIEWER_PRACTICE": "Mise en application", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_MESSAGE": "La fonctionnalité de mise en pratique est encore en version bêta et n’est disponible qu’en anglais. 
Voulez-vous continuer ?", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_TITLE": "Confirmer la langue de mise en pratique", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_TAG": "(bêta)", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "Revenez plus tard quand des questions pratiques seront disponibles pour ce sujet.", "I18N_TOPIC_VIEWER_REVISION": "Révision", "I18N_TOPIC_VIEWER_SELECT_SKILLS": "Sélectionnez les compétences des leçons de <[topicName]> que vous aimeriez mettre en pratique.", "I18N_TOPIC_VIEWER_SKILL": "Compétence", "I18N_TOPIC_VIEWER_SKILLS": "Compétences", "I18N_TOPIC_VIEWER_START_PRACTICE": "Démarrer", "I18N_TOPIC_VIEWER_STORIES": "Récits", + "I18N_TOPIC_VIEWER_STORIES_HEAD": "Récits que vous pouvez jouer", "I18N_TOPIC_VIEWER_STORY": "Récit", "I18N_TOPIC_VIEWER_STUDY_SKILLS": "Compétences d’étude en <[topicName]>", "I18N_TOPIC_VIEWER_STUDY_SKILLS_SUBTITLE": "Utilisez les cartes de révision suivantes pour vous aider à étudier les compétences en <[topicName]>.", "I18N_TOPIC_VIEWER_VIEW_ALL": "Tout afficher", "I18N_TOPIC_VIEWER_VIEW_LESS": "Afficher moins", + "I18N_TOPIC_dLmjjMDbCcrf_DESCRIPTION": "Vous aurez souvent besoin de résoudre des problèmes avec des nombres inconnus — par exemple, si vous avez acheté un produit qui est à vendre et vous voulez en savoir le prix d’origine. Dans ce sujet, vous apprendrez comment faire ceci avec des équations, des expressions et des formules.", + "I18N_TOPIC_dLmjjMDbCcrf_TITLE": "Expressions et équations", + "I18N_TOPIC_iX9kYCjnouWN_DESCRIPTION": "Saviez-vous que tous les nombres possibles de choses peuvent être exprimés en utilisant seulement dix chiffres (0, 1, 2, 3, ..., 9) ? 
Dans ce sujet, nous apprendrons comment nous pouvons utiliser les valeurs positionnelles pour faire cela et verrons pourquoi « 5 » a une valeur différente dans « 25 » et « 2506 ».", + "I18N_TOPIC_iX9kYCjnouWN_TITLE": "Valeurs positionnelles", + "I18N_TOPIC_qW12maD4hiA8_DESCRIPTION": "Si vous aviez trente-deux tomates à partager entre quatre personnes, combien de tomates obtiendrait chaque personne ? Dans ce sujet, vous apprendrez à utiliser la division pour déterminer comment diviser quelque chose en parts égales.", + "I18N_TOPIC_qW12maD4hiA8_TITLE": "Division", + "I18N_TOPIC_sWBXKH4PZcK6_DESCRIPTION": "Si vous aviez quatre œufs et votre ami vous en donnait 37 de plus, combien alors en auriez-vous en tout ? Et combien si ensuite vous en perdez huit ? Dans ce sujet, vous apprendrez comment résoudre des problèmes comme ceux-ci avec les connaissances de base sur l’addition et la soustraction.", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "Addition et soustraction", "I18N_TOPNAV_ABOUT": "À propos", "I18N_TOPNAV_ABOUT_OPPIA": "À propos d’Oppia", "I18N_TOPNAV_ADMIN_PAGE": "Page d’administration", + "I18N_TOPNAV_ANDROID_APP_DESCRIPTION": "La première application Android d’Oppia est désormais disponible en anglais et en portugais brésilien. 
Essayez-la et donnez votre avis !", + "I18N_TOPNAV_ANDROID_APP_HEADING": "Application Android", "I18N_TOPNAV_BLOG": "Blogue", - "I18N_TOPNAV_BLOG_DASHBOARD": "Tableau de bord du blog", - "I18N_TOPNAV_CLASSROOM": "Salle de cours", + "I18N_TOPNAV_BLOG_DASHBOARD": "Tableau de bord du blogue", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Mathématiques de base", "I18N_TOPNAV_CONTACT_US": "Nous contacter", + "I18N_TOPNAV_CONTACT_US_DESCRIPTION": "Nous sommes là pour vous aider concernant les questions que vous avez.", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Tableau de bord du contributeur", "I18N_TOPNAV_CREATOR_DASHBOARD": "Tableau de bord du créateur", "I18N_TOPNAV_DONATE": "Faire un don", + "I18N_TOPNAV_DONATE_DESCRIPTION": "Vos contributions aident à fournir une éducation de qualité à tous.", + "I18N_TOPNAV_FACILITATOR_DASHBOARD": "Tableau de bord de l’animateur", "I18N_TOPNAV_FORUM": "Forum", "I18N_TOPNAV_GET_INVOLVED": "S’impliquer", "I18N_TOPNAV_GET_STARTED": "Bien commencer", - "I18N_TOPNAV_LEARNER_DASHBOARD": "Tableau de bord de l’étudiant", - "I18N_TOPNAV_LIBRARY": "Bibliothèque", + "I18N_TOPNAV_HOME": "Accueil", + "I18N_TOPNAV_LEARN": "Apprendre", + "I18N_TOPNAV_LEARNER_DASHBOARD": "Tableau de bord de l’élève", + "I18N_TOPNAV_LEARNER_GROUP": "Groupes dʼélèves", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "Leçons fondamentales pour vous aider à démarrer en mathématiques.", + "I18N_TOPNAV_LEARN_HEADING": "Moyens d’en savoir plus", + "I18N_TOPNAV_LEARN_LINK_1": "Voir toutes les leçons", + "I18N_TOPNAV_LEARN_LINK_2": "Continuer à apprendre", + "I18N_TOPNAV_LIBRARY": "Bibliothèque de la communauté", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "Ressources supplémentaires réalisées par la communauté pour vous aider à en apprendre plus.", "I18N_TOPNAV_LOGOUT": "Se déconnecter", "I18N_TOPNAV_MODERATOR_PAGE": "Page du modérateur", "I18N_TOPNAV_OPPIA_FOUNDATION": "La Fondation Oppia", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Recueil de participation", - "I18N_TOPNAV_PARTNERSHIPS": "Partenariats", 
+ "I18N_TOPNAV_PARTNERSHIPS": "Écoles et organisations", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "Nouer un partenariat et amener Oppia dans votre école, votre communauté ou votre zone.", "I18N_TOPNAV_PREFERENCES": "Préférences", "I18N_TOPNAV_SIGN_IN": "S’identifier", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "S’identifier avec Google", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Enseigner avec Oppia", "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "Tableau de bord des sujets et des compétences", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "Essayez-la aujourd’hui !", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "Rejoindre notre équipe globale pour créer et améliorer des leçons.", "I18N_TOTAL_SUBSCRIBERS_TEXT": "Vous avez un total de <[totalSubscribers]> abonnés.", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Se désabonner", + "I18N_VIEW_ALL_TOPICS": "Voir tous les sujets de <[classroomName]>", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Bénévole", - "I18N_WARNING_MODAL_DESCRIPTION": "Cela affichera toute la solution. Êtes-vous sûr ?", + "I18N_VOLUNTEER_PAGE_TITLE": "Bénévolat | Oppia", + "I18N_WARNING_MODAL_DESCRIPTION": "Cela affichera toute la solution. 
Est-ce vraiment ce que vous souhaitez ?", "I18N_WARNING_MODAL_TITLE": "Attention !", - "I18N_WORKED_EXAMPLE": "Exemple travaillé" + "I18N_WORKED_EXAMPLE": "Exemple travaillé", + "I18N_YES": "Oui" } diff --git a/assets/i18n/he.json b/assets/i18n/he.json index 1c5bdc16cf23..d073eaa2990b 100644 --- a/assets/i18n/he.json +++ b/assets/i18n/he.json @@ -1,11 +1,14 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "על אודות קרן Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "על נושא שמעניין אותך.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "קבלת משוב", "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "על אודות Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_1": "המטרה של Oppia היא לסייע לכל מי שמעוניין ללמוד כל תחום בצורה יעילה ומהנה.", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "פרסום ושיתוף", + "I18N_ABOUT_PAGE_BREADCRUMB": "אודות", "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "תודות", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "תרומה", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "להשתתף במיזם", "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "קרן Oppia", "I18N_ABOUT_PAGE_HEADING": "Oppia: למידה לעם", "I18N_ABOUT_PAGE_LEARN_BUTTON": "מעניין אותי ללמוד", @@ -21,6 +24,8 @@ "I18N_ACTION_CREATE_LESSON_BUTTON": "יצירת שיעורים", "I18N_ACTION_EXPLORE_LESSONS": "חיפוש שיעורים", "I18N_CLASSROOM_PAGE_COMING_SOON": "יגיעו בקרוב", + "I18N_COMING_SOON": "בקרוב!", + "I18N_COMPLETED_STORY": "׳<[story]>׳ הושלם", "I18N_CONTACT_PAGE_PARAGRAPH_1": "תודה על הבעת העניין לסייע לנו עם מיזם Oppia!", "I18N_CONTACT_PAGE_PARAGRAPH_10_HEADING": "שיפור ותחזוקת האתר", "I18N_CONTACT_PAGE_PARAGRAPH_11_HEADING": "תרומות", @@ -29,7 +34,10 @@ "I18N_CONTACT_PAGE_PARAGRAPH_15_HEADING": "אבטחה", "I18N_CONTACT_PAGE_PARAGRAPH_2_HEADING": "כולנו מתנדבים", "I18N_CONTACT_PAGE_PARAGRAPH_4_HEADING": "דרכים בהן ניתן לסייע", - "I18N_CONTINUE_REGISTRATION": "להמשיך בהרשמה", + "I18N_CONTINUE_REGISTRATION": "המשך הרשמה", + "I18N_COOKIE_BANNER_ACKNOWLEDGE": "אישור", + "I18N_CORRECT_FEEDBACK": "נכון!", + "I18N_CREATE_ACCOUNT": "יצירת חשבון", 
"I18N_CREATE_ACTIVITY_QUESTION": "מה מעניין אותך ליצור?", "I18N_CREATE_ACTIVITY_TITLE": "ליצור פעילות", "I18N_CREATE_COLLECTION": "ליצור אוסף", @@ -106,15 +114,34 @@ "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "נא להכניס מספר שאינו קטן מ־<[minValue]>.", "I18N_FORMS_TYPE_NUMBER_AT_MOST": "נא להכניס מספר שאינו גדול מ־<[maxValue]>.", "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "נא להקליד מספר עשרוני תקני.", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "אין להזין 0 במכנה", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "נא להכניס שבר בצורה x/y.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS": "יש להשתמש בספרות, רווחים, ולוכסנים קדמיים (/) בלבד.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS_LENGTH": "אין לכלול יותר מ-7 ספרות באך אחד מהמספרים בשבר.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_FORMAT": "נא להכניס שבר עשרוני תקין (למשל 5/3 או 1 2/3)", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "אנא הזן ערך כשבר (למשל 5/3 במקום 1 2/3)", + "I18N_INTERACTIONS_FRACTIONS_SIMPLEST_FORM": "נא הזן תשובה בצורה המפושטת ביותר (למשל 1/3 במקום 2/6)", "I18N_INTERACTIONS_GRAPH_DELETE": "מחיקה", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "תרשים שגוי!", "I18N_INTERACTIONS_GRAPH_MOVE": "העברה", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_DOT": "התשובה יכולה להכיל 15 ספרות לכל היותר (0-9) לא כולל תווים מיוחדים (. או -).", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "התשובה יכולה להכיל רק ספרות (0-9) ואת התו נקודה (.).", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "התשובה צריכה להיות גדולה או שווה ל-0. 
היא אינה יכולה להכיל את תו המינוס (-).", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "ביטול", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "נא להכניס יחס תקין (למשל 1:2 או 1:2:3)", + "I18N_INTERACTIONS_RATIO_INCLUDES_ZERO": "יחס לא יכול להכיל 0 כאיבר", + "I18N_INTERACTIONS_RATIO_INVALID_CHARS": "נא להכניס יחס המורכב מספרות המופרדות בנקודותיים (למשל 1:2, או 1:2:3)", + "I18N_INTERACTIONS_RATIO_INVALID_COLONS": "תשובתך כוללת סימן נקודותיים (:) כפול כאשר אחד צמוד לשני.", + "I18N_INTERACTIONS_RATIO_INVALID_FORMAT": "נא להזין יחס תקין (למשל 1:2 או 1:2:3)", + "I18N_INTERACTIONS_RATIO_NON_INTEGER_ELEMENTS": "עבור שאלה זאת, כל איבר ביחס צריך להיות מספר שלם (לא שבר או מספר עשרוני).", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "הוספת פריט", "I18N_INTERACTIONS_SUBMIT": "שליחה", + "I18N_INTERACTIONS_TERMS_LIMIT": "היוצר ציין שמספר האיברים בתשובה צריך להיות <[termsCount]>", + "I18N_LEARNER_DASHBOARD_ALL": "הכול", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "הושלם", + "I18N_LEARNER_DASHBOARD_EMPTY_LEARN_SOMETHING_NEW_SECTION": "נראה כאילו הגעת למגבלת בחירת המטרות. נא לעבור לספרייה כדי לגלות מחקרים נוספים.", "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "האחרונים ששיחקת", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "לא הושלם", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "בתהליך", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_COLLECTION": "נראה שלא התחלת שום אוסף כלל. 
מהר לספריה בכדי להתחיל אוסף מרנין!", "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "הסרה", @@ -123,6 +150,7 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "נוכחי:", "I18N_LEARNER_DASHBOARD_SUGGESTION_DESCRIPTION": "תיאור קצר של השינויים:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "הצעה", + "I18N_LEARNER_DASHBOARD_VIEW": "צפייה", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "הצגת הצעה", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "השלמת את זה", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "כבר נוסף לרשימת ההפעלה", @@ -183,6 +211,7 @@ "I18N_LICENSE_PAGE_LICENSE_HEADING": "רישיון", "I18N_MODAL_CANCEL_BUTTON": "ביטול", "I18N_MODAL_CONTINUE_BUTTON": "המשך", + "I18N_NO": "לא", "I18N_ONE_SUBSCRIBER_TEXT": "יש לך מינוי אחד.", "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "חשבון שיימחק", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1": "חשבונך מתוזמן למחיקה והוא יימחק תוך 24 שעות בערך. {{GENDER:|תקבל|תקבלי}} הודעה בדוא\"ל כשהמחיקה תושלם.", @@ -243,6 +272,10 @@ "I18N_PREFERENCES_CANCEL_BUTTON": "ביטול", "I18N_PREFERENCES_CHANGE_PICTURE": "החלפת תמונת פרופיל", "I18N_PREFERENCES_EMAIL": "דוא״ל", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "ייצוא חשבון", + "I18N_PREFERENCES_SEARCH_LABEL": "חיפוש", + "I18N_SHOW_LESS": "הצג פחות", + "I18N_SHOW_MORE": "הצג עוד", "I18N_SPLASH_BENEFITS_ONE": "למידה מותאמת אישית", "I18N_SPLASH_BENEFITS_THREE": "שיעורי שקל לעקוב אחריהם", "I18N_SPLASH_BENEFITS_TITLE": "היתרונות שלנו", @@ -273,5 +306,16 @@ "I18N_SPLASH_VOLUNTEERS_CONTENT": "לא משנה מי אתה או את, יש לך בית ב־Oppia. אנו תמיד צריכים אנשים נוספים לשיפור השיעורים על ידי הצעת שאלות, תרומת עזרים חזותיים או תרגום שיעורים.", "I18N_SPLASH_VOLUNTEERS_TITLE": "מופעל על ידי הקהילה", "I18N_START_HERE": "התחל כאן!", - "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - הושלם!" + "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - הושלם!", + "I18N_TIME_FOR_BREAK_BODY_1": "נראה ששליחת התשובות התבצעה די מהר. התעייפת?", + "I18N_TIME_FOR_BREAK_BODY_2": "אם כך, שקול לקחת הפסקה! 
ניתן לחזור מאוחר יותר.", + "I18N_TIME_FOR_BREAK_FOOTER": "אפשר להמשיך בשיעור", + "I18N_TIME_FOR_BREAK_TITLE": "זמן להפסקה?", + "I18N_TOPIC_LEARN": "ללמוד", + "I18N_TOPIC_TITLE": "נושא", + "I18N_TOPIC_VIEWER_COMING_SOON": "בקרוב!", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "חזרו מאוחר יותר כשיהיו שיעורים זמינים עבור נושא זה.", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "חזרו מאוחר יותר כשתהיינה שאלות תרגול זמינות עבור נושא זה.", + "I18N_VIEW_ALL_TOPICS": "צפה בכל נושאי ה-<[classroomName]>", + "I18N_YES": "כן" } diff --git a/assets/i18n/hi.json b/assets/i18n/hi.json index 8b863452830a..a45c6c68bce7 100644 --- a/assets/i18n/hi.json +++ b/assets/i18n/hi.json @@ -63,6 +63,9 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "शिक्षकों के लिए मार्गदर्शक निर्देश", "I18N_ACTION_TIPS_FOR_PARENTS": "माता-पिता और अभिभावकों के लिए सुझाव", "I18N_ACTION_VISIT_CLASSROOM": "कक्षा में जाएँ", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "नाम", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "नाम", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "सहेजें", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "यह कार्ड काफी लंबा है, और छात्रों की रुचि कम हो सकती है। इसे छोटा करने, या इसे दो कार्डों में तोड़ने पर विचार करें।", "I18N_CLASSROOM_CALLOUT_BUTTON": "खोजें", "I18N_CLASSROOM_CALLOUT_HEADING_1": "गणित की नींव", @@ -71,7 +74,7 @@ "I18N_CLASSROOM_PAGE_COMING_SOON": "जल्द आ रहा है", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "पाठ्यक्रम संबंधी जानकारी", "I18N_CLASSROOM_PAGE_HEADING": "ओपिया क्लासरूम", - "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "समुदाय द्वारा द्वारा बनाए और पाठों का अन्वेषण करें", + "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "समुदाय द्वारा बनाए और पाठों का अन्वेषण करें", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "हमारे समुदाय संग्रह के माध्यम से खोजें", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "शामिल विषय", "I18N_CONTACT_PAGE_BREADCRUMB": "संपर्क", @@ -160,6 +163,7 @@ "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "इस क्षेत्र में एक छवि खींचें", "I18N_DIRECTIVES_UPLOAD_A_FILE": "फ़ाइल अपलोड करें", 
"I18N_DONATE_PAGE_BREADCRUMB": "दान करें", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "भारत से", "I18N_ERROR_DISABLED_EXPLORATION": "निर्योग्य एक्सप्लोरेशन", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "माफ़ कीजिये,ये एक निर्योग्य एक्सप्लोरेशन है। बाद में पुन: प्रयास करें ।", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "निर्योग्य एक्सप्लोरेशन - Oppia", @@ -237,12 +241,15 @@ "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "वजन अपडेट करे", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[प्रदर्शित करने के लिए एक छवि का चयन करे]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "आप अधिक विकल्प चुन सकते हैं।", - "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{कृपया एक या अधिक विकल्प चुनें। } other{कम से कम # या अधिक विकल्प चुनें}}", + "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{कृपया सभी सही विकल्प चुनें।} other{कृपया # या उससे ज़्यादा विकल्प चुनें।}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{1 से अधिक विकल्प का चयन नहीं किया जा सकता है।} other{# से अधिक विकल्प का चयन नहीं किया जा सकता है।}}", "I18N_INTERACTIONS_MUSIC_CLEAR": "मिटाये", "I18N_INTERACTIONS_MUSIC_PLAY": "चलाएँ", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "लक्ष्य अनुक्रम चलाएँ", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "संभावित इकाई प्रारूप", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_DECIMAL": "अधिक से अधिक 1 दशमलव बिंदु उपस्थित होना चाहिए।", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "उत्तर एक वैध संख्या होना चाहिए।", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "उत्तर शून्य से बड़ा या उसके बराबर होना चाहिए।", "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "क्या आप वाकई कोड रीसेट करना चाहते हैं", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "रद्द करें", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "पुष्टिकरण आवश्यक", @@ -253,6 +260,7 @@ "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "कोई जवाब नहीं दिया गया है।", 
"I18N_INTERACTIONS_SUBMIT": "जमा करें", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Oppia को देखे:", + "I18N_LEARNER_DASHBOARD_ALL": "सभी", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "पूर्ण", "I18N_LEARNER_DASHBOARD_COMPLETED_TO_INCOMPLETE_COLLECTIONS": "आपके द्वारा किए गए संग्रह में से <[numberMoved]> को 'प्रगति' अनुभाग में स्थानांतरित किया गया क्योंकि उन्हें नए अन्वेषण जोड़े गए हैं!", "I18N_LEARNER_DASHBOARD_EMPTY_COLLECTION_PLAYLIST": "ऐसा लगता है कि आपकी 'बाद में चलाएं' सूची में कोई संग्रह नहीं है। लाइब्रेरी में जाएं और खुद के चुने पाठों से प्लेलिस्ट बनाएं!", @@ -295,7 +303,13 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "सुझाव:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "सुझाव", "I18N_LEARNER_DASHBOARD_TOOLTIP": "संग्रह एक से अधिक संबंधित अन्वेषण हैं जिन्हें एक क्रम में पूरा किया जाना है।", + "I18N_LEARNER_DASHBOARD_VIEW": "देखें", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "सुझाव देखें", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "अगला", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "विवरण", + "I18N_LEARNER_GROUP_DETAILS_TITLE": "विवरण", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "जोड़ा गया", + "I18N_LEARNER_GROUP_PREFERENCES_MODAL_SAVE_BUTTON": "सहेजें", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "आपने इसे पूरा कर लिया है", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "प्लेलिस्ट मैं पहले ही शामिल है", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "'बाद में चलाएं' सूची में जोड़ें", @@ -373,6 +387,7 @@ "I18N_LOGOUT_PAGE_TITLE": "लॉग-आउट करें", "I18N_MODAL_CANCEL_BUTTON": "रद्द करना", "I18N_MODAL_CONTINUE_BUTTON": "जारी रखें", + "I18N_NO": "नहीं", "I18N_ONE_SUBSCRIBER_TEXT": "आपके पास 1 ग्राहक है।", "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "खाता हटाना लंबित है", "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "खाता हटाया जाना है", @@ -405,7 +420,7 @@ "I18N_PLAYER_CARD_NUMBER_TOOLTIP": "कार्ड #", "I18N_PLAYER_COMMUNITY_EDITABLE_TOOLTIP": "समुदाय संपादन योग्य", "I18N_PLAYER_CONTINUE_BUTTON": "जारी रहना", - "I18N_PLAYER_CONTRIBUTORS_TOOLTIP": 
"योगदानकर्ताओं", + "I18N_PLAYER_CONTRIBUTORS_TOOLTIP": "योगदानकर्ताएँ", "I18N_PLAYER_DEFAULT_MOBILE_PLACEHOLDER": "जवाब देने के लिए यहां टैप करें!", "I18N_PLAYER_EDIT_TOOLTIP": "संपादित", "I18N_PLAYER_EMBED_TOOLTIP": "जड़ना", @@ -503,6 +518,10 @@ "I18N_PREFERENCES_USERNAME": "उपयोगकर्ता नाम", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "अभी तक चयनित नहीं किया गया है", "I18N_PROFILE_NO_EXPLORATIONS": "इस उपयोगकर्ता ने अभी तक कोई अन्वेषण नहीं बनाया है या संपादित नहीं किया है।", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "क्या आप जारी रखना चाहते हैं?", + "I18N_PROGRESS_REMINDER_MODAL_HEADER": "आपने <[progress]> पूरा कर लिया है और सन्दर्भ है", + "I18N_PROGRESS_REMINDER_RESTART_LESSON": "नहीं, वापस शुरुआत से चलें", + "I18N_PROGRESS_REMINDER_RESUME_LESSON": "हाँ, पाठ जारी रखें", "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "अपने स्कोर के बारे में और जानें", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "मेरा डैशबोर्ड", "I18N_QUESTION_PLAYER_NEW_SESSION": "नया सत्र", @@ -523,7 +542,6 @@ "I18N_SIDEBAR_CONTACT_US": "संपर्क करें", "I18N_SIDEBAR_DONATE": "दान करे", "I18N_SIDEBAR_FORUM": "मंच", - "I18N_SIDEBAR_GET_STARTED": "शुरू हो जाओ", "I18N_SIDEBAR_LIBRARY_LINK": "पुस्तकालय", "I18N_SIDEBAR_OPPIA_FOUNDATION": "ओपिया फाउंडेशन", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Oppia के साथ सिखाये", @@ -603,6 +621,7 @@ "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> -पूरा हुआ!", "I18N_SUBSCRIBE_BUTTON_TEXT": "सदस्यता लें", "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "अगला कौशल", + "I18N_SYLLABUS_STORY_TITLE": "कहानि", "I18N_TEACH_BENEFITS_ONE": "प्रभावी, उच्च गुणवत्ता वाली शिक्षा, सभी उम्र के लिए", "I18N_TEACH_BENEFITS_THREE": "हमेशा मुफ़्त और उपयोग में आसान", "I18N_TEACH_BENEFITS_TITLE": "हमारे साथ सीखने के लाभ", @@ -622,6 +641,7 @@ "I18N_TEACH_STUDENT_DETAILS_4": "यममा, सूत्रधार, फ़िलिस्तीन", "I18N_THANKS_PAGE_BREADCRUMB": "धन्यवाद", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 पाठ} other{# पाठ}}", + "I18N_TOPIC_TITLE": "विषय", "I18N_TOPIC_VIEWER_CHAPTER": "पाठ", 
"I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{1 अध्याय} other{# अध्याय}}", "I18N_TOPIC_VIEWER_DESCRIPTION": "विवरण", @@ -644,7 +664,6 @@ "I18N_TOPNAV_ABOUT_OPPIA": "Oppia के बारे में", "I18N_TOPNAV_ADMIN_PAGE": "व्यवस्थापक पेज", "I18N_TOPNAV_BLOG": "ब्लॉग", - "I18N_TOPNAV_CLASSROOM": "कक्षा", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "बुनियादी गणित", "I18N_TOPNAV_CONTACT_US": "संपर्क करें", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "योगदानकर्ता डैशबोर्ड", @@ -653,6 +672,8 @@ "I18N_TOPNAV_FORUM": "मंच", "I18N_TOPNAV_GET_INVOLVED": "हमसे जुड़ें", "I18N_TOPNAV_GET_STARTED": "शुरू हो जाओ", + "I18N_TOPNAV_HOME": "घर", + "I18N_TOPNAV_LEARN": "कक्षा", "I18N_TOPNAV_LEARNER_DASHBOARD": "सीखने वाला डैशबोर्ड", "I18N_TOPNAV_LIBRARY": "पुस्तकालय", "I18N_TOPNAV_LOGOUT": "लोग आउट करे", @@ -667,5 +688,6 @@ "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "विषय और कौशल डैशबोर्ड", "I18N_TOTAL_SUBSCRIBERS_TEXT": "आपके पास <[totalSubscribers]> ग्राहकों है।", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "सदस्यता समाप्त", - "I18N_WORKED_EXAMPLE": "हल किया गया उदाहरण" + "I18N_WORKED_EXAMPLE": "हल किया गया उदाहरण", + "I18N_YES": "हाँ" } diff --git a/assets/i18n/hu.json b/assets/i18n/hu.json index 1b4a89ea38c8..b8c57a048125 100644 --- a/assets/i18n/hu.json +++ b/assets/i18n/hu.json @@ -15,9 +15,38 @@ "I18N_ACTION_CREATE_EXPLORATION": "Felfedezés létrehozása", "I18N_ACTION_CREATE_LESSON": "Hozd létre saját leckéd", "I18N_ACTION_TIPS_FOR_PARENTS": "Tippek szülők és gyámok számára", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Mégsem", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "Kész", + "I18N_ADD_NEW_SYLLABUS_ITEMS": "Új tananyagelemek", + "I18N_ADD_SYLLABUS_DESCRIPTION_TEXT": "Adj készségeket, történeteket tananyagodhoz, hogy automatikusan elküldhesd őket tanulóidnak.", + "I18N_ASSIGNED_STORIES_AND_SKILLS": "Hozzárendelt történetek és készségek", "I18N_CLASSROOM_PAGE_COMING_SOON": "Hamarosan!", "I18N_CLASSROOM_PAGE_HEADING": "Oppia tanterem", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Keresés a Közösségi 
Könyvtárban", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_1": "Elérted az első ellenőrzőpontot! Szép kezdés!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_2": "Ügyesen eljutottál az első ellenőrzőpontig! Csak így tovább!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_3": "Remek kezdés! Csak így tovább!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_1": "Elértél egy ellenőrzőpontot! Szép munka!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_2": "Szuper! Elértél egy ellenőrzőpontot! Menj tovább!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_3": "Szép munka! Elértél egy ellenőrzőpontot!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_1": "Félúton vagy, seperc alatt végzel!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_2": "Félúton vagy! Szép munka!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_3": "Azt a! Túl vagy a lecke felén! Remek munka!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_1": "Már csak egy van hátra, hurrá!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_2": "Gyerünk! Már csak egy van hátra!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_3": "Szuper vagy! Már csak egy van hátra!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_1": "Jól haladsz! Folytasd!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_2": "Vagány! Túl vagy a második ellenőrzőponton!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_3": "Elértél egy újabb ellenőrzőpontot, szuperül haladsz!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_1": "Mindjárt ott! Csak így tovább!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_2": "Már majdnem a végére értél! Csak így tovább!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_3": "Szép munka! 
Mindjárt végzel!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "Hurrá!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "Szuper!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_3": "Ellenőrzőpont!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "Szép munka!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "Remek munka!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "Szép munka!", "I18N_CONTACT_PAGE_PARAGRAPH_11_HEADING": "Adományok", "I18N_CONTACT_PAGE_PARAGRAPH_13_HEADING": "Sajtó", "I18N_CONTACT_PAGE_PARAGRAPH_15_HEADING": "Biztonság", @@ -54,7 +83,9 @@ "I18N_DASHBOARD_TABLE_HEADING_RATING": "Értékelés", "I18N_DASHBOARD_TABLE_HEADING_UNRESOLVED_ANSWERS": "Megoldatlan válaszok", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "Innen elnavigálhatsz egy oldalra, ahol törölni tudod Oppia fiókod.", + "I18N_DELETE_LEARNER_GROUP": "Csoport törlése", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Húzz ide egy képet", + "I18N_EDIT_LEARNER_GROUP_PAGE_TITLE": "Tanulócsoport szerkesztése | Oppia", "I18N_ERROR_DISABLED_EXPLORATION": "Letiltott felfedezés", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Sajnáljuk, de a választott felfedezés jelenleg le van tiltva. 
Kérjük, próbáld újra később.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Letiltott felfedezés – Oppia", @@ -122,6 +153,26 @@ "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Nem válaszoltál.", "I18N_INTERACTIONS_SUBMIT": "Elküldés", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Nyelvváltás:", + "I18N_LEARNER_GROUP_ADD_NEW_SYLLABUS_ITEMS": "Új tananyagelemek hozzáadása", + "I18N_LEARNER_GROUP_BACK_TO_ALL_LEARNERS_PROGRESS": "Vissza a tanulók haladásához", + "I18N_LEARNER_GROUP_BACK_TO_SYLLABUS": "Vissza a tananyaghoz", + "I18N_LEARNER_GROUP_INVITE_LEARNERS": "Tanulók meghívása", + "I18N_LEARNER_GROUP_INVITE_LEARNERS_BY_USERNAME": "Tanulók meghívása felhasználónév alapján", + "I18N_LEARNER_GROUP_INVITE_LEARNERS_PLACEHOLDER_TEXT": "Add meg a meghívni kívánt tanuló felhasználónevét, majd nyomj egy Entert", + "I18N_LEARNER_GROUP_ITEM_ALREADY_ADDED_TO_SYLLABUS": "Már a tananyagban", + "I18N_LEARNER_GROUP_LEARNERS_PROGRESS_TAB": "Tanulók fejlődése", + "I18N_LEARNER_GROUP_NO_LEARNERS_HAVE_JOINED": "Még nem csatlakozott tanuló a csoporthoz.", + "I18N_LEARNER_GROUP_NO_LEARNERS_INVITED": "Még nem hívtál meg tanulókat.", + "I18N_LEARNER_GROUP_OVERVIEW_TAB": "Áttekintés", + "I18N_LEARNER_GROUP_PERMISSION_NOT_GIVEN": "Engedély nincs megadva", + "I18N_LEARNER_GROUP_PREFERENCES_TAB": "Beállítások", + "I18N_LEARNER_GROUP_PROGRESS_NO_LEARNERS": "Ebben a csoportban nincsenek tanulók. 
Hívj meg a tanulókat, hogy lásd, hogy haladnak!", + "I18N_LEARNER_GROUP_SEARCH_BY_USERNAME": "Keresés felhasználónév alapján", + "I18N_LEARNER_GROUP_SKILLS_ANALYSIS_SECTION": "Készségek elemzése", + "I18N_LEARNER_GROUP_STORIES_SECTION_PROGRESS_DESCRIPTION": "Ebben a részben a csoport tanulóinak elkészült történeteit találod.", + "I18N_LEARNER_GROUP_SYLLABUS_TAB": "Tanterv", + "I18N_LEARNER_GROUP_VIEW_DETAILS": "Részletek megtekintése", + "I18N_LEARNER_GROUP_WITHDRAW_INVITE": "Visszavonás", "I18N_LIBRARY_ALL_CATEGORIES": "Minden kategória", "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "Minden kategória kiválasztva", "I18N_LIBRARY_ALL_LANGUAGES": "Minden nyelv", @@ -189,6 +240,7 @@ "I18N_LIBRARY_VIEWS_TOOLTIP": "Megtekintések", "I18N_LIBRARY_VIEW_ALL": "Összes megtekintése", "I18N_LICENSE_PAGE_LICENSE_HEADING": "Licensz", + "I18N_MODAL_REMOVE_BUTTON": "Eltávolítás", "I18N_ONE_SUBSCRIBER_TEXT": "1 feliratkozód van.", "I18N_PLAYBOOK_TAB_PARTICIPATION_PLAYBOOK": "Részvételi szabályzat", "I18N_PLAYER_BACK": "Vissza", @@ -269,13 +321,13 @@ "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "Alkotók, akikre feliratkoztál", "I18N_PREFERENCES_USERNAME": "Felhasználónév", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Még nincs kiválasztva", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "Szeretnéd folytatni?", "I18N_SIDEBAR_ABOUT_LINK": "Névjegy", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM": "Tanterem", "I18N_SIDEBAR_CONTACT_US": "Kapcsolatfelvétel", "I18N_SIDEBAR_DONATE": "Adományozás", "I18N_SIDEBAR_FORUM": "Fórum", - "I18N_SIDEBAR_GET_STARTED": "Kezdjük el", "I18N_SIDEBAR_LIBRARY_LINK": "Könyvtár", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Tanítás az Oppiával", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "A négyzet bejelölésével, a szöveg bal oldalán, elismered és elfogadod, hogy be fogod tartani az <[sitename]> Felhasználási feltételeit, amelyeket itt találsz meg.", @@ -307,6 +359,7 @@ "I18N_SIGNUP_USERNAME": "Felhasználónév", "I18N_SIGNUP_USERNAME_EXPLANATION": "A felhasználóneved meg fog 
jelenni a közreműködéseid mellett.", "I18N_SIGNUP_WHY_LICENSE": "Miért a CC-BY-SA?", + "I18N_SKILL_LEVEL_BEGINNER": "Kezdő", "I18N_SPLASH_JAVASCRIPT_ERROR_DESCRIPTION": "Az Oppia egy szabad, nyílt forráskódú tanulási platform, interaktív foglalkozásokkal, amelyeket 'felfedezéseknek' hívunk. Sajnos, az Oppia megfelelő működéséhez szükséges a JavaScript, és a böngésződben ez ki van kapcsolva. Ha segítség kell a JavaScript bekapcsolásához, \">kattints ide.", "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "Köszönjük.", "I18N_SPLASH_JAVASCRIPT_ERROR_TITLE": "Szükségünk van JavaScriptre a böngésződben", diff --git a/assets/i18n/id.json b/assets/i18n/id.json index 54e0db8c018b..77c7ae66ca8e 100644 --- a/assets/i18n/id.json +++ b/assets/i18n/id.json @@ -1,18 +1,35 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Tentang Yayasan Oppia", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "Tentang Yayasan Oppia | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Dapatkan umpan balik", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK_TEXT": "untuk meningkatkan penjelajahan Anda", "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "Tentang Oppia", + "I18N_ABOUT_PAGE_AUDIO_SUBTITLES_FEATURE": "Takarir Audio", + "I18N_ABOUT_PAGE_BREADCRUMB": "Tentang", + "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "Kredit", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "Menyumbang", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "Ikut Terlibat", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "Yayasan Oppia", "I18N_ABOUT_PAGE_LEARN_BUTTON": "Saya ingin belajar", "I18N_ABOUT_PAGE_TABS_ABOUT": "Tentang", "I18N_ABOUT_PAGE_TABS_CREDITS": "Kredit", "I18N_ABOUT_PAGE_TABS_FOUNDATION": "Yayasan", - "I18N_ABOUT_PAGE_TITLE": "Tentang kami - Oppia", + "I18N_ABOUT_PAGE_TEACH_BUTTON": "Saya ingin mengajar", + "I18N_ABOUT_PAGE_TITLE": "Tentang | Oppia", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Mendaftar ke \"Teach with Oppia\"", "I18N_ACTION_BROWSE_EXPLORATIONS": "Lihat eksplorasi kami", "I18N_ACTION_BROWSE_LESSONS": "Lihat pelajaran kami", 
"I18N_ACTION_CREATE_EXPLORATION": "Buat eksplorasimu sendiri", "I18N_ACTION_CREATE_LESSON": "Buat pelajaranmu sendiri", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Batal", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "Selesai", + "I18N_ANDROID_PAGE_TITLE": "Android | Oppia", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_HEADING": "Bio", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "Edit Nama Penulis dan Bio Anda", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Nama", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Pos Baru", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "Simpan", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Blog", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Isi", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Batal", "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Hapus", @@ -22,8 +39,18 @@ "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "Label", "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "Miniatur", "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Kesalahan: File gambar tidak dapat dibaca.", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_TEXT": "Ikuti kuis 10-15 pertanyaan untuk mencari tahu dari mana harus memulai.", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Rincian Materi", + "I18N_CLASSROOM_PAGE_NEW_TO_MATH_TEXT": "Mulai dari dasar dengan topik pertama kita, <[firstTopic]>.", + "I18N_CLASSROOM_PAGE_TAKE_A_TEST_BUTTON": "Ikuti tes", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "Topik Yang Sudah Dibahas", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_1": "Tinggal satu lagi, semangat!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_1": "Anda membuat kemajuan yang baik! Lanjutkan!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_2": "Anda hampir berhasil! 
Terus maju!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "Hore!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "Luar biasa!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "Kerja bagus!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "Kerja yang behat!", "I18N_CONTACT_PAGE_PARAGRAPH_15_HEADING": "Keamanan", "I18N_CREATE_ACTIVITY_QUESTION": "Apa yang ingin anda buat?", "I18N_CREATE_ACTIVITY_TITLE": "Buat Aktivitas", @@ -33,6 +60,7 @@ "I18N_CREATE_EXPLORATION_QUESTION": "Apa anda ingin membuat eksplorasi?", "I18N_CREATE_EXPLORATION_TITLE": "Buat Eksplorasi", "I18N_CREATE_EXPLORATION_UPLOAD": "Unggah", + "I18N_CREATE_LEARNER_GROUP": "Buat Grup", "I18N_CREATE_NO_THANKS": "Tidak, Terima Kasih", "I18N_CREATE_YES_PLEASE": "Ya, tentu!", "I18N_DASHBOARD_COLLECTIONS": "Koleksi", @@ -55,6 +83,15 @@ "I18N_DASHBOARD_TABLE_HEADING_PLAYS": "Mainkan", "I18N_DASHBOARD_TABLE_HEADING_RATING": "Peringkat", "I18N_DASHBOARD_TABLE_HEADING_UNRESOLVED_ANSWERS": "Pertanyaan belum terjawab", + "I18N_DIAGNOSTIC_TEST_CURRENT_PROGRESS": "Kemajuan Saat Ini: <[progressPercentage]> %", + "I18N_DIAGNOSTIC_TEST_EXIT_TEST": "Keluar dari Tes", + "I18N_DIAGNOSTIC_TEST_HEADING": "Tes Diagnostik Pelajar", + "I18N_DIAGNOSTIC_TEST_RESULT_GO_TO_CLASSROOM_BUTTON_TEXT": "Pergi ke kelas", + "I18N_DIAGNOSTIC_TEST_RESULT_HEADER_TEXT": "Tes selesai. Bagus sekali!", + "I18N_DIAGNOSTIC_TEST_RESULT_START_TOPIC": "Mulai <[topicName]>", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_1_FOR_NO_TOPIC": "Kerja bagus! Sepertinya Anda sudah memiliki pemahaman yang baik tentang topik-topik di Kelas Matematika.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_2_FOR_NO_TOPIC": "Jangan ragu untuk mengikuti salah satu pelajaran untuk meninjau atau memperbaiki apa yang Anda ketahui. 
Kami terus memperbarui Kelas dengan pelajaran baru, jadi periksa kembali.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_TWO_TOPICS": "Berdasarkan jawaban Anda, sebaiknya mulai dengan salah satu dari topik ini.", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Tarik gambar ke sini", "I18N_ERROR_DISABLED_EXPLORATION": "Eksplorasi Nonaktif", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Maaf, eksplorasi yang anda pilih sedang nonaktif. Mohon coba lagi lain kali.", @@ -125,6 +162,8 @@ "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Jawaban tidak terisi.", "I18N_INTERACTIONS_SUBMIT": "Simpan", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Lihat Oppia dalam:", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "Selanjutnya", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "Lihat Rincian", "I18N_LIBRARY_ALL_CATEGORIES": "Semua Kategori", "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "Semua kategori terpilih", "I18N_LIBRARY_ALL_LANGUAGES": "Semua Bahasa", @@ -268,12 +307,14 @@ "I18N_PREFERENCES_SUBJECT_INTERESTS_PLACEHOLDER": "Masukkan subjek yang diminati...", "I18N_PREFERENCES_USERNAME": "Username", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Belum terpilih", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "Apakah Anda ingin melanjutkan?", + "I18N_PROGRESS_REMINDER_RESTART_LESSON": "Tidak, ulangi dari awal", + "I18N_PROGRESS_REMINDER_RESUME_LESSON": "Ya, lanjutkan pelajarannya", "I18N_SIDEBAR_ABOUT_LINK": "Tentang", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CONTACT_US": "Hubungi Kami", "I18N_SIDEBAR_DONATE": "Donasi", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Memulai", "I18N_SIDEBAR_LIBRARY_LINK": "Perpustakaan", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Mengajar dengan Oppia", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Dengan mencentang kotak di sebelah kiri teks ini, anda mengetahui, menyetujui, dan menerima untuk terikat oleh Ketentuan Penggunaan <[sitename]>, yang terletak di sini.", @@ -282,7 +323,7 @@ "I18N_SIGNUP_CLOSE_BUTTON": "Tutup", "I18N_SIGNUP_COMPLETE_REGISTRATION": "Lengkapi Registrasi Anda", 
"I18N_SIGNUP_DO_NOT_SEND_EMAILS": "Jangan kirim semua email ini", - "I18N_SIGNUP_EMAIL": "Email", + "I18N_SIGNUP_EMAIL": "Surel", "I18N_SIGNUP_EMAIL_PREFERENCES": "Preferensi Email", "I18N_SIGNUP_EMAIL_PREFERENCES_EXPLAIN": "Anda dapat mengganti pengaturan ini kapan saja dari halaman Preferensi anda.", "I18N_SIGNUP_ERROR_MUST_AGREE_TO_TERMS": "Agar dapat mengubah eksplorasi di situs ini, anda harus menyetujui ketentuan situs ini.", @@ -316,6 +357,8 @@ "I18N_SPLASH_THIRD_EXPLORATION_DESCRIPTION": "Oppia memungkinkan Anda untuk menciptakan dan membagikan eksplorasi dalam berbagai subjek, terbatas hanya pada imajinasi Anda.", "I18N_SPLASH_TITLE": "Berpikir di luar buku.", "I18N_SUBSCRIBE_BUTTON_TEXT": "Berlangganan", + "I18N_SYLLABUS_SKILL_TITLE": "Keahlian", + "I18N_SYLLABUS_STORY_TITLE": "Cerita", "I18N_TEACH_PAGE_HEADING": "Bantu Siswa dari Seluruh Dunia", "I18N_TOPNAV_ABOUT": "Tentang", "I18N_TOPNAV_ABOUT_OPPIA": "Tentang Oppia", diff --git a/assets/i18n/it.json b/assets/i18n/it.json index c43577c5f080..6affee64fb7c 100644 --- a/assets/i18n/it.json +++ b/assets/i18n/it.json @@ -39,6 +39,16 @@ "I18N_ACTION_CREATE_LESSON": "Crea la tua lezione", "I18N_ACTION_CREATE_LESSON_BUTTON": "Crea lezioni", "I18N_ACTION_EXPLORE_LESSONS": "Esplora le lezioni", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Annulla", + "I18N_ANDROID_PAGE_AVAILABLE_FOR_DOWNLOAD_TEXT": "disponibile per il download.", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "Indirizzo email", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_5": "Crea e gestisci fino a 10 profili su un dispositivo.", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Nome", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_EXPLAIN_TEXT": "Questa è una piccola descrizione su di te. 
Tutto ciò che scrivi qui è pubblico e visibile a tutto il mondo.", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_HEADING": "Biografia", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "Modifica il tuo nome autore e biografia", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Nome", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "Salva", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Corpo", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Annulla", "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Cancella", @@ -58,8 +68,10 @@ "I18N_BLOG_POST_UNTITLED_HEADING": "Senza titolo", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Questa scheda è piuttosto lunga, gli studenti potrebbero perdere interesse. Valuta la possibilità di abbreviarla, o dividerla in due schede.", "I18N_CLASSROOM_CALLOUT_BUTTON": "Esplora", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_TEXT": "Rispondi a un quiz di 10-15 domande per scoprire da dove iniziare.", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Esplora più lezioni create dalla comunità", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Ricerca attraverso la libreria della nostra comunità", + "I18N_CLASSROOM_PAGE_TAKE_A_TEST_BUTTON": "Fai un test", "I18N_CONTACT_PAGE_BREADCRUMB": "Contatti", "I18N_CONTACT_PAGE_HEADING": "Partecipa!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Grazie per il tuo interesse nell'aiutare il progetto Oppia!", @@ -71,6 +83,7 @@ "I18N_CONTACT_PAGE_PARAGRAPH_3_HEADING": "Come Oppia è differente dalle altre piattaforme di apprendimento", "I18N_CONTACT_PAGE_PARAGRAPH_4_HEADING": "Come puoi aiutare", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "OK", + "I18N_CREATE_ACCOUNT": "Crea utenza", "I18N_CREATE_ACTIVITY_QUESTION": "Cosa vuoi creare?", "I18N_CREATE_ACTIVITY_TITLE": "Crea un'attività", "I18N_CREATE_COLLECTION": "Crea collezione", @@ -100,9 +113,22 @@ "I18N_DASHBOARD_TABLE_HEADING_LAST_UPDATED": "Ultimo aggiornamento", "I18N_DASHBOARD_TABLE_HEADING_PLAYS": "Riproduci", "I18N_DASHBOARD_TABLE_HEADING_RATING": "Valutazione", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_TWO_TOPICS": 
"In base alle tue risposte, ti consigliamo di iniziare con uno di questi argomenti.", + "I18N_DIAGNOSTIC_TEST_START_BUTTON": "Inizia il test", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Trascina un'immagine all'interno di quest'area", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Carica un file", "I18N_DONATE_PAGE_BREADCRUMB": "Dona", + "I18N_DONATE_PAGE_BUDGET_HEADING": "Dove vanno i tuoi soldi?", + "I18N_DONATE_PAGE_FAQ_QUESTION_9": "C'è qualcuno con cui posso parlare se sono interessato a diventare un partner aziendale?", + "I18N_DONATE_PAGE_HEADING_2": "istruzione di alta qualità e coinvolgente.", + "I18N_DONATE_PAGE_STATISTIC_4": "Volontari da tutto il mondo", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "Indirizzo email", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_NAME_LABEL_TEXT": "Nome (facoltativo)", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "Guarda un video", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "Hai appena completato il tuo primo capitolo!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_2": "Hai appena completato il tuo quinto capitolo!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_5": "Hai appena completato il tuo 50° capitolo!", + "I18N_END_CHAPTER_NEXT_CHAPTER_TEXT": "Alla prossima lezione!", "I18N_ERROR_DISABLED_EXPLORATION": "Esplorazione disabilitata", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Spiacenti, ma l'esplorazione su cui hai cliccato è al momento disabilitata. 
Perfavore, riprova più tardi.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Esplorazione disabilitata - Oppia", @@ -160,6 +186,7 @@ "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Seleziona un'immagine da visualizzare]", "I18N_INTERACTIONS_MUSIC_CLEAR": "Pulisci", "I18N_INTERACTIONS_MUSIC_PLAY": "Riproduci", + "I18N_INTERACTIONS_NUMERIC_INPUT_NO_INVALID_CHARS": "Sono consentiti solo le cifre 0-9, '.', 'e', e '-'.", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Annulla", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Conferma richiesta", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Aggiungi elemento", @@ -188,6 +215,8 @@ "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "In corso", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "Sembra che tu non abbia ancora provato nessuna esplorazione.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Inizia questo viaggio emozionante!", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "I tuoi gruppi", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_DECLINE_INVITATION": "Rifiuta", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_COLLECTION": "Sembra che tu non abbia ancora iniziato alcuna collezione. Torna alla libreria per iniziare una nuova entusiasmante collezione!", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": "Sembra che tu non abbia ancora iniziato alcuna esplorazione. 
Torna alla libreria per iniziare una nuova entusiasmante esplorazione!", "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "Avanzamento", @@ -198,7 +227,7 @@ "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "Invia", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Invio in corso...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Argento", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "Competenza nelle abilità", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Progresso delle abilità", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Storie completate", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Iscrizioni", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Avanzamento:", @@ -208,6 +237,16 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Consigliato:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Suggerimento", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Vedi suggerimento", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "Successivo", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "Descrizione", + "I18N_LEARNER_GROUP_DETAILS_TITLE": "Dettagli", + "I18N_LEARNER_GROUP_FACILITATOR_LABEL_TEXT": "Facilitatore", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "Aggiunto", + "I18N_LEARNER_GROUP_PREFERENCES_MODAL_SAVE_BUTTON": "Salva", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_OPTION_FALSE": "No, forse più tardi", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_OPTION_TRUE": "Sì, voglio condividere i miei progressi", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "Vedi dettagli", + "I18N_LEAVE_LEARNER_GROUP_MODAL_BUTTON": "Lascia", "I18N_LIBRARY_ALL_CATEGORIES": "Tutte le categorie", "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "Tutte le categorie selezionate", "I18N_LIBRARY_ALL_LANGUAGES": "Tutte le lingue", @@ -259,6 +298,7 @@ "I18N_LICENSE_PAGE_LICENSE_HEADING": "Licenza", "I18N_MODAL_CANCEL_BUTTON": "Annulla", "I18N_MODAL_CONTINUE_BUTTON": "Continua", + "I18N_MODAL_REMOVE_BUTTON": "Rimuovi", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_5": 
"Rappresentalo accuratamente - L'obiettivo di apprendimento viene mostrato agli studenti che stanno sfogliando le esplorazioni e dovrebbe riflettere il contenuto della tua esplorazione. L'esplorazione dovrebbe fornire tutto il contenuto promesso nell'obiettivo di apprendimento e nell'esplorazione stessa.", "I18N_PLAYER_AUDIO_EXPAND_TEXT": "Ascolta la lezione", "I18N_PLAYER_AUDIO_LANGUAGE": "Lingua", @@ -330,15 +370,21 @@ "I18N_PREFERENCES_USERNAME": "Nome utente", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Non ancora selezionato", "I18N_PROFILE_NO_EXPLORATIONS": "Questo utente non ha ancora creato o modificato nessuna esplorazione.", - "I18N_QUESTION_PLAYER_NEW_SESSION": "Nuova sessione", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "Vuoi continuare?", + "I18N_QUESTION_PLAYER_NEW_SESSION": "Ripeti", "I18N_QUESTION_PLAYER_SCORE": "Punteggio", - "I18N_SIDEBAR_ABOUT_LINK": "Informazioni su Oppia", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "Hai già un'utenza?", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_3": "Usa il collegamento qui sotto per salvare i progressi per 72 ore.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_5": "Scrivi o copia il collegamento qui sotto", + "I18N_SAVE_PROGRESS_BUTTON_TEXT": "Copia", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "Copiato!", + "I18N_SAVE_PROGRESS_TEXT": "Salva progressi", + "I18N_SIDEBAR_ABOUT_LINK": "Su di noi", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Matematica base", "I18N_SIDEBAR_CONTACT_US": "Contattaci", "I18N_SIDEBAR_DONATE": "Dona", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Inizia", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Insegna con Oppia", "I18N_SIGNIN_PAGE_TITLE": "Entra", "I18N_SIGNUP_CC_TITLE": "Licenza Creative Commons", @@ -357,6 +403,7 @@ "I18N_SIGNUP_LOADING": "Caricamento", "I18N_SIGNUP_PAGE_TITLE": "Unisciti alla comunità - Oppia", "I18N_SIGNUP_REGISTRATION": "Registrazione", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "Non chiedermelo più", "I18N_SIGNUP_SEND_ME_NEWS": "Inviami notizie e 
aggiornamenti del sito", "I18N_SIGNUP_UPDATE_WARNING": "Perfavore nota che abbiamo recentemente aggiornato le nostre Condizioni d'uso.", "I18N_SIGNUP_USERNAME": "Nome utente", @@ -377,6 +424,7 @@ "I18N_TOPNAV_ABOUT_OPPIA": "Informazioni su Oppia", "I18N_TOPNAV_ADMIN_PAGE": "Pagina di amministrazione", "I18N_TOPNAV_BLOG": "Blog", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Matematica base", "I18N_TOPNAV_CONTACT_US": "Contattaci", "I18N_TOPNAV_DONATE": "Dona", "I18N_TOPNAV_FORUM": "Forum", diff --git a/assets/i18n/ja.json b/assets/i18n/ja.json index 1ce095489460..52ac806834b6 100644 --- a/assets/i18n/ja.json +++ b/assets/i18n/ja.json @@ -1,4 +1,6 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Oppia財団について", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "Oppia財団について | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "探検を作る", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "気になるトピックで", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "フィードバックをもらう", @@ -15,10 +17,14 @@ "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "公開して共有", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE_TEXT": "作品をコミュニティーと", "I18N_ABOUT_PAGE_AUDIO_SUBTITLES_FEATURE": "オーディオ字幕", + "I18N_ABOUT_PAGE_BREADCRUMB": "このサイトについて", + "I18N_ABOUT_PAGE_CREATE_LESSON_CONTENT": "Oppiaのコンテンツ作成システムを使用すると、自分が情熱を注いでいるトピックに関するレッスンを簡単に作成、およびカスタマイズできます。", "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "クレジット", "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT": "Oppiaに貢献している人は世界中にいます。多くが生徒、卒業生、または教師です。このプラットフォームの構築を支援してくれた次の方々に感謝申し上げます。もし支援に参加されたい場合、こちらで参加方法をご確認ください。", "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT_BOTTOM": "Oppiaの開発チームは、<[listOfNames]>からいただいたフィードバック、アイデア、手助け、提案にも感謝しています。", "I18N_ABOUT_PAGE_CREDITS_THANK_TRANSLATEWIKI": "また、translatewiki.netによるクラウドソーシング翻訳にも感謝の念を示します。", + "I18N_ABOUT_PAGE_EASILY_CREATE_LESSON": "簡単にレッスンを作成", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "コミュニティによって作成されたレッスンを探索する", "I18N_ABOUT_PAGE_EXPLORE_LESSONS_CONTENT": "世界各地で教育者やコミュニティ参加者が Oppia の教科作成プラットフォームを使い、授業を設計し共有しています。探検ライブラリを開くと、17教科2万件の授業があり、見ているうちにご自分でも作りたくなるかもしれません。", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "寄付", 
"I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "参加", @@ -36,12 +42,20 @@ "I18N_ABOUT_PAGE_LESSON_FEATURE": "文章問題", "I18N_ABOUT_PAGE_MOBILE_FEATURE": "モバイル機器に対応した操作性", "I18N_ABOUT_PAGE_OUR_FEATURES": "このアプリの機能", + "I18N_ABOUT_PAGE_OUR_FEATURES_CONTENT": "世界中の教育者、講師、学習者によって構築されたこのプラットフォームと作成されたレッスンが、魅力的かつ効果的であり、広く利用されるように努力しています。", + "I18N_ABOUT_PAGE_OUR_OUTCOMES": "私たちの成果", + "I18N_ABOUT_PAGE_OUR_OUTCOMES_CONTENT": "私たちは、効果性と卓越性を追求しています。そのため、私たちはユーザー調査や無作為化試験を継続的に実施し、私たちのレッスンが高い水準に達しているかどうかを確認しています。", + "I18N_ABOUT_PAGE_SECTION_ONE_CONTENT": "Oppiaは、世界中の恵まれない学習者のユニークなニーズを満たすために特別に設計された、斬新で魅力的なオンライン学習のアプローチを提供します。", + "I18N_ABOUT_PAGE_SECTION_SEVEN_TITLE": "厳選されたヒントから始める", "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "これから何をしたいですか?", "I18N_ABOUT_PAGE_TABS_ABOUT": "このサイトについて", "I18N_ABOUT_PAGE_TABS_CREDITS": "クレジット", "I18N_ABOUT_PAGE_TABS_FOUNDATION": "Foundation", "I18N_ABOUT_PAGE_TEACH_BUTTON": "教えたい", "I18N_ABOUT_PAGE_TITLE": "Oppiaについて", + "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "Oppiaを始める", + "I18N_ABOUT_PAGE_WIFI_FEATURE": "必要な帯域幅が少ない", + "I18N_ACTION_ACCESS_ANDROID_APP": "Android アプリにアクセスする", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "「Teach With Oppia」に申し込む", "I18N_ACTION_BROWSE_EXPLORATIONS": "探検の一覧を見る", "I18N_ACTION_BROWSE_LESSONS": "レッスンの一覧を見る", @@ -50,8 +64,85 @@ "I18N_ACTION_CREATE_LESSON": "自分のレッスンを作る", "I18N_ACTION_CREATE_LESSON_BUTTON": "レッスンを作成", "I18N_ACTION_EXPLORE_LESSONS": "レッスンを探す", + "I18N_ACTION_GUIDE_FOR_TEACHERS": "教師のためのガイド", + "I18N_ACTION_TIPS_FOR_PARENTS": "親と保護者のためのヒント", + "I18N_ACTION_VISIT_CLASSROOM": "教室に行く", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "キャンセル", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "完了", + "I18N_ADD_NEW_SYLLABUS_ITEMS": "新しいシラバス項目", + "I18N_ANDROID_PAGE_AVAILABLE_FOR_DOWNLOAD_TEXT": "ダウンロード可能です。", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "メールアドレス", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "名前", + "I18N_ANDROID_PAGE_TITLE": "Android | オピア", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_2": "スペルを再確認してください。", + 
"I18N_ATTRIBUTION_HTML_STEP_ONE": "HTMLをコピーして貼り付ける", + "I18N_ATTRIBUTION_HTML_STEP_TWO": "リンクが\"<[linkText]>\"として表示されていることを確認する", + "I18N_ATTRIBUTION_HTML_TITLE": "HTMLの属性", + "I18N_ATTRIBUTION_PRINT_STEP_ONE": "クレジットをコピーして貼り付ける", + "I18N_ATTRIBUTION_PRINT_STEP_TWO": " \"<[link]>\"のコピーを添付する", + "I18N_ATTRIBUTION_PRINT_TITLE": "印刷物の属性", + "I18N_ATTRIBUTION_TITLE": "このレッスンを共有または再利用するための属性付けの方法", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_BREADCRUMB": "著者プロフィール", + "I18N_BLOG_CARD_PREVIEW_CONTEXT": "このように、ブログカードはトップページと著者プロフィールに表示されます。", + "I18N_BLOG_CARD_PREVIEW_HEADING": "ブログカードのプレビュー", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "名前", + "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "新しいブログの投稿を作成する", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "ブログにはまだ投稿していないようです。", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "新しい投稿", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "保存", + "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "下書き", + "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "公開済み", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "ブログ", + "I18N_BLOG_HOME_PAGE_NO_RESULTS_FOUND": "申し訳ありませんが、表示するブログ記事はありません。", + "I18N_BLOG_HOME_PAGE_POSTS_HEADING": "最新の投稿", + "I18N_BLOG_HOME_PAGE_POSTS_NUMBER_DISPLAY": "<[totalNumber]>件中<[startingNumber]>件目から<[endingNumber]>件目までの投稿を表示しています", + "I18N_BLOG_HOME_PAGE_QUERY_SEARCH_HEADING": "キーワード", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "タグ", + "I18N_BLOG_HOME_PAGE_TAG_FILTER_HOLDER_TEXT": "タグを選択", + "I18N_BLOG_HOME_PAGE_TITLE": "オピアブログ | オピア", + "I18N_BLOG_HOME_PAGE_WELCOME_HEADING": "オピアブログへようこそ!", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_HEADING": "検索結果の表示", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_DISPLAY": "<[startingNumber]>件目から<[endingNumber]>件目までの検索結果を表示しています。", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_OUT_OF_TOTAL_DISPLAY": "<[totalNumber]>件中<[startingNumber]>件目から<[endingNumber]>件目までの投稿を表示しています。", + "I18N_BLOG_POST_EDITOR_BODY_HEADING": "本文", + "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "キャンセル", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "削除", + 
"I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "サムネイル画像を編集", + "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "最終保存日時:", + "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "公開", + "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "完了", + "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "下書きとして保存", + "I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "プレビュー", + "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "タグ", + "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "サムネイル", + "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "タイトル", + "I18N_BLOG_POST_PAGE_RECOMMENDATION_SECTON_HEADING": "あなたへのおすすめ。", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "タグ", + "I18N_BLOG_POST_PAGE_TITLE": "<[blogPostTitle]> | ブログ | オピア", + "I18N_BLOG_POST_UNTITLED_HEADING": "無題", + "I18N_CLASSROOM_MATH_TITLE": "数学", + "I18N_CLASSROOM_PAGE_COMING_SOON": "近日公開", + "I18N_CLASSROOM_PAGE_TAKE_A_TEST_BUTTON": "テストを受ける", + "I18N_CLASSROOM_PAGE_TITLE": "<[classroomName]>をオピアで学ぶ | オピア", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "開始", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "続行", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "<[collectionTitle]> - オピア", + "I18N_COMING_SOON": "近日公開", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "コレクション", + "I18N_COMPLETED_STORY": "「<[story]>」を完了", + "I18N_COMPLETE_CHAPTER": "「<[topicName]>」のチャプターを完了する", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_1": "最初のチェックポイントを完了しました。よいスタートです!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_2": "最初のチェックポイントを完了しました!継続しましょう!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "素晴らしい!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_3": "チェックポイント!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "よくできました!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "素晴らしい!", + "I18N_CONTACT_PAGE_PARAGRAPH_11_HEADING": "寄付", + "I18N_CONTACT_PAGE_PARAGRAPH_13_HEADING": "プレス", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "OK", "I18N_CORRECT_FEEDBACK": "正解", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "あなたのグループリンク", + "I18N_CREATE_ACCOUNT": "アカウントを作成", "I18N_CREATE_ACTIVITY_QUESTION": "何を作りますか?", "I18N_CREATE_ACTIVITY_TITLE": "アクティビティーを作る", 
"I18N_CREATE_COLLECTION": "コレクションを作る", @@ -60,6 +151,7 @@ "I18N_CREATE_EXPLORATION_QUESTION": "探検を作りますか?", "I18N_CREATE_EXPLORATION_TITLE": "探検を作る", "I18N_CREATE_EXPLORATION_UPLOAD": "アップロード", + "I18N_CREATE_LEARNER_GROUP": "グループを作成", "I18N_CREATE_NO_THANKS": "いいえ", "I18N_CREATE_YES_PLEASE": "はい", "I18N_DASHBOARD_COLLECTIONS": "コレクション", @@ -72,11 +164,13 @@ "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TITLE": "タイトル", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TOTAL_PLAYS": "視聴回数", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_UNRESOLVED_ANSWERS": "未解決質問数", + "I18N_DASHBOARD_LESSONS": "レッスン", "I18N_DASHBOARD_OPEN_FEEDBACK": "公開フィードバック", "I18N_DASHBOARD_STATS_AVERAGE_RATING": "平均評価", "I18N_DASHBOARD_STATS_OPEN_FEEDBACK": "公開フィードバック", "I18N_DASHBOARD_STATS_TOTAL_PLAYS": "視聴回数", "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "サブスクライブしている人", + "I18N_DASHBOARD_STORIES": "ストーリー", "I18N_DASHBOARD_SUBSCRIBERS": "サブスクライブしている人", "I18N_DASHBOARD_SUGGESTIONS": "提案", "I18N_DASHBOARD_TABLE_HEADING_EXPLORATION": "探検", @@ -88,7 +182,33 @@ "I18N_DELETE_ACCOUNT_PAGE_BREADCRUMB": "アカウントの削除", "I18N_DELETE_ACCOUNT_PAGE_BUTTON": "アカウントを削除する", "I18N_DELETE_ACCOUNT_PAGE_HEADING": "アカウントを削除", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "概要", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "アカウントの削除 | オピア", + "I18N_DELETE_LEARNER_GROUP": "グループを削除", + "I18N_DIAGNOSTIC_TEST_START_BUTTON": "テストを始める", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "画像をここにドラッグしてください", + "I18N_DIRECTIVES_UPLOAD_A_FILE": "ファイルをアップロード", + "I18N_DONATE_PAGE_BREADCRUMB": "寄付", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "メンテナンス", + "I18N_DONATE_PAGE_FAQ_HEADING_TEXT": "よくある質問", + "I18N_DONATE_PAGE_FAQ_QUESTION_1": "Oppiaとは何?", + "I18N_DONATE_PAGE_FAQ_QUESTION_2": "なぜOppiaがあるのか?", + "I18N_DONATE_PAGE_FAQ_QUESTION_4": "他の教育プラットフォームとOppiaの違いは?", + "I18N_DONATE_PAGE_FAQ_QUESTION_5": "寄付控除に入りますか?", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_1": "インド、カーンプルより", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_2": "パレスチナより", + 
"I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "インドから", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "ブログを読む", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_HEADING": "購読してくださりありがとうございます!", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "メールアドレス", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_SUBSCRIBE_LABEL_TEXT": "今すぐ購読する", + "I18N_DONATE_PAGE_TITLE": "Oppia Foundationに寄付する", + "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "オピアコミュニティの意見を聞く", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "ビデオを見る", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "あなたはちょうど第1章を完了しました!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_3": "あなたはちょうど第10章を完了しました!", + "I18N_END_CHAPTER_NEXT_CHAPTER_TEXT": "次のレッスンへ!", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "次にできることはこちらのとおりです。", "I18N_ERROR_DISABLED_EXPLORATION": "無効な状態の探検", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "残念ながら、クリックした探検は現在無効です。あとで再度試してください。", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "無効な状態の探検 - Oppia", @@ -101,12 +221,15 @@ "I18N_ERROR_MESSAGE_404": "指定のページを探しましたが、残念ながら見つかりませんでした。", "I18N_ERROR_MESSAGE_500": "何らかの問題が発生しています。あなたのミスではなく、サーバー内でエラーが発生しました。", "I18N_ERROR_NEXT_STEPS": "恐らく\">ホームページに戻るのが良いでしょう。ただし、もしこの問題が再発していて改善を希望される場合、\" target=\"_blank\">イシュー・トラッカーからお知らせください。ご迷惑をお掛けします。", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "エラー <[statusCode]> | オピア", + "I18N_ERROR_PAGE_TITLE": "エラー <[statusCode]> - オピア", "I18N_ERROR_PAGE_TITLE_400": "エラー 400 - Oppia", "I18N_ERROR_PAGE_TITLE_401": "エラー 401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "エラー 404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "エラー 500 - Oppia", "I18N_FOOTER_ABOUT": "Oppiaについて", "I18N_FOOTER_ABOUT_ALL_CAPS": "OPPIAについて", + "I18N_FOOTER_ANDROID_APP": "Androidアプリ", "I18N_FOOTER_AUTHOR_PROFILES": "著者プロフィール", "I18N_FOOTER_BROWSE_LIBRARY": "ライブラリーを見る", "I18N_FOOTER_CONTACT_US": "連絡先", @@ -131,6 +254,9 @@ "I18N_HEADING_VOLUNTEER": "ボランティア", "I18N_HINT_NEED_HELP": "ヘルプが必要ですか?この問題のヒントを表示します。", "I18N_HINT_TITLE": "ヒント", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "分母にゼロを入力しないでください", + 
"I18N_INTERACTIONS_FRACTIONS_NON_EMPTY": "空白でない分数の値を入力してください。", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "答えは分数で入力してください(例:1 2/3ではなく5/3)。", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "辺を追加", "I18N_INTERACTIONS_GRAPH_ADD_NODE": "頂点を追加", "I18N_INTERACTIONS_GRAPH_DELETE": "削除", @@ -149,22 +275,29 @@ "I18N_INTERACTIONS_MUSIC_CLEAR": "消去", "I18N_INTERACTIONS_MUSIC_PLAY": "再生", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "目標のシーケンスを再生", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_VALUE": "値が分数または数字のいずれかであることを確認してください。", "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "本当にコードをリセットしますか?", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "キャンセル", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "確定が必要", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "コードをリセット", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "有効な比率を入力してください(例、1:2または1:2:3)", + "I18N_INTERACTIONS_RATIO_INVALID_FORMAT": "有効な比率を入力してください(例、1:2または1:2:3)", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "項目を追加", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "重複した項目があるようです。", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(1行に1つ入力してください。)", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "解答がありません。", "I18N_INTERACTIONS_SUBMIT": "提出", + "I18N_JOIN_LEARNER_GROUP_BUTTON": "グループに参加", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Oppiaの表示言語:", + "I18N_LEARNER_DASHBOARD_ALL": "すべて", "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "目標を編集", "I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "達成した目標", "I18N_LEARNER_DASHBOARD_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "中断したところから続行", "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "現在の目標", "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "目標", "I18N_LEARNER_DASHBOARD_HOME_SECTION": "ホーム", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "未完", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_DECLINE_INVITATION": "拒否", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "お勧め", "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "おはようございます", "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "新しいストーリーコンテンツを利用できます", @@ -172,6 
+305,37 @@ "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "進捗", "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "除去", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "ストーリーが完成しました", + "I18N_LEARNER_DASHBOARD_VIEW": "表示", + "I18N_LEARNER_GROUP_BACK_TO_SYLLABUS": "シラバスに戻る", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "次", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "前のステップ", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "説明", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "グループ名", + "I18N_LEARNER_GROUP_DETAILS_TITLE": "詳細", + "I18N_LEARNER_GROUP_FACILITATOR_LABEL_TEXT": "ファシリテーター", + "I18N_LEARNER_GROUP_GROUP_DETAILS_SECTION": "グループの詳細", + "I18N_LEARNER_GROUP_INVITE_LEARNERS": "学習者を招待する", + "I18N_LEARNER_GROUP_INVITE_LEARNER_BUTTON_TEXT": "学習者を招待する", + "I18N_LEARNER_GROUP_INVITE_LIST_TEXT": "招待リスト", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "追加", + "I18N_LEARNER_GROUP_NO_LEARNERS_HAVE_JOINED": "まだグループに参加している学習者はいません。", + "I18N_LEARNER_GROUP_NO_LEARNERS_INVITED": "まだ学習者を招待していません。", + "I18N_LEARNER_GROUP_NO_RESULTS_FOUND": "見つかりませんでした。", + "I18N_LEARNER_GROUP_OVERVIEW_TAB": "概要", + "I18N_LEARNER_GROUP_PERMISSION_NOT_GIVEN": "許可されていません", + "I18N_LEARNER_GROUP_PREFERENCES_MODAL_SAVE_BUTTON": "保存", + "I18N_LEARNER_GROUP_PREFERENCES_TAB": "個人設定", + "I18N_LEARNER_GROUP_SEARCH_BY_USERNAME": "ユーザー名で検索", + "I18N_LEARNER_GROUP_SKILLS_ANALYSIS_SECTION": "スキル分析", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "詳細を表示", + "I18N_LEARNER_GROUP_SYLLABUS_COMPLETION": "完了", + "I18N_LEARNER_GROUP_SYLLABUS_ITEM_NOT_STARTED_YET": "まだ開始されていません", + "I18N_LEARNER_GROUP_SYLLABUS_LESSONS": "レッスン", + "I18N_LEARNER_GROUP_SYLLABUS_TAB": "シラバス", + "I18N_LEARNER_GROUP_VIEW_DETAILS": "詳細を表示", + "I18N_LEARNER_GROUP_WITHDRAW_INVITE": "辞退する", + "I18N_LEAVE_LEARNER_GROUP_MODAL_BUTTON": "離れる", + "I18N_LESSON_INFO_HEADER": "レッスン情報", "I18N_LIBRARY_ALL_CATEGORIES": "すべてのカテゴリー", "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "すべてのカテゴリーが選択済み", "I18N_LIBRARY_ALL_LANGUAGES": "すべての言語", @@ -243,6 +407,9 @@ 
"I18N_LICENSE_PAGE_PARAGRAPH_2": "Oppiaを支えているソフトウェアはオープンソースで、コードApache 2.0ライセンスで提供されています。", "I18N_LICENSE_TERMS_HEADING": "ライセンス条項", "I18N_MODAL_CANCEL_BUTTON": "キャンセル", + "I18N_MODAL_REMOVE_BUTTON": "除去", + "I18N_NO": "いいえ", + "I18N_NO_RESULTS_FOUND_FOR_MATCHING_USERNAME": "ユーザー名に一致する結果が見つかりませんでした。", "I18N_ONE_SUBSCRIBER_TEXT": "サブスクライブしている人は1人です。", "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "パートナーシップ", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_HEADING": "コミュニティーガイドライン", @@ -310,6 +477,7 @@ "I18N_PLAYER_THANK_FEEDBACK": "フィードバックをありがとうございます。", "I18N_PLAYER_UNRATED": "評価なし", "I18N_PLAYER_VIEWS_TOOLTIP": "閲覧数", + "I18N_PRACTICE_SESSION_START_BUTTON_TEXT": "練習を開始", "I18N_PREFERENCES_BIO": "自己紹介", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "このフィールドはオプションです。ここに書いた内容は公開され、世界のどこからでも表示できます。", "I18N_PREFERENCES_BREADCRUMB": "個人設定", @@ -324,10 +492,12 @@ "I18N_PREFERENCES_HEADING": "個人設定", "I18N_PREFERENCES_HEADING_SUBTEXT": "このページで実行した変更は自動保存されます。", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "まだどの著者もサブスクライブしていません。", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "影響", "I18N_PREFERENCES_PAGE_TITLE": "プロフィールの個人設定を変更 - Oppia", "I18N_PREFERENCES_PICTURE": "画像", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "探検で優先する言語", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "探検を検索するとき、これらの言語がデフォルトで選択されます。", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": "優先する言語を選択します。", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "サイトで優先する言語", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "このサイトが表示される言語です。", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "サイトで優先する言語", @@ -335,7 +505,9 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "ドラッグして切り抜きまたはサイズ変更", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "エラー:画像ファイルが読み込めません。", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "プロフィール画像をアップロード", + "I18N_PREFERENCES_SEARCH_LABEL": "検索", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "優先する言語を選択...", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "サイトの言語", "I18N_PREFERENCES_SUBJECT_INTERESTS": "興味のある科目", 
"I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "例:mathematics、computer science、art、...", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "興味のある科目を新しく追加(英小文字とスペースで)...", @@ -344,13 +516,23 @@ "I18N_PREFERENCES_USERNAME": "ユーザー名", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "未選択", "I18N_PROFILE_NO_EXPLORATIONS": "このユーザーはまだ探検を作成または編集していません。", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "続けますか?", + "I18N_PROGRESS_REMINDER_RESTART_LESSON": "いいえ、最初からやり直します", + "I18N_PROGRESS_REMINDER_RESUME_LESSON": "はい、レッスンを再開します", "I18N_QUESTION_PLAYER_SCORE": "点数", + "I18N_RESTART_EXPLORATION_BUTTON": "レッスンを再開する", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "既にアカウントをお持ちですか?", + "I18N_SAVE_PROGRESS_BUTTON_TEXT": "コピー", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "コピーしました", + "I18N_SAVE_PROGRESS_TEXT": "進行状況を保存", + "I18N_SHOW_LESS": "表示を折りたたむ", + "I18N_SHOW_MORE": "さらに表示する", "I18N_SIDEBAR_ABOUT_LINK": "Oppiaについて", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "オッピア財団について", "I18N_SIDEBAR_BLOG": "ブログ", "I18N_SIDEBAR_CONTACT_US": "連絡先", "I18N_SIDEBAR_DONATE": "寄付する", "I18N_SIDEBAR_FORUM": "フォーラム", - "I18N_SIDEBAR_GET_STARTED": "始める", "I18N_SIDEBAR_LIBRARY_LINK": "ライブラリー", "I18N_SIDEBAR_OPPIA_FOUNDATION": "オッピア財団", "I18N_SIDEBAR_PARTNERSHIPS": "パートナーシップ", @@ -385,6 +567,8 @@ "I18N_SIGNUP_USERNAME": "ユーザー名", "I18N_SIGNUP_USERNAME_EXPLANATION": "ユーザー名は貢献したコンテンツの隣に表示されます。", "I18N_SIGNUP_WHY_LICENSE": "なぜCC-BY-SA?", + "I18N_SKILL_LEVEL_BEGINNER": "初心者", + "I18N_SKILL_LEVEL_INTERMIDIATE": "中級", "I18N_SOLUTION_EXPLANATION_TITLE": "説明:", "I18N_SPLASH_FIRST_EXPLORATION_DESCRIPTION": "Oppiaの授業、別名「探検」では、対話のないビデオやテキストよりも夢中になれます。ユーザーは行動することで学べます。", "I18N_SPLASH_JAVASCRIPT_ERROR_DESCRIPTION": "Oppiaは無料でオープンソースの学習プラットフォームで、「探検」と呼ばれるインタラクティブなアクティビティーが満載です。Oppiaが正しく動作するにはウェブ・ブラウザーでJavaScriptを有効にする必要がありますが、お使いのブラウザーではJavaScriptが無効になっています。JavaScriptを有効にするヘルプは\">こちらをクリックしてください。", @@ -397,16 +581,26 @@ "I18N_SPLASH_THIRD_EXPLORATION_DESCRIPTION": "Oppiaでは、想像し得る限りさまざまな科目の探検を作ることも共有することもできます。", "I18N_SPLASH_TITLE": 
"すべての人に無料の教育", "I18N_SUBSCRIBE_BUTTON_TEXT": "サブスクライブする", + "I18N_SYLLABUS_SKILL_TITLE": "スキル", + "I18N_TIME_FOR_BREAK_FOOTER": "レッスンを続けます", + "I18N_TIME_FOR_BREAK_TITLE": "休憩しますか?", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "乗算", + "I18N_TOPIC_LEARN": "学ぶ", + "I18N_TOPIC_TITLE": "トピック", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "このトピックのレッスンが利用できるようになったら、後ほどまたお越しください。", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "このトピックに関する練習問題が利用可能になる頃にまたお越しください。", "I18N_TOPNAV_ABOUT": "このサイトについて", "I18N_TOPNAV_ABOUT_OPPIA": "Oppiaについて", "I18N_TOPNAV_ADMIN_PAGE": "管理者用ページ", + "I18N_TOPNAV_ANDROID_APP_HEADING": "Androidアプリ", "I18N_TOPNAV_BLOG": "ブログ", "I18N_TOPNAV_CONTACT_US": "連絡先", "I18N_TOPNAV_CREATOR_DASHBOARD": "著者ダッシュボード", "I18N_TOPNAV_DONATE": "寄付する", "I18N_TOPNAV_FORUM": "フォーラム", "I18N_TOPNAV_GET_STARTED": "始める", - "I18N_TOPNAV_LIBRARY": "ライブラリー", + "I18N_TOPNAV_LEARNER_GROUP": "学習者グループ", + "I18N_TOPNAV_LIBRARY": "コミュニティーライブラリー", "I18N_TOPNAV_LOGOUT": "ログアウト", "I18N_TOPNAV_MODERATOR_PAGE": "モデレーター用ページ", "I18N_TOPNAV_OPPIA_FOUNDATION": "Oppia Foundation", @@ -415,5 +609,7 @@ "I18N_TOPNAV_SIGN_IN": "サインイン", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Teach with Oppia", "I18N_TOTAL_SUBSCRIBERS_TEXT": "サブスクライブしている人は合計<[totalSubscribers]>人です。", - "I18N_UNSUBSCRIBE_BUTTON_TEXT": "サブスクライブ解除" + "I18N_UNSUBSCRIBE_BUTTON_TEXT": "サブスクライブ解除", + "I18N_VOLUNTEER_PAGE_TITLE": "ボランティア | Oppia", + "I18N_YES": "はい" } diff --git a/assets/i18n/kab.json b/assets/i18n/kab.json index bc310c4bfda0..62ffe9c3a721 100644 --- a/assets/i18n/kab.json +++ b/assets/i18n/kab.json @@ -1,7 +1,7 @@ { - "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Rnu taremt", + "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Rnu asnirem", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "ɣef usentel i tḥemmleḍ.", - "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Mudd azal i tikti", + "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Lqeḍ-d tikta", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK_TEXT": "I usnarni n taremt-ik", "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "Ɣef Oppia", 
"I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_1": "Tuɣdaḍt n Oppia d amuddu n ufus i yal yiwen ad yissin ayen yebɣa s wudem amellil d zhu.", @@ -14,10 +14,14 @@ "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_8": "Telliḍ d aselmad K-12, d anelmad s ugerdas neɣ d amdan iḥemmlen asentel ibanen, diɣ tebɣiḍ ad tebḍuḍ tamusni-ik, Oppia yeqqar-ak anṣuf yis-k. Ddu ɣer uẓeṭṭa syin bdu asnirem yid-neɣ.", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "Suffeɣ-d & Bḍu", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE_TEXT": "asnulfu-ik d tmezdagnut", + "I18N_ABOUT_PAGE_AUDIO_SUBTITLES_FEATURE": "Iduzwilen n umeslaw", + "I18N_ABOUT_PAGE_BREADCRUMB": "Ɣef", "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "Ismaden", "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT": "Iwiziwen n Oppia usan-d seg umaḍal meṛṛa —ddeqs seg-neɣ d inelmaden akked yiselmaden imaynuten. Nebɣa ad nesnemmer iwiziwen-a i d-imudden afus di bennu n usmel-a. Ma tebɣiḍ ad tmuddeḍ afus , ha-t-a wamek ara tettekkiḍ!", "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT_BOTTOM": "Tarbaɛt n tneflit n Oppia tesnemmer-ik ɣef yiwenniten, tiktiwin, tallelt akked yisumar n <[listOfNames]>.", "I18N_ABOUT_PAGE_CREDITS_THANK_TRANSLATEWIKI": "NEssaram daɣen ad nesnemmer translatewiki.net ɣef tsuqqilt s uttekki.", + "I18N_ABOUT_PAGE_EASILY_CREATE_LESSON": "Rnu timsirin s tefsest", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "Snirem timsirin i d-tga temɣiwent", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "Mudd idrimen", "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "Ttekki", "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "Tasbeddit Oppia", @@ -28,17 +32,53 @@ "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4_HEADING": "Inemhalen", "I18N_ABOUT_PAGE_HEADING": "Oppia : asegmi i yal yiwen", "I18N_ABOUT_PAGE_LEARN_BUTTON": "Bɣiɣ ad lemdeɣ", - "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "Acu tebɣiḍ ass-a?", + "I18N_ABOUT_PAGE_LESSON_FEATURE": "Timsirin i yebnan ɣef teqsiḍin", + "I18N_ABOUT_PAGE_OUR_FEATURES": "Timahilin-nneɣ", + "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "Acu tebɣiḍ ad t-tgeḍ ass-a?", "I18N_ABOUT_PAGE_TABS_ABOUT": "Ɣef", 
"I18N_ABOUT_PAGE_TABS_CREDITS": "Ismaden", "I18N_ABOUT_PAGE_TABS_FOUNDATION": "Tasbeddit", "I18N_ABOUT_PAGE_TEACH_BUTTON": "Bɣiɣ ad seɣṛeɣ", "I18N_ABOUT_PAGE_TITLE": "Ɣef - Oppia", + "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "Aha bdu akked Oppia", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Ddu ɣer Teach With Oppia", "I18N_ACTION_BROWSE_EXPLORATIONS": "Wali asnirem-ik", "I18N_ACTION_BROWSE_LESSONS": "Snirem timsirin-nneɣ", "I18N_ACTION_CREATE_EXPLORATION": "Rnu asnirem", "I18N_ACTION_CREATE_LESSON": "Rnu tamsirt-ik", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Sefsex", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "Yemmed", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "Tansa n yimayl", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Isem", + "I18N_ANDROID_PAGE_TITLE": "Android | Oppia", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "Selɣu-yi-d", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_BREADCRUMB": "Amaɣnu n umeskar", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TITLE": "Ablug | Ameskar | Oppia", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Isem", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Tasuffeɣ tamaynut", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "Sekles", + "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Irewwayen", + "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Teffeɣ-d", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Ablug", + "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Rnu tugna n tenfult", + "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Tafekka", + "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Sefsex", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Kkes", + "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "Ẓreg tugnan n tenfult", + "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "Asekles aneggaru ɣef", + "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "Suffeɣ-d", + "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "Immed", + "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "Sekles-it am urewway", + "I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "Taskant", + "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "Tibzimin", + 
"I18N_BLOG_POST_EDITOR_TAGS_LIMIT_PREFIX": "Talast n", + "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "ugar n tebzimin zemrent ad ttwarnunt.", + "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "Tanfult", + "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "Azwel", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "Fren afaylu neɣ zuɣer-it-id ɣer da", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Tuccḍa: Ur izmir ara ad d-iɣeṛ afaylu n tugna.", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "Rnu tanfult", + "I18N_BLOG_POST_UNTITLED_HEADING": "War azwel", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Takarḍa-a ɣezzifet nezzeh, inelmaden izmer ad asen-iruḥ lebɣi ad tt-ɣren. Ɛreḍ ad tt-teswezleḍ neɣ ad tt-tebḍuḍ ɣef snat n tkerḍiwin.", "I18N_CLASSROOM_CALLOUT_BUTTON": "Snirem", "I18N_CLASSROOM_CALLOUT_HEADING_1": "Izaduren n tussnakt", @@ -132,6 +172,8 @@ "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "Ma tesɛiḍ isteqsiyen neɣ kra i k-iceɣben ɣef ukala n tukksa n umiḍan, ttxil azen imayl ɣer privacy@oppia.org.", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Ẓuɣer tugna ɣer temnaḍt-a", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Azen afaylu", + "I18N_DONATE_PAGE_TITLE": "Mudd tawsa i tkebbanit n Oppia", + "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Sell i temɣiwent-nneɣ Oppia", "I18N_ERROR_DISABLED_EXPLORATION": "Sens asnirem", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Suref-aɣ imi asnirem iɣef tsenndeḍ yensa tura. 
Ɛreḍ ticki.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Asnirem yensa- Oppia", @@ -189,6 +231,7 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_9": "Ticki inelmaden ttaẓen deg usnirem-ik, zemren ad k-d-aznen tamawt akken ad k-id-lɣun ɣef wuguren neɣ ad bḍun tiktiwin n usnerni.", "I18N_GET_STARTED_PAGE_PARAGRAPH_9_HEADING": "Snerni asnirem-ik", "I18N_GET_STARTED_PAGE_TITLE": "Bdu", + "I18N_HINT_TITLE": "Taxbalut", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Sekcem beṭṭu s umasal \"x/y\", neɣ amḍan s umasal \"A x/y\".", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Sekcem beṭṭu s umasal x/y.", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "Rnu amder", @@ -475,7 +518,6 @@ "I18N_SIDEBAR_CONTACT_US": "Nermes-aɣ-d", "I18N_SIDEBAR_DONATE": "Mudd idrimen", "I18N_SIDEBAR_FORUM": "Anmager", - "I18N_SIDEBAR_GET_STARTED": "Bdu", "I18N_SIDEBAR_LIBRARY_LINK": "Tanedlist", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Tasbeddit Oppia", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Selmed s Oppia", @@ -560,7 +602,6 @@ "I18N_TOPNAV_ABOUT_OPPIA": "Ɣef Oppia", "I18N_TOPNAV_ADMIN_PAGE": "Asebter n unedbal", "I18N_TOPNAV_BLOG": "Ablug", - "I18N_TOPNAV_CLASSROOM": "Taneɣrit", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Tussnakt tazadurt", "I18N_TOPNAV_CONTACT_US": "Nermes-aɣ-d", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Tafelwit n usenqed n umettekki", @@ -569,6 +610,7 @@ "I18N_TOPNAV_FORUM": "Anmager", "I18N_TOPNAV_GET_INVOLVED": "Ttekki", "I18N_TOPNAV_GET_STARTED": "Bdu", + "I18N_TOPNAV_LEARN": "Taneɣrit", "I18N_TOPNAV_LEARNER_DASHBOARD": "Tafelwit n usenqed n unelmad", "I18N_TOPNAV_LIBRARY": "Tanedlist", "I18N_TOPNAV_LOGOUT": "Tuffɣa", diff --git a/assets/i18n/ko.json b/assets/i18n/ko.json index a0e9125d8f05..fb701f4d434b 100644 --- a/assets/i18n/ko.json +++ b/assets/i18n/ko.json @@ -1,4 +1,5 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Oppia 재단 정보", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "코드 리셋", "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "Oppia 정보", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "게시하고 공유하기", @@ -28,6 +29,8 @@ 
"I18N_ACTION_GUIDE_FOR_TEACHERS": "교사를 위한 가이드", "I18N_ACTION_TIPS_FOR_PARENTS": "부모와 보호자를 위한 조언", "I18N_BLOG_CARD_PREVIEW_HEADING": "블로그 카드 미리 보기", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "이름", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "저장", "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "섬네일 이미지 추가", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "본문", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "취소", @@ -45,7 +48,23 @@ "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "파일을 선택하거나 여기로 끌어오십시오", "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "오류: 이미지 파일을 읽지 못했습니다.", "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "섬네일 추가", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_HEADING": "이미 수학을 어느 정도 알고 있습니까?", + "I18N_CLASSROOM_PAGE_BEGIN_WITH_FIRST_TOPIC_BUTTON": "<[firstTopic]> 주제로 시작", "I18N_CLASSROOM_PAGE_COMING_SOON": "곧 게시됩니다", + "I18N_CLASSROOM_PAGE_NEW_TO_MATH_HEADING": "수학이 처음이십니까?", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_1": "첫 번째 체크포인트를 완료했습니다! 좋은 시작입니다!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_2": "첫 번째 체크포인트를 잘 마쳤습니다! 계속하십시오!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_3": "시작이 좋습니다! 계속하십시오!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_1": "체크포인트를 완료했습니다! 잘 했습니다!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_1": "절반을 마쳤습니다. 곧 끝납니다!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_2": "절반을 마쳤습니다. 잘하셨습니다!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_1": "이제 하나 남았습니다!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_2": "갑시다! 이제 하나 남았습니다!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_1": "잘 하고 있습니다! 계속하십시오!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_1": "거의 다 왔습니다! 계속하십시오!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_2": "거의 끝까지 왔습니다! 계속하십시오!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_3": "잘 하셨습니다! 
거의 결승점에 도달했습니다!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "만세!", "I18N_CONTACT_PAGE_BREADCRUMB": "연락하기", "I18N_CONTACT_PAGE_HEADING": "참여하기!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Oppia 프로젝트 지원에 관심을 주셔서 감사합니다!", @@ -56,9 +75,12 @@ "I18N_CONTACT_PAGE_PARAGRAPH_2_HEADING": "우리는 모두 자원봉사자입니다", "I18N_CONTACT_PAGE_PARAGRAPH_3_HEADING": "Oppia가 다른 학습 플랫폼과 어떻게 다릅니까", "I18N_CONTACT_PAGE_PARAGRAPH_4_HEADING": "돕는 방법", + "I18N_CONTACT_PAGE_TITLE": "연락처 | Oppia", "I18N_CONTINUE_REGISTRATION": "등록 계속", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "확인", "I18N_CORRECT_FEEDBACK": "정답입니다!", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "내 그룹 링크", + "I18N_CREATE_ACCOUNT": "계정 만들기", "I18N_CREATE_ACTIVITY_QUESTION": "무엇을 만드시겠습니까?", "I18N_CREATE_ACTIVITY_TITLE": "활동 추가", "I18N_CREATE_COLLECTION": "모음집 만들기", @@ -67,6 +89,7 @@ "I18N_CREATE_EXPLORATION_QUESTION": "탐험을 생성하시겠습니까?", "I18N_CREATE_EXPLORATION_TITLE": "탐험 만들기", "I18N_CREATE_EXPLORATION_UPLOAD": "업로드", + "I18N_CREATE_LEARNER_GROUP": "그룹 만들기", "I18N_CREATE_NO_THANKS": "아니오, 괜찮습니다", "I18N_CREATE_YES_PLEASE": "예!", "I18N_DASHBOARD_COLLECTIONS": "모음집", @@ -89,9 +112,30 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "사용자 통계", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "개요", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "이것은 당신이 Oppia 계정을 삭제할 수 있는 페이지로 보내 드립니다.", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "계정 삭제 | Oppia", + "I18N_DIAGNOSTIC_TEST_CURRENT_PROGRESS": "현재 진척률: <[progressPercentage]> %", + "I18N_DIAGNOSTIC_TEST_EXIT_TEST": "테스트 끝내기", + "I18N_DIAGNOSTIC_TEST_HEADING": "학습자 진단 테스트", + "I18N_DIAGNOSTIC_TEST_RESULT_HEADER_TEXT": "테스트를 마쳤습니다. 
고생하셨습니다!", + "I18N_DIAGNOSTIC_TEST_RESULT_START_TOPIC": "<[topicName]> 시작", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "그림을 여기로 끌어놓으십시오", "I18N_DIRECTIVES_UPLOAD_A_FILE": "파일 올리기", "I18N_DONATE_PAGE_BREADCRUMB": "기부", + "I18N_DONATE_PAGE_BUDGET_HEADING": "당신의 돈은 어디로 갑니까?", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "유지보수", + "I18N_DONATE_PAGE_FAQ_QUESTION_1": "Oppia는 무엇입니까?", + "I18N_DONATE_PAGE_FAQ_QUESTION_2": "Oppia의 존재 이유는 무엇입니까?", + "I18N_DONATE_PAGE_FAQ_QUESTION_5": "이 기부금 세금 공제가 되나요?", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "블로그 읽기", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_HEADING": "구독해주셔서 고맙습니다!", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "이메일 주소", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_NAME_LABEL_TEXT": "이름 (선택 사항)", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_SUBSCRIBE_LABEL_TEXT": "지금 구독", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_1": "오늘 참여하세요!", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "기부해주셔서 고맙습니다!", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "동영상 보기", + "I18N_EMPTY_LEARNER_GROUPS_MESSAGE": "아직 그룹이 없습니다", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "다음에 수행할 수 있는 작업이 여기 있습니다!", "I18N_ERROR_DISABLED_EXPLORATION": "사용할 수 없는 탐험", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "죄송합니다, 클릭하신 탐험은 현재 사용할 수 없습니다. 
나중에 다시 시도해 주세요.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "사용할 수 없는 탐험 - Oppia", @@ -110,6 +154,7 @@ "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "익명", "I18N_FOOTER_ABOUT": "정보", "I18N_FOOTER_ABOUT_ALL_CAPS": "OPPIA 정보", + "I18N_FOOTER_ANDROID_APP": "안드로이드 앱", "I18N_FOOTER_AUTHOR_PROFILES": "제작자 프로필", "I18N_FOOTER_BROWSE_LIBRARY": "라이브러리 탐색하기", "I18N_FOOTER_CONTACT_US": "문의하기", @@ -137,10 +182,12 @@ "I18N_GET_STARTED_PAGE_TITLE": "시작하기", "I18N_GOT_IT": "알겠습니다", "I18N_HINT_TITLE": "힌트", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "코드 편집기로 이동", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "테두리 추가", "I18N_INTERACTIONS_GRAPH_ADD_NODE": "점 추가", "I18N_INTERACTIONS_GRAPH_DELETE": "삭제", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "유효하지 않은 그래프!", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "그래프 만들기", "I18N_INTERACTIONS_GRAPH_MOVE": "이동", "I18N_INTERACTIONS_GRAPH_RESET_BUTTON": "초기화", "I18N_INTERACTIONS_GRAPH_RESPONSE_EDGE": "<[edges]> 테두리", @@ -149,13 +196,19 @@ "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "더 많은 항목을 선택할 수 있습니다.", "I18N_INTERACTIONS_MUSIC_CLEAR": "지우기", "I18N_INTERACTIONS_MUSIC_PLAY": "재생", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "유효한 통화를 입력해 주십시오 (예: $5 또는 Rs 5)", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "답변은 유효한 숫자여야 합니다.", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "답변은 0 이상이어야 합니다.", + "I18N_INTERACTIONS_NUMERIC_INPUT_MINUS_AT_BEGINNING": "마이너스(-) 부호는 처음에만 오는 것이 허용됩니다.", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "취소", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "확인 필요", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "코드 편집기 표시", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "항목 추가", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "정답을 입력하지 않았습니다.", "I18N_INTERACTIONS_SUBMIT": "제출", "I18N_LANGUAGE_FOOTER_VIEW_IN": "다음 언어로 Oppia 보기:", "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "좋은 오후입니다", + "I18N_LEARNER_DASHBOARD_ALL": "모두", "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "목표 수정", 
"I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "성취된 목표", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "완료", @@ -168,6 +221,7 @@ "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "이 흥미진진한 여행을 시작해요!", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "새로운 내용 배우기", "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "좋은 아침입니다", + "I18N_LEARNER_DASHBOARD_PAGE_TITLE": "학습자 대시보드 | Oppia", "I18N_LEARNER_DASHBOARD_PLAYLIST_SECTION": "나중에 플레이", "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "진행도", "I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE": "플레이하고 싶은 순서대로 활동을 드래그하여 위치를 재조정하세요!", @@ -181,7 +235,20 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_NO_CURRENT_STATE": "이런! 이 상태는 더 이상 존재하지 않습니다!", "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "제안:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "제안", + "I18N_LEARNER_DASHBOARD_VIEW": "보기", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "제안 보기", + "I18N_LEARNER_GROUPS_SECTION_TITLE": "학습자 그룹", + "I18N_LEARNER_GROUP_ADD_GROUP_DETAILS": "그룹 세부 정보 추가", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "다음", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "이전 단계", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "그룹 제목", + "I18N_LEARNER_GROUP_INVITE_LIST_TEXT": "초대 목록", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "추가됨", + "I18N_LEARNER_GROUP_NO_ITEMS_ADDED": "아직 새 항목을 추가하지 않았습니다.", + "I18N_LEARNER_GROUP_NO_RESULTS_FOUND": "결과가 없습니다.", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "자세한 내용 보기", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "수업 저자", + "I18N_LESSON_INFO_HEADER": "수업 정보", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "이것을 완수하셨습니다", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "이미 플레이리스트에 추가됨", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "'나중에 재생' 목록에 추가", @@ -247,15 +314,21 @@ "I18N_LIBRARY_VIEWS_TOOLTIP": "조회수", "I18N_LIBRARY_VIEW_ALL": "모두 보기", "I18N_LICENSE_PAGE_LICENSE_HEADING": "라이선스", + "I18N_LICENSE_PAGE_TITLE": "라이선스 페이지 | Oppia", + "I18N_LOGIN_PAGE_TITLE": "로그인 | Oppia", "I18N_LOGOUT_LOADING": "로그아웃하기", + 
"I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "로그아웃 | Oppia", "I18N_LOGOUT_PAGE_TITLE": "로그아웃", "I18N_MODAL_CANCEL_BUTTON": "취소", "I18N_MODAL_CONTINUE_BUTTON": "계속", + "I18N_NO": "아니오", "I18N_ONE_SUBSCRIBER_TEXT": "당신에게 1명의 구독자가 있습니다.", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1_HEADING": "삭제 절차 진행 중", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2_HEADING": "삭제 세부사항", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_TITLE": "보류중인 계정 삭제 | Oppia", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_HEADING": "공동체 지침", "I18N_PLAYBOOK_HEADING": "작성자 지침", + "I18N_PLAYBOOK_PAGE_TITLE": "크리에이터 가이드라인 | Oppia", "I18N_PLAYBOOK_PUBLICATION_POLICY_HEADING": "게시 정책", "I18N_PLAYER_AUDIO_EXPAND_TEXT": "소리", "I18N_PLAYER_AUDIO_LANGUAGE": "언어", @@ -311,6 +384,7 @@ "I18N_PLAYER_TAGS_TOOLTIP": "태그", "I18N_PLAYER_THANK_FEEDBACK": "의견 주셔서 감사합니다!", "I18N_PLAYER_VIEWS_TOOLTIP": "조회수", + "I18N_PRACTICE_SESSION_PAGE_TITLE": "연습 세션: <[topicName]> - Oppia", "I18N_PREFERENCES_BIO": "자기 소개", "I18N_PREFERENCES_BREADCRUMB": "환경 설정", "I18N_PREFERENCES_CANCEL_BUTTON": "취소", @@ -320,6 +394,7 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "이 사이트에 대한 소식이나 업데이트 받기", "I18N_PREFERENCES_HEADING": "환경 설정", "I18N_PREFERENCES_HEADING_SUBTEXT": "당신이 이 페이지에 한 모든 것들은 자동 저장됩니다.", + "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE": "환경 설정 | Oppia", "I18N_PREFERENCES_PICTURE": "사진", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "선호하는 오디오 언어", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE_PLACEHOLDER": "선호하는 오디오 언어", @@ -335,19 +410,31 @@ "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "당신이 구독한 개발자", "I18N_PREFERENCES_USERNAME": "사용자 이름", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "아직 선택하지 않음", + "I18N_PRIVACY_POLICY_PAGE_TITLE": "개인정보보호정책 | Oppia", + "I18N_PROFILE_PAGE_TITLE": "프로필 | Oppia", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "계속하시겠습니까?", + "I18N_PROGRESS_REMINDER_RESTART_LESSON": "아니요, 처음부터 다시 시작합니다", + "I18N_PROGRESS_REMINDER_RESUME_LESSON": "예, 수업을 이어서 진행합니다", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "나의 대시보드", "I18N_QUESTION_PLAYER_NEW_SESSION": "새 세션", 
"I18N_QUESTION_PLAYER_TEST_FAILED": "세션을 실패했습니다. 스킬을 확인 후 다시 시도해 주십시오", "I18N_QUESTION_PLAYER_TEST_PASSED": "세션을 마쳤습니다. 고생하셨습니다!", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "등록 세션 만료", - "I18N_SIDEBAR_ABOUT_LINK": "Oppia 정보", + "I18N_REVIEW_TEST_PAGE_TITLE": "리뷰 시험: <[storyName]> - Oppia", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "계정이 이미 있습니까?", + "I18N_SHOW_LESS": "덜 보기", + "I18N_SHOW_MORE": "더 보기", + "I18N_SIDEBAR_ABOUT_LINK": "정보", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "Oppia 재단 정보", "I18N_SIDEBAR_BLOG": "블로그", "I18N_SIDEBAR_CLASSROOM": "교실", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "기초수학", "I18N_SIDEBAR_CONTACT_US": "문의하기", "I18N_SIDEBAR_DONATE": "기부", "I18N_SIDEBAR_FORUM": "포럼", - "I18N_SIDEBAR_GET_STARTED": "시작하기", + "I18N_SIDEBAR_GET_INVOLVED": "참여하기", + "I18N_SIDEBAR_HOME": "홈", + "I18N_SIDEBAR_LEARN": "알아보기", "I18N_SIDEBAR_LIBRARY_LINK": "라이브러리", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Oppia 재단", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Teach with Oppia", @@ -372,6 +459,7 @@ "I18N_SIGNUP_LOADING": "불러오는 중", "I18N_SIGNUP_PAGE_TITLE": "공동체 가입하기 - Oppia", "I18N_SIGNUP_REGISTRATION": "등록", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "다시는 묻지 않습니다", "I18N_SIGNUP_SEND_ME_NEWS": "이 사이트에 대한 소식과 업데이트를 받겠습니다", "I18N_SIGNUP_USERNAME": "사용자 이름", "I18N_SIGNUP_USERNAME_EXPLANATION": "당신의 사용자 이름은 당신의 기여 다음에 나타납니다.", @@ -393,6 +481,7 @@ "I18N_START_HERE": "시작하려면 여기를 클릭하세요!", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - 완료!", "I18N_SUBSCRIBE_BUTTON_TEXT": "구독", + "I18N_SYLLABUS_STORY_TITLE": "스토리", "I18N_TEACH_BENEFITS_TITLE": "우리의 장점", "I18N_TEACH_BENEFITS_TWO": "재밌는 스토리 기반 수업", "I18N_TEACH_PAGE_ACTION_START_LEARNING": "학습 시작", @@ -400,7 +489,12 @@ "I18N_TEACH_PAGE_HEADING": "세상 속 학생들을 도와주세요", "I18N_TEACH_PAGE_LIBRARY_BUTTON": "라이브러리 둘러보기", "I18N_TEACH_PAGE_SIX_TITLE": "오늘 학습을 시작하기", + "I18N_TEACH_PAGE_TITLE": "부모와 선생을 위한 Oppia 사용 가이드 | Oppia", "I18N_TEACH_STUDENT_DETAILS_1": "리야 소나기", + "I18N_TERMS_PAGE_TITLE": "이용약관 | Oppia", + "I18N_THANKS_PAGE_TITLE": "감사 표현 | Oppia", + 
"I18N_TOPIC_LEARN": "알아보기", + "I18N_TOPIC_TITLE": "주제", "I18N_TOPIC_VIEWER_DESCRIPTION": "설명", "I18N_TOPIC_VIEWER_PRACTICE": "연습", "I18N_TOPIC_VIEWER_START_PRACTICE": "시작", @@ -409,23 +503,32 @@ "I18N_TOPNAV_ABOUT": "정보", "I18N_TOPNAV_ABOUT_OPPIA": "Oppia 정보", "I18N_TOPNAV_ADMIN_PAGE": "관리자 페이지", + "I18N_TOPNAV_ANDROID_APP_HEADING": "안드로이드 앱", "I18N_TOPNAV_BLOG": "블로그", "I18N_TOPNAV_BLOG_DASHBOARD": "블로그 대시보드", - "I18N_TOPNAV_CLASSROOM": "교실", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "기초수학", "I18N_TOPNAV_CONTACT_US": "문의하기", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "기여자 대시보드", "I18N_TOPNAV_DONATE": "기부", "I18N_TOPNAV_FORUM": "포럼", "I18N_TOPNAV_GET_INVOLVED": "참여하기", "I18N_TOPNAV_GET_STARTED": "시작하기", + "I18N_TOPNAV_HOME": "홈", + "I18N_TOPNAV_LEARN": "배우기", "I18N_TOPNAV_LEARNER_DASHBOARD": "학습자 대시보드", + "I18N_TOPNAV_LEARN_HEADING": "더 알아보는 방법", + "I18N_TOPNAV_LEARN_LINK_2": "계속 알아보기", + "I18N_TOPNAV_LIBRARY": "커뮤니티 라이브러리", "I18N_TOPNAV_LOGOUT": "로그아웃", "I18N_TOPNAV_OPPIA_FOUNDATION": "Oppia 재단", + "I18N_TOPNAV_PARTNERSHIPS": "학교 및 단체", "I18N_TOPNAV_PREFERENCES": "환경 설정", "I18N_TOPNAV_SIGN_IN": "로그인", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Google로 로그인", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Oppia로 가르치기", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "오늘 사용해 보세요!", "I18N_TOTAL_SUBSCRIBERS_TEXT": "당신은 총 <[totalSubscribers]>명의 구독자가 있습니다.", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "구독 해지", - "I18N_WARNING_MODAL_TITLE": "경고!" 
+ "I18N_WARNING_MODAL_TITLE": "경고!", + "I18N_YES": "예" } diff --git a/assets/i18n/kum.json b/assets/i18n/kum.json index 248d63280ac2..9e8d885452e6 100644 --- a/assets/i18n/kum.json +++ b/assets/i18n/kum.json @@ -133,7 +133,6 @@ "I18N_SIDEBAR_BLOG": "Блог", "I18N_SIDEBAR_DONATE": "Акъча багъышламакъ", "I18N_SIDEBAR_FORUM": "Форум", - "I18N_SIDEBAR_GET_STARTED": "Башламакъ", "I18N_SIDEBAR_LIBRARY_LINK": "Китапхана", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Oppia булан бирге уьйретмек", "I18N_SIGNUP_CLOSE_BUTTON": "Япмакъ", diff --git a/assets/i18n/lb.json b/assets/i18n/lb.json index efa6b7c2292d..7e20997bf2a4 100644 --- a/assets/i18n/lb.json +++ b/assets/i18n/lb.json @@ -14,7 +14,23 @@ "I18N_ABOUT_PAGE_TABS_CREDITS": "Merci", "I18N_ABOUT_PAGE_TABS_FOUNDATION": "Fondatioun", "I18N_ABOUT_PAGE_TITLE": "Iwwer | Oppia", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "E-Mail-Adress", + "I18N_ANDROID_PAGE_FEATURE_TEXT_3": "Léiert an Ärer Sprooch", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Numm", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "Mech notifiéieren", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "Änneren Äre Benotzernumm an Är Biographie", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Numm", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Blog", + "I18N_BLOG_HOME_PAGE_QUERY_SEARCH_HEADING": "Stéchwierder", + "I18N_BLOG_POST_PAGE_RECOMMENDATION_SECTON_HEADING": "Fir Iech proposéiert.", + "I18N_CLASSROOM_MATH_TITLE": "Mathematik", "I18N_CLASSROOM_PAGE_COMING_SOON": "Deemnächst", + "I18N_CLASSROOM_PAGE_TAKE_A_TEST_BUTTON": "Maacht en Test", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Virufueren", + "I18N_COMING_SOON": "Deemnächst!", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "KOLLEKTIOUN", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "Hurra!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "Genial!", "I18N_CONTACT_PAGE_BREADCRUMB": "Kontakt", "I18N_CONTACT_PAGE_HEADING": "Maacht mat", "I18N_CONTACT_PAGE_PARAGRAPH_10_HEADING": "Den Internetsite verbesseren a verwalten", @@ -54,9 
+70,25 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_1_2": "Benotzerstatistiken", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "Benotzerstatistiken", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Iwwersiicht", + "I18N_DIAGNOSTIC_TEST_START_BUTTON": "Den Test ufänken", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "E Bild an dëse Beräich zéien", "I18N_DIRECTIVES_UPLOAD_A_FILE": "E Fichier eroplueden", "I18N_DONATE_PAGE_BREADCRUMB": "Maacht en Don", + "I18N_DONATE_PAGE_BUDGET_HEADING": "Wat geschitt mat Äre Suen?", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "Ënnerhalt", + "I18N_DONATE_PAGE_FAQ_ANSWER_10": "Fir allgemeng Froen iwwer Oppia, kontaktéiert eis wgl. iwwer contact@oppia.org.", + "I18N_DONATE_PAGE_FAQ_QUESTION_1": "Wat ass Oppia?", + "I18N_DONATE_PAGE_FAQ_QUESTION_2": "Firwat gëtt et Oppia?", + "I18N_DONATE_PAGE_FAQ_QUESTION_9": "Gëtt et ee mat deem ech schwätze kann, wann ech interesséiert sinn Geschäftspartner ze ginn?", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_2": "Aus Palestina", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "Aus Indien", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "Liest eise Blog", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_HEADING": "Merci fir d'Abonéieren", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "E-Mail-Adress", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_SUBSCRIBE_LABEL_TEXT": "Abonnéiert Iech elo", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_1": "Maacht nach haut bei eis mat!", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "Merci fir Ären Don!", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "E Video kucken", "I18N_ERROR_HEADER_400": "Feeler 400", "I18N_ERROR_HEADER_401": "Feeler 401", "I18N_ERROR_HEADER_404": "Feeler 404", @@ -65,6 +97,9 @@ "I18N_ERROR_PAGE_TITLE_401": "Feeler 401 – Oppia", "I18N_ERROR_PAGE_TITLE_404": "Feeler 404 – Oppia", "I18N_ERROR_PAGE_TITLE_500": "Feeler 500 - Oppia", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "Wat ass e Ratio?", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE": "Wat ass Divisioun?", + 
"I18N_EXPLORATION_aAkDKVDR53cG_TITLE": "Zuele vergläichen", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anonym", "I18N_FOOTER_ABOUT": "Iwwer", "I18N_FOOTER_ABOUT_ALL_CAPS": "IWWER OPPIA", @@ -87,14 +122,19 @@ "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Gitt e Broch ënner der Form \"x/y\", oder eng gemëscht Zuel ënner der Form \"A x/y\" an.", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Gitt e Broch ënner der Form x/y an.", "I18N_INTERACTIONS_GRAPH_DELETE": "Läschen", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "Grafik weisen", "I18N_INTERACTIONS_GRAPH_MOVE": "Réckelen", "I18N_INTERACTIONS_GRAPH_RESET_BUTTON": "Zrécksetzen", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Etikett aktualiséieren", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Gewiicht aktualiséieren", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Op d'Bild klicken", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Sicht e Bild eraus fir ze weisen]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Dir kënnt méi Optiounen eraussichen.", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "Klickt op d'Kaart", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "Kaart weisen", "I18N_INTERACTIONS_MUSIC_CLEAR": "Eidel maachen", "I18N_INTERACTIONS_MUSIC_PLAY": "Ofspillen", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "D'Äntwert muss eng valabel Zuel sinn.", "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "Sidd Dir sécher datt Dir Äre Code zrécksetze wëllt?", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Ofbriechen", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Confirmatioun erfuerdert", @@ -103,15 +143,18 @@ "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Keng Äntwert ginn.", "I18N_INTERACTIONS_SUBMIT": "Späicheren", "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Gudde Mëtteg", + "I18N_LEARNER_DASHBOARD_ALL": "All", "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Ziler änneren", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "Fäerdeg", 
"I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Aktuell Ziler", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "Fänkt u mat ", "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "Gudden Owend", "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "Fir d'lescht gespillt", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "Äntwerten", "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Ziler", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "Am Gaang", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Kommt mir fänken dës interessant Rees un!", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "Är Gruppen", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Eppes Neies léieren", "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Gudde Moien", "I18N_LEARNER_DASHBOARD_PLAYLIST_SECTION": "Méi spéit spillen", @@ -126,7 +169,13 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_DESCRIPTION": "Kuerz Beschreiwung vun den Ännerungen:", "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Virgeschlo:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Virschlag", + "I18N_LEARNER_DASHBOARD_VIEW": "Weisen", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Virschlag weisen", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "Beschreiwung", + "I18N_LEARNER_GROUP_DETAILS_TITLE": "Detailer", + "I18N_LEARNER_GROUP_PREFERENCES_MODAL_SAVE_BUTTON": "Späicheren", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_OPTION_FALSE": "Nee, vläicht méi spéit", + "I18N_LEAVE_LEARNER_GROUP_MODAL_BUTTON": "Fortgoen", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Dir sidd heimat fäerdeg", "I18N_LIBRARY_ALL_CATEGORIES": "All Kategorien", "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "All Kategorien ugewielt", @@ -182,8 +231,10 @@ "I18N_LIBRARY_VIEW_ALL": "All weisen", "I18N_LICENSE_PAGE_LICENSE_HEADING": "Lizenz", "I18N_LICENSE_TERMS_HEADING": "Lizenzbedingungen", + "I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "Ausloggen | Oppia", "I18N_MODAL_CANCEL_BUTTON": "Ofbriechen", "I18N_MODAL_CONTINUE_BUTTON": "Virufueren", + "I18N_NO": "Neen", 
"I18N_ONE_SUBSCRIBER_TEXT": "Dir hutt 1 Abonnent.", "I18N_PLAYER_AUDIO_EXPAND_TEXT": "Der Lexioun nolauschteren", "I18N_PLAYER_AUDIO_LANGUAGE": "Sprooch", @@ -218,6 +269,7 @@ "I18N_PLAYER_SUBMIT_BUTTON": "Späicheren", "I18N_PLAYER_THANK_FEEDBACK": "Merci fir de Feedback!", "I18N_PLAYER_UNRATED": "Net bewäert", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "Audio Sprooch", "I18N_PREFERENCES_BIO": "Biographie", "I18N_PREFERENCES_BREADCRUMB": "Astellungen", "I18N_PREFERENCES_CANCEL_BUTTON": "Ofbriechen", @@ -226,27 +278,37 @@ "I18N_PREFERENCES_EMAIL_EXPLAIN": "Nëmme Moderateuren an Admine vum Site kënnen Är E-mail-Adress gesinn.", "I18N_PREFERENCES_HEADING": "Astellungen", "I18N_PREFERENCES_HEADING_SUBTEXT": "All Ännerungen déi Dir op dëser Säit maacht gëtt automatesch gespäichert.", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "Impakt", "I18N_PREFERENCES_PICTURE": "Bild", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "Dëst ass d'Sprooch an där de Site gewise gëtt.", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Feeler: De Bildfichier konnt net gelies ginn.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Profilbild eroplueden", + "I18N_PREFERENCES_SEARCH_LABEL": "Sichen", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Sicht Är Sproochen eraus...", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "Sprooch vum Site", "I18N_PREFERENCES_USERNAME": "Benotzernumm", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Nach net erausgesicht", "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Gitt méi iwwer Äre Score gewuer", "I18N_QUESTION_PLAYER_NEW_SESSION": "Nei Sessioun", "I18N_QUESTION_PLAYER_RETRY_TEST": "Test nach eng Kéier probéieren", "I18N_QUESTION_PLAYER_SCORE": "Bewäertung", + "I18N_RESET_CODE": "Code zrécksetzen", "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Test nokucken", - "I18N_SIDEBAR_ABOUT_LINK": "Iwwer Oppia", + "I18N_SHOW_LESS": "Manner weisen", + "I18N_SHOW_MORE": "Méi weisen", + "I18N_SIDEBAR_ABOUT_LINK": "Iwwer eis", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "Iwwert d'Oppia 
Foundation", "I18N_SIDEBAR_CLASSROOM": "Klassenzëmmer", "I18N_SIDEBAR_CONTACT_US": "Kontaktéiert Eis", "I18N_SIDEBAR_DONATE": "Maacht en Don", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Fir unzefänken", + "I18N_SIDEBAR_GET_INVOLVED": "Maacht mat", + "I18N_SIDEBAR_LEARN": "Léieren", "I18N_SIDEBAR_LIBRARY_LINK": "Bibliothéik", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Oppia Foundation", "I18N_SIDEBAR_PARTNERSHIPS": "Partnerschaften", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "Multiplikatioun", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "All Lektioune weisen", "I18N_SIDEBAR_VOLUNTEER": "Fräiwëllegen", "I18N_SIGNUP_CC_TITLE": "Creative-Commons-Lizenz", "I18N_SIGNUP_CLOSE_BUTTON": "Zoumaachen", @@ -272,29 +334,49 @@ "I18N_SPLASH_SITE_FEEDBACK": "Feedback vum Site", "I18N_SPLASH_TITLE": "Gratis Bildung fir Jiddereen", "I18N_START_HERE": "Klickt hei fir unzefänken!", - "I18N_SUBSCRIBE_BUTTON_TEXT": "Abonnéieren", + "I18N_STORY_Qu6THxP29tOy_TITLE": "Maya, Omar a Malik maachen eng Pizza!", + "I18N_SUBSCRIBE_BUTTON_TEXT": "Abonéieren", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "Wat ass e Ratio?", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE": "Zuelen ronnen", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "Probleemléisung", + "I18N_SUBTOPIC_sWBXKH4PZcK6_adding-numbers_TITLE": "Zuelen addéieren", + "I18N_SUBTOPIC_sWBXKH4PZcK6_estimation_TITLE": "Estimatioun", + "I18N_SUBTOPIC_sWBXKH4PZcK6_sequences _TITLE": "Sequenzen", "I18N_TEACH_PAGE_HEADING": "Studenten op der ganzer Welt hëllefen", "I18N_THANKS_PAGE_BREADCRUMB": "Merci", + "I18N_TIME_FOR_BREAK_FOOTER": "Ech si prett fir mat der Lektioun virunzefueren", + "I18N_TIME_FOR_BREAK_TITLE": "Zäit fir eng Paus?", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "Bréch", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "Multiplikatioun", + "I18N_TOPIC_LEARN": "Léieren", + "I18N_TOPIC_TITLE": "Thema", "I18N_TOPIC_VIEWER_CHAPTER": "Kapitel", + "I18N_TOPIC_VIEWER_COMING_SOON": "Deemnächst!", "I18N_TOPIC_VIEWER_DESCRIPTION": 
"Beschreiwung", "I18N_TOPIC_VIEWER_LESSONS": "Lektiounen", "I18N_TOPIC_VIEWER_REVISION": "Versioun", "I18N_TOPIC_VIEWER_STORY": "Geschicht", "I18N_TOPIC_VIEWER_VIEW_ALL": "All weisen", + "I18N_TOPIC_qW12maD4hiA8_TITLE": "Divisioun", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "Additioun a Soustractioun", "I18N_TOPNAV_ABOUT": "Iwwer", "I18N_TOPNAV_ABOUT_OPPIA": "Iwwer Oppia", - "I18N_TOPNAV_CLASSROOM": "Klassenzëmmer", "I18N_TOPNAV_CONTACT_US": "Kontaktéiert Eis", "I18N_TOPNAV_DONATE": "Maacht en Don", "I18N_TOPNAV_FORUM": "Forum", "I18N_TOPNAV_GET_INVOLVED": "Maacht mat", "I18N_TOPNAV_GET_STARTED": "Fir unzefänken", + "I18N_TOPNAV_HOME": "Haaptsäit", + "I18N_TOPNAV_LEARN": "Léieren", + "I18N_TOPNAV_LEARN_LINK_1": "All Lektioune weisen", "I18N_TOPNAV_LIBRARY": "Bibliothéik", "I18N_TOPNAV_LOGOUT": "Ausloggen", "I18N_TOPNAV_OPPIA_FOUNDATION": "D'Oppia Foundation", "I18N_TOPNAV_PREFERENCES": "Astellungen", "I18N_TOPNAV_SIGN_IN": "Aloggen", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Alogge mat Google", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "Probéiert et haut!", "I18N_TOTAL_SUBSCRIBERS_TEXT": "Dir hutt am Ganzen <[totalSubscribers]> Abonnenten.", - "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Ofbestellen" + "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Ofbestellen", + "I18N_YES": "Jo" } diff --git a/assets/i18n/lt.json b/assets/i18n/lt.json index e14d56bd7d28..b604ff2b8329 100644 --- a/assets/i18n/lt.json +++ b/assets/i18n/lt.json @@ -2,6 +2,22 @@ "I18N_ABOUT_PAGE_TITLE": "Apie | Oppia", "I18N_ACTION_BROWSE_LESSONS": "Naršyti mūsų Pamokas", "I18N_ACTION_CREATE_LESSON": "Sukurti savo Pamoką", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "tinklaraštis", + "I18N_BLOG_HOME_PAGE_NO_RESULTS_FOUND": "Atsiprašome, tinklaraščio įrašų, kuriuos būtų galima rodyti, nėra", + "I18N_BLOG_HOME_PAGE_OPPIA_DESCRIPTION": "Kurti bendruomenę, kuri teiktų kokybišką išsilavinimą tiems, kuriems jo trūksta.", + "I18N_BLOG_HOME_PAGE_POSTS_HEADING": "Naujausi įrašai", + "I18N_BLOG_HOME_PAGE_POSTS_NUMBER_DISPLAY": "Rodomas <[totalNumber]> įrašų 
<[startingNumber]> - <[endingNumber]>.", + "I18N_BLOG_HOME_PAGE_QUERY_SEARCH_HEADING": "Raktažodžiai", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "Žyma", + "I18N_BLOG_HOME_PAGE_TAG_FILTER_HOLDER_TEXT": "Pasirinkite žymas", + "I18N_BLOG_HOME_PAGE_TITLE": "\"Oppia\" tinklaraštis | \"Oppia\"", + "I18N_BLOG_HOME_PAGE_WELCOME_HEADING": "Sveiki atvykę į tinklaraštį \"Oppia\"!", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_HEADING": "Rodomi paieškos rezultatai", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_DISPLAY": "Rodomi visų paieškos rezultatų <[startingNumber]> - <[endingNumber]>.", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_OUT_OF_TOTAL_DISPLAY": "Rodomas <[totalNumber]> įrašų <[startingNumber]> - <[endingNumber]> .", + "I18N_BLOG_POST_PAGE_RECOMMENDATION_SECTON_HEADING": "Siūloma Jums", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "Žymos", + "I18N_BLOG_POST_PAGE_TITLE": "<[blogPostTitle]> | Tinklaraštis | \"Oppia\"", "I18N_CREATE_ACTIVITY_QUESTION": "Ką norite sukurti?", "I18N_CREATE_ACTIVITY_TITLE": "Sukurti Veiklą", "I18N_CREATE_COLLECTION": "Sukurti Kolekciją", diff --git a/assets/i18n/mk.json b/assets/i18n/mk.json index b21aa99fcfff..737a44600b04 100644 --- a/assets/i18n/mk.json +++ b/assets/i18n/mk.json @@ -17,6 +17,7 @@ "I18N_CONTACT_PAGE_HEADING": "Вклучете се!", "I18N_CONTACT_PAGE_PARAGRAPH_11_HEADING": "Дарувања", "I18N_CONTACT_PAGE_PARAGRAPH_15_HEADING": "Безбедност", + "I18N_CREATE_ACCOUNT": "Создај сметка", "I18N_CREATE_ACTIVITY_QUESTION": "Што сакате да создадете?", "I18N_CREATE_ACTIVITY_TITLE": "Создајте активност", "I18N_CREATE_COLLECTION": "Создајте збирка", @@ -29,7 +30,7 @@ "I18N_CREATE_YES_PLEASE": "Да, секако!", "I18N_CREATOR_IMPACT": "Влијание", "I18N_DASHBOARD_COLLECTIONS": "збирки", - "I18N_DASHBOARD_CREATOR_DASHBOARD": "Табла на креаторот", + "I18N_DASHBOARD_CREATOR_DASHBOARD": "Управувачница на создавачот", "I18N_DASHBOARD_EXPLORATIONS": "истражувања", "I18N_DASHBOARD_EXPLORATIONS_EMPTY_MESSAGE": "Изгледа дека досега не сте создале никакви истражувања. 
Ајде да почнеме!", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY": "Подреди по", @@ -136,6 +137,10 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Предложен:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Предлог", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Преглед на предлогот", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "Следно", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "Претходен чекор", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "Наслов на групата", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "Додадено", "I18N_LIBRARY_ALL_CATEGORIES": "Сите категории", "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "Сите избрани категории", "I18N_LIBRARY_ALL_LANGUAGES": "Сите јазици", @@ -288,14 +293,17 @@ "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "Создавачи на кои сте претплатени", "I18N_PREFERENCES_USERNAME": "Корисничко име", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Сè уште неизбрано", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "Дали сакате да продолжите?", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Моја управувачница", "I18N_QUESTION_PLAYER_SCORE": "Оценка", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "Веќе имате сметка?", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_5": "Напишете или копирајте ја врската подолу", + "I18N_SAVE_PROGRESS_TEXT": "Зачувај напредок", "I18N_SIDEBAR_ABOUT_LINK": "За нас", "I18N_SIDEBAR_BLOG": "Блог", "I18N_SIDEBAR_CONTACT_US": "Контактирајте нè", "I18N_SIDEBAR_DONATE": "Дарувајте", "I18N_SIDEBAR_FORUM": "Форум", - "I18N_SIDEBAR_GET_STARTED": "Започнете", "I18N_SIDEBAR_LIBRARY_LINK": "Библиотека", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Подучувајте со Опија", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Штиклирајќи го кутивчето лево од текстов, признавате, се согласувате и прифаќате дека сте обврзани од <[sitename]> Условите на употреба, кои се сместени тука.", @@ -320,6 +328,7 @@ "I18N_SIGNUP_LOADING": "Вчитувам", "I18N_SIGNUP_PAGE_TITLE": "Вклучете се во заедницата — Опија", "I18N_SIGNUP_REGISTRATION": "Зачленување", + 
"I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "Не ме прашувај повеќе", "I18N_SIGNUP_SEND_ME_NEWS": "Испраќај ми новости за Опија", "I18N_SIGNUP_SITE_DESCRIPTION": "<[sitename]> е отворено заедничко складиште на учебни ресурси. Сите материјали на него се за слободна употреба, пренамена и споделување.", "I18N_SIGNUP_SITE_OBJECTIVE": "<[sitename]> постои за да го поттикне создавањето и постојаното подобрување на квалитетни ресурси, секому ставени слободно на располагање.", diff --git a/assets/i18n/my.json b/assets/i18n/my.json index cb76af51aeb1..85f7c7eddbe7 100644 --- a/assets/i18n/my.json +++ b/assets/i18n/my.json @@ -19,6 +19,9 @@ "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TITLE": "ခေါင်းစဉ်", "I18N_DASHBOARD_SUGGESTIONS": "အကြံပေးမှုများ", "I18N_DIRECTIVES_UPLOAD_A_FILE": "ဖိုင်တစ်ခု တင်ရန်", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "ထိန်းသိမ်းပြုပြင်", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "အီးမေးလ် လိပ်စာ", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_NAME_LABEL_TEXT": "အမည် (ချန်လှပ်ထားနိုင်သည်)", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "အမည်မသိ", "I18N_FOOTER_ABOUT": "အကြောင်း", "I18N_FOOTER_CONTACT_US": "မိမိတို့အား ဆက်သွယ်ရန်", @@ -35,6 +38,8 @@ "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "မလုပ်တော့ပါ", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "အတည်ပြုချက် လိုအပ်သည်", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "ပြီးဆုံးသည်", + "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "မင်္ဂလာ ညနေခင်းပါ", + "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "မင်္ဂလာ မနက်ခင်းပါ", "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "ဖယ်ရှားရန်", "I18N_LEARNER_DASHBOARD_RETURN_TO_FEEDBACK_THREADS_MESSAGE": "မက်ဆေ့များစာရင်းသို့ ပြန်သွားရန်", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "ပို့ရန်", @@ -131,6 +136,7 @@ "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "ပရိုဖိုင်ပုံ တင်ရန်", "I18N_PREFERENCES_USERNAME": "အသုံးပြုသူအမည်", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "မရွေးချယ်ရသေးပါ", + "I18N_QUESTION_PLAYER_NEW_SESSION": "ပြန်လည်ပြသရန်", 
"I18N_SIDEBAR_ABOUT_LINK": "အကြောင်း", "I18N_SIDEBAR_BLOG": "ဘလော့", "I18N_SIDEBAR_CONTACT_US": "မိမိတို့ကို ဆက်သွယ်ရန်", diff --git a/assets/i18n/nb.json b/assets/i18n/nb.json index dd71888ec9da..1b845dc5b65a 100644 --- a/assets/i18n/nb.json +++ b/assets/i18n/nb.json @@ -49,6 +49,8 @@ "I18N_DASHBOARD_TABLE_HEADING_UNRESOLVED_ANSWERS": "Uløste svar", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Dra et bilde til dette området", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Last opp en fil", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "Du har akkurat ferdiglest ditt første kapittel!", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "Her er hva du kan gjøre videre!", "I18N_ERROR_HEADER_400": "400-feil", "I18N_ERROR_HEADER_401": "401-feil", "I18N_ERROR_HEADER_404": "404-feil", diff --git a/assets/i18n/ne.json b/assets/i18n/ne.json index 6910f5c01f46..b14922025eaf 100644 --- a/assets/i18n/ne.json +++ b/assets/i18n/ne.json @@ -5,20 +5,31 @@ "I18N_ACTION_BROWSE_LESSONS": "हाम्रा पाठहरू हेर्नुहोस्", "I18N_ACTION_CREATE_EXPLORATION": "अन्वेषण निर्माण गर्नुहोस्", "I18N_ACTION_CREATE_LESSON": "तपाईँको आफ्नै पाठ निर्माण गर्नुहोस्", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "रद्द गर्नुहोस्", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "भयो", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "मलाइ खबर गर्नु", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "मेट्ने", "I18N_CLASSROOM_CALLOUT_BUTTON": "अन्वेषण", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "हुर्रे!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "अति उत्तम!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "राम्रो काम!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "राम्रो काम!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "राम्रो काम!", "I18N_CREATE_ACTIVITY_QUESTION": "के बनाउन चाहनुहुन्छ?", "I18N_CREATE_ACTIVITY_TITLE": "क्रियाकलाप बनाउनुहोस्", "I18N_CREATE_COLLECTION": "सङ्ग्रह बनाउनुहोस्", "I18N_CREATE_EXPLORATION": "अन्वेषण निर्माण गर्नुहोस्", "I18N_CREATE_EXPLORATION_CREATE": "बनाउनुहोस्", - "I18N_CREATE_EXPLORATION_QUESTION": "के तपाईँ 
अन्वेषण निर्माण गर्न चाहनुहुन्छ?", + "I18N_CREATE_EXPLORATION_QUESTION": "के तपाईं अन्वेषण निर्माण गर्न चाहनुहुन्छ?", "I18N_CREATE_EXPLORATION_TITLE": "अन्वेषण निर्माण गर्नुहोस्", "I18N_CREATE_EXPLORATION_UPLOAD": "अपलोड गर्ने", + "I18N_CREATE_LEARNER_GROUP": "समूह सिर्जना गर्नुहोस्", "I18N_CREATE_NO_THANKS": "हैन पर्दैन", "I18N_CREATE_YES_PLEASE": "हो, धन्यवाद!", "I18N_DASHBOARD_COLLECTIONS": "सङ्ग्रहहरू", "I18N_DASHBOARD_EXPLORATIONS": "अन्वेषण", "I18N_DASHBOARD_SUGGESTIONS": "सुझावहरू", + "I18N_DELETE_LEARNER_GROUP": "समूह मेटाउनुहोस्", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Sorry, but the exploration you clicked on is currently disabled. कृपया केही समय पछि पुनः प्रयास गर्नुहोस् ।", "I18N_ERROR_MESSAGE_500": "Something went horribly wrong. But it wasn't your fault. एउटा आन्तरिक त्रुटि रहन गयो ।", "I18N_ERROR_NEXT_STEPS": "The best thing to do now is probably to return to the \">home page. However, if this issue recurs, and you think it shouldn't, please let us know on our \" target=\"_blank\">issue tracker. 
यसको लागि माफ गर्नुहोस्।", @@ -63,6 +74,17 @@ "I18N_INTERACTIONS_MUSIC_PLAY": "बजाउनुहोस्", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "रद्द गर्नुहोस्", "I18N_INTERACTIONS_SUBMIT": "पेश गर्नुहोस्", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "अर्को", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "समूह शीर्षक", + "I18N_LEARNER_GROUP_GROUP_DETAILS_SECTION": "समूह विवरणहरू", + "I18N_LEARNER_GROUP_INVITE_LIST_TEXT": "निमन्त्रणा सूची", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "थपियो", + "I18N_LEARNER_GROUP_NO_RESULTS_FOUND": "कुनै नतिजाहरू भेटिएनन्।", + "I18N_LEARNER_GROUP_PREFERENCES_TAB": "अभिरुचीहरू", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "विवरणहरू हेर्नुहोस्", + "I18N_LEARNER_GROUP_SYLLABUS_ITEM_NOT_STARTED_YET": "अझै सुरु भएको छैन", + "I18N_LEARNER_GROUP_VIEW_DETAILS": "विवरणहरू हेर्नुहोस्", + "I18N_LEARNER_GROUP_WITHDRAW_INVITE": "फिर्ता लिनुहोस", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "बाद्यसूचीमा थप्नुहोस्", "I18N_LIBRARY_ALL_LANGUAGES": "सबै भाषाहरू", "I18N_LIBRARY_CATEGORIES_BUSINESS": "ब्यवसाय", @@ -100,6 +122,7 @@ "I18N_LIBRARY_GROUPS_LANGUAGES": "भाषाहरू", "I18N_LIBRARY_LOADING": "लोड हुँदैछ", "I18N_LIBRARY_VIEWS_TOOLTIP": "दृश्यहरू", + "I18N_MODAL_REMOVE_BUTTON": "हटाउनुहोस्", "I18N_PLAYBOOK_TAB_PARTICIPATION_PLAYBOOK": "सहभागिता निर्देशिका", "I18N_PLAYER_AUDIO_LANGUAGE": "भाषा", "I18N_PLAYER_AUDIO_MIGHT_NOT_MATCH_TEXT": "अडियोले पाठसँग पूर्णतया मेल नखान सक्छ", @@ -122,11 +145,11 @@ "I18N_PLAYER_SUBMIT_BUTTON": "पेश गर्नुहोस्", "I18N_PLAYER_TAGS_TOOLTIP": "ट्यागहरू", "I18N_PLAYER_VIEWS_TOOLTIP": "दृश्यहरू", - "I18N_PREFERENCES_BREADCRUMB": "प्राथमिकताहरू", + "I18N_PREFERENCES_BREADCRUMB": "अभिरुचीहरू", "I18N_PREFERENCES_CANCEL_BUTTON": "रद्द गर्नुहोस्", "I18N_PREFERENCES_CHANGE_PICTURE": "प्रोफाइल तस्वीर परिवर्तन गर्नुहोस्", "I18N_PREFERENCES_EMAIL": "इमेल", - "I18N_PREFERENCES_HEADING": "प्राथमिकताहरू", + "I18N_PREFERENCES_HEADING": "अभिरुचीहरू", "I18N_PREFERENCES_PICTURE": "तस्वीर", "I18N_PREFERENCES_USERNAME": "प्रयोगकर्ताको नाम", 
"I18N_SIDEBAR_ABOUT_LINK": "बारेमा", @@ -134,7 +157,6 @@ "I18N_SIDEBAR_CONTACT_US": "हामीलाई सम्पर्क गर्नुहोस्", "I18N_SIDEBAR_DONATE": "दान गर्नेDonate", "I18N_SIDEBAR_FORUM": "फोरम", - "I18N_SIDEBAR_GET_STARTED": "सुरु गर्नुहोस्", "I18N_SIDEBAR_LIBRARY_LINK": "पुस्तकालय", "I18N_SIDEBAR_OPPIA_FOUNDATION": "ओप्पिया फाउण्डेशन", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Oppia को साथमा पढाउनुहोस्", @@ -142,11 +164,16 @@ "I18N_SIGNUP_EMAIL": "इमेल", "I18N_SIGNUP_LOADING": "खुल्दै छ…", "I18N_SIGNUP_REGISTRATION": "दर्ता", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "मलाई फेरि नसोध्नुहोस्", "I18N_SIGNUP_USERNAME": "प्रयोगकर्ताको नाम", + "I18N_SKILL_LEVEL_INTERMIDIATE": "मध्यवर्ती", + "I18N_SKILL_LEVEL_NEEDS_WORK": "काम चाहिन्छ", "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "धन्यवाद ।", "I18N_SPLASH_TITLE": "पुस्तक भन्दा बाहिर सोच्नुहोस्।", "I18N_START_HERE": "शुरू गर्न यहाँ क्लिक गर्नुहोस्!", - "I18N_SUBSCRIBE_BUTTON_TEXT": "सवस्क्रिप्ट", + "I18N_SUBSCRIBE_BUTTON_TEXT": "सदस्यता लिनुहोस्", + "I18N_SYLLABUS_SKILL_TITLE": "सीप", + "I18N_SYLLABUS_STORY_TITLE": "कथा", "I18N_TOPNAV_ABOUT": "बारेमा", "I18N_TOPNAV_ABOUT_OPPIA": "Oppia को बारेमा", "I18N_TOPNAV_ADMIN_PAGE": "व्यवस्थापन पृष्ठ", @@ -160,9 +187,9 @@ "I18N_TOPNAV_LOGOUT": "लगआउट", "I18N_TOPNAV_MODERATOR_PAGE": "पृष्ठ व्यवस्थापन गर्नुहोस्", "I18N_TOPNAV_OPPIA_FOUNDATION": "Oppia फाउन्डेसन", - "I18N_TOPNAV_PREFERENCES": "प्राथमिकताहरू", + "I18N_TOPNAV_PREFERENCES": "अभिरुचीहरू", "I18N_TOPNAV_SIGN_IN": "प्रवेश", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Oppia को साथमा पढाउनुहोस्", - "I18N_TOTAL_SUBSCRIBERS_TEXT": "तपाईँका कूल <[totalSubscribers]> सदस्यहरू छन्।", + "I18N_TOTAL_SUBSCRIBERS_TEXT": "तपाईंको कूल <[totalSubscribers]> सदस्यहरू छन्।", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "सदस्यता खारेज गर्नुहोस्" } diff --git a/assets/i18n/nl.json b/assets/i18n/nl.json index 64e60db1f168..ab830c47d928 100644 --- a/assets/i18n/nl.json +++ b/assets/i18n/nl.json @@ -1,5 +1,5 @@ { - "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Over de stichting", + 
"I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Over de stichting Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Verkenning maken", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "over een onderwerp dat je bezighoudt.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Terugkoppeling krijgen", @@ -54,6 +54,7 @@ "I18N_ABOUT_PAGE_TITLE": "Over | Oppia", "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "Beginnen met Oppia", "I18N_ABOUT_PAGE_WIFI_FEATURE": "Lage bandbreedte vereist", + "I18N_ACTION_ACCESS_ANDROID_APP": "De Android-app ophalen", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Aanmelden voor \"Teach With Oppia\"", "I18N_ACTION_BROWSE_EXPLORATIONS": "Onze verkenningen bekijken", "I18N_ACTION_BROWSE_LESSONS": "Onze lessen bekijken", @@ -65,6 +66,7 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "Gids voor docenten", "I18N_ACTION_TIPS_FOR_PARENTS": "Tips voor ouders en begeleiders", "I18N_ACTION_VISIT_CLASSROOM": "Klaslokaal bezoeken", + "I18N_ANDROID_PAGE_TITLE": "Android | Oppia", "I18N_ATTRIBUTION_HTML_STEP_ONE": "Kopieer en plak de HTML-code", "I18N_ATTRIBUTION_HTML_STEP_TWO": "Controleer dat de koppeling eruit ziet als \"<[linkText]>\"", "I18N_ATTRIBUTION_HTML_TITLE": "Naamsvermelding in HTML", @@ -75,7 +77,7 @@ "I18N_BLOG_CARD_PREVIEW_CONTEXT": "Zo wordt de blogkaart weergegeven op de startpagina en je auteursprofiel.", "I18N_BLOG_CARD_PREVIEW_HEADING": "Voorvertoning blogkaart", "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "Nieuw blogbericht maken", - "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "Het lijkt erop dat je nog geen verhaal hebt gemaakt!", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "Het lijkt erop dat je nog geen blogartikel hebt geschreven!", "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Nieuw bericht", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Werkversies", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Gepubliceerd", @@ -98,13 +100,14 @@ "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Fout: het afbeeldingsbestand wordt niet herkend.", "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "Miniatuurafbeelding toevoegen", 
"I18N_BLOG_POST_UNTITLED_HEADING": "Zonder titel", - "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "De inhoud van deze kaart is te lang. Hou het korter dan 4500 tekens om op te slaan.", + "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "De inhoud van deze kaart is te lang. Houd het korter dan 4500 tekens om op te slaan.", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Deze kaart is vrij lang en leerlingen kunnen hun interesse verliezen. Overweeg de kaart korter te maken of op te splitsen in meerdere kaarten.", "I18N_CHAPTER_COMPLETION": "Gefeliciteerd met het voltooien van het hoofdstuk!", "I18N_CLASSROOM_CALLOUT_BUTTON": "Verkennen", "I18N_CLASSROOM_CALLOUT_HEADING_1": "Wiskundige basis", "I18N_CLASSROOM_CALLOUT_HEADING_2": "Even voorstellen: Het klaslokaal van Oppia", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Bekijk de eerste uitgebreide cursus in het geheel vernieuwde klaslokal van Oppia! Gecontroleerde lessen - beoordeeld door docenten - zodat je elementaire wiskundige vaardigheden onder de knie kan krijgen in onderwerpen variërend van getallen tot vermenigvuldigen en delen.", + "I18N_CLASSROOM_MATH_TITLE": "Wiskunde", "I18N_CLASSROOM_PAGE_COMING_SOON": "Binnenkort", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Cursusdetails", "I18N_CLASSROOM_PAGE_HEADING": "Het klaslokaal van Oppia", @@ -195,7 +198,7 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "Wijzigingen aan openbare verkenningen en verzamelingen met een andere eigenaar", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_3": "Commits voor onderwerpen, verhalen, vaardigheden en vragen", "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "Om de verwijdering te bevestigen, voer je je gebruikersnaam in het onderstaande veld in en druk je op de knop 'Mijn account verwijderen'. Deze handeling kan niet ongedaan worden gemaakt.", - "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Deze handeling verwijdert dit gebruikersaccount en ook alle privégegevens die aan dit account zijn gekoppeld. 
Gegevens die al openbaar zijn, worden geanonimiseerd zodat ze niet aan dit account kunnen worden gekoppeld. Sommige van de onderstaande categorieën zijn mogelijk niet van toepassing op je account.", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Hiermee wordt dit gebruikersaccount verwijderd en ook alle privégegevens die aan dit account zijn gekoppeld. Gegevens die al openbaar zijn, worden geanonimiseerd zodat ze niet aan dit account kunnen worden gekoppeld, met uitzondering van back-upgegevens (die 6 maanden worden bewaard). Sommige van de onderstaande categorieën zijn mogelijk niet van toepassing op uw account.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Overzicht", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "Hier zijn alle typen gegevens die worden verwijderd:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "Hier zijn alle typen gegevens die worden geanonimiseerd:", @@ -206,10 +209,9 @@ "I18N_DIRECTIVES_UPLOAD_A_FILE": "Bestand uploaden", "I18N_DONATE_PAGE_BREADCRUMB": "Doneren", "I18N_DONATE_PAGE_IMAGE_TITLE": "Jouw vrijgevige donatie financiert:", - "I18N_DONATE_PAGE_TITLE": "Doneer aan de
Oppia Foundation", + "I18N_DONATE_PAGE_TITLE": "Doneer aan de Oppia Foundation", "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Hoor van onze Oppia-gemeenschap", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "In 2012 begon Oppia met een eenvoudig idee: het verbeteren van de toegang tot en kwaliteit van onderwijs voor leerlingen over de hele wereld. Deze visie is sindsdien uitgegroeid tot een educatief platform met meer dan 11.000 verkenningen die wereldwijd door meer dan 430.000 gebruikers zijn gebruikt.", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "Doneer aan The Oppia Foundation, een in de VS geregistreerde 501(c)(3) non-profitorganisatie, en help ons om mensen overal ter wereld het plezier van lesgeven en leren te brengen.", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "Hier heb je nog meer dingen om te doen!", "I18N_ERROR_DISABLED_EXPLORATION": "Verkenning uitgeschakeld", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "De verkenning waar je op hebt geklikt is helaas uitgeschakeld. Probeer het later opnieuw.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Uitgeschakelde verkenning - Oppia", @@ -226,6 +228,8 @@ "I18N_ERROR_PAGE_TITLE_401": "Fout 401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "Fout 404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "Fout 500 - Oppia", + "I18N_EXPLORATION_WulCxGAmGE61_TITLE": "Delen door veelvouden van tien", + "I18N_EXPLORATION_umPkwp0L1M0-_TITLE": "Wat is een breuk?", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anoniem", "I18N_FOOTER_ABOUT": "Over", "I18N_FOOTER_ABOUT_ALL_CAPS": "OVER OPPIA", @@ -294,7 +298,7 @@ "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Gewicht bijwerken", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Selecteer een weer te geven afbeelding]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Je mag meerdere mogelijkheden selecteren.", - "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Selecteer ten minste één optie.} other{Selecteer ten minste # opties.}}", + "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, 
one{Selecteer alle juiste opties.} other{Selecteer ten minste # opties.}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{Er mag maximaal één optie geselecteerd worden.} other{Er mogen maximaal # opties geselecteerd worden.}}", "I18N_INTERACTIONS_MUSIC_CLEAR": "Wissen", "I18N_INTERACTIONS_MUSIC_PLAY": "Afspelen", @@ -310,6 +314,7 @@ "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Geen antwoord gegeven.", "I18N_INTERACTIONS_SUBMIT": "Opslaan", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Oppia bekijken in het:", + "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Goedemiddag", "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Doelstellingen bewerken", "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Brons", "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Gemeenschapslessen", @@ -328,7 +333,7 @@ "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_SET_A_GOAL": "doel in te stellen!", "I18N_LEARNER_DASHBOARD_EMPTY_CURRENT_GOALS_SECTION": "Kies hieronder een doel en begin met leren!", "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "Het lijkt erop dat er geen verkenningen in je lijst 'Later afspelen' staan. Ga naar de bibliotheek en maak je eigen samengestelde afspeellijst!", - "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "Je hebt nog geen actieve feedbackthreads. Uw feedback helpt de kwaliteit van onze lessen te verbeteren. U kunt dit doen door een van onze lessen te starten en uw waardevolle feedback in te dienen!", + "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "Je hebt nog geen actieve terugkoppelingsgesprekken. Je terugkoppeling helpt de kwaliteit van onze lessen te verbeteren. Je kunt dit doen door een van onze lessen te starten en je waardevolle terugkoppeling te geven!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "Het lijkt erop dat je op dit moment geen gedeeltelijk afgeronde verzamelingen hebt. 
Ga naar de bibliotheek om een spannende nieuwe verzameling te starten!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "Het lijkt erop dat je momenteel geen gedeeltelijk voltooide verkenningen hebt. Ga naar de bibliotheek om een spannende nieuwe ontdekkingstocht te beginnen!", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "Aan de slag door ", @@ -336,6 +341,7 @@ "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "een doel stellen! ", "I18N_LEARNER_DASHBOARD_EMPTY_SUBSCRIPTIONS": "Het lijkt erop dat je je nog niet bent geabonneerd op makers. Ga naar de bibliotheek om nieuwe makers en hun geweldige verkenningen te ontdekken!", "I18N_LEARNER_DASHBOARD_EMPTY_SUGGESTED_FOR_YOU_SECTION": "Wauw, je hebt al onze themalessen voltooid! Voel je vrij om andere verkenningen te bekijken op onze pagina Lessen van de gemeenschap pagina", + "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "Goedenavond", "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "Laatste afgespeeld", "I18N_LEARNER_DASHBOARD_FEEDBACK_SECTION": "Terugkoppeling", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "Reageren", @@ -348,6 +354,7 @@ "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "Je hebt nog geen verkenningen geprobeerd.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Laten we beginnen aan deze spannende reis!", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Iets nieuws leren", + "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Goedemorgen", "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "Er zijn nieuwe verhaalonderdelen beschikbaar", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COLLECTIONS_FROM_PLAYLIST": "{numberNonexistent, plural, one{één van de verzamelingen in je lijst 'Laten afspelen' is niet langer beschikbaar. Dat spijt ons} other{# verzamelingen in je lijst 'Laten afspelen' is niet langer beschikbaar. 
Dat spijt ons}}", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_COLLECTIONS": "{numberNonexistent, plural, one{één van de verzamelingen die je hebt afgerond is niet langer beschikbaar. Dat spijt ons} other{# verzamelingen die je hebt afgerond zijn niet langer beschikbaar. Dat spijt ons}}", @@ -369,7 +376,7 @@ "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Bezig met verzenden...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Zilver", "I18N_LEARNER_DASHBOARD_SKILLS": "Vaardigheden", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "Vaardigheidsscore", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Vaardigheidsvoortgang", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Voltooide verhalen", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Abonnementen", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Voortgang:", @@ -461,6 +468,7 @@ "I18N_MODAL_CANCEL_BUTTON": "Annuleren", "I18N_MODAL_CONTINUE_BUTTON": "Doorgaan", "I18N_NEXT_LESSON": "Volgende les", + "I18N_NO": "Nee", "I18N_ONE_SUBSCRIBER_TEXT": "Je hebt één abonnee.", "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Partnerschappen", "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "Account wordt verwijderd", @@ -602,11 +610,12 @@ "I18N_PREFERENCES_USERNAME": "Gebruikersnaam", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Nog niet selecteerd", "I18N_PROFILE_NO_EXPLORATIONS": "Deze gebruiker geeft nog geen verkenningen gemaakt of bewerkt.", - "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Meer leren over je score", + "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Score-uitsplitsing", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Mijn dashboard", - "I18N_QUESTION_PLAYER_NEW_SESSION": "Nieuwe sessie", + "I18N_QUESTION_PLAYER_NEW_SESSION": "Herhalen", "I18N_QUESTION_PLAYER_RETRY_TEST": "Toets opnieuw maken", "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "Terug naar het verhaal", + "I18N_QUESTION_PLAYER_REVIEW_LOWEST_SCORED_SKILL": "Nog een keer kijken naar laagste score", "I18N_QUESTION_PLAYER_SCORE": "Score", 
"I18N_QUESTION_PLAYER_SKILL_DESCRIPTIONS": "Vaardigheidsbeschrijvingen", "I18N_QUESTION_PLAYER_TEST_FAILED": "Sessie niet gehaald. Controleer de vaardigheden en probeer het opnieuw", @@ -616,15 +625,15 @@ "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Toets bekijken", "I18N_SAVE_PROGRESS": "Log in of meld je aan om je voortgang op te slaan en door te gaan met de volgende les.", "I18N_SHARE_LESSON": "Deze les delen", - "I18N_SHOW_SOLUTION_BUTTON": "Oplossing tonen", - "I18N_SIDEBAR_ABOUT_LINK": "Over Oppia", + "I18N_SHOW_LESS": "Toon Minder", + "I18N_SHOW_SOLUTION_BUTTON": "Oplossing weergeven", + "I18N_SIDEBAR_ABOUT_LINK": "Over ons", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM": "Klaslokaal", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Basis wiskunde", "I18N_SIDEBAR_CONTACT_US": "Contact opnemen", "I18N_SIDEBAR_DONATE": "Doneren", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Aan de slag", "I18N_SIDEBAR_LIBRARY_LINK": "Bibliotheek", "I18N_SIDEBAR_OPPIA_FOUNDATION": "De Oppia Foundation", "I18N_SIDEBAR_PARTNERSHIPS": "Partnerschappen", @@ -655,6 +664,7 @@ "I18N_SIGNUP_LOADING": "Bezig met laden", "I18N_SIGNUP_PAGE_TITLE": "Word lid van de gemeenschap - Oppia", "I18N_SIGNUP_REGISTRATION": "Registratie", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "Vraag het me niet nog een keer", "I18N_SIGNUP_SEND_ME_NEWS": "Stuur mij nieuws en updates over de site", "I18N_SIGNUP_SITE_DESCRIPTION": "<[sitename]> is an open gemeenschap van leermiddelen. Al het materiaal is vrij te hergebruiken en te delen.", "I18N_SIGNUP_SITE_OBJECTIVE": "<[sitename]> bestaat om het maken en de voortdurende verbetering van een reeks van hoge-kwaliteit leermiddelen mogelijk te maken die vrij beschikbaar zijn voor iedereen.", @@ -705,9 +715,12 @@ "I18N_SPLASH_VOLUNTEERS_CONTENT": "Wie je ook bent, bij Oppia vind je een thuis. 
We hebben altijd meer mensen nodig om lessen te verbeteren door vragen te stellen, afbeeldingen bij te dragen of lessen te vertalen.", "I18N_SPLASH_VOLUNTEERS_TITLE": "Uitgevoerd door de gemeenschap", "I18N_START_HERE": "Klik hier om te beginnen!", + "I18N_STORY_JhiDkq01dqgC_TITLE": "Een dag in het pretpark", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - Voltooid!", "I18N_SUBSCRIBE_BUTTON_TEXT": "Abonneren", - "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Volgende vaardigheid", + "I18N_SUBTOPIC_0abdeaJhmfPm_multiplying-fractions_TITLE": "Vermenigvuldigen van breuken", + "I18N_SUBTOPIC_0abdeaJhmfPm_what-is-a-fraction_TITLE": "Wat is een breuk?", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Volgende vaardigheid:", "I18N_TEACH_BENEFITS_ONE": "Effectief en goed leren voor alle leefttijden", "I18N_TEACH_BENEFITS_THREE": "Altijd gratis en eenvoudig te gebruiken", "I18N_TEACH_BENEFITS_TITLE": "Onze voordelen", @@ -730,6 +743,8 @@ "I18N_TEACH_TESTIMONIAL_2": "\"Oppia is de eerste in zijn soort! Het helpt leerlingen om alles te leren wat ze nodig hebben over een specifiek onderwerp op een aantrekkelijke en boeiende manier; het moedigt hen ook aan om slimme apparaten voor hun eigen bestwil te gebruiken.\"", "I18N_TEACH_TESTIMONIAL_3": "\"k had nooit verwacht dat de leerlingen zo snel techniek zouden leren en wiskundelessen zouden volgen. Het is hun eerste kennismaking met slimme techniek en ze hadden in het begin echt moeite om ermee om te gaan. 
Nu voel ik me zo opgetogen om ze de Oppia-lessen te zien doen nog voordat ik het klaslokaal binnenkom!\"", "I18N_THANKS_PAGE_BREADCRUMB": "Bedankjes", + "I18N_TIME_FOR_BREAK_TITLE": "Tijd voor een pauze?", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "Breuken", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 les} other{# lessen}}", "I18N_TOPIC_VIEWER_CHAPTER": "Hoofdstuk", "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{1 hoofdstuk} other{# hoofdstukken}}", @@ -749,12 +764,12 @@ "I18N_TOPIC_VIEWER_STUDY_SKILLS_SUBTITLE": "Gebruik de volgende kaarten om je vaardigheden over <[topicName]> te verbeteren.", "I18N_TOPIC_VIEWER_VIEW_ALL": "Alles weergeven", "I18N_TOPIC_VIEWER_VIEW_LESS": "Minder weergeven", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "Optellen en aftrekken", "I18N_TOPNAV_ABOUT": "Over", "I18N_TOPNAV_ABOUT_OPPIA": "Over Oppia", "I18N_TOPNAV_ADMIN_PAGE": "Beheerderspagina", "I18N_TOPNAV_BLOG": "Blog", "I18N_TOPNAV_BLOG_DASHBOARD": "Blogdashboard", - "I18N_TOPNAV_CLASSROOM": "Klaslokaal", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Basis wiskunde", "I18N_TOPNAV_CONTACT_US": "Contact opnemen", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Dashboard voor bijdragers", @@ -763,13 +778,14 @@ "I18N_TOPNAV_FORUM": "Forum", "I18N_TOPNAV_GET_INVOLVED": "Meedoen", "I18N_TOPNAV_GET_STARTED": "Aan de slag", + "I18N_TOPNAV_LEARN": "Leren", "I18N_TOPNAV_LEARNER_DASHBOARD": "Leerlingendashboard", - "I18N_TOPNAV_LIBRARY": "Bibliotheek", + "I18N_TOPNAV_LIBRARY": "Gemeenschapsbibliotheek", "I18N_TOPNAV_LOGOUT": "Afmelden", "I18N_TOPNAV_MODERATOR_PAGE": "Moderatorpagina", "I18N_TOPNAV_OPPIA_FOUNDATION": "De Oppia Foundation", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Deelnameoverzicht", - "I18N_TOPNAV_PARTNERSHIPS": "Partnerschappen", + "I18N_TOPNAV_PARTNERSHIPS": "Scholen en organisaties", "I18N_TOPNAV_PREFERENCES": "Voorkeuren", "I18N_TOPNAV_SIGN_IN": "Aanmelden", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Aanmelden met Google", @@ -780,5 +796,6 @@ "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Meehelpen", 
"I18N_WARNING_MODAL_DESCRIPTION": "Hiermee krijg je de volledige oplossing te zien. Weet je het zeker?", "I18N_WARNING_MODAL_TITLE": "Waarschuwing!", - "I18N_WORKED_EXAMPLE": "Uitgewerkt voorbeeld" + "I18N_WORKED_EXAMPLE": "Uitgewerkt voorbeeld", + "I18N_YES": "Ja" } diff --git a/assets/i18n/oc.json b/assets/i18n/oc.json index f383473aa6c9..9fdcc85a903c 100644 --- a/assets/i18n/oc.json +++ b/assets/i18n/oc.json @@ -101,7 +101,6 @@ "I18N_SIDEBAR_CONTACT_US": "Contactatz-nos", "I18N_SIDEBAR_DONATE": "Far un don", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Començar", "I18N_SIDEBAR_LIBRARY_LINK": "Bibliotèca", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Ensenhar amb Oppia", "I18N_SIGNUP_CLOSE_BUTTON": "Tampar", diff --git a/assets/i18n/pl.json b/assets/i18n/pl.json index 68e2477675c3..5e7486e61330 100644 --- a/assets/i18n/pl.json +++ b/assets/i18n/pl.json @@ -1,5 +1,6 @@ { - "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "O fundacji", + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "O Fundacji Oppia", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "O Fundacji Oppia | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Utwórz Eksplorację", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "o temacie na który zwracasz uwagę", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Udostępnij opinię", @@ -9,8 +10,10 @@ "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_2": "Tworząc zestaw bezpłatnych, wysokiej jakości, wyraźnie widocznych lekcji z pomocą nauczycieli z całego świata, Oppia dąży do zapewnienia uczniom wysokiej jakości edukacji — niezależnie od tego, gdzie się znajdują i do jakich tradycyjnych zasobów mają dostęp.", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_5": "Jeśli jesteś uczniem zainteresowanym uczeniem się z przy pomocy programu Oppia możesz zacząć swoją przygodę od przeszukiwania ekploracji.", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_6": "Jeśli jesteś nauczycielem zainteresowanym wpływaniem na życie studentów z całego świata, możesz aplikować by dołączyć do naszego Teach with Oppia programu, którego celem jest 
zapewnianie lekcji poruszających tematy, które studenci zwykle uważają za trudne.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_8": "Niezależnie od tego, czy jesteś nauczycielem K-12, absolwentem, czy osobą, która pasjonuje się konkretnym tematem i chce podzielić się swoją wiedzą, Oppia zaprasza Cię. Dołącz do społeczności i zacznij odkrywać z nami.", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "Opublikuj & Udostępnij", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE_TEXT": "twoje twory ze społecznością.", + "I18N_ABOUT_PAGE_BREADCRUMB": "O projekcie", "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "Twórcy", "I18N_ABOUT_PAGE_EASILY_CREATE_LESSON": "Łatwe tworzenie lekcji", "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "Przeglądaj lekcje przygotowane przez społeczność", @@ -21,6 +24,12 @@ "I18N_ABOUT_PAGE_HEADING": "Oppia: edukacja dla wszytskich", "I18N_ABOUT_PAGE_LANGUAGE_FEATURE": "Tłumaczenie na lokalne dialekty", "I18N_ABOUT_PAGE_LEARN_BUTTON": "Chciałbym się uczyć", + "I18N_ABOUT_PAGE_LEARN_FROM": "Ucz się z wyselekcjonowanych lekcji Oppia", + "I18N_ABOUT_PAGE_LEARN_FROM_CONTENT": "W Klasie znajdziesz zestaw lekcji zaprojektowanych i przetestowanych przez zespół Oppia, aby upewnić się, że są skuteczne i przyjemne dla wszystkich uczniów. Wszystkie lekcje zostały sprawdzone przez nauczycieli i ekspertów, dzięki czemu możesz mieć pewność, że Twoi uczniowie uzyskują efektywną edukację, ucząc się we własnym tempie.", + "I18N_ABOUT_PAGE_OUR_OUTCOMES": "Nasze wyniki", + "I18N_ABOUT_PAGE_OUR_OUTCOMES_CONTENT": "Dążymy do skuteczności i doskonałości. 
Dlatego nieustannie przeprowadzamy badania użytkowników i randomizowane próby, aby zapewnić, że nasze lekcje są zgodne z naszymi wysokimi standardami.", + "I18N_ABOUT_PAGE_SECTION_ONE_CONTENT": "Oppia zapewnia nowatorskie i wciągające podejście do nauki online, które zostało specjalnie zaprojektowane, aby sprostać wyjątkowym potrzebom uczniów niedofinansowanych na całym świecie.", + "I18N_ABOUT_PAGE_SECTION_SEVEN_TITLE": "Zacznij korzystać z wyselekcjonowanych wskazówek", "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "Co chcesz dzisiaj robić?", "I18N_ABOUT_PAGE_TABS_ABOUT": "O aplikacji", "I18N_ABOUT_PAGE_TABS_CREDITS": "Twórcy", @@ -33,6 +42,8 @@ "I18N_ACTION_BROWSE_LESSONS": "Przeglądaj nasze lekcje", "I18N_ACTION_CREATE_EXPLORATION": "Utwórz Eksplorację", "I18N_ACTION_CREATE_LESSON": "Utwórz swoją własną Lekcję", + "I18N_ACTION_EXPLORE_LESSONS": "Przeglądaj lekcje", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Nowy post", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Anuluj", "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Usuń", "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "Edytuj obraz miniatury", @@ -46,11 +57,15 @@ "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "Dodaj miniaturę", "I18N_CHAPTER_COMPLETION": "Gratulujemy ukończenia rozdziału!", "I18N_CLASSROOM_CALLOUT_HEADING_1": "Podstawy matematyczne", + "I18N_CLASSROOM_MATH_TITLE": "Matematyka", + "I18N_CLASSROOM_PAGE_COMING_SOON": "Wkrótce", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Szczegóły kursu", "I18N_CLASSROOM_PAGE_HEADING": "Strona Oppia Classroom", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Przeglądaj więcej lekcji przygotowanych przez Społeczność", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Przeszukaj naszą Bibliotekę Społeczności", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "Pokryte tematy", + "I18N_COMING_SOON": "Wkrótce!", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "KOLEKCJA", "I18N_CONTACT_PAGE_BREADCRUMB": "Kontakt", "I18N_CONTACT_PAGE_HEADING": "Dołącz do nas", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Dziękujemy za 
zainteresowanie pomocą projektowi Oppia.", @@ -79,6 +94,7 @@ "I18N_CREATE_NO_THANKS": "Nie, dziękuję", "I18N_CREATE_YES_PLEASE": "Tak, proszę!", "I18N_DASHBOARD_COLLECTIONS": "Kolekcje", + "I18N_DASHBOARD_CREATOR_DASHBOARD": "Pulpit nawigacyjny", "I18N_DASHBOARD_EXPLORATIONS": "Eksploracje", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY": "Sortuj według", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_AVERAGE_RATING": "Przeciętna ocena", @@ -109,8 +125,9 @@ "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Przegląd", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Upuść plik graficzny na ten obszar", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Załaduj plik", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "W 2012 roku Oppia wystartowała z prostym pomysłem: poprawić edukację uczniów na całym świecie przy jednoczesnej poprawie jakości nauczania. Ta wizja od tego czasu przekształciła się w platformę edukacyjną z ponad 11 000 eksploracji, z których skorzystało ponad 430 000 użytkowników na całym świecie.", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "Przekaż darowiznę na rzecz The Oppia Foundation, zarejestrowanej organizacji non-profit 501(c)(3), i dołącz do nas w niesieniu radości nauczania i uczenia się ludziom na całym świecie.", + "I18N_DONATE_PAGE_BREADCRUMB": "Wesprzyj", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "Przekaż darowiznę | Wywieraj pozytywny wpływ | Oppia", + "I18N_DONATE_PAGE_IMAGE_TITLE": "Twoje hojne fundusze podarunkowe:", "I18N_ERROR_DISABLED_EXPLORATION": "Nieaktywna Eksploracja", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Przepraszamy, jednak eksploracja na którą kliknąłeś jest obecnie nieaktywna. Prosimy spróbować później.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Nieaktywna Eksploracja - Oppia", @@ -122,10 +139,21 @@ "I18N_ERROR_MESSAGE_401": "Nie możesz tu wejść. Szybko, wracaj zanim przyjdzie nauczyciel!", "I18N_ERROR_MESSAGE_404": "Przepraszamy, szukaliśmy i szukaliśmy, ale nie znaleźliśmy tej strony.", "I18N_ERROR_MESSAGE_500": "Coś poszło strasznie nie tak. Ale to nie była twoja wina. 
Wystąpił błąd wewnętrzny.", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "Błąd <[statusCode]> | Oppia", + "I18N_ERROR_PAGE_TITLE": "Błąd <[statusCode]> - Oppia", "I18N_ERROR_PAGE_TITLE_400": "Błąd 400 - Oppia", "I18N_ERROR_PAGE_TITLE_401": "Błąd 401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "Błąd 404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "Błąd 500 - Oppia", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "Gotowy na więcej babeczek? Rozwiąż ten krótki quiz, aby sprawdzić, czego nauczyłeś się do tej pory!", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE": "Równość ułamków (podsumowanie)", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "Co to jest stosunek?", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "Co to jest dodatek?", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "Co to jest odejmowanie?", + "I18N_EXPLORATION_WulCxGAmGE61_TITLE": "Dzielenie przez wielokrotności dziesięciu", + "I18N_EXPLORATION_WwqLmeQEn9NK_TITLE": "Zaokrąglanie liczb, Część 2", + "I18N_EXPLORATION_lOU0XPC2BnE9_DESCRIPTION": "Dołącz do Niny, która pomaga Sandrze przygotować sok owocowy na jej stragan, używając nowej techniki dzielenia!", + "I18N_EXPLORATION_wE9pyaC5np3n_DESCRIPTION": "Nina i Sandra biorą udział w konkursie. 
Dołącz do Niny, która wykorzystuje swoje umiejętności dzielenia, aby sprzedać jak najwięcej owoców i soków, by wygrać główną nagrodę!", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anonimowy", "I18N_FOOTER_ABOUT": "Informacje", "I18N_FOOTER_ABOUT_ALL_CAPS": "O OPPIA", @@ -160,39 +188,63 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_8_HEADING": "Podziel się swoją eksploracją", "I18N_GET_STARTED_PAGE_PARAGRAPH_9": "Kiedy uczniowie przejdą przez twoją eksplorację, mogą przesłać ci informację zwrotną, aby ostrzec cię o problemach lub podzielić się pomysłami, jak je ulepszyć.", "I18N_GET_STARTED_PAGE_PARAGRAPH_9_HEADING": "Popraw swoją eksplorację", + "I18N_GET_STARTED_PAGE_TITLE": "Rozpocznij", "I18N_GOT_IT": "Rozumiem", "I18N_HEADING_VOLUNTEER": "Wolontariat", "I18N_HINT_TITLE": "Wskazówka", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Przejdź do edytora kodu", + "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Przeciągnij i upuść elementy", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "Proszę nie umieszczać 0 w mianowniku", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS_LENGTH": "\nŻadna z liczb w ułamku nie powinna mieć więcej niż 7 cyfr", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "Proszę wpisać swoją odpowiedź jako ułamek (przykład, 5/3 zamiast 1 2/3).", "I18N_INTERACTIONS_GRAPH_DELETE": "Usuń", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "Niepoprawny graf!", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "Utwórz wykres", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "Zobacz wykres", "I18N_INTERACTIONS_GRAPH_MOVE": "Przenieś", "I18N_INTERACTIONS_GRAPH_RESET_BUTTON": "Zresetuj", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Możesz wybrać więcej opcji.", "I18N_INTERACTIONS_MUSIC_CLEAR": "Wyczyść", "I18N_INTERACTIONS_MUSIC_PLAY": "Odtwórz", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Odtwórz sekwencję docelową", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "Wprowadź prawidłową walutę (przykład, $5 czy Rs 5)", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": 
"Anuluj", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Dodaj element", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Brak odpowiedzi.", + "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Brąz", + "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Lekcje społeczności", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "Ukończono", "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_HEADING": "Rozpocznij", + "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "Odpowiedz", "I18N_LEARNER_DASHBOARD_HOME_SECTION": "Strona główna", + "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Dzień dobry", + "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "Postęp", "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "Usuń", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "Wyślij", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Wysyłanie...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Srebro", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Subskrypcje", + "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Postęp:", "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "Bieżąca", "I18N_LEARNER_DASHBOARD_SUGGESTION_NO_CURRENT_STATE": "Ups! 
Ten stan już nie istnieje!", + "I18N_LEARNT_TOPIC": "Uczony<[topicName]>", + "I18N_LEARN_TOPIC": "Uczony<[topicName]>", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "AUTORZY LEKCJI", + "I18N_LESSON_INFO_HEADER": "Informacje o lekcji", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "Dodaj do listy „Zagraj później”", "I18N_LIBRARY_ALL_CATEGORIES": "Wszystkie Kategorie", + "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "Wybierz wszystkie kategorie", "I18N_LIBRARY_ALL_LANGUAGES": "Wszystkie języki", "I18N_LIBRARY_ALL_LANGUAGES_SELECTED": "Wszystkie wybrane języki", "I18N_LIBRARY_CATEGORIES_ALGORITHMS": "Algorytmy", "I18N_LIBRARY_CATEGORIES_ARCHITECTURE": "Architektura", "I18N_LIBRARY_CATEGORIES_ART": "Sztuka", "I18N_LIBRARY_CATEGORIES_BIOLOGY": "Biologia", + "I18N_LIBRARY_CATEGORIES_BUSINESS": "Biznes", "I18N_LIBRARY_CATEGORIES_CHEMISTRY": "Chemia", + "I18N_LIBRARY_CATEGORIES_CODING": "Kodowanie", + "I18N_LIBRARY_CATEGORIES_COMPUTING": "Informatyka", + "I18N_LIBRARY_CATEGORIES_ECONOMICS": "Ekonomia", "I18N_LIBRARY_CATEGORIES_EDUCATION": "Edukacja", "I18N_LIBRARY_CATEGORIES_ENGINEERING": "Inżynieria", "I18N_LIBRARY_CATEGORIES_ENGLISH": "Angielski", @@ -200,19 +252,32 @@ "I18N_LIBRARY_CATEGORIES_GEOGRAPHY": "Geografia", "I18N_LIBRARY_CATEGORIES_GOVERNMENT": "Rząd", "I18N_LIBRARY_CATEGORIES_HISTORY": "Historia", + "I18N_LIBRARY_CATEGORIES_HOBBIES": "Hobby", + "I18N_LIBRARY_CATEGORIES_INTERACTIVE_FICTION": "Interaktywna fikcja", + "I18N_LIBRARY_CATEGORIES_LANGUAGES": "Języki", "I18N_LIBRARY_CATEGORIES_LAW": "Prawo", + "I18N_LIBRARY_CATEGORIES_LIFE_SKILLS": "Umiejętności życiowe", "I18N_LIBRARY_CATEGORIES_MATHEMATICS": "Matematyka", + "I18N_LIBRARY_CATEGORIES_MATHS": "Matematyka", "I18N_LIBRARY_CATEGORIES_MEDICINE": "Medycyna", "I18N_LIBRARY_CATEGORIES_MUSIC": "Muzyka", "I18N_LIBRARY_CATEGORIES_PHILOSOPHY": "Filozofia", "I18N_LIBRARY_CATEGORIES_PHYSICS": "Fizyka", + "I18N_LIBRARY_CATEGORIES_PROGRAMMING": "Programowanie", "I18N_LIBRARY_CATEGORIES_PSYCHOLOGY": "Psychologia", + 
"I18N_LIBRARY_CATEGORIES_PUZZLES": "Zagadki", "I18N_LIBRARY_CATEGORIES_READING": "Czytanie", + "I18N_LIBRARY_CATEGORIES_RELIGION": "Religia", "I18N_LIBRARY_CATEGORIES_SPORT": "Sport", "I18N_LIBRARY_CATEGORIES_STATISTICS": "Statystyki", "I18N_LIBRARY_CATEGORIES_WELCOME": "Witaj", + "I18N_LIBRARY_GROUPS_COMPUTING": "Informatyka", + "I18N_LIBRARY_GROUPS_HUMANITIES": "Nauki humanistyczne", "I18N_LIBRARY_GROUPS_LANGUAGES": "Języki", + "I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS": "Matematyka i statystyka", + "I18N_LIBRARY_GROUPS_RECENTLY_PUBLISHED": "Ostatnio opublikowane", "I18N_LIBRARY_GROUPS_SCIENCE": "Nauka", + "I18N_LIBRARY_GROUPS_SOCIAL_SCIENCE": "Nauki społeczne", "I18N_LIBRARY_LAST_UPDATED": "Ostatnia aktualizacja", "I18N_LIBRARY_LOADING": "Wczytywanie", "I18N_LICENSE_PAGE_LICENSE_HEADING": "Licencja", @@ -220,6 +285,7 @@ "I18N_MODAL_CANCEL_BUTTON": "Anuluj", "I18N_MODAL_CONTINUE_BUTTON": "Kontynuuj", "I18N_NEXT_LESSON": "Następna lekcja", + "I18N_NO": "Nie", "I18N_ONE_SUBSCRIBER_TEXT": "Masz 1 subskrybenta.", "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Związki partnerskie", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2_HEADING": "Szczegóły usunięcia", @@ -242,6 +308,8 @@ "I18N_PREFERENCES_CHANGE_PICTURE": "Zmiana zdjęcia w profilu", "I18N_PREFERENCES_EMAIL": "E-mail", "I18N_PREFERENCES_EMAIL_EXPLAIN": "Tylko moderatorzy i administratorzy stron mogą zobaczyć twój adres poczty elektronicznej.", + "I18N_SHOW_LESS": "Pokaż mniej", + "I18N_SHOW_MORE": "Pokaż więcej", "I18N_SIGNIN_PAGE_TITLE": "Zaloguj się", "I18N_SIGNUP_CLOSE_BUTTON": "Zamknij", "I18N_SIGNUP_ERROR_NO_USERNAME": "Wprowadź swoją nazwę użytkownika", @@ -249,13 +317,20 @@ "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "Dziękujemy!", "I18N_SPLASH_TITLE": "Myśl nieszablonowo", "I18N_START_HERE": "Kliknij tutaj aby zacząć!", + "I18N_TIME_FOR_BREAK_BODY_1": "Wydaje się, że bardzo szybko przesyłasz odpowiedzi. 
Czy jesteś zmęczony?", + "I18N_TIME_FOR_BREAK_TITLE": "Czas na przerwę?", + "I18N_TOPIC_LEARN": "uczyć się", + "I18N_TOPIC_TITLE": "Temat", "I18N_TOPIC_VIEWER_CHAPTER": "Rozdział", "I18N_TOPIC_VIEWER_LESSON": "Lekcje", "I18N_TOPIC_VIEWER_LESSONS": "Lekcje", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "Wróć później, gdy dostępne będą lekcje na ten temat.", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "Wróć później, gdy dostępne będą pytania praktyczne dotyczące tego tematu.", "I18N_TOPIC_VIEWER_SKILL": "Umiejętności", "I18N_TOPNAV_LIBRARY": "Biblioteka", "I18N_TOPNAV_LOGOUT": "Wyloguj się", "I18N_TOPNAV_PREFERENCES": "Ustawienia", "I18N_TOPNAV_SIGN_IN": "Zaloguj", - "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Zaloguj się za pomocą Google" + "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Zaloguj się za pomocą Google", + "I18N_YES": "Tak" } diff --git a/assets/i18n/pt-br.json b/assets/i18n/pt-br.json index 7fda763d17c8..41b303610ccc 100644 --- a/assets/i18n/pt-br.json +++ b/assets/i18n/pt-br.json @@ -1,12 +1,14 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Sobre a Fundação Oppia", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "Sobre a Fundação Oppia | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Criar uma Exploração", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "sobre um tópico do seu interesse.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Compartilhar comentários", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK_TEXT": "para melhorar a sua exploração.", - "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "Sobre o Oppia", - "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_1": "A missão da Oppia é ajudar qualquer pessoa a aprender o que quiser de maneira eficaz e agradável.", - "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_2": "Ao criar um conjunto de aulas gratuitas, de alta qualidade e comprovadamente eficazes com a ajuda de educadores de todo o mundo, a Oppia visa proporcionar aos alunos uma educação de qualidade. 
— independentemente de onde eles estão ou de quais recursos tradicionais eles têm acesso.", - "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_3": "Até agora, os educadores criaram mais de <[numberOfExplorations]> dessas lições, que chamamos explorações. E eles servem quase <[numberofStudentsServed]> estudantes em todo o mundo.", + "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "Sobre a Oppia", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_1": "A missão da Oppia é ajudar qualquer pessoa a aprender o que quiser de maneira eficaz e divertida.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_2": "Ao criar um conjunto de aulas gratuitas, de alta qualidade e comprovadamente eficazes com a ajuda de educadores de todo o mundo, a Oppia visa proporcionar a estudantes uma educação de qualidade — independentemente de onde estejam ou a quais recursos tradicionais tenham acesso.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_3": "Até o momento, os educadores criaram mais de <[numberOfExplorations]> dessas lições, que chamamos explorações. Elas são usadas por quase <[numberofStudentsServed]> estudantes em todo o mundo.", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_4": "As explorações ajudam os alunos a aprender de maneira divertida e criativa, usando vídeos, imagens e perguntas abertas. 
E como os alunos, geralmente, têm ideias equivocadas semelhantes, o Oppia também oferece aos educadores a capacidade de abordar esses erros diretamente nas explorações, capacitando-os a fornecer feedback direcionado a vários alunos de cada vez.", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_5": "Se é estudante e tem interesse em aprender com o Oppia, pode começar a sua aventura de aprendizagem navegando pelas nossas explorações.", "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_6": "Se você é um professor interessado em impactar as vidas de estudantes em todo o mundo, você pode se inscrever para participar do programa Ensine com o Oppia, visando fornecer lições para tópicos que os alunos normalmente acham difícil.", @@ -53,6 +55,7 @@ "I18N_ABOUT_PAGE_TITLE": "Sobre | Oppia", "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "Comece a usar a Oppia", "I18N_ABOUT_PAGE_WIFI_FEATURE": "Baixa conexão de internet exigida", + "I18N_ACTION_ACCESS_ANDROID_APP": "Acesse o aplicativo para Android", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Inscreva-se no \"Ensine com Oppia\"", "I18N_ACTION_BROWSE_EXPLORATIONS": "Navegue por nossas Explorações", "I18N_ACTION_BROWSE_LESSONS": "Navegue por nossas Aulas", @@ -64,18 +67,136 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "Guia para professores", "I18N_ACTION_TIPS_FOR_PARENTS": "Dicas para pais e responsáveis", "I18N_ACTION_VISIT_CLASSROOM": "Visite a sala de aula", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Cancelar", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "Concluído", + "I18N_ADD_NEW_SYLLABUS_ITEMS": "Novos Itens de Programa de Estudos", + "I18N_ADD_SYLLABUS_DESCRIPTION_TEXT": "Adicione habilidades ou histórias ao seu programa de estudos para enviar automaticamente para seus estudantes.", + "I18N_ADD_SYLLABUS_SEARCH_PLACEHOLDER": "Pesquisa, por exemplo: História, Física, Inglês", + "I18N_ANDROID_PAGE_AVAILABLE_FOR_DOWNLOAD_TEXT": "Comece a aprender gratuitamente no Android hoje", + "I18N_ANDROID_PAGE_BETA_DESCRIPTION": "Uma versão beta do aplicativo para Android 
da Oppia agora é gratuita para download e uso em inglês e português do Brasil.", + "I18N_ANDROID_PAGE_CONSENT_CHECKBOX_LABEL": "Confirmo que tenho mais de 18 anos ou que tenho o consentimento e a aprovação dos meus pais ou tutores legais.", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "Endereço eletrônico", + "I18N_ANDROID_PAGE_FEATURES_SECTION_HEADER": "Educação para todos.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_1": "Experimente nossas dicas úteis para guiá-lo ao longo", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_2": "Continue aprendendo mesmo quando estiver off-line.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_3": "O aplicativo está disponível em inglês e brasileiro português.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_4": "Mais idiomas serão adicionados em breve!", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_5": "Crie e mantenha até 10 perfis em um dispositivo.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_1": "Aprenda através de histórias envolventes", + "I18N_ANDROID_PAGE_FEATURE_TEXT_2": "Aprenda a qualquer hora, em qualquer lugar", + "I18N_ANDROID_PAGE_FEATURE_TEXT_3": "Aprenda em seu idioma", + "I18N_ANDROID_PAGE_FEATURE_TEXT_4": "Alternar entre os alunos", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Nome (opcional)", + "I18N_ANDROID_PAGE_SUPPORT_TEXT": "Suportamos todas as versões do Android que remontam ao Lollipop (Android 5).", + "I18N_ANDROID_PAGE_TITLE": "Android | Oppia", + "I18N_ANDROID_PAGE_UPDATES_MAIN_TEXT": "Inscreva-se para receber atualizações sobre o aplicativo Android da Oppia", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "Subscrever", + "I18N_ANDROID_PAGE_UPDATES_SUBTEXT": "Prometemos não enviar spam, e você só receberá e-mails ocasionais. 
Você pode cancelar a inscrição a qualquer momento.", + "I18N_ASSIGNED_STORIES_AND_SKILLS": "Histórias e Habilidades atribuídas.", + "I18N_ASSIGNED_STORIES_AND_SKILLS_EMPTY_MESSAGE": "Nenhuma história ou habilidade foi atribuída aos estudantes nesse grupo.", + "I18N_ATTRIBUTION_HTML_STEP_ONE": "Copie e cole o HTML", + "I18N_ATTRIBUTION_HTML_STEP_TWO": "Certifique-se de que o link apareça como \"<[linkText]>\"", + "I18N_ATTRIBUTION_HTML_TITLE": "Atribua em HTML", + "I18N_ATTRIBUTION_PRINT_STEP_ONE": "Copie e cole o crédito", + "I18N_ATTRIBUTION_PRINT_STEP_TWO": "Anexe uma cópia do \"<[link]>\"", + "I18N_ATTRIBUTION_PRINT_TITLE": "Atribua na Impressão", + "I18N_ATTRIBUTION_TITLE": "Como atribuir esta lição para compartilhar ou reutilizar", + "I18N_BLOG_CARD_PREVIEW_CONTEXT": "É assim que o cartão do blog aparecerá na página inicial e em seu perfil de autor.", + "I18N_BLOG_CARD_PREVIEW_HEADING": "Pré-Visualização do Cartão do Blog", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_HEADING": "Biografia", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Nome", + "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "Criar Nova Postagem no Blog", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "Parece que você ainda não criou nenhuma história!", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Nova Postagem", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "Salvar", + "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Rascunhos", + "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Publicado", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Blog", + "I18N_BLOG_HOME_PAGE_NO_RESULTS_FOUND": "Desculpe, não há postagens para mostrar.", + "I18N_BLOG_HOME_PAGE_OPPIA_DESCRIPTION": "Construir uma comunidade para fornecer educação de qualidade para quem não tem acesso.", + "I18N_BLOG_HOME_PAGE_POSTS_HEADING": "Postagens mais recentes", + "I18N_BLOG_HOME_PAGE_POSTS_NUMBER_DISPLAY": "Exibing postagens <[startingNumber]> - <[endingNumber]> de <[totalNumber]>", + "I18N_BLOG_HOME_PAGE_QUERY_SEARCH_HEADING": "Palavras-chave", + 
"I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "Tags", + "I18N_BLOG_HOME_PAGE_TAG_FILTER_HOLDER_TEXT": "Escolha as Tags", + "I18N_BLOG_HOME_PAGE_TITLE": "Blog Oppia | Oppia", + "I18N_BLOG_HOME_PAGE_WELCOME_HEADING": "Boas-vindas ao Blog do Oppia!", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_HEADING": "Exibindo resultados de pesquisa", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_DISPLAY": "Exibing <[startingNumber]> - <[endingNumber]> do total dos resultados da pesquisa.", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_OUT_OF_TOTAL_DISPLAY": "Exibing postagens <[startingNumber]> - <[endingNumber]> de <[totalNumber]>", + "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Adicionar Imagem Miniatura", + "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Corpo", + "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Cancelar", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Apagar", + "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "Editar Imagem em Miniatura", + "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "Salvo pela última vez em", + "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "Publicar", + "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "Concluído", + "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "Salvar como rascunho", + "I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "Pré-visualização", + "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "Tags", + "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_PREFIX": "Limite de", + "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "ainda podem ser adicionadas mais tags.", + "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "Miniatura", + "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "Título", + "I18N_BLOG_POST_PAGE_RECOMMENDATION_SECTON_HEADING": "Sugestões para você", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "Tags", + "I18N_BLOG_POST_PAGE_TITLE": "<[blogPostTitle]> | Blog | Oppia", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "Escolha um arquivo ou arraste-o aqui", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Erro: não foi possível ler o arquivo da imagem.", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "Adicionar uma 
miniatura", + "I18N_BLOG_POST_UNTITLED_HEADING": "Sem título", "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "O conteúdo deste cartão está muito longo. Favor manter abaixo de 4500 caracteres para salvar.", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Este cartão é bastante longo e os alunos podem perder o interesse. Considere encurtá-lo ou dividi-lo em duas cartas.", + "I18N_CHAPTER_COMPLETION": "Parabéns por completar o capítulo!", "I18N_CLASSROOM_CALLOUT_BUTTON": "Explorar", "I18N_CLASSROOM_CALLOUT_HEADING_1": "Fundamentos de Matemática", - "I18N_CLASSROOM_CALLOUT_HEADING_2": "Apresentando: The Oppia Classroom", - "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Confira o primeiro curso na nova Oppia Classroom! Planos de aula eficientes, criados por professores, para que você possa dominar importantes disciplinas acadêmicas.", + "I18N_CLASSROOM_CALLOUT_HEADING_2": "Apresentando: A Sala de Aula da Oppia", + "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Confira o primeiro curso na nova Sala de Aula da Oppia! Planos de aula eficientes, criados por professores, para que você possa dominar importantes disciplinas acadêmicas.", + "I18N_CLASSROOM_MATH_TITLE": "Matemática", "I18N_CLASSROOM_PAGE_COMING_SOON": "Em breve", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Detalhes do curso", - "I18N_CLASSROOM_PAGE_HEADING": "A Sala de Aula Oppia", + "I18N_CLASSROOM_PAGE_HEADING": "A Sala de Aula da Oppia", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Explore mais lições feitas pela comunidade", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Pesquise em nossa Biblioteca da Comunidade", + "I18N_CLASSROOM_PAGE_TITLE": "Aprenda <[classroomName]> com Oppia | Oppia", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "Assuntos abordados", + "I18N_COLLECTION_EDITOR_PAGE_TITLE": "<[collectionTitle]> - Editor do Oppia", + "I18N_COLLECTION_EDITOR_UNTITLED_COLLECTION_PAGE_TITLE": "Sem título - Editor do Oppia", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "Começar", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Continuar", + 
"I18N_COLLECTION_PLAYER_PAGE_FINISHED": "Você terminou a coleção! Sinta-se à vontade para repetir quaisquer explorações abaixo.", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "Passe o mouse sobre um ícone para visualizar uma exploração.", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "Nenhuma Exploração foi adicionada a esta Coleção.", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "<[collectionTitle]> - Oppia", + "I18N_COMING_SOON": "Em Breve", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "COLEÇÃO", + "I18N_COMPLETED_STORY": "'<[story]>' concluída.", + "I18N_COMPLETE_CHAPTER": "Conclua um capítulo em '<[topicName]>'.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_1": "Você acabou de completar o primeiro checkpoint! Começou bem!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_2": "Parabéns por completar seu primeiro checkpoint! Continue assim!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_3": "Início perfeito! Continue assim!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_1": "Você completou um checkpoint! Bom trabalho!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_2": "Incrível, você completou um checkpoint! Continue!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_3": "Bom trabalho! Você acaba de completar um checkpoint!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_1": "Você está no meio do caminho, logo você vai terminar!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_2": "Você chegou ao meio do caminho, bom trabalho!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_3": "Uau! Você já chegou na metade da lição! Ótimo trabalho!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_1": "Só falta mais um, ebaa!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_2": "Vamos lá! Só falta mais um!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_3": "Você está indo muito bem, só falta mais um!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_1": "Você está indo bem! 
Continue!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_2": "Incrível! Você completou seu segundo checkpoint!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_3": "Mais um checkpoint completo, você está indo muito bem!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_1": "Você está quase lá! Continue assim!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_2": "Você está quase no fim! Continue assim!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_3": "Muito bem! Você está quase no fim!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "Ebaa!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "Incrível!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_3": "Checkpoint!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "Muito bem!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "Muito bom!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "Parabéns!", "I18N_CONTACT_PAGE_BREADCRUMB": "Contato", "I18N_CONTACT_PAGE_HEADING": "Envolver-se!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Obrigado pelo seu interesse em ajudar com o projeto Oppia!", @@ -104,9 +225,13 @@ "I18N_CONTACT_PAGE_PARAGRAPH_8": "Portanto, se gostava de criar lições gratuitas e efetivas para estudantes à volta do mundo, veio ao site certo. Encorajamo-lo a familiarizar-se com os nossos guias práticos para criadores e com as lições existentes, e a começar a criar a sua lição. Além disso, se quiser garantir que as suas lições tenham grande efeito, pondere candidatar-se ao nosso programa Teach with Oppia (ensinar com o Oppia), onde ajudamos a criar, testar e melhorar as suas explorações para obter o efeito ótimo.", "I18N_CONTACT_PAGE_PARAGRAPH_9": "Gosta de uma exploração existente, mas encontrou algo que pode ser melhorado? Pode sugerir alterações a uma exploração diretamente na página da exploração. Clique simplesmente no lápis, ao canto superior direito, e partilhe a sua opinião de melhoramento. O criador da lição receberá as suas sugestões e terá a oportunidade de fundi-las na exploração. 
Esta forma de colaboração é incrivelmente valiosa, especialmente se puder basear as suas sugestões nas experiências de estudantes que assistiram à exploração.", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "Melhorando as explorações existentes", + "I18N_CONTACT_PAGE_TITLE": "Contato | Oppia", "I18N_CONTINUE_REGISTRATION": "Continue o registro", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "OK", + "I18N_COOKIE_BANNER_EXPLANATION": "Este site usa cookies e tecnologias semelhantes para suportar a funcionalidade principal, manter o site seguro e analisar o nosso tráfego. Saiba mais em nossa Política de Privacidade .", "I18N_CORRECT_FEEDBACK": "Correto!", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "Link do seu grupo", + "I18N_CREATE_ACCOUNT": "Criar conta", "I18N_CREATE_ACTIVITY_QUESTION": "O que você quer criar?", "I18N_CREATE_ACTIVITY_TITLE": "Crie uma Atividade", "I18N_CREATE_COLLECTION": "Criar Coleção", @@ -115,6 +240,8 @@ "I18N_CREATE_EXPLORATION_QUESTION": "Você quer criar uma exploração?", "I18N_CREATE_EXPLORATION_TITLE": "Criar uma Exploração", "I18N_CREATE_EXPLORATION_UPLOAD": "Enviar", + "I18N_CREATE_LEARNER_GROUP": "Criar grupo", + "I18N_CREATE_LEARNER_GROUP_PAGE_TITLE": "Criar Grupo de Estudante | Oppia", "I18N_CREATE_NO_THANKS": "Não, obrigado", "I18N_CREATE_YES_PLEASE": "Sim, por favor!", "I18N_CREATOR_IMPACT": "Impacto", @@ -132,10 +259,12 @@ "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_UNRESOLVED_ANSWERS": "Respuestas Sin Resolver", "I18N_DASHBOARD_LESSONS": "Lições", "I18N_DASHBOARD_OPEN_FEEDBACK": "Feedback aberto", + "I18N_DASHBOARD_SKILL_PROFICIENCY": "Proficiência em Habilidades", "I18N_DASHBOARD_STATS_AVERAGE_RATING": "Classificação média", "I18N_DASHBOARD_STATS_OPEN_FEEDBACK": "Comentários abertos", "I18N_DASHBOARD_STATS_TOTAL_PLAYS": "Total de peças", "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "Assinantes", + "I18N_DASHBOARD_STORIES": "Histórias", "I18N_DASHBOARD_SUBSCRIBERS": "Subscritores", "I18N_DASHBOARD_SUGGESTIONS": "Sugestões", 
"I18N_DASHBOARD_TABLE_HEADING_EXPLORATION": "Exploração", @@ -157,16 +286,36 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "Explorações públicas e coleções que têm outros proprietários", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_3": "Tópicos, histórias, habilidades e perguntas", "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "Para confirmar a exclusão, insira o seu nome de usuário no campo abaixo e pressione o botão 'Excluir minha conta'. Esta ação não pode ser revertida. ", - "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Esta ação excluirá esta conta e todos os dados associados a ela. Os dados que já são públicos serão anonimizados, para que não possam ser mais associados a esta conta. Algumas categorias mencionadas podem não se aplicar a sua conta.", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Esta ação excluirá esta conta de usuário e também todos os dados privados associados a ela. Os dados que já são públicos serão tornados anônimos para que não possam ser associados a esta conta, exceto para dados de backup (que são armazenados por 6 meses). 
Algumas das categorias mencionadas abaixo podem não se aplicar à sua conta.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Visão geral", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "Estes são os tipos de dados que serão deletados:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "Aqui estão os tipos de dados que serão anônimos:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "Além disso, explorações e coleções publicadas que não têm outros proprietários serão transferidas para propriedade da comunidade.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "Se você tiver alguma dúvida ou preocupação sobre o processo de remoção da conta, envie um e-mail para privacy@oppia.org.", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "Isso vai te levar à uma página onde você pode deletar sua conta da Oppia.", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "Apagar Conta | Oppia", + "I18N_DELETE_LEARNER_GROUP": "Excluir Grupo", + "I18N_DEST_IF_STUCK_INFO_TOOLTIP": "Agora você pode especificar um novo cartão no qual você pode guiar os alunos pelos conceitos usados na questão, se eles ficarem muito travados!", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Arraste uma imagem para esta área", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Selecione um arquivo de imagem", "I18N_DONATE_PAGE_BREADCRUMB": "Doar", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "Doe | Cause um Impacto Positivo | Oppia", + "I18N_DONATE_PAGE_IMAGE_TITLE": "Seus doação generosa financia:", + "I18N_DONATE_PAGE_TITLE": "Doe para a Fundação Oppia", + "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Ouça nossa comunidade Oppia", + "I18N_EDIT_LEARNER_GROUP_PAGE_TITLE": "Editar Grupo de Estudante | Oppia", + "I18N_EMPTY_LEARNER_GROUPS_MESSAGE": "Você ainda não tem grupo", + "I18N_EMPTY_SOLUTION_MESSAGE": "Por favor, forneça a solução para a situação.", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "Você completou seu 1º capítulo!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_2": "Você completou seu 5º capítulo!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_3": "Você completou seu 10º capítulo!", + 
"I18N_END_CHAPTER_MILESTONE_MESSAGE_4": "Você completou seu 25º capítulo!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_5": "Você completou seu 50º capítulo!", + "I18N_END_CHAPTER_MILESTONE_PROGRESS_MESSAGE": "{chaptersToGo, plural, one{Complete mais 1 capítulo para alcançar sua próxima meta!} other{Complete mais # capítulos para alcançar sua próxima meta!}}", + "I18N_END_CHAPTER_NEXT_CHAPTER_TEXT": "Vamos para a próxima lição!", + "I18N_END_CHAPTER_PRACTICE_SESSION_TEXT": "Pratique suas novas habilidades!", + "I18N_END_CHAPTER_REVISION_TAB_TEXT": "Revise o que você aprendeu até agora!", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "Veja o que você pode fazer agora!", "I18N_ERROR_DISABLED_EXPLORATION": "Exploração desabilitada", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Desculpe, mas a exploração que você clicou está desativada no momento. Por favor, tente novamente mais tarde.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Exploração Desabilitada - Oppia", @@ -179,13 +328,139 @@ "I18N_ERROR_MESSAGE_404": "Desculpe, procuramos e procuramos muito, mas não encontramos essa página.", "I18N_ERROR_MESSAGE_500": "Algo deu terrivelmente errado. Mas não foi sua culpa. Ocorreu um erro interno.", "I18N_ERROR_NEXT_STEPS": "A melhor coisa a fazer agora é provavelmente retornar à \">página inicial. No entanto, se este problema persistir, e você acha que não deveria, por favor avise-nos em nosso \" target=\"_blank\">issue tracker. Sinto muito sobre isso.", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "Erro <[statusCode]> | Oppia", + "I18N_ERROR_PAGE_TITLE": "Erro <[statusCode]> - Oppia", "I18N_ERROR_PAGE_TITLE_400": "Erro 400 - Oppia", "I18N_ERROR_PAGE_TITLE_401": "Erro 401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "Erro 404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "Erro 500 - Oppia", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "Pronto para mais bolinhos? 
Faça este pequeno teste para verificar sua compreensão do que você aprendeu até agora!", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE": "Igualdade de Frações (Revisão)", + "I18N_EXPLORATION_0FBWxCE5egOw_DESCRIPTION": "É possível que uma fração seja outra disfarçada? Vamos ver o que acontece quando Matthew encontra Farelo pela segunda vez.", + "I18N_EXPLORATION_0FBWxCE5egOw_TITLE": "Frações Equivalentes", + "I18N_EXPLORATION_0X0KC9DXWwra_DESCRIPTION": "Na casa de Kamal, todos comemoram o aniversário de Samir. Kamal deixa a festa mais divertida fazendo um jogo de matemática para Ava e Samir. Veja se você consegue resolver as questões!", + "I18N_EXPLORATION_0X0KC9DXWwra_TITLE": "Revisão: Habilidades de Resolução de Problemas", + "I18N_EXPLORATION_1904tpP0CYwY_DESCRIPTION": "É hora de Aria começar a plantar legumes! Continue sua jornada de jardinagem enquanto você ajuda Aria no jardim e começa a memorizar os múltiplos.", + "I18N_EXPLORATION_1904tpP0CYwY_TITLE": "Expressões com Um Dígito de 1 a 5", + "I18N_EXPLORATION_2mzzFVDLuAj8_DESCRIPTION": "Junte-se a James e seu tio enquanto eles aprendem sobre proporções e como usá-las!", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "O que é uma proporção?", + "I18N_EXPLORATION_40a3vjmZ7Fwu_DESCRIPTION": "Nina e sua mãe esbarram em uma amiga, que também é dona de uma barraca de frutas. Junte-se a Nina enquanto ela usa a divisão para ajudar sua amiga com a barraca!", + "I18N_EXPLORATION_40a3vjmZ7Fwu_TITLE": "Restos e Casos Especiais", + "I18N_EXPLORATION_53Ka3mQ6ra5A_DESCRIPTION": "Maya, Omar e Malik visitam um supermercado para comprar mais ingredientes e precisam somar números maiores. Veja se você pode ajudá-los!", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE": "Somando números maiores", + "I18N_EXPLORATION_5I4srORrwjt2_DESCRIPTION": "Na lanchonete, Kamal diz que eles precisam ser espertos na forma como gastam sua limitada quantia de dinheiro. 
Ajude Ava e Samir a encontrar os lanches que podem comprar!", + "I18N_EXPLORATION_5I4srORrwjt2_TITLE": "Proporcionalidade e Método Unitário", + "I18N_EXPLORATION_5NWuolNcwH6e_DESCRIPTION": "James tenta fazer suas próprias vitaminas... mas elas não ficam tão boas. Que erro será que ele cometeu? Comece esta lição para descobrir!", + "I18N_EXPLORATION_5NWuolNcwH6e_TITLE": "A Importância da Ordem", + "I18N_EXPLORATION_670bU6d9JGBh_DESCRIPTION": "Ajude Matthew a resolver um problema para um dos clientes do Sr. Padeiro enquanto ele aprende sobre números mistos e a reta numérica. Jogue esta lição para começar!", + "I18N_EXPLORATION_670bU6d9JGBh_TITLE": "Números Mistos e a Linha Numérica 1", + "I18N_EXPLORATION_6Q6IyIDkjpYC_DESCRIPTION": "O Sr. Padeiro tem um pedido muito grande chegando e precisa da ajuda de Matthew para comprar mais ingredientes. Você pode descobrir o que eles precisam usando frações?", + "I18N_EXPLORATION_6Q6IyIDkjpYC_TITLE": "Subtraindo Frações", + "I18N_EXPLORATION_8HTzQQUPiK5i_DESCRIPTION": "Junte-se a Nina e sua mãe enquanto elas vão ao mercado. Ajude-as a usar a divisão para descobrir quantos sacos elas precisam para suas compras!", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE": "O que é Divisão?", + "I18N_EXPLORATION_9DITEN8BUEHw_DESCRIPTION": "Aprenda a resolver expressões envolvendo múltiplas operações de adição e subtração.", + "I18N_EXPLORATION_9DITEN8BUEHw_TITLE": "Adição e subtração de vários números", + "I18N_EXPLORATION_9trAQhj6uUC2_DESCRIPTION": "Frações podem ser usadas para representar partes de um bolo. Mas será que elas também podem ser usadas ​​para representar partes de grupos de coisas? Comece esta lição para descobrir!", + "I18N_EXPLORATION_9trAQhj6uUC2_TITLE": "Frações de um Grupo", + "I18N_EXPLORATION_BDIln52yGfeH_DESCRIPTION": "Quando eles chegam ao parque de diversões, Ava e Samir querem se divertir, mas Kamal diz que eles precisam ver se têm dinheiro suficiente. 
Ajude-os com a matemática!", + "I18N_EXPLORATION_BDIln52yGfeH_TITLE": "Simplificando Equações", + "I18N_EXPLORATION_BJd7yHIxpqkq_DESCRIPTION": "Ajude nossos três heróis a fazer uma pizza melhor, enquanto aprende a somar com zero e descobre os números que faltam em um \"fato de adição\".", + "I18N_EXPLORATION_BJd7yHIxpqkq_TITLE": "Noções Básicas de Adição", + "I18N_EXPLORATION_IrbGLTicm0BI_DESCRIPTION": "Enquanto Ava e Kamal esperam pela Sra. Plum, vamos ver se você aprendeu a aplicar estratégias diferentes para resolver problemas do mundo real!", + "I18N_EXPLORATION_IrbGLTicm0BI_TITLE": "Revisão: Resolvendo Problemas do Mundo Real", + "I18N_EXPLORATION_Jbgc3MlRiY07_DESCRIPTION": "Depois de aprender todas essas novas habilidades, Ava quer descobrir o que pode fazer com elas. Junte-se a Ava na aplicação de suas novas habilidades para resolver problemas do mundo real!", + "I18N_EXPLORATION_Jbgc3MlRiY07_TITLE": "Modelando Cenários do Mundo Real", + "I18N_EXPLORATION_K645IfRNzpKy_DESCRIPTION": "Jaime aprende o valor posicional de cada dígito em um número grande.", + "I18N_EXPLORATION_K645IfRNzpKy_TITLE": "O que são Valores Posicionais", + "I18N_EXPLORATION_K89Hgj2qRSzw_DESCRIPTION": "Kamal revela as técnicas que usou para descobrir rapidamente a hora que eles precisam para acordar. Quer ver como ele faz isso? Jogue esta lição para descobrir!", + "I18N_EXPLORATION_K89Hgj2qRSzw_TITLE": "A Lei Distributiva", + "I18N_EXPLORATION_Knvx24p24qPO_DESCRIPTION": "Jaime entende o valor de sua pontuação no fliperama.", + "I18N_EXPLORATION_Knvx24p24qPO_TITLE": "Encontrando os Valores de um Número", + "I18N_EXPLORATION_MRJeVrKafW6G_DESCRIPTION": "O jardim da Aria é um enorme sucesso! A cada semana do verão, mais e mais frutas e legumes estão crescendo. 
Ajude Aria a contar quantos vegetais cresceram.", + "I18N_EXPLORATION_MRJeVrKafW6G_TITLE": "Multiplicando por Potências de Dez", + "I18N_EXPLORATION_MjZzEVOG47_1_DESCRIPTION": "Aprendemos que o \"denominador\" de uma fração é o número de partes iguais no todo. Mas por que as partes devem ser as mesmas? Vamos descobrir!", + "I18N_EXPLORATION_MjZzEVOG47_1_TITLE": "O Significado de \"Partes Iguais\"", + "I18N_EXPLORATION_OKxYhsWONHZV_DESCRIPTION": "Junte-se a Maya e Omar enquanto eles aprendem como os números podem ser \"somados\" ou \"adicionados\" para criar um novo número!", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "O que é Adição?", + "I18N_EXPLORATION_PLAYER_PAGE_TITLE": "<[explorationTitle]> - Oppia", + "I18N_EXPLORATION_PsfDKdhd6Esz_DESCRIPTION": "Maya, Omar e Malik parecem ter perdido dinheiro devido aos ingredientes estragados. Usando a subtração, você pode ajudá-los a descobrir como contar isso?", + "I18N_EXPLORATION_PsfDKdhd6Esz_TITLE": "Subtraindo números grandes, Parte 2", + "I18N_EXPLORATION_R7WpsSfmDQPV_DESCRIPTION": "Junto com Aria, vamos aprender o que é multiplicação, como escrever expressões e como usá-la para resolver problemas na vizinhança de Aria!", + "I18N_EXPLORATION_R7WpsSfmDQPV_TITLE": "Partes de Expressões de Multiplicação", + "I18N_EXPLORATION_RvopsvVdIb0J_DESCRIPTION": "É hora de James vender sua nova vitamina! Ele monta uma barraca com o tio Berry. Será que eles conseguem descobrir quanto dinheiro cada um deles deve receber?", + "I18N_EXPLORATION_RvopsvVdIb0J_TITLE": "Vinculando Proporções a Números Reais", + "I18N_EXPLORATION_SR1IKIdLxnm1_DESCRIPTION": "Ava ficou entediada de jogar os jogos do parque de diversões, então Kamal criou um divertido jogo de matemática. Você consegue vencer o jogo de Kamal? Clique nesta lição para descobrir!", + "I18N_EXPLORATION_SR1IKIdLxnm1_TITLE": "Revisão: Variáveis", + "I18N_EXPLORATION_STARTING_FROM_BEGINNING": "Parabéns por completar essa lição! 
Agora você vai começar a lição do início da próxima vez que voltar.", + "I18N_EXPLORATION_VKXd8qHsxLml_DESCRIPTION": "Maya, Omar e Malik percebem que alguns de seus ingredientes estragaram. Você pode ajudá-los a descobrir quanto eles ainda têm sobrando, usando a subtração?", + "I18N_EXPLORATION_VKXd8qHsxLml_TITLE": "Subtraindo números grandes, Parte 1", + "I18N_EXPLORATION_Vgde5_ZVqrq5_DESCRIPTION": "James descobriu como quer que sua receita de vitamina seja, mas ele tem dificuldade em combinar todas as partes. Você pode ajudá-lo com isso?", + "I18N_EXPLORATION_Vgde5_ZVqrq5_TITLE": "Combinando Proporções", + "I18N_EXPLORATION_W0xq3jW5GzDF_DESCRIPTION": "Algo inesperado acontece quando Maya, Omar e Malik tentam fazer uma segunda pizza.", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "O que é Subtração?", + "I18N_EXPLORATION_WulCxGAmGE61_DESCRIPTION": "Nina visita a casa de Sandra. Junte-se a ela enquanto ela usa a divisão para ajudar Sandra com problemas ainda mais complicados, como transferir todas as frutas para caixas!", + "I18N_EXPLORATION_WulCxGAmGE61_TITLE": "Divisão por Múltiplos de Dez", + "I18N_EXPLORATION_WwqLmeQEn9NK_DESCRIPTION": "Jamie continua a aprender mais técnicas para arredondar números.", + "I18N_EXPLORATION_WwqLmeQEn9NK_TITLE": "Arredondando Números, Parte 2", + "I18N_EXPLORATION_Xa3B_io-2WI5_DESCRIPTION": "Junte-se a Matthew enquanto ele ajuda o Sr. 
Padeiro a reparar os danos, enquanto aprende a somar frações.", + "I18N_EXPLORATION_Xa3B_io-2WI5_TITLE": "Somando Frações", + "I18N_EXPLORATION_aAkDKVDR53cG_DESCRIPTION": "Jamie aprende quando um número é menor ou maior que outro número.", + "I18N_EXPLORATION_aAkDKVDR53cG_TITLE": "Comparando Números", + "I18N_EXPLORATION_aHikhPlxYgOH_DESCRIPTION": "Junte-se a Matthew enquanto ele aprende como os números mistos são apenas frações comuns disfarçadas.", + "I18N_EXPLORATION_aHikhPlxYgOH_TITLE": "Números Mistos e a Reta Numérica 2", + "I18N_EXPLORATION_aqJ07xrTFNLF_DESCRIPTION": "Depois de usar o método unitário para descobrir qual lanche Ava deve comprar, é a vez de Samir usar um novo método. Junte-se a Samir para descobrir qual lanche pegar!", + "I18N_EXPLORATION_avwshGklKLJE_DESCRIPTION": "Jamie aprende a simplificar um número sem fazer muitas alterações em seu valor.", + "I18N_EXPLORATION_avwshGklKLJE_TITLE": "Arredondando Números, Parte 1", + "I18N_EXPLORATION_cQDibOXQbpi7_DESCRIPTION": "Aria está pronta para plantar alguns vegetais maiores em seu jardim! Ajude-a a plantar e regá-los enquanto memoriza mais múltiplos com ela.", + "I18N_EXPLORATION_cQDibOXQbpi7_TITLE": "Expressões com Um Dígito de 5 a 9", + "I18N_EXPLORATION_hNOP3TwRJhsz_DESCRIPTION": "Aria está começando a escola novamente! Ela quer uma grande horta para as crianças de sua escola. Ajude-a a planejar com Omar usando a multiplicação com números maiores.", + "I18N_EXPLORATION_hNOP3TwRJhsz_TITLE": "Multiplicação de Vários Dígitos, Parte 1", + "I18N_EXPLORATION_ibeLZqbbjbKF_DESCRIPTION": "Na estação de trem, Ava e Kamal descobrem que não há trem! Kamal encontra um erro nos cálculos. Você vai ajudá-los a descobrir quando o trem chega?", + "I18N_EXPLORATION_ibeLZqbbjbKF_TITLE": "Conectando Valores para Variáveis", + "I18N_EXPLORATION_k2bQ7z5XHNbK_DESCRIPTION": "É possível que duas proporções diferentes signifiquem a mesma coisa? 
Descubra com James e Tio Berry enquanto eles experimentam uma nova receita de vitamina de chocolate.", + "I18N_EXPLORATION_k2bQ7z5XHNbK_TITLE": "Proporções Equivalentes", + "I18N_EXPLORATION_kYSrbNDCv5sH_DESCRIPTION": "Ava quer aproveitar ao máximo o aniversário de Samir, então começa a planejar seu dia. Ajude-a a usar atalhos para resolver expressões para descobrir as coisas!", + "I18N_EXPLORATION_kYSrbNDCv5sH_TITLE": "As Leis Comutativas e Associativas", + "I18N_EXPLORATION_lNpxiuqufPiw_DESCRIPTION": "Ava logo precisará aplicar suas habilidades em alguns problemas do mundo real. Você será capaz de ajudar? Experimente esta lição para ver se você domina as expressões!", + "I18N_EXPLORATION_lNpxiuqufPiw_TITLE": "Revisão: Trabalhando com Expressões", + "I18N_EXPLORATION_lOU0XPC2BnE9_DESCRIPTION": "Junte-se a Nina enquanto ela ajuda Sandra a fazer suco de frutas para sua barraca, usando uma nova técnica de divisão!", + "I18N_EXPLORATION_lOU0XPC2BnE9_TITLE": "Divisão de Vários Dígitos, Divisores de Um Dígito", + "I18N_EXPLORATION_m1nvGABWeUoh_DESCRIPTION": "Ava e Samir terminam de jogar e vão até a loja para usar seus ingressos. Lá, eles encontram uma máquina misteriosa! Clique na próxima lição para descobrir!", + "I18N_EXPLORATION_m1nvGABWeUoh_TITLE": "O que é uma média?", + "I18N_EXPLORATION_nLmUS6lbmvnl_DESCRIPTION": "Será que James sabe dizer se uma vitamina tem mais leite ou iogurte apenas olhando a receita, em vez de precisar fazer cada vitamina manualmente?", + "I18N_EXPLORATION_nLmUS6lbmvnl_TITLE": "Comparando Proporções", + "I18N_EXPLORATION_nTMZwH7i0DdW_DESCRIPTION": "Ava e Kamal vão para a estação de trem. Eles encontram a Sra. Plum, uma confeiteira, e a ajudam a resolver problemas usando expressões com receita, custo e lucro.", + "I18N_EXPLORATION_nTMZwH7i0DdW_TITLE": "De Problemas de Palavras a Expressões", + "I18N_EXPLORATION_osw1m5Q3jK41_DESCRIPTION": "É hora de ganhar uns bolinhos novamente! 
Aproveite esta oportunidade para certificar-se de que você entendeu as habilidades que aprendeu nas lições anteriores!", + "I18N_EXPLORATION_osw1m5Q3jK41_TITLE": "Operações com Frações (Revisão)", + "I18N_EXPLORATION_rDJojPOc0KgJ_DESCRIPTION": "Ava e Kamal estão comprando presentes para o aniversário de seu primo! Junte-se a eles enquanto eles descobrem como calcular os preços resolvendo expressões.", + "I18N_EXPLORATION_rDJojPOc0KgJ_TITLE": "Resolvendo Expressões - Ordem das Operações", + "I18N_EXPLORATION_rfX8jNkPnA-1_DESCRIPTION": "Você pode ajudar Matthew a ganhar alguns bolinhos? Faça este pequeno teste para ver o quanto você se lembra sobre Frações.", + "I18N_EXPLORATION_rfX8jNkPnA-1_TITLE": "Representando Frações (Revisão)", + "I18N_EXPLORATION_rwN3YPG9XWZa_DESCRIPTION": "Enquanto saboreiam um sorvete, Ava e Kamal tentam responder a algumas perguntas que Ava tem sobre sua próxima visita ao parque de diversões!", + "I18N_EXPLORATION_rwN3YPG9XWZa_TITLE": "Resolvendo Problemas de Palavras", + "I18N_EXPLORATION_tIoSb3HZFN6e_DESCRIPTION": "James aprende a reduzir uma razão à sua forma irredutível, a fim de facilitar seus cálculos.", + "I18N_EXPLORATION_tIoSb3HZFN6e_TITLE": "Escrevendo Proporções na Forma Irredutível", + "I18N_EXPLORATION_umPkwp0L1M0-_DESCRIPTION": "Junte-se a Matthew enquanto ele conhece o Sr. Padeiro pela primeira vez e aprende sobre frações. O que é uma fração? Jogue esta lição para saber mais!", + "I18N_EXPLORATION_umPkwp0L1M0-_TITLE": "O que é uma Fração?", + "I18N_EXPLORATION_v8fonNnX4Ub1_DESCRIPTION": "Ava e Kamal continuam a ajudar a Sra. Plum com seu negócio, mas há algumas incógnitas nas expressões. Ava será capaz de ajudar?", + "I18N_EXPLORATION_v8fonNnX4Ub1_TITLE": "Escrevendo Expressões com Variáveis", + "I18N_EXPLORATION_wE9pyaC5np3n_DESCRIPTION": "Nina e Sandra entram em um concurso. 
Junte-se a Nina enquanto ela usa suas habilidades de divisão para vender o máximo possível de frutas e sucos, para ganhar o grande prêmio!", + "I18N_EXPLORATION_wE9pyaC5np3n_TITLE": "Divisão com Vários Dígitos", + "I18N_EXPLORATION_zIBYaqfDJrJC_DESCRIPTION": "Continue sua aventura de jardinagem com Aria enquanto ela planta frutas, aprende e pratica a multiplicação com Omar!", + "I18N_EXPLORATION_zIBYaqfDJrJC_TITLE": "O Significado da Multiplicação", + "I18N_EXPLORATION_zNb0Bh27QtJ4_DESCRIPTION": "Na lanchonete, Kamal verifica os bolsos e não encontra a sua carteira. Sem a carteira, eles não podem comprar nenhum lanche! Você consegue ajudar a encontrar a carteira de Kamal?", + "I18N_EXPLORATION_zNb0Bh27QtJ4_TITLE": "Progressões Aritméticas", + "I18N_EXPLORATION_zTg2hzTz37jP_DESCRIPTION": "Depois de muito planejamento, Aria conseguiu que seus amigos a ajudassem a plantar a horta da escola! Use suas habilidades para ajudá-los a criar uma horta incrível!", + "I18N_EXPLORATION_zTg2hzTz37jP_TITLE": "Multiplicação de Vários Dígitos, Parte 2", + "I18N_EXPLORATION_zVbqxwck0KaC_DESCRIPTION": "James e Tio Berry são convidados para fazer vitaminas para a festa do vizinho. Este poderia ser o início de sua fama como vendedores de vitaminas?", + "I18N_EXPLORATION_zVbqxwck0KaC_TITLE": "Relações Proporcionais", + "I18N_EXPLORATION_zW39GLG_BdN2_DESCRIPTION": "Enquanto Matthew aprende a comparar frações em termos de tamanho, acontece um acidente na padaria e o Sr. Padeiro fica irritado. 
Vamos ver o que aconteceu!", + "I18N_EXPLORATION_zW39GLG_BdN2_TITLE": "Comparando Frações", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anônimo", "I18N_FOOTER_ABOUT": "Sobre", "I18N_FOOTER_ABOUT_ALL_CAPS": "SOBRE O OPPIA", + "I18N_FOOTER_ANDROID_APP": "Aplicativo para Android", "I18N_FOOTER_AUTHOR_PROFILES": "Perfis de autores", "I18N_FOOTER_BROWSE_LIBRARY": "Navegar na Biblioteca", "I18N_FOOTER_CONTACT_US": "Contate-nos", @@ -206,6 +481,7 @@ "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "Por favor, digite um número que seja no mínimo <[minValue]>.", "I18N_FORMS_TYPE_NUMBER_AT_MOST": "Por favor, digite um número que seja no máximo <[maxValue]>.", "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "Por favor, digite um número decimal válido.", + "I18N_GENERATE_ATTRIBUTION": "Gerar Atribuição", "I18N_GET_STARTED_PAGE_BREADCRUMB": "Começar", "I18N_GET_STARTED_PAGE_HEADING": "Iniciar!", "I18N_GET_STARTED_PAGE_PARAGRAPH_1": "Criar uma exploração é fácil e gratuito. Compartilhe seu conhecimento com os alunos de todo o mundo e receba comentários que você pode usar para melhorar a eficácia de sua exploração.", @@ -226,17 +502,33 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_9": "Quando os alunos passam por sua exploração, eles podem enviar um feedback para alertá-lo sobre problemas ou compartilhar ideias para torná-lo melhor.", "I18N_GET_STARTED_PAGE_PARAGRAPH_9_HEADING": "Melhore sua exploração", "I18N_GET_STARTED_PAGE_TITLE": "Começar", + "I18N_GOAL_LIMIT": "Limite de <[limit]> metas", + "I18N_GOT_IT": "Entendi", "I18N_HEADING_VOLUNTEER": "Voluntário", "I18N_HINT_NEED_HELP": "Precisa de ajuda? 
Veja uma dica para esta questão!", "I18N_HINT_TITLE": "Dica", + "I18N_INTERACTIONS_ALGEBRAIC_EXPR_INSTRUCTION": "Digite uma expressão aqui.", + "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "Digite o código no editor", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Vá para o editor de código", + "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Arraste e solte itens", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "Por favor, não coloque 0 no denominador", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Digite uma fração na forma \"x/y\", ou um número misto na forma \"A x/y\".", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Digite uma fração no formato x/y.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS": "Use apenas dígitos numéricos, espaços ou barras (/)", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS_LENGTH": "Nenhum dos números da fração deve ter mais de 7 dígitos.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_FORMAT": "Insira uma fração válida (por exemplo, 5/3 ou 1 2/3)", + "I18N_INTERACTIONS_FRACTIONS_NON_EMPTY": "Insira um valor de fração não vazio.", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "Por favor, insira uma fração válida de resposta (por exemplo, 5/3 ou 1 2/3)", + "I18N_INTERACTIONS_FRACTIONS_PROPER_FRACTION": "Por favor, insira uma parte fracionária \"própria\" de resposta (por exemplo, 1 2/3 em vez de 5/3)", + "I18N_INTERACTIONS_FRACTIONS_SIMPLEST_FORM": "Insira sua resposta na forma irredutível (por exemplo, 1/3 em vez de 2/6)", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "Adicionar aresta", "I18N_INTERACTIONS_GRAPH_ADD_NODE": "Adicionar nó", "I18N_INTERACTIONS_GRAPH_DELETE": "Apagar", "I18N_INTERACTIONS_GRAPH_EDGE_FINAL_HELPTEXT": "Toque no vértice de destino para criar borda (clique no mesmo vértice para cancelar a criação de borda).", "I18N_INTERACTIONS_GRAPH_EDGE_INITIAL_HELPTEXT": "Toque no vértice inicial da borda para criar.", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "Gráfico inválido!", + 
"I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "Criar um gráfico", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "Ver gráfico", "I18N_INTERACTIONS_GRAPH_MOVE": "Mover", "I18N_INTERACTIONS_GRAPH_MOVE_FINAL_HELPTEXT": "Toque em qualquer ponto para mover o vértice até esse ponto.", "I18N_INTERACTIONS_GRAPH_MOVE_INITIAL_HELPTEXT": "Toque no vértice para se mover.", @@ -247,49 +539,90 @@ "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "e <[vertices]> vértices", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Atualizar rótulo", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Atualizar peso", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Clique na imagem", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Selecione uma imagem para exibir]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Pode selecionar outras opções.", - "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Por favor, selecione pelo menos uma opção.} other{Por favor, selecione pelo menos # opções.}}", + "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Por favor, selecione todas as opções corretas.} other{Por favor, selecione pelo menos # opções.}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{Somente uma opção pode ser selecionada.} other{No máximo # opções podem ser selecionadas.}}", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "Clique no mapa", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "Ver mapa", + "I18N_INTERACTIONS_MATH_EQ_INSTRUCTION": "Digite uma equação aqui.", "I18N_INTERACTIONS_MUSIC_CLEAR": "Limpar", + "I18N_INTERACTIONS_MUSIC_INSTRUCTION": "Arraste as notas para a pauta para formar uma sequência", + "I18N_INTERACTIONS_MUSIC_NARROW_INSTRUCTION": "Mostrar pauta da música", "I18N_INTERACTIONS_MUSIC_PLAY": "Tocar", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Tocar sequência marcada", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "Insira uma moeda válida (por exemplo, $5 ou Rs 5)", + 
"I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY_FORMAT": "Por favor, escreva unidades monetárias no começo", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_UNIT_CHARS": "Certifique-se de que a unidade contém apenas números, letras, (, ), *, ^, /, -", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_VALUE": "Certifique-se de que o valor seja uma fração ou um número", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "Formatos de unidade possíveis", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_DOT": "A resposta pode conter no máximo 15 dígitos (0–9) excluindo símbolos (. ou -).", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "A resposta pode conter apenas números (0-9) ou símbolos (.).", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "A resposta deve ser maior ou igual a zero. Não deve conter o símbolo de menos (-).", "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "Tem certeza de que deseja redefinir seu código?", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Cancelar", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Confirmação necessária", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Reiniciar código", + "I18N_INTERACTIONS_PENCILCODE_INSTRUCTION": "Edite o código. 
Clique em 'Reproduzir' para verificar!", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Mostrar editor de código", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "Insira uma proporção válida (por exemplo, 1:2 ou 1:2:3).", + "I18N_INTERACTIONS_RATIO_INCLUDES_ZERO": "Proporções não podem ter 0 como elemento.", + "I18N_INTERACTIONS_RATIO_INVALID_CHARS": "Escreva uma proporção que consista em dígitos separados por dois pontos (por exemplo, 1:2 ou 1:2:3).", + "I18N_INTERACTIONS_RATIO_INVALID_COLONS": "Sua resposta tem múltiplos dois-pontos (:) ao lado um do outro.", + "I18N_INTERACTIONS_RATIO_INVALID_FORMAT": "Insira uma proporção válida (por exemplo, 1:2 ou 1:2:3).", + "I18N_INTERACTIONS_RATIO_NON_INTEGER_ELEMENTS": "Para esta pergunta, cada elemento em sua proporção deve ser um número inteiro (não uma fração ou um decimal).", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Adicionar item", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Opa, parece que seu conjunto tem duplicatas!", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(Adicione um item por linha.)", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Nenhuma resposta dada.", "I18N_INTERACTIONS_SUBMIT": "Enviar", + "I18N_INTERACTIONS_TERMS_LIMIT": "O número de termos na resposta especificado pelo criador é <[termsCount]>", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Exibir o Oppia em:", + "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Boa tarde", + "I18N_LEARNER_DASHBOARD_ALL": "Todas", "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Editar metas", + "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Bronze", "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Aulas da comunidade", "I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "Metas concluídas", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "Completado", "I18N_LEARNER_DASHBOARD_COMPLETED_TO_INCOMPLETE_COLLECTIONS": "<[numberMoved]> das coleções que você completou foram movidas para a seção \"em andamento\", à medida que novas explorações foram adicionadas a elas!", + 
"I18N_LEARNER_DASHBOARD_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Continuar de onde você parou", "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Metas atuais", "I18N_LEARNER_DASHBOARD_EMPTY_COLLECTION_PLAYLIST": "Parece que não há coleções na sua lista \"Reproduzir mais tarde\". Vá até a biblioteca e construa sua própria lista de reprodução com curadoria!", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_COLLECTIONS": "Parece que você ainda não concluiu nenhuma coleção. Dirija-se à biblioteca para começar uma excitante nova coleção!", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_EXPLORATIONS": "Parece que você ainda não concluiu nenhuma exploração. Dirija-se à biblioteca para começar uma excitante nova exploração!", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_GOALS_SECTION": "Termine uma meta acima e veja seu progresso aqui quando tiver terminado!", - "I18N_LEARNER_DASHBOARD_EMPTY_CURRENT_GOALS_SECTION": "Hmm, aqui está um pouco vazio... Comece uma aula selecionando uma das metas abaixo!", - "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "Parece que não há explorações na sua lista \"Reproduzir mais tarde\". Vá até a biblioteca e construa sua própria lista de reprodução com curadoria!", + "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_STORIES_SECTION": "Visite a Sala de Aula para completar uma história nova e empolgante!", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Comece a aprender por ", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_HEADING": "Começar", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_SET_A_GOAL": "estabelecendo uma meta!", + "I18N_LEARNER_DASHBOARD_EMPTY_CURRENT_GOALS_SECTION": "Comece a aprender selecionando um objetivo abaixo!", + "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "Parece que não há explorações na sua lista \"Reproduzir mais tarde\". 
Vá até a biblioteca e construa sua própria lista de reprodução!", "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "Você ainda não tem nenhum tópico de feedback ativo. Seu feedback ajuda a melhorar a qualidade de nossas aulas. Você pode fazer isso iniciando qualquer uma de nossas lições e enviando seus valiosos comentários!", - "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "Parece que você não tem coleções parcialmente completas no momento. Dirija-se à biblioteca para começar uma excitante nova coleção!", + "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "Parece que você não tem coleções parcialmente completas no momento. Vá até a biblioteca para começar uma excitante nova coleção!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "Parece que você não tem explorações parcialmente completas no momento. Dirija-se à biblioteca para começar uma excitante nova exploração!", + "I18N_LEARNER_DASHBOARD_EMPTY_LEARN_SOMETHING_NEW_SECTION": "Parece que você atingiu o limite de seleção de metas. Vá até a biblioteca e conheça mais explorações.", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "Comece por ", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "Definir um objetivo permite que a Oppia forneça as melhores recomendações em seu painel que contribuem para sua jornada de aprendizagem.", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "estabelecendo um objetivo! ", "I18N_LEARNER_DASHBOARD_EMPTY_SUBSCRIPTIONS": "Parece que você ainda não se inscreveu em nenhum criador de conteúdo. Vá até a biblioteca para descobrir novos criadores e suas incríveis explorações!", + "I18N_LEARNER_DASHBOARD_EMPTY_SUGGESTED_FOR_YOU_SECTION": "Uau, você concluiu todas as lições do Tema! 
Verifique também nossas outras Explorações na página Lições da Comunidade ", + "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "Boa noite", "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "Reproduzido pela última vez", "I18N_LEARNER_DASHBOARD_FEEDBACK_SECTION": "Atualizações de Feedback", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "Responder", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_STATUS_CHANGE_MESSAGE": "Status alterado para '<[threadStatus]>'", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_WARNING": "Evite compartilhar informações pessoais, pois essa discussão é visível publicamente.", "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Metas", + "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "Ouro", + "I18N_LEARNER_DASHBOARD_HOME_SECTION": "Início", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "Incompleto", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "Em progresso", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "Parece que você ainda não experimentou nenhuma de nossas explorações.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Vamos começar esta emocionante jornada!", + "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Aprenda algo novo", + "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Bom dia", "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "Nova história disponível", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COLLECTIONS_FROM_PLAYLIST": "{numberNonexistent, plural, one{1 das coleções na sua lista 'Reproduzir mais tarde' já não está disponível. Pedimos desculpa por este incômodo} other{# das coleções na sua lista 'Reproduzir mais tarde' já não estão disponíveis. Pedimos desculpa por este incômodo}}", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_COLLECTIONS": "{numberNonexistent, plural, one{1 das coleções que concluiu já não está disponível. Pedimos desculpa por este incômodo} other{# das coleções que concluiu já não estão disponíveis. 
Pedimos desculpa por este incômodo}}", @@ -309,15 +642,22 @@ "I18N_LEARNER_DASHBOARD_RETURN_TO_FEEDBACK_THREADS_MESSAGE": "Retornar à lista de mensagens", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "Enviar", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Enviando...", + "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Prata", + "I18N_LEARNER_DASHBOARD_SKILLS": "Habilidades", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Progresso de Habilidade", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Histórias concluídas", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Subscrições", + "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Progresso:", "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "Atual:", "I18N_LEARNER_DASHBOARD_SUGGESTION_DESCRIPTION": "Breve descrição das alterações:", "I18N_LEARNER_DASHBOARD_SUGGESTION_NO_CURRENT_STATE": "Opa! Este estado não existe mais!", "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Sugerido:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Sugestão", "I18N_LEARNER_DASHBOARD_TOOLTIP": "Coleções são várias explorações relacionadas que devem ser concluídas em uma sequência.", + "I18N_LEARNER_DASHBOARD_VIEW": "Visualizar", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Ver sugestão", + "I18N_LEARNT_TOPIC": "<[topicName]> aprendido", + "I18N_LEARN_TOPIC": "Aprenda <[topicName]>", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Você completou este", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Já adicionado à playlist", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "Adicionar à Playlist 'Tocar Mais Tarde'", @@ -394,8 +734,12 @@ "I18N_LICENSE_TERMS_HEADING": "Termos de licença", "I18N_LOGOUT_LOADING": "Desconectando", "I18N_LOGOUT_PAGE_TITLE": "Sair", + "I18N_MATH_COURSE_DETAILS": "O curso de fundamentos matemáticos validado pela Oppia ensina os blocos de construção básicos da matemática, cobrindo conceitos essenciais como adição, multiplicação e frações. 
Depois de dominar esses conceitos básicos, você pode passar para lições mais avançadas! Cada tópico se baseia no anterior, portanto, você pode começar do início e concluir as lições a partir de qualquer nível de habilidade ou começar direto por um tópico específico se precisar de ajuda.", + "I18N_MATH_TOPICS_COVERED": "Comece do básico com nosso primeiro tópico, Valores Posicionais. Ou, se você quiser revisar um tema específico, pule para qualquer tópico e comece já!", "I18N_MODAL_CANCEL_BUTTON": "Cancelar", "I18N_MODAL_CONTINUE_BUTTON": "Continuar", + "I18N_NEXT_LESSON": "Próxima lição", + "I18N_NO": "Não", "I18N_ONE_SUBSCRIBER_TEXT": "Você tem 1 subscrito.", "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Parcerias", "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "exclusão de conta pendente", @@ -446,6 +790,7 @@ "I18N_PLAYER_GIVE_UP": "Desistir?", "I18N_PLAYER_GIVE_UP_TOOLTIP": "Clique aqui para a resposta.", "I18N_PLAYER_HINT": "Dicas", + "I18N_PLAYER_HINTS": "Dicas", "I18N_PLAYER_HINTS_EXHAUSTED": "Desculpe, estou sem dicas!", "I18N_PLAYER_HINT_IS_AVAILABLE": "Clique aqui para uma dica!", "I18N_PLAYER_HINT_NEED_A_HINT": "Precisa de uma dica?", @@ -495,6 +840,7 @@ "I18N_PLAYER_UNRATED": "Sem classificação", "I18N_PLAYER_VIEWS_TOOLTIP": "Visualizações", "I18N_PRACTICE_SESSION_PAGE_BREADCRUMB": "Sessão de treino", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "Idioma do Áudio", "I18N_PREFERENCES_BIO": "Biografia", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "Este campo é opcional. Qualquer coisa que você escrever aqui é público e visível para todos.", "I18N_PREFERENCES_BREADCRUMB": "Preferências", @@ -508,9 +854,13 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "Receba notícias e atualizações sobre este site", "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "Receber e-mails quando um criador que você tenha assinado publicar uma nova pesquisa", "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "Não foi possível adicionar você à nossa lista de discussão automaticamente. 
Favor visitar o link a seguir para se inscrever em nossa lista de discussão:", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "Exportar conta", + "I18N_PREFERENCES_EXPORT_ACCOUNT_INFO_TEXT": "Isso fará o download dos dados da sua conta Oppia como um arquivo de texto formatado em JSON.", + "I18N_PREFERENCES_EXPORT_ACCOUNT_WARNING_TEXT": "Por favor, não saia desta página. Seus dados estão sendo carregados e serão baixados como um arquivo de texto formatado em JSON após a conclusão. Se algo der errado, entre em contato", "I18N_PREFERENCES_HEADING": "Preferências", "I18N_PREFERENCES_HEADING_SUBTEXT": "Qualquer alteração que você fizer nesta página será salva automaticamente.", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "Você ainda não se inscreveu em nenhum criador de conteúdo. Clique no botão \"inscrever-se\" do seu autor favorito no perfil dele. Ao inscrever-se você será notificado por e-mail quando o autor postar uma nova lição.", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "Impacto", "I18N_PREFERENCES_PAGE_TITLE": "Alterar suas preferências de perfil - Oppia", "I18N_PREFERENCES_PICTURE": "Imagem", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "Idioma preferido de áudio", @@ -520,6 +870,7 @@ "I18N_PREFERENCES_PREFERRED_DASHBOARD_EXPLAIN": "Este é o painel que será mostrado por padrão na entrada.", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "Idiomas preferidos das Explorações", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "Esses idiomas serão selecionados por padrão quando você pesquisar na galeria por explorações.", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": "Selecione os idiomas de preferência", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "Idioma preferido do site", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "Este é o idioma no qual o site é mostrado.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "Idioma preferido do site", @@ -527,41 +878,68 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Arraste para cortar e 
redimensionar:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Erro: Não foi possível ler arquivo de imagem.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Enviar imagem do perfil", + "I18N_PREFERENCES_SEARCH_LABEL": "Pesquisar", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Selecione os idiomas preferidos", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "Idioma da página", "I18N_PREFERENCES_SUBJECT_INTERESTS": "Assuntos de interesse", + "I18N_PREFERENCES_SUBJECT_INTERESTS_ERROR_TEXT": "Os assuntos de interesse devem ser únicos e em letras minúsculas.", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "Por exemplo: matemática, ciência da computação, artes...", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "Adicione um novo assunto de interesse (usando letras minúsculas e espaços).", + "I18N_PREFERENCES_SUBJECT_INTERESTS_LABEL": "Novos Assuntos de interesse", "I18N_PREFERENCES_SUBJECT_INTERESTS_PLACEHOLDER": "Adicionar assuntos de interesse.", "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "Criadores nos quais você se inscreveu", "I18N_PREFERENCES_USERNAME": "Nome de usuário", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Ainda não selecionado", "I18N_PROFILE_NO_EXPLORATIONS": "Este usuário ainda não criou ou editou nenhuma exploração.", - "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Saiba mais sobre sua pontuação", + "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Detalhes da Pontuação", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Meu painel", - "I18N_QUESTION_PLAYER_NEW_SESSION": "Nova sessão", + "I18N_QUESTION_PLAYER_NEW_SESSION": "Repetir", "I18N_QUESTION_PLAYER_RETRY_TEST": "Repetir teste", "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "Voltar à História", "I18N_QUESTION_PLAYER_REVIEW_LOWEST_SCORED_SKILL": "reveja a habilidade com pontuação mais baixa", "I18N_QUESTION_PLAYER_SCORE": "Pontuação", "I18N_QUESTION_PLAYER_SKILL_DESCRIPTIONS": "Descrições de habilidades", - "I18N_QUESTION_PLAYER_TEST_FAILED": "Teste falhou. 
Por favor, revise as habilidades e tente novamente", - "I18N_QUESTION_PLAYER_TEST_PASSED": "Teste completo. Bem feito!", + "I18N_QUESTION_PLAYER_TEST_FAILED": "A sessão falhou. Por favor, revise as habilidades e tente novamente", + "I18N_QUESTION_PLAYER_TEST_PASSED": "Sessão completa. Muito bem!", + "I18N_REFRESHER_EXPLORATION_MODAL_BODY": "Parece que você está tendo problemas com essa pergunta. Você gostaria de tentar uma exploração curta para relembrar e voltar aqui depois de concluí-la?", + "I18N_REFRESHER_EXPLORATION_MODAL_TITLE": "Você gostaria de uma revisão?", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "Sessão de registro expirada", "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "Desculpe, sua sessão de registro expirou. Por favor, clique em \"Continuar Registro\" para reiniciar o processo", + "I18N_RESET_CODE": "Reiniciar código", "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Teste de revisão", - "I18N_SIDEBAR_ABOUT_LINK": "Sobre a Oppia", + "I18N_SAVE_PROGRESS": "Faça login ou registre-se para salvar seu progresso e continuar com a próxima lição.", + "I18N_SHARE_LESSON": "Compartilhe esta lição", + "I18N_SHOW_LESS": "Mostrar Menos", + "I18N_SHOW_MORE": "Mostrar Mais", + "I18N_SHOW_SOLUTION_BUTTON": "Mostrar Solução", + "I18N_SIDEBAR_ABOUT_LINK": "Sobre Nós", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "Sobre a Fundação Oppia", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM": "Sala de aula", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Matemática básica", "I18N_SIDEBAR_CONTACT_US": "Contate-nos", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "Nós estamos aqui para te ajudar com quaisquer dúvidas que você tenha.", "I18N_SIDEBAR_DONATE": "Doar", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "Suas contribuições ajudam a oferecer educação de qualidade para todos.", "I18N_SIDEBAR_FORUM": "Fórum", - "I18N_SIDEBAR_GET_STARTED": "Começar", + "I18N_SIDEBAR_GET_INVOLVED": "Participe", + "I18N_SIDEBAR_HOME": "Início", + "I18N_SIDEBAR_LEARN": "Aprenda", "I18N_SIDEBAR_LIBRARY_LINK": 
"Biblioteca", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "Fundamentos da Matemática", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "Lições simples e iniciais para te ajudar a aprender matemática.", "I18N_SIDEBAR_OPPIA_FOUNDATION": "A Fundação Oppia", "I18N_SIDEBAR_PARTNERSHIPS": "Parcerias", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "Leve educação de qualidade para estudantes na sua região.", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "Adição e Subtração", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "Biblioteca da Comunidade", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "Recursos adicionais feitos pela comunidade", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "Multiplicação", + "I18N_SIDEBAR_SUBMENU_PLACE_VALUES": "Valor Posicional", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "Veja Todas as Lições", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Ensine com Oppia", "I18N_SIDEBAR_VOLUNTEER": "Voluntários", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "Junte-se ao nosso time global para criar e melhorar lições.", "I18N_SIGNIN_LOADING": "Conectando", "I18N_SIGNIN_PAGE_TITLE": "Entrar", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Ao marcar a caixa à esquerda deste texto, você reconhece, concorda e aceita vincular-se aos Termos de Uso de <[sitename]>, encontrados aqui.", @@ -637,9 +1015,60 @@ "I18N_SPLASH_VOLUNTEERS_CONTENT": "Não importa quem você seja, você pode encontrar uma lugar na Oppia. Sempre precisamos de mais pessoas para melhorar as aulas sugerindo questões, contribuindo com gráficos ou traduzindo lições.", "I18N_SPLASH_VOLUNTEERS_TITLE": "Construído pela comunidade", "I18N_START_HERE": "Clique aqui para começar!", + "I18N_STORY_3M5VBajMccXO_DESCRIPTION": "Nesta história, vamos nos juntar a Matthew enquanto ele visita uma padaria para comprar um bolo. Infelizmente, ele não tem dinheiro suficiente para um bolo completo. Então, o Sr. Padeiro o ajuda dividindo o bolo escolhido por Matthew em pedaços menores que ele pode pagar. O que acontece depois? 
Jogue as lições para descobrir!", + "I18N_STORY_3M5VBajMccXO_TITLE": "Mateus Visita a Padaria", + "I18N_STORY_JhiDkq01dqgC_DESCRIPTION": "Junte-se a Ava e seu pai enquanto eles vão ao parque de diversões. Ajude-os usando seu conhecimento de expressões e equações para resolver os problemas que eles enfrentam!", + "I18N_STORY_JhiDkq01dqgC_TITLE": "Um Dia no Parque de Diversões", + "I18N_STORY_Qu6THxP29tOy_DESCRIPTION": "Aprenda a Somar e Subtrair com Maya, Omar e seu avô, enquanto eles fazem pizza juntos!", + "I18N_STORY_Qu6THxP29tOy_TITLE": "Maya, Omar e Malik fazem uma pizza!", + "I18N_STORY_RRVMHsZ5Mobh_DESCRIPTION": "Nesta história, seguiremos Jaime e sua irmã Nic enquanto aprendem a representar e ler o valor de um número.", + "I18N_STORY_RRVMHsZ5Mobh_TITLE": "As Aventuras de Jaime no Fliperama", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - Concluído!", + "I18N_STORY_ialKSV0VYV0B_DESCRIPTION": "Conheça James e seu tio enquanto eles descobrem como podem usar proporções para fazer bebidas deliciosas!", + "I18N_STORY_ialKSV0VYV0B_TITLE": "As Aventuras com Vitaminas de James", + "I18N_STORY_rqnxwceQyFnv_DESCRIPTION": "Junte-se a Nina enquanto ela usa técnicas de divisão para ajudar sua mãe e Sandra no mercado!", + "I18N_STORY_rqnxwceQyFnv_TITLE": "Nina Visita o Mercado", + "I18N_STORY_vfJDB3JAdwIx_DESCRIPTION": "Junte-se a Aria e seu pai Omar enquanto eles usam técnicas de multiplicação para plantar sementes em sua horta!", + "I18N_STORY_vfJDB3JAdwIx_TITLE": "Aria quer plantar uma horta", "I18N_SUBSCRIBE_BUTTON_TEXT": "Inscrever", + "I18N_SUBTOPIC_0abdeaJhmfPm_adding-fractions_TITLE": "Somando Frações", + "I18N_SUBTOPIC_0abdeaJhmfPm_comparing-fractions_TITLE": "Comparando Frações", + "I18N_SUBTOPIC_0abdeaJhmfPm_dividing-fractions_TITLE": "Dividindo Frações", + "I18N_SUBTOPIC_0abdeaJhmfPm_equivalent-fractions_TITLE": "Frações Equivalentes", + "I18N_SUBTOPIC_0abdeaJhmfPm_fractions-of-a-group_TITLE": "Frações de um Grupo", + 
"I18N_SUBTOPIC_0abdeaJhmfPm_mixed-numbers_TITLE": "Números Mistos", + "I18N_SUBTOPIC_0abdeaJhmfPm_multiplying-fractions_TITLE": "Multiplicando Frações", + "I18N_SUBTOPIC_0abdeaJhmfPm_number-line_TITLE": "A Reta Numérica", + "I18N_SUBTOPIC_0abdeaJhmfPm_subtracting-fractions_TITLE": "Subtraindo Frações", + "I18N_SUBTOPIC_0abdeaJhmfPm_what-is-a-fraction_TITLE": "O que é uma Fração?", + "I18N_SUBTOPIC_5g0nxGUmx5J5_calculations-with-ratios_TITLE": "Cálculos com Proporções", + "I18N_SUBTOPIC_5g0nxGUmx5J5_combining-ratios_TITLE": "Combinando Proporções", + "I18N_SUBTOPIC_5g0nxGUmx5J5_equivalent-ratios_TITLE": "Proporções Equivalentes", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "O que é uma Proporção?", + "I18N_SUBTOPIC_C4fqwrvqWpRm_basic-concepts_TITLE": "Conceitos Básicos de Multiplicação", + "I18N_SUBTOPIC_C4fqwrvqWpRm_memorizing-expressions_TITLE": "Memorizando Expressões de Multiplicação", + "I18N_SUBTOPIC_C4fqwrvqWpRm_multiplication-techniques_TITLE": "Técnicas de multiplicação", + "I18N_SUBTOPIC_C4fqwrvqWpRm_rules-to-simplify_TITLE": "Regras para simplificar a multiplicação", "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Próxima habilidade", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "Habilidade Anterior:", + "I18N_SUBTOPIC_dLmjjMDbCcrf_algebraic-expressions_TITLE": "Simplificando expressões algébricas", + "I18N_SUBTOPIC_dLmjjMDbCcrf_modelling-scenarios_TITLE": "Modelando cenários do mundo real usando equações", + "I18N_SUBTOPIC_dLmjjMDbCcrf_order-of-operations_TITLE": "Ordem das Operações", + "I18N_SUBTOPIC_dLmjjMDbCcrf_problem-solving_TITLE": "Estratégias de Resolução de Problemas", + "I18N_SUBTOPIC_dLmjjMDbCcrf_solving-equations_TITLE": "Manipulando e resolvendo equações", + "I18N_SUBTOPIC_dLmjjMDbCcrf_variables_TITLE": "Representando Valores Desconhecidos com Variáveis", + "I18N_SUBTOPIC_iX9kYCjnouWN_comparing-numbers_TITLE": "Comparando Números", + "I18N_SUBTOPIC_iX9kYCjnouWN_naming-numbers_TITLE": "Nomeando Números", + 
"I18N_SUBTOPIC_iX9kYCjnouWN_place-names-and-values_TITLE": "Os Nomes das Casas e seus Valores", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE": "Arredondando Números", + "I18N_SUBTOPIC_qW12maD4hiA8_basic-concepts_TITLE": "Conceitos básicos de Divisão", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "Resolução de Problemas", + "I18N_SUBTOPIC_qW12maD4hiA8_techniques-of-division_TITLE": "Técnicas de Divisão", + "I18N_SUBTOPIC_sWBXKH4PZcK6_adding-numbers_TITLE": "Somando Números", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE": "Relação entre Adição e Subtração", + "I18N_SUBTOPIC_sWBXKH4PZcK6_estimation_TITLE": "Estimativa", + "I18N_SUBTOPIC_sWBXKH4PZcK6_sequences _TITLE": "Sequências", + "I18N_SUBTOPIC_sWBXKH4PZcK6_subtracting-numbers_TITLE": "Subtraindo Números", "I18N_TEACH_BENEFITS_ONE": "Aprendizado efetivo e de alta qualidade para todas as idades", "I18N_TEACH_BENEFITS_THREE": "Sempre grátis e fácil de usar", "I18N_TEACH_BENEFITS_TITLE": "Nossos benefícios", @@ -662,52 +1091,98 @@ "I18N_TEACH_TESTIMONIAL_2": "\"Oppia é o primeiro do seu tipo! Ele ajuda estudantes a aprender tudo que eles precisam sobre um tópico de uma forma atraente e envolvente; ele também encoraja eles a usarem dispositivos inteligentes para seu próprio bem.\"", "I18N_TEACH_TESTIMONIAL_3": "“Eu nunca esperei que os alunos fossem aprender tecnologia e fazer aulas de matemática tão rápido. É sua primeira exposição à tecnologia inteligente e eles estavam realmente lutando para lidar com isso no início. Agora, eu me sinto tão feliz em vê-los fazendo as aulas da Oppia antes mesmo de eu entrar na classe!”", "I18N_THANKS_PAGE_BREADCRUMB": "Obrigado", + "I18N_TIME_FOR_BREAK_BODY_1": "Parece que você está enviando respostas muito rapidamente. Você está começando a ficar cansado?", + "I18N_TIME_FOR_BREAK_BODY_2": "Se sim, considere fazer uma pausa! 
Você pode voltar mais tarde.", + "I18N_TIME_FOR_BREAK_FOOTER": "Estou pronto para continuar a lição", + "I18N_TIME_FOR_BREAK_TITLE": "Precisa de uma pausa?", + "I18N_TOPIC_0abdeaJhmfPm_DESCRIPTION": "Muitas vezes você precisará falar sobre partes de um objeto: uma receita pode pedir meia xícara de farinha ou você pode derramar parte de uma garrafa de leite. Neste tópico, você aprenderá a usar frações para entender e descrever situações como essas.", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "Frações", + "I18N_TOPIC_5g0nxGUmx5J5_DESCRIPTION": "As proporções são úteis para calcular quantos ingredientes usar se você tiver uma receita para quatro pessoas, mas quiser cozinhar para duas. Neste tópico, você aprenderá a usar proporções para comparar facilmente o tamanho de uma coisa com outra.", + "I18N_TOPIC_5g0nxGUmx5J5_TITLE": "Razão e Proporção", + "I18N_TOPIC_C4fqwrvqWpRm_DESCRIPTION": "Se você comprasse 60 caixas com cinco bolos cada, quantos bolos você teria no total? Neste tópico, você aprenderá como usar a multiplicação para resolver problemas como este (sem ter que somar muitos números a cada vez!).", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "Multiplicação", + "I18N_TOPIC_LEARN": "Aprenda", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 lição} other{# lições}}", + "I18N_TOPIC_TITLE": "Tópico", "I18N_TOPIC_VIEWER_CHAPTER": "Capítulo", "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{1 capítulo} other{# capítulos}}", + "I18N_TOPIC_VIEWER_COMING_SOON": "Em Breve", + "I18N_TOPIC_VIEWER_COMING_SOON_LESSONS": "Volte mais tarde quando as lições estiverem disponíveis para este tópico.", + "I18N_TOPIC_VIEWER_COMING_SOON_PRACTICE": "Volte mais tarde quando as perguntas para praticar estiverem disponíveis para este tópico.", "I18N_TOPIC_VIEWER_DESCRIPTION": "Descrição", "I18N_TOPIC_VIEWER_LESSON": "Lição", "I18N_TOPIC_VIEWER_LESSONS": "Lições", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "Volte mais tarde quando as lições estiverem disponíveis para este tópico.", 
"I18N_TOPIC_VIEWER_MASTER_SKILLS": "Habilidades principais para <[topicName]>", + "I18N_TOPIC_VIEWER_NO_QUESTION_WARNING": "Ainda não há perguntas criadas para o(s) subtópico(s) selecionado(s).", "I18N_TOPIC_VIEWER_PRACTICE": "Prática", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_TAG": "(Beta)", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "Volte mais tarde quando as perguntas para praticar estiverem disponíveis para este tópico.", "I18N_TOPIC_VIEWER_REVISION": "Revisão", - "I18N_TOPIC_VIEWER_SELECT_SKILLS": "Selecione habilidades para praticar seus conhecimentos sobre <[topicName]>.", + "I18N_TOPIC_VIEWER_SELECT_SKILLS": "Selecione as habilidades das lições de <[topicName]> que você gostaria de praticar.", "I18N_TOPIC_VIEWER_SKILL": "Habilidade", "I18N_TOPIC_VIEWER_SKILLS": "Habilidades", "I18N_TOPIC_VIEWER_START_PRACTICE": "Iniciar", "I18N_TOPIC_VIEWER_STORIES": "Histórias", + "I18N_TOPIC_VIEWER_STORIES_HEAD": "Histórias que Você Pode Reproduzir", "I18N_TOPIC_VIEWER_STORY": "História", "I18N_TOPIC_VIEWER_STUDY_SKILLS": "Habilidades de estudo para <[topicName]>", "I18N_TOPIC_VIEWER_STUDY_SKILLS_SUBTITLE": "Use os seguintes Cartões de Revisão para ajudá-lo a estudar habilidades sobre <[topicName]>.", "I18N_TOPIC_VIEWER_VIEW_ALL": "Ver tudo", "I18N_TOPIC_VIEWER_VIEW_LESS": "Ver menos", + "I18N_TOPIC_dLmjjMDbCcrf_DESCRIPTION": "Muitas vezes, você precisará resolver problemas com números desconhecidos -- por exemplo, se você comprou um item que está em promoção e deseja descobrir o preço original. Neste tópico, você aprenderá como fazer isso com equações, expressões e fórmulas.", + "I18N_TOPIC_dLmjjMDbCcrf_TITLE": "Expressões e Equações", + "I18N_TOPIC_iX9kYCjnouWN_DESCRIPTION": "Você sabia que todos os números possíveis de coisas podem ser expressos usando apenas dez dígitos (0,1,2,3,...,9)? 
Neste tópico, aprenderemos como podemos usar valores posicionais para fazer isso e veremos por que \"5\" tem um valor diferente em \"25\" e \"2506\".", + "I18N_TOPIC_iX9kYCjnouWN_TITLE": "Valores Posicionais", + "I18N_TOPIC_qW12maD4hiA8_DESCRIPTION": "Se você tivesse trinta e dois tomates para dividir entre quatro pessoas, quantos tomates cada pessoa deveria receber? Neste tópico, você aprenderá a usar a divisão para descobrir como dividir algo em partes.", + "I18N_TOPIC_qW12maD4hiA8_TITLE": "Divisão", + "I18N_TOPIC_sWBXKH4PZcK6_DESCRIPTION": "Se você tivesse quatro ovos e seu amigo lhe desse mais 37, quantos ovos você teria no total? E se você perdesse oito? Neste tópico, você aprenderá a resolver problemas como esses com as habilidades básicas de adição e subtração.", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "Adição e Subtração", "I18N_TOPNAV_ABOUT": "Sobre", "I18N_TOPNAV_ABOUT_OPPIA": "Sobre o Oppia", "I18N_TOPNAV_ADMIN_PAGE": "Página Administrativa", + "I18N_TOPNAV_ANDROID_APP_DESCRIPTION": "O aplicativo para Android da Oppia já está disponível em inglês e português brasileiro. 
Experimente e forneça feedback!", + "I18N_TOPNAV_ANDROID_APP_HEADING": "Aplicativo para Android", "I18N_TOPNAV_BLOG": "Blog", - "I18N_TOPNAV_CLASSROOM": "Sala de aula", - "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Matemática básica", + "I18N_TOPNAV_BLOG_DASHBOARD": "Painel do blog", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Matemática Básica", "I18N_TOPNAV_CONTACT_US": "Contate-nos", + "I18N_TOPNAV_CONTACT_US_DESCRIPTION": "Nós estamos aqui para te ajudar com quaisquer dúvidas que você tenha.", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Painel de contribuidor", "I18N_TOPNAV_CREATOR_DASHBOARD": "Painel do Criador", "I18N_TOPNAV_DONATE": "Doar", + "I18N_TOPNAV_DONATE_DESCRIPTION": "Suas contribuições ajudam a tornar a educação de qualidade disponível para todos.", "I18N_TOPNAV_FORUM": "Fórum", "I18N_TOPNAV_GET_INVOLVED": "Envolva-se", "I18N_TOPNAV_GET_STARTED": "Começar", + "I18N_TOPNAV_HOME": "Início", + "I18N_TOPNAV_LEARN": "Aprenda", "I18N_TOPNAV_LEARNER_DASHBOARD": "Painel do aprendiz", - "I18N_TOPNAV_LIBRARY": "Biblioteca", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "Lições simples e iniciais para te ajudar a aprender matemática.", + "I18N_TOPNAV_LEARN_HEADING": "Formas de saber mais", + "I18N_TOPNAV_LEARN_LINK_1": "Veja Todas as Lições", + "I18N_TOPNAV_LEARN_LINK_2": "Continuar Aprendendo", + "I18N_TOPNAV_LIBRARY": "Biblioteca da Comunidade", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "Lições adicionais feitas pela comunidade para te ajudar a aprender mais.", "I18N_TOPNAV_LOGOUT": "Sair", "I18N_TOPNAV_MODERATOR_PAGE": "Página do Moderador", "I18N_TOPNAV_OPPIA_FOUNDATION": "A Fundação Oppia", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Manual de Participação", - "I18N_TOPNAV_PARTNERSHIPS": "Parcerias", + "I18N_TOPNAV_PARTNERSHIPS": "Escolas e Organizações", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "Seja um parceiro e introduza a Oppia para a sua escola, comunidade, ou região.", "I18N_TOPNAV_PREFERENCES": "Preferências", "I18N_TOPNAV_SIGN_IN": "Entrar", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": 
"Entrar com o Google", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Ensine com Oppia", "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "Painel de tópicos e habilidades", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "Experimente hoje!", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "Junte-se ao nosso time global para criar e melhorar lições.", "I18N_TOTAL_SUBSCRIBERS_TEXT": "Você tem um total de <[totalSubscribers]> subscrições.", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Cancelar inscrição", + "I18N_VIEW_ALL_TOPICS": "Ver todos os tópicos de <[classroomName]>", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Voluntário", - "I18N_WORKED_EXAMPLE": "Exemplo de trabalho" + "I18N_WARNING_MODAL_DESCRIPTION": "Isso mostrará a solução completa. Tem certeza?", + "I18N_WARNING_MODAL_TITLE": "Aviso!", + "I18N_WORKED_EXAMPLE": "Exemplo de trabalho", + "I18N_YES": "Sim" } diff --git a/assets/i18n/pt.json b/assets/i18n/pt.json index e9b137e67ab1..e27064fe4a1e 100644 --- a/assets/i18n/pt.json +++ b/assets/i18n/pt.json @@ -1,4 +1,6 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Sobre a Fundação Oppia", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "Sobre a Fundação Oppia | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Criar uma exploração", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "sobre um tema do seu interesse.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Receber comentários", @@ -14,11 +16,16 @@ "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_8": "Quer seja professor do ensino primário ou secundário, um estudante universitário, ou um indivíduo apaixonado por um assunto específico que quer partilhar o seu conhecimento, o Oppia dá-lhe as boas-vindas. 
Junte-se à comunidade e comece a explorar connosco.", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "Publicar e Partilhar", "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE_TEXT": "as suas criações com a comunidade.", + "I18N_ABOUT_PAGE_AUDIO_SUBTITLES_FEATURE": "Legendas de áudio", "I18N_ABOUT_PAGE_BREADCRUMB": "Sobre", + "I18N_ABOUT_PAGE_CREATE_LESSON_CONTENT": "Com o sistema de criação de conteúdo da Oppia, pode facilmente criar e personalizar aulas sobre assuntos que o apaixonam.", "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "Ficha técnica", "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT": "Os colaboradores do Oppia vêm de todo o mundo — muitos de nós somos estudantes, formados recentes ou professores. Gostaríamos de agradecer aos seguintes colaboradores que ajudaram a criar a plataforma. Se quiser ajudar, saiba como se pode envolver!", "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT_BOTTOM": "A equipa de desenvolvimento do Oppia também agradece os comentários, ideias, ajuda e sugestões de <[listOfNames]>.", "I18N_ABOUT_PAGE_CREDITS_THANK_TRANSLATEWIKI": "Também gostaríamos de agradecer ao translatewiki.net o fornecimento de traduções por voluntários.", + "I18N_ABOUT_PAGE_EASILY_CREATE_LESSON": "Criar aulas facilmente", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "Explorar aulas criadas pela comunidade", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS_CONTENT": "Educadores e membros da comunidade de todo o mundo usam a plataforma de criação de aulas da Oppia como uma forma de criar e partilhar lições. Pode encontrar mais de 20 000 aulas sobre 17 assuntos diferentes na nossa biblioteca de Exploração e talvez se sinta inspirado a criar as suas próprias!", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "Donativos", "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "Participar", "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "A fundação Oppia", @@ -28,6 +35,7 @@ "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4": "Os diretores da fundação são Ben Henning, Jacob Davis e Sean Lip. 
Os estatutos e atas da fundação estão disponíveis para leitura pública. A fundação pode ser contactada por correio eletrónico em: admin@oppia.org.", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4_HEADING": "Diretores", "I18N_ABOUT_PAGE_HEADING": "Oppia: educação para todos", + "I18N_ABOUT_PAGE_LANGUAGE_FEATURE": "Tradução para dialetos locais", "I18N_ABOUT_PAGE_LEARN_BUTTON": "Quero aprender", "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "O que quer fazer hoje?", "I18N_ABOUT_PAGE_TABS_ABOUT": "Sobre", @@ -40,10 +48,14 @@ "I18N_ACTION_BROWSE_LESSONS": "Percorrer as nossas lições", "I18N_ACTION_CREATE_EXPLORATION": "Criar uma exploração", "I18N_ACTION_CREATE_LESSON": "Criar a sua lição", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_DESCRIPTION": "O nome inserido aqui aparecerá como o nome do autor nas postagens do blog que você criará. A biografia será exibida na página de postagem do blog específica do autor.", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "Edite o nome e a biografia do autor", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Este cartão é bastante longo e os alunos poderão desinteressar-se. 
Pondere encurtá-lo ou dividi-lo em dois.", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_TEXT": "Faça um teste de 10 a 15 perguntas para descobrir por onde começar.", "I18N_CLASSROOM_PAGE_HEADING": "A Página da Classe Oppia", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Explore mais lições", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Criado pela Comunidade", + "I18N_CLASSROOM_PAGE_TAKE_A_TEST_BUTTON": "Faça um teste", "I18N_CONTACT_PAGE_BREADCRUMB": "Contacto", "I18N_CONTACT_PAGE_HEADING": "Participe!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Obrigado pelo seu interesse em ajudar o projeto Oppia!", @@ -91,7 +103,7 @@ "I18N_DASHBOARD_EXPLORATIONS_SORT_BY": "Ordenar por", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_AVERAGE_RATING": "Avaliação média", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_CATEGORY": "Categoria", - "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_UPDATED": "Ultima atualização", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_UPDATED": "Última atualização", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_OPEN_FEEDBACK": "Comentários em aberto", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TITLE": "Título", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TOTAL_PLAYS": "Total de reproduções", @@ -111,6 +123,11 @@ "I18N_DASHBOARD_TABLE_HEADING_UNRESOLVED_ANSWERS": "Respostas não resolvidas", "I18N_DASHBOARD_TOPICS_AND_SKILLS_DASHBOARD": "Consola de temas e competências", "I18N_DELETE_ACCOUNT_PAGE_BREADCRUMB": "Apagar conta", + "I18N_DIAGNOSTIC_TEST_RESULT_HEADER_TEXT": "Teste concluído. Bem feito!", + "I18N_DIAGNOSTIC_TEST_RESULT_START_TOPIC": "Iniciar <[topicName]>", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_1_FOR_NO_TOPIC": "Bom trabalho! Parece que você já tem uma boa compreensão dos tópicos da Sala de aula de matemática.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_2_FOR_NO_TOPIC": "Sinta-se à vontade para passar por qualquer uma das lições para revisar ou melhorar o que você sabe. 
Estamos constantemente atualizando o Classroom com novas lições, então verifique novamente.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_TWO_TOPICS": "Com base em suas respostas, recomendamos começar com qualquer um desses tópicos.", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Arraste uma imagem para esta área", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Carregar um ficheiro", "I18N_DONATE_PAGE_BREADCRUMB": "Donativos", @@ -304,7 +321,7 @@ "I18N_LIBRARY_GROUPS_SOCIAL_SCIENCE": "Ciências Sociais", "I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS": "Explorações mais bem classificadas", "I18N_LIBRARY_INCOMPLETE_ACTIVITY_ICON": "Concluiu parcialmente esta atividade.", - "I18N_LIBRARY_LAST_UPDATED": "Ultima atualização", + "I18N_LIBRARY_LAST_UPDATED": "Última atualização", "I18N_LIBRARY_LOADING": "A carregar", "I18N_LIBRARY_MAIN_HEADER": "Imagine o que poderia aprender hoje...", "I18N_LIBRARY_N/A": "N/D", @@ -455,7 +472,6 @@ "I18N_SIDEBAR_CONTACT_US": "Contacte-nos", "I18N_SIDEBAR_DONATE": "Fazer donativo", "I18N_SIDEBAR_FORUM": "Fórum", - "I18N_SIDEBAR_GET_STARTED": "Começar", "I18N_SIDEBAR_LIBRARY_LINK": "Biblioteca", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Oppia Foundation", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Ensinar com o Oppia", diff --git a/assets/i18n/qqq.json b/assets/i18n/qqq.json index dd4105712397..3b75de319ec8 100644 --- a/assets/i18n/qqq.json +++ b/assets/i18n/qqq.json @@ -1,5 +1,6 @@ { - "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Text displayed in the About foundation page. - Text shown in the top left corner of the nav bar.", + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Text displayed in the About the Oppia Foundation page. 
- Text shown in the top left corner of the nav bar.", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "Title displayed on the browser tab when on the about foundation page.", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Text shown at the bottom of the About section of Oppia's About page.", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "Text shown at the bottom of the About section of Oppia's About page. - This text is a continuation of the Create Text, encouraging users to create new explorations.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Text shown at the bottom of the About section of Oppia's About page.", @@ -54,6 +55,7 @@ "I18N_ABOUT_PAGE_TITLE": "Title of the About page that explains Oppia mission and the Oppia foundation.", "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "Title of the first section of about page indicating to get started.", "I18N_ABOUT_PAGE_WIFI_FEATURE": "Text below fourth icon in second section of Oppia's About page telling about the feature of low bandwidth requirement.", + "I18N_ACTION_ACCESS_ANDROID_APP": "Text displayed inside a button in the splash page. Clicking on it directs the user to the Google play store URL for the Oppia Android app.", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Text displayed in a button on the Teach page. - When the user clicks the button they are taken to a form that allows them to apply to the 'Teach with Oppia' program. The 'Teach with Oppia' text should be in English.", "I18N_ACTION_BROWSE_EXPLORATIONS": "Text displayed in a button on the Home and Teach pages. - When the user clicks the button they are taken to the library page.", "I18N_ACTION_BROWSE_LESSONS": "Text displayed in a button on the Home page. - When the user clicks the button they are taken to the library page. 
The text in the button needs to be less than 29 characters long.", @@ -65,6 +67,36 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "First button in last section of Oppia's About page targeting guide for teachers page.", "I18N_ACTION_TIPS_FOR_PARENTS": "Second button in last section of Oppia's about page targeting tips for parents and guardians page.", "I18N_ACTION_VISIT_CLASSROOM": "Button below paragraph in first and fourth section in Oppia's About page targeting the classroom.", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Cancel button text on the add new syllabus items section on the edit learner group page.", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "Text of button to be clicked when user is done adding new syllabus items on the add new syllabus items section on the edit learner group page.", + "I18N_ADD_NEW_SYLLABUS_ITEMS": "Button text of the add new syllabus items button on the syllabus tab of the edit learner group page.", + "I18N_ADD_SYLLABUS_DESCRIPTION_TEXT": "Text displayed to explain the add syllabus feature while adding new syllabus items to learner group.", + "I18N_ADD_SYLLABUS_SEARCH_PLACEHOLDER": "Placeholder text that appears in the search bar while adding new syllabus items to learner group.", + "I18N_ANDROID_PAGE_AVAILABLE_FOR_DOWNLOAD_TEXT": "Text displayed on the android page to tell the user that the app is available for download.", + "I18N_ANDROID_PAGE_BETA_DESCRIPTION": "Text displayed on the android page to tell the user that the beta version of the app is available.", + "I18N_ANDROID_PAGE_CONSENT_CHECKBOX_LABEL": "Text displayed next to the checkbox on the android page that the user needs to check in order to subscribe to emails regarding the app.", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "Text displayed next to the email field on the android page where the user can enter their email address.", + "I18N_ANDROID_PAGE_FEATURES_SECTION_HEADER": "Header text for the features section of the android page.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_1": 
"Text displayed on the android page to tell the user about the feature of the app.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_2": "Text displayed on the android page to tell the user about the feature of the app.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_3": "Text displayed on the android page to tell the user about the feature of the app.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_4": "Text displayed on the android page to tell the user about the feature of the app.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_5": "Text displayed on the android page to tell the user about the feature of the app.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_1": "Text displayed on the android page to tell the user about a feature of the app.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_2": "Text displayed on the android page to tell the user about a feature of the app.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_3": "Text displayed on the android page to tell the user about a feature of the app.", + "I18N_ANDROID_PAGE_FEATURE_TEXT_4": "Text displayed on the android page to tell the user about a feature of the app.", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Label for the name field on the android page.\n{{identical|Name}}", + "I18N_ANDROID_PAGE_SUPPORT_TEXT": "Text displayed on the android page letting users know what version of android the app supports.", + "I18N_ANDROID_PAGE_TITLE": "Title displayed on the browser tab when on the android page.", + "I18N_ANDROID_PAGE_UPDATES_MAIN_TEXT": "Text displayed on the android page to tell the user about how to get updates on the app.", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "Text displayed on the submit button to subscribe to email updates on the android page.", + "I18N_ANDROID_PAGE_UPDATES_SUBTEXT": "Text displayed on the android page to tell the user about how to get updates on the app.", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_0": "Text displayed as a response when the learner misspells the answer.", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_1": "Text displayed as a 
response when the learner misspells the answer.", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_2": "Text displayed as a response when the learner misspells the answer.", + "I18N_ASSIGNED_STORIES_AND_SKILLS": "Syllabus tab title on the edit learner group page.", + "I18N_ASSIGNED_STORIES_AND_SKILLS_EMPTY_MESSAGE": "Text displayed to user when no skills or stories are assigned to the learner under group syllabus.", "I18N_ATTRIBUTION_HTML_STEP_ONE": "Step one of attributing Oppia in HTML", "I18N_ATTRIBUTION_HTML_STEP_TWO": "Step two of attributing Oppia in HTML", "I18N_ATTRIBUTION_HTML_TITLE": "Title of the attribution section that explains how to attribute Oppia in HTML", @@ -72,13 +104,35 @@ "I18N_ATTRIBUTION_PRINT_STEP_TWO": "Step two of attributing Oppia in Print", "I18N_ATTRIBUTION_PRINT_TITLE": "Title of the attribution section that explains how to attribute Oppia in Print", "I18N_ATTRIBUTION_TITLE": "Title of the modal that lists the various ways in which Oppia can be attributed.", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_BREADCRUMB": "Text displayed in the Blog Author Profile Page. - Text shown in the top left corner of the nav bar.", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TITLE": "Title displayed on the browser tab when on the Blog Author Profile.", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TOTAL_POSTS_DISPLAY": "Text displayed in the blog author profile page - Text that displays the total number of blog posts written by the author on the author profile page.", "I18N_BLOG_CARD_PREVIEW_CONTEXT": "Text displayed in the blog card preview modal- Text that explains the purpose of the blog card in the blog card preview modal.", "I18N_BLOG_CARD_PREVIEW_HEADING": "Heading of the blog card preview modal in the blog post editor page.", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_EXPLAIN_TEXT": "Text displayed in the blog dashboard page's author detail model editor. 
- Text shown below the text entry for the author biography.", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_HEADING": "Heading beside the author's bio in author details editor in blog dashboard page.\n{{Identical|Bio}}", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_DESCRIPTION": "Description about the author name and bio on author details editor in the blog dashboard.", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "Heading of the blog author details editor modal in the blog dashboard page which allows the author to enter their publicly viewable name and bio.", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "Heading beside the author's name in author details editor in blog dashboard page.\n{{Identical|Name}}", "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "Button text on Oppia's Blog Dashboard Page - Button displayed when user has not created any blog post yet. Clicking on button takes user to new blog post editor.", "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "Text Displayed on Oppis's Blog Dashboard Page - Text that says that the user has not created any blog post before and asks to create one.", "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Button on Oppia's Blog Dashboard Page. - When user clicks on the button a new blog post is created.", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "Button on Oppia's Blog Dashboard Page. - When user clicks on the button author name and author bio is saved after edit.\n{{Identical|Save}}", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Tab button on Oppia's Blog Dashboard Page. -When user clicks on the button, all the blog post cards for blog posts which are yet to be published are visible.", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Tab button on Oppia's Blog Dashboard Page. -When user clicks on the button, all the blog post cards for blog posts published by the user are visible.", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Text displayed in the Blog Homepage page. 
- Text shown in the top left corner of the nav bar.\n{{identical|Blog}}", + "I18N_BLOG_HOME_PAGE_NO_RESULTS_FOUND": "Text displayed in the blog home page - Text that says no results are found if blog posts related to search are not found", + "I18N_BLOG_HOME_PAGE_OPPIA_DESCRIPTION": "Text displayed in the blog home page - Subheading below Blog Homepage heading describing Oppia community.", + "I18N_BLOG_HOME_PAGE_POSTS_HEADING": "Text displayed in the blog home page - Heading above latest posts being shown on blog homepage.", + "I18N_BLOG_HOME_PAGE_POSTS_NUMBER_DISPLAY": "Text displayed in the blog home page - Text that displays the number of blog posts being displayed on the blog homepage out of total blog posts.", + "I18N_BLOG_HOME_PAGE_QUERY_SEARCH_HEADING": "Text displayed in the blog home page - Heading above search query field in blog homepage.", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "Text displayed in the blog home page - Heading above tags filter field in blog homepage.", + "I18N_BLOG_HOME_PAGE_TAG_FILTER_HOLDER_TEXT": "Text displayed in the blog home page - Placeholder Text inside tags filter field in blog homepage.", + "I18N_BLOG_HOME_PAGE_TITLE": "Title displayed on the browser tab when on the blog home page.", + "I18N_BLOG_HOME_PAGE_WELCOME_HEADING": "Text displayed in the blog home page - Heading on Blog Homepage-Welcome to the Oppia Blog!", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_HEADING": "Text displayed in the blog home page after search - Heading above search results being shown on blog homepage search results page.", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_DISPLAY": "Text displayed in the blog home page after search for blog posts is performed - Text that displays the number of blog posts being displayed on the search results page.", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_OUT_OF_TOTAL_DISPLAY": "Text displayed in the blog home page after search for blog posts is performed - Text that displays the number of blog posts being displayed on the 
search results page out of the total search results.", "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Text displayed in the blog post editor page. - Text of the button that allows the user to upload thumbnail image for the blog post.", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Text displayed in the blog post editor page. -Heading Text before the input field to enter blog post content.", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Button displayed in the blog post editor page. - Text of the cancel button of the dialog shown to upload a thumbnail image or in RTE.\n{{Identical|Cancel}}", @@ -94,6 +148,15 @@ "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "Text displayed in the blog post editor page. - Text below Tags heading which highlights the number of tags that can still be added.", "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "Text displayed in the blog post editor page. - Text beside the thumbnail uploader in blog post editor on small screens.", "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "Text displayed in the blog post editor page. -Heading Text before the input field to enter blog post heading.", + "I18N_BLOG_POST_EDITOR_TITLE_INVALID_CHARACTERS_ERROR": "Text displayed in the blog post editor page. -Error text below the input field when title field contains invalid characters.", + "I18N_BLOG_POST_EDITOR_TITLE_IS_DUPLICATE_ERROR": "Text displayed in the blog post editor page. -Error text below the input field when blog post with the given title already exists.", + "I18N_BLOG_POST_EDITOR_TITLE_MAX_LENGTH_ERROR": "Text displayed in the blog post editor page. -Error text below the input field to enter blog post heading in case the title exceeds maximum character limit.", + "I18N_BLOG_POST_EDITOR_TITLE_MIN_LENGTH_ERROR": "Text displayed in the blog post editor page. 
-Error text below the input field to enter blog post heading in case the title does not satisfy minimum required character limit.", + "I18N_BLOG_POST_PAGE_RECOMMENDATION_SECTON_HEADING": "Text displayed in the blog post page. - Heading of the blog post recommendation section.", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "Text displayed in the blog post page. - Heading Text beside tags under which the blog post is categorized.", + "I18N_BLOG_POST_PAGE_TITLE": "Title displayed on the browser tab when on the blog post page.", + "I18N_BLOG_POST_THUMBNAIL_ALLOWED_EXTENSIONS_PREFIX": "Text displayed in the blog post editor page. - Extensions of the image file that can be uploaded as a thumbnail image for the blog post.", + "I18N_BLOG_POST_THUMBNAIL_ALLOWED_MAX_FILE_SIZE": "Text displayed in the blog post editor page. - Maximum size of the image file that can be uploaded as a thumbnail image for the blog post.", "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "Text displayed in the blog post editor page. - Text of the dialog shown to upload a thumbnail when the user has uploaded an image that is too big and needs to be cropped.", "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Text displayed in the blog post editor page. - Error text of the dialog shown to upload a thumbnail image. This error is shown when the file uploaded by the user is not an image.", "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "Text displayed in the blog post editor page. 
- Text of the dailog that opens when the user clicks to upload thumbnail image for the blog post.", @@ -105,12 +168,57 @@ "I18N_CLASSROOM_CALLOUT_HEADING_1": "The header displayed in the callout to the classroom page which is present in the library page.", "I18N_CLASSROOM_CALLOUT_HEADING_2": "The subheading for the callout to the classroom page which is present in the library page.", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Text for description of the callout to the classroom page which is present in the library page.", + "I18N_CLASSROOM_MATH_TITLE": "The classroom name that is displayed on the classroom page.", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_HEADING": "The heading text is on the right tile of the math classroom page. It asks the learner whether they already know some math.", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_TEXT": "The text is on the right tile of the math classroom page. It guides the learner to take a quiz that contains 10-15 questions. The result of the quiz will help to find out the topic from where the learner can start learning.", + "I18N_CLASSROOM_PAGE_BEGIN_WITH_FIRST_TOPIC_BUTTON": "Text is displayed on a button that, when clicked, starts the first lesson from the math classroom page.", "I18N_CLASSROOM_PAGE_COMING_SOON": "The text displayed on a topic card in the classroom page when it is not published and inaccessible to the learner.", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "The header displayed above the course details section in the classroom page.", "I18N_CLASSROOM_PAGE_HEADING": "The header displayed above the classroom name in the classroom page.", + "I18N_CLASSROOM_PAGE_NEW_TO_MATH_HEADING": "The heading text is on the left tile of the math classroom page. It asks the learner, whether they are new to math.", + "I18N_CLASSROOM_PAGE_NEW_TO_MATH_TEXT": "The text is on the left tile of the math classroom page. 
It guides the learner to start with the basic math with our first topic.", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "The heading for the search bar at the bottom of the classroom page.", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "The subheading for the search bar at the bottom of the classroom page.", + "I18N_CLASSROOM_PAGE_TAKE_A_TEST_BUTTON": "The text is displayed on the right tile of the math classroom page. The text asks the learner to take a test. After clicking the button the diagnostic test session will start, which helps to recommend a topic to the learner.", + "I18N_CLASSROOM_PAGE_TITLE": "The title displayed on the browser tab when on a classroom page.", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "The header displayed above the topics list section in the classroom page.", + "I18N_COLLECTION_EDITOR_PAGE_TITLE": "The title displayed on the browser tab when on the collection editor page and the collection is not untitled.", + "I18N_COLLECTION_EDITOR_UNTITLED_COLLECTION_PAGE_TITLE": "The title displayed on the browser tab when on the collection editor page and the collection is untitled.", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "Part of heading displayed on collection player page - It is shown when explorations are not started in a collection.", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Part of heading displayed on collection player page - It is shown when explorations have been started in a collection.", + "I18N_COLLECTION_PLAYER_PAGE_FINISHED": "The heading displayed on collection player page - It is shown when all explorations have been finished by the user.", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "Message shown on preview card on collection player page.", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "The heading displayed on collection player page - It is shown when no explorations are present in the collection.", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "The title displayed on the browser tab when on the collection player page.", + 
"I18N_COMING_SOON": "Text displayed in the topic viewer on pages that aren't released yet.", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "The tag displayed in collection cards on the community library page - Shown if card is that of the collection.", + "I18N_COMPLETED_STORY": "Text displayed on the learner dashboard that marks a story as completed.", + "I18N_COMPLETE_CHAPTER": "Text displayed on the learner dashboard that prompts learner to complete a chapter in the topic.", + "I18N_CONCEPT_CARD_NEED_HELP": "Text shown in the modal to prompt the learner to view the contents of the concept card.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_1": "One of the three messages displayed to the learner for completing the first checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_2": "One of the three messages displayed to the learner for completing the first checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_3": "One of the three messages displayed to the learner for completing the first checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_1": "One of the three generic messages displayed to the learner for completing a checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_2": "One of the three generic messages displayed to the learner for completing a checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_3": "One of the three generic messages displayed to the learner for completing a checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_1": "One of the three messages displayed to the learner for completing half of the checkpoints of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_2": "One of the three messages displayed to the learner for completing half of the checkpoints of a lesson in story mode.", + 
"I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_3": "One of the three messages displayed to the learner for completing half of the checkpoints of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_1": "One of the three messages displayed to the learner for completing all but the last checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_2": "One of the three messages displayed to the learner for completing all but the last checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_3": "One of the three messages displayed to the learner for completing all but the last checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_1": "One of the three messages displayed to the learner for completing the second checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_2": "One of the three messages displayed to the learner for completing the second checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_3": "One of the three messages displayed to the learner for completing the second checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_1": "One of the three messages displayed to the learner for completing all but the last two checkpoints of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_2": "One of the three messages displayed to the learner for completing all but the last two checkpoints of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_TWO_REMAINING_3": "One of the three messages displayed to the learner for completing all but the last two checkpoints of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "One of the six titles displayed on top of the congratulatory messages shown to the learner for completing a checkpoint of a lesson in story mode.", + 
"I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "One of the six titles displayed on top of the congratulatory messages shown to the learner for completing a checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_3": "One of the six titles displayed on top of the congratulatory messages shown to the learner for completing a checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "One of the six titles displayed on top of the congratulatory messages shown to the learner for completing a checkpoint of a lesson in story mode.\n{{Identical|Good}}", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "One of the six titles displayed on top of the congratulatory messages shown to the learner for completing a checkpoint of a lesson in story mode.", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "One of the six titles displayed on top of the congratulatory messages shown to the learner for completing a checkpoint of a lesson in story mode.", "I18N_CONTACT_PAGE_BREADCRUMB": "Text displayed in the Contact page. 
- Text shown in the top left corner of the nav bar.\n{{identical|Contact}}", "I18N_CONTACT_PAGE_HEADING": "Heading for Oppia's Contact Page.", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Text on Oppia's Contact Page - This text serves as a guide for people interested in getting involved with Oppia.", @@ -139,10 +247,13 @@ "I18N_CONTACT_PAGE_PARAGRAPH_8": "Text on Oppia's Contact Page - This text serves as a guide for people interested in getting involved with Oppia.", "I18N_CONTACT_PAGE_PARAGRAPH_9": "Text on Oppia's Contact Page - This text serves as a guide for people interested in getting involved with Oppia.", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "Heading for the text on Oppia's Contact page.", + "I18N_CONTACT_PAGE_TITLE": "Text displayed on the browser tab when on the contact page.", "I18N_CONTINUE_REGISTRATION": "Text displayed on a button that, when clicked, allows the user to continue the signup process.", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "Text displayed on a button at the bottom of the cookie banner. It allows the user to acknowledge and dismiss the banner.", "I18N_COOKIE_BANNER_EXPLANATION": "Text shown in the cookie banner at the bottom of the page.", "I18N_CORRECT_FEEDBACK": "Text shown in the congratulatory message shown to the learner when they get a correct answer.", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "Text displayed before the link to the newly created learner group url after creating a new learner group.", + "I18N_CREATE_ACCOUNT": "Text which appears on the Create Account button in the save progress menu of exploration lesson info modal.", "I18N_CREATE_ACTIVITY_QUESTION": "Text displayed as the first sentence in a dialog. - The dialog appears when the user clicks the I18N_CREATE_EXPLORATION_CREATE button.", "I18N_CREATE_ACTIVITY_TITLE": "Text displayed in the header of a dialog. - The dialog appears when the user clicks the I18N_CREATE_EXPLORATION_CREATE button.", "I18N_CREATE_COLLECTION": "Text of a button in the 'create new activity' dialog. 
- When the user clicks the button, a new collection is created and they are taken to the exploration editor page.", @@ -151,6 +262,8 @@ "I18N_CREATE_EXPLORATION_QUESTION": "Subtitle of a dialog. - The user is asked if he wants to create a new exploration. See I18N_CREATE_EXPLORATION_CREATE.", "I18N_CREATE_EXPLORATION_TITLE": "Title of a dialog. - see I18N_CREATE_EXPLORATION_CREATE", "I18N_CREATE_EXPLORATION_UPLOAD": "Text displayed in a button in the navigation bar\n{{Identical|Upload}}", + "I18N_CREATE_LEARNER_GROUP": "Text of the create new learner group button on the facilitator dashboard page.", + "I18N_CREATE_LEARNER_GROUP_PAGE_TITLE": "Title of the create new learner group page.", "I18N_CREATE_NO_THANKS": "Text of the cancel button of a dialog. - see I18N_CREATE_EXPLORATION_CREATE.\n{{Identical|No thanks}}", "I18N_CREATE_YES_PLEASE": "Text of the confirmation button of a dialog. - see I18N_CREATE_EXPLORATION_CREATE.", "I18N_CREATOR_IMPACT": "The parameter that defines the impact of the creator on the learners of Oppia.\n{{Identical|Impact}}", @@ -202,14 +315,100 @@ "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "Text on the Delete Account page that explains the details of user account deletion.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "Text on the Delete Account page that explains the details of user account deletion.", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "Text that explains to the user that clicking the button will redirect to a new page for user account deletion.", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "Text displayed on the browser tab when on the account deletion page.", + "I18N_DELETE_LEARNER_GROUP": "Delete learner group button text displayed in preferences tab on the edit learner group page.", + "I18N_DELETE_LEARNER_GROUP_MODAL_BODY_TEXT": "Confirmation message displayed in the modal body when the facilitator clicks on the delete learner group button on edit learner group page.", + "I18N_DEST_IF_STUCK_INFO_TOOLTIP": "You can now specify a new card in 
which you can walk the learners through the concepts used in the question, if they get really stuck!", + "I18N_DIAGNOSTIC_TEST_CURRENT_PROGRESS": "The text is displayed on the bottom right of the progress bar of the diagnostic test page. This text displays the progress percentage of the user in the diagnostic test.", + "I18N_DIAGNOSTIC_TEST_EXIT_TEST": "The text is displayed on the top left corner of the diagnostic test page. This guides the learner to quit the test.", + "I18N_DIAGNOSTIC_TEST_HEADING": "The text is displayed on the top center of the diagnostic test page. This text is the heading of the page.", + "I18N_DIAGNOSTIC_TEST_INTRO_TEXT_1": "Text displayed on the diagnostic test player page telling user that in the diagnostic test they will get a topic recommendation for getting started with Math lessons.", + "I18N_DIAGNOSTIC_TEST_INTRO_TEXT_2": "Text displayed on the diagnostic test player page telling user that they will not be able to change their answer after moving to the next question.", + "I18N_DIAGNOSTIC_TEST_RESULT_GO_TO_CLASSROOM_BUTTON_TEXT": "The text is displayed on the result page of the diagnostic test. This guides the learner to go back to the classroom.", + "I18N_DIAGNOSTIC_TEST_RESULT_HEADER_TEXT": "The text is presented on the result page header of the diagnostic test. The text congratulates the learner for their completion of the diagnostic test.", + "I18N_DIAGNOSTIC_TEST_RESULT_START_TOPIC": "The button text is present on the diagnostic test result page. The button guides the learner to start a topic. After clicking the button the learners are navigated to the topic viewer page.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_1_FOR_NO_TOPIC": "The text is presented on the result page of the diagnostic test. 
The text congratulates the learner for their basic understanding of the topics that are currently present in the Math Classroom.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_2_FOR_NO_TOPIC": "The text is presented on the result page of the diagnostic test. The text informs the learner to review any of the topics or improve what they know. The text also informs them to visit Oppia regularly as the Classroom is constantly updating.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_ONE_TOPIC": "The text is presented on the result page of the diagnostic test. The text guides the learner to start with the below topic.", + "I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_TWO_TOPICS": "The text is presented on the result page of the diagnostic test. The text guides the learner to start with any of the below topics.", + "I18N_DIAGNOSTIC_TEST_START_BUTTON": "Text displayed on the diagnostic test page. - When the user clicks the start button, the diagnostic test for the topic recommendation will start.", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Text displayed in the preferences page. - Text shown in the dialog shown to upload a profile picture. The user can upload the picture by dragging a file to the area with this text.", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Text displayed in the preferences page, or everywhere there's image upload to instruct the user to 'Upload a file' from their computer.", "I18N_DONATE_PAGE_BREADCRUMB": "Text displayed in the Donate page. - Text shown in the top left corner of the nav bar.\n{{identical|Donate}}", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "Text displayed on the browser tab when on the donate page.", + "I18N_DONATE_PAGE_BUDGET_HEADING": "Text displayed in the Donate page. - Text shown at the top of the budget section.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_1": "Text displayed in the Donate page. - Text shown in the budget section.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_2": "Text displayed in the Donate page. 
- Text shown in the budget section.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "Text displayed in the Donate page. - Text shown in the budget section.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_1": "Text displayed in the Donate page. - Text shown in the budget section.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_2": "Text displayed in the Donate page. - Text shown in the budget section.", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_3": "Text displayed in the Donate page. - Text shown in the budget section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_1": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_10": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_2": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_3": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_4_1": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_4_2": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_5": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_6": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_7": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_8": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_ANSWER_9": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_HEADING_TEXT": "Text displayed in the Donate page. - Text shown at the top of the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_1": "Text displayed in the Donate page. 
- Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_10": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_2": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_3": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_4": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_5": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_6": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_7": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_8": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_FAQ_QUESTION_9": "Text displayed in the Donate page. - Text shown in the FAQ section.", + "I18N_DONATE_PAGE_HEADING_1": "Text displayed in the Donate page. - Text shown at the top of the donate page.", + "I18N_DONATE_PAGE_HEADING_2": "Text displayed in the Donate page. - Text shown at the top of the donate page.", "I18N_DONATE_PAGE_IMAGE_TITLE": "Text displayed above the image showing the uses of funds in the donate page.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_1": "Text displayed in the Donate page. - Text shown in the learner feedback section.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_2": "Text displayed in the Donate page. - Text shown in the learner feedback section.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "Text displayed in the Donate page. - Text shown in the learner feedback section.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_SECTION_HEADING": "Text displayed in the Donate page. - Text shown at the top of the learner feedback section.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_TEXT_1": "Text displayed in the Donate page. 
- Text shown in the learner feedback section.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_TEXT_2": "Text displayed in the Donate page. - Text shown in the learner feedback section.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_TEXT_3": "Text displayed in the Donate page. - Text shown in the learner feedback section.", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "Text displayed in the Donate page. - Text shown on the button that redirects the user to the blog page.", + "I18N_DONATE_PAGE_STATISTIC_1": "Text displayed in the Donate page. - Text shown in the first statistic box.", + "I18N_DONATE_PAGE_STATISTIC_2": "Text displayed in the Donate page. - Text shown in the second statistic box.", + "I18N_DONATE_PAGE_STATISTIC_3": "Text displayed in the Donate page. - Text shown in the third statistic box.", + "I18N_DONATE_PAGE_STATISTIC_4": "Text displayed in the Donate page. - Text shown in the fourth statistic box.", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_HEADING": "Text displayed in the Donate page. - Text shown at the top of the subscribe modal.", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_1": "Text displayed in the Donate page. - Text shown in the subscribe modal.", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_2": "Text displayed in the Donate page. - Text shown in the subscribe modal.", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "Text displayed in the Donate page. - Text shown in the subscribe section.", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_NAME_LABEL_TEXT": "Text displayed in the Donate page. - Text shown in the subscribe section.", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_SUBSCRIBE_LABEL_TEXT": "Text displayed in the Donate page. - Text shown in the subscribe section.", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_1": "Text displayed in the Donate page. - Text shown in the subscribe section.", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_2": "Text displayed in the Donate page. 
- Text shown in the subscribe section.", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "Text displayed in the Donate page. - Text shown at the top of the thanks modal.", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_1": "Text displayed in the Donate page. - Text shown in the thanks modal.", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_2": "Text displayed in the Donate page. - Text shown in the thanks modal.", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_3": "Text displayed in the Donate page. - Text shown in the thanks modal.", "I18N_DONATE_PAGE_TITLE": "Title of the Oppia Donate page.", "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Text displayed below the video in the Donate page.", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "The first paragraph of the vision of Oppia in the Donate page.", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "The second paragraph of the vision of Oppia in the Donate page.", + "I18N_DONATE_PAGE_VISION_TEXT": "Text explaining the vision of Oppia in the Donate page.", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "Text displayed on the button to watch the video in the Donate page.", + "I18N_EDIT_LEARNER_GROUP_PAGE_TITLE": "Title of the Edit Learner Group page.", + "I18N_EMPTY_LEARNER_GROUPS_MESSAGE": "Text shown when the learner groups list on the facilitator dashboard is empty.", + "I18N_EMPTY_SOLUTION_MESSAGE": "Text shown when the solution for a state is not specified.", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "Message displayed at the end of a chapter after a signed in learner completes their 1st chapter ever.", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_2": "Message displayed at the end of a chapter after a signed in learner completes their 5th chapter ever.", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_3": "Message displayed at the end of a chapter after a signed in learner completes their 10th chapter ever.", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_4": "Message displayed at the end of a chapter after a signed in learner completes their 25th chapter ever.", + 
"I18N_END_CHAPTER_MILESTONE_MESSAGE_5": "Message displayed at the end of a chapter after a signed in learner completes their 50th chapter ever.", + "I18N_END_CHAPTER_MILESTONE_PROGRESS_MESSAGE": "Message displayed at the end of a chapter indicating how many chapter(s) (one or more) the learner has left to complete before they reach the next milestone.", + "I18N_END_CHAPTER_NEXT_CHAPTER_TEXT": "Text displayed under the recommendation card which leads the learner onto the next chapter", + "I18N_END_CHAPTER_PRACTICE_SESSION_TEXT": "Text displayed under the recommendation card which leads the learner onto the practice tab", + "I18N_END_CHAPTER_REVISION_TAB_TEXT": "Text displayed under the recommendation card which leads the learner onto the revision tab", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "Message displayed to the learner at the end of an exploration telling them what other lessons/practice session they could try.", "I18N_ERROR_DISABLED_EXPLORATION": "Text displayed as title when an exploration is unavailable due to having been disabled.", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Description shown in the center of the error page for an 'exploration unavailable' error.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Title of the page displayed when an 'exploration unavailable' error occurred.", @@ -222,13 +421,143 @@ "I18N_ERROR_MESSAGE_404": "Description shown in the center of the error page for a 404 error.", "I18N_ERROR_MESSAGE_500": "Description shown in the center of the error page for a 500 error.", "I18N_ERROR_NEXT_STEPS": "Text displayed when an error occurred and the page could not be loaded.", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "Text displayed on the browser tab when on the error page root.", + "I18N_ERROR_PAGE_TITLE": "Text displayed on the browser tab when on the error page.", "I18N_ERROR_PAGE_TITLE_400": "Title of the page displayed when a 400 type error occurred.", "I18N_ERROR_PAGE_TITLE_401": "Title of the page displayed when a 
401 type error occurred because the user has no permissions to access the page.", "I18N_ERROR_PAGE_TITLE_404": "Title of the page displayed when a 404 type error occurred because the page was not found.", "I18N_ERROR_PAGE_TITLE_500": "Title of the page displayed when a 500 type error occurred because the server failed to process the request.", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_0FBWxCE5egOw_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_0FBWxCE5egOw_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_0X0KC9DXWwra_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_0X0KC9DXWwra_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_1904tpP0CYwY_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_1904tpP0CYwY_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_2mzzFVDLuAj8_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_40a3vjmZ7Fwu_DESCRIPTION": "Description of the chapters displayed on story viewer page. 
Should be at most 100 characters.", + "I18N_EXPLORATION_40a3vjmZ7Fwu_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_53Ka3mQ6ra5A_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_5I4srORrwjt2_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_5I4srORrwjt2_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_5NWuolNcwH6e_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_5NWuolNcwH6e_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_670bU6d9JGBh_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_670bU6d9JGBh_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_6Q6IyIDkjpYC_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_6Q6IyIDkjpYC_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_8HTzQQUPiK5i_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. 
Should be at most 36 characters.", + "I18N_EXPLORATION_9DITEN8BUEHw_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_9DITEN8BUEHw_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_9trAQhj6uUC2_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_9trAQhj6uUC2_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_BDIln52yGfeH_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_BDIln52yGfeH_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_BJd7yHIxpqkq_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_BJd7yHIxpqkq_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_IrbGLTicm0BI_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_IrbGLTicm0BI_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_Jbgc3MlRiY07_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_Jbgc3MlRiY07_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_K645IfRNzpKy_DESCRIPTION": "Description of the chapters displayed on story viewer page. 
Should be at most 100 characters.", + "I18N_EXPLORATION_K645IfRNzpKy_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_K89Hgj2qRSzw_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_K89Hgj2qRSzw_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_Knvx24p24qPO_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_Knvx24p24qPO_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_MRJeVrKafW6G_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_MRJeVrKafW6G_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_MjZzEVOG47_1_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_MjZzEVOG47_1_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_OKxYhsWONHZV_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_PLAYER_PAGE_TITLE": "Text displayed on the browser's tab when on an exploration player page.", + "I18N_EXPLORATION_PsfDKdhd6Esz_DESCRIPTION": "Description of the chapters displayed on story viewer page. 
Should be at most 100 characters.", + "I18N_EXPLORATION_PsfDKdhd6Esz_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_R7WpsSfmDQPV_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_R7WpsSfmDQPV_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_RvopsvVdIb0J_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_RvopsvVdIb0J_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_SR1IKIdLxnm1_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_SR1IKIdLxnm1_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_STARTING_FROM_BEGINNING": "Alert message displayed to the user when they complete an exploration informing them that they would be starting from the beginning the next time they come back.", + "I18N_EXPLORATION_STATE_PREVIOUSLY_COMPLETED": "Message displayed to the user if the exploration state has been already completed by them in a previous session.", + "I18N_EXPLORATION_VKXd8qHsxLml_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_VKXd8qHsxLml_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_Vgde5_ZVqrq5_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_Vgde5_ZVqrq5_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. 
Should be at most 36 characters.", + "I18N_EXPLORATION_W0xq3jW5GzDF_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_WulCxGAmGE61_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_WulCxGAmGE61_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_WwqLmeQEn9NK_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_WwqLmeQEn9NK_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_Xa3B_io-2WI5_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_Xa3B_io-2WI5_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_aAkDKVDR53cG_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_aAkDKVDR53cG_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_aHikhPlxYgOH_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_aHikhPlxYgOH_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_aqJ07xrTFNLF_DESCRIPTION": "Description of the chapters displayed on story viewer page. 
Should be at most 100 characters.", + "I18N_EXPLORATION_aqJ07xrTFNLF_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_avwshGklKLJE_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_avwshGklKLJE_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_cQDibOXQbpi7_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_cQDibOXQbpi7_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_hNOP3TwRJhsz_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_hNOP3TwRJhsz_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_ibeLZqbbjbKF_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_ibeLZqbbjbKF_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_k2bQ7z5XHNbK_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_k2bQ7z5XHNbK_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_kYSrbNDCv5sH_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_kYSrbNDCv5sH_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. 
Should be at most 36 characters.", + "I18N_EXPLORATION_lNpxiuqufPiw_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_lNpxiuqufPiw_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_lOU0XPC2BnE9_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_lOU0XPC2BnE9_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_m1nvGABWeUoh_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_m1nvGABWeUoh_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_nLmUS6lbmvnl_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_nLmUS6lbmvnl_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_nTMZwH7i0DdW_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_nTMZwH7i0DdW_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_osw1m5Q3jK41_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_osw1m5Q3jK41_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_rDJojPOc0KgJ_DESCRIPTION": "Description of the chapters displayed on story viewer page. 
Should be at most 100 characters.", + "I18N_EXPLORATION_rDJojPOc0KgJ_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_rfX8jNkPnA-1_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_rfX8jNkPnA-1_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_rwN3YPG9XWZa_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_rwN3YPG9XWZa_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_tIoSb3HZFN6e_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_tIoSb3HZFN6e_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_umPkwp0L1M0-_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_umPkwp0L1M0-_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_v8fonNnX4Ub1_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_v8fonNnX4Ub1_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_wE9pyaC5np3n_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_wE9pyaC5np3n_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. 
Should be at most 36 characters.", + "I18N_EXPLORATION_zIBYaqfDJrJC_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_zIBYaqfDJrJC_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_zNb0Bh27QtJ4_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_zNb0Bh27QtJ4_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_zTg2hzTz37jP_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_zTg2hzTz37jP_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_zVbqxwck0KaC_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_zVbqxwck0KaC_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_EXPLORATION_zW39GLG_BdN2_DESCRIPTION": "Description of the chapters displayed on story viewer page. Should be at most 100 characters.", + "I18N_EXPLORATION_zW39GLG_BdN2_TITLE": "Title of the chapters displayed on topic viewer and story viewer page. Should be at most 36 characters.", + "I18N_FACILITATOR_DASHBOARD_PAGE_TITLE": "Title of the Facilitator Dashboard page.", + "I18N_FEEDBACK_INSTRUCTION": "Message below the feedback input box.", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "The name of the author of a message in a feedback thread who wanted to stay anonymous.", "I18N_FOOTER_ABOUT": "Text shown in the footer. - Link to the about page. See I18N_ABOUT_PAGE_TITLE\n{{Identical|About}}", "I18N_FOOTER_ABOUT_ALL_CAPS": "Text shown in the footer. - Title of the about section. 
Must be in all capital letters for languages that use Latin, Cyrillic, and/or Greek scripts.", + "I18N_FOOTER_ANDROID_APP": "Text shown in the footer. - A link to the Android beta launch page.", "I18N_FOOTER_AUTHOR_PROFILES": "Text shown in the exploration footer. - A link that opens up a dropdown menu of authors that contributed to this exploration.", "I18N_FOOTER_BROWSE_LIBRARY": "Text shown in the footer. - Link to the library page.", "I18N_FOOTER_CONTACT_US": "Text shown in the footer. - Link to the contact page.\n{{Identical|Contact us}}", @@ -270,6 +599,7 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_9": "Text on the Get Started page that explains the process for creating an Oppia activity at a high level.\n{{Identical|Get started}}", "I18N_GET_STARTED_PAGE_PARAGRAPH_9_HEADING": "Paragraph heading on the Get Started page that explains the process for creating an Oppia activity at a high level.\n{{Identical|Get started}}", "I18N_GET_STARTED_PAGE_TITLE": "Title of the Get Started page that explains the process for creating an Oppia activity at a high level.\n{{Identical|Get started}}", + "I18N_GOAL_LIMIT": "Text shown in the learner dashboard that shows the limit of the number of topics that a learner can mark as a 'goal'", "I18N_GOT_IT": "Text shown at the bottom of the modal that displays hints and solutions", "I18N_HEADING_VOLUNTEER": "Text shown in the footer and navbar. 
- Link to a page that contains Oppia's Volunteer's page", "I18N_HINT_NEED_HELP": "Text shown in the modal to prompt the learner to view a hint.", @@ -278,8 +608,16 @@ "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "Instructions to the learner to interact with the CodeRepl interaction.", "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Shorter instructions to the learner to interact with the CodeRepl interaction.", "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Instructions to the learner to interact with the DragAndDrop interaction.", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "Error message shown below input field in the fractions interaction.", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Text displayed in the fractions interaction input as a placeholder.", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Text displayed in the fractions interaction input as a placeholder when no integer part is allowed.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS": "Error message shown below input field in the fractions interaction.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS_LENGTH": "Error message shown below input field in the fractions interaction.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_FORMAT": "Error message shown below input field in the fractions interaction.", + "I18N_INTERACTIONS_FRACTIONS_NON_EMPTY": "Error message shown below input field in the fractions interaction.", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "Error message shown below input field in the fractions interaction.", + "I18N_INTERACTIONS_FRACTIONS_PROPER_FRACTION": "Error message shown below input field in the fractions interaction.", + "I18N_INTERACTIONS_FRACTIONS_SIMPLEST_FORM": "Error message shown below input field in the fractions interaction.", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "Text displayed in a button in a graph interaction inside the graph editor - When the user clicks the button, he can add a new line (edge) to the graph.", "I18N_INTERACTIONS_GRAPH_ADD_NODE": 
"Text displayed in a button in a graph interaction inside the graph editor - When the user clicks the button, he can add a new dot (node) to the graph.", "I18N_INTERACTIONS_GRAPH_DELETE": "Text displayed in a button in a graph interaction inside the graph editor - When the user clicks the button, he can move then select an element of the graph and delete it.\n{{Identical|Delete}}", @@ -311,20 +649,44 @@ "I18N_INTERACTIONS_MUSIC_NARROW_INSTRUCTION": "Shorter instructions to the learner to interact with the MusicNotesInput interaction.", "I18N_INTERACTIONS_MUSIC_PLAY": "Text displayed in a music interaction. - This text is shown inside a button. When the user clicks the button, the sequences of notes the user has introduced is reproduced.\n{{Identical|Play}}", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Text displayed in a music interaction. - This text is shown inside a button. When the user clicks the button, the sequences of notes the user has to copy is reproduced.", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "Error message shown below the input field for the number with units interaction.", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY_FORMAT": "Error message shown below the input field for the number with units interaction", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_UNIT_CHARS": "Error message shown below the input field for the number with units interaction", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_VALUE": "Error message shown below the input field for the number with units interaction", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "Text displayed in number with units interaction. - The text is shown inside a button. 
When the user clicks the button, a dialog is shown with the explanation of the accepted syntax that can be used inside number with units in Oppia.", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_DECIMAL": "Error message shown when number has more than one decimal points.", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_EXPONENT": "Error message shown when number has more than 1 exponent sign.", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_MINUS": "Error message shown when number has more than 1 minus sign.", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_COMMA": "Error message shown when number has more than 15 digits.", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_DOT": "Error message shown when number has more than 15 digits.", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "Error message shown when number is invalid.", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "Error message shown when number is less than zero.", + "I18N_INTERACTIONS_NUMERIC_INPUT_MINUS_AT_BEGINNING": "Error message shown when number does not have minus sign in the beginning.", + "I18N_INTERACTIONS_NUMERIC_INPUT_NO_INVALID_CHARS": "Error message shown when number has invalid characters.", + "I18N_INTERACTIONS_NUMERIC_INPUT_NO_TRAILING_DECIMAL": "Error message shown when number has trailing decimal points.", "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "This text is displayed in pencil code editor interaction - This text is shown when user clicks on the reset button. This is body of the modal and asks the user whether they really want to proceed with the action", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Text displayed in a pencil code editor interaction - This text is shown when user clicks on reset code button. 
If user clicks this button in the modal then it cancels user's current action\n{{Identical|Cancel}}", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Text displayed in pencil code editor interaction - This text is shown when user clicks on the reset code button. This is title of a modal asking for confirmation of user's action", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Text displayed in a pencil code editor interaction - This text is shown when user clicks on reset code button. If user clicks this button, the code editor is reset to its initial state", "I18N_INTERACTIONS_PENCILCODE_INSTRUCTION": "Instructions to the learner to interact with the PencilCode interaction.", "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Shorter instructions to the learner to interact with the PencilCode interaction.", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "Error message shown below the input field for the ratio input interaction.", + "I18N_INTERACTIONS_RATIO_INCLUDES_ZERO": "Error message shown below the input field for the ratio input interaction.", + "I18N_INTERACTIONS_RATIO_INVALID_CHARS": "Error message shown below the input field for the ratio input interaction.", + "I18N_INTERACTIONS_RATIO_INVALID_COLONS": "Error message shown below the input field for the ratio input interaction.", + "I18N_INTERACTIONS_RATIO_INVALID_FORMAT": "Error message shown below the input field for the ratio input interaction.", + "I18N_INTERACTIONS_RATIO_NON_INTEGER_ELEMENTS": "Error message shown below the input field for the ratio input interaction.", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Text displayed in a set interaction. - This text is shown inside a button. When the user clicks the button, a new line to introduce elements to the set appears above the button.", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Text displayed in a set interaction. 
- This text is shown in red when the user tries to submit a set that has duplicate elements.", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "Text displayed in a set interaction. - The user is asked to introduce items of a set described in the problem. This text indicates that each one of those items has to be written is a separate line. It's shown when the user has not entered any item yet.", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Text displayed in a set interaction. - The interaction is a dialog between Oppia and the user. This text is shown on the user side of the dialog if the set introduced by the user has no elements.", "I18N_INTERACTIONS_SUBMIT": "Text displayed in an interaction or navigation bar. - The text is shown inside a button. When the user clicks the button, the system checks if the answer is correct and continues to the following action.\n{{Identical|Submit}}", + "I18N_INTERACTIONS_TERMS_LIMIT": "Error message shown below the input field for the ratio input interaction.", + "I18N_INVALID_TAGS_AND_ATTRIBUTES_ALERT": "Alert message shown when invalid tags and attributes have been stripped from the uploaded SVG", + "I18N_JOIN_LEARNER_GROUP_BUTTON": "Text appearing in the button in learner group invitation modal on learner dashboard. - Clicking this button will allow the learner to join the group.", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Text for the site language selector. 
- When the user selects a different language, the site (not the explorations) is translated into that language.", - "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Good afternoon greeting on the learner dashboard.", + "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Good afternoon greeting on the learner dashboard.\n{{Identical|Good}}", + "I18N_LEARNER_DASHBOARD_ALL": "Section in the learner dashboard that displays both completed and incomplete explorations.\n\n{{Identical|All}}", "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Text for the edit goals section in the learner dashboard.", "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Text for the bronze badge in the learner dashboard.", "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Text for the community lessons section in the learner dashboard.", @@ -333,38 +695,48 @@ "I18N_LEARNER_DASHBOARD_COMPLETED_TO_INCOMPLETE_COLLECTIONS": "Text displayed in the learner dashboard. - The text is shown when new exporations are added to the collections the user has already completed.", "I18N_LEARNER_DASHBOARD_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Text for the continue where you left off section in the learner dashboard.", "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Text for the current goals section in the learner dashboard.", + "I18N_LEARNER_DASHBOARD_DECLINE_INVITATION_MODAL_BODY": "Body text for the decline learner group invitation modal.", + "I18N_LEARNER_DASHBOARD_DECLINE_INVITATION_MODAL_HEADER": "Header for the decline learner group invitation modal.", "I18N_LEARNER_DASHBOARD_EMPTY_COLLECTION_PLAYLIST": "Text displayed in the learner dashboard. - This text is shown when there are no collections in the playlist of the user.", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_COLLECTIONS": "Text displayed in the learner dashboard. - This text is shown when there are no collections in the completed section of the user.", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_EXPLORATIONS": "Text displayed in the learner dashboard. 
- This text is shown when there are no explorations in the completed section of the user.", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_GOALS_SECTION": "Text displayed in the learner dashboard. - This text is shown when there are no topics in the completed goals section of the user.", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_STORIES_SECTION": "Text displayed in the learner dashboard. - This text is shown in the stories completed section in mobile view when the user has not completed any story.", - "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Text displayed in the learner dashboard. - This text is shown when there are no topics in the current goals section of the user.", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Text displayed in the learner dashboard. - This text is shown when there are no topics in the current goals section of the user, just before the link displaying the message:
+ "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_SET_A_GOAL": "Link displayed in the learner dashboard. - This link is shown when there are no topics in the current goals section of the user, appended just after this message:
\n{{Msg-oppia|I18N LEARNER DASHBOARD EMPTY CONTINUE WHERE YOU LEFT OFF SECTION}}", "I18N_LEARNER_DASHBOARD_EMPTY_CURRENT_GOALS_SECTION": "Text displayed in the learner dashboard. - This text is shown when there are no topics in the current goals section of the user.", "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "Text displayed in the learner dashboard. - This text is shown when there are no explorations in the playlist of the user.", "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "Text displayed in the learner dashboard. - This text is shown when there are no feedback threads to which the user is subscribed.", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "Text displayed in the learner dashboard. - This text is shown when there is at least one collection in the complete section but no collections in the incomplete section of the user.", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "Text displayed in the learner dashboard. - This text is shown when there is at least one exploration in the complete section but no explorations in the incomplete section of the user.", + "I18N_LEARNER_DASHBOARD_EMPTY_LEARN_SOMETHING_NEW_SECTION": "Text displayed in the learner dashboard. - This text is shown when the user has reached the goal selection limit in edit goals section of learner dashboard.", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "Text displayed in the learner dashboard. - This text is shown when the user hasn't started any topic.", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "Text displayed in the learner dashboard. - This text is shown when the user hasn't started any topic.", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "Text displayed in the learner dashboard. - This text is shown when the user hasn't started any topic.", "I18N_LEARNER_DASHBOARD_EMPTY_SUBSCRIPTIONS": "Text displayed in the learner dashboard. 
"I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "Text that appears in the learner dashboard. It is placeholder text that appears in the message box of a feedback thread.",
It warns the user against divulging private information on a publicly viewable discussion.", "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Text for the goals section in the learner dashboard.", - "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "Text for the gold badge in the learner dashboard.", + "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "Text for the gold badge in the learner dashboard.\n\n{{Identical|Gold}}", "I18N_LEARNER_DASHBOARD_HOME_SECTION": "Text for the home section in the learner dashboard.", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "Title of the section that shows incomplete explorations in the learner dashboard.", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "Text for the incomplete section in the learner dashboard.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "First part of the introductory message that appears on the learner dashboard when there is no activity.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Second part of the introductory message that appears on the learner dashboard when there is no activity.", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION": "Label for the learner groups section in the learner dashboard.", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "Label for groups which learner has joined in learner groups section on the learner dashboard.", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_INVITATIONS": "Label for group invitations in learner groups section on the learner dashboard.", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_NO_GROUPS": "Text displayed in learner groups section on the learner dashboard. 
It is shown when the user has not joined any groups.", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_DECLINE_INVITATION": "Button text for declining an invitation to join a learner group in learner groups section on learner dashboard.", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_VIEW_PREFERENCES": "Button text for viewing preferences of a given learner group in learner groups section on learner dashboard.", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Text for the 'Learn Something New' section in the learner dashboard.", - "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Good morning greeting on the learner dashboard.", + "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Good morning greeting on the learner dashboard.\n{{Identical|Good}}", "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "Text displayed in the learner dashboard. - This text is shown when a new story content has been added to a topic in the completed goals section.", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COLLECTIONS_FROM_PLAYLIST": "Text displayed in the learner dashboard. - This text is shown when one or more than one collection in the playlist has been deleted or unpublished.", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_COLLECTIONS": "Text displayed in the learner dashboard. - This text is shown when one or more than one completed collection has been deleted or unpublished.", @@ -374,6 +746,7 @@ "I18N_LEARNER_DASHBOARD_NONEXISTENT_INCOMPLETE_EXPLORATIONS": "Text displayed in the learner dashboard. - This text is shown when one or more than one exploration in progress has been deleted or unpublished.", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_COLLECTION": "Text displayed in the learner dashboard. - This text is shown when the user hasn't played a collection before and is checking the 'incomplete collections' section.", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": "Text displayed in the learner dashboard. 
"I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE": "Text that appears in the learner playlist section. It tells the learner that he/she can rearrange the activities in the order he/she wants to play them.",
It indicates that the message written by the user is being sent to the feedback thread.", - "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Text for the silver badge in the learner dashboard.", + "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Text for the silver badge in the learner dashboard.\n\n{{Identical|Silver}}", "I18N_LEARNER_DASHBOARD_SKILLS": "Text for the skills in the learner dashboard.", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "Text for the skill proficiency section in the learner dashboard.", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Text for the skill progress section in the learner dashboard.", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Text for the stories completed section in the learner dashboard.", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Text for the subscriptions section in the learner dashboard. The header asks the user if he/she is sure to remove the activity from the learner dashboard.", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Text for the subtopic progress in the learner dashboard.", @@ -396,7 +769,86 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Text that appears in the learner dashboard when a suggestion is opened. It is the header for the box which shows the suggested content of the state in a suggestion.", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Text displayed in the learner dashboard. It appears in the thread summary, when last message sent in the thread is a suggestion.", "I18N_LEARNER_DASHBOARD_TOOLTIP": "Text for the tooltip that appears on hovering over the info icon next to collections text", + "I18N_LEARNER_DASHBOARD_VIEW": "Text displayed beside the exploration filter dropdown in the learner dashboard.\n\n{{Identical|View}}", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "The name of a button in the learner dashboard. 
On clicking it the contents of the suggestion are viewable.", + "I18N_LEARNER_GROUPS_SECTION_TITLE": "Title of the section that shows lists of learner groups on the facilitator dashboard page.", + "I18N_LEARNER_GROUP_ADD_GROUP_DETAILS": "Text shown for the add group details section on the learner group creation page.", + "I18N_LEARNER_GROUP_ADD_NEW_SYLLABUS_ITEMS": "Button text for the add new syllabus items button in syllabus tab on the edit learner group page.", + "I18N_LEARNER_GROUP_ADD_SYLLABUS_ITEMS": "Text shown for the add syllabus items section on the learner group creation page.", + "I18N_LEARNER_GROUP_ADD_TO_SYLLABUS": "Button text shown to add a syllabus item to the learner group syllabus.", + "I18N_LEARNER_GROUP_ASSIGNED_SKILLS": "Text shown for assigned skills summary in the overview tab on the edit learner group page.", + "I18N_LEARNER_GROUP_ASSIGNED_STORIES": "Text shown for assigned stories summary in the overview tab on the edit learner group page.", + "I18N_LEARNER_GROUP_ASSIGNED_SYLLABUS_TAB": "Label for the assigned syllabus tab in the learner view of learner group page.", + "I18N_LEARNER_GROUP_BACK_TO_ALL_LEARNERS_PROGRESS": "Text shown for the back to all learners progress button from learner specific progress in the learner progress tab on the edit learner group page.", + "I18N_LEARNER_GROUP_BACK_TO_SYLLABUS": "Text shown for the back to syllabus button from add new syllabus items from syllabus tab on the edit learner group page.", + "I18N_LEARNER_GROUP_CREATED_TITLE": "Message shown when a new learner group is created.", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "Button text shown to move to the next step of the learner group creation process.", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "Button text shown to move to the previous step of the learner group creation process.", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "Label text shown for learner group description in preferences modal on learner group 
page.\n{{identical|Description}}", + "I18N_LEARNER_GROUP_DETAILS_GROUP_DESCRIPTION": "Label text shown for the group description input field on the learner group details section.", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "Label text shown for the group title input field on the learner group details section.", + "I18N_LEARNER_GROUP_DETAILS_MODAL_DESCRIPTION": "Label text shown for the group description on the learner group details modal.", + "I18N_LEARNER_GROUP_DETAILS_TITLE": "Title text shown on the learner group details modal.", + "I18N_LEARNER_GROUP_FACILITATOR_LABEL_TEXT": "Label text shown for facilitator name in learner group details modal.", + "I18N_LEARNER_GROUP_GROUP_DETAILS_SECTION": "Group Details section title in preferences tab on the edit learner group page.", + "I18N_LEARNER_GROUP_INVITATION_MODAL_HEADER": "Header text for the learner group invitation modal.", + "I18N_LEARNER_GROUP_INVITE_LEARNERS": "Text shown for the invite learners section on the learner group creation page.", + "I18N_LEARNER_GROUP_INVITE_LEARNERS_BY_USERNAME": "Label text shown for the username input field on the learner group invite learners section.", + "I18N_LEARNER_GROUP_INVITE_LEARNERS_PLACEHOLDER_TEXT": "Placeholder text shown for the username input field on the learner group invite learners section.", + "I18N_LEARNER_GROUP_INVITE_LEARNER_BUTTON_TEXT": "Button text to invite a learner to the learner group.", + "I18N_LEARNER_GROUP_INVITE_LIST_TEXT": "Text shown for the list of learners invited to the learner group.", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "Button text shown on a syllabus item card when that syllabus item is already added to the learner group syllabus.", + "I18N_LEARNER_GROUP_ITEM_ALREADY_ADDED_TO_SYLLABUS": "Message shown on a syllabus item card when that syllabus item is already added to the learner group syllabus.", + "I18N_LEARNER_GROUP_JOINING_MESSAGE": "Text showing that the learner is about to join the learner group in the learner group 
invitation modal.", + "I18N_LEARNER_GROUP_LEARNERS": "Text shown on the learner group card shown on the facilitator dashboard page. It is used to denote the number of learners in the group.", + "I18N_LEARNER_GROUP_LEARNERS_MODAL_TEXT": "Text shown for the list of learners in the learner group preferences modal.", + "I18N_LEARNER_GROUP_LEARNERS_PROGRESS_TAB": "Learner Progress tab text on edit learner group page.", + "I18N_LEARNER_GROUP_LEARNERS_SECTION": "Text shown on the overview tab of the edit learner group page. It shows count of learners present in the group.", + "I18N_LEARNER_GROUP_MINIMUM_SYLLABUS_ITEMS_INFO": "Text shown on the learner group syllabus section stating the condition for minimum number of syllabus items required in order to create a learner group.", + "I18N_LEARNER_GROUP_NO_INVITATIONS": "Text shown when there are no invitations to the learner group on learner dashboard.", + "I18N_LEARNER_GROUP_NO_ITEMS_ADDED": "Text shown on the learner group syllabus section stating that no syllabus items have been added to the group.", + "I18N_LEARNER_GROUP_NO_LEARNERS_HAVE_JOINED": "Text shown in preferences tab on edit learner group page when there are no learners in the group.", + "I18N_LEARNER_GROUP_NO_LEARNERS_INVITED": "Text shown on the learner group invite learners section when no learners are invited to the group.", + "I18N_LEARNER_GROUP_NO_RESULTS_FOUND": "Text shown when no results are found while searching new syllabus items to add to the learner group.", + "I18N_LEARNER_GROUP_OVERVIEW_TAB": "Title text for overview tab on the edit learner group page.", + "I18N_LEARNER_GROUP_PAGE_TITLE": "Text displayed on the browser tab when on the learner group page.", + "I18N_LEARNER_GROUP_PERMISSION_NOT_GIVEN": "Text shown on learner progress card in learner progress tab. 
+ "I18N_LEARNER_GROUP_PROGRESS_NO_LEARNERS": "Text shown in learner progress tab on edit learner group page. It indicates that there is no progress to show as no student has joined the group.",
"I18N_LEARNER_GROUP_SKILLS_ANALYSIS_SECTION": "Section title text shown on the overview tab of the edit learner group page.", + "I18N_LEARNER_GROUP_SKILLS_MASTERED_SECTION": "Section title text shown on the overview tab of the learner group page.", + "I18N_LEARNER_GROUP_SKILLS_SECTION_PROGRESS_DESCRIPTION": "Description text shown under skills section on the overview tab of the edit learner group page.", + "I18N_LEARNER_GROUP_STORIES_SECTION_PROGRESS_DESCRIPTION": "Description text shown under stories section on the overview tab of the edit learner group page.", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "Button text shown on the syllabus item card while adding new syllabus items to learner group.", + "I18N_LEARNER_GROUP_SYLLABUS_COMPLETION": "Text preceded by the percentage of completion of a chapter in learners' progress tab of edit learner group page.", + "I18N_LEARNER_GROUP_SYLLABUS_ITEM_NOT_STARTED_YET": "Text shown on the learner progress in story and skill card on edit learner group page. It indicates that the student has not started a particular chapter or skill.", + "I18N_LEARNER_GROUP_SYLLABUS_LESSONS": "Text shown for lessons of a story in learners' progress tab on edit learner group page.", + "I18N_LEARNER_GROUP_SYLLABUS_TAB": "Title text for syllabus tab of the edit learner group page.", + "I18N_LEARNER_GROUP_USER_STORIES_PROGRESS": "Title text shown in learner specific progress view for progress in stories on edit learner group page.", + "I18N_LEARNER_GROUP_VIEW_DETAILS": "Button text shown in learner progress tab on edit learner group page to view detailed progress of a student.", + "I18N_LEARNER_GROUP_VIEW_OVERVIEW_SUMMARY_TITLE": "Title text shown on the overview tab of the learner group page.", + "I18N_LEARNER_GROUP_VIEW_PREFERENCES": "Button text shown on the learner group page. 
- Clicking it opens the learner group preferences modal.", + "I18N_LEARNER_GROUP_WITHDRAW_INVITE": "Button text displayed in student invitation card in preferences tab of the edit learner group page. On clicking it, it withdraws student's invitation to the learner group.", + "I18N_LEARNT_TOPIC": "Text displayed on the learner dashboard that marks a topic as completed.", + "I18N_LEARN_TOPIC": "Text displayed on the learner dashboard that prompts user to start a topic.", + "I18N_LEAVE_LEARNER_GROUP": "Button text shown on the learner group page. - Clicking it prompts the user to leave the learner group.", + "I18N_LEAVE_LEARNER_GROUP_MODAL_BODY": "Confirmation message text shown in the modal when a learner tries to leave a learner group.", + "I18N_LEAVE_LEARNER_GROUP_MODAL_BUTTON": "Confirmation button text shown in the modal when a learner tries to leave a learner group.", + "I18N_LEAVE_LEARNER_GROUP_MODAL_HEADER": "Header text for the modal that appears when a learner tries to leave a learner group.", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "Text displayed on the lesson authors dropdown menu in the lesson info modal.", + "I18N_LESSON_INFO_HEADER": "The name of a button in exploration player. On clicking it, the lesson information modal opens up.", + "I18N_LESSON_INFO_TOOLTIP_MESSAGE": "The message in the tooltip pointing to lesson info button. Appears when learner reaches second checkpoint in an exploration for the first time in his/her whole lifetime.", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Text that appears on hovering over the completed icon that appears on top of the activity cards. It indicates to the user that he/she has completed the activity.", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Text that appears on hovering over the playlist icon that appears on top of the activity cards. 
+ "I18N_LIBRARY_CATEGORIES_LANGUAGES": "Text displayed in a filter list of categories in the library page. - Name of the languages category (not a filter for the language of the exploration).\n\nSee also:\n* {{Msg-oppia|I18N LIBRARY ALL CATEGORIES}}\n{{Identical|Language}}",
See I18N_LIBRARY_ALL_CATEGORIES Skills", "I18N_LIBRARY_CATEGORIES_MATHEMATICS": "Text displayed in a filter list of categories in the library page. - Name of the math category. See I18N_LIBRARY_ALL_CATEGORIES\n{{Identical|Math}}", @@ -461,6 +913,7 @@ "I18N_LIBRARY_NO_OBJECTIVE": "Text displayed inside the exploration card in the library - It's shown under the title when the exploration has no description available.", "I18N_LIBRARY_N_CATEGORIES": "Text of a filter button located next to the search bar in the library page - This text is displayed when the button is collapsed (the list of categories is not shown). It indicates the number of categories selected for the filter, and it's shown when there is more than one category selected, but not all. See I18N_LIBRARY_ALL_CATEGORIES\n{{Identical|Category}}", "I18N_LIBRARY_N_LANGUAGES": "Text of a filter button located next to the search bar in the library page - This text is displayed when the button is collapsed (the list of languages is not shown). It indicates the number of exploration languages selected for the filter, and it's shown when there is more than one language selected, but not all. See I18N_LIBRARY_ALL_LANGUAGES\n{{Identical|Language}}", + "I18N_LIBRARY_PAGE_BROWSE_MODE_TITLE": "Title displayed in the tab on the Browser in the library page when browsing through groups or in search mode.", "I18N_LIBRARY_PAGE_TITLE": "Title displayed in the tab on the Browser in the library page. - It should not be too long.", "I18N_LIBRARY_RATINGS_TOOLTIP": "Tooltip displayed inside the exploration card in the library - It's on top of the rating (or score) of the exploration. The ranking is calculated based on users votes.\n{{Identical|Rating}}", "I18N_LIBRARY_SEARCH_PLACEHOLDER": "Text displayed inside of a search box located in the library. - The result of the search are the explorations related to the search term introduced. 
The text should be a phrase that indicates i) the user can type here, ii) the result of the action is going to be something to learn. The text should not be long, and it should be a single phrase.", @@ -470,28 +923,37 @@ "I18N_LICENSE_PAGE_LICENSE_HEADING": "Text (Heading) for Oppia's License page.\n{{Identical|License}}", "I18N_LICENSE_PAGE_PARAGRAPH_1": "Text for first paragraph of Oppia's License page.", "I18N_LICENSE_PAGE_PARAGRAPH_2": "Text for second paragraph of Oppia's License page.", + "I18N_LICENSE_PAGE_TITLE": "Text displayed on the browser tab when on the License page.", "I18N_LICENSE_TERMS_HEADING": "Heading to be displayed on top of the license page.", + "I18N_LOGIN_PAGE_TITLE": "Text displayed on the browser tab when on the Login page.", "I18N_LOGOUT_LOADING": "Text displayed in the middle of the page while the user is being logged out.", + "I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "Text displayed on the browser tab when on the Logout page.", "I18N_LOGOUT_PAGE_TITLE": "Title displayed in the tab on the Browser in the logout page - In this page the user is waiting for the logout process to complete. Keep in two or three short words.", "I18N_MATH_COURSE_DETAILS": "The course details for the math classroom page.", "I18N_MATH_TOPICS_COVERED": "The introduction to the topics covered section of the math classroom page.", "I18N_MODAL_CANCEL_BUTTON": "Text that is displayed in a button of a modal. On clicking it the modal closes.\n{{Identical|Cancel}}", "I18N_MODAL_CONTINUE_BUTTON": "Text that is displayed in a button of a modal. On clicking it the user confirms to continue with the action.\n{{Identical|Continue}}", + "I18N_MODAL_REMOVE_BUTTON": "Text that is displayed in a button of a modal. 
On clicking it the user confirms to remove a syllabus item from the learner group syllabus.", "I18N_NEXT_LESSON": "Title of the section that shows the next lesson in a story once a chapter is completed.", + "I18N_NO": "Text on button to reject the content of a modal.\n\n{{Identical|No}}", + "I18N_NO_RESULTS_FOUND_FOR_MATCHING_USERNAME": "Text shown on learners progress tab on edit learner group page when no learners are found to have username matching the searched keyword.", "I18N_ONE_SUBSCRIBER_TEXT": "Text displayed under the subscribers tab in creator dashboard. If the creator has one subscriber, this text is displayed which informs him/her about the same.", "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Text displayed in the Partnerships page. - Text shown in the top left corner of the nav bar.", + "I18N_PARTNERSHIPS_PAGE_TITLE": "Text displayed on the browser tab when on the partnerships page.", "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "Text displayed in the Pending Account Deletion page. 
+ "I18N_PENDING_ACCOUNT_DELETION_PAGE_TITLE": "Text displayed on the browser tab when on the Pending Account Deletion page.",
+ "I18N_PRACTICE_SESSION_PAGE_TITLE": "Text displayed on the browser tab when on the practice session page.", + "I18N_PRACTICE_SESSION_START_BUTTON_TEXT": "Button text displayed in subtopic card in assigned syllabus tab of learner group page. - Clicking this button redirects learner to the practice session of the subtopic.",
- Text shown below the text entry for the user biography.", "I18N_PREFERENCES_BREADCRUMB": "Text displayed in the preferences page. - Text shown in the top left corner of the nav bar.\n{{Identical|Preferences}}", @@ -591,9 +1056,14 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "Text displayed in the preferences page. - Text shown to the left of a checkbox that allows the user to agree or disagree to receive periodic emails from the site.", "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "Text displayed in the preferences page. - Text shown to the left of a checkbox that allows the user to agree or disagree to receive emails related to user subscriptions.", "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "Text displayed in the preferences page. Text shown to the bottom of the checkbox to subscribe to Oppia's newsletter when we are not able to automatically add the user to the mailing list.", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "Text displayed in the preferences page. Text shown as heading and on button of Export Account section.", + "I18N_PREFERENCES_EXPORT_ACCOUNT_INFO_TEXT": "Text displayed in the preferences page. Text to inform the user about the type of exported data and downloading status.", + "I18N_PREFERENCES_EXPORT_ACCOUNT_WARNING_TEXT": "Text displayed in the preferences page. Warning text to inform user to not leave the page when user's account is being exported.", "I18N_PREFERENCES_HEADING": "Text displayed in the preferences page. - Text shown as the title.\n{{Identical|Preferences}}", "I18N_PREFERENCES_HEADING_SUBTEXT": "Text displayed in the preferences page. - Text shown below the title. Indicates that the changes made on the profile are going to be saved even if the user does not click a save button.", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "Text displayed in the preferences page. - Text shown if the user has no subscribed creators", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "Text displayed in preference page. 
- Heading text shown above the text highlighting oppia's impact.\n{{identical|Impact}}", + "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE": "Text displayed on the browser tab when on the preferences page.", "I18N_PREFERENCES_PAGE_TITLE": "Title of the preferences page.", "I18N_PREFERENCES_PICTURE": "Text displayed in the preferences page. - Text shown in the space for the user profile picture if no picture has been selected.\n{{Identical|Picture}}", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "Text displayed in the preferences page. - Text shown to the left of the entry where the user can select the preferred language to be used for playing audio translations in explorations.", @@ -603,6 +1073,7 @@ "I18N_PREFERENCES_PREFERRED_DASHBOARD_EXPLAIN": "Text displayed in the preferences page. - Text shown below the choices to select the preferred dashboard, explaining what the field is for.", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "Text displayed in the preferences page. - Text shown to the left of text entry to select the preferred language for explorations.", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "Text displayed in the preferences page. - Text shown below the text entry to select the preferred language for explorations, explaining what the field is for.", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": "Text displayed in the preferences page - Placeholder text in the preferred exploration language selector.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "Text displayed in the preferences page. - Text shown to the left of the entry where the user can select the preferred language of the site (not the explorations). When a different language is selected, the page is translated.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "Text displayed in the preferences page. 
- Text shown below the entry where the user can select the preferred language of the site when no language is selected.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "Text displayed in the preferences page. - Text shown in the entry where the user can select the preferred language of the site when no language is selected.", @@ -610,15 +1081,25 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Text displayed in the preferences page. - Text of the dialog shown to upload a profile picture when the user has uploaded an image that is too big and needs to be cropped.", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Text displayed in the preferences page. - Error text of the dialog shown to upload a profile picture. This error is shown when the file uploaded by the user is not an image.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Text displayed in the preferences page. - Title of the dialog shown to upload a profile picture.", + "I18N_PREFERENCES_SEARCH_LABEL": "Text displayed in the preferences page. - Placeholder text in language selector\n{{identical|Search}}", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Text displayed in the preferences page. - Text shown in the text entry to select the preferred language for explorations when no language is selected.", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "Text displayed in the preferences page. - Placeholder text in site language selector", "I18N_PREFERENCES_SUBJECT_INTERESTS": "Text displayed in the preferences page. - Text shown below the text entry to select the user subject interests.", + "I18N_PREFERENCES_SUBJECT_INTERESTS_ERROR_TEXT": "Text displayed in the preferences page. - Text shown below the text entry to select the user subject interests if input by user is not correct.", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "Text displayed in the preferences page. 
- Text shown below the text entry to select the user subject interests, explaining what the field is for.", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "Text displayed in the preferences page. - Text shown in the text entry to select the user subject interests when the user clicks to type.", + "I18N_PREFERENCES_SUBJECT_INTERESTS_LABEL": "Text displayed in the preferences page. - Placeholder text shown in the text entry to select the user subject interests.", "I18N_PREFERENCES_SUBJECT_INTERESTS_PLACEHOLDER": "Text displayed in the preferences page. - Text shown in the text entry to select the user subject interests when no subject is selected.", "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "Text displayed in the preferences page. - Text shown to list the user's subscribed creators", "I18N_PREFERENCES_USERNAME": "Text displayed in the preferences page. - Text shown at the left of the text entry where the user can change his username.\n{{Identical|Username}}", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Text displayed in the preferences page. - Text shown in the text entry for the username when there is no username assigned to the user.", + "I18N_PRIVACY_POLICY_PAGE_TITLE": "Text displayed on the browser tab when on the Privacy policy page.", "I18N_PROFILE_NO_EXPLORATIONS": "Text displayed on the Profile page. 
- This message is shown on a user's profile when the user has zero created/edited explorations.", + "I18N_PROFILE_PAGE_TITLE": "Text displayed on the browser tab when on the Profile page.", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "Text displayed on the progress reminder modal asking the learner if they wish to continue.", + "I18N_PROGRESS_REMINDER_MODAL_HEADER": "Text displayed on the progress reminder modal informing the learner of their progress.", + "I18N_PROGRESS_REMINDER_RESTART_LESSON": "Text displayed on the button meant for restarting the lesson on the progress reminder modal.", + "I18N_PROGRESS_REMINDER_RESUME_LESSON": "Text displayed on the button meant for resuming the lesson on the progress reminder modal.", "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Text displayed in Practice Session and Review Test pages. - Text that is shown above the breakdown of the score that indicates users can learn more about their score below.", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Text displayed in the Practice Session page. - Text of the button that go to the Topics Dashboard on click.", "I18N_QUESTION_PLAYER_NEW_SESSION": "Text displayed in the Practice Session page. - Text of the button that starts a new practice session", @@ -629,26 +1110,57 @@ "I18N_QUESTION_PLAYER_SKILL_DESCRIPTIONS": "Text displayed in Practice Session and Review Test pages. - Text that is the heading of a column with skill descriptions.", "I18N_QUESTION_PLAYER_TEST_FAILED": "Text displayed in Practice Session and Review Test pages. - Text that is shown above the score wheel when the user fails the test.", "I18N_QUESTION_PLAYER_TEST_PASSED": "Text displayed in Practice Session and Review Test pages. 
- Text that is shown above the score wheel when the user passes the test.", + "I18N_REDIRECTION_TO_STUCK_STATE_MESSAGE": "Oppia's response when the learner is being redirected to the stuck state for refreshing concepts.", + "I18N_REFRESHER_EXPLORATION_MODAL_BODY": "Body of modal that redirects the learner to a separate exploration to refresh certain concepts.", + "I18N_REFRESHER_EXPLORATION_MODAL_TITLE": "Title of the modal that redirects the learner to a separate exploration to refresh certain concepts.", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "Text which appears as the heading for registration session expired modal on Signup page.", "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "Text which appears on registration session expired modal on Signup page - This message informs user that their registration session has expired and they need to click on Continue Registration to restart the registration.", + "I18N_RELEASE_COORDINATOR_PAGE_TITLE": "Text displayed on the browser tab when on the release coordinator page.", "I18N_RESET_CODE": "Text which appears on the Reset button in the PencilCode interaction.", + "I18N_RESTART_EXPLORATION_BUTTON": "Text which appears on the Restart button in the lesson info modal of the learner checkpoints progress.", "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Text displayed in the review test page. 
- Text shown in the top left corner of the nav bar.", + "I18N_REVIEW_TEST_PAGE_TITLE": "Text displayed on the browser tab when on the review test page.", + "I18N_SAVE_BUTTON_ALERT_TOOLTIP": "Text displayed when logged-out user tries to save the exploration progress and hasn't even reached the first checkpoint.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_1": "Text displayed on the save progress menu of exploration lesson info modal telling users that their progress will be automatically saved if they sign in.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "Text displayed on the save progress menu of exploration lesson info modal asking the users whether they already have an account.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_3": "Text displayed on the save progress menu of exploration lesson info modal telling the users to use the generated progress URL to save their exploration progress.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_5": "Text displayed on the save progress menu of exploration lesson info modal prompting the users to write or copy the generated URL.", "I18N_SAVE_PROGRESS": "Text displayed at the end of a chapter to prompt the user to login or signup to save chapter progress.", + "I18N_SAVE_PROGRESS_BUTTON_TEXT": "Text displayed on the button in the Save progress card for copying the unique URL of the progress.", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "Text that will be displayed in a tooltip for a second when the user clicks the copy button.", + "I18N_SAVE_PROGRESS_TEXT": "Text which appears on the save progress button and the save progress menu in the exploration lesson info modal.", "I18N_SHARE_LESSON": "Text displayed before the buttons to share an exploration.", + "I18N_SHOW_LESS": "Text, which when clicked on, contracts the section it is a part of.", + "I18N_SHOW_MORE": "Text, which when clicked on, expands the section it is a part of.", "I18N_SHOW_SOLUTION_BUTTON": "Button displayed in the warning modal that shows the solution of the question, when clicked.", 
- "I18N_SIDEBAR_ABOUT_LINK": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the about page.\n{{Identical|About}}", + "I18N_SIDEBAR_ABOUT_LINK": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the about page.\n{{Identical|About Us}}", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the about Oppia Foundation page.", "I18N_SIDEBAR_BLOG": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Oppia blog.\n{{Identical|Blog}}", "I18N_SIDEBAR_CLASSROOM": "Text displayed inside the side navigation bar. - When the user clicks the button, a list with several classroom pages is displayed.", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Text displayed inside drop-down menu. - The list is shown after the user clicks the I18N_SIDEBAR_CLASSROOM button. When the option is clicked, the Maths Classroom page is loaded.", - "I18N_SIDEBAR_CONTACT_US": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Contact Us page.\n{{Identical|Contact us}}", + "I18N_SIDEBAR_CONTACT_US": "Text displayed inside drop-down get involved menu. - When the user clicks the link, they are redirected to the Contact Us page.\n{{Identical|Contact Us}}", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "Text displayed inside drop-down get involved menu. - It's shown in the bottom of the contact us link.", "I18N_SIDEBAR_DONATE": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Donate page.\n{{Identical|Donate}}", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "Text displayed inside drop-down get involved menu. - It's shown in the bottom of the donate link.", "I18N_SIDEBAR_FORUM": "Text displayed in the side navigation bar. 
- When the user clicks the link, they are redirected to the forum (an external page)\n{{Identical|Forum}}", - "I18N_SIDEBAR_GET_STARTED": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Get Started page.\n{{Identical|Get started}}", + "I18N_SIDEBAR_GET_INVOLVED": "Text displayed in the side navigation bar. - The drop-down list of ways to get involved is shown after the user clicks the I18N_SIDEBAR_GET_INVOLVED button.\n{{Identical|Get Involved}}", + "I18N_SIDEBAR_HOME": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Home page.", + "I18N_SIDEBAR_LEARN": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the learn/math page. \n{{Identical|Learn}}", "I18N_SIDEBAR_LIBRARY_LINK": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Library page.\n{{Identical|Library}}", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "Text displayed inside drop-down learn menu. - When the user clicks the link, they are redirected to the learn/math page.", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "Text displayed inside drop-down learn menu. - It's shown in the bottom of the Math Foundations link.", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Oppia Foundation site.\n{{Identical|Oppia Foundation}}", "I18N_SIDEBAR_PARTNERSHIPS": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Partnerships page.", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "Text displayed inside drop-down get involved menu. - It's shown in the bottom of the Partnerships link.", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "Text displayed inside drop-down learn menu. - When the user clicks the link, they are redirected to the addition-subtraction page. 
\n{{Identical|Addition And Subtraction}}", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "Text displayed inside drop-down learn menu. - When the user clicks the link, they are redirected to the community-library page. \n{{Identical|Community Library}}", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "Text displayed inside drop-down learn menu. - It's shown in the bottom of the Community Library link.", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "Text displayed inside drop-down learn menu. - When the user clicks the link, they are redirected to the multiplication page. \n{{Identical|Multiplication}}", + "I18N_SIDEBAR_SUBMENU_PLACE_VALUES": "Text displayed inside drop-down learn menu. - When the user clicks the link, they are redirected to the place-values page. \n{{Identical|Place Values}}", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "Text displayed inside drop-down learn menu. - When the user clicks the link, they are redirected to the learn/math page. \n{{Identical|See All Lessons}}", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Teach page.", "I18N_SIDEBAR_VOLUNTEER": "Text displayed in the side navigation bar. - When the user clicks the link, they are redirected to the Volunteer page.\n{{identical|Volunteer}}", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "Text displayed inside drop-down get involved menu. - It's shown in the bottom of the Volunteer link.", "I18N_SIGNIN_LOADING": "Text displayed in the middle of the page while the user is waiting to sign in.", "I18N_SIGNIN_PAGE_TITLE": "Title displayed in the tab on the Browser in the signin page - In this page the user is waiting for the signin process to complete. Keep in two or three short words.", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Text displayed in the signup page. - This text in on the left of a checkbox. 
If the user clicks the checkbox, he agrees to the terms and conditions of the page.", @@ -674,6 +1186,7 @@ "I18N_SIGNUP_LOADING": "Text displayed in the middle of the page while the content is being loaded.\n{{Identical|Loading}}", "I18N_SIGNUP_PAGE_TITLE": "Title displayed in the tab on the Browser in the signup page - In this page the user can create a new account. Keep in three short words.", "I18N_SIGNUP_REGISTRATION": "Title of the signup page - The user arrives to this page after selecting the google account to use in Oppia. Here the user has to register a new username and agree to the terms and conditions of the page.\n{{Identical|Registration}}", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "Clickable text which hides the sign up section upon clicking for the learner", "I18N_SIGNUP_SEND_ME_NEWS": "Text displayed in the signup page. - It's shown on the right of a choose button. When the button is selected, the user is agreeing to receive periodic emails from Oppia with news and updates about the site and the explorations. See I18N_SIGNUP_EMAIL_PREFERENCES.", "I18N_SIGNUP_SITE_DESCRIPTION": "Text displayed in the signup page. - It's the first paragraph and describes the site.", "I18N_SIGNUP_SITE_OBJECTIVE": "Text displayed inside a information dialog widget in the signup page. - It explains Oppia's objective as a site. See I18N_SIGNUP_WHY_LICENSE.", @@ -681,9 +1194,14 @@ "I18N_SIGNUP_USERNAME": "Text displayed next to an entry box in the signup page - The user is expected to enter a username in the entry box.\n{{Identical|Username}}", "I18N_SIGNUP_USERNAME_EXPLANATION": "Text displayed next to an entry box in the signup page - Explains how the username selected in the box is going to be used inside the site.", "I18N_SIGNUP_WHY_LICENSE": "Text displayed inside a information dialog widget in the signup page. 
- It's the title of a dialog that contains additional information about why the creative commons licence was chosen for Oppia.", + "I18N_SKILL_LEVEL_BEGINNER": "Text denoting beginner level mastery of user in a particular skill.", + "I18N_SKILL_LEVEL_INTERMIDIATE": "Text denoting intermediate level mastery of user in a particular skill.", + "I18N_SKILL_LEVEL_NEEDS_WORK": "Text denoting user needs to work upon a particular skill that needs to be improved.", + "I18N_SKILL_LEVEL_PROFICIENT": "Text denoting proficient level mastery of user in a particular skill.", "I18N_SOLICIT_ANSWER_DETAILS_FEEDBACK": "Text to introduce the regular feedback that Oppia shows after the student submits the answer details.", "I18N_SOLICIT_ANSWER_DETAILS_QUESTION": "Text for the question which asks the students to explain why they entered a particular answer.", "I18N_SOLUTION_EXPLANATION_TITLE": "Title shown in the section that shows the explanation of a solution to the learner.\n{{identical|Explanation}}", + "I18N_SOLUTION_NEED_HELP": "Text shown in the modal to prompt the learner to view the solution", "I18N_SOLUTION_TITLE": "Title shown in the modal that shows the solution of a state to the learner", "I18N_SPLASH_BENEFITS_ONE": "The first benefit, shown in the splash page, of using Oppia.", "I18N_SPLASH_BENEFITS_THREE": "The third benefit, shown in the splash page, of using Oppia.", @@ -724,9 +1242,65 @@ "I18N_SPLASH_VOLUNTEERS_CONTENT": "The content displayed in the volunteers section on the splash page.", "I18N_SPLASH_VOLUNTEERS_TITLE": "The main title for the volunteers section on the splash page.", "I18N_START_HERE": "Click here to start!", + "I18N_STORY_3M5VBajMccXO_DESCRIPTION": "Description of the story displayed on story viewer page. Should be at most 1000 characters.", + "I18N_STORY_3M5VBajMccXO_TITLE": "Title of the story displayed on topic viewer and story viewer page. 
Should be at most 39 characters.", + "I18N_STORY_JhiDkq01dqgC_DESCRIPTION": "Description of the story displayed on story viewer page. Should be at most 1000 characters.", + "I18N_STORY_JhiDkq01dqgC_TITLE": "Title of the story displayed on topic viewer and story viewer page. Should be at most 39 characters.", + "I18N_STORY_Qu6THxP29tOy_DESCRIPTION": "Description of the story displayed on story viewer page. Should be at most 1000 characters.", + "I18N_STORY_Qu6THxP29tOy_TITLE": "Title of the story displayed on topic viewer and story viewer page. Should be at most 39 characters.", + "I18N_STORY_RRVMHsZ5Mobh_DESCRIPTION": "Description of the story displayed on story viewer page. Should be at most 1000 characters.", + "I18N_STORY_RRVMHsZ5Mobh_TITLE": "Title of the story displayed on topic viewer and story viewer page. Should be at most 39 characters.", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "The title for a chapter icon, in the story viewer, that is completed by the learner.", + "I18N_STORY_VIEWER_PAGE_TITLE": "Text displayed on the browser tab when on the story viewer page", + "I18N_STORY_ialKSV0VYV0B_DESCRIPTION": "Description of the story displayed on story viewer page. Should be at most 1000 characters.", + "I18N_STORY_ialKSV0VYV0B_TITLE": "Title of the story displayed on topic viewer and story viewer page. Should be at most 39 characters.", + "I18N_STORY_rqnxwceQyFnv_DESCRIPTION": "Description of the story displayed on story viewer page. Should be at most 1000 characters.", + "I18N_STORY_rqnxwceQyFnv_TITLE": "Title of the story displayed on topic viewer and story viewer page. Should be at most 39 characters.", + "I18N_STORY_vfJDB3JAdwIx_DESCRIPTION": "Description of the story displayed on story viewer page. Should be at most 1000 characters.", + "I18N_STORY_vfJDB3JAdwIx_TITLE": "Title of the story displayed on topic viewer and story viewer page. 
Should be at most 39 characters.", + "I18N_STRUGGLING_WITH_SKILL": "Text displayed in learner specific progress view in learner progress tab on edit learner group page when user is struggling with a particular skill.", "I18N_SUBSCRIBE_BUTTON_TEXT": "The text that appears on the subscribe button, which allows users to subscribe to creators.\n{{Identical|Subscribe}}", + "I18N_SUBTOPIC_0abdeaJhmfPm_adding-fractions_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_0abdeaJhmfPm_comparing-fractions_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_0abdeaJhmfPm_dividing-fractions_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_0abdeaJhmfPm_equivalent-fractions_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_0abdeaJhmfPm_fractions-of-a-group_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_0abdeaJhmfPm_mixed-numbers_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_0abdeaJhmfPm_multiplying-fractions_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_0abdeaJhmfPm_number-line_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_0abdeaJhmfPm_subtracting-fractions_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. 
Should be at most 64 characters long.", + "I18N_SUBTOPIC_0abdeaJhmfPm_what-is-a-fraction_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_5g0nxGUmx5J5_calculations-with-ratios_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_5g0nxGUmx5J5_combining-ratios_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_5g0nxGUmx5J5_equivalent-ratios_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_C4fqwrvqWpRm_basic-concepts_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_C4fqwrvqWpRm_memorizing-expressions_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_C4fqwrvqWpRm_multiplication-techniques_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_C4fqwrvqWpRm_rules-to-simplify_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. 
Should be at most 64 characters long.", "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Text appears next to a skill summary card, labelling it as the next skill to complete", + "I18N_SUBTOPIC_VIEWER_PAGE_TITLE": "Text displayed on the browser tab when on the subtopic viewer page.", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "Text appears next to a skill summary card, labelling it as the previous skill to complete", + "I18N_SUBTOPIC_dLmjjMDbCcrf_algebraic-expressions_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_dLmjjMDbCcrf_modelling-scenarios_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_dLmjjMDbCcrf_order-of-operations_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_dLmjjMDbCcrf_problem-solving_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_dLmjjMDbCcrf_solving-equations_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_dLmjjMDbCcrf_variables_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_iX9kYCjnouWN_comparing-numbers_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_iX9kYCjnouWN_naming-numbers_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_iX9kYCjnouWN_place-names-and-values_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. 
Should be at most 64 characters long.", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_qW12maD4hiA8_basic-concepts_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_qW12maD4hiA8_techniques-of-division_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_sWBXKH4PZcK6_adding-numbers_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_sWBXKH4PZcK6_estimation_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. Should be at most 64 characters long.", + "I18N_SUBTOPIC_sWBXKH4PZcK6_sequences _TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page.", + "I18N_SUBTOPIC_sWBXKH4PZcK6_subtracting-numbers_TITLE": "Title for the subtopic card appearing in the practice tab of the topic viewer page. 
Should be at most 64 characters long.", + "I18N_SYLLABUS_SKILL_TITLE": "Title for skill filter in the syllabus type filter on the add syllabus items section of learner group creation page.", + "I18N_SYLLABUS_STORY_TITLE": "Title for story filter in the syllabus type filter on the add syllabus items section of learner group creation page.", "I18N_TEACH_BENEFITS_ONE": "The first benefit, shown in the splash page, of using Oppia.", "I18N_TEACH_BENEFITS_THREE": "The third benefit, shown in the splash page, of using Oppia.", "I18N_TEACH_BENEFITS_TITLE": "Title displayed in the benefits section on the splash page.", @@ -741,6 +1315,7 @@ "I18N_TEACH_PAGE_LIBRARY_CONTENT": "Content in library section in Oppia's Teach Page.", "I18N_TEACH_PAGE_LIBRARY_TITLE": "Title in library section in Oppia's Teach Page.", "I18N_TEACH_PAGE_SIX_TITLE": "Title in last buttons section in Oppia's Teach Page", + "I18N_TEACH_PAGE_TITLE": "Text displayed on the browser tab when on Oppia's Teach Page.", "I18N_TEACH_STUDENT_DETAILS_1": "The detail of the first person,displayed below on the teach page", "I18N_TEACH_STUDENT_DETAILS_2": "The detail of the second person,displayed below on the teach page", "I18N_TEACH_STUDENT_DETAILS_3": "The detail of the third person,displayed below on the teach page", @@ -748,57 +1323,108 @@ "I18N_TEACH_TESTIMONIAL_1": "The testimonial of the first person (female), displayed on the Teach page", "I18N_TEACH_TESTIMONIAL_2": "The testimonial of the second person (female), displayed on the Teach page", "I18N_TEACH_TESTIMONIAL_3": "The testimonial of the third person (female), displayed on the Teach page", + "I18N_TERMS_PAGE_TITLE": "Text displayed on the browser tab when on the Terms of use Page.", "I18N_THANKS_PAGE_BREADCRUMB": "Text displayed in the post-donation Thanks page. 
- Text shown in the top left corner of the nav bar.\n{{identical|Thanks}}", + "I18N_THANKS_PAGE_TITLE": "Text displayed on the browser tab when on the Thanks Page.", + "I18N_TIME_FOR_BREAK_BODY_1": "Body of the modal that prompts user to take a break on repeated wrong answers in an interaction.", + "I18N_TIME_FOR_BREAK_BODY_2": "Body of the modal that prompts user to take a break on repeated wrong answers in an interaction.", + "I18N_TIME_FOR_BREAK_FOOTER": "Footer of the modal that prompts user to take a break on repeated wrong answers in an interaction.", + "I18N_TIME_FOR_BREAK_TITLE": "Title of modal that prompts user to take a break on repeated wrong answers in an interaction.", + "I18N_TOPIC_0abdeaJhmfPm_DESCRIPTION": "The description of the topic in the topic summary. Should be at most 240 characters long. - Displayed on topic page", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "The title of the topic in the topic summary card. Should be at most 39 characters long. - Displayed on learn math page", + "I18N_TOPIC_5g0nxGUmx5J5_DESCRIPTION": "The description of the topic in the topic summary. Should be at most 240 characters long. - Displayed on topic page", + "I18N_TOPIC_5g0nxGUmx5J5_TITLE": "The title of the topic in the topic summary card. Should be at most 39 characters long. - Displayed on learn math page", + "I18N_TOPIC_C4fqwrvqWpRm_DESCRIPTION": "The description of the topic in the topic summary. Should be at most 240 characters long. - Displayed on topic page", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "The title of the topic in the topic summary card. Should be at most 39 characters long. 
- Displayed on learn math page", + "I18N_TOPIC_LANDING_PAGE_TITLE": "Text displayed on the browser's tab when on the topic landing page.", + "I18N_TOPIC_LEARN": "The title of the column in the learner dashboard that allows learner to mark a topic as a goal.\n{{Identical|Learn}}", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "Text to be displayed in the topic summary tile after the number of lessons in topic", + "I18N_TOPIC_TITLE": "The title of the column in the learner dashboard that displays topic names.\n\n{{Identical|Topic}}", "I18N_TOPIC_VIEWER_CHAPTER": "Text to be displayed on each story summary tile in the topic viewer indicating the chapter count in the story.", "I18N_TOPIC_VIEWER_CHAPTERS": "Text to be displayed on each topic summary tile with singular/plural adjustments.\n\nOnly translate \"1 chapter\" and \"# chapters\" (keep a \"#\" as the placeholder for the actual replaced number). The rest should remain as is, including curly braces (\"{\", \"}\"), plural keywords \"count\", \"one\" or \"other\" (in English), and comma separators (\",\").\n\nIn languages that don't need to mark different forms for the plural, you may just keep the \"other\" option (used as the default value for the result of the \"{count...}\" expression), and then translate just \"# chapters\".", + "I18N_TOPIC_VIEWER_COMING_SOON": "Text displayed as a heading when no content is available to show in respective tabs.", + "I18N_TOPIC_VIEWER_COMING_SOON_LESSONS": "Text displayed as a descriptive message when no content is available to show in lessons tab.", + "I18N_TOPIC_VIEWER_COMING_SOON_PRACTICE": "Text displayed as a descriptive message when no content is available to show in Practice tab.", "I18N_TOPIC_VIEWER_DESCRIPTION": "Text shown on the header inside the topic info tab, where the topic description is displayed.\n{{identical|Description}}", "I18N_TOPIC_VIEWER_LESSON": "Title shown in a label in the topic's viewer page, when the number of chapters is equal to one.", 
"I18N_TOPIC_VIEWER_LESSONS": "Title shown in a tab header in the topic's viewer page. - Clicking on this tab header shows the list of stories in topic.", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "Message shown in the topic viewer when stories aren't released for the topic yet.", "I18N_TOPIC_VIEWER_MASTER_SKILLS": "Title shown in the 'Practice' tab in the topic viewer, where the user can select skills to tackle questions on them.", + "I18N_TOPIC_VIEWER_NO_QUESTION_WARNING": "Text displayed below the start button in 'practice' tab, when user select a subtopic which does not contain any questions attached to it.", + "I18N_TOPIC_VIEWER_PAGE_TITLE": "Text displayed on the browser's tab when on the topic viewer page.", "I18N_TOPIC_VIEWER_PRACTICE": "Title shown in a tab header in the topic's viewer page. - Clicking on this tab header shows a list of the practice questions that can be used to test the learner's understanding of the topic.", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_MESSAGE": "Message shown in practice session confirmation modal meant for informing the users that the practice feature is still in beta and seeking confirmation from them on whether they'd like to start the practice session in English if their site language is not set to English.", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_TITLE": "Title shown in the practice session confirmation modal meant for seeking confirmation from users on whether they'd like to start the practice session in English if their site language is not set to English.", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_TAG": "Text enclosed in parentheses meant to indicate that a practice session is in beta", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "Text shown in the topic's viewer page when the topic does not have enough questions linked to it.", "I18N_TOPIC_VIEWER_REVISION": "Title shown in a tab header in the topic's viewer page. 
- Clicking on this shows a list of the subtopics that lie within the topic", "I18N_TOPIC_VIEWER_SELECT_SKILLS": "Subtitle shown in the 'Practice' tab in the topic viewer, where the user can select skills to tackle questions on them.", "I18N_TOPIC_VIEWER_SKILL": "Label for the 'Skill' counter, displayed in the 'Info' tab in the topic viewer when number of skills equal to one.", "I18N_TOPIC_VIEWER_SKILLS": "Label for the 'Skill' counter, displayed in the 'Info' tab in the topic viewer when number of skills greater than one.", - "I18N_TOPIC_VIEWER_START_PRACTICE": "Text displayed on the start button in the 'Practice' tab in the topic viewer. Clicking this, the user can start a practice session on skills selected by the user.", + "I18N_TOPIC_VIEWER_START_PRACTICE": "Text displayed on the start button in the 'Practice' tab in the topic viewer. Clicking this, the user can start a practice session on skills selected by the user.\n{{Identical|Start}}", "I18N_TOPIC_VIEWER_STORIES": "Label for the 'Story' counter, displayed in the 'Info' tab in the topic viewer, when number of stories is greater than one.", + "I18N_TOPIC_VIEWER_STORIES_HEAD": "Text displayed above all stories specifically in mobile view.", "I18N_TOPIC_VIEWER_STORY": "Label for the 'Story' counter, displayed in the 'Info' tab in the topic viewer, when number of stories is equal to one.", "I18N_TOPIC_VIEWER_STUDY_SKILLS": "Title for the 'Revision' tab in the topic viewers, from where the user can learn about certain skills.", "I18N_TOPIC_VIEWER_STUDY_SKILLS_SUBTITLE": "Subtitle for the 'Revision' tab in the topic viewers, from where the user can learn about certain skills.", "I18N_TOPIC_VIEWER_VIEW_ALL": "Text displayed on the story summary card in the 'Lessons' tab in the topic viewer, to view the titles of all chapters in the story.", "I18N_TOPIC_VIEWER_VIEW_LESS": "Text displayed on the story summary card in the 'Lessons' tab in the topic viewer, to hide the titles of extra chapters in the story.", + 
"I18N_TOPIC_dLmjjMDbCcrf_DESCRIPTION": "The description of the topic in the topic summary. Should be at most 240 characters long. - Displayed on topic page", + "I18N_TOPIC_dLmjjMDbCcrf_TITLE": "The title of the topic in the topic summary card. Should be at most 39 characters long. - Displayed on learn math page", + "I18N_TOPIC_iX9kYCjnouWN_DESCRIPTION": "The description of the topic in the topic summary. Should be at most 240 characters long. - Displayed on topic page", + "I18N_TOPIC_iX9kYCjnouWN_TITLE": "The title of the topic in the topic summary card. Should be at most 39 characters long. - Displayed on learn math page", + "I18N_TOPIC_qW12maD4hiA8_DESCRIPTION": "The description of the topic in the topic summary. Should be at most 240 characters long. - Displayed on topic page", + "I18N_TOPIC_qW12maD4hiA8_TITLE": "The title of the topic in the topic summary card. Should be at most 39 characters long. - Displayed on learn math page", + "I18N_TOPIC_sWBXKH4PZcK6_DESCRIPTION": "The description of the topic in the topic summary. Should be at most 240 characters long. - Displayed on topic page", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "The title of the topic in the topic summary card. Should be at most 39 characters long. - Displayed on learn math page", "I18N_TOPNAV_ABOUT": "Text displayed in the navigation bar. - When the user hovers over the button, a list with several information pages is displayed.\n{{Identical|About}}", "I18N_TOPNAV_ABOUT_OPPIA": "Text displayed inside drop-down menu - The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the about oppia page is loaded which contains a more general description of the site.", "I18N_TOPNAV_ADMIN_PAGE": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over their profile picture. When the option is clicked, the admin page is loaded.", + "I18N_TOPNAV_ANDROID_APP_DESCRIPTION": "Text displayed inside a drop-down menu. 
It tells the user that an Android app is available.", + "I18N_TOPNAV_ANDROID_APP_HEADING": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over the learn button. When the option is clicked, the android app page is loaded.", "I18N_TOPNAV_BLOG": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the Oppia blog is loaded.\n{{Identical|Blog}}", "I18N_TOPNAV_BLOG_DASHBOARD": "Text displayed in the dropdown on the navbar for reaching the blog dashboard.", - "I18N_TOPNAV_CLASSROOM": "Text displayed inside the navigation bar. - When the user hovers over the button, a list with several classroom pages is displayed.", - "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Text displayed inside drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_CLASSROOM button. When the option is clicked, the Maths Classroom page is loaded.", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Text displayed inside drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_LEARN button. When the option is clicked, the Maths Classroom page is loaded.", "I18N_TOPNAV_CONTACT_US": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the Contact Us page is loaded.\n{{Identical|Contact us}}", + "I18N_TOPNAV_CONTACT_US_DESCRIPTION": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the Contact Us page is loaded.\n{{Identical|Contact us}}", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over their profile picture. When the option is clicked, the contributor dashboard page is loaded. 
On this page, the user can contribute translation and questions for lessons.", "I18N_TOPNAV_CREATOR_DASHBOARD": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over their profile picture. When the option is clicked, the creator dashboard page is loaded. On this page, the user can administrate his own explorations and groups.", "I18N_TOPNAV_DONATE": "Text displayed in the navigation bar. - When the user clicks the button the donation page is loaded. The donation page asks the viewer to donate to The Oppia Foundation.\n{{Identical|Donate}}", "I18N_TOPNAV_DONATE_DESCRIPTION": "Text displayed inside a drop-down menu. The description for donate is shown when user hovers over the I18N_TOPNAV_DONATE button, it tells them why they should donate.", + "I18N_TOPNAV_FACILITATOR_DASHBOARD": "Text displayed in the navigation bar of the facilitator dashboard page.", "I18N_TOPNAV_FORUM": "Text displayed inside drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the external forum page is loaded.\n{{Identical|Forum}}", "I18N_TOPNAV_GET_INVOLVED": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the Get Involved page on the Oppia Foundation website is loaded.", "I18N_TOPNAV_GET_STARTED": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the Get Started page is loaded.\n{{Identical|Get started}}", + "I18N_TOPNAV_HOME": "Text displayed in the navigation bar. - When the user clicks the button, the home page is loaded based on user preferences set.\n{{Identical|Home}}", + "I18N_TOPNAV_LEARN": "Text displayed inside the navigation bar. 
- When the user hovers over the button, a list with several classroom pages is displayed.\n{{Identical|Learn}}", "I18N_TOPNAV_LEARNER_DASHBOARD": "Text displayed in the navigation bar when the learner dashboard page is loaded.", + "I18N_TOPNAV_LEARNER_GROUP": "Text displayed in the navigation bar of the learner group page.", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "Text displayed inside drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_LEARN button.", + "I18N_TOPNAV_LEARN_HEADING": "Text displayed inside drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_LEARN button.", + "I18N_TOPNAV_LEARN_LINK_1": "Text displayed inside drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_LEARN button. When the option is clicked, the Maths Classroom page is loaded.", + "I18N_TOPNAV_LEARN_LINK_2": "Text displayed in the navigation bar. - When the user clicks the button the library page is loaded.", "I18N_TOPNAV_LIBRARY": "Text displayed in the navigation bar. - When the user clicks the button the library page is loaded. The library page lists all the exploration available in the site and allows the user to search and filter them.\n{{Identical|Library}}", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "Text displayed inside drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_LEARN button.", "I18N_TOPNAV_LOGOUT": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over their profile picture. When the option is clicked, the user's account is closed. This button is only available when the user is logged in.\n{{Identical|Logout}}", "I18N_TOPNAV_MODERATOR_PAGE": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over their profile picture. When the option is clicked, the moderator page is loaded.", "I18N_TOPNAV_OPPIA_FOUNDATION": "Text displayed inside a drop-down menu. 
- The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the user is redirected to the Oppia Foundation website.", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the participation playbook page is loaded.", "I18N_TOPNAV_PARTNERSHIPS": "Text displayed inside a drop-down menu. - When user clicks user land on Partnerships page", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "Text displayed inside a drop-down menu. - When user clicks user land on Partnerships page", "I18N_TOPNAV_PREFERENCES": "Text displayed inside a drop-down menu. - The list is shown after the user hovers over their profile picture. When the option is clicked, the preference page is loaded where the user can modify the account information.\n{{Identical|Preferences}}", "I18N_TOPNAV_SIGN_IN": "Text displayed in the top right corner of the page. - When clicked, the user is redirected to the sign in page where he can access his account.\n{{Identical|Sign in}}", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Text displayed inside the Sign In dropdown. - When clicked, the user is signed in with Google Oauth - The text in the button needs to be less than 25 characters long", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Text displayed inside drop-down menu - The list is shown after the user hovers over the I18N_TOPNAV_ABOUT button. When the option is clicked, the teach with oppia page is loaded which contains information specific for teachers who would like to create explorations.", "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "Text displayed in the dropdown on the navbar for reaching the topics and skills dashboard.", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "Text displayed inside a drop-down menu. It suggests the user to try the Android app.", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "Text displayed inside a drop-down menu. 
- When the user clicks it, they land on the Volunteers page", "I18N_TOTAL_SUBSCRIBERS_TEXT": "Text displayed under the subscribers tab in creator dashboard. Tells the creator the total number of subscribers he/she has.", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "The text that appears on the unsubscribe button, which allows users to unsubscribe from creators.\n{{Identical|Unsubscribe}}", + "I18N_VIEW_ALL_TOPICS": "Text displayed on the card that shows all topics in a classroom.", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Text displayed in the Volunteer page. - Text shown in the top left corner of the nav bar.", + "I18N_VOLUNTEER_PAGE_TITLE": "Text displayed on the browser's tab when on the volunteer page", "I18N_WARNING_MODAL_DESCRIPTION": "Text shown in the warning modal before the solution is displayed", "I18N_WARNING_MODAL_TITLE": "Title of the modal shown before the modal that displays the solution, to warn the user\n\n{{Identical|Warning}}", - "I18N_WORKED_EXAMPLE": "The text that is displayed on the button to view another worked example when a skill concept card is displayed in the exploration player." 
+ "I18N_WORKED_EXAMPLE": "The text that is displayed on the button to view another worked example when a skill concept card is displayed in the exploration player.", + "I18N_YES": "Text on button to accept the content of a modal.\n\n{{Identical|Yes}}" } diff --git a/assets/i18n/ru.json b/assets/i18n/ru.json index f93215ffc6c7..dc673449cac0 100644 --- a/assets/i18n/ru.json +++ b/assets/i18n/ru.json @@ -1,5 +1,6 @@ { - "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "О фонде", + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "О Фонде Oppia", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "О Фонде Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Создать исследование", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "о теме, о которой вы заботитесь.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Получайте обратную связь", @@ -29,12 +30,50 @@ "I18N_ABOUT_PAGE_TABS_FOUNDATION": "Фонд", "I18N_ABOUT_PAGE_TEACH_BUTTON": "Я хочу обучать", "I18N_ABOUT_PAGE_TITLE": "О нас | Oppia", + "I18N_ACTION_ACCESS_ANDROID_APP": "Получите доступ к приложению для Android", "I18N_ACTION_BROWSE_LESSONS": "Просмотреть наши уроки", "I18N_ACTION_CREATE_LESSON": "Создайте свой собственный урок", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "Отмена", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "Готово", + "I18N_ANDROID_PAGE_AVAILABLE_FOR_DOWNLOAD_TEXT": "доступно для скачивания.", + "I18N_ANDROID_PAGE_BETA_DESCRIPTION": "Бета-версию приложения Oppia для Android теперь можно бесплатно загрузить и использовать на английском и бразильском португальском.", + "I18N_ANDROID_PAGE_CONSENT_CHECKBOX_LABEL": "Вы подтверждаете, что вам исполнилось 18 лет, или что у вас есть согласие и одобрение вашего родителя или опекуна.", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "Адрес электронной почты", + "I18N_ANDROID_PAGE_FEATURES_SECTION_HEADER": "Образование для каждого.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_3": "Приложение доступно на английском и бразильском португальском.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_4": "Другие языки будут добавлены в 
ближайшее время!", + "I18N_ANDROID_PAGE_FEATURE_TEXT_1": "Учитесь через увлекательные истории", + "I18N_ANDROID_PAGE_FEATURE_TEXT_2": "Учитесь в любое время, в любом месте", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "Имя", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "Похоже, вы еще не создали ни одной записи в блоге!", "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Новый пост", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Черновики", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "Блог", + "I18N_BLOG_HOME_PAGE_NO_RESULTS_FOUND": "Извините, в блоге нет публикаций для отображения.", + "I18N_BLOG_HOME_PAGE_OPPIA_DESCRIPTION": "Создание сообщества для предоставления качественного образования тем, кто не имеет к нему доступа.", + "I18N_BLOG_HOME_PAGE_POSTS_HEADING": "Новейшие публикации", + "I18N_BLOG_HOME_PAGE_POSTS_NUMBER_DISPLAY": "Показаны записи с <[startingNumber]> по <[endingNumber]> из <[totalNumber]>", + "I18N_BLOG_HOME_PAGE_QUERY_SEARCH_HEADING": "Ключевые слова", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "Теги", + "I18N_BLOG_HOME_PAGE_TAG_FILTER_HOLDER_TEXT": "Выберите теги", + "I18N_BLOG_HOME_PAGE_TITLE": "Блог Oppia | Oppia", + "I18N_BLOG_HOME_PAGE_WELCOME_HEADING": "Добро пожаловать в блог Oppia!", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_HEADING": "Результаты поиска", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_DISPLAY": "Показаны результаты поиска с <[startingNumber]> по <[endingNumber]>", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_NUMBER_OUT_OF_TOTAL_DISPLAY": "Показаны записи с <[startingNumber]> по <[endingNumber]> из <[totalNumber]>.", + "I18N_BLOG_POST_PAGE_RECOMMENDATION_SECTON_HEADING": "Предлагается вам.", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "Теги", + "I18N_BLOG_POST_PAGE_TITLE": "<[blogPostTitle]> | Блог | Oppia", + "I18N_BLOG_POST_THUMBNAIL_ALLOWED_EXTENSIONS_PREFIX": "Допустимые расширения изображений:", + "I18N_CLASSROOM_MATH_TITLE": "Математика", + "I18N_CLASSROOM_PAGE_BEGIN_WITH_FIRST_TOPIC_BUTTON": "Начните с <[firstTopic]>", "I18N_CLASSROOM_PAGE_COMING_SOON": "Скоро", 
"I18N_CLASSROOM_PAGE_HEADING": "Классная комната Oppia", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "Начать", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Продолжить", + "I18N_COMING_SOON": "Совсем скоро!", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "КОЛЛЕКЦИЯ", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "Отличная работа!", "I18N_CONTACT_PAGE_HEADING": "Присоединяйтесь!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Спасибо за ваш интерес к поддержке проекта Oppia!", "I18N_CONTACT_PAGE_PARAGRAPH_11_HEADING": "Пожертвования", @@ -44,10 +83,12 @@ "I18N_CONTACT_PAGE_PARAGRAPH_4_HEADING": "Как вы можете помочь", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "OK", "I18N_CORRECT_FEEDBACK": "Правильно!", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "Ссылка на вашу группу", "I18N_CREATE_ACTIVITY_QUESTION": "Что вы хотите создать?", "I18N_CREATE_COLLECTION": "Создать коллекцию", "I18N_CREATE_EXPLORATION_CREATE": "Создать", "I18N_CREATE_EXPLORATION_UPLOAD": "Загрузить", + "I18N_CREATE_LEARNER_GROUP": "Создать группу", "I18N_CREATE_NO_THANKS": "Нет, спасибо", "I18N_CREATE_YES_PLEASE": "Да, пожалуйста!", "I18N_DASHBOARD_COLLECTIONS": "коллекции", @@ -66,13 +107,19 @@ "I18N_DELETE_ACCOUNT_PAGE_BUTTON": "Удалить мою учётную запись", "I18N_DELETE_ACCOUNT_PAGE_HEADING": "Удалить учётную запись", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Обзор", + "I18N_DEST_IF_STUCK_INFO_TOOLTIP": "Теперь вы можете указать новую карточку, в которой вы сможете рассказать учащимся о тех идеях, которые используются в вопросе, в случае, когда они застряли на месте!", + "I18N_DIAGNOSTIC_TEST_RESULT_GO_TO_CLASSROOM_BUTTON_TEXT": "Перейти в класс", + "I18N_DIAGNOSTIC_TEST_RESULT_START_TOPIC": "Начать <[topicName]>", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Перетащите изображение в эту область", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Загрузить файл", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "Пожертвовать | Сделайте положительный вклад | Oppia", "I18N_ERROR_HEADER_400": "Ошибка 400", "I18N_ERROR_HEADER_401": "Ошибка 401", 
"I18N_ERROR_HEADER_404": "Ошибка 404", "I18N_ERROR_HEADER_500": "Ошибка 500", "I18N_ERROR_MESSAGE_400": "Иногда машины не могут понимать людей. Это один из тех случаев.", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "Ошибка <[statusCode]> | Oppia", + "I18N_ERROR_PAGE_TITLE": "Ошибка <[statusCode]> - Oppia", "I18N_ERROR_PAGE_TITLE_400": "Ошибка 400 - Oppia", "I18N_ERROR_PAGE_TITLE_401": "Ошибка 401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "Ошибка 404 - Oppia", @@ -80,6 +127,7 @@ "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Аноним", "I18N_FOOTER_ABOUT": "О проекте", "I18N_FOOTER_ABOUT_ALL_CAPS": "О ПРОЕКТЕ OPPIA", + "I18N_FOOTER_ANDROID_APP": "Приложение для Android", "I18N_FOOTER_BROWSE_LIBRARY": "Просматривать библиотеку", "I18N_FOOTER_CONTACT_US": "Контакты", "I18N_FOOTER_CREDITS": "Авторы", @@ -98,6 +146,9 @@ "I18N_GET_STARTED_PAGE_TITLE": "Начать", "I18N_HINT_NEED_HELP": "Нужна помощь? Посмотреть подсказку по этой проблеме!", "I18N_HINT_TITLE": "Подсказка", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "Пожалуйста, не используйте нулевое значение (0) в знаменателе", + "I18N_INTERACTIONS_FRACTIONS_NON_EMPTY": "Пожалуйста, введите непустое дробное значение.", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "Пожалуйста, введите свой ответ в виде правильной или неправильной дроби (например, 5/3 вместо 1 2/3).", "I18N_INTERACTIONS_GRAPH_DELETE": "Удалить", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "Неверный график!", "I18N_INTERACTIONS_GRAPH_MOVE": "Переместить", @@ -105,6 +156,10 @@ "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Выберите картинку для отображения]", "I18N_INTERACTIONS_MUSIC_CLEAR": "Очистить", "I18N_INTERACTIONS_MUSIC_PLAY": "Воспроизвести", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_VALUE": "Пожалуйста, убедитесь, что значение является либо дробью, либо целым числом", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_DOT": "Введённое значение может содержать не более 15 цифр (0-9), исключая символы (. 
или -).", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "Введённое значение должно быть действительным числом.", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "Введённое значение должно быть больше или равно нулю.", "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Отмена", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Требуется подтверждение", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Код сброса", @@ -118,11 +173,14 @@ "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Текущие цели", "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Начните с изучения ", "I18N_LEARNER_DASHBOARD_EMPTY_CURRENT_GOALS_SECTION": "Выберите цель ниже и начните своё обучение!", + "I18N_LEARNER_DASHBOARD_EMPTY_LEARN_SOMETHING_NEW_SECTION": "Очевидно, при выборе целей вы достигли предела. Обращайтесь в библиотеку и изучите другие варианты.", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "установка цели! ", "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "Добрый вечер", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "Ответить", "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Цели", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "В процессе", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "Ваши группы", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_DECLINE_INVITATION": "Отклонить", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Узнайте что-то новое", "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "Доброе утро", "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": "Похоже, вы еще не начали никаких исследований. 
Отправляйтесь в библиотеку, чтобы начать новое захватывающее исследование!", @@ -135,6 +193,14 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "Текущий:", "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Предложенный:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Предложения", + "I18N_LEARNER_DASHBOARD_VIEW": "Посмотреть", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "Далее", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "Предыдущий шаг", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "Описание", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "Название группы", + "I18N_LEARNER_GROUP_PREFERENCES_MODAL_SAVE_BUTTON": "Сохранить", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "Посмотреть детали", + "I18N_LEARNER_GROUP_SYLLABUS_LESSONS": "уроки", "I18N_LIBRARY_ALL_CATEGORIES": "Все категории", "I18N_LIBRARY_ALL_LANGUAGES": "Все языки", "I18N_LIBRARY_CATEGORIES_ALGORITHMS": "Алгоритмы", @@ -185,6 +251,7 @@ "I18N_LICENSE_TERMS_HEADING": "Условия лицензии", "I18N_MODAL_CANCEL_BUTTON": "Отмена", "I18N_MODAL_CONTINUE_BUTTON": "Продолжить", + "I18N_NO": "Нет", "I18N_ONE_SUBSCRIBER_TEXT": "У вас 1 подписчик.", "I18N_PLAYER_AUDIO_EXPAND_TEXT": "Послушай урок", "I18N_PLAYER_AUDIO_LANGUAGE": "Язык", @@ -210,7 +277,7 @@ "I18N_PLAYER_HINT_REQUEST_STRING_3": "У меня есть некоторые проблемы", "I18N_PLAYER_INFO_TOOLTIP": "Информация", "I18N_PLAYER_LAST_UPDATED_TOOLTIP": "Последнее обновление", - "I18N_PLAYER_LOADING": "Загрузка…", + "I18N_PLAYER_LOADING": "Загружается…", "I18N_PLAYER_NEXT_LESSON": "Следующий урок", "I18N_PLAYER_RATINGS_TOOLTIP": "Рейтинги", "I18N_PLAYER_REPORT_MODAL_BODY_ADDITIONAL_DETAILS": "Просьба представить дополнительные сведения для модераторов:", @@ -242,16 +309,26 @@ "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "Это язык, на котором показано место.", "I18N_PREFERENCES_PROFILE_PICTURE_ADD": "Добавить изображение профиля", "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Перетащите, чтобы обрезать и изменить размер:", + 
"I18N_PREFERENCES_SEARCH_LABEL": "Поиск", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Выберите предпочитаемые языки...", "I18N_PREFERENCES_USERNAME": "Имя участника", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Пока не выбрано", - "I18N_SIDEBAR_ABOUT_LINK": "О проекте Оппиа", + "I18N_PROGRESS_REMINDER_CONTINUE_TEXT": "Вы хотите продолжить?", + "I18N_PROGRESS_REMINDER_RESTART_LESSON": "Нет, перезапустить сначала", + "I18N_PROGRESS_REMINDER_RESUME_LESSON": "Да, возобновить урок", + "I18N_SAVE_BUTTON_ALERT_TOOLTIP": "Прогресс не может быть сохранен, пока вы не достигли первой контрольной точки.", + "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_2": "Уже есть учётная запись?", + "I18N_SAVE_PROGRESS_BUTTON_TEXT": "Скопировать", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "Скопировано!", + "I18N_SHOW_LESS": "Показать меньше", + "I18N_SIDEBAR_ABOUT_LINK": "О нас", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "О фонде «Oppia»", "I18N_SIDEBAR_BLOG": "Блог", "I18N_SIDEBAR_CLASSROOM": "Класс", "I18N_SIDEBAR_CONTACT_US": "Контакты", "I18N_SIDEBAR_DONATE": "Пожертвовать", "I18N_SIDEBAR_FORUM": "Форум", - "I18N_SIDEBAR_GET_STARTED": "Начать", + "I18N_SIDEBAR_HOME": "Главная", "I18N_SIDEBAR_LIBRARY_LINK": "Библиотека", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Фонд Oppia", "I18N_SIDEBAR_PARTNERSHIPS": "Партнёрство", @@ -283,12 +360,24 @@ "I18N_SPLASH_THIRD_EXPLORATION_DESCRIPTION": "Oppia позволяет создавать и делиться исследованиями по широкому кругу вопросов, ограниченных только вашей фантазией.", "I18N_SPLASH_TITLE": "Бесплатное образование для всех", "I18N_SUBSCRIBE_BUTTON_TEXT": "Подписаться", + "I18N_SUBTOPIC_VIEWER_PAGE_TITLE": "Проверить <[subtopicTitle]> | Oppia", + "I18N_SUBTOPIC_dLmjjMDbCcrf_order-of-operations_TITLE": "Порядок действий", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "Решение проблемы", + "I18N_SYLLABUS_SKILL_TITLE": "Навык", + "I18N_SYLLABUS_STORY_TITLE": "История", + "I18N_TIME_FOR_BREAK_FOOTER": "Я готов_а продолжить урок", + "I18N_TIME_FOR_BREAK_TITLE": "Самое 
время для перерыва?", + "I18N_TOPIC_LEARN": "Ознакомление", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 урок} other{# уроков}}", + "I18N_TOPIC_TITLE": "Тема", "I18N_TOPIC_VIEWER_CHAPTER": "Глава", + "I18N_TOPIC_VIEWER_COMING_SOON": "Скоро!", "I18N_TOPIC_VIEWER_DESCRIPTION": "Описание", "I18N_TOPIC_VIEWER_LESSON": "Урок", "I18N_TOPIC_VIEWER_LESSONS": "Уроки", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "Вернитесь позже, когда материалы по этой теме будут доступны.", "I18N_TOPIC_VIEWER_MASTER_SKILLS": "Овладеть навыками <[topicName]>", + "I18N_TOPIC_VIEWER_PAGE_TITLE": "<[topicName]> | <[pageTitleFragment]> | Oppia", "I18N_TOPIC_VIEWER_PRACTICE": "Практика", "I18N_TOPIC_VIEWER_REVISION": "Версия", "I18N_TOPIC_VIEWER_SELECT_SKILLS": "Выберите навыки из уроков <[topicName]>, которые вы хотите практиковать.", @@ -303,13 +392,15 @@ "I18N_TOPNAV_ABOUT": "О проекте", "I18N_TOPNAV_ABOUT_OPPIA": "О Oppia", "I18N_TOPNAV_ADMIN_PAGE": "Страница Админа", + "I18N_TOPNAV_ANDROID_APP_DESCRIPTION": "Раннее приложение Oppia для Android теперь доступно на английском и бразильском португальском языках. 
Попробуйте и оставьте отзыв!", + "I18N_TOPNAV_ANDROID_APP_HEADING": "Приложение для Android", "I18N_TOPNAV_BLOG": "Блог", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Базовая математика", "I18N_TOPNAV_CONTACT_US": "Свяжитесь с нами", "I18N_TOPNAV_DONATE": "Пожертвовать", "I18N_TOPNAV_FORUM": "Форум", "I18N_TOPNAV_GET_STARTED": "Начать", - "I18N_TOPNAV_LIBRARY": "Библиотека", + "I18N_TOPNAV_LIBRARY": "Библиотека сообщества", "I18N_TOPNAV_LOGOUT": "Выйти", "I18N_TOPNAV_MODERATOR_PAGE": "Страница модератора", "I18N_TOPNAV_OPPIA_FOUNDATION": "Фонд Oppia", @@ -317,6 +408,9 @@ "I18N_TOPNAV_SIGN_IN": "Войти", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Войти с помощью Google", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Учить с Oppia", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "Попробуйте сегодня!", "I18N_TOTAL_SUBSCRIBERS_TEXT": "У Вас <[totalSubscribers]> подписчиков.", - "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Отписаться" + "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Отписаться", + "I18N_VOLUNTEER_PAGE_TITLE": "Волонтёр | Oppia", + "I18N_YES": "Да" } diff --git a/assets/i18n/sk.json b/assets/i18n/sk.json index e3ac40376a6d..85c24e12598d 100644 --- a/assets/i18n/sk.json +++ b/assets/i18n/sk.json @@ -1,5 +1,5 @@ { - "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "O nadácii", + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "O nadácii Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Vytvorte Prieskumnú výpravu", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "o téme, ktorá Vás zaujíma.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Získajte spätnú väzbu", @@ -65,6 +65,7 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "Návod pre učiteľov", "I18N_ACTION_TIPS_FOR_PARENTS": "Tipy pre rodičov a opatrovateľov", "I18N_ACTION_VISIT_CLASSROOM": "Navštíviť triedu", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_2": "Prosím skontrolujte pravopis.", "I18N_ATTRIBUTION_HTML_STEP_ONE": "Skopírovať a prilepiť HTML", "I18N_ATTRIBUTION_HTML_STEP_TWO": "Uistite sa, že odkaz sa zobrazí ako <[linkText]>", "I18N_ATTRIBUTION_HTML_TITLE": "Atribút v HTML", @@ -105,12 +106,19 @@ 
"I18N_CLASSROOM_CALLOUT_HEADING_1": "Matematické nadácie", "I18N_CLASSROOM_CALLOUT_HEADING_2": "Predstavenie: Školský predmet Oppie", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Objavte najskôr obsiahle kurzy v najnovších školských triedach Oppie, pre ovládnutie základných matematických zručností.", + "I18N_CLASSROOM_MATH_TITLE": "Matematika", "I18N_CLASSROOM_PAGE_COMING_SOON": "Príde čoskoro", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Detaily kurzu", "I18N_CLASSROOM_PAGE_HEADING": "Školský predmet Oppie", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Preskúmajte viac lekcií vytvorených komunitou", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Hľadanie v komunitnej knižnici", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "Pokryté témy", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "Začať", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Pokračovať", + "I18N_COLLECTION_PLAYER_PAGE_FINISHED": "Dokončili ste celú zbierku! Môžete sa vybrať na niektorú z výprav nižšie.", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "Pre zobrazenie náhľadu podržte kurzor myšky nad výpravou.", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "Do zbierky nebola pridaná žiadna výprava.", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "ZBIERKA", "I18N_CONTACT_PAGE_BREADCRUMB": "Kontakt", "I18N_CONTACT_PAGE_HEADING": "Zapojte sa!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Ďakujeme za Váš záujem pomôcť projektu Oppia!", @@ -206,10 +214,8 @@ "I18N_DIRECTIVES_UPLOAD_A_FILE": "Nahrať súbor", "I18N_DONATE_PAGE_BREADCRUMB": "Darovať", "I18N_DONATE_PAGE_IMAGE_TITLE": "Vaše dary podporujú:", - "I18N_DONATE_PAGE_TITLE": "Darovať
Nadácii Oppia", + "I18N_DONATE_PAGE_TITLE": "Darovať Nadácii Oppia", "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Počuť viac od našej komunity Oppia", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "V roku 2021 začala Oppia s jednoduchou myšlienkou: vylepšiť vzdelanie študentov po celom svete a zároveň zvýšiť kvalitu vyučovania. Vízia sa od vtedy premenila na vzdelávaciu platformu s viac než 11.000 výpravami, ktoré využíva viac než 430.000 používateľov z celého sveta.", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "Prosím podporte Nadáciu Oppia a pridajte sa k nám pri poskytovaní radosti z učenia a vzdelávania ľudí po celom svete.", "I18N_ERROR_DISABLED_EXPLORATION": "Nedostupná prieskumná výprava", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Prepáčte, ale prieskumná výprava na ktorú ste klikli je momentálne nedostupná. Prosím skúste neskôr.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Nedostupná prieskumná výprava - Oppia", @@ -226,6 +232,8 @@ "I18N_ERROR_PAGE_TITLE_401": "Chyba 401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "Chyba 404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "Chyba 500 - Oppia", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "Pripravený/á na viac koláčikov? Urobte si krátky kvíz pre kontrolu toho čo ste sa už naučili.", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE": "Rovnosť zlomkov (rekapitulácia)", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anonymný", "I18N_FOOTER_ABOUT": "O Oppia", "I18N_FOOTER_ABOUT_ALL_CAPS": "VŠETKO O OPPIA", @@ -274,6 +282,10 @@ "I18N_HEADING_VOLUNTEER": "Dobrovoľník", "I18N_HINT_NEED_HELP": "Potrebujete pomoc? 
Zobraziť indíciu k tomuto problému!", "I18N_HINT_TITLE": "Indícia", + "I18N_INTERACTIONS_ALGEBRAIC_EXPR_INSTRUCTION": "Sem napíšte výraz", + "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "Napísať kód v editore", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Ísť do editora", + "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Vziať a presunúť položky", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Zadajte zlomok do formulára \"x/y\" alebo zmiešané číslo do formulára A x/y\".", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Zlomok zadajte vo formáte x/y.", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "Pridať hranu", @@ -282,6 +294,8 @@ "I18N_INTERACTIONS_GRAPH_EDGE_FINAL_HELPTEXT": "Kliknite na cieľový uzol pre vytvorenie hrany (kliknite na ten istý uzol pre zrušenie vytvorenia hrany).", "I18N_INTERACTIONS_GRAPH_EDGE_INITIAL_HELPTEXT": "Kliknite na počiatočný uzol hrany pre vytvorenie.", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "Neplatný diagram!", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "Vytvoriť graf", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "Zobraziť graf", "I18N_INTERACTIONS_GRAPH_MOVE": "Presunúť", "I18N_INTERACTIONS_GRAPH_MOVE_FINAL_HELPTEXT": "Kliknite na akýkoľvek bod do ktorého sa má uzol preniesť.", "I18N_INTERACTIONS_GRAPH_MOVE_INITIAL_HELPTEXT": "Pre posunutie kliknite na uzol.", @@ -292,11 +306,17 @@ "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "a <[vertices]> uzly", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Aktualizovať štítok", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Aktualizovať váhu", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Kliknite na obrázok", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Vybrať obrázok pre zobrazenie]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Môžete vybrať viac možností.", "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Prosím vyberte jednu alebo viac možností.} other{Prosím vyberte # alebo viac možností.}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": 
"{maxAllowableSelectionCount, plural, one{Nesmie byť vybrané viac než 1 možnosť.} other{Nesmie byť vybrané viac než # možnosti/í.}}", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "Kliknite na mapu", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "Zobraziť mapu", + "I18N_INTERACTIONS_MATH_EQ_INSTRUCTION": "Sem napíšte rovnicu.", "I18N_INTERACTIONS_MUSIC_CLEAR": "Vymazať", + "I18N_INTERACTIONS_MUSIC_INSTRUCTION": "Presuňte poznámky pre personál na vytvorenie poradia", + "I18N_INTERACTIONS_MUSIC_NARROW_INSTRUCTION": "Zobraziť hudobný personál", "I18N_INTERACTIONS_MUSIC_PLAY": "Prehrať", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Prehrať cieľovú sekvenciu", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "Formáty jednotiek", @@ -304,6 +324,8 @@ "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Zrušiť", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Je potrebné potvrdenie", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Resetovať kód", + "I18N_INTERACTIONS_PENCILCODE_INSTRUCTION": "Upraviť kód. Kliknite \"Hrať\" pre kontrolu!", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Zobraziť editor", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Pridať položku", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Vyzerá to, že vaše položky sa opakujú!", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(Pridajte jednu položku na jeden riadok.)", @@ -372,7 +394,7 @@ "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Odosielanie...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Strieborný", "I18N_LEARNER_DASHBOARD_SKILLS": "Zručnosti", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "Odborné zručnosti", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Pokrok v zručnostiach", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Dokončené príbehy", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Odbery", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Pokrok:", @@ -617,23 +639,38 @@ "I18N_QUESTION_PLAYER_TEST_PASSED": "Skúška dokončená. 
Veľmi dobre!", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "Čas na registráciu uplynul", "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "Prepáčte, Váš čas na registráciu uplynul. Prosím kliknite na \"Pokračovať v registrácii\" pre zopakovanie procesu.", + "I18N_RESET_CODE": "Vynulovať kód", "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Preskúmať test", "I18N_SAVE_PROGRESS": "Prihláste alebo registrujte sa pre uloženie Vášho progresu.", "I18N_SHARE_LESSON": "Zdieľať túto lekciu", "I18N_SHOW_SOLUTION_BUTTON": "Zobraziť riešenie", - "I18N_SIDEBAR_ABOUT_LINK": "O Oppii", + "I18N_SIDEBAR_ABOUT_LINK": "O nás", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM": "Školská trieda", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Základy matematiky", "I18N_SIDEBAR_CONTACT_US": "Kontaktujte nás", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "Sme tu, aby sme Vám pomohli s akýmikoľvek otázkami.", "I18N_SIDEBAR_DONATE": "Darovať", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "Vaše príspevky pomáhajú vylepšiť kvalitu vzdelávania pre všetkých.", "I18N_SIDEBAR_FORUM": "Fórum", - "I18N_SIDEBAR_GET_STARTED": "Začať", + "I18N_SIDEBAR_GET_INVOLVED": "Zapojte sa", + "I18N_SIDEBAR_HOME": "Domov", + "I18N_SIDEBAR_LEARN": "Dozvedieť sa viac", "I18N_SIDEBAR_LIBRARY_LINK": "Knižnica", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "Matematické nadácie", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "Pre začatie s matematikou sú dostupné začiatočnícke lekcie.", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Nadácia Oppia", "I18N_SIDEBAR_PARTNERSHIPS": "Obchodné partnerstvo", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "Prineste kvalitné vzdelávanie študentov do Vášho regiónu.", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "Pričítať a odčítať", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "Komunitná knižnica", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "Dodatočné zdroje vytvorené komunitou.", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "Násobenie", + "I18N_SIDEBAR_SUBMENU_PLACE_VALUES": "Zadávať hodnoty", + 
"I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "Zobraziť všetky lekcie", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Vyučovať s Oppiou", "I18N_SIDEBAR_VOLUNTEER": "Dobrovoľník", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "Pridajte sa do nášho globálneho tímu pre tvorbu a vylepšovanie lekcií.", "I18N_SIGNIN_LOADING": "Prihlasovanie sa", "I18N_SIGNIN_PAGE_TITLE": "Prihlásiť sa", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Zaškrtnutím políčka vľavo od tohoto textu potvrdíte a odsúhlasíte Podmienky používania <[sitename]> uvedené tu.", @@ -711,7 +748,7 @@ "I18N_START_HERE": "Kliknite sem pre začiatok!", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - Dokončené!", "I18N_SUBSCRIBE_BUTTON_TEXT": "Odoberať", - "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Ďalšie zručnosti", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Ďalšia zručnosť:", "I18N_TEACH_BENEFITS_ONE": "Efektívne, vysoko kvalitné vzdelávanie pre všetky vekové kategórie", "I18N_TEACH_BENEFITS_THREE": "Vždy jednoducho a zdarma", "I18N_TEACH_BENEFITS_TITLE": "Naše výhody", @@ -758,27 +795,37 @@ "I18N_TOPNAV_ADMIN_PAGE": "Stránka administrátora", "I18N_TOPNAV_BLOG": "Blog", "I18N_TOPNAV_BLOG_DASHBOARD": "Nástenka blogu", - "I18N_TOPNAV_CLASSROOM": "Školská trieda", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Základy matematiky", "I18N_TOPNAV_CONTACT_US": "Kontaktujte nás", + "I18N_TOPNAV_CONTACT_US_DESCRIPTION": "Sme tu, aby sme Vám pomohli s akýmikoľvek otázkami.", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Nástenka prispievateľa", "I18N_TOPNAV_CREATOR_DASHBOARD": "Nástenka tvorcu", "I18N_TOPNAV_DONATE": "Darovať", + "I18N_TOPNAV_DONATE_DESCRIPTION": "Vaše príspevky pomáhajú vylepšiť kvalitu vzdelávania pre všetkých.", "I18N_TOPNAV_FORUM": "Fórum", "I18N_TOPNAV_GET_INVOLVED": "Zapojiť sa", "I18N_TOPNAV_GET_STARTED": "Začať", + "I18N_TOPNAV_HOME": "Domov", + "I18N_TOPNAV_LEARN": "Učiť sa", "I18N_TOPNAV_LEARNER_DASHBOARD": "Nástenka študenta", - "I18N_TOPNAV_LIBRARY": "Knižnica", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "Pre začatie s matematikou sú dostupné 
začiatočnícke lekcie.", + "I18N_TOPNAV_LEARN_HEADING": "Spôsoby ako sa naučiť viac", + "I18N_TOPNAV_LEARN_LINK_1": "Zobraziť všetky lekcie", + "I18N_TOPNAV_LEARN_LINK_2": "Pokračovať v učení", + "I18N_TOPNAV_LIBRARY": "Komunitná knižnica", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "Dodatočné zdroje vytvorené komunitou, aby Vám pomohli sa naučiť viac.", "I18N_TOPNAV_LOGOUT": "Odhlásenie", "I18N_TOPNAV_MODERATOR_PAGE": "Stránka moderátora", "I18N_TOPNAV_OPPIA_FOUNDATION": "Nadácia Oppia", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Príručka účastníka", - "I18N_TOPNAV_PARTNERSHIPS": "Obchodné partnerstvo", + "I18N_TOPNAV_PARTNERSHIPS": "Školy a organizácie", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "Staňte sa partnerom a prineste Oppiu do Vašej školy, komunity alebo kraja.", "I18N_TOPNAV_PREFERENCES": "Nastavenia", "I18N_TOPNAV_SIGN_IN": "Prihlásiť sa", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Prihlásiť sa cez Google", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Vyučovať s Oppiou", "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "Nástenka pre Témy a Zručnosti", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "Pridajte sa do nášho globálneho tímu pre tvorbu a vylepšovanie lekcií.", "I18N_TOTAL_SUBSCRIBERS_TEXT": "Celkový počet Vašich odberateľov <[totalSubscribers]>", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Zrušiť odober", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Dobrovoľník", diff --git a/assets/i18n/skr-arab.json b/assets/i18n/skr-arab.json index 0dfaecbc8740..16840d983039 100644 --- a/assets/i18n/skr-arab.json +++ b/assets/i18n/skr-arab.json @@ -11,6 +11,36 @@ "I18N_ABOUT_PAGE_TABS_FOUNDATION": "ادارہ", "I18N_ABOUT_PAGE_TEACH_BUTTON": "میں پڑھاوݨ چاہندا ہاں", "I18N_ABOUT_PAGE_TITLE": "ساݙے بارے ـ اوپیا", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": "منسوخ", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "تھی ڳیا", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "ای میل پتہ", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "ناں", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_BREADCRUMB": "مصنف پروفائل", + 
"I18N_BLOG_AUTHOR_PROFILE_PAGE_TOTAL_POSTS_DISPLAY": "<[totalNumber]> پوسٹاں", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "ناں", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "نویں پوسٹ", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "بچاؤ", + "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "ڈرافٹ", + "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "شائع تھی ڳئے", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "بلاگ", + "I18N_BLOG_HOME_PAGE_POSTS_HEADING": "تازہ ترین پوسٹاں", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "ٹیگ", + "I18N_BLOG_HOME_PAGE_TAG_FILTER_HOLDER_TEXT": "ٹیگ چݨو", + "I18N_BLOG_POST_EDITOR_BODY_HEADING": "باڈی", + "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "منسوخ", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "مٹاؤ", + "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "شائع کرو", + "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "تھی ڳیا", + "I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "پیشگی ݙکھالا", + "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "ٹیگ", + "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "تھمب نیل", + "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "عنوان", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "ٹیگ", + "I18N_CLASSROOM_CALLOUT_BUTTON": "پھرولو", + "I18N_CLASSROOM_PAGE_BEGIN_WITH_FIRST_TOPIC_BUTTON": "<[firstTopic]> نال شروع کرو", + "I18N_CLASSROOM_PAGE_TAKE_A_TEST_BUTTON": "ٹیسٹ ݙیوو", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "شروع کرو", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "جاری رکھو", "I18N_CONTACT_PAGE_HEADING": "کم نال رَلو!", "I18N_CONTACT_PAGE_PARAGRAPH_11_HEADING": "عطیے", "I18N_CONTACT_PAGE_PARAGRAPH_13_HEADING": "دباؤ", @@ -39,7 +69,19 @@ "I18N_DASHBOARD_TABLE_HEADING_EXPLORATION": "بھورل", "I18N_DASHBOARD_TABLE_HEADING_LAST_UPDATED": "چھیکڑی تبدیلی", "I18N_DASHBOARD_TABLE_HEADING_PLAYS": "بلے", + "I18N_DIAGNOSTIC_TEST_RESULT_GO_TO_CLASSROOM_BUTTON_TEXT": "کمرہ جماعت وچ ون٘ڄو", + "I18N_DIAGNOSTIC_TEST_RESULT_START_TOPIC": "<[topicName]> شروع کرو", "I18N_DIRECTIVES_UPLOAD_A_FILE": "فائل چڑھاؤ", + "I18N_DONATE_PAGE_FAQ_QUESTION_1": "اوپیا کیا ہے؟", + 
"I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_1": "خان پور، بھارت کنوں", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_2": "فلسطین کنوں", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "بھارت کنوں", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "ساݙا بلاگ پڑھو", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "ای میل پتہ", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_NAME_LABEL_TEXT": "ناں(مرضی ہے)", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_SUBSCRIBE_LABEL_TEXT": "ہݨ سبسکرائب کرو", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "عطیہ دان کرݨ تے تھورائت ہیں!", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "وڈیو ݙیکھو", "I18N_ERROR_HEADER_400": "رپھڑ٤٠٠", "I18N_ERROR_HEADER_401": "رپھڑ٤٠١", "I18N_ERROR_HEADER_404": "رپھڑ٤٠٤", @@ -51,6 +93,7 @@ "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "گمنام", "I18N_FOOTER_ABOUT": "تعارف", "I18N_FOOTER_ABOUT_ALL_CAPS": "اوپیا بارے", + "I18N_FOOTER_ANDROID_APP": "اینڈرائیڈ ایپ", "I18N_FOOTER_CONTACT_US": "ساکوں ملو", "I18N_FOOTER_CONTRIBUTE_ALL_CAPS": "آپݨاں حصہ پاؤ", "I18N_FOOTER_CREDITS": "کریڈٹ", @@ -78,6 +121,8 @@ "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "مکمل تھی ڳیا", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "ڄواب", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "تھیندا پئے", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "تہاݙے گروپ", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUP_SECTION_DECLINE_INVITATION": "انکار کرو", "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "ہٹاؤ", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "بھیڄو", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "بھیجیندا پئے", @@ -85,6 +130,10 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "صلاح ایہ ہے", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "صلاح", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "تجویزاں ݙیکھو", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "تفصیل", + "I18N_LEARNER_GROUP_DETAILS_TITLE": "تفصیلاں", + "I18N_LEARNER_GROUP_PREFERENCES_MODAL_SAVE_BUTTON": "بچاؤ", + 
"I18N_LEAVE_LEARNER_GROUP_MODAL_BUTTON": "چھوڑو", "I18N_LIBRARY_ALL_CATEGORIES": "ساریاں ونکیاں", "I18N_LIBRARY_ALL_LANGUAGES": "ساریاں زباناں", "I18N_LIBRARY_CATEGORIES_ART": "آرٹ", @@ -151,18 +200,20 @@ "I18N_PLAYER_SUBMIT_BUTTON": "جمع کرواؤ", "I18N_PLAYER_TAGS_TOOLTIP": "ٹیگ", "I18N_PLAYER_VIEWS_TOOLTIP": "ݙکھالے", + "I18N_PRACTICE_SESSION_START_BUTTON_TEXT": "مشق شروع کرو", "I18N_PREFERENCES_BREADCRUMB": "ترجیحات", "I18N_PREFERENCES_CANCEL_BUTTON": "منسوخ", "I18N_PREFERENCES_EMAIL": "ای میل", "I18N_PREFERENCES_HEADING": "ترجیحات", "I18N_PREFERENCES_PICTURE": "فوٹو", "I18N_PREFERENCES_USERNAME": "ورتݨ آلا ناں", + "I18N_SAVE_PROGRESS_BUTTON_TEXT": "نقل کرو", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "نقل تھی ڳئے!", "I18N_SIDEBAR_ABOUT_LINK": "تعارف", "I18N_SIDEBAR_BLOG": "بلاگ", "I18N_SIDEBAR_CONTACT_US": "ساکوں ملو", "I18N_SIDEBAR_DONATE": "عطیہ", "I18N_SIDEBAR_FORUM": "فورم", - "I18N_SIDEBAR_GET_STARTED": "شروع کرو", "I18N_SIDEBAR_LIBRARY_LINK": "لائبریری", "I18N_SIGNUP_CLOSE_BUTTON": "بند کرو", "I18N_SIGNUP_EMAIL": "ای میل", @@ -176,12 +227,13 @@ "I18N_SPLASH_START_TEACHING": "پڑھاوݨ شروع کرو", "I18N_TOPIC_VIEWER_STORY": "کہاݨی", "I18N_TOPNAV_ABOUT": "تعارف", + "I18N_TOPNAV_ANDROID_APP_HEADING": "اینڈرائیڈ ایپ", "I18N_TOPNAV_BLOG": "بلاگ", - "I18N_TOPNAV_CLASSROOM": "کمرہ جماعت", "I18N_TOPNAV_CONTACT_US": "ساکوں ملو", "I18N_TOPNAV_DONATE": "عطیہ", "I18N_TOPNAV_FORUM": "فورم", "I18N_TOPNAV_GET_STARTED": "شروع کرو", + "I18N_TOPNAV_LEARN": "کمرہ جماعت", "I18N_TOPNAV_LIBRARY": "لائبریری", "I18N_TOPNAV_LOGOUT": "لاگ آؤٹ", "I18N_TOPNAV_PREFERENCES": "ترجیحات", diff --git a/assets/i18n/sv.json b/assets/i18n/sv.json index 952fcea807cb..037a17188e3b 100644 --- a/assets/i18n/sv.json +++ b/assets/i18n/sv.json @@ -101,8 +101,19 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "Användarstatistik", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Översikt", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "Detta kommer ta dig till en sida där du kan radera ditt Oppia-konto.", + 
"I18N_DIAGNOSTIC_TEST_RESULT_TEXT_FOR_TWO_TOPICS": "Baserat på dina svar rekommenderar vi att du startar med något av dessa ämnen.", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Dra en bild till detta område", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Ladda upp en fil", + "I18N_DONATE_PAGE_BUDGET_HEADING": "Vart tar dina pengar vägen?", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_2": "Sprider ordet", + "I18N_DONATE_PAGE_FAQ_QUESTION_9": "Finns det någon jag kan prata med om jag är intresserad av att bli företagspartner?", + "I18N_DONATE_PAGE_HEADING_2": "högkvalitativ och engagerande utbildning.", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "Från Indien", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "Läs vår blogg", + "I18N_DONATE_PAGE_STATISTIC_4": "Volontärer från hela världen", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "Tack för att du donerade!", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_3": "Om du har några frågor, vänligen kontakta oss när som helst.", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "Titta på en video", "I18N_ERROR_DISABLED_EXPLORATION": "Inaktiverad utforskning", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Utforskningen du klickade på är tyvärr inaktiverad för närvarande. 
Försök gärna igen senare.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Inaktiverad utforskning - Oppia", @@ -408,7 +419,6 @@ "I18N_SIDEBAR_CONTACT_US": "Kontakta oss", "I18N_SIDEBAR_DONATE": "Donera", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Kom igång", "I18N_SIDEBAR_LIBRARY_LINK": "Bibliotek", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Undervisa med Oppia", "I18N_SIDEBAR_VOLUNTEER": "Volontär", diff --git a/assets/i18n/sw.json b/assets/i18n/sw.json new file mode 100644 index 000000000000..caee2427953c --- /dev/null +++ b/assets/i18n/sw.json @@ -0,0 +1,1131 @@ +{ + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Maelezo Kuhusu Shirika la Oppia", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "Kuhusu Shirika la Oppia | Oppia", + "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Unda Uchunguzi", + "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "kuhusu mada unayojali.", + "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Pata maoni", + "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK_TEXT": "ili kuboresha uchunguzi wako.", + "I18N_ABOUT_PAGE_ABOUT_TAB_HEADING": "Kuhusu Oppia", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_1": "Dhamira ya Oppia ni kumsaidia mtu yeyote kujifunza chochote anachotaka kwa njia bora na ya kufurahisha.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_2": "Kwa kuunda seti ya masomo yasiyolipishwa, ya ubora wa juu, na yenye matokeo yanayoonekana kwa usaidizi wa walimu kutoka kote ulimwenguni, Oppia inalenga kuwapa wanafunzi elimu bora- bila kujali walipo au rasilimali gani za jadi wanazoweza kufikia.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_3": "Kufikia sasa, walimu wameunda zaidi ya <[numberOfExplorations]> ya masomo haya, ambayo tunayaita explorations . Na wanahudumia karibu wanafunzi <[numberofStudentsServed]> kote ulimwenguni.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_4": "Ugunduzi huwasaidia wanafunzi kujifunza kwa njia ya kufurahisha na ya ubunifu, kwa kutumia video, picha na maswali ya wazi. 
Kwa kuwa wanafunzi mara nyingi huwa na dhana potofu sawa, Oppia pia huwapa walimu uwezo wa kushughulikia dhana hizi potofu moja kwa moja ndani ya uchunguzi, na kuwapa uwezo wa kutoa maoni yanayolengwa kwa wanafunzi wengi kwa wakati mmoja.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_5": "Ikiwa wewe ni mwanafunzi na ungependa kujifunza na Oppia, unaweza anza matukio yako ya kujifunza kwa kuvinjari uvumbuzi wetu .", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_6": "Iwapo wewe ni mwalimu unayetaka kuathiri maisha ya wanafunzi duniani kote, unaweza kutuma ombi la kujiunga na mpango wetu wa Fundisha ukitumia Oppia ,programu unaolenga kutoa masomo ya mada ambayo kwa kawaida wanafunzi huona ni ngumu.", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_7": "Kwa kufundisha na Oppia, unaweza kuboresha ujuzi wako katika mawasiliano na huruma huku ukisaidia kuboresha elimu kwa wanafunzi kote ulimwenguni. Au, ikiwa bado hauko tayari kufundisha, bado unaweza kushiriki maoni kuhusu masomo ili kusaidia kuyaboresha kwa wanafunzi wengine!", + "I18N_ABOUT_PAGE_ABOUT_TAB_PARAGRAPH_8": "Ikiwa wewe ni mwalimu wa K-12, mwanafunzi aliyehitimu, au mtu binafsi ambaye anapenda somo mahususi na anataka kutoa maarifa yako, Oppia inakukaribisha. Jiunge na jumuiya na uanze uchunguzi nasi.", + "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE": "Chapisha &Gawa", + "I18N_ABOUT_PAGE_ABOUT_TAB_SHARE_TEXT": "ubunifu wako na jamii.", + "I18N_ABOUT_PAGE_AUDIO_SUBTITLES_FEATURE": "Manukuu ya Sauti", + "I18N_ABOUT_PAGE_BREADCRUMB": "Kuhusu", + "I18N_ABOUT_PAGE_CREATE_LESSON_CONTENT": "Ukiwa na mfumo wa kuunda maudhui wa Oppia, unaweza kuunda na kubinafsisha masomo kuhusu mada unazozipenda kwa urahisi.", + "I18N_ABOUT_PAGE_CREDITS_TAB_HEADING": "Watambuliwa", + "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT": "Wachangiaji wa Oppia wanatoka duniani kote —wengi wetu ni wanafunzi, wanafunzi wa hivi karibuni na walimu. Tungependa kuwashukuru wachangiaji wafuatao ambao wamesaidia kujenga jukwaa. 
Ikiwa ungependa kusaidia, hivi ndivyo unavyoweza kuhusika !", + "I18N_ABOUT_PAGE_CREDITS_TAB_TEXT_BOTTOM": "Timu ya ukuzaji wa Oppia pia inashukuru kwa maoni, mawazo, usaidizi na mapendekezo kutoka kwa <[listOfNames]>.", + "I18N_ABOUT_PAGE_CREDITS_THANK_TRANSLATEWIKI": "Tungependa pia kuwashukuru translatewiki.net kwa kutoa tafsiri zenye vyanzo vingi.", + "I18N_ABOUT_PAGE_EASILY_CREATE_LESSON": "Unda Masomo kwa Urahisi", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "Gundua Mafunzo Yanayotolewa na Jumuiya", + "I18N_ABOUT_PAGE_EXPLORE_LESSONS_CONTENT": "Walimu na wanajamii kote ulimwenguni hutumia jukwaa la kuunda somo la Oppia kama njia ya kuunda na kugawa masomo. Unaweza kupata zaidi ya masomo 20,000 kwa masomo 17 tofauti katika maktaba yetu ya Ugunduzi, na labda utatiwa moyo kuunda yako mwenyewe!", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "Changia", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "Jihusishe", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "Shirika la Oppia", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_1": "Tovuti ya Oppia na msimbo wa chanzo unatumika na Shirika la Oppia , shirika lisilo la faida la 501(c)(3) lililosajiliwa katika Jimbo la California.", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_2": "Shirika linategemea usaidizi wa ukarimu wa wachangiaji na wafadhili kutoka kote ulimwenguni kufanya kazi kuelekea dhamira yake ya kumwezesha mtu yeyote kujifunza chochote anachotaka kwa njia ya kufurahisha na inayofaa.", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_3": "Ikiwa ungependa kujiunga na mamia ya watu wengine wanaounga mkono juhudi hizi, tafadhali jifunze zaidi kuhusu kuchangia Shirika la Oppia au kujihusisha kwa njia nyinginezo .", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4": "Wakurugenzi wa Shirika hili ni Ben Henning, Jacob Davis, na Sean Lip. Shirika ni bylaws and minutes zinapatikana ili kusomwa. 
Ikiwa ungependa kuwasiliana na Shirika, tafadhali tuma barua pepe: admin@oppia.org .", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_4_HEADING": "Wakurugenzi", + "I18N_ABOUT_PAGE_HEADING": "Oppia: elimu kwa wote", + "I18N_ABOUT_PAGE_LANGUAGE_FEATURE": "Tafsiri kwa Lahaja za kienyeji", + "I18N_ABOUT_PAGE_LEARN_BUTTON": "Nataka kujifunza", + "I18N_ABOUT_PAGE_LEARN_FROM": "Jifunze kutoka kwa Masomo Yanayoratibiwa ya Oppia", + "I18N_ABOUT_PAGE_LEARN_FROM_CONTENT": "Darasani, unaweza kupata seti ya masomo ambayo timu ya Oppia imebuni na kufanyia majaribio ili kuhakikisha kuwa yanafaa na ya kufurahisha wanafunzi wote. Masomo yote yamekaguliwa na walimu na wataalam, kwa hivyo unaweza kuhakikishiwa kuwa wanafunzi wako wanapata elimu bora huku wakijifunza kwa kasi yao wenyewe.", + "I18N_ABOUT_PAGE_LESSON_FEATURE": "Masomo ya msingi wa hadithi", + "I18N_ABOUT_PAGE_MOBILE_FEATURE": "Urambazaji unaotumia rununu", + "I18N_ABOUT_PAGE_OUR_FEATURES": "Sifa Zetu", + "I18N_ABOUT_PAGE_OUR_FEATURES_CONTENT": "Yametengenezwa na walimu, wakufunzi na wanafunzi kutoka kote ulimwenguni, tumejitahidi kuhakikisha jukwaa hili na masomo tunayounda yanavutia, yanafaa na yanapatikana kwa wingi.", + "I18N_ABOUT_PAGE_OUR_OUTCOMES": "Matokeo Yetu", + "I18N_ABOUT_PAGE_OUR_OUTCOMES_CONTENT": "Tunajitahidi kwa ufanisi na ubora. 
Ndiyo maana tunaendelea kufanya utafiti ya watumiaji na majaribio ya nasibu ili kuhakikisha masomo yetu yanafikia viwango vyetu vya juu.", + "I18N_ABOUT_PAGE_SECTION_ONE_CONTENT": "Oppia hutoa mbinu mpya na ya kuvutia ya kujifunza mtandaoni ambayo imeundwa mahususi kukidhi mahitaji ya kipekee ya wanafunzi wasio na nyenzo za kutosha kote ulimwenguni.", + "I18N_ABOUT_PAGE_SECTION_SEVEN_TITLE": "Anza na Vidokezo Vilivyoratibiwa", + "I18N_ABOUT_PAGE_SPEECH_BUBBLE": "Je, ungependa kufanya nini leo?", + "I18N_ABOUT_PAGE_TABS_ABOUT": "Kuhusu", + "I18N_ABOUT_PAGE_TABS_CREDITS": "Watambuliwa", + "I18N_ABOUT_PAGE_TABS_FOUNDATION": "Msingi", + "I18N_ABOUT_PAGE_TEACH_BUTTON": "Nataka kufundisha", + "I18N_ABOUT_PAGE_TITLE": "Kuhusu | Oppia", + "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "Anza na Oppia", + "I18N_ABOUT_PAGE_WIFI_FEATURE": "Kipimo cha Chini Kinahitajika", + "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "Omba Kufundisha Kwa Opia", + "I18N_ACTION_BROWSE_EXPLORATIONS": "Vinjari Ugunduzi wetu", + "I18N_ACTION_BROWSE_LESSONS": "Vinjari Masomo yetu", + "I18N_ACTION_BROWSE_LIBRARY": "Vinjari maktaba", + "I18N_ACTION_CREATE_EXPLORATION": "Unda Uchunguzi", + "I18N_ACTION_CREATE_LESSON": "Unda Somo lako mwenyewe", + "I18N_ACTION_CREATE_LESSON_BUTTON": "Unda masomo", + "I18N_ACTION_EXPLORE_LESSONS": "Angalia masomo", + "I18N_ACTION_GUIDE_FOR_TEACHERS": "Mwongozo kwa walimu", + "I18N_ACTION_TIPS_FOR_PARENTS": "Vidokezo kwa wazazi na walezi", + "I18N_ACTION_VISIT_CLASSROOM": "Tembelea darasa", + "I18N_ATTRIBUTION_HTML_STEP_ONE": "Nakili na ubandike HTML", + "I18N_ATTRIBUTION_HTML_STEP_TWO": "Hakikisha kiungo kinaonekana kama \"<[linkText]>\"", + "I18N_ATTRIBUTION_HTML_TITLE": "Sifa katika HTML", + "I18N_ATTRIBUTION_PRINT_STEP_ONE": "Nakili na ubandike Watambuliwa", + "I18N_ATTRIBUTION_PRINT_STEP_TWO": "Ambatisha nakala ya \"<[link]>\"", + "I18N_ATTRIBUTION_PRINT_TITLE": "Sifa kwa kuchapishwa", + "I18N_ATTRIBUTION_TITLE": "Jinsi ya kuhusisha somo hili kwa kugawanya au kutumia tena", + 
"I18N_BLOG_CARD_PREVIEW_CONTEXT": "Hivi ndivyo kadi ya blogu itakavyoonekana kwenye Ukurasa wa Nyumbani na kwenye Wasifu wako wa Mwandishi.", + "I18N_BLOG_CARD_PREVIEW_HEADING": "Onyesho la awali la Kadi ya Blogi", + "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "Unda Chapisho mpya la Blogi", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "Inaonekana bado hujatunga hadithi yoyote!", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Chapisho Mpya", + "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Rasimu", + "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Imechapishwa!", + "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Ongeza picha ndogo ya Picha", + "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Mwili", + "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Ghairi", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Futa", + "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "Hariri Picha Ndogo ya Picha", + "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "Ilihifadhiwa mwisho saa", + "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "Chapisha", + "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "Tayari", + "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "Hifadhi Kama Rasimu", + "I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "Hakiki", + "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "Vitambulisho", + "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_PREFIX": "Kikomo cha", + "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "vitambulisho zaidi bado vinaweza kuongezwa.", + "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "Picha ndogo", + "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "Kichwa", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "Chagua faili au iburute hapa", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Hitilafu: Haikuweza kusoma faili ya picha.", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "Ongeza picha ndogo", + "I18N_BLOG_POST_UNTITLED_HEADING": "Haina jina", + "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "Maudhui ya kadi ni marefu sana. 
Tafadhali iweke chini ya herufi 4500 ili kuhifadhi.", + "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Kadi hii ni ndefu sana, na huenda wanafunzi wakakosa kupendezwa. Fikiria kufupisha, au kugawanya iwe kadi mbili.", + "I18N_CHAPTER_COMPLETION": "Hongera kwa kukamilisha sura!", + "I18N_CLASSROOM_CALLOUT_BUTTON": "Chunguza", + "I18N_CLASSROOM_CALLOUT_HEADING_1": "Misingi ya Hesabu", + "I18N_CLASSROOM_CALLOUT_HEADING_2": "Kuanzisha: Darasa la Oppia", + "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Angalia kozi ya kwanza ya kina katika Darasa mpya kabisa la Oppia! Masomo yaliyoratibiwa - yanakaguliwa na walimu - ili uweze kupata ujuzi wa msingi wa hesabu katika mada ikiwepo Sehemu ya nambari hadi Kuzidisha na Kugawanya.", + "I18N_CLASSROOM_MATH_TITLE": "Hesabu", + "I18N_CLASSROOM_PAGE_COMING_SOON": "\nInaanza Hivi Karibuni", + "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Maelezo ya Kozi", + "I18N_CLASSROOM_PAGE_HEADING": "Darasa la Oppia", + "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Gundua Mafunzo Zaidi Yanayotengenezwa na Jumuiya", + "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Tafuta kupitia Maktaba yetu ya Jumuiya", + "I18N_CLASSROOM_PAGE_TITLE": "Jifunze <[classroomName]> ukitumia Oppia | Oppia", + "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "Mada Zinazofundishwa", + "I18N_COLLECTION_EDITOR_PAGE_TITLE": "<[collectionTitle]> - Kihariri cha Oppia", + "I18N_COLLECTION_EDITOR_UNTITLED_COLLECTION_PAGE_TITLE": "Haina jina - Mhariri wa Oppia", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "Anza", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Endelea", + "I18N_COLLECTION_PLAYER_PAGE_FINISHED": "Umemaliza mkusanyiko! 
Jisikie huru kucheza tena uchunguzi wowote ulio hapa chini.", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "Elea juu ya alama ili kuhakiki uchunguzi.", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "Hakuna uchunguzi ambao umeongezwa kwenye Mkusanyiko huu.", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "<[collectionTitle]> - Oppia", + "I18N_COMING_SOON": "\nInakuja Hivi Karibuni", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "MKUSANYIKO", + "I18N_COMPLETED_STORY": "Imekamilika '<[story]>'", + "I18N_COMPLETE_CHAPTER": "Kamilisha sura katika '<[topicName]>'", + "I18N_CONTACT_PAGE_BREADCRUMB": "Wasiliana", + "I18N_CONTACT_PAGE_HEADING": "Jihusishe!", + "I18N_CONTACT_PAGE_PARAGRAPH_1": "Asante kwa nia yako ya kusaidia mradi wa Oppia!", + "I18N_CONTACT_PAGE_PARAGRAPH_10": "Mradi wa Oppia ni chanzo wazi kabisa, na unadumishwa na jumuiya kubwa ya watu wanaojitolea kwenye GitHub . Tunahitaji misimbo, wabunifu na waandishi wa nakala, kwa hivyo ikiwa ungependa kusaidia, tafadhali utuandikie! Njia bora ya kuanza ni kufuata maagizo kwenye ukurasa wetu wa wiki wa GitHub .", + "I18N_CONTACT_PAGE_PARAGRAPH_10_HEADING": "Kuboresha na kudumisha tovuti", + "I18N_CONTACT_PAGE_PARAGRAPH_11": "Mradi wa Oppia unaendeshwa kwa bajeti ya muda mfupi. Gharama zetu kuu zinatokana na kuendesha Oppia.org server, ambayo hugharimu maelfu ya dola kwa mwaka. Tunakaribisha michango kwa mradi ili kulipia gharama zetu za kimsingi, ili tuweze kuzingatia kuunda na kusambaza maudhui bora ya elimu kwa mtu yeyote duniani kote anayehitaji.", + "I18N_CONTACT_PAGE_PARAGRAPH_11_HEADING": "Michango", + "I18N_CONTACT_PAGE_PARAGRAPH_12": "Ikiwa ungependa kuchangia mradi, tafadhali toa kwa ukarimu katika www.oppia.org/donate . 
Ahsante kwa msaada wako!", + "I18N_CONTACT_PAGE_PARAGRAPH_13": "Ikiwa wewe ni mwandishi wa habari ambaye ungependa kusaidia kueneza habari kuhusu Oppia tafadhali tuma barua pepe kwa press@oppia.org .", + "I18N_CONTACT_PAGE_PARAGRAPH_13_HEADING": "Bonyeza", + "I18N_CONTACT_PAGE_PARAGRAPH_14": "Iwapo una nia ya kushirikiana na Oppia ili kuleta masomo yetu kwa hadhira kubwa zaidi, au kuunda mkusanyiko wa uchunguzi kuhusu mada ambazo wanafunzi wanaona kuwa ngumu, tafadhali tuma barua pepe kwa partnerships@oppia.org .", + "I18N_CONTACT_PAGE_PARAGRAPH_14_HEADING": "Ushirikiano", + "I18N_CONTACT_PAGE_PARAGRAPH_15": "Ikiwa umegundua hitilafu ya usalama kwenye tovuti ya Oppia, tafadhali tuma barua pepe kwa admin@oppia.org .", + "I18N_CONTACT_PAGE_PARAGRAPH_15_HEADING": "Usalama", + "I18N_CONTACT_PAGE_PARAGRAPH_2": "Sisi ni timu ya watu wanaojitolea duniani kote wanaotaka kuboresha ufikiaji wa elimu ya ubora wa juu. Kwa sasa, tunashughulika kujenga masomo wasilianifu makubwa yanayoitwa ugunduzi kwa wanafunzi katika jamii zisizoweza kuhudumiwa ambao hawawezi kufikia walimu kwa urahisi na vitabu vya kiada, ili waweze kupata maoni kama ya mwalimu kuhusu kazi zao, pamoja na ushauri lengwa wa jinsi ya kuboresha ujuzi wao. Ikiwa ungependa kuwasiliana nasi, jisikie huru kuwasiliana nasi kupitia Kikundi chetu cha oppia-dev@ Google, au wasiliana nasi kwa admin@oppia.org .", + "I18N_CONTACT_PAGE_PARAGRAPH_2_HEADING": "Sisi sote tunajitolea", + "I18N_CONTACT_PAGE_PARAGRAPH_3": "Kuna tafiti nyingi za kielimu zinazopendekeza kuwa ujifunzaji tendaji na maoni yanayolengwa huleta faida kubwa za kujifunza, na kutoa usaidizi na kuhimiza mtazamo wa ukuaji, husaidia mwanafunzi kuhisi anaweza kufaulu. Oppia ni tofauti na majukwaa mengine mengi, kwa kuwa inalenga wanafunzi kujifunza kwa kufanya, na kupata maoni wanapoyahitaji. 
Kufanya uchunguzi mzuri ni bora zaidi kuliko kupakia video tu — ni muhimu kuwa mwanafunzi anaweza kuangalia uelewa wao wa dhana kwa kuuliza maswali na kupata maoni kuhusu kazi zao.", + "I18N_CONTACT_PAGE_PARAGRAPH_3_HEADING": "Jinsi Oppia ni tofauti na majukwaa mengine ya kujifunza", + "I18N_CONTACT_PAGE_PARAGRAPH_4": "Hii ni juhudi kubwa, kwa hivyo usaidizi wowote unaoweza kutoa unakaribishwa! Haijalishi unatoka wapi, unazungumza lugha gani, au una umri gani au mchanga kiasi gani — huu ni mradi wa jumuiya, na mradi uko tayari kusaidia, tungependa ujiunge nasi. Hapa kuna njia kuu ambazo unaweza kujiuzisha:", + "I18N_CONTACT_PAGE_PARAGRAPH_4_HEADING": "Njia unazoweza kusaidia", + "I18N_CONTACT_PAGE_PARAGRAPH_5": "Kabla ya kuchapisha uvumbuzi, tunataka kuhakikisha kuwa ni mzuri na wa kufurahisha. Hii ina maana kwamba ujuzi wa wanafunzi huboreka kwa kuwa wanaicheza, na wanafunzi huicheza hadi kuimaliza, hata bila shinikizo lolote.", + "I18N_CONTACT_PAGE_PARAGRAPH_5_HEADING": "Kujaribu ugunduzi uliopo", + "I18N_CONTACT_PAGE_PARAGRAPH_6": "Jambo moja ambalo husaidia sana utafiti huu wa watumiaji ili kuona ni nini kinachofanya kazi na kisichofanya kazi. Hili linahusisha kukaa na mwanafunzi na kuwatazama wakimaliza uchunguzi, kuandika maelezo kuhusu yale ambayo mwanafunzi huona yanachanganya, anapokosa kupendezwa, au akipata maelezo yasiyoeleweka. Inatosha kufanya hivi na wanafunzi 2 au 3 kwa kila uchunguzi— utajifunza mengi katika mchakato! Kisha unaweza kutuma vidokezo hivyo kwa mtengenezaji wa uvumbuzi, au pendekeza masasisho moja kwa moja , na hii itasaidia kuboresha uchunguzi kwa wanafunzi wengine duniani kote.", + "I18N_CONTACT_PAGE_PARAGRAPH_7": "Oppia imejengwa kutumia wazo kwamba walimu bora wanaweza kutoka popote. 
Unaweza kuwa mwanafunzi ambaye anapenda kueleza dhana ngumu kwa wenzako, mstaafu ambaye angependa kubadiishana uzoefu wake mwingi, mkufunzi, mwalimu wa darasa, Mwalimu msaidizi au profesa, au mfanyabiashara ambaye anatafuta njia nzuri ya kutumia wakati wake. Mradi uchunguzi unaounda ni mzuri katika kuwasaidia wanafunzi , haijalishi asili yako ni nini.", + "I18N_CONTACT_PAGE_PARAGRAPH_7_HEADING": "Kuunda uvumbuzi mpya", + "I18N_CONTACT_PAGE_PARAGRAPH_8": "Kwa hivyo, ikiwa ungependa kuunda masomo yasiyolipishwa na yanayofaa kwa wanafunzi kote ulimwenguni, umefika mahali pazuri. Tunakuhimiza uangalie mafunzo yetu ya watayarishi na masomo yaliyopo , na uanze kuunda somo lako mwenyewe . Zaidi ya hayo, ikiwa ungependa kuhakikisha kuwa masomo yako yana matokeo makubwa, tafadhali zingatia kutuma maombi kwenye mpango wetu wa Kufundisha ukitumia Oppia , ambapo tutakusaidia kuunda, kujaribu na kuboresha uchunguzi wako ili kupata matokeo bora.", + "I18N_CONTACT_PAGE_PARAGRAPH_9": "Kama uchunguzi uliopo, lakini umepata kitu ambacho kinaweza kuwa bora zaidi? Unaweza kupendekeza mabadiliko kwa uchunguzi wowote moja kwa moja kutoka kwa ukurasa wa uchunguzi. Bonyeza tu alama ya penseli katika kona ya juu ya mkono wa kulia, na ushiriki kile unachofikiria kinaweza kuboreshwa. Mtayarishi wa somo atapokea mapendekezo yako na kupata fursa ya kuyaunganisha katika uvumbuzi. Hii ni njia muhimu sana ya kuchangia, haswa ikiwa unaweza msingi wa mapendekezo yako kutoka kwa uzoefu wa wanafunzi kucheza kupitia uchunguzi.", + "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "Kuboresha ugunduzi uliopo", + "I18N_CONTACT_PAGE_TITLE": "Wasiliana | Oppia", + "I18N_CONTINUE_REGISTRATION": "Endelea na Usajili", + "I18N_COOKIE_BANNER_ACKNOWLEDGE": "SAWA", + "I18N_COOKIE_BANNER_EXPLANATION": "Tovuti hii hutumia vidakuzi na teknolojia sawa ili kusaidia utendakazi msingi, kuweka tovuti salama, na kuchanganua trafiki ya tovuti yetu. 
Pata maelezo zaidi katika Sera yetu ya Faragha .", + "I18N_CORRECT_FEEDBACK": "\nSahihi!", + "I18N_CREATE_ACTIVITY_QUESTION": "Ungependa kuunda nini?", + "I18N_CREATE_ACTIVITY_TITLE": "Unda Shughuli", + "I18N_CREATE_COLLECTION": "Unda Mkusanyiko", + "I18N_CREATE_EXPLORATION": "\nTengeneza Uchunguzi", + "I18N_CREATE_EXPLORATION_CREATE": "Unda", + "I18N_CREATE_EXPLORATION_QUESTION": "Je, ungependa kuunda uchunguzi?", + "I18N_CREATE_EXPLORATION_TITLE": "\nTengeneza Uchunguzi", + "I18N_CREATE_EXPLORATION_UPLOAD": "Pakia", + "I18N_CREATE_NO_THANKS": "Hapana, asante lakini!", + "I18N_CREATE_YES_PLEASE": "Ndio, tafadhali!", + "I18N_CREATOR_IMPACT": "Athari", + "I18N_DASHBOARD_COLLECTIONS": "Mikusanyiko", + "I18N_DASHBOARD_CREATOR_DASHBOARD": "Dashibodi ya Mtayarishi", + "I18N_DASHBOARD_EXPLORATIONS": "Ugunduzi", + "I18N_DASHBOARD_EXPLORATIONS_EMPTY_MESSAGE": "Inaonekana bado hujaunda uchunguzi wowote. Hebu tuanze!", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY": "Panga kwa", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_AVERAGE_RATING": "Ukadiriaji Wastani", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_CATEGORY": "Kundi", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_UPDATED": "Sasisho la mwisho", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_OPEN_FEEDBACK": "Fungua Maoni", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TITLE": "Kichwa", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TOTAL_PLAYS": "Jumla ya Michezo", + "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_UNRESOLVED_ANSWERS": "Majibu ambayo hayajatatuliwa", + "I18N_DASHBOARD_LESSONS": "Masomo", + "I18N_DASHBOARD_OPEN_FEEDBACK": "Fungua Maoni", + "I18N_DASHBOARD_SKILL_PROFICIENCY": "Ustadi wa ujuzi", + "I18N_DASHBOARD_STATS_AVERAGE_RATING": "Ukadiriaji Wastani", + "I18N_DASHBOARD_STATS_OPEN_FEEDBACK": "Fungua Maoni", + "I18N_DASHBOARD_STATS_TOTAL_PLAYS": "Jumla ya Michezo", + "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "Wanaofuatilia", + "I18N_DASHBOARD_STORIES": "Hadithi", + "I18N_DASHBOARD_SUBSCRIBERS": "Wanaofuatilia", + "I18N_DASHBOARD_SUGGESTIONS": "Mapendekezo", 
+ "I18N_DASHBOARD_TABLE_HEADING_EXPLORATION": "Ugunduzi", + "I18N_DASHBOARD_TABLE_HEADING_LAST_UPDATED": "Sasisho la mwisho", + "I18N_DASHBOARD_TABLE_HEADING_OPEN_THREADS": "Fungua Mlolongo", + "I18N_DASHBOARD_TABLE_HEADING_PLAYS": "Michezo", + "I18N_DASHBOARD_TABLE_HEADING_RATING": "Ukadiriaji", + "I18N_DASHBOARD_TABLE_HEADING_UNRESOLVED_ANSWERS": "Majibu ambayo hayajatatuliwa", + "I18N_DASHBOARD_TOPICS_AND_SKILLS_DASHBOARD": "Dashibodi ya Mada na Ujuzi", + "I18N_DELETE_ACCOUNT_PAGE_BREADCRUMB": "Ondoa Akaunti", + "I18N_DELETE_ACCOUNT_PAGE_BUTTON": "Ondoa Akaunti Yangu", + "I18N_DELETE_ACCOUNT_PAGE_HEADING": "Ondoa Akaunti", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_1": "Mipangilio ya mtumiaji na mapendeleo ya barua pepe", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_2": "Ugunduzi na makusanyo ya kibinafsi", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_3": "Maendeleo ya somo", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_4": "Takwimu kuhusu uchunguzi na makusanyo yaliyoundwa na mtumiaji", + "I18N_DELETE_ACCOUNT_PAGE_LIST_1_5": "Rasimu ya mabadiliko kwa uchunguzi wowote", + "I18N_DELETE_ACCOUNT_PAGE_LIST_2_1": "Maoni na mapendekezo", + "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "Ahadi zilizofanywa kwa uchunguzi na makusanyo ya umma ambayo yana wamiliki wengine", + "I18N_DELETE_ACCOUNT_PAGE_LIST_2_3": "Kujitolea kwa mada, hadithi, ujuzi na maswali", + "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "Ili kuthibitisha ufutaji huo, tafadhali ingiza jina lako la mtumiaji kwenye sehemu iliyo hapa chini na ubonyeze kitufe cha 'Futa Akaunti Yangu'. Kitendo hiki hakiwezi kutenduliwa.", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Kitendo hiki kitafuta akaunti hii ya mtumiaji na pia data yote ya faragha inayohusishwa na akaunti hii. Data ambayo tayari iko hadharani haitatambulishwa ili isiweze kuhusishwa na akaunti hii, isipokuwa data ya hifadhi rudufu (ambayo huhifadhiwa kwa miezi 6). 
Baadhi ya kategoria zilizotajwa hapa chini huenda zisitumike kwenye akaunti yako.", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Muhtasari", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "Hizi ndizo aina za data zitakazofutwa:", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "Hizi ndizo aina za data ambazo hazitajulikana:", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "Aidha, uchunguzi na makusanyo yaliyochapishwa ambayo hayana wamiliki wengine yatabadilishwa kuwa umiliki wa jumuiya.", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "Ikiwa una maswali au wasiwasi wowote kuhusu mchakato wa kuondoa akaunti, tafadhali tuma barua pepe kwa privacy@oppia.org .", + "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "Hii itakuelekeza kwenye ukurasa ambapo unaweza kufuta akaunti yako ya Oppia.", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "Futa Akaunti | Oppia", + "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Buruta picha kwenye eneo hili", + "I18N_DIRECTIVES_UPLOAD_A_FILE": "Pakia faili", + "I18N_DONATE_PAGE_BREADCRUMB": "Changa", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "Changa | Fanya Athari Chanya | Oppia", + "I18N_DONATE_PAGE_IMAGE_TITLE": "Pesa zako za zawadi za ukarimu:", + "I18N_DONATE_PAGE_TITLE": "Toa mchango kwa Shirika la Oppia", + "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Sikia kutoka kwa jumuiya yetu ya Oppia", + "I18N_ERROR_DISABLED_EXPLORATION": "Ugunduzi Umezimwa", + "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Samahani, lakini uchunguzi uliobofya umezimwa kwa sasa. Tafadhali jaribu tena baadae.", + "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Ugunduzi Umezimwa - Oppia", + "I18N_ERROR_HEADER_400": "Hitilafu 400", + "I18N_ERROR_HEADER_401": "Hitilafu 401", + "I18N_ERROR_HEADER_404": "Hitilafu 404", + "I18N_ERROR_HEADER_500": "Hitilafu 500", + "I18N_ERROR_MESSAGE_400": "Wakati mwingine mashine haziwezi kuwaelewa wanadamu. Hii ni moja ya nyakati hizo.", + "I18N_ERROR_MESSAGE_401": "Huwezi kuingia huko. 
Rudi, haraka kabla mwalimu hajafika!", + "I18N_ERROR_MESSAGE_404": "Samahani, tulitafuta na kuangalia lakini hatukuweza kupata ukurasa huo.", + "I18N_ERROR_MESSAGE_500": "Hitilafu mbaya imetokea. Lakini haikuwa kosa lako. Hitilafu ya ndani imetokea.", + "I18N_ERROR_NEXT_STEPS": "Jambo bora zaidi la kufanya sasa labda ni kurudi kwenye \">ukurasa wa nyumbani . Hata hivyo, ikiwa suala hili litajirudia, na unafikiria halifai, tafadhali tujulishe kwenye \" target=\"_blank\">kifuatilia toleo letu . Pole kwa hili.", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "Hitilafu <[statusCode]> | Oppia", + "I18N_ERROR_PAGE_TITLE": "Hitilafu <[statusCode]> - Oppia", + "I18N_ERROR_PAGE_TITLE_400": "Hitilafu 400 - Oppia", + "I18N_ERROR_PAGE_TITLE_401": "Hitilafu 401 - Oppia", + "I18N_ERROR_PAGE_TITLE_404": "Hitilafu 404 - Oppia", + "I18N_ERROR_PAGE_TITLE_500": "Hitilafu 500 - Oppia", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "Je, uko tayari kwa keki zaidi? Jibu maswali haya mafupi ili kuangalia kuelewa kwako kwa yale ambayo umejifunza kufikia sasa!", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE": "Usawa wa Sehemu za Hesabu (Muhtasari)", + "I18N_EXPLORATION_0FBWxCE5egOw_DESCRIPTION": "Inawezekana kwa sehemu ya hesabu moja kuwa tofauti kwa kujificha? Hebu tuone kitakachotokea Mathayo anapokutana na Crumb kwa mara ya pili.", + "I18N_EXPLORATION_0FBWxCE5egOw_TITLE": "Sehemu za hesabu Sawa", + "I18N_EXPLORATION_0X0KC9DXWwra_DESCRIPTION": "Nyumbani kwa Kamal, kila mtu anasherehekea siku ya kuzaliwa ya Samir. Kamal anawafurahisha kwa kutengeneza mchezo wa hesabu kwa Ava na Samir. Angalia kama unaweza kutatua maswali!", + "I18N_EXPLORATION_0X0KC9DXWwra_TITLE": "Muhtasari: Ujuzi wa Kutatua Matatizo", + "I18N_EXPLORATION_1904tpP0CYwY_DESCRIPTION": "Ni wakati wa Aria kuanza kupanda mboga! 
Endelea na safari yako ya bustani unapomsaidia kwenye bustani na uanze kukariri vizidisho vyako.", + "I18N_EXPLORATION_1904tpP0CYwY_TITLE": "Maswali ya hesabu ya Nambari Moja kutoka 1-5", + "I18N_EXPLORATION_2mzzFVDLuAj8_DESCRIPTION": "Jiunge na James na mjomba wake wanapojifunza kuhusu uwiano na jinsi ya kuzitumia!", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "Uwiano Ni Nini?", + "I18N_EXPLORATION_40a3vjmZ7Fwu_DESCRIPTION": "Nina na mama yake wamepatana na rafiki yao, ambaye pia ana kibanda cha kuuza matunda. Jiunge na Nina anapotumia mgawanyiko kusaidia rafiki yao na duka!", + "I18N_EXPLORATION_40a3vjmZ7Fwu_TITLE": "Masalio na Kesi Maalum", + "I18N_EXPLORATION_53Ka3mQ6ra5A_DESCRIPTION": "Maya, Omar na Malik hutembelea duka kuu ili kupata viungo zaidi, na wanahitaji kuongeza idadi kubwa zaidi. Angalia kama unaweza kuwasaidia!", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE": "Kuongeza idadi kubwa", + "I18N_EXPLORATION_5I4srORrwjt2_DESCRIPTION": "Katika baa ya vitafunio, Kamal anasema inabidi wawe werevu katika jinsi wanavyotumia kiasi kidogo cha pesa. Wasaidie Ava na Samir kupata vitafunio wanavyoweza kupata!", + "I18N_EXPLORATION_5I4srORrwjt2_TITLE": "Uwiano na Mbinu ya Umoja", + "I18N_EXPLORATION_5NWuolNcwH6e_DESCRIPTION": "James anajaribu kutengeneza smoothies yake ... lakini hazitokei vizuri sana. Alifanya kosa gani? Cheza somo hili ili kujua!", + "I18N_EXPLORATION_5NWuolNcwH6e_TITLE": "Umuhimu wa Utaratibu", + "I18N_EXPLORATION_670bU6d9JGBh_DESCRIPTION": "Msaidie Mathayo kutatua tatizo kwa mmoja wa wateja wa Bw. Baker anapojifunza kuhusu nambari mchanganyiko na laini ya nambari. Cheza somo hili ili kuanza!", + "I18N_EXPLORATION_670bU6d9JGBh_TITLE": "Nambari Mchanganyiko na Mstari wa Nambari 1", + "I18N_EXPLORATION_6Q6IyIDkjpYC_DESCRIPTION": "Bwana Baker ana agizo kubwa sana anayotarajia na anahitaji usaidizi wa Mathayo katika kununua viungo zaidi. 
Je, unaweza kujua wanachohitaji kwa kutumia sehemu za hesabu?", + "I18N_EXPLORATION_6Q6IyIDkjpYC_TITLE": "Kuondoa Sehemu za hesabu", + "I18N_EXPLORATION_8HTzQQUPiK5i_DESCRIPTION": "Ungana na Nina na mama yake wakienda sokoni. Wasaidie kutumia mgawanyiko kubaini ni mifuko mingapi wanahitaji kwa ajili ya mboga zao!", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE": "Ugawanyaji ni nini?", + "I18N_EXPLORATION_9DITEN8BUEHw_DESCRIPTION": "Jifunze jinsi ya kutathmini hesabu inayohusisha operesheni nyingi za kuongeza na kuondoa.", + "I18N_EXPLORATION_9DITEN8BUEHw_TITLE": "Kuongeza na kupunguza nambari kadhaa", + "I18N_EXPLORATION_9trAQhj6uUC2_DESCRIPTION": "Vipande vinaweza kutumika kuwakilisha sehemu za keki. Lakini je, vinaweza pia kutumiwa kuwakilisha sehemu za vikundi vya vitu? Cheza somo hili ili kujua!", + "I18N_EXPLORATION_9trAQhj6uUC2_TITLE": "Sehemu za Kikundi", + "I18N_EXPLORATION_BDIln52yGfeH_DESCRIPTION": "Wanapofika kwenye uwanja wa burudani, Ava na Samir wanataka kuburudika, lakini Kamal anasema wanahitaji kuona kama wana pesa za kutosha. Wasaidie kuhesabu!", + "I18N_EXPLORATION_BDIln52yGfeH_TITLE": "Kurahisisha Milinganyo", + "I18N_EXPLORATION_BJd7yHIxpqkq_DESCRIPTION": "Wasaidie mashujaa wetu watatu kutengeneza pizza bora, huku wakijifunza jinsi ya kuongeza kwa sufuri na kubaini nambari zinazokosekana katika \"ukweli wa nyongeza\".", + "I18N_EXPLORATION_BJd7yHIxpqkq_TITLE": "Misingi ya Kuongeza", + "I18N_EXPLORATION_IrbGLTicm0BI_DESCRIPTION": "Wakati Ava na Kamal wanamngoja Bi. Plum, hebu tuone kama umejifunza jinsi ya kutumia mikakati mbalimbali kutatua matatizo ya ulimwengu halisi!", + "I18N_EXPLORATION_IrbGLTicm0BI_TITLE": "Muhtasari: Kutatua Matatizo ya Ulimwengu Halisi", + "I18N_EXPLORATION_Jbgc3MlRiY07_DESCRIPTION": "Baada ya kujifunza stadi hizi zote mpya, Ava anataka kujua anavyoweza kutumia. 
Jiunge na Ava katika kutumia ujuzi wake mpya kutatua matatizo ya ulimwengu halisi!", + "I18N_EXPLORATION_Jbgc3MlRiY07_TITLE": "Kuiga Matukio ya Ulimwengu Halisi", + "I18N_EXPLORATION_K645IfRNzpKy_DESCRIPTION": "Jaime hujifunza thamani ya mahali ya kila tarakimu katika nambari kubwa.", + "I18N_EXPLORATION_K645IfRNzpKy_TITLE": "Thamani ya Mahali ni nini", + "I18N_EXPLORATION_K89Hgj2qRSzw_DESCRIPTION": "Kamal anafichua mbinu alizotumia kubaini haraka muda wanaohitaji kuamka. Unataka kuona jinsi anavyofanya? Cheza somo hili ili kujua!", + "I18N_EXPLORATION_K89Hgj2qRSzw_TITLE": "Sheria ya Usambazaji", + "I18N_EXPLORATION_Knvx24p24qPO_DESCRIPTION": "Jaime anaelewa thamani ya alama yake ya ukumbi wa michezo.", + "I18N_EXPLORATION_Knvx24p24qPO_TITLE": "Kupata thamani ya Nambari", + "I18N_EXPLORATION_MRJeVrKafW6G_DESCRIPTION": "Bustani ya Aria ni mafanikio makubwa! Kila wiki ya majira ya joto, matunda na mboga zaidi na zaidi huongezeka. Msaidie Aria kuhesabu mboga ngapi zilikua.", + "I18N_EXPLORATION_MRJeVrKafW6G_TITLE": "Kuzidisha kwa Nguvu za Kumi", + "I18N_EXPLORATION_MjZzEVOG47_1_DESCRIPTION": "Tulijifunza kwamba \"dhehebu\" ya sehemu ya hesabu ni idadi ya sehemu sawa kwa ujumla. Lakini kwa nini vipande lazima viwe sawa? Hebu tujue!", + "I18N_EXPLORATION_MjZzEVOG47_1_TITLE": "Maana ya \"Vipande Sawa\"", + "I18N_EXPLORATION_OKxYhsWONHZV_DESCRIPTION": "Jiunge na Maya na Omar wanapojifunza jinsi nambari zinavyoweza \"kuwekwa pamoja\" au \"kuongezwa\" ili kuunda nambari mpya!", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "Nyongeza ni nini?", + "I18N_EXPLORATION_PLAYER_PAGE_TITLE": "<[explorationTitle]> - Oppia", + "I18N_EXPLORATION_PsfDKdhd6Esz_DESCRIPTION": "Maya, Omar na Malik wanaonekana kupoteza pesa kutokana na viungo vilivyoharibika. 
Kwa kutumia kutoa, unaweza kuwasaidia kujua jinsi ya kutoa hesabu kwa hili?", + "I18N_EXPLORATION_PsfDKdhd6Esz_TITLE": "Kutoa idadi kubwa, Sehemu ya 2", + "I18N_EXPLORATION_R7WpsSfmDQPV_DESCRIPTION": "Pamoja na Aria, hebu tujifunze kuzidisha ni nini, jinsi ya kuandika hesabu hizo, na jinsi ya kuitumia kutatua shida katika kitongoji cha Aria!", + "I18N_EXPLORATION_R7WpsSfmDQPV_TITLE": "Vipande vya hesabu za Kuzidisha", + "I18N_EXPLORATION_RvopsvVdIb0J_DESCRIPTION": "Ni wakati wa James kuuza smoothie yake mpya! Anaweka kibanda na Mjomba Berry. Je, wanaweza kujua ni pesa ngapi kila mmoja wao anapaswa kupata?", + "I18N_EXPLORATION_RvopsvVdIb0J_TITLE": "Kuunganisha Uwiano kwa Nambari Halisi", + "I18N_EXPLORATION_SR1IKIdLxnm1_DESCRIPTION": "Ava alichoshwa na kucheza michezo ya bustani ya burudani, kwa hivyo Kamal akaunda mchezo wa kufurahisha wa hesabu. Je, unaweza kushinda mchezo wa Kamal? Bofya somo hili kujua!", + "I18N_EXPLORATION_SR1IKIdLxnm1_TITLE": "Muhtasari: Vigezo", + "I18N_EXPLORATION_STARTING_FROM_BEGINNING": "Hongera kwa kukamilisha somo hili! Sasa utaanza somo kuanzia mwanzo wakati mwingine utakaporudi.", + "I18N_EXPLORATION_STATE_PREVIOUSLY_COMPLETED": "Ulijibu swali hili katika kipindi kilichopita.", + "I18N_EXPLORATION_VKXd8qHsxLml_DESCRIPTION": "Maya, Omar na Malik wanaona kwamba baadhi ya viungo vyao vimeharibika. Je, unaweza kuwasaidia kufahamu wamebakisha kiasi gani kwa kutumia kutoa?", + "I18N_EXPLORATION_VKXd8qHsxLml_TITLE": "Kutoa idadi kubwa, Sehemu ya 1", + "I18N_EXPLORATION_Vgde5_ZVqrq5_DESCRIPTION": "James amegundua anataka kichocheo chake cha smoothie kifanane, lakini ana shida kuchanganya sehemu zote pamoja. 
Je, unaweza kumsaidia kwa hili?", + "I18N_EXPLORATION_Vgde5_ZVqrq5_TITLE": "Kuchanganya Uwiano", + "I18N_EXPLORATION_W0xq3jW5GzDF_DESCRIPTION": "Kitu kisichotarajiwa kinatokea wakati Maya, Omar na Malik wanajaribu kutengeneza pizza ya pili.", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "Kutoa ni nini?", + "I18N_EXPLORATION_WulCxGAmGE61_DESCRIPTION": "Nina anatembelea nyumba ya Sandra. Jiunge naye anapotumia mgawanyiko kumsaidia Sandra na matatizo hata ya hila, kama vile kuhamisha matunda yote kwenye masanduku!", + "I18N_EXPLORATION_WulCxGAmGE61_TITLE": "Mgawanyiko kwa Vizidisho vya Kumi", + "I18N_EXPLORATION_WwqLmeQEn9NK_DESCRIPTION": "Jamie anaendelea kujifunza mbinu zaidi za kubadilisha nambari.", + "I18N_EXPLORATION_WwqLmeQEn9NK_TITLE": "Nambari za Kubadilisha, Sehemu ya 2", + "I18N_EXPLORATION_Xa3B_io-2WI5_DESCRIPTION": "Ungana na Mathayo anapomsaidia Bw. Baker kurekebisha uharibifu, huku akijifunza jinsi ya kuongeza sehemu za hesabu.", + "I18N_EXPLORATION_Xa3B_io-2WI5_TITLE": "Kuongeza Sehemu", + "I18N_EXPLORATION_aAkDKVDR53cG_DESCRIPTION": "Jamie hujifunza ikiwa nambari ni ndogo au kubwa kuliko nambari nyingine.", + "I18N_EXPLORATION_aAkDKVDR53cG_TITLE": "Kulinganisha Nambari", + "I18N_EXPLORATION_aHikhPlxYgOH_DESCRIPTION": "Ungana na Mathayo anapojifunza jinsi nambari zilizochanganywa zilivyo sehemu za kawaida zinazojificha.", + "I18N_EXPLORATION_aHikhPlxYgOH_TITLE": "Nambari Mchanganyiko na Mstari wa Nambari 2", + "I18N_EXPLORATION_aqJ07xrTFNLF_DESCRIPTION": "Baada ya kutumia njia ya umoja kupata vitafunio ambavyo Ava anapaswa kununua, ni zamu ya Samir kwa kutumia mbinu mpya. 
Jiunge na Samir upate vitafunwa!", + "I18N_EXPLORATION_aqJ07xrTFNLF_TITLE": "Kutatua Matatizo ya hesabu kwa Miundo ya Sanduku", + "I18N_EXPLORATION_avwshGklKLJE_DESCRIPTION": "Jamie anajifunza kurahisisha nambari bila kufanya mabadiliko mengi kwa thamani yake.", + "I18N_EXPLORATION_avwshGklKLJE_TITLE": "Nambari za Kubadilisha, Sehemu ya 1", + "I18N_EXPLORATION_cQDibOXQbpi7_DESCRIPTION": "Aria yuko tayari kupanda mboga kubwa zaidi kwenye bustani yake! Msaidie kuzipanda na kuzimwagilia huku ukikariri nyingi zaidi pamoja naye.", + "I18N_EXPLORATION_cQDibOXQbpi7_TITLE": "Hesabu ya Nambari Moja kutoka 5-9", + "I18N_EXPLORATION_hNOP3TwRJhsz_DESCRIPTION": "Aria anaanza shule tena! Anataka bustani kubwa kwa ajili ya watoto shuleni kwake. Msaidie kuipanga na Omar kwa kutumia kuzidisha kwa nambari kubwa zaidi.", + "I18N_EXPLORATION_hNOP3TwRJhsz_TITLE": "Kuzidisha kwa tarakimu nyingi, Sehemu ya 1", + "I18N_EXPLORATION_ibeLZqbbjbKF_DESCRIPTION": "Katika kituo cha gari moshi, Ava na Kamal hawakupata treni! Kamal hupata makosa katika mahesabu. Je, utawasaidia kupata treni itakapofika?", + "I18N_EXPLORATION_ibeLZqbbjbKF_TITLE": "Kuzuia plagi katika Thamani za Vigezo", + "I18N_EXPLORATION_k2bQ7z5XHNbK_DESCRIPTION": "Inawezekana kwa uwiano mbili tofauti kumaanisha kitu kimoja? Gundua James na Mjomba Berry wanapojaribu kichocheo kipya cha smoothies za chokoleti.", + "I18N_EXPLORATION_k2bQ7z5XHNbK_TITLE": "Uwiano sawa", + "I18N_EXPLORATION_kYSrbNDCv5sH_DESCRIPTION": "Ava anataka kufaidika zaidi na siku ya kuzaliwa ya Samir, kwa hivyo anaanza kupanga siku yake. Msaidie kutumia njia za mkato kutathmini misemo ili kubaini mambo!", + "I18N_EXPLORATION_kYSrbNDCv5sH_TITLE": "Sheria za Ubadilishaji na Ushirikiano", + "I18N_EXPLORATION_lNpxiuqufPiw_DESCRIPTION": "Hivi karibuni Ava atahitaji kutumia ujuzi wake kwa matatizo ya ulimwengu halisi. Je, utaweza kusaidia? 
Jaribu somo hili ili kuona kama una ustadi katika hesabu!", + "I18N_EXPLORATION_lNpxiuqufPiw_TITLE": "Muhtasari: Kutatua hesabu", + "I18N_EXPLORATION_lOU0XPC2BnE9_DESCRIPTION": "Jiunge na Nina huku akimsaidia Sandra kutengeneza maji ya matunda kwa duka lake, kwa kutumia mbinu mpya ya kugawanya!", + "I18N_EXPLORATION_lOU0XPC2BnE9_TITLE": "Mgawanyiko mrefu, Vigawanyiko vya Nambari Moja", + "I18N_EXPLORATION_m1nvGABWeUoh_DESCRIPTION": "Ava na Samir wanamaliza kucheza michezo na kwenda dukani kutumia tikiti zao. Huko, wanapata mashine ya ajabu! Bofya somo linalofuata ili kujua!", + "I18N_EXPLORATION_m1nvGABWeUoh_TITLE": "Wastani/Kadiri ni nini?", + "I18N_EXPLORATION_nLmUS6lbmvnl_DESCRIPTION": "Je, James anaweza kufahamu kama smoothie ina \"maziwa\" zaidi au \"mgando-y\" kwa kuangalia tu mapishi, badala ya kuhitaji kutengeneza kila laini kwa mikono?", + "I18N_EXPLORATION_nLmUS6lbmvnl_TITLE": "Linganisha Uwiano", + "I18N_EXPLORATION_nTMZwH7i0DdW_DESCRIPTION": "Ava na Kamal huenda kwenye kituo cha gari la moshi. Wanamwona Bi. Plum, mwokaji mikate, na kumsaidia kutatua matatizo kwa kutumia maneno yenye mapato, gharama na faida.", + "I18N_EXPLORATION_nTMZwH7i0DdW_TITLE": "Kutoka kwa Matatizo ya Maneno hadi Hesabu", + "I18N_EXPLORATION_osw1m5Q3jK41_DESCRIPTION": "Ni wakati wa keki tena! Chukua fursa hii kuhakikisha kuwa umeelewa ujuzi ambao umejifunza katika masomo yaliyopita!", + "I18N_EXPLORATION_osw1m5Q3jK41_TITLE": "Matatizo iliyo na Sehemu za hesabu(Muhtasari)", + "I18N_EXPLORATION_rDJojPOc0KgJ_DESCRIPTION": "Ava na Kamal wananunua zawadi kwa ajili ya siku ya kuzaliwa ya binamu yao! Jiunge nao wanapofahamu jinsi ya kukokotoa bei kwa kutathmini vielezo.", + "I18N_EXPLORATION_rDJojPOc0KgJ_TITLE": "\nKutathmini Hesabu - Utaratibu wa hesabu", + "I18N_EXPLORATION_rfX8jNkPnA-1_DESCRIPTION": "Je, unaweza kumsaidia Mathayo kupata keki? 
Jibu swali hili fupi ili kuona ni kiasi gani unakumbuka kuhusu Sehemu za hesabu.", + "I18N_EXPLORATION_rfX8jNkPnA-1_TITLE": "\nKuwakilisha Sehemu za hesabu (Muhtasari)", + "I18N_EXPLORATION_rwN3YPG9XWZa_DESCRIPTION": "Huku wakifurahia aiskrimu, Ava na Kamal wanajaribu kujibu baadhi ya maswali ambayo Ava anayo kuhusu ziara yao ijayo kwenye bustani ya burudani!", + "I18N_EXPLORATION_rwN3YPG9XWZa_TITLE": "Kutatua Matatizo ya Hesabu ya Maneno", + "I18N_EXPLORATION_tIoSb3HZFN6e_DESCRIPTION": "\nJames anajifunza jinsi ya kupunguza uwiano kwa umbo lake rahisi zaidi, ili kurahisisha mahesabu yake.", + "I18N_EXPLORATION_tIoSb3HZFN6e_TITLE": "Kuandika uwiano kwa Njia Rahisi Zaidi", + "I18N_EXPLORATION_umPkwp0L1M0-_DESCRIPTION": "Ungana na Mathayo anapokutana na Bw. Baker kwa mara ya kwanza na kujifunza kuhusu sehemu za hesabu. Sehemu ya hesabu ni nini? Cheza somo hili ili kujua zaidi!", + "I18N_EXPLORATION_umPkwp0L1M0-_TITLE": "Sehemu ya hesabu ni nini?", + "I18N_EXPLORATION_v8fonNnX4Ub1_DESCRIPTION": "Ava na Kamal wanaendelea kumsaidia Bi Plum na biashara yake ya kuoka mikate, lakini kuna baadhi ya maneno ambayo hayajulikani. Je, Ava ataweza kusaidia?", + "I18N_EXPLORATION_v8fonNnX4Ub1_TITLE": "Kuandika Hesabu yenye Vigezo", + "I18N_EXPLORATION_wE9pyaC5np3n_DESCRIPTION": "Nina na Sandra wanaingia kwenye mashindano. Jiunge na Nina anapotumia ujuzi wake wa kitengo kuuza matunda na maji ya matunda nyingi iwezekanavyo, ili kushinda zawadi kuu!", + "I18N_EXPLORATION_wE9pyaC5np3n_TITLE": "Kugawanya tarakimu nyingi", + "I18N_EXPLORATION_zIBYaqfDJrJC_DESCRIPTION": "Endelea na tukio lako la ukulima na Aria anapopanda matunda, anajifunza na kufanya mazoezi ya kuzidisha na Omar!", + "I18N_EXPLORATION_zIBYaqfDJrJC_TITLE": " Maana Ya Kuzidisha", + "I18N_EXPLORATION_zNb0Bh27QtJ4_DESCRIPTION": "Kwenye baa ya vitafunio, Kamal anakagua mifuko yake na hapati pochi lake. Bila mkoba, hawawezi kupata vitafunio vyovyote! 
Je, unaweza kusaidia kupata pochi ya Kamal?", + "I18N_EXPLORATION_zNb0Bh27QtJ4_TITLE": "Kuendelea na Hesabu", + "I18N_EXPLORATION_zTg2hzTz37jP_DESCRIPTION": "Baada ya mipango mingi, Aria aliwauliza marafiki zake wamsaidie kupanda bustani ya shule yake! Tumia ujuzi wako kuwasaidia kupanda bustani ya ajabu!", + "I18N_EXPLORATION_zTg2hzTz37jP_TITLE": "Kuzidisha kwa tarakimu nyingi, Sehemu ya 2", + "I18N_EXPLORATION_zVbqxwck0KaC_DESCRIPTION": "James na Mjomba Berry wamealikwa kutengeneza smoothies kwa karamu ya jirani yao. Je, huu unaweza kuwa mwanzo wa kupata umaarufu wao kama watengenezaji wa smoothie?", + "I18N_EXPLORATION_zVbqxwck0KaC_TITLE": "Mahusiano ya uwiano", + "I18N_EXPLORATION_zW39GLG_BdN2_DESCRIPTION": "Mathayo anapojifunza jinsi ya kulinganisha sehemu za hesabu kulingana na ukubwa, ajali hutokea kwenye duka la mikate, na Bw. Baker anakasirika. Hebu tuone kilichotokea!", + "I18N_EXPLORATION_zW39GLG_BdN2_TITLE": "Kulinganisha Sehemu za Hesabu", + "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Isiyojulikana", + "I18N_FOOTER_ABOUT": "Kuhusu", + "I18N_FOOTER_ABOUT_ALL_CAPS": "KUHUSU OPPIA", + "I18N_FOOTER_AUTHOR_PROFILES": "Wasifu wa Waandishi", + "I18N_FOOTER_BROWSE_LIBRARY": "Vinjari Maktaba", + "I18N_FOOTER_CONTACT_US": "Wasiliana nasi", + "I18N_FOOTER_CONTRIBUTE_ALL_CAPS": "CHANGIA", + "I18N_FOOTER_CREDITS": "Watambuliwa", + "I18N_FOOTER_DONATE": "Changa", + "I18N_FOOTER_FOLLOW_US": "Tufuate", + "I18N_FOOTER_FORUM": "Jukwaa", + "I18N_FOOTER_GET_INVOLVED": "Jihusishe", + "I18N_FOOTER_GET_STARTED": "Anza", + "I18N_FOOTER_OPPIA_FOUNDATION": "Shirika la Oppia", + "I18N_FOOTER_PRIVACY_POLICY": "Sera ya faragha", + "I18N_FOOTER_TEACH": "Fundisha na Oppia", + "I18N_FOOTER_TEACH_LEARN_ALL_CAPS": "FUNDISHA/JIFUNZE", + "I18N_FOOTER_TEACH_PAGE": "Kwa Wazazi/Walimu", + "I18N_FOOTER_TERMS_OF_SERVICE": "Masharti ya Huduma", + "I18N_FORMS_TYPE_NUMBER": "Andika nambari", + "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "Tafadhali weka nambari ambayo ni angalau <[minValue]>.", + 
"I18N_FORMS_TYPE_NUMBER_AT_MOST": "Tafadhali weka nambari isiyozidi <[maxValue]>.", + "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "Tafadhali weka nambari halali ya desimali", + "I18N_GENERATE_ATTRIBUTION": "Tengeneza Sifa", + "I18N_GET_STARTED_PAGE_BREADCRUMB": "Anza", + "I18N_GET_STARTED_PAGE_HEADING": "Anza!", + "I18N_GET_STARTED_PAGE_PARAGRAPH_1": "Kuunda uchunguzi ni rahisi na bila malipo. Gawa maarifa yako na wanafunzi kote ulimwenguni, na upate maoni unayoweza kutumia ili kuboresha ufanisi wa uchunguzi wako.", + "I18N_GET_STARTED_PAGE_PARAGRAPH_10": "Zaidi ya hayo, wanafunzi wanapotumia uchunguzi wako, utaweza kuona makosa ya kawaida wanayofanya. Wakati mwingine hii inaweza kufichua maarifa mapya kuhusu sehemu wamechanganyikiwa. Ni rahisi kuongeza uchunguzi kwa maoni ya ziada ikiwa unafikiri kuwa wanafunzi wengine wanaweza kurudia makosa haya. Unaweza hata kumtuma mwanafunzi kwa hatua tofauti, au \"kwenda ndani zaidi\" kwa kuuliza swali lingine.", + "I18N_GET_STARTED_PAGE_PARAGRAPH_11": "Ili kujihusisha na mradi wa Oppia na kutusaidia kuleta dhamira yetu ya elimu ya kimataifa bila malipo, ya ubora wa juu, wasiliana nasi kwa admin@oppia.org , au ugundue njia zaidi za kujihusisha na jumuiya yetu ya watu wanaojitolea. Tutafurahi kusikia kutoka kwako!", + "I18N_GET_STARTED_PAGE_PARAGRAPH_11_HEADING": "Jihusishe", + "I18N_GET_STARTED_PAGE_PARAGRAPH_2": "Unachohitaji ili kuanza ni mada unayotaka kufundisha. Unaweza kuunda uchunguzi kuhusu mada yoyote, kubwa au ndogo. Ukubwa unaofaa ya mada kwa uchunguzi ni ile ambayo ungeshughulikia katika darasa moja. Unaweza pia kuunda uchunguzi mwingi unaohusiana ambao unakusudiwa kukamilishwa kwa mfuatano. Hii inaitwa mkusanyiko .", + "I18N_GET_STARTED_PAGE_PARAGRAPH_2_HEADING": "Chagua Mada", + "I18N_GET_STARTED_PAGE_PARAGRAPH_3": "Unapochagua mada, bofya tu 'Unda', na uingie ukitumia akaunti yako ya Google. 
Ikiwa huna akaunti ya Google, unaweza kufungua hapa .", + "I18N_GET_STARTED_PAGE_PARAGRAPH_3_HEADING": "\nTengeneza Uchunguzi wako", + "I18N_GET_STARTED_PAGE_PARAGRAPH_4": "Uchunguzi una hatua nyingi. Kila hatua inaweza kujumuisha maandishi (km maelezo yaliyoandikwa), picha na video. Kila hatua inampa mwanafunzi swali, ambalo ni lazima alijaribu ili kuendelea. Linaweza kuwa swali la chaguo nyingi, linalowahitaji kuchapa kitu, au kuwa mojawapo ya mwingiliano wowote unaopatikana.", + "I18N_GET_STARTED_PAGE_PARAGRAPH_5": "Baada ya mwanafunzi kujibu swali, Oppia atawapa mrejesho na kuwaacha waendelee kwa hatua inayofuata. Ili kuona matukio ya wanafunzi wanavyopitia Oppia, jaribu mojawapo ya uchunguzi huu:", + "I18N_GET_STARTED_PAGE_PARAGRAPH_6": "Maelezo zaidi kuhusu kuunda uchunguzi yanaweza kupatikana katika hati zetu za watumiaji .", + "I18N_GET_STARTED_PAGE_PARAGRAPH_7": "Mara tu unapounda uchunguzi wako na uko tayari kwa wanafunzi kuuona, bofya kitufe cha 'Chapisha' kilicho juu ya ukurasa. Hii itafanya uchunguzi wako upatikane kwa wanafunzi kote ulimwenguni!", + "I18N_GET_STARTED_PAGE_PARAGRAPH_7_HEADING": "Chapisha Ugunduzi Wako", + "I18N_GET_STARTED_PAGE_PARAGRAPH_8": "Baada ya kuchapisha uchunguzi, unaweza kuushiriki kupitia kiungo, au hata kuupachika katika ukurasa wako wa wavuti .", + "I18N_GET_STARTED_PAGE_PARAGRAPH_8_HEADING": "\nGawanya Uchunguzi wako", + "I18N_GET_STARTED_PAGE_PARAGRAPH_9": "Wanafunzi wanapopitia uchunguzi wako, wanaweza kukutumia maoni ili kukuarifu kuhusu matatizo au kushiriki mawazo ya kuifanya iwe bora zaidi.", + "I18N_GET_STARTED_PAGE_PARAGRAPH_9_HEADING": "Boresha Ugunduzi Wako", + "I18N_GET_STARTED_PAGE_TITLE": "Anza", + "I18N_GOAL_LIMIT": "Mpaka wa malengo <[limit]>", + "I18N_GOT_IT": "Nimeelewa", + "I18N_HEADING_VOLUNTEER": "Ujitolee", + "I18N_HINT_NEED_HELP": "Je, unahitaji usaidizi? 
Tazama kidokezo cha tatizo hili!", + "I18N_HINT_TITLE": "Kidokezo", + "I18N_INTERACTIONS_ALGEBRAIC_EXPR_INSTRUCTION": "Andika swali la hesabu hapa.", + "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "Andika msimbo katika kihariri", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Nenda kwa kihariri cha msimbo", + "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Buruta na udondoshe vitu", + "I18N_INTERACTIONS_FRACTIONS_DIVISION_BY_ZERO": "Tafadhali usiweke 0 kwenye dhehebu", + "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Andika sehemu ya hesabu katika mpangilio huu \"x/y\", au nambari iliyochanganywa katika mpangilio huu \" A x/y \".", + "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Weka sehemu ya hesabu katika fomu ya x/y.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS": "Tafadhali tumia nambari, nafasi au mikwaruzo ya mbele (/) pekee.", + "I18N_INTERACTIONS_FRACTIONS_INVALID_CHARS_LENGTH": "Hakuna nambari yoyote katika sehemu ya hesabu inayopaswa kuwa na zaidi ya tarakimu 7", + "I18N_INTERACTIONS_FRACTIONS_INVALID_FORMAT": "Tafadhali weka sehemu ya hesabu halali (kwa mfano, 5/3 au 1 2/3)", + "I18N_INTERACTIONS_FRACTIONS_NON_EMPTY": "Tafadhali weka thamani ya sehemu ya hesabu isiyo tupu.", + "I18N_INTERACTIONS_FRACTIONS_NON_MIXED": "Tafadhali andika jibu lako kama sehemu ya hesabu (kwa mfano, 5/3 badala ya 1 2/3).", + "I18N_INTERACTIONS_FRACTIONS_PROPER_FRACTION": "Tafadhali weka jibu lenye sehemu ya hesabu \"halali\" (kwa mfano, 1 2/3 badala ya 5/3).", + "I18N_INTERACTIONS_FRACTIONS_SIMPLEST_FORM": "Tafadhali weka jibu kwa njia rahisi zaidi (kwa mfano, 1/3 badala ya 2/6).", + "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "Ongeza Ukingo", + "I18N_INTERACTIONS_GRAPH_ADD_NODE": "Ongeza Kinundu", + "I18N_INTERACTIONS_GRAPH_DELETE": "Futa", + "I18N_INTERACTIONS_GRAPH_EDGE_FINAL_HELPTEXT": "Gusa kipeo lengwa ili kuunda ukingo (bofya kwenye kipeo sawa ili kughairi uundaji wa kingo).", + "I18N_INTERACTIONS_GRAPH_EDGE_INITIAL_HELPTEXT": "Gusa kipeo cha mwanzo cha 
ukingo ili kuunda.", + "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "Grafu batili!", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "Unda grafu", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "Tazama grafu", + "I18N_INTERACTIONS_GRAPH_MOVE": "Songa", + "I18N_INTERACTIONS_GRAPH_MOVE_FINAL_HELPTEXT": "Gusa sehemu yoyote ili kusongesha kipeo hadi mahali hapo.", + "I18N_INTERACTIONS_GRAPH_MOVE_INITIAL_HELPTEXT": "Gusa kipeo ili kusongesha.", + "I18N_INTERACTIONS_GRAPH_RESET_BUTTON": "Washa Upya", + "I18N_INTERACTIONS_GRAPH_RESPONSE_EDGE": "<[edges]> upeo", + "I18N_INTERACTIONS_GRAPH_RESPONSE_EDGES": "<[edges]> upeo", + "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTEX": "na <[vertices]> kipeo", + "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "na <[vertices]> Vipeo", + "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Sasisha Alama", + "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Sasisha Uzito", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Bofya kwenye picha", + "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Chagua picha ya kuonyesha]", + "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Unaweza kuchagua chaguo zaidi.", + "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Tafadhali chagua chaguo moja au zaidi.} other{Tafadhali chagua chaguo # au zaidi.}}", + "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{Chaguo lisilozidi 1 linaweza kuchaguliwa.} other{Chaguzi zisizozidi # zinaweza kuchaguliwa.}}", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "Bofya kwenye ramani", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "Tazama ramani", + "I18N_INTERACTIONS_MATH_EQ_INSTRUCTION": "Andika hesabu ya kulinganisha hapa.", + "I18N_INTERACTIONS_MUSIC_CLEAR": "Futa", + "I18N_INTERACTIONS_MUSIC_INSTRUCTION": "Buruta vidokezo kwa wafanyikazi ili kuunda utaratibu", + "I18N_INTERACTIONS_MUSIC_NARROW_INSTRUCTION": "Onyesha wafanyikazi wa muziki", + "I18N_INTERACTIONS_MUSIC_PLAY": "Cheza", + "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Cheza Mfuatano 
Uliolengwa", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY": "Tafadhali weka sarafu halali (km, $5 au Rupia 5)", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_CURRENCY_FORMAT": "Tafadhali andika vitengo vya sarafu mwanzoni", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_UNIT_CHARS": "Tafadhali hakikisha kuwa kitengo kina nambari, alfabeti, (, ), *, ^, /, - pekee", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_INVALID_VALUE": "Tafadhali hakikisha kuwa thamani ni sehemu ya hesabu au nambari", + "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "Fomu zinazowezekana za kitengo", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_DECIMAL": "Angalau alama 1 ya desimali inapaswa kuwepo.", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_EXPONENT": "Angalau alama 1 ya kipeo (e) inapaswa kuwepo.", + "I18N_INTERACTIONS_NUMERIC_INPUT_ATMOST_1_MINUS": "Angalau alama 1 ya kutoa (-) inapaswa kuwepo.", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_COMMA": "Jibu linaweza kuwa na tarakimu zisizozidi 15 (0-9) bila kujumuisha alama (, au -).", + "I18N_INTERACTIONS_NUMERIC_INPUT_GREATER_THAN_15_DIGITS_DOT": "Jibu linaweza kuwa na tarakimu zisizozidi 15 (0-9) bila kujumuisha alama (. 
au -).", + "I18N_INTERACTIONS_NUMERIC_INPUT_INVALID_NUMBER": "Jibu linapaswa kuwa nambari halali.", + "I18N_INTERACTIONS_NUMERIC_INPUT_LESS_THAN_ZERO": "Jibu lazima liwe kubwa kuliko au sawa na sufuri.", + "I18N_INTERACTIONS_NUMERIC_INPUT_MINUS_AT_BEGINNING": "Ishara ya kutoa (-) inaruhusiwa mwanzoni pekee.", + "I18N_INTERACTIONS_NUMERIC_INPUT_NO_TRAILING_DECIMAL": "Desimali zinazofuatana haziruhusiwi.", + "I18N_INTERACTIONS_PENCILCODEEDITOR_ARE_YOU_SURE_YOU_WANT_TO_RESET_YOUR_CODE": "Je, una uhakika unataka kuweka upya msimbo wako?", + "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Ghairi", + "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Uthibitisho Unahitajika", + "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Weka upya Msimbo", + "I18N_INTERACTIONS_PENCILCODE_INSTRUCTION": "Hariri msimbo. Bofya 'Cheza' ili kukiangalia!", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Onyesha kihariri cha msimbo", + "I18N_INTERACTIONS_RATIO_EMPTY_STRING": "Tafadhali weka uwiano halali (km 1:2 au 1:2:3).", + "I18N_INTERACTIONS_RATIO_INCLUDES_ZERO": "Uwiano hauwezi kuwa na 0 kama kipengele.", + "I18N_INTERACTIONS_RATIO_INVALID_CHARS": "Tafadhali andika uwiano unaojumuisha tarakimu zilizotenganishwa na nukta pacha (km 1:2 au 1:2:3).", + "I18N_INTERACTIONS_RATIO_INVALID_COLONS": "Jibu lako lina nukta pacha nyingi (:) karibu na kila moja.", + "I18N_INTERACTIONS_RATIO_INVALID_FORMAT": "Tafadhali weka uwiano halali (km 1:2 au 1:2:3).", + "I18N_INTERACTIONS_RATIO_NON_INTEGER_ELEMENTS": "Katika swali hili, kila kipengele katika uwiano wako kinapaswa kuwa nambari nzima (sio sehemu au desimali).", + "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Ongeza kipengee", + "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Lo, inaonekana kama seti yako ina nakala sawa!", + "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(Ongeza kipengee kimoja kwa kila mstari.)", + "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Hakuna jibu lililotolewa.", + "I18N_INTERACTIONS_SUBMIT": "Wasilisha", + 
"I18N_INTERACTIONS_TERMS_LIMIT": "Mtayarishi amebainisha idadi ya masharti katika jibu kuwa <[termsCount]>", + "I18N_INVALID_TAGS_AND_ATTRIBUTES_ALERT": "Baadhi ya alama na sifa zisizo sahihi zimeondolewa kwenye picha iliyopakiwa. Ikiwa picha yako inaonekana imepotoshwa, tafadhali \" target=\"_blank\">tujulishe , kisha ujaribu kupakia SVG tofauti.", + "I18N_LANGUAGE_FOOTER_VIEW_IN": "Angalia Oppia katika:", + "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "\nHabari ya mchana,", + "I18N_LEARNER_DASHBOARD_ALL": "Zote", + "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Hariri Malengo", + "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Shaba", + "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Masomo ya Jumuiya", + "I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "Malengo Yaliyokamilika", + "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "Imekamilika", + "I18N_LEARNER_DASHBOARD_COMPLETED_TO_INCOMPLETE_COLLECTIONS": "<[numberMoved]> kati ya mikusanyiko uliyokamilisha imehamishwa hadi sehemu ya 'inaendelea' kwa kuwa uchunguzi mpya umeongezwa!", + "I18N_LEARNER_DASHBOARD_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Endelea ulipowachia", + "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Malengo ya Sasa", + "I18N_LEARNER_DASHBOARD_EMPTY_COLLECTION_PLAYLIST": "Inaonekana hakuna mikusanyiko yoyote katika orodha yako ya 'Cheza Baadaye'. Nenda kwenye maktaba na uunde orodha yako ya kucheza iliyoratibiwa!", + "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_COLLECTIONS": "Inaonekana bado hujakamilisha mikusanyiko yoyote. Nenda kwenye maktaba ili uanzishe mkusanyiko mpya wa kusisimua!", + "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_EXPLORATIONS": "Inaonekana bado hujakamilisha uchunguzi wowote. 
Nenda kwenye maktaba ili uanzishe ugunduzi mpya wa kufurahisha!", + "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_GOALS_SECTION": "Kamilisha Lengo kutoka juu na uone maendeleo yako hapa litakapokamilika!", + "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_STORIES_SECTION": "Endelea kwa darasani ili kukamilisha hadithi mpya ya kusisimua!", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Anza kujifunza kwa ", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_HEADING": "Anza", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_SET_A_GOAL": "Kuweka lengo!", + "I18N_LEARNER_DASHBOARD_EMPTY_CURRENT_GOALS_SECTION": "Anza kujifunza kwa kuchagua lengo kutoka hapa chini!", + "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "Inaonekana hakuna uchunguzi wowote katika orodha yako ya 'Cheza Baadaye'. Nenda kwenye maktaba na uunde orodha yako ya kucheza iliyoratibiwa!", + "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "Bado huna mazungumzo yoyote yanayoendelea. Maoni yako husaidia kuboresha ubora wa masomo yetu. Unaweza kufanya hivyo kwa kuanzisha somo letu lolote na kuwasilisha maoni yako muhimu!", + "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "Inaonekana huna mikusanyiko iliyokamilika kwa sasa. Nenda kwenye maktaba ili uanzishe mkusanyiko mpya wa kusisimua!", + "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "Inaonekana huna uchunguzi uliokamilika nusu kwa sasa. Nenda kwenye maktaba ili uanzishe ugunduzi mpya wa kufurahisha!", + "I18N_LEARNER_DASHBOARD_EMPTY_LEARN_SOMETHING_NEW_SECTION": "Inaonekana umefikia kikomo cha uteuzi wa lengo. 
Nenda kwenye maktaba na uchunguze uvumbuzi zaidi.", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "Anza kwa ", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "Kuweka lengo huruhusu Oppia kukupa mapendekezo bora zaidi katika dashibodi yako ambayo huchangia safari yako ya kujifunza.", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "kuweka lengo! ", + "I18N_LEARNER_DASHBOARD_EMPTY_SUBSCRIPTIONS": "Inaonekana bado hujajisajili kwa watayarishi wowote. Nenda kwenye maktaba ili kugundua watayarishi wapya na ugunduzi wao wa ajabu!", + "I18N_LEARNER_DASHBOARD_EMPTY_SUGGESTED_FOR_YOU_SECTION": "Lo, umemaliza masomo yetu yote ya Mada! Jisikie huru kuangalia Ugunduzi wetu mwingine kwenye ukurasa wetu wa Masomo ya Jumuiya", + "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "Habari ya jioni,", + "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "Iliyochezwa Mwisho", + "I18N_LEARNER_DASHBOARD_FEEDBACK_SECTION": "Masasisho ya Maoni", + "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "Jibu", + "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_STATUS_CHANGE_MESSAGE": "Hali imebadilishwa kuwa '<[threadStatus]>'", + "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_WARNING": "Usitoe taarifa zozote za kibinafsi kwa kuwa majadiliano haya yanaonekana hadharani.", + "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Malengo", + "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "Dhahabu", + "I18N_LEARNER_DASHBOARD_HOME_SECTION": "Mwanzo", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "Haijakamilika", + "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "Inaendelea", + "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "Inaonekana bado hujajaribu uchunguzi wetu wowote.", + "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Hebu tuanze safari hii ya kusisimua!", + "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Jifunze Kitu Kipya", + "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "\nHabari ya asubuhi,", + "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "Maudhui ya Hadithi Mpya 
yanapatikana", + "I18N_LEARNER_DASHBOARD_NONEXISTENT_COLLECTIONS_FROM_PLAYLIST": "{numberNonexistent, plural, one{1 ya mikusanyiko katika orodha yako ya 'Cheza Baadaye' haipatikani tena. Samahani kwa usumbufu} other{# ya mikusanyiko katika orodha yako ya 'Cheza Baadaye' haipatikani tena. Samahani kwa usumbufu}}", + "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_COLLECTIONS": "{numberNonexistent, plural, one{1 ya mikusanyiko uliyokamilisha haipatikani tena. Samahani kwa usumbufu} other{# ya mikusanyiko uliyokamilisha haipatikani tena. Samahani kwa usumbufu}}", + "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_EXPLORATIONS": "{numberNonexistent, plural, one{1 ya uchunguzi uliokamilisha haipatikani tena. Samahani kwa usumbufu} other{# wa uchunguzi uliokamilisha haupatikani tena. Samahani kwa usumbufu}}", + "I18N_LEARNER_DASHBOARD_NONEXISTENT_EXPLORATIONS_FROM_PLAYLIST": "{numberNonexistent, plural, one{1 ya uchunguzi katika orodha yako ya 'Cheza Baadaye' haipatikani tena. Samahani kwa usumbufu} other{# wa uchunguzi katika orodha yako ya 'Cheza Baadaye' haupatikani tena. Samahani kwa usumbufu}}", + "I18N_LEARNER_DASHBOARD_NONEXISTENT_INCOMPLETE_COLLECTIONS": "{numberNonexistent, plural, one{1 ya mikusanyiko inayoendelea haipatikani tena. Samahani kwa usumbufu} other{# ya mikusanyiko inayoendelea haipatikani tena. Samahani kwa usumbufu}}", + "I18N_LEARNER_DASHBOARD_NONEXISTENT_INCOMPLETE_EXPLORATIONS": "{numberNonexistent, plural, one{1 ya uchunguzi uliokamilisha haipatikani tena. Samahani kwa usumbufu} other{# wa uchunguzi uliokamilisha haupatikani tena. Samahani kwa usumbufu}}", + "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_COLLECTION": "Inaonekana bado hujaanzisha mikusanyiko yoyote. Nenda kwenye maktaba ili uanzishe mkusanyiko mpya wa kusisimua!", + "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": "Inaonekana bado hujaanza uchunguzi wowote. 
Nenda kwenye maktaba ili uanzishe ugunduzi mpya wa kufurahisha!", + "I18N_LEARNER_DASHBOARD_PAGE_TITLE": "Dashibodi ya Mwanafunzi | Oppia", + "I18N_LEARNER_DASHBOARD_PLAYLIST_SECTION": "Cheza Baadaye", + "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "Maendeleo", + "I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE": "Buruta na upange upya shughuli katika mpangilio unaotaka kuzicheza!", + "I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE_MOBILE": "Bonyeza na ushikilie ili kupanga upya shughuli katika mpangilio unaotaka kuzicheza!", + "I18N_LEARNER_DASHBOARD_REMOVE_ACTIVITY_MODAL_BODY": "Je, una uhakika unataka kuondoa '<[entityTitle]>' kutoka kwenye orodha yako ya '<[sectionNameI18nId]>'?", + "I18N_LEARNER_DASHBOARD_REMOVE_ACTIVITY_MODAL_HEADER": "Ondoa kutoka '<[sectionNameI18nId]>'orodha?", + "I18N_LEARNER_DASHBOARD_REMOVE_BUTTON": "Ondoa", + "I18N_LEARNER_DASHBOARD_RETURN_TO_FEEDBACK_THREADS_MESSAGE": "Rudi kwenye orodha ya ujumbe", + "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "Tuma", + "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Inatuma...", + "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Fedha", + "I18N_LEARNER_DASHBOARD_SKILLS": "Ujuzi", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Maendeleo ya Ujuzi", + "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Hadithi Zilizokamilika", + "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Usajili", + "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Maendeleo:", + "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "Sasa hivi:", + "I18N_LEARNER_DASHBOARD_SUGGESTION_DESCRIPTION": "Maelezo Mafupi ya Mabadiliko:", + "I18N_LEARNER_DASHBOARD_SUGGESTION_NO_CURRENT_STATE": "Lo! 
Hali hii haipo tena!", + "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "Imependekezwa:", + "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "Pendekezo", + "I18N_LEARNER_DASHBOARD_TOOLTIP": "Mikusanyiko ni uchunguzi mwingi unaohusiana ambao unakusudiwa kukamilishwa kwa mfuatano.", + "I18N_LEARNER_DASHBOARD_VIEW": "Tazama", + "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "Tazama Pendekezo", + "I18N_LEARNT_TOPIC": "Umejifunza <[topicName]>", + "I18N_LEARN_TOPIC": "Jifunze <[topicName]>", + "I18N_LESSON_AUTHORS_DROPDOWN_TITLE": "WAANDISHI WA MASOMO", + "I18N_LESSON_INFO_HEADER": "Maelezo ya Somo", + "I18N_LESSON_INFO_TOOLTIP_MESSAGE": "Umefika kituo cha ukaguzi. Kazi nzuri! Tazama maendeleo yako na maelezo mengine ya somo hapa.", + "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "Umekamilisha hili", + "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "Tayari imeongezwa kwenye orodha ya kucheza", + "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "Ongeza kwenye orodha ya 'Cheza Baadaye'", + "I18N_LIBRARY_ALL_CATEGORIES": "Kategoria Zote", + "I18N_LIBRARY_ALL_CATEGORIES_SELECTED": "Kategoria zote zimechaguliwa", + "I18N_LIBRARY_ALL_LANGUAGES": "Lugha Zote", + "I18N_LIBRARY_ALL_LANGUAGES_SELECTED": "Lugha zote zimechaguliwa", + "I18N_LIBRARY_CATEGORIES_ALGORITHMS": "Algorithms", + "I18N_LIBRARY_CATEGORIES_ARCHITECTURE": "Usanifu majengo", + "I18N_LIBRARY_CATEGORIES_ART": "Sanaa", + "I18N_LIBRARY_CATEGORIES_BIOLOGY": "Biolojia", + "I18N_LIBRARY_CATEGORIES_BUSINESS": "Biashara", + "I18N_LIBRARY_CATEGORIES_CHEMISTRY": "Kemia", + "I18N_LIBRARY_CATEGORIES_CODING": "Kuweka msimbo", + "I18N_LIBRARY_CATEGORIES_COMPUTING": "Computing", + "I18N_LIBRARY_CATEGORIES_ECONOMICS": "Uchumi", + "I18N_LIBRARY_CATEGORIES_EDUCATION": "Elimu", + "I18N_LIBRARY_CATEGORIES_ENGINEERING": "Uhandisi", + "I18N_LIBRARY_CATEGORIES_ENGLISH": "Kiingereza", + "I18N_LIBRARY_CATEGORIES_ENVIRONMENT": "Mazingira", + "I18N_LIBRARY_CATEGORIES_GEOGRAPHY": "Jiografia", + "I18N_LIBRARY_CATEGORIES_GOVERNMENT": "Serikali", + 
"I18N_LIBRARY_CATEGORIES_HISTORY": "Historia", + "I18N_LIBRARY_CATEGORIES_HOBBIES": "Mambo Unayopenda", + "I18N_LIBRARY_CATEGORIES_INTERACTIVE_FICTION": "Tamthiliya maingiliano", + "I18N_LIBRARY_CATEGORIES_LANGUAGES": "Lugha", + "I18N_LIBRARY_CATEGORIES_LAW": "Sheria", + "I18N_LIBRARY_CATEGORIES_LIFE_SKILLS": "Ujuzi wa maisha", + "I18N_LIBRARY_CATEGORIES_MATHEMATICS": "Hesabu", + "I18N_LIBRARY_CATEGORIES_MATHS": "Hesabu", + "I18N_LIBRARY_CATEGORIES_MEDICINE": "Dawa", + "I18N_LIBRARY_CATEGORIES_MUSIC": "Muziki", + "I18N_LIBRARY_CATEGORIES_PHILOSOPHY": "Falsafa", + "I18N_LIBRARY_CATEGORIES_PHYSICS": "Fizikia", + "I18N_LIBRARY_CATEGORIES_PROGRAMMING": "Kupanga programu", + "I18N_LIBRARY_CATEGORIES_PSYCHOLOGY": "Saikolojia", + "I18N_LIBRARY_CATEGORIES_PUZZLES": "Mafumbo", + "I18N_LIBRARY_CATEGORIES_READING": "Kusoma", + "I18N_LIBRARY_CATEGORIES_RELIGION": "Dini", + "I18N_LIBRARY_CATEGORIES_SPORT": "Mchezo", + "I18N_LIBRARY_CATEGORIES_STATISTICS": "Takwimu", + "I18N_LIBRARY_CATEGORIES_TEST": "Mtihani", + "I18N_LIBRARY_CATEGORIES_WELCOME": "Karibu", + "I18N_LIBRARY_CREATE_EXPLORATION_QUESTION": "Je, ungependa kuunda moja?", + "I18N_LIBRARY_GROUPS_COMPUTING": "Computing", + "I18N_LIBRARY_GROUPS_FEATURED_ACTIVITIES": "Shughuli Zilizoangaziwa", + "I18N_LIBRARY_GROUPS_HUMANITIES": "Humanities", + "I18N_LIBRARY_GROUPS_LANGUAGES": "Lugha", + "I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS": "Hesabu na Takwimu", + "I18N_LIBRARY_GROUPS_RECENTLY_PUBLISHED": "Iliyochapishwa Hivi Karibuni", + "I18N_LIBRARY_GROUPS_SCIENCE": "Sayansi", + "I18N_LIBRARY_GROUPS_SOCIAL_SCIENCE": "Sayansi ya Jamii", + "I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS": "Ugunduzi Uliokadiriwa Juu", + "I18N_LIBRARY_INCOMPLETE_ACTIVITY_ICON": "Umekaribia shughuli hii.", + "I18N_LIBRARY_LAST_UPDATED": "Ilisasishwa mwisho", + "I18N_LIBRARY_LOADING": "Inapakia", + "I18N_LIBRARY_MAIN_HEADER": "Hebu fikiria nini unaweza kujifunza leo...", + "I18N_LIBRARY_N/A": "N/A", + "I18N_LIBRARY_NO_EXPLORATIONS": "Lo, hakuna 
uchunguzi wa kuonyesha.", + "I18N_LIBRARY_NO_EXPLORATION_FOR_QUERY": "Lo, inaonekana kama utafutaji wako haukulingana na uchunguzi wowote.", + "I18N_LIBRARY_NO_EXPLORATION_GROUPS": "Hakuna vikundi vya uchunguzi vinavyopatikana vya kuonyesha.", + "I18N_LIBRARY_NO_OBJECTIVE": "Hakuna lengo lililobainishwa.", + "I18N_LIBRARY_N_CATEGORIES": "{categoriesCount, plural, =1{1 Category} Other{# Categories}}", + "I18N_LIBRARY_N_LANGUAGES": "{languagesCount, plural, =1{1 Language} other{# Languages}}", + "I18N_LIBRARY_PAGE_BROWSE_MODE_TITLE": "Tafuta uvumbuzi wa kujifunza kutoka - Oppia", + "I18N_LIBRARY_PAGE_TITLE": "Masomo ya Maktaba ya Jamii | Oppia", + "I18N_LIBRARY_RATINGS_TOOLTIP": "Ukadiriaji", + "I18N_LIBRARY_SEARCH_PLACEHOLDER": "Una hamu ya nini?", + "I18N_LIBRARY_SUB_HEADER": "Vinjari seti kamili ya masomo yaliyoundwa na jumuiya", + "I18N_LIBRARY_VIEWS_TOOLTIP": "Mitazamo", + "I18N_LIBRARY_VIEW_ALL": "Tazama zote", + "I18N_LICENSE_PAGE_LICENSE_HEADING": "Leseni", + "I18N_LICENSE_PAGE_PARAGRAPH_1": "Maudhui yote katika masomo ya Oppia yameidhinishwa chini ya CC-BY-SA 4.0 .", + "I18N_LICENSE_PAGE_PARAGRAPH_2": "Programu inayoendesha Oppia ni chanzo wazi, na msimbo wake hutolewa chini ya leseni ya Apache 2.0 .", + "I18N_LICENSE_PAGE_TITLE": "Ukurasa wa Leseni | Oppia", + "I18N_LICENSE_TERMS_HEADING": "Masharti ya Leseni", + "I18N_LOGIN_PAGE_TITLE": "Ingia | Oppia", + "I18N_LOGOUT_LOADING": "Toka", + "I18N_LOGOUT_PAGE_BROWSER_TAB_TITLE": "Toka | Oppia", + "I18N_LOGOUT_PAGE_TITLE": "Toka", + "I18N_MATH_COURSE_DETAILS": "Kozi ya misingi ya hesabu iliyoratibiwa ya Oppia hufunza kanuni za msingi za hesabu, inayojumuisha dhana muhimu kama vile kuongeza, kuzidisha na sehemu za hesabu. Mara tu unapofahamu dhana hizi za msingi, unaweza kuendelea na masomo ya juu zaidi! 
Kila mada inajengwa juu ya ile iliyotangulia, kwa hivyo unaweza kuanza kutoka mwanzo na kukamilisha masomo kutoka kiwango chochote cha ujuzi, au ingia tu ikiwa unahitaji usaidizi kuhusu mada fulani.", + "I18N_MATH_TOPICS_COVERED": "Anzia misingi na mada yetu ya kwanza, Thamani ya Sehemu. Au, ikiwa unataka kuzungumzia mada mahususi, nenda kwenye mada yoyote na uingie ndani!", + "I18N_MODAL_CANCEL_BUTTON": "Ghairi", + "I18N_MODAL_CONTINUE_BUTTON": "Endelea", + "I18N_NEXT_LESSON": "Somo Linalofuata", + "I18N_NO": "Hapana", + "I18N_ONE_SUBSCRIBER_TEXT": "Una mchangiaji 1.", + "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Ushirikiano", + "I18N_PARTNERSHIPS_PAGE_TITLE": "Ushirikiano | Oppia", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "Akaunti inasubiri Kufutwa", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "Akaunti ya Kufutwa", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1": "Akaunti yako imeratibiwa kufutwa, na itafutwa baada ya saa 24. Utaarifiwa kwa barua pepe baada ya ufutaji kukamilika.", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1_HEADING": "Utaratibu wa kufuta unaendelea", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2": "Kitendo hiki kitafuta akaunti hii ya mtumiaji na pia data yote ya faragha inayohusishwa na akaunti hii. Data ambayo tayari iko hadharani haitatambulishwa ili isiweze kuhusishwa na akaunti hii. Umiliki wa baadhi ya data ambayo tayari ni ya umma unaweza kuhamishiwa kwa jumuiya.", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2_HEADING": "Maelezo ya kufuta", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_TITLE": "Akaunti Inasubiri Kufutwa| Oppia", + "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_1": "Kila mtu anakaribishwa kucheza na kutoa maoni kuhusu ugunduzi uliochapishwa. Kwa usaidizi wa kila mtu, tunaweza kuendelea kuboresha masomo kwenye tovuti na kuyafanya yawe na ufanisi iwezekanavyo.", + "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_2": "Tumia uamuzi mzuri unapochapisha uchunguzi. 
Ugunduzi unapaswa kuwa na thamani kubwa ya kielimu na hauwezi kuwa na utangazaji, barua taka, uharibifu na/au matumizi mabaya.", + "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_3": "Uwe raia mwema. Kufungua akaunti nyingi, kutumia vibaya mifumo ya maoni, kutumia uchunguzi kuwahadaa watumiaji, au tabia nyingine kama hiyo isiyo ya kijamii haitakubaliwa na huenda ikasababisha akaunti kusimamishwa.", + "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_HEADING": "Miongozo ya Jumuiya", + "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_TEXT": "Ikiwa unahitaji ufafanuzi wowote kuhusu miongozo hii, tafadhali jisikie huru kuuliza kwenye mijadala yetu.", + "I18N_PLAYBOOK_HEADING": "Miongozo ya Watayarishi", + "I18N_PLAYBOOK_PAGE_TITLE": "Mwongozo wa Watayarishi | Oppia", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_HEADING": "Kufanya Ugunduzi Wako uwe wa Kuchapishwa", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_1": "Fundisha jambo la maana - Wasilisha maelezo ambayo ni mapya kwa hadhira lengwa - usijaribu tu maarifa ambayo tayari wanayo. Pia, ikiwa ungependa kufundisha mada inayohusu uchunguzi uliopo, zingatia kuwasilisha maoni kwa ajili ya uchunguzi wa sasa ili kusaidia kuuboresha badala yake - ni rahisi zaidi!", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_2": "Fundisha zaidi ya factoid moja - Unapochagua mada, jaribu na uchague dhana inayohusika, ngumu ambayo ina tofauti ndogo na kina, au mkusanyiko wa mambo yanayohusiana, ya kuvutia. Inaweza pia kuwa nzuri kuwa na hisia ya maendeleo na changamoto, ili mwanafunzi apate nafasi ya kutumia dhana ambayo wamejifunza hivi karibuni kwa hali isiyoonekana.", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_3": "Toa maoni yenye taarifa - Usiwaambie tu wanafunzi kama wako sahihi au sio sahihi. Eleza hoja, au chimba zaidi ili kujaribu na kuwasaidia kuelewa kosa lao. 
Tumia njia mbadala kutoa vidokezo kwa maswali ya wazi na kuwapa wanafunzi maarifa mapya na muhimu.", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_4": "Sahihisha - Hakikisha kwamba uchunguzi wako umeandikwa vyema na iwe rahisi kusoma. Inapaswa kuwa bila makosa ya makosa ya kuchapa, makosa ya kweli na hitilafu, kwani haya yatasababisha kupoteza uaminifu machoni pa mwanafunzi.", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_5": "Iwakilishe kwa usahihi - Lengo la kujifunza linaonyeshwa kwa wanafunzi wanaovinjari uchunguzi, na linapaswa kuonyesha maudhui ya uchunguzi wako. Uchunguzi unapaswa kutoa maudhui yote yaliyoahidiwa katika lengo la kujifunza na katika uchunguzi wenyewe.", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_6": "Endelea kuvutia! - Ugunduzi bora husimulia hadithi, na una maoni muhimu. Huwapa wanafunzi nafasi ya kusababu kuhusu dhana, kujaribu maarifa mapya, na kupokea maoni muhimu kuhusu kazi zao.", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_PARAGRAPH_1": "Ugunduzi unapochapishwa, unapewa sifa, na unapatikana kwa mtu yeyote kujifunza. Ili kuunda hali nzuri ya kujifunza kwa wanafunzi, hapa kuna vidokezo vichache ambavyo tumepata kuwa muhimu:", + "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_PARAGRAPH_2": "Kwa usaidizi wa ziada wa kuunda maoni muhimu na uchunguzi mzuri, angalia ukurasa wetu wa Vidokezo vya Usanifu .", + "I18N_PLAYBOOK_PUBLICATION_POLICY_HEADING": "Sera ya uchapishaji", + "I18N_PLAYBOOK_PUBLICATION_POLICY_PARAGRAPH_1": "Mmiliki yeyote wa uchunguzi wa faragha anaweza kuuchapisha wakati wowote. Hata hivyo, utafutaji wa ubora wa chini unaweza kubatilishwa na wasimamizi, pamoja na maoni kuhusu jinsi ya kuyaboresha.", + "I18N_PLAYBOOK_PUBLICATION_POLICY_PARAGRAPH_2": "Ugunduzi unakusudiwa kuboreshwa kila mara, na ugunduzi ambao haujahaririwa kwa muda mrefu unaweza kuchukuliwa kuwa \"yatima\". 
Katika hali hii, umiliki unaweza kukabidhiwa kwa jumuiya ya Oppia kwa ujumla (kwa hiari ya msimamizi), ili uchunguzi uendelee kuboreshwa.", + "I18N_PLAYBOOK_TAB_PARTICIPATION_PLAYBOOK": "Kitabu cha kucheza cha Ushiriki", + "I18N_PLAYER_AUDIO_EXPAND_TEXT": "Sikiliza somo", + "I18N_PLAYER_AUDIO_LANGUAGE": "Lugha", + "I18N_PLAYER_AUDIO_LOADING_AUDIO": "Inapakia sauti...", + "I18N_PLAYER_AUDIO_MIGHT_NOT_MATCH_TEXT": "Sauti inaweza isilingane kikamilifu na maandishi", + "I18N_PLAYER_AUDIO_NOT_AVAILABLE_IN": "Haipatikani katika <[languageDescription]>", + "I18N_PLAYER_AUDIO_TRANSLATION_SETTINGS": "Mipangilio ya Tafsiri ya Sauti", + "I18N_PLAYER_BACK": "Nyuma", + "I18N_PLAYER_BACK_TO_COLLECTION": "Rudi kwenye Mkusanyiko", + "I18N_PLAYER_BANDWIDTH_USAGE_WARNING_MODAL_BODY": "Tafsiri hii ya sauti ina <[fileSizeMB]>MB ya sauti ya <[languageDescription]> . Ungependa kuendelea kupakua?", + "I18N_PLAYER_BANDWIDTH_USAGE_WARNING_MODAL_DOWNLOAD_ALL_AUDIO": "Pakua sauti zote za <[languageDescription]> katika uchunguzi huu (<[fileSizeMB]>MB)", + "I18N_PLAYER_BANDWIDTH_USAGE_WARNING_MODAL_TITLE": "Onyo la Matumizi ya Bandwidth", + "I18N_PLAYER_CARD_NUMBER_TOOLTIP": "Kadi #", + "I18N_PLAYER_COMMUNITY_EDITABLE_TOOLTIP": "Inaweza kuhaririwa na jumuiya", + "I18N_PLAYER_CONTINUE_BUTTON": "Endelea", + "I18N_PLAYER_CONTRIBUTORS_TOOLTIP": "Wachangiaji", + "I18N_PLAYER_DEFAULT_MOBILE_PLACEHOLDER": "Bonyeza hapa ili kujibu!", + "I18N_PLAYER_EDIT_TOOLTIP": "Hariri", + "I18N_PLAYER_EMBED_TOOLTIP": "Pachika", + "I18N_PLAYER_FEEDBACK_TOOLTIP": "Maoni", + "I18N_PLAYER_FORWARD": "Mbele", + "I18N_PLAYER_GIVE_UP": "Kata tamaa?", + "I18N_PLAYER_GIVE_UP_TOOLTIP": "Bofya hapa kwa jibu.", + "I18N_PLAYER_HINT": "Kidokezo", + "I18N_PLAYER_HINTS": "Vidokezo", + "I18N_PLAYER_HINTS_EXHAUSTED": "Samahani, nimeishiwa na vidokezo!", + "I18N_PLAYER_HINT_IS_AVAILABLE": "Bofya hapa kwa kidokezo!", + "I18N_PLAYER_HINT_NEED_A_HINT": "Je, unahitaji kidokezo?", + "I18N_PLAYER_HINT_NOT_AVAILABLE": "Jaribu kufikiria zaidi 
kabla ya kuitisha kidokezo!", + "I18N_PLAYER_HINT_REQUEST_STRING_1": "Ningependa kidokezo.", + "I18N_PLAYER_HINT_REQUEST_STRING_2": "Nimekwama kidogo, vidokezo vyovyote?", + "I18N_PLAYER_HINT_REQUEST_STRING_3": "Nina shida.", + "I18N_PLAYER_INFO_TOOLTIP": "Habari", + "I18N_PLAYER_IS_PRIVATE": "Ugunduzi huu ni wa faragha.", + "I18N_PLAYER_LAST_UPDATED_TOOLTIP": "Ilisasishwa Mwisho", + "I18N_PLAYER_LEARN_AGAIN_BUTTON": "Jifunze Tena", + "I18N_PLAYER_LEAVE_FEEDBACK": "Acha maoni kwa waandishi. (Inapowasilishwa, hii pia itajumuisha rejeleo la kadi ambayo uko kwenye uchunguzi.)", + "I18N_PLAYER_LOADING": "Inapakia...", + "I18N_PLAYER_NEXT_LESSON": "Somo Linalofuata", + "I18N_PLAYER_NO_OBJECTIVE": "Hakuna lengo lililobainishwa.", + "I18N_PLAYER_NO_TAGS": "Hakuna alama zilizobainishwa.", + "I18N_PLAYER_PLAY_EXPLORATION": "Ugunduzi wa kucheza", + "I18N_PLAYER_PLUS_TAGS": "<[additionalTagNumber]>+ tagi zaidi", + "I18N_PLAYER_PREVIOUS_RESPONSES": "Majibu ya awali (<[previousResponses]>)", + "I18N_PLAYER_RATE_EXPLORATION": "Umejifunza kitu kipya? 
Je, unaweza kukadiriaje uchunguzi huu?", + "I18N_PLAYER_RATINGS_TOOLTIP": "Makadirio", + "I18N_PLAYER_RECOMMEND_EXPLORATIONS": "Mapendekezo Yanayofuata", + "I18N_PLAYER_REPORT_MODAL_BODY_AD": "Maudhui ya kupotosha, barua taka au matangazo", + "I18N_PLAYER_REPORT_MODAL_BODY_ADDITIONAL_DETAILS": "Tafadhali toa maelezo ya ziada kwa wasimamizi:", + "I18N_PLAYER_REPORT_MODAL_BODY_HEADER": "Tatizo ni nini?", + "I18N_PLAYER_REPORT_MODAL_BODY_OTHER": "Nyingine", + "I18N_PLAYER_REPORT_MODAL_BODY_POOR_EXPERIENCE": "Uzoefu duni wa kujifunza", + "I18N_PLAYER_REPORT_MODAL_FOOTER_CANCEL": "Ghairi", + "I18N_PLAYER_REPORT_MODAL_FOOTER_SUBMIT": "Wasilisha", + "I18N_PLAYER_REPORT_MODAL_HEADER": "Ripoti uchunguzi huu", + "I18N_PLAYER_REPORT_SUCCESS_MODAL_BODY": "Ripoti yako imetumwa kwa wasimamizi kwa ukaguzi.", + "I18N_PLAYER_REPORT_SUCCESS_MODAL_CLOSE": "Funga", + "I18N_PLAYER_REPORT_SUCCESS_MODAL_HEADER": "Asante!", + "I18N_PLAYER_REPORT_TOOLTIP": "Ripoti Uchunguzi", + "I18N_PLAYER_RETURN_TO_COLLECTION": "Rudi kwa <[collectionTitle]>", + "I18N_PLAYER_RETURN_TO_EDITOR": "Rudi kwa mhariri", + "I18N_PLAYER_RETURN_TO_LIBRARY": "Rudi kwenye Maktaba", + "I18N_PLAYER_RETURN_TO_PARENT": "Rudi kwenye Somo Kuu", + "I18N_PLAYER_RETURN_TO_STORY": "Rudi kwenye Hadithi", + "I18N_PLAYER_SHARE_EXPLORATION": "Je, ulifurahia uvumbuzi huu? 
Shiriki na marafiki zako!", + "I18N_PLAYER_SHARE_THIS_COLLECTION": "Gawa mkusanyiko huu", + "I18N_PLAYER_SHARE_THIS_EXPLORATION": "\nGawanya Uchunguzi wako", + "I18N_PLAYER_STAY_ANONYMOUS": "Usijulikane", + "I18N_PLAYER_SUBMIT_BUTTON": "Wasilisha", + "I18N_PLAYER_TAGS_TOOLTIP": "Lebo", + "I18N_PLAYER_THANK_FEEDBACK": "Asante kwa maoni!", + "I18N_PLAYER_UNRATED": "Haijakadiriwa", + "I18N_PLAYER_VIEWS_TOOLTIP": "Maoni", + "I18N_PRACTICE_SESSION_PAGE_BREADCRUMB": "Kipindi cha Mazoezi", + "I18N_PRACTICE_SESSION_PAGE_TITLE": "Kipindi cha Mazoezi: <[topicName]> - Oppia", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "Lugha ya sauti", + "I18N_PREFERENCES_BIO": "Wasifu", + "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "Sehemu hii ni ya hiari. Chochote unachoandika hapa ni cha umma na kinaweza kutazamwa na ulimwengu wote.", + "I18N_PREFERENCES_BREADCRUMB": "Mapendeleo", + "I18N_PREFERENCES_CANCEL_BUTTON": "Ghairi", + "I18N_PREFERENCES_CHANGE_PICTURE": "Badilisha picha ya wasifu", + "I18N_PREFERENCES_EMAIL": "Barua pepe", + "I18N_PREFERENCES_EMAIL_CLARIFICATION": "Tafadhali onyesha hapa chini wakati ungependa kupokea barua pepe kutoka kwa Oppia. Unaweza kubadilisha mapendeleo yako wakati wowote kwenye ukurasa huu au kwa kufuata maagizo ya kujiondoa yaliyo katika kila barua pepe kama hizo.", + "I18N_PREFERENCES_EMAIL_EXPLAIN": "Wasimamizi na wasimamizi wa tovuti pekee ndio wanaoweza kuona anwani yako ya barua pepe.", + "I18N_PREFERENCES_EMAIL_RECEIVE_EDIT_RIGHTS_NEWS": "Pokea barua pepe mtu anapokupa haki za kuhariri kwenye uchunguzi", + "I18N_PREFERENCES_EMAIL_RECEIVE_FEEDBACK_NEWS": "Pokea barua pepe mtu anapokutumia maoni kuhusu uchunguzi", + "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "Pokea habari na sasisho kuhusu tovuti", + "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "Pokea barua pepe mtayarishi ambaye umejisajili kwake anapochapisha uchunguzi mpya", + "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "Hatukuweza kukuongeza kwenye orodha yetu ya barua pepe kiotomatiki. 
Tafadhali tembelea kiungo kifuatacho ili kujiandikisha kwa orodha yetu ya barua pepe:", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "Hamisha akaunti", + "I18N_PREFERENCES_EXPORT_ACCOUNT_INFO_TEXT": "Hii itapakua data ya akaunti yako ya Oppia kama faili ya maandishi iliyoumbizwa na JSON.", + "I18N_PREFERENCES_EXPORT_ACCOUNT_WARNING_TEXT": "Tafadhali usiondoke kwenye ukurasa huu. Data yako inapakiwa kwa sasa na itapakuliwa kama faili ya maandishi iliyoumbizwa na JSON itakapokamilika. Ikiwa kitu kitaenda vibaya, tafadhali wasiliana", + "I18N_PREFERENCES_HEADING": "Mapendeleo", + "I18N_PREFERENCES_HEADING_SUBTEXT": "Mabadiliko yoyote utakayofanya kwenye ukurasa huu yatahifadhiwa kiotomatiki.", + "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "Bado hujajisajili kwa watayarishi wowote. Jisikie huru kujiandikisha kwa mwandishi unayempenda kwa kubofya kitufe cha 'jiandikishe' katika ukurasa wa wasifu wa mwandishi. Kwa kujiandikisha kwa mwandishi, utaarifiwa kwa barua-pepe mwandishi atakapochapisha somo jipya.", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "Athari", + "I18N_PREFERENCES_PAGE_BROWSER_TAB_TITLE": "Mapendeleo | Oppia", + "I18N_PREFERENCES_PAGE_TITLE": "Badilisha mapendeleo yako ya wasifu - Oppia", + "I18N_PREFERENCES_PICTURE": "Picha", + "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "Lugha ya Sauti Inayopendekezwa", + "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE_EXPLAIN": "Hii ndiyo lugha ambayo, ikiwa inapatikana, itachaguliwa kwa chaguo-msingi unapocheza uchunguzi kwa tafsiri za sauti.", + "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE_PLACEHOLDER": "Lugha ya Sauti Inayopendekezwa", + "I18N_PREFERENCES_PREFERRED_DASHBOARD": "Dashibodi Inayopendekezwa", + "I18N_PREFERENCES_PREFERRED_DASHBOARD_EXPLAIN": "Hii ndio dashibodi ambayo itaonyeshwa kwa chaguo-msingi wakati wa kuingia.", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "Lugha za uchunguzi zinazopendekezwa", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "Lugha hizi zitachaguliwa kwa chaguo-msingi 
unapotafuta matunzio kwa ajili ya uchunguzi.", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": "Chagua lugha unazopendelea.", + "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "Lugha ya Tovuti Inayopendekezwa", + "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "Hii ndiyo lugha ambayo tovuti inaonyeshwa.", + "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "Lugha ya Tovuti Inayopendekezwa", + "I18N_PREFERENCES_PROFILE_PICTURE_ADD": "Ongeza Picha ya Wasifu", + "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Buruta ili kupunguza na kubadilisha ukubwa:", + "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Hitilafu: Haikuweza kusoma faili ya picha.", + "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Pakia Picha ya Wasifu", + "I18N_PREFERENCES_SEARCH_LABEL": "Tafuta", + "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Chagua lugha unazopendelea...", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "Lugha ya tovuti", + "I18N_PREFERENCES_SUBJECT_INTERESTS": "Maslahi ya Somo", + "I18N_PREFERENCES_SUBJECT_INTERESTS_ERROR_TEXT": "Maslahi ya somo lazima yawe ya kipekee na kwa herufi ndogo.", + "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "Kwa mfano: Hesabu, sayansi ya kompyuta, sanaa, ...", + "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "Ongeza jambo jipya linalokuvutia (kwa kutumia herufi ndogo na nafasi)...", + "I18N_PREFERENCES_SUBJECT_INTERESTS_LABEL": "Maslahi Mpya ya Somo", + "I18N_PREFERENCES_SUBJECT_INTERESTS_PLACEHOLDER": "Weka mambo yanayokuvutia...", + "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "Watayarishi ambao umejiandikisha kwao", + "I18N_PREFERENCES_USERNAME": "Jina la mtumiaji", + "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Bado haijachaguliwa", + "I18N_PRIVACY_POLICY_PAGE_TITLE": "Sera ya Faragha | Oppia", + "I18N_PROFILE_NO_EXPLORATIONS": "Mtumiaji huyu bado hajaunda au kuhariri uchunguzi wowote.", + "I18N_PROFILE_PAGE_TITLE": "Wasifu | Oppia", + "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Uchanganuzi wa Alama", + "I18N_QUESTION_PLAYER_MY_DASHBOARD": 
"Dashibodi", + "I18N_QUESTION_PLAYER_NEW_SESSION": "Cheza tena", + "I18N_QUESTION_PLAYER_RETRY_TEST": "Jaribu tena Mtihani", + "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "Rudi kwenye Hadithi", + "I18N_QUESTION_PLAYER_REVIEW_LOWEST_SCORED_SKILL": "Kagua Ustadi wa Alama ya Chini Zaidi", + "I18N_QUESTION_PLAYER_SCORE": "Alama", + "I18N_QUESTION_PLAYER_SKILL_DESCRIPTIONS": "Maelezo ya Ujuzi", + "I18N_QUESTION_PLAYER_TEST_FAILED": "Kipindi kimeshindwa. Tafadhali kagua ujuzi na ujaribu tena", + "I18N_QUESTION_PLAYER_TEST_PASSED": "Kikao kimekamilika. Umefanya vizuri!", + "I18N_REFRESHER_EXPLORATION_MODAL_BODY": "Inaonekana unatatizika na swali hili. Je, ungependa kujaribu uchunguzi mfupi ili kuonyesha upya kumbukumbu yako,na kurudi hapa baada ya kukamilisha hilo?", + "I18N_REFRESHER_EXPLORATION_MODAL_TITLE": "Je, ungependa kionyesha upya?", + "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "Kipindi cha Usajili Kimeisha", + "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "Samahani, kipindi chako cha usajili kimeisha.Tafadhali bofya \"Endelea Usajili\" ili kuanzisha upya mchakato.", + "I18N_RELEASE_COORDINATOR_PAGE_TITLE": "Jopo la Mratibu wa Kutolewa kwa Oppia", + "I18N_RESET_CODE": "Weka upya Msimbo", + "I18N_RESTART_EXPLORATION_BUTTON": "Anzisha somo upya", + "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Kagua Mtihani", + "I18N_REVIEW_TEST_PAGE_TITLE": "Kagua Mtihani: <[storyName]> - Oppia", + "I18N_SAVE_PROGRESS": "Ingia au ujiandikishe ili kuhifadhi maendeleo yako na ucheze somo linalofuata.", + "I18N_SHARE_LESSON": "Gawanya somo hili", + "I18N_SHOW_LESS": "Onyesha Kidogo", + "I18N_SHOW_MORE": "Onyesha Zaidi", + "I18N_SHOW_SOLUTION_BUTTON": "Onyesha Suluhisho", + "I18N_SIDEBAR_ABOUT_LINK": "Kutuhusu", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "Kuhusu Oppia Foundation", + "I18N_SIDEBAR_BLOG": "Blogi", + "I18N_SIDEBAR_CLASSROOM": "Darasa", + "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Hesabu ya Msingi", + "I18N_SIDEBAR_CONTACT_US": "Wasiliana nasi", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": 
"Tuko hapa kukusaidia kwa maswali yoyote uliyo nayo.", + "I18N_SIDEBAR_DONATE": "Changa", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "Michango yako inasaidia kutoa elimu bora kwa wote.", + "I18N_SIDEBAR_FORUM": "Jukwaa", + "I18N_SIDEBAR_GET_INVOLVED": "Jihusishe", + "I18N_SIDEBAR_HOME": "Mwanzo", + "I18N_SIDEBAR_LEARN": "Jifunze", + "I18N_SIDEBAR_LIBRARY_LINK": "Maktaba", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "Misingi ya Hesabu", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "Masomo ya kirafiki ya wanaoanza kukusaidia kuanza katika hesabu.", + "I18N_SIDEBAR_OPPIA_FOUNDATION": "Shirika la Oppia", + "I18N_SIDEBAR_PARTNERSHIPS": "Ushirikiano", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "Lete elimu bora kwa wanafunzi katika mkoa wako.", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "Kuongeza na Kutoa", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "Maktaba ya Jumuiya", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "Rasilimali za ziada zinazotolewa na jumuiya.", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "Kuzidisha", + "I18N_SIDEBAR_SUBMENU_PLACE_VALUES": "Thamani ya Sehemu", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "Tazama Masomo Yote", + "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Fundisha kwa Oppia", + "I18N_SIDEBAR_VOLUNTEER": "Kujitolea", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "Jiunge na timu yetu ya kimataifa ili kuunda na kuboresha masomo.", + "I18N_SIGNIN_LOADING": "Kuingia", + "I18N_SIGNIN_PAGE_TITLE": "Ingia", + "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Kwa kuteua kisanduku kilicho upande wa kushoto wa maandishi haya, unathibitisha, unaafikiana, na kukubali kufungwa na <[sitename]> Masharti ya Matumizi, yanayopatikana hapa .", + "I18N_SIGNUP_BUTTON_SUBMIT": "Wasilisha na uanze kuchangia", + "I18N_SIGNUP_CC_TITLE": "Leseni ya Creative Commons", + "I18N_SIGNUP_CLOSE_BUTTON": "Funga", + "I18N_SIGNUP_COMPLETE_REGISTRATION": "Kamilisha Usajili Wako", + "I18N_SIGNUP_DO_NOT_SEND_EMAILS": "Usitume hizi barua pepe", + "I18N_SIGNUP_EMAIL": "Barua pepe", + 
"I18N_SIGNUP_EMAIL_PREFERENCES": "Mapendeleo ya Barua pepe", + "I18N_SIGNUP_EMAIL_PREFERENCES_EXPLAIN": "Unaweza kubadilisha mpangilio huu wakati wowote kutoka kwa ukurasa wako wa Mapendeleo.", + "I18N_SIGNUP_ERROR_MUST_AGREE_TO_TERMS": "Ili kuhariri uchunguzi kwenye tovuti hii, utahitaji kukubaliana na masharti ya tovuti.", + "I18N_SIGNUP_ERROR_NO_USERNAME": "Tafadhali weka jina la mtumiaji.", + "I18N_SIGNUP_ERROR_USERNAME_NOT_AVAILABLE": "Jina hili la mtumiaji halipatikani.", + "I18N_SIGNUP_ERROR_USERNAME_ONLY_ALPHANUM": "Majina ya watumiaji yanaweza tu kuwa na herufi za alphanumeric.", + "I18N_SIGNUP_ERROR_USERNAME_TAKEN": "Samahani, jina hili la mtumiaji tayari limechukuliwa.", + "I18N_SIGNUP_ERROR_USERNAME_TOO_LONG": "Jina la mtumiaji linaweza kuwa na herufi zisizozidi <[maxUsernameLength]>.", + "I18N_SIGNUP_ERROR_USERNAME_WITH_ADMIN": "Majina ya watumiaji walio na 'admin' yamehifadhiwa.", + "I18N_SIGNUP_ERROR_USERNAME_WITH_SPACES": "Tafadhali hakikisha kuwa jina lako la mtumiaji halina nafasi.", + "I18N_SIGNUP_FIELD_REQUIRED": "Sehemu hii inahitajika.", + "I18N_SIGNUP_LICENSE_NOTE": "Tafadhali kumbuka kuwa, kwa kukubaliana na Sheria na Masharti yetu, unakubali kwamba maudhui yoyote au michango unayotoa kwenye Tovuti yetu iko na itapewa leseni chini ya CC-BY-SA v.4.0. Tafadhali kagua Sheria na Masharti yetu kwa maelezo zaidi kuhusu ruzuku ya leseni. Kwa habari zaidi kuhusu CC-BY-SA, bofya hapa .", + "I18N_SIGNUP_LICENSE_OBJECTIVE": "Kutumia leseni ya <[licenselink]> huruhusu maudhui ya ugunduzi kunakiliwa bila malipo, kutumiwa tena, kuchanganywa na kusambazwa upya. 
Sharti kuu ni kwamba ikiwa mtu atachanganya, kubadilisha au kujenga juu ya nyenzo, lazima pia asambaze kazi yake chini ya leseni sawa ya bure.", + "I18N_SIGNUP_LOADING": "inapakizwa...", + "I18N_SIGNUP_PAGE_TITLE": "Jiunge na jumuiya - Oppia", + "I18N_SIGNUP_REGISTRATION": "Usajili", + "I18N_SIGNUP_SEND_ME_NEWS": "Nitumie habari na masasisho kuhusu tovuti", + "I18N_SIGNUP_SITE_DESCRIPTION": "<[sitename]> ni mchanganyiko wazi wa nyenzo za kujifunzia. Nyenzo zote zilizo juu yake zinaweza kutumika tena kwa uhuru na zinaweza kushirikiwa.", + "I18N_SIGNUP_SITE_OBJECTIVE": "<[sitename]> ipo ili kukuza uundaji na uboreshaji unaoendelea wa seti ya nyenzo za kujifunza za ubora wa juu ambazo zinapatikana bila malipo kwa mtu yeyote.", + "I18N_SIGNUP_UPDATE_WARNING": "Tafadhali kumbuka kuwa hivi majuzi tumesasisha Sheria na Masharti yetu.", + "I18N_SIGNUP_USERNAME": "Jina la mtumiaji", + "I18N_SIGNUP_USERNAME_EXPLANATION": "Jina lako la mtumiaji litaonyeshwa kando ya michango yako.", + "I18N_SIGNUP_WHY_LICENSE": "Kwa nini CC-BY-SA?", + "I18N_SOLICIT_ANSWER_DETAILS_FEEDBACK": "Sawa, sasa turudi kwenye jibu lako.", + "I18N_SOLICIT_ANSWER_DETAILS_QUESTION": "Unaweza kueleza kwa nini umechagua jibu hili?", + "I18N_SOLUTION_EXPLANATION_TITLE": "Maelezo:", + "I18N_SOLUTION_TITLE": "Suluhisho", + "I18N_SPLASH_BENEFITS_ONE": "Kujifunza kwa kibinafsi", + "I18N_SPLASH_BENEFITS_THREE": "Masomo Rahisi-Kufuata", + "I18N_SPLASH_BENEFITS_TITLE": "Faida Zetu", + "I18N_SPLASH_BENEFITS_TWO": "Masomo yanayotegemea Hadithi", + "I18N_SPLASH_FIRST_EXPLORATION_DESCRIPTION": "Masomo ya Oppia, pia yanajulikana kama uchunguzi, hutoa uzoefu wa kina zaidi kuliko video tuli au maandishi, kusaidia watumiaji kujifunza kwa kufanya.", + "I18N_SPLASH_FOR_STUDENTS": "Kwa Wanafunzi", + "I18N_SPLASH_FOR_TEACHERS": "Kwa Walimu", + "I18N_SPLASH_FOR_VOLUNTEERS": "Kwa Watu wa Kujitolea", + "I18N_SPLASH_ICON_ONE_TEXT": "Watumiaji Milioni 1+", + "I18N_SPLASH_ICON_THREE_TEXT": "<[lessonCount]> Masomo Yanayoratibiwa", + 
"I18N_SPLASH_ICON_TWO_TEXT": "Inapatikana katika <[languageCount]>+ Lugha", + "I18N_SPLASH_JAVASCRIPT_ERROR_DESCRIPTION": "Oppia ni jukwaa lisilolipishwa la kujifunza chanzo wazi lililojaa shughuli shirikishi zinazoitwa 'ugunduzi'. Cha kusikitisha ni kwamba, Oppia inahitaji JavaScript iwashwe kwenye kivinjari chako ili kufanya kazi vizuri na kivinjari chako kimezimwa JavaScript. Ikiwa unahitaji usaidizi wa kuwezesha JavaScript, \">bofya hapa.", + "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "Asante.", + "I18N_SPLASH_JAVASCRIPT_ERROR_TITLE": "Tunahitaji JavaScript katika Kivinjari chako", + "I18N_SPLASH_LEARN_MORE": "Jifunze zaidi", + "I18N_SPLASH_PAGE_TITLE": "Opia | Masomo ya Bila Malipo, Mkondoni na Maingiliano kwa Yeyote", + "I18N_SPLASH_SECOND_EXPLORATION_DESCRIPTION": "Ugunduzi ni rahisi kuunda. Zinarekebishwa kwa urahisi kulingana na maoni ya mwanafunzi binafsi na mitindo ya uzoefu wa wanafunzi wako ulimwenguni kote.", + "I18N_SPLASH_SITE_FEEDBACK": "Maoni ya Tovuti", + "I18N_SPLASH_START_CONTRIBUTING": "Anza Kuchangia", + "I18N_SPLASH_START_LEARNING": "Anza Kujifunza", + "I18N_SPLASH_START_TEACHING": "Anza Kufundisha", + "I18N_SPLASH_STUDENTS_CONTENT": "Oppia hutambua majibu ya kawaida yasiyo sahihi na hutoa maoni yaliyolengwa, ili wanafunzi wapate matumizi yaliyobinafsishwa. Masomo yetu huwaweka wanafunzi kushirikishwa kupitia wahusika wanaocheza, na kutumia mikakati tofauti ili kuimarisha maarifa yao. 
Angalia masomo yetu ya hesabu na matokeo yaliyothibitishwa!", + "I18N_SPLASH_STUDENTS_TITLE": "Furaha na Kujifunza kwa ufanisi", + "I18N_SPLASH_STUDENT_DETAILS_1": "- Mira, Mwanafunzi, Palestina", + "I18N_SPLASH_STUDENT_DETAILS_2": "- Dheeraj, Mwanafunzi, India", + "I18N_SPLASH_STUDENT_DETAILS_3": "- Sama, Mwanafunzi, Palestina", + "I18N_SPLASH_STUDENT_DETAILS_4": "- Gaurav, Mwanafunzi, India", + "I18N_SPLASH_SUBTITLE": "Elimu bora inayoshirikisha na yenye ufanisi kwa wote", + "I18N_SPLASH_TEACHERS_CONTENT": "Ukiwa na mfumo wa kuunda maudhui wa Oppia, unaweza kubinafsisha umbo la somo kwa wanafunzi wako kwa urahisi. Unda na ushiriki masomo kuhusu mada unazozipenda sana.", + "I18N_SPLASH_TEACHERS_TITLE": "Gawanya Maarifa Yako kwa Urahisi", + "I18N_SPLASH_TESTIMONIAL_1": "\"Nilifurahiya sana kucheza somo, sikuhisi kuchoka na ninahisi kama ninaelewa nambari hasi sasa\"", + "I18N_SPLASH_TESTIMONIAL_2": "\"Nilifurahiya sana wakati nikijibu maswali kwa sababu yalikuwa na maumbo na picha nyingi za kupendeza. Picha zilifanya iwe rahisi kuelewa mada pia!", + "I18N_SPLASH_TESTIMONIAL_3": "\"Nilijifunza uwiano shuleni lakini kwa kutumia Oppia niliwasilishwa kwa mambo mengi mapya kama vile uwiano wa vitu vitatu na kuchanganya uwiano\"", + "I18N_SPLASH_TESTIMONIAL_4": "\"Nilijifunza hesabu nyingi mpya na ilikuwa rahisi sana\"", + "I18N_SPLASH_THIRD_EXPLORATION_DESCRIPTION": "Oppia hukuruhusu kuunda na kushiriki ugunduzi kwenye anuwai ya masomo, iliyopunguzwa tu na mawazo yako.", + "I18N_SPLASH_TITLE": "Elimu Bure kwa Kila Mtu", + "I18N_SPLASH_VOLUNTEERS_CONTENT": "Haijalishi wewe ni nani, unaweza kupata makao huko Oppia. Kila wakati, tunahitaji watu zaidi ili kuboresha masomo kwa kupendekeza maswali, kuchangia michoro, au kutafsiri masomo.", + "I18N_SPLASH_VOLUNTEERS_TITLE": "Inaendeshwa na Jumuiya", + "I18N_START_HERE": "Bofya hapa ili kuanza!", + "I18N_STORY_3M5VBajMccXO_DESCRIPTION": "Katika hadithi hii, tutaungana na Mathayo anapotembelea duka la mikate kununua keki. 
Kwa bahati mbaya, hana pesa za kutosha kwa keki kamili. Kwa hivyo, Bw. Baker anamsaidia kwa kugawanya keki iliyochaguliwa na Matthew katika vipande vidogo ambavyo anaweza kumudu. Nini kitatokea baadaye? Cheza masomo ili kujua!", + "I18N_STORY_3M5VBajMccXO_TITLE": "Mathayo Anatembelea Duka la kuoka mkate", + "I18N_STORY_JhiDkq01dqgC_DESCRIPTION": "Jiunge na Ava na baba yake wanapoenda kwenye uwanja wa burudani. Wasaidie kwa kutumia ujuzi wako wa misemo na milinganyo kutatua matatizo yanayowakabili!", + "I18N_STORY_JhiDkq01dqgC_TITLE": "Siku katika Bustani ya Burudani", + "I18N_STORY_Qu6THxP29tOy_DESCRIPTION": "Jifunze jinsi ya kuongeza na kupunguza pamoja na Maya, Omar na babu yao, wanapotengeneza pizza pamoja!", + "I18N_STORY_Qu6THxP29tOy_TITLE": "Maya, Omar na Malik wanatengeneza pizza!", + "I18N_STORY_RRVMHsZ5Mobh_DESCRIPTION": "Katika hadithi hii, tutafuatana na Jaime na dada yake Nic wanapojifunza jinsi ya kuwakilisha na kusoma thamani ya nambari.", + "I18N_STORY_RRVMHsZ5Mobh_TITLE": "Matukio ya Jaime kwenye Ukumbi wa michezo", + "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - Imekamilika!", + "I18N_STORY_VIEWER_PAGE_TITLE": "Jifunze <[topicName]> | <[storyTitle]> | Oppia", + "I18N_STORY_ialKSV0VYV0B_DESCRIPTION": "Kutana na James na mjomba wake wanapojua jinsi wanavyoweza kutumia uwiano kutengeneza vinywaji vitamu!", + "I18N_STORY_ialKSV0VYV0B_TITLE": "Matukio ya Smoothie ya James", + "I18N_STORY_rqnxwceQyFnv_DESCRIPTION": "Jiunge na Nina anapotumia mbinu za mgawanyiko kuwasaidia Mama yake na Sandra sokoni!", + "I18N_STORY_rqnxwceQyFnv_TITLE": "Nina Atembelea Soko", + "I18N_STORY_vfJDB3JAdwIx_DESCRIPTION": "Jiunge na Aria na babake Omar wanapotumia mbinu za kuzidisha kupanda mbegu kwenye bustani yao!", + "I18N_STORY_vfJDB3JAdwIx_TITLE": "Aria anataka kupanda bustani", + "I18N_SUBSCRIBE_BUTTON_TEXT": "Jisajili", + "I18N_SUBTOPIC_0abdeaJhmfPm_adding-fractions_TITLE": "Kuongeza Sehemu za Hesabu", + "I18N_SUBTOPIC_0abdeaJhmfPm_comparing-fractions_TITLE": 
"Kulinganisha Sehemu za Hesabu", + "I18N_SUBTOPIC_0abdeaJhmfPm_dividing-fractions_TITLE": "Kugawanya Sehemu za Hesabu", + "I18N_SUBTOPIC_0abdeaJhmfPm_equivalent-fractions_TITLE": "Sehemu za Hesabu Sawa", + "I18N_SUBTOPIC_0abdeaJhmfPm_fractions-of-a-group_TITLE": "Sehemu za hesabu za Kikundi", + "I18N_SUBTOPIC_0abdeaJhmfPm_mixed-numbers_TITLE": "Nambari Mchanganyiko", + "I18N_SUBTOPIC_0abdeaJhmfPm_multiplying-fractions_TITLE": "Kuzidisha Sehemu za Hesabu", + "I18N_SUBTOPIC_0abdeaJhmfPm_number-line_TITLE": "Mstari wa Nambari", + "I18N_SUBTOPIC_0abdeaJhmfPm_subtracting-fractions_TITLE": "Kutoa Sehemu za Hesabu", + "I18N_SUBTOPIC_0abdeaJhmfPm_what-is-a-fraction_TITLE": "Sehemu ya Hesabu Ni Nini?", + "I18N_SUBTOPIC_5g0nxGUmx5J5_calculations-with-ratios_TITLE": "Hesabu za Uwiano", + "I18N_SUBTOPIC_5g0nxGUmx5J5_combining-ratios_TITLE": "Kuchanganya uwiano", + "I18N_SUBTOPIC_5g0nxGUmx5J5_equivalent-ratios_TITLE": "Uwiano sawa", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "Uwiano Ni Nini?", + "I18N_SUBTOPIC_C4fqwrvqWpRm_basic-concepts_TITLE": "Dhana za Msingi za Kuzidisha", + "I18N_SUBTOPIC_C4fqwrvqWpRm_memorizing-expressions_TITLE": "Kukumbuka Hesabu za Kuzidisha", + "I18N_SUBTOPIC_C4fqwrvqWpRm_multiplication-techniques_TITLE": "Mbinu za kuzidisha", + "I18N_SUBTOPIC_C4fqwrvqWpRm_rules-to-simplify_TITLE": "Sheria za kurahisisha kuzidisha", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Ujuzi Ufuatao:", + "I18N_SUBTOPIC_VIEWER_PAGE_TITLE": "Kagua <[subtopicTitle]> | Oppia", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "Ujuzi Uliopita:", + "I18N_SUBTOPIC_dLmjjMDbCcrf_algebraic-expressions_TITLE": "Kurahisisha hesabu ya aljebra", + "I18N_SUBTOPIC_dLmjjMDbCcrf_modelling-scenarios_TITLE": "Kuiga matukio ya ulimwengu halisi kwa kutumia milinganyo", + "I18N_SUBTOPIC_dLmjjMDbCcrf_order-of-operations_TITLE": "Utaratibu wa Operesheni", + "I18N_SUBTOPIC_dLmjjMDbCcrf_problem-solving_TITLE": "Mikakati ya Kutatua Matatizo", + "I18N_SUBTOPIC_dLmjjMDbCcrf_solving-equations_TITLE": "Kudhibiti 
na kutatua milinganyo", + "I18N_SUBTOPIC_dLmjjMDbCcrf_variables_TITLE": "Kuwakilisha Visivyojulikana kwa Vigezo", + "I18N_SUBTOPIC_iX9kYCjnouWN_comparing-numbers_TITLE": "Kulinganisha Nambari", + "I18N_SUBTOPIC_iX9kYCjnouWN_naming-numbers_TITLE": " Majina ya Nambari", + "I18N_SUBTOPIC_iX9kYCjnouWN_place-names-and-values_TITLE": "Majina ya Mahali na Thamani Yake", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE": "Kubadilisha Nambari", + "I18N_SUBTOPIC_qW12maD4hiA8_basic-concepts_TITLE": "Dhana za kimsingi za Kugawanya", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "Kutatua tatizo", + "I18N_SUBTOPIC_qW12maD4hiA8_techniques-of-division_TITLE": "Mbinu za Kugawanya", + "I18N_SUBTOPIC_sWBXKH4PZcK6_adding-numbers_TITLE": "Kuongeza Nambari", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE": "Uhusiano Kati ya Kuongeza na Kutoa", + "I18N_SUBTOPIC_sWBXKH4PZcK6_estimation_TITLE": "Makadirio", + "I18N_SUBTOPIC_sWBXKH4PZcK6_sequences _TITLE": "Mifuatano", + "I18N_SUBTOPIC_sWBXKH4PZcK6_subtracting-numbers_TITLE": "Kuondoa Nambari", + "I18N_TEACH_BENEFITS_ONE": "Mafunzo ya Ufanisi, ya Ubora wa Vizazi vyote", + "I18N_TEACH_BENEFITS_THREE": "Kila Wakati ni Bure na Rahisi kutumia", + "I18N_TEACH_BENEFITS_TITLE": "Faida Zetu", + "I18N_TEACH_BENEFITS_TWO": "Masomo yanayofurahisha yenye msingi wa hadithi", + "I18N_TEACH_PAGE_ACTION_START_LEARNING": "Anza Kujifunza", + "I18N_TEACH_PAGE_CLASSROOM_BUTTON": "TEMBELEA DARASA", + "I18N_TEACH_PAGE_CLASSROOM_CONTENT": "Darasani, unaweza kupata seti ya masomo ambayo timu ya Oppia imebuni na kufanyia majaribio ili kuhakikisha kuwa yanafaa na ya kufurahisha wanafunzi wote. 
Masomo yote yamekaguliwa na walimu na wataalam, kwa hivyo unaweza kuhisi kuwa na uhakika kwamba wanafunzi wako wanapata elimu bora, huku wakijifunza kwa kasi yao wenyewe.", + "I18N_TEACH_PAGE_CLASSROOM_TITLE": "Jifunze kutoka kwa Masomo Yaliyojaribiwa na Kuthibitishwa ya Oppia", + "I18N_TEACH_PAGE_CONTENT": "Oppia ni mbinu mpya inayovutia ya kujifunza mtandaoni ambayo hufanywa mahususi ili kuhakikisha kuwa kila mtu anapata elimu bora.", + "I18N_TEACH_PAGE_HEADING": "Oppia kwa Wazazi, Walimu, na Walezi", + "I18N_TEACH_PAGE_LIBRARY_BUTTON": "ANGALIA MAKTABA", + "I18N_TEACH_PAGE_LIBRARY_CONTENT": "Walimu na wanajamii kote ulimwenguni hutumia jukwaa la kuunda somo la Oppia kama njia ya kuunda na kugawa masomo. Unaweza kupata zaidi ya masomo 20,000 kwa masomo 17 tofauti katika maktaba yetu ya Ugunduzi, na labda utatiwa moyo kuunda yako mwenyewe!", + "I18N_TEACH_PAGE_LIBRARY_TITLE": "Gundua Mafunzo Yanayotolewa na Jumuiya", + "I18N_TEACH_PAGE_SIX_TITLE": "Anza Kujifunza Leo", + "I18N_TEACH_PAGE_TITLE": "Mwongozo wa Oppia kwa Wazazi na Walimu | Oppia", + "I18N_TEACH_STUDENT_DETAILS_1": "Riya Sogani", + "I18N_TEACH_STUDENT_DETAILS_2": "Wala Awad", + "I18N_TEACH_STUDENT_DETAILS_3": "Himanshu Taneja, Kurukshetra, India", + "I18N_TEACH_STUDENT_DETAILS_4": "Yamama, Mwezeshaji, Palestina", + "I18N_TEACH_TESTIMONIAL_1": "\"Ninashukuru kupata fursa ya kuelimisha watoto wa India wasiojiweza na kuziba mapengo katika uelewa wao wa dhana muhimu za hesabu. Kutazama hali ya kujiamini kwa wanafunzi hawa ikiongezeka kadri walivyojifunza kulifaa saa za ziada.”", + "I18N_TEACH_TESTIMONIAL_2": "\"Oppia ni ya kwanza ya aina yake! Inasaidia wanafunzi kujifunza yote wanayohitaji kuhusu mada mahususi kwa njia ya kuvutia na ya kuvutia; pia inawahimiza kutumia vifaa mahiri kwa manufaa yao wenyewe.”", + "I18N_TEACH_TESTIMONIAL_3": "\"Sikuwahi kutarajia wanafunzi kujifunza teknolojia na kufanya masomo ya hesabu haraka sana. 
Ni mara yao ya kwanza kufichua kwa smarttech na walikuwa wanajitahidi sana kuzishughulikia mwanzoni. Sasa, ninafurahi sana kuwaona wakifanya masomo ya Opia hata kabla sijaingia darasani!”", + "I18N_TERMS_PAGE_TITLE": "Masharti ya Matumizi | Oppia", + "I18N_THANKS_PAGE_BREADCRUMB": "Asante", + "I18N_THANKS_PAGE_TITLE": "Asante | Oppia", + "I18N_TIME_FOR_BREAK_BODY_1": "Inaonekana unatuma majibu haraka sana. Unaanza kuchoka?", + "I18N_TIME_FOR_BREAK_BODY_2": "Ikiwa hivyo, unaweza kupumzika! Unaweza kurudi baadaye.", + "I18N_TIME_FOR_BREAK_FOOTER": "Niko tayari kuendelea na somo", + "I18N_TIME_FOR_BREAK_TITLE": "Muda wa mapumziko?", + "I18N_TOPIC_0abdeaJhmfPm_DESCRIPTION": "Mara nyingi utahitaji kuzungumza juu ya sehemu za kitu: kichocheo kinaweza kuomba nusu kikombe cha unga, au unaweza kumwaga sehemu ya chupa ya maziwa. Katika mada hii, utajifunza jinsi ya kutumia sehemu za hesabu ili kuelewa na kuelezea hali kama hizi.", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "Sehemu za hesabu", + "I18N_TOPIC_5g0nxGUmx5J5_DESCRIPTION": "Uwiano ni muhimu kwa kuhesabu ni kiasi gani cha viungo vya kutumia ikiwa una mapishi ya watu wanne lakini unataka kupika kwa wawili. Katika mada hii, utajifunza jinsi ya kutumia uwiano ili kulinganisha kwa urahisi ukubwa wa kitu kimoja hadi kingine.", + "I18N_TOPIC_5g0nxGUmx5J5_TITLE": "Uwiano na kufikiria kwa Usawa", + "I18N_TOPIC_C4fqwrvqWpRm_DESCRIPTION": "Ikiwa ulinunua masanduku 60 ya keki tano, ungekuwa na keki ngapi kwa jumla? 
Katika mada hii, utajifunza jinsi ya kutumia kuzidisha kutatua matatizo kama haya (bila kulazimika kuongeza nambari nyingi pamoja kila wakati!).", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "Kuzidisha", + "I18N_TOPIC_LANDING_PAGE_TITLE": "<[topicTitle]> | <[topicTagline]> | Oppia", + "I18N_TOPIC_LEARN": "Jifunze", + "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 lesson} other{# lessons}}", + "I18N_TOPIC_TITLE": "Mada", + "I18N_TOPIC_VIEWER_CHAPTER": "Sura", + "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{1 chapter} other{# chapters}}", + "I18N_TOPIC_VIEWER_COMING_SOON": "\nInaanza Hivi Karibuni", + "I18N_TOPIC_VIEWER_COMING_SOON_LESSONS": "Rudi baadaye wakati masomo yanapatikana kwa mada hii.", + "I18N_TOPIC_VIEWER_COMING_SOON_PRACTICE": "Rudi baadaye wakati maswali ya mazoezi yanapatikana kwa mada hii.", + "I18N_TOPIC_VIEWER_DESCRIPTION": "Maelezo", + "I18N_TOPIC_VIEWER_LESSON": "Somo", + "I18N_TOPIC_VIEWER_LESSONS": "Masomo", + "I18N_TOPIC_VIEWER_LESSONS_UNAVAILABLE": "Rudi baadaye wakati masomo yanapatikana kwa mada hii.", + "I18N_TOPIC_VIEWER_MASTER_SKILLS": "Ujuzi Mkuu wa <[topicName]>", + "I18N_TOPIC_VIEWER_NO_QUESTION_WARNING": "Bado hakuna maswali yaliyoundwa kwa mada ndogo iliyochaguliwa.", + "I18N_TOPIC_VIEWER_PAGE_TITLE": "<[topicName]> | <[pageTitleFragment]> | Oppia", + "I18N_TOPIC_VIEWER_PRACTICE": "Fanya mazoezi", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_MESSAGE": "Kipengele cha Mazoezi bado kiko katika toleo la beta na kinapatikana kwa Kiingereza pekee. 
Je, ungependa kuendelea?", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_TITLE": "Thibitisha Lugha ya Mazoezi", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_TAG": "(Beta)", + "I18N_TOPIC_VIEWER_QUESTIONS_UNAVAILABLE": "Rudi baadaye wakati maswali ya mazoezi yanapatikana kwa mada hii.", + "I18N_TOPIC_VIEWER_REVISION": "Marudio", + "I18N_TOPIC_VIEWER_SELECT_SKILLS": "Chagua ujuzi kutoka kwa masomo ya <[topicName]> ungependa kufanya mazoezi.", + "I18N_TOPIC_VIEWER_SKILL": "Ujuzi", + "I18N_TOPIC_VIEWER_SKILLS": "Ujuzi", + "I18N_TOPIC_VIEWER_START_PRACTICE": "Anza", + "I18N_TOPIC_VIEWER_STORIES": "Hadithi", + "I18N_TOPIC_VIEWER_STORIES_HEAD": "Hadithi unazoweza kucheza", + "I18N_TOPIC_VIEWER_STORY": "Hadithi", + "I18N_TOPIC_VIEWER_STUDY_SKILLS": "Ujuzi wa Kusoma wa <[topicName]>", + "I18N_TOPIC_VIEWER_STUDY_SKILLS_SUBTITLE": "Tumia Kadi zifuatazo za Maoni kukusaidia kujifunza ujuzi kuhusu <[topicName]>.", + "I18N_TOPIC_VIEWER_VIEW_ALL": "Tazama zote", + "I18N_TOPIC_VIEWER_VIEW_LESS": "Tazama machache", + "I18N_TOPIC_dLmjjMDbCcrf_DESCRIPTION": "Mara nyingi utahitaji kutatua matatizo na nambari zisizojulikana -- kwa mfano, ikiwa umenunua bidhaa inayouzwa na ungependa kujua bei halisi. Katika mada hii, utajifunza jinsi ya kufanya hivi kwa milinganyo, misemo na fomula.", + "I18N_TOPIC_dLmjjMDbCcrf_TITLE": "Maswali ya hesabu na Milinganyo", + "I18N_TOPIC_iX9kYCjnouWN_DESCRIPTION": "Je, unajua kwamba nambari zote zinazowezekana za vitu zinaweza kuonyeshwa kwa kutumia tarakimu kumi tu (0,1,2,3,...,9)? Katika mada hii, tutajifunza jinsi tunavyoweza kutumia thamani za mahali kufanya hivyo, na kuona ni kwa nini \"5\" ina thamani tofauti katika \"25\" na \"2506\".", + "I18N_TOPIC_iX9kYCjnouWN_TITLE": "Thamani ya Sehemu", + "I18N_TOPIC_qW12maD4hiA8_DESCRIPTION": "Ikiwa ulikuwa na nyanya thelathini na mbili za ugawanye kati ya watu wanne, je, kila mtu anapaswa kupata nyanya ngapi? 
Katika mada hii, utajifunza kutumia divisheni ili kujua jinsi ya kugawanya kitu katika vipande.", + "I18N_TOPIC_qW12maD4hiA8_TITLE": "Mgawanyo", + "I18N_TOPIC_sWBXKH4PZcK6_DESCRIPTION": "Ukiwa na mayai manne na rafiki yako akakupa 37 zaidi, utakuwa na mangapi kwa jumla? Vipi ikiwa umepoteza nane? Katika mada hii, utajifunza jinsi ya kutatua matatizo kama haya kwa ujuzi wa kimsingi wa kuongeza na kutoa.", + "I18N_TOPIC_sWBXKH4PZcK6_TITLE": "Kuongeza na Kutoa", + "I18N_TOPNAV_ABOUT": "Kuhusu", + "I18N_TOPNAV_ABOUT_OPPIA": "Kuhusu Opia", + "I18N_TOPNAV_ADMIN_PAGE": "Ukurasa wa Msimamizi", + "I18N_TOPNAV_BLOG": "Blogi", + "I18N_TOPNAV_BLOG_DASHBOARD": "Dashibodi ya Blogi", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Hesabu ya Msingi", + "I18N_TOPNAV_CONTACT_US": "Wasiliana nasi", + "I18N_TOPNAV_CONTACT_US_DESCRIPTION": "Tuko hapa kukusaidia kwa maswali yoyote uliyo nayo.", + "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Dashibodi ya Wachangiaji", + "I18N_TOPNAV_CREATOR_DASHBOARD": "Dashibodi ya Watayarishi", + "I18N_TOPNAV_DONATE": "Changa", + "I18N_TOPNAV_DONATE_DESCRIPTION": "Michango yako inasaidia kutoa elimu bora kwa wote.", + "I18N_TOPNAV_FORUM": "Jukwaa", + "I18N_TOPNAV_GET_INVOLVED": "Jihusishe", + "I18N_TOPNAV_GET_STARTED": "Anza", + "I18N_TOPNAV_HOME": "Mwanzo", + "I18N_TOPNAV_LEARN": "Jifunze", + "I18N_TOPNAV_LEARNER_DASHBOARD": "Dashibodi ya Mwanafunzi", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "Masom mazuri ya wanaoanza kukusaidia kuanza katika hesabu.", + "I18N_TOPNAV_LEARN_HEADING": "Njia za kujifunza zaidi", + "I18N_TOPNAV_LEARN_LINK_1": "Tazama Masomo Yote", + "I18N_TOPNAV_LEARN_LINK_2": "Endelea Kujifunza", + "I18N_TOPNAV_LIBRARY": "Maktaba ya Jumuiya", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "Nyenzo za ziada zilizoundwa na jumuiya ili kukusaidia kujifunza zaidi.", + "I18N_TOPNAV_LOGOUT": "Toka", + "I18N_TOPNAV_MODERATOR_PAGE": "Ukurasa wa Msimamizi", + "I18N_TOPNAV_OPPIA_FOUNDATION": "Shirika la Oppia", + "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Kitabu cha kucheza cha 
Ushiriki", + "I18N_TOPNAV_PARTNERSHIPS": "Shule na Mashirika", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "Shirikiana na ulete Oppia kwenye shule yako, jumuiya au eneo lako.", + "I18N_TOPNAV_PREFERENCES": "Mapendekezo", + "I18N_TOPNAV_SIGN_IN": "Ingia", + "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Ingia kwa kutumia Google", + "I18N_TOPNAV_TEACH_WITH_OPPIA": "Fundisha kwa Oppia", + "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "Dashibodi ya Mada na Ujuzi", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "Jiunge na timu yetu ya kimataifa ili kuunda na kuboresha masomo.", + "I18N_TOTAL_SUBSCRIBERS_TEXT": "Una jumla ya wanaofuatilia <[totalSubscribers]>.", + "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Jiondoe", + "I18N_VIEW_ALL_TOPICS": "Tazama mada zote za <[classroomName]>", + "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Ujitolee", + "I18N_VOLUNTEER_PAGE_TITLE": "Ujitolee | Oppia", + "I18N_WARNING_MODAL_DESCRIPTION": "Hii itaonyesha suluhisho kamili. Una uhakika?", + "I18N_WARNING_MODAL_TITLE": "Onyo!", + "I18N_WORKED_EXAMPLE": "Mifano iliyofanyiwa kazi", + "I18N_YES": "Ndiyo" +} diff --git a/assets/i18n/tr.json b/assets/i18n/tr.json index c1342b1b2acc..3b6fe8df4bab 100644 --- a/assets/i18n/tr.json +++ b/assets/i18n/tr.json @@ -1,4 +1,6 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Oppia Vakfı Hakkında", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "Oppia Vakfı Hakkında | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Keşif Oluştur", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "ilgilendiğiniz bir konu hakkında.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Geri bildirim al", @@ -25,7 +27,7 @@ "I18N_ABOUT_PAGE_EXPLORE_LESSONS": "Topluluk Tarafından Oluşturulan Dersleri Keşfedin", "I18N_ABOUT_PAGE_EXPLORE_LESSONS_CONTENT": "Dünyanın dört bir yanındaki eğitimciler ve topluluk üyeleri, dersler oluşturmanın ve paylaşmanın bir yolu olarak Oppia'nın ders oluşturma platformunu kullanır. 
Keşif kitaplığımızda 17 farklı konu için 20.000'den fazla ders bulabilir ve belki de kendinizinkini yaratmak için ilham alırsınız!", "I18N_ABOUT_PAGE_FOUNDATION_TAB_DONATE_BUTTON": "Bağış", - "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "Katılın", + "I18N_ABOUT_PAGE_FOUNDATION_TAB_GET_INVOLVED_BUTTON": "Katıl", "I18N_ABOUT_PAGE_FOUNDATION_TAB_HEADING": "Oppia Vakfı", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_1": "Oppia web sitesi ve kaynak kodu, California Eyaletinde kayıtlı, vergiden muaf 501(c)(3) kâr amacı gütmeyen bir kuruluş olan Oppia Vakfı tarafından desteklenmektedir.", "I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_2": "Vakıf, herkesin istediği her şeyi eğlenceli ve etkili bir şekilde öğrenmesini sağlama misyonu doğrultusunda çalışmak için dünyanın dört bir yanından katkıda bulunanların ve bağışçıların cömert desteğini bekliyor.", @@ -64,17 +66,53 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "Öğretmenler için rehber", "I18N_ACTION_TIPS_FOR_PARENTS": "Ebeveynler ve vasiler için ipuçları", "I18N_ACTION_VISIT_CLASSROOM": "Sınıfı ziyaret edin", + "I18N_ATTRIBUTION_HTML_STEP_ONE": "HTML'i kopyala ve yapıştır", + "I18N_ATTRIBUTION_HTML_STEP_TWO": "Bağlantının <[linkText]> olarak gözüktüğünden emin olun", + "I18N_BLOG_CARD_PREVIEW_HEADING": "Blog Kartı Önizlemesi", + "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "Yeni Blog Gönderisi Oluştur", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "Henüz herhangi bir hikaye oluşturmamışsınız gibi gözüküyor!", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Yeni Gönderi", + "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Taslaklar", + "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Yayımlandı", + "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Önizleme görseli ekle", + "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Beden", + "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "İptal", + "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "Sil", + "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "Önizleme görselini düzenle", + "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "En son 
kaydedilme:", + "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "Yayımla", + "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "Tamam", + "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "Taslak olarak kaydet", + "I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "Önizleme", + "I18N_BLOG_POST_EDITOR_TAGS_HEADING": "Etiketler", + "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "daha fazla etiket hala eklenebilir.", + "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "Başlık", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "Bir dosya seçin veya buraya sürükleyin", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "Hata: Görsel dosyası okunamadı.", + "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "Önizleme görseli ekle", + "I18N_BLOG_POST_UNTITLED_HEADING": "Başlıksız", + "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "Bu kart içeriği çok uzun. Lütfen kaydetmek için 4500 karakterin altında tutunuz.", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "Bu kart oldukça uzundur ve öğrenciler ilgilerini kaybedebilir. Kısaltmayı veya iki karta bölmeyi düşünün.", + "I18N_CHAPTER_COMPLETION": "Bu bölümü tamamladığınız için tebrikler!", "I18N_CLASSROOM_CALLOUT_BUTTON": "Keşfet", "I18N_CLASSROOM_CALLOUT_HEADING_1": "Matematik Temelleri", "I18N_CLASSROOM_CALLOUT_HEADING_2": "Tanıtım: Oppia Sınıfı", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Yepyeni Oppia Sınıfındaki ilk kapsamlı kursa göz atın! 
Eğitimciler tarafından inceleyen derlenmiş dersler, Yer Değerlerinden Çarpma ve Bölmeye kadar çeşitli konularda temel matematik becerilerinde ustalaşabilirsiniz.", + "I18N_CLASSROOM_MATH_TITLE": "Matematik", "I18N_CLASSROOM_PAGE_COMING_SOON": "Yakında", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Kurs Ayrıntıları", "I18N_CLASSROOM_PAGE_HEADING": "Oppia Sınıfı", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Topluluk Tarafından Yapılan Daha Fazla Dersi Keşfedin", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Topluluk Kitaplığımızda arama yapın", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "İşlenmiş Konular", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "Başla", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Devam", + "I18N_COLLECTION_PLAYER_PAGE_FINISHED": "Koleksiyonu bitirdiniz! Aşağıdaki keşifleri tekrar oynamaktan çekinmeyin.", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "Bir keşfi önizlemek için bir simgenin üzerine gelin.", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "Bu koleksiyona keşif eklenmedi.", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "<[collectionTitle]> - Oppia", + "I18N_COMING_SOON": "Çok yakında!", + "I18N_CONTACT_PAGE_BREADCRUMB": "İletişim", "I18N_CONTACT_PAGE_HEADING": "Katılın!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Oppia projesine yardım ettiğiniz için teşekkür ederiz!", "I18N_CONTACT_PAGE_PARAGRAPH_10": "Oppia projesi tamamen açık kaynaktır ve GitHub adresinde büyük bir gönüllü topluluğu tarafından sağlanır. Kodlayıcılara, tasarımcılara ve metin yazarlarına ihtiyacımız var, bu yüzden yardım etmek isterseniz, lütfen bize bir satır bırakın! Başlamanın en iyi yolu GitHub viki sayfamızdaki talimatları takip etmektir.", @@ -102,7 +140,11 @@ "I18N_CONTACT_PAGE_PARAGRAPH_8": "Bu yüzden, dünyadaki öğrenciler için ücretsiz, etkili dersler oluşturmak istiyorsanız doğru yere geldiniz. yaratıcı öğreticilerimizi ve mevcut dersleri incelemenizi öneririz. Ve kendi dersinizi oluşturmaya başlayın. 
Ayrıca, derslerinizin büyük bir etkiye sahip olmasını sağlamak istiyorsanız, lütfen oluşturmanıza, test etmenize yardımcı olacağımız Oppia ile Öğretme programımıza başvurmayı düşünün. Ve keşiflerinizi optimum etki için iyileştirin.", "I18N_CONTACT_PAGE_PARAGRAPH_9": "Mevcut bir keşif gibi, ancak daha iyi olabilecek bir şey mi buldunuz? Herhangi bir keşifte doğrudan keşif sayfasından değişiklikler önerebilirsiniz. Sağ üst köşedeki kurşun kalem simgesini tıklamanız ve daha iyi olabileceğini düşündüğünüzü paylaşmanız yeterli. Dersi oluşturan kişi önerilerinizi alacak ve onları keşifle birleştirme fırsatı bulacak. Bu, özellikle de önerilerinizi keşif sırasında oynayarak öğrencilerin deneyimlerini temel alarak yapabilirsiniz.", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "Mevcut keşiflerin iyileştirilmesi", + "I18N_CONTACT_PAGE_TITLE": "İletişim | Oppia", "I18N_CONTINUE_REGISTRATION": "Kayıt İşlemine Devam Et", + "I18N_COOKIE_BANNER_ACKNOWLEDGE": "TAMAM", + "I18N_CORRECT_FEEDBACK": "Doğru!", + "I18N_CREATE_ACCOUNT": "Hesap Oluştur", "I18N_CREATE_ACTIVITY_QUESTION": "Ne oluşturmak istiyorsun?", "I18N_CREATE_ACTIVITY_TITLE": "Bir Etkinlik Oluştur", "I18N_CREATE_COLLECTION": "Koleksiyon Oluştur", @@ -126,11 +168,14 @@ "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TITLE": "Başlık", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_TOTAL_PLAYS": "Toplam oyun", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY_UNRESOLVED_ANSWERS": "Çözümlenmemiş Cevaplar", + "I18N_DASHBOARD_LESSONS": "Dersler", "I18N_DASHBOARD_OPEN_FEEDBACK": "Dönütleri aç", + "I18N_DASHBOARD_SKILL_PROFICIENCY": "Beceri Yeterliliği", "I18N_DASHBOARD_STATS_AVERAGE_RATING": "Ortalama puanı", "I18N_DASHBOARD_STATS_OPEN_FEEDBACK": "Geribildirim açın", "I18N_DASHBOARD_STATS_TOTAL_PLAYS": "Toplam oyun", "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "Aboneler", + "I18N_DASHBOARD_STORIES": "Hikayeler", "I18N_DASHBOARD_SUBSCRIBERS": "Aboneler", "I18N_DASHBOARD_SUGGESTIONS": "Öneriler", "I18N_DASHBOARD_TABLE_HEADING_EXPLORATION": "Keşif", @@ -152,16 +197,28 @@ 
"I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "Diğer sahipleri olan halka açık keşiflere ve koleksiyonlara yapılan taahhütler", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_3": "Konular, hikayeler, beceriler ve sorulara verilen taahhütler", "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "Silme işlemini onaylamak için lütfen aşağıdaki alana kullanıcı adınızı girin ve 'Hesabımı Sil' düğmesine basın. Bu eylem geri alınamaz.", - "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Bu işlem, bu kullanıcı hesabını ve bu hesapla ilişkili tüm özel verileri siler. Zaten herkese açık olan veriler anonimleştirilir, böylece bu hesapla ilişkilendirilemez. Aşağıda belirtilen kategorilerden bazıları hesabınız için geçerli olmayabilir.", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Bu eylem, bu kullanıcı hesabını ve ayrıca bu hesapla ilişkili tüm özel verileri silecektir. Hâlihazırda herkese açık olan veriler, yedek veriler (6 ay boyunca saklanan) dışında bu hesapla ilişkilendirilemeyecek şekilde anonimleştirilecektir. Aşağıda belirtilen kategorilerden bazıları hesabınız için geçerli olmayabilir.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Genel bakış", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "Silinecek veri türleri şunlardır:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "Anonim hale getirilecek veri türleri şunlardır:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "Ayrıca, başka sahibi olmayan yayınlanmış keşifler ve koleksiyonlar topluluk sahipliğine geçirilecektir.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "Hesap kaldırma işlemi hakkında sorularınız veya endişeleriniz varsa, lütfen privacy@oppia.org adresine bir e-posta gönderin.", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "Bu sizi Oppia hesabınızı silebileceğiniz bir sayfaya götürecek.", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "Hesabı Sil | Oppia", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Bir resmi buraya sürükleyin", "I18N_DIRECTIVES_UPLOAD_A_FILE": "Dosya yükle", "I18N_DONATE_PAGE_BREADCRUMB": "Bağış yap", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "Bağış | Olumlu Bir Etki 
Yaratın | Oppia", + "I18N_DONATE_PAGE_TITLE": "Oppia Foundation'a\nbağış yapın", + "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Oppia topluluğumuzdan haber alın", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "Az önce 1. bölümünüzü tamamladınız!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_2": "Az önce 5. bölümünüzü tamamladınız!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_3": "Az önce 10. bölümünüzü tamamladınız!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_4": "Az önce 25. bölümünüzü tamamladınız!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_5": "50. bölümünüzü tamamladınız!", + "I18N_END_CHAPTER_NEXT_CHAPTER_TEXT": "Bir sonraki derse!", + "I18N_END_CHAPTER_PRACTICE_SESSION_TEXT": "Yeni edindiğiniz becerilerinizi geliştirin!", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "İşte bundan sonra yapabilecekleriniz!", "I18N_ERROR_DISABLED_EXPLORATION": "Kapalı Konu Anlatımı", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Üzgünüm, tıkladığınız konu anlatımı şu anda kapalı durumdadır. Lütfen sonra tekrar deneyin.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Kapalı Konu Anlatımı - Oppia", @@ -174,10 +231,17 @@ "I18N_ERROR_MESSAGE_404": "Üzgünüz, tekrar tekrar baktık ancak o sayfayı bulamadık.", "I18N_ERROR_MESSAGE_500": "Büyük bir yanlışlık oldu. Ancak bu sizi hatanız değil. Sunucu kaynaklı bir hata meydana geldi.", "I18N_ERROR_NEXT_STEPS": "Şu anda yapılacak en iyi şey muhtemelen \">ana sayfaya dönmek olacaktır. Nitekim, bu sorun tekrarlarsa ve böyle olmaması gerektiğini düşünüyorsanız, lütfen bizi \" target=\"_blank\">sorun takipçisi üzerinden bilgilendirin. 
Bunun için üzgünüz.", +    "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "Hata <[statusCode]> | Oppia", +    "I18N_ERROR_PAGE_TITLE": "Hata <[statusCode]> - Oppia", "I18N_ERROR_PAGE_TITLE_400": "Hata 400 - Oppia", "I18N_ERROR_PAGE_TITLE_401": "Hata 401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "Hata 404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "Hata 500 - Oppia", +    "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "Oran Nedir?", +    "I18N_EXPLORATION_BJd7yHIxpqkq_TITLE": "Toplamanın Temelleri", +    "I18N_EXPLORATION_Jbgc3MlRiY07_TITLE": "Gerçek-Dünya Senaryolarını Modelleme", +    "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "Ekleme Nedir?", +    "I18N_EXPLORATION_aqJ07xrTFNLF_TITLE": "Sorunları Kutu Modelleri ile Çözme", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Anonim", "I18N_FOOTER_ABOUT": "Hakkında", "I18N_FOOTER_ABOUT_ALL_CAPS": "OPPIA HAKKINDA", @@ -191,8 +255,8 @@ "I18N_FOOTER_FORUM": "Forum", "I18N_FOOTER_GET_INVOLVED": "Dahil Ol", "I18N_FOOTER_GET_STARTED": "Başla", -    "I18N_FOOTER_OPPIA_FOUNDATION": "Oppia Kuruluşu", -    "I18N_FOOTER_PRIVACY_POLICY": "Gizlilik İlkesi", +    "I18N_FOOTER_OPPIA_FOUNDATION": "Oppia Vakfı", +    "I18N_FOOTER_PRIVACY_POLICY": "Gizlilik Politikası", "I18N_FOOTER_TEACH": "Oppia ile Öğret", "I18N_FOOTER_TEACH_LEARN_ALL_CAPS": "ÖĞRET/ÖĞREN", "I18N_FOOTER_TEACH_PAGE": "Ebeveynler/Öğretmenler için", @@ -201,11 +265,12 @@ "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "Lütfen, en düşük değeri <[minValue]> olan bir sayı girin.", "I18N_FORMS_TYPE_NUMBER_AT_MOST": "Lütfen, en yüksek değeri <[maxValue]> olan bir sayı girin.", "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "Lütfen geçerli bir ondalık sayı girin.", +    "I18N_GET_STARTED_PAGE_BREADCRUMB": "Başlayın", "I18N_GET_STARTED_PAGE_HEADING": "Başla!", "I18N_GET_STARTED_PAGE_PARAGRAPH_1": "Bir keşif oluşturmak kolay ve ücretsizdir. 
Bilginizi dünyanın dört bir yanındaki öğrencilerle paylaşın ve keşiflerinizin etkinliğini artırmak için kullanabileceğiniz geri bildirimler alın.", "I18N_GET_STARTED_PAGE_PARAGRAPH_10": "Ek olarak, öğrenciler araştırmanızı kullandıkça, yaptıkları genel hataları görebilirsiniz. Bazen bu, nerede karıştığı hakkında yeni görüşler ortaya çıkarabilir. Diğer öğrencilerin bu hataları tekrarlamaları muhtemel olduğunu düşünüyorsanız, araştırmayı ek geri bildirimle artırmak kolaydır. Öğreniciyi farklı bir basamağa bile gönderebilir veya başka bir soru sorarak \"daha derine inebilirsiniz\".", "I18N_GET_STARTED_PAGE_PARAGRAPH_11": "Oppia projesine katılmak ve ücretsiz, yüksek kaliteli evrensel eğitim misyonumuzu gerçekleştirmemize yardımcı olmak için, bizimle admin@oppia.org adresinden iletişime geçin. Veya gönüllü topluluğumuza katılmanın daha fazla yolunu keşfedin. Sizden haber almak için heyecanlıyız!", - "I18N_GET_STARTED_PAGE_PARAGRAPH_11_HEADING": "Katılın", + "I18N_GET_STARTED_PAGE_PARAGRAPH_11_HEADING": "Katıl", "I18N_GET_STARTED_PAGE_PARAGRAPH_2": "Başlamak için tek ihtiyacınız olan, öğretmek istediğiniz bir konudur. Küçük veya büyük herhangi bir konu hakkında bir keşif oluşturabilirsiniz. Bir keşif için ideal konu büyüklüğü, tek bir sınıfta ele alacağınız alandır. Ayrıca sırayla tamamlanması amaçlanan birden fazla ilgili keşifler oluşturabilirsiniz. Buna koleksiyon denir.", "I18N_GET_STARTED_PAGE_PARAGRAPH_2_HEADING": "Bir Konu Seç", "I18N_GET_STARTED_PAGE_PARAGRAPH_3": "Bir konu seçtiğinizde, sadece 'Oluştur'u tıklayın ve Google hesabınızla giriş yapın. Bir Google hesabınız yoksa, burada bir hesap oluşturabilirsiniz.", @@ -213,7 +278,7 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_4": "Bir keşif birden fazla adımdan oluşuyor. Her adım metin (örneğin yazılı bir açıklama), resim ve video içerebilir. Her adım, öğrenciye devam etmesi için denemesi gereken bir soru sunar. 
Çoktan seçmeli bir soru olabilir, içine bir şey yazmalarını gerektirebilir veya bir dizi başka etkileşimlerden biri olabilir.", "I18N_GET_STARTED_PAGE_PARAGRAPH_5": "Öğrenci soruyu cevapladığında, Oppia onlara geri bildirim verecek ve bir sonraki adıma devam etmelerine izin verecektir. Öğrencilerin Oppia'yı nasıl yaşadıklarını görmek için, bu keşiflerden birini deneyin:", "I18N_GET_STARTED_PAGE_PARAGRAPH_6": "Araştırma oluşturma hakkında daha fazla bilgiyi kullanıcı belgelerinde bulabilirsiniz.", - "I18N_GET_STARTED_PAGE_PARAGRAPH_7": "Araştırmanızı oluşturduktan ve öğrenenleri görmeye hazır olduğunuzda, sayfanın üstündeki 'Yayınla' düğmesini tıklayın. Bu, keşiflerinizi dünyadaki öğrenciler için hazır hale getirecek!", + "I18N_GET_STARTED_PAGE_PARAGRAPH_7": "Araştırmanızı oluşturduktan ve öğrenenleri görmeye hazır olduğunuzda, sayfanın üstündeki 'Yayımla' düğmesini tıklayın. Bu, keşiflerinizi dünyadaki öğrenciler için hazır hale getirecek!", "I18N_GET_STARTED_PAGE_PARAGRAPH_7_HEADING": "Keşiflerinizi Yayınlayın", "I18N_GET_STARTED_PAGE_PARAGRAPH_8": "Araştırmayı yayınladıktan sonra, bir bağlantı yoluyla paylaşabilir veya hatta kendi web sayfanıza gömebilirsiniz.", "I18N_GET_STARTED_PAGE_PARAGRAPH_8_HEADING": "Keşfinizi Paylaşın", @@ -222,6 +287,12 @@ "I18N_GET_STARTED_PAGE_TITLE": "Başla", "I18N_GOT_IT": "Anladım", "I18N_HEADING_VOLUNTEER": "Gönüllü", + "I18N_HINT_NEED_HELP": "Yardım lazım mı? 
Bu problem için ipucuyu görüntüleyin!", + "I18N_HINT_TITLE": "İpucu", + "I18N_INTERACTIONS_ALGEBRAIC_EXPR_INSTRUCTION": "Buraya bir ifade giriniz.", + "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "Editörde kod giriniz.", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Kod editörüne git", + "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Ögeleri bırak ve sürükle", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "\"x/y\" biçiminde bir kesir veya \"A x/y\" biçiminde bir karışık sayı girin.", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "x/y biçiminde bir kesir girin.", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "Kenar Ekle", @@ -230,6 +301,8 @@ "I18N_INTERACTIONS_GRAPH_EDGE_FINAL_HELPTEXT": "Kenar oluşturmak için hedef köşeye dokunun (kenar oluşturmayı iptal etmek için aynı köşeye tıklayın).", "I18N_INTERACTIONS_GRAPH_EDGE_INITIAL_HELPTEXT": "Oluşturulacak kenarın başlangıç köşesine dokunun.", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "Geçersiz grafik!", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "Grafik Oluştur", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "Grafiği görüntüle", "I18N_INTERACTIONS_GRAPH_MOVE": "Taşı", "I18N_INTERACTIONS_GRAPH_MOVE_FINAL_HELPTEXT": "Köşe noktasını bu noktaya taşımak için herhangi bir noktaya dokunun.", "I18N_INTERACTIONS_GRAPH_MOVE_INITIAL_HELPTEXT": "Taşımak için tepe noktasına dokunun.", @@ -240,10 +313,14 @@ "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "ve <[vertices]> tepe noktası", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Etiketi Güncelle", "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Genişliği Güncelle", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Görsele tıkla", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Gösterilecek resmi seçin]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Daha fazla seçenek seçebilirsiniz.", "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Lütfen en az bir seçenek seçin.} other{Lütfen en az # seçin.}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": 
"{maxAllowableSelectionCount, plural, one{1 den fazla seçenek seçilemez.} other{# veya fazladan seçenek seçilemez.}}", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "Haritaya tıkla", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "Haritayı görüntüle", + "I18N_INTERACTIONS_MATH_EQ_INSTRUCTION": "Buraya bir eşitlik giriniz.", "I18N_INTERACTIONS_MUSIC_CLEAR": "Temizle", "I18N_INTERACTIONS_MUSIC_PLAY": "Oynat", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Hedef Diziyi Oynat", @@ -252,30 +329,46 @@ "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "İptal", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Onay Gerekiyor", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Kodu Sıfırla", + "I18N_INTERACTIONS_PENCILCODE_INSTRUCTION": "Kodu düzenleyin. Kontrol etmek için 'Oynat'a tıklayın!", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Kod editörünü göster", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Madde ekle", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Eyvah, setinde kopyalar var gibi gözüküyor!", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(Her satıra bir madde ekle.)", "I18N_INTERACTIONS_SET_INPUT_NO_ANSWER": "Cevap verilmedi.", "I18N_INTERACTIONS_SUBMIT": "Gönder", "I18N_LANGUAGE_FOOTER_VIEW_IN": "Oppia'yı şu dilde görüntüle:", + "I18N_LEARNER_DASHBOARD_AFTERNOON_GREETING": "Tünaydın", + "I18N_LEARNER_DASHBOARD_ALL_GOALS_SECTION": "Amaçları Düzenle", + "I18N_LEARNER_DASHBOARD_BRONZE_BADGE": "Bronz", + "I18N_LEARNER_DASHBOARD_COMMUNITY_LESSONS_SECTION": "Topluluk Dersleri", + "I18N_LEARNER_DASHBOARD_COMPLETED_GOALS_SECTION": "Tamamlanmış Hedefler", "I18N_LEARNER_DASHBOARD_COMPLETED_SECTION": "Tamamlandı", "I18N_LEARNER_DASHBOARD_COMPLETED_TO_INCOMPLETE_COLLECTIONS": "Tamamladığınız koleksiyonların <[numberMoved]>'ı yeni keşifler eklendiği için 'devam ediyor' bölümüne taşındı!", + "I18N_LEARNER_DASHBOARD_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION": "Kaldığın yerden devam et", + "I18N_LEARNER_DASHBOARD_CURRENT_GOALS_SECTION": "Mevcut Hedefler", 
"I18N_LEARNER_DASHBOARD_EMPTY_COLLECTION_PLAYLIST": "'Daha Sonra Oynat' listenizde herhangi bir koleksiyon yok gibi görünüyor. Kütüphaneye gidin ve kendi oluşturulmuş çalma listenizi oluşturun!", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_COLLECTIONS": "Henüz herhangi bir koleksiyonu tamamlamadınız gibi görünüyor. Heyecan verici bir yeni koleksiyona başlamak için kütüphaneye gidin!", "I18N_LEARNER_DASHBOARD_EMPTY_COMPLETED_EXPLORATIONS": "Henüz bir arama yapmamışsınız gibi görünüyor. Heyecan verici yeni bir keşif başlatmak için kütüphaneye gidin!", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_HEADING": "Başlayın", + "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_SET_A_GOAL": "bir hedef belirlemek!", "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "'Daha Sonra Oynat' listenizde herhangi bir keşif bulunmuyor gibi görünüyor. Kütüphaneye gidin ve kendi oluşturulmuş çalma listenizi oluşturun!", "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "Henüz aktif bir geri bildirim ileti diziniz yok. Geri bildiriminiz derslerimizin kalitesini artırmaya yardımcı olur. Bunu, derslerimizden herhangi birine başlayarak ve değerli geri bildirimlerinizi göndererek yapabilirsiniz!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "Şu anda kısmen tamamlanmış koleksiyonlarınız yok gibi görünüyor. Heyecan verici bir yeni koleksiyona başlamak için kütüphaneye gidin!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "Şu anda kısmen tamamlanmış bir keşif yapmadığınız anlaşılıyor. Heyecan verici yeni bir keşif başlatmak için kütüphaneye gidin!", "I18N_LEARNER_DASHBOARD_EMPTY_SUBSCRIPTIONS": "Henüz herhangi bir içerik oluşturucuya abone olmamışsınız. 
Yeni yaratıcıları ve muhteşem keşiflerini keşfetmek için kütüphaneye gidin!", + "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "İyi Akşamlar", "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "Son Oynama", "I18N_LEARNER_DASHBOARD_FEEDBACK_SECTION": "Geribildirim Güncellemeleri", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_DEFAULT_MESSAGE": "Yanıtla", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_STATUS_CHANGE_MESSAGE": "Durum '<[threadStatus]>' olarak değiştirildi", "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_WARNING": "Herhangi bir kişisel bilgiyi paylaşmaktan kaçının çünkü bu tartışma halka açıktır.", + "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "Hedefler", + "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "Altın", + "I18N_LEARNER_DASHBOARD_HOME_SECTION": "Anasayfa", "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "Devam Ediyor", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "Henüz keşiflerimizin hiçbirini denememiş gibisiniz.", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "Bu heyecan verici yolculuğa başlayalım!", + "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "Yeni bir şey öğrenin", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COLLECTIONS_FROM_PLAYLIST": "{numberNonexistent, plural, one{'Daha Sonra Oynat' listenizdeki koleksiyonlardan 1'i artık kullanılamıyor. Rahatsızlıktan dolayı özür dileriz} other{'Daha Sonra Oynat' listenizdeki koleksiyonlardan #'i artık kullanılamıyor. Rahatsızlıktan dolayı özür dileriz}}", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_COLLECTIONS": "{numberNonexistent, plural, one{Tamamladığınız koleksiyonların 1'i artık kullanılabilir değil. Rahatsızlıktan dolayı özür dileriz} other{Tamamladığınız koleksiyonların #'i artık kullanılabilir değil. Rahatsızlıktan dolayı özür dileriz}}", "I18N_LEARNER_DASHBOARD_NONEXISTENT_COMPLETED_EXPLORATIONS": "{numberNonexistent, plural, one{Tamamladığınız keşiflerin 1 tanesi artık mevcut değil. Rahatsızlıktan dolayı özür dileriz} other{Tamamladığınız keşiflerin # tanesi artık mevcut değil. 
Rahatsızlıktan dolayı özür dileriz}}", @@ -293,7 +386,11 @@ "I18N_LEARNER_DASHBOARD_RETURN_TO_FEEDBACK_THREADS_MESSAGE": "Mesaj listesine dön", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE": "Gönder", "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Gönderiliyor…", + "I18N_LEARNER_DASHBOARD_SKILLS": "Beceriler", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Beceri Süreci", + "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Tamamlanan Hikayeler", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Abonelikler", + "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "İlerleme:", "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "Geçerli:", "I18N_LEARNER_DASHBOARD_SUGGESTION_DESCRIPTION": "Değişikliklerin Kısa Açıklaması:", "I18N_LEARNER_DASHBOARD_SUGGESTION_NO_CURRENT_STATE": "Hata! Bu durum artık yok!", @@ -374,11 +471,15 @@ "I18N_LICENSE_PAGE_LICENSE_HEADING": "Lisans", "I18N_LICENSE_PAGE_PARAGRAPH_1": "Oppia'nın derslerindeki tüm içerik CC-BY-SA 4.0 altında lisanslanmıştır.", "I18N_LICENSE_PAGE_PARAGRAPH_2": "Oppia’yı destekleyen yazılım açık kaynak kodludur ve kodu bir Apache 2.0 lisansı altında yayınlanmıştır.", + "I18N_LICENSE_TERMS_HEADING": "Lisans Şartları", "I18N_LOGOUT_LOADING": "Oturum kapatılıyor", "I18N_LOGOUT_PAGE_TITLE": "Oturumu kapat", "I18N_MODAL_CANCEL_BUTTON": "İptal", "I18N_MODAL_CONTINUE_BUTTON": "Devam et", + "I18N_NEXT_LESSON": "Sonraki Ders", "I18N_ONE_SUBSCRIBER_TEXT": "1 abonen var.", + "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "Ortaklıklar", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_BREADCRUMB": "Bekleyen Hesap Silinmesi", "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "Silinecek Hesap", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1": "Hesabınızın silinmesi planlanıyor ve yaklaşık 24 saat içinde silinecek. 
Silme işlemi tamamlandıktan sonra e-posta ile bilgilendirileceksiniz.", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1_HEADING": "Silme işlemi devam ediyor", @@ -463,6 +564,7 @@ "I18N_PLAYER_THANK_FEEDBACK": "Geri bildirim için teşekkürler!", "I18N_PLAYER_UNRATED": "Oylanmamış", "I18N_PLAYER_VIEWS_TOOLTIP": "Görüntülenme", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "Ses dili", "I18N_PREFERENCES_BIO": "Biyo", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "Bu alan isteğe bağlı. Buraya yazdığın herhangi bir şey herkese açık ve görüntülenebilir.", "I18N_PREFERENCES_BREADCRUMB": "Tercihler", @@ -475,9 +577,12 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_FEEDBACK_NEWS": "Birisi bir keşif hakkında size geri bildirim gönderdiğinde e-postalar alın", "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "Site hakkında haberleri ve güncellemeleri al", "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "Abone olduğunuz bir içerik oluşturucusu yeni bir keşif yayınladığında e-posta alın", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "Hesabı dışa aktar", + "I18N_PREFERENCES_EXPORT_ACCOUNT_INFO_TEXT": "Bu Oppia hesap verinizi JSON biçimli metin dosyası olarak indirecektir.", "I18N_PREFERENCES_HEADING": "Tercihler", "I18N_PREFERENCES_HEADING_SUBTEXT": "Bu sayfa üzerinde yaptığın herhangi bir değişiklik otomatik olarak kaydedilecek.", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "Henüz hiçbir içerik oluşturucuya abone olmadınız. Yazar profil sayfasındaki 'abone ol' düğmesine tıklayarak favori yazara abone olmaktan çekinmeyin. 
Bir yazara abone olarak, yazar yeni bir ders yayımladığında e-posta ile bilgilendirileceksiniz.", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "Etki", "I18N_PREFERENCES_PAGE_TITLE": "Profil tercihlerini değiştir - Oppia", "I18N_PREFERENCES_PICTURE": "Resim", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "Tercih Edilen Ses Dili", @@ -487,6 +592,7 @@ "I18N_PREFERENCES_PREFERRED_DASHBOARD_EXPLAIN": "Bu, girişte varsayılan olarak gösterilecek olan panodur.", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "Tercih Edilen Konu Anlatım Dilleri", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "Bu diller galeride konu anlatımı arattığında varsayılan olarak seçilecek.", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": "Tercih edilen dilleri seç.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "Tercih Edilen Site Dili", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "Bu dil sitenin gösterildiği dildir.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "Tercih Edilen Site Dili", @@ -494,7 +600,9 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Kırpmak ve yeniden boyutlandırmak için sürükle:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Hata: Resim dosyası okunamıyor.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Profil Resmini Güncelle", + "I18N_PREFERENCES_SEARCH_LABEL": "Ara", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Öncelikli dilleri seç...", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "Site dili", "I18N_PREFERENCES_SUBJECT_INTERESTS": "İlgi Alanları", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "E.g.: matematik, bilgisayar bilimi, sanat, ...", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "Yeni bir ilgi alanı ekle (küçük harf ve boşlukları kullanarak)...", @@ -503,30 +611,39 @@ "I18N_PREFERENCES_USERNAME": "Kullanıcı adı", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Henüz seçilmemiş", "I18N_PROFILE_NO_EXPLORATIONS": "Bu kullanıcı henüz bir keşif oluşturmadı veya düzenlemedi.", - 
"I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Puanınız hakkında daha fazla bilgi edinin", + "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Puan Dağılımı", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Gösterge Panelim", - "I18N_QUESTION_PLAYER_NEW_SESSION": "Yeni Sezon", + "I18N_QUESTION_PLAYER_NEW_SESSION": "Tekrarla", "I18N_QUESTION_PLAYER_RETRY_TEST": "Tekrar Test Et", "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "Hikayeye Dön", "I18N_QUESTION_PLAYER_REVIEW_LOWEST_SCORED_SKILL": "en düşük puanlı beceriyi gözden geçir", "I18N_QUESTION_PLAYER_SCORE": "Puan", "I18N_QUESTION_PLAYER_SKILL_DESCRIPTIONS": "Beceri Açıklamaları", - "I18N_QUESTION_PLAYER_TEST_FAILED": "Test başarısız. Lütfen becerileri gözden geçirin ve tekrar deneyin", - "I18N_QUESTION_PLAYER_TEST_PASSED": "Test tamamlandı. Aferin!", + "I18N_QUESTION_PLAYER_TEST_FAILED": "Oturum başarısız. Lütfen becerileri gözden geçirin ve tekrar deneyin", + "I18N_QUESTION_PLAYER_TEST_PASSED": "Oturum tamamlandı. Aferin!", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "Kayıt Oturumu Sona Erdi", "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "Üzgünüz, kayıt oturumunuzun süresi doldu. 
İşlemi yeniden başlatmak için lütfen \"Kaydı Devam Et\"i tıklayın.", + "I18N_RESET_CODE": "Kodu Sıfırla", + "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Testi Gözden Geçir", + "I18N_SHARE_LESSON": "Bu dersi paylaş", "I18N_SHOW_SOLUTION_BUTTON": "Çözümü Görüntüle", - "I18N_SIDEBAR_ABOUT_LINK": "Oppia Hakkında", + "I18N_SIDEBAR_ABOUT_LINK": "Hakkımızda", "I18N_SIDEBAR_BLOG": "Blog", "I18N_SIDEBAR_CLASSROOM": "Sınıf", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Temel Matematik", "I18N_SIDEBAR_CONTACT_US": "Bize Ulaşın", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "Sorularınıza yardımcı olmak için buradayız.", "I18N_SIDEBAR_DONATE": "Bağış yap", "I18N_SIDEBAR_FORUM": "Forum", - "I18N_SIDEBAR_GET_STARTED": "Başla", + "I18N_SIDEBAR_HOME": "Anasayfa", + "I18N_SIDEBAR_LEARN": "Öğren", "I18N_SIDEBAR_LIBRARY_LINK": "Kütüphane", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "Matematiğe başlangıç yapmanıza yardımcı olmak için acemi dostu dersler.", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Oppia Vakfı", + "I18N_SIDEBAR_PARTNERSHIPS": "Ortaklıklar", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "Tüm Dersleri Gör", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Oppia ile Öğret", + "I18N_SIDEBAR_VOLUNTEER": "Gönüllü", "I18N_SIGNIN_LOADING": "Oturum açılıyor", "I18N_SIGNIN_PAGE_TITLE": "Oturum aç", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Bu metnin solundaki kutucuğu işaretleyerek, burada bulunan <[sitename]> Kullanım Koşullarını görmüş, kabul etmiş ve onaylamış olursun.", @@ -552,6 +669,7 @@ "I18N_SIGNUP_LOADING": "Yükleniyor", "I18N_SIGNUP_PAGE_TITLE": "Topluluğa katıl - Oppia", "I18N_SIGNUP_REGISTRATION": "Kayıt", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "Bana bir daha sorma", "I18N_SIGNUP_SEND_ME_NEWS": "Site hakkında haberleri ve güncellemeleri bana gönder", "I18N_SIGNUP_SITE_DESCRIPTION": "<[sitename]> açık ve ortak kullanıma sahip bir öğrenme kaynağıdır. 
Site içerisindeki tüm materyaller özgürce tekrar kullanılabilir ve paylaşılabilir.", "I18N_SIGNUP_SITE_OBJECTIVE": "<[sitename]> yüksek kalitede öğrenme kaynaklarının oluşturulmasını ve devamlı geliştirilmesini teşvik etmek ve herkes tarafından bedava kullanılabilirliğini sağlamak için vardır.", @@ -570,8 +688,8 @@ "I18N_SPLASH_FOR_TEACHERS": "Öğretmenler için", "I18N_SPLASH_FOR_VOLUNTEERS": "Gönüllüler için", "I18N_SPLASH_ICON_ONE_TEXT": "+1 Milyon Kullanıcı", - "I18N_SPLASH_ICON_THREE_TEXT": "<[lessonCount]> Seçilmiş Dersler", - "I18N_SPLASH_ICON_TWO_TEXT": "+<[languageCount]> Dillerde Mevcuttur", + "I18N_SPLASH_ICON_THREE_TEXT": "<[lessonCount]> Seçilmiş Ders", + "I18N_SPLASH_ICON_TWO_TEXT": "+<[languageCount]> Dilde Mevcuttur", "I18N_SPLASH_JAVASCRIPT_ERROR_DESCRIPTION": "Oppia ücretsiz, tamamen etkileşimli konu anlatımları içeren, açık kaynak kodlu bir öğrenme platformudur. Maalesef, Oppia sağlıklı bir şekilde çalışması için tarayıcınızdaki JavaScript eklentisinin aktive edilmesini gerektirmektedir ve tarayıcınızda JavaScript kapalı durumda. Eğer JavaScript'i çalıştırmak istiyorsanız, \">buraya tıklayın.", "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "Teşekkürler.", "I18N_SPLASH_JAVASCRIPT_ERROR_TITLE": "Tarayıcınızda JavaScript olmasına ihtiyacımız var.", @@ -600,9 +718,15 @@ "I18N_SPLASH_VOLUNTEERS_CONTENT": "Kim olursan olsun, Oppia'da bir ev bulabilirsin. 
Soru önererek, grafiklerle katkıda bulunarak veya dersleri çevirerek dersleri geliştirmek için her zaman daha fazla insana ihtiyacımız var.", "I18N_SPLASH_VOLUNTEERS_TITLE": "Topluluk Tarafından Yönetin", "I18N_START_HERE": "Başlamak için buraya tıkla!", + "I18N_STORY_Qu6THxP29tOy_TITLE": "Maya, Ömer ve Malik pizza yaptı!", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> -Tamamlandı!", "I18N_SUBSCRIBE_BUTTON_TEXT": "Abone ol", + "I18N_SUBTOPIC_5g0nxGUmx5J5_calculations-with-ratios_TITLE": "Oranlarla Hesaplama", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "Oran Nedir?", "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Sonraki Beceri", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "Önceki Beceri:", + "I18N_SUBTOPIC_qW12maD4hiA8_problem-solving_TITLE": "Problem Çözme", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE": "Ekleme ve Çıkarma Arasındaki İlişki", "I18N_TEACH_BENEFITS_ONE": "Tüm Yaşlar için Etkili, Yüksek-Kaliteli Öğrenme", "I18N_TEACH_BENEFITS_THREE": "Her Zaman Ücretsiz ve Kolay-kullanılabilir", "I18N_TEACH_BENEFITS_TITLE": "Avantajlarımız", @@ -628,17 +752,21 @@ "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 ders} other{# ders}}", "I18N_TOPIC_VIEWER_CHAPTER": "Bölüm", "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{1 bölüm} other{# bölüm}}", + "I18N_TOPIC_VIEWER_COMING_SOON": "Çok yakında!", + "I18N_TOPIC_VIEWER_COMING_SOON_LESSONS": "Bu konu için dersler mevcut olduğunda geri gelin.", + "I18N_TOPIC_VIEWER_COMING_SOON_PRACTICE": "Bu konu için pratik soruları mevcut olduğunda geri gelin.", "I18N_TOPIC_VIEWER_DESCRIPTION": "Açıklama", "I18N_TOPIC_VIEWER_LESSON": "Ders", "I18N_TOPIC_VIEWER_LESSONS": "Dersler", "I18N_TOPIC_VIEWER_MASTER_SKILLS": "<[topicName]> için Uzmanlık Becerileri", "I18N_TOPIC_VIEWER_PRACTICE": "Pratik", "I18N_TOPIC_VIEWER_REVISION": "Revizyon", - "I18N_TOPIC_VIEWER_SELECT_SKILLS": "<[topicName]> bilginizi geliştirmek için gerekli becerileri seçin.", + "I18N_TOPIC_VIEWER_SELECT_SKILLS": "<[topicName]> derslerinden 
pratik yapmak istediğiniz becerileri seçin.", "I18N_TOPIC_VIEWER_SKILL": "Yetenek", "I18N_TOPIC_VIEWER_SKILLS": "Yetenekler", "I18N_TOPIC_VIEWER_START_PRACTICE": "Başla", "I18N_TOPIC_VIEWER_STORIES": "Hikayeler", + "I18N_TOPIC_VIEWER_STORIES_HEAD": "Oynayabileceğin hikayeler", "I18N_TOPIC_VIEWER_STORY": "Hikaye", "I18N_TOPIC_VIEWER_STUDY_SKILLS": "<[topicName]> için Çalışma Becerileri", "I18N_TOPIC_VIEWER_STUDY_SKILLS_SUBTITLE": "<[topicName]> ile ilgili becerilerinizi çalışmanıza yardımcı olması için aşağıdaki İnceleme Kartlarını kullanın.", @@ -648,22 +776,25 @@ "I18N_TOPNAV_ABOUT_OPPIA": "Oppia Hakkında", "I18N_TOPNAV_ADMIN_PAGE": "Yönetici Sayfası", "I18N_TOPNAV_BLOG": "Blog", - "I18N_TOPNAV_CLASSROOM": "Sınıf", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Temel Matematik", "I18N_TOPNAV_CONTACT_US": "Bize Ulaşın", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Katılımcı Kontrol Paneli", "I18N_TOPNAV_CREATOR_DASHBOARD": "Gösterge Paneli", "I18N_TOPNAV_DONATE": "Bağış yap", "I18N_TOPNAV_FORUM": "Forum", - "I18N_TOPNAV_GET_INVOLVED": "Katılın", + "I18N_TOPNAV_GET_INVOLVED": "Katıl", "I18N_TOPNAV_GET_STARTED": "Başla", + "I18N_TOPNAV_HOME": "Anasayfa", + "I18N_TOPNAV_LEARN": "Öğren", "I18N_TOPNAV_LEARNER_DASHBOARD": "Öğrenci Gösterge Tablosu", - "I18N_TOPNAV_LIBRARY": "Kütüphane", + "I18N_TOPNAV_LEARN_LINK_1": "Tüm Dersleri Gör", + "I18N_TOPNAV_LEARN_LINK_2": "Öğrenmeye Devam Et", + "I18N_TOPNAV_LIBRARY": "Topluluk Kütüphanesi", "I18N_TOPNAV_LOGOUT": "Oturumu kapat", "I18N_TOPNAV_MODERATOR_PAGE": "Moderatör Sayfası", - "I18N_TOPNAV_OPPIA_FOUNDATION": "Oppia Kuruluşu", + "I18N_TOPNAV_OPPIA_FOUNDATION": "Oppia Vakfı", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Katılım Oyun Kitabı", - "I18N_TOPNAV_PARTNERSHIPS": "Ortaklıklar", + "I18N_TOPNAV_PARTNERSHIPS": "Okullar ve Organizasyonlar", "I18N_TOPNAV_PREFERENCES": "Tercihler", "I18N_TOPNAV_SIGN_IN": "Oturum aç", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Google ile oturum aç", @@ -671,6 +802,8 @@ "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": 
"Konular ve Beceriler Gösterge Tablosu", "I18N_TOTAL_SUBSCRIBERS_TEXT": "Toplam <[totalSubscribers]> aboneniz var.", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Abonelikten çık", + "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Gönüllü", + "I18N_WARNING_MODAL_DESCRIPTION": "Bu, tam çözümü gösterecektir. Emin misiniz?", "I18N_WARNING_MODAL_TITLE": "Uyarı!", "I18N_WORKED_EXAMPLE": "Çalıştığı Örnek" } diff --git a/assets/i18n/uk.json b/assets/i18n/uk.json index 540134c3829a..1cc59be4c961 100644 --- a/assets/i18n/uk.json +++ b/assets/i18n/uk.json @@ -1,5 +1,6 @@ { - "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Про фонд", + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "Про фонд Oppia", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "Про фонд Oppia | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "Створюйте дослідження", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "на теми, що Вас цікавлять.", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "Заробити зворотний зв'язок", @@ -65,6 +66,8 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "Посібник для вчителів", "I18N_ACTION_TIPS_FOR_PARENTS": "Поради батькам та опікунам", "I18N_ACTION_VISIT_CLASSROOM": "Відвідайте клас", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_3": "Програма доступна англійською та бразильською португальською мовами.", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_4": "Скоро буде додано більше мов!", "I18N_ATTRIBUTION_HTML_STEP_ONE": "Скопіюйте та вставте HTML", "I18N_ATTRIBUTION_HTML_STEP_TWO": "Переконайтеся, що посилання відображається як «<[linkText]>»", "I18N_ATTRIBUTION_HTML_TITLE": "Атрибут у HTML", @@ -79,6 +82,7 @@ "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "Новий допис", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "Чернетки", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "Опубліковано", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "Теги", "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "Додати ескіз зображення", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "Текст", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "Скасувати", @@ -105,12 +109,22 @@ "I18N_CLASSROOM_CALLOUT_HEADING_1": "Математичні основи", 
"I18N_CLASSROOM_CALLOUT_HEADING_2": "Представляємо: Клас Oppia", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "Перевірте перший комплексний курс у абсолютно новому класі Oppia! Підготовлені уроки — переглянуті вихователями, — щоб ви могли оволодіти основними математичними навичками з тем, починаючи від значень місця до множення та ділення.", + "I18N_CLASSROOM_MATH_TITLE": "Математика", "I18N_CLASSROOM_PAGE_COMING_SOON": "Незабаром", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "Деталі курсу", "I18N_CLASSROOM_PAGE_HEADING": "Клас Oppia", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "Дослідіть більше уроків, зроблених спільнотою", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "Шукайте в нашій бібліотеці спільноти", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "Охоплені теми", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "Початок", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "Продовжити", + "I18N_COLLECTION_PLAYER_PAGE_FINISHED": "Ви закінчили збірку! Не соромтеся повторити будь-які дослідження нижче.", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "Наведіть курсор на піктограму, щоб попередньо переглянути дослідження.", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "До цієї колекції не було додано жодного дослідження.", + "I18N_COMING_SOON": "Незабаром!", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "КОЛЕКЦІЯ", + "I18N_COMPLETED_STORY": "Завершено «<[story]>»", + "I18N_COMPLETE_CHAPTER": "Завершіть розділ у «<[topicName]>»", "I18N_CONTACT_PAGE_BREADCRUMB": "Зв'язок", "I18N_CONTACT_PAGE_HEADING": "Візьміть участь!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "Дякуємо за вашу зацікавленість у допомозі з проєктом Oppia!", @@ -195,7 +209,7 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "Зобов'язання, зроблені для публічних досліджень та колекцій, які мають інших власників", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_3": "Зобов'язання щодо тем, історій, навичок та питань", "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "Щоб підтвердити видалення, будь ласка, введіть своє ім'я користувача у поле нижче та натисніть кнопку «Видалити мій 
обліковий запис». Цю дію не можна скасувати.", - "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Ця дія видалить цей обліковий запис користувача, а також усі особисті дані, пов’язані з цим обліковим записом. Дані, які вже є загальнодоступними, будуть анонімізовані, щоб їх не можна було зв’язати з цим обліковим записом. Деякі з перелічених нижче категорій можуть не застосовуватися до вашого облікового запису.", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "Ця дія видалить цей обліковий запис користувача, а також усі особисті дані, пов’язані з цим обліковим записом. Дані, які вже є загальнодоступними, будуть анонімізовані, щоб їх не можна було зв’язати з цим обліковим записом, крім резервних даних (які зберігаються протягом 6 місяців). Деякі з перелічених нижче категорій можуть не застосовуватися до вашого облікового запису.", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "Огляд", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "Нижче наведено типи даних, які буде видалено:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "Ось типи даних, які будуть анонімізовані:", @@ -206,10 +220,8 @@ "I18N_DIRECTIVES_UPLOAD_A_FILE": "Завантажити файл", "I18N_DONATE_PAGE_BREADCRUMB": "Пожертвувати", "I18N_DONATE_PAGE_IMAGE_TITLE": "Ваші щедрі подарункові кошти:", - "I18N_DONATE_PAGE_TITLE": "Пожертвувати у
Фонд Oppia", + "I18N_DONATE_PAGE_TITLE": "Пожертвувати у Фонд Oppia", "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Послухайте від нашої спільноти Oppia", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "У 2012 році Oppia розпочала просту ідею: покращити освіту студентів у всьому світі та одночасно підвищити якість викладання. Відтоді це бачення перетворилося на освітню платформу з понад 11 000 досліджень, які використовували понад 430 000 користувачів у всьому світі.", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "Будь ласка, пожертвуйте фонду Oppia , зареєстрованій некомерційною організацією 501 (c)(3), та приєднуйтесь до нас, щоб дарувати радість навчання людям у всьому світі.", "I18N_ERROR_DISABLED_EXPLORATION": "Вимкнено дослідження", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Перепрошуємо, але наразі натиснуте Вами дослідження не відображається. Будь ласка, спробуйте ще раз пізніше.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Відключене дослідження — Oppia", @@ -226,9 +238,29 @@ "I18N_ERROR_PAGE_TITLE_401": "Помилка 401 — Oppia", "I18N_ERROR_PAGE_TITLE_404": "Помилка 404 — Oppia", "I18N_ERROR_PAGE_TITLE_500": "Помилка 500 — Oppia", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "Готові до нових кексів? Пройдіть цей короткий тест, щоб перевірити, як ви розумієте те, чого ви вже навчилися!", + "I18N_EXPLORATION_-tMgcP1i_4au_TITLE": "Рівність дробів (підсумок)", + "I18N_EXPLORATION_0FBWxCE5egOw_DESCRIPTION": "Чи можливо, щоб один дріб був іншим замаскованим? Давайте подивимося, що станеться, коли Метью зустріне Крамба вдруге.", + "I18N_EXPLORATION_0FBWxCE5egOw_TITLE": "Рівносильні дроби", + "I18N_EXPLORATION_0X0KC9DXWwra_DESCRIPTION": "У будинку Камала всі святкують день народження Самира. Камал додає задоволення, створюючи математичну гру для Ави та Самира. Подивіться, чи зможете ви вирішити питання!", + "I18N_EXPLORATION_0X0KC9DXWwra_TITLE": "Підсумок: Навички вирішення проблем", + "I18N_EXPLORATION_1904tpP0CYwY_DESCRIPTION": "Арії пора починати садити овочі! 
Продовжуйте свою садівничу подорож, допомагаючи їй у саду, і почніть запам’ятовувати свої множники.", + "I18N_EXPLORATION_1904tpP0CYwY_TITLE": "Одноцифрові вирази від 1 до 5", + "I18N_EXPLORATION_2mzzFVDLuAj8_DESCRIPTION": "Приєднуйтесь до Джеймса, його дядька та дізнайтесь про співвідношення та як ними користуватися!", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "Що таке співвідношення?", + "I18N_EXPLORATION_40a3vjmZ7Fwu_DESCRIPTION": "Ніна та її мати натикаються на свого друга, який також володіє фруктовим кіоском. Приєднуйтесь до Ніни, оскільки вона використовує поділ, щоб допомогти їх подрузі з ларьком!", + "I18N_EXPLORATION_40a3vjmZ7Fwu_TITLE": "Залишки та особливі випадки", + "I18N_EXPLORATION_53Ka3mQ6ra5A_DESCRIPTION": "Майя, Омар і Малік відвідують супермаркет, щоб взяти більше інгредієнтів, і їм потрібно додати більші числа. Подивіться, чи зможете ви їм допомогти!", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE": "Додавання більших чисел", + "I18N_EXPLORATION_5I4srORrwjt2_DESCRIPTION": "У снек-барі Камал каже, що вони повинні розумно витрачати обмежену суму грошей. Допоможіть Аві та Саміру знайти, які закуски вони можуть отримати!", + "I18N_EXPLORATION_5I4srORrwjt2_TITLE": "Пропорційність та унітарний метод", + "I18N_EXPLORATION_5NWuolNcwH6e_DESCRIPTION": "Джеймс намагається приготувати власні коктейлі, але вони виходять не надто хорошими. Яку помилку він зробив? Грайте в цей урок, щоб дізнатися!", + "I18N_EXPLORATION_5NWuolNcwH6e_TITLE": "Важливість порядку", + "I18N_EXPLORATION_670bU6d9JGBh_DESCRIPTION": "Допоможіть Метью вирішити проблему для одного з клієнтів містера Бейкера, коли він дізнається про змішані числа та числову пряму. 
Грайте в цей урок, щоб почати!", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "Анонімний", "I18N_FOOTER_ABOUT": "Опис", "I18N_FOOTER_ABOUT_ALL_CAPS": "ПРО OPPIA", + "I18N_FOOTER_ANDROID_APP": "Додаток Android", "I18N_FOOTER_AUTHOR_PROFILES": "Профілі авторів", "I18N_FOOTER_BROWSE_LIBRARY": "Перегляд бібліотеки", "I18N_FOOTER_CONTACT_US": "Зв'яжіться з нами", @@ -274,6 +306,10 @@ "I18N_HEADING_VOLUNTEER": "Волонтер", "I18N_HINT_NEED_HELP": "Потребуєте допомоги? Перегляньте підказку щодо цієї проблеми!", "I18N_HINT_TITLE": "Підказка", + "I18N_INTERACTIONS_ALGEBRAIC_EXPR_INSTRUCTION": "Введіть тут вираз.", + "I18N_INTERACTIONS_CODE_REPL_INSTRUCTION": "Введіть код в редакторі", + "I18N_INTERACTIONS_CODE_REPL_NARROW_INSTRUCTION": "Перейдіть до редактора коду", + "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "Перетягуйте елементи", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER": "Введіть дріб у формі «x/y» або змішане число у формі «A x/y».", "I18N_INTERACTIONS_FRACTIONS_INPUT_PLACEHOLDER_NO_INTEGER": "Введіть дріб у вигляді x/y.", "I18N_INTERACTIONS_GRAPH_ADD_EDGE": "Додати край", @@ -282,6 +318,8 @@ "I18N_INTERACTIONS_GRAPH_EDGE_FINAL_HELPTEXT": "Торкніться цільової вершини, щоб створити край (натисніть цю саму вершину, щоб скасувати створення краю).", "I18N_INTERACTIONS_GRAPH_EDGE_INITIAL_HELPTEXT": "Торкніться початкової вершини ребра для створення.", "I18N_INTERACTIONS_GRAPH_ERROR_INVALID": "Невірний графік!", + "I18N_INTERACTIONS_GRAPH_INPUT_INSTRUCTION": "Створіть графік", + "I18N_INTERACTIONS_GRAPH_INPUT_NARROW_INSTRUCTION": "Переглянути графік", "I18N_INTERACTIONS_GRAPH_MOVE": "Перемістити", "I18N_INTERACTIONS_GRAPH_MOVE_FINAL_HELPTEXT": "Торкніться будь-якої точки, щоб перемістити вершину до цієї точки.", "I18N_INTERACTIONS_GRAPH_MOVE_INITIAL_HELPTEXT": "Торкніться вершини для переміщення.", @@ -292,11 +330,17 @@ "I18N_INTERACTIONS_GRAPH_RESPONSE_VERTICES": "і <[vertices]> вершини", "I18N_INTERACTIONS_GRAPH_UPDATE_LABEL": "Оновити ярлик", 
"I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "Оновити вагу", + "I18N_INTERACTIONS_IMAGE_CLICK_INSTRUCTION": "Натисніть на зображення", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[Виберіть зображення для відображення]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "Ви можете вибрати більше варіантів.", "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, one{Виберіть один або кілька варіантів.} other{Виберіть # або більше варіантів.}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, one{Можна вибрати не більше 1 варіанта.} other{Можна вибрати не більше # варіантів.}}", + "I18N_INTERACTIONS_MAP_INSTRUCTION": "Натисніть на мапу", + "I18N_INTERACTIONS_MAP_NARROW_INSTRUCTION": "Переглянути мапу", + "I18N_INTERACTIONS_MATH_EQ_INSTRUCTION": "Введіть тут рівняння.", "I18N_INTERACTIONS_MUSIC_CLEAR": "Очистити", + "I18N_INTERACTIONS_MUSIC_INSTRUCTION": "Перетягніть нотатки до персоналу, щоб сформувати послідовність", + "I18N_INTERACTIONS_MUSIC_NARROW_INSTRUCTION": "Показати музичний персонал", "I18N_INTERACTIONS_MUSIC_PLAY": "Відтворити", "I18N_INTERACTIONS_MUSIC_PLAY_SEQUENCE": "Відтворити послідовність цілей", "I18N_INTERACTIONS_NUMBER_WITH_UNITS_POSSIBLE_UNIT_FORMATS": "Можливі формати одиниць", @@ -304,6 +348,8 @@ "I18N_INTERACTIONS_PENCILCODEEDITOR_CANCEL": "Скасувати", "I18N_INTERACTIONS_PENCILCODEEDITOR_CONFIRMATION_REQUIRED": "Обов'язкове підтвердження", "I18N_INTERACTIONS_PENCILCODEEDITOR_RESET_CODE": "Скидання коду", + "I18N_INTERACTIONS_PENCILCODE_INSTRUCTION": "Відредагуйте код. 
Натисніть «Відтворити», щоб перевірити це!", + "I18N_INTERACTIONS_PENCILCODE_NARROW_INSTRUCTION": "Показати редактор коду", "I18N_INTERACTIONS_SET_INPUT_ADD_ITEM": "Додати елемент", "I18N_INTERACTIONS_SET_INPUT_DUPLICATES_ERROR": "Ой, схоже, у Вашого набору є дублікати!", "I18N_INTERACTIONS_SET_INPUT_EMPTY_SET": "(Додайте один елемент в рядок.)", @@ -372,7 +418,7 @@ "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "Надсилання…", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "Срібло", "I18N_LEARNER_DASHBOARD_SKILLS": "Навички", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "Майстерність", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "Прогрес навичок", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "Історії завершено", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "Підписки", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "Прогрес:", @@ -564,6 +610,7 @@ "I18N_PLAYER_UNRATED": "Без рейтингу", "I18N_PLAYER_VIEWS_TOOLTIP": "Переглядів", "I18N_PRACTICE_SESSION_PAGE_BREADCRUMB": "Практичний сеанс", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "Мова аудіо", "I18N_PREFERENCES_BIO": "Про себе", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "Це поле необов'язкове. Все, що Ви пишете тут, є загальнодоступними та доступними для перегляду в усьому світі.", "I18N_PREFERENCES_BREADCRUMB": "Налаштування", @@ -577,9 +624,13 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "Отримувати новини та оновлення щодо сайту", "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": "Отримувати повідомлення, коли автор, на якого Ви підписані, публікує щось нове", "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "Нам не вдалося автоматично додати вас до нашого списку розсилки. 
Будь ласка, перейдіть за цим посиланням, щоб зареєструватися у нашому списку розсилки:", + "I18N_PREFERENCES_EXPORT_ACCOUNT": "Експортувати обліковий запис", + "I18N_PREFERENCES_EXPORT_ACCOUNT_INFO_TEXT": "Дані облікового запису Oppia буде завантажено як текстовий файл у форматі JSON.", + "I18N_PREFERENCES_EXPORT_ACCOUNT_WARNING_TEXT": "Будь ласка, не залишайте цю сторінку. Ваші дані зараз завантажуються, і після завершення вони будуть завантажені як текстовий файл у форматі JSON. Якщо щось піде не так, будь ласка, зв’яжіться", "I18N_PREFERENCES_HEADING": "Налаштування", "I18N_PREFERENCES_HEADING_SUBTEXT": "Будь-які Ваші зміни на цій сторінці будуть автоматично збережені.", "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "Ви ще не підписалися на жодного творця. Не соромтеся підписатися на улюбленого автора, натиснувши кнопку «підписатися» на сторінці профілю автора. Підписавшись на автора, ви отримаєте повідомлення електронною поштою, коли автор опублікує новий урок.", + "I18N_PREFERENCES_OPPIA_IMPACT_SECTION_HEADING": "Вплив", "I18N_PREFERENCES_PAGE_TITLE": "Змінити налаштування Вашого профілю — Oppia", "I18N_PREFERENCES_PICTURE": "Зображення", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "Бажана мова аудіо", @@ -589,6 +640,7 @@ "I18N_PREFERENCES_PREFERRED_DASHBOARD_EXPLAIN": "Це інформаційна панель, яка буде відображатися за замовчуванням під час входу.", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE": "Переважні мови дослідження", "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_EXPLAIN": "Ці мови будуть вибрані за замовчуванням під час пошуку в галереї досліджень.", + "I18N_PREFERENCES_PREFERRED_EXPLORATION_LANGUAGE_SELECT": "Виберіть бажані мови.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE": "Бажана мова сайту", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_EXPLAIN": "Це мова, якою відображається сайт.", "I18N_PREFERENCES_PREFERRED_SITE_LANGUAGE_PLACEHOLDER": "Бажана мова сайту", @@ -596,18 +648,22 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "Перетягніть, щоб обрізати та 
змінити розмір:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "Помилка: Не вдалося прочитати файл зображення.", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "Завантажте зображення профілю", + "I18N_PREFERENCES_SEARCH_LABEL": "Пошук", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "Виберіть бажані мови…", + "I18N_PREFERENCES_SITE_LANGUAGE_LABEL": "Мова сайту", "I18N_PREFERENCES_SUBJECT_INTERESTS": "Предметні інтереси", + "I18N_PREFERENCES_SUBJECT_INTERESTS_ERROR_TEXT": "Тема інтересів має бути унікальною та написана з нижнього регістру.", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "Наприклад: математика, інформатика, мистецтво, …", "I18N_PREFERENCES_SUBJECT_INTERESTS_INVALID_SEARCH": "Додати новий предмет (за допомогою малих літер та пробілів)…", + "I18N_PREFERENCES_SUBJECT_INTERESTS_LABEL": "Нові предметні інтереси", "I18N_PREFERENCES_SUBJECT_INTERESTS_PLACEHOLDER": "Введіть предметні інтереси…", "I18N_PREFERENCES_SUBSCRIBED_CREATORS": "Творці, на яких ви підписалися", "I18N_PREFERENCES_USERNAME": "Ім'я користувача", "I18N_PREFERENCES_USERNAME_NOT_SELECTED": "Ще не обрано", "I18N_PROFILE_NO_EXPLORATIONS": "Цей користувач ще не створював і не редагував жодних досліджень.", - "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Дізнайтеся більше про свій рахунок", + "I18N_QUESTION_PLAYER_LEARN_MORE_ABOUT_SCORE": "Розбивка балів", "I18N_QUESTION_PLAYER_MY_DASHBOARD": "Моя панель", - "I18N_QUESTION_PLAYER_NEW_SESSION": "Нова сесія", + "I18N_QUESTION_PLAYER_NEW_SESSION": "Повторити", "I18N_QUESTION_PLAYER_RETRY_TEST": "Повторити тест", "I18N_QUESTION_PLAYER_RETURN_TO_STORY": "Повернутися до історії", "I18N_QUESTION_PLAYER_REVIEW_LOWEST_SCORED_SKILL": "Перегляньте вміння з найнижчою оцінкою", @@ -617,8 +673,11 @@ "I18N_QUESTION_PLAYER_TEST_PASSED": "Сесія завершена. Молодець!", "I18N_REGISTRATION_SESSION_EXPIRED_HEADING": "Сеанс реєстрації закінчився", "I18N_REGISTRATION_SESSION_EXPIRED_MESSAGE": "На жаль, термін реєстрації закінчився. 
Натисніть «Продовжити реєстрацію», щоб перезапустити процес.", + "I18N_RESET_CODE": "Скидання коду", "I18N_REVIEW_TEST_PAGE_BREADCRUMB": "Огляд тесту", "I18N_SAVE_PROGRESS": "Увійдіть або зареєструйтесь, щоб зберегти свій прогрес та пройти наступний урок.", + "I18N_SAVE_PROGRESS_BUTTON_TEXT": "Копіювати", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "Скопійовано!", "I18N_SHARE_LESSON": "Поділіться цим уроком", "I18N_SHOW_SOLUTION_BUTTON": "Показати рішення", "I18N_SIDEBAR_ABOUT_LINK": "Про Oppia", @@ -626,14 +685,28 @@ "I18N_SIDEBAR_CLASSROOM": "Клас", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "Основи математики", "I18N_SIDEBAR_CONTACT_US": "Зв'язатися з нами", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "Ми тут, щоб допомогти із будь-якими запитаннями.", "I18N_SIDEBAR_DONATE": "Пожертвувати", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "Ваш внесок допомагає забезпечити якісну освіту для всіх.", "I18N_SIDEBAR_FORUM": "Форум", - "I18N_SIDEBAR_GET_STARTED": "Розпочати", + "I18N_SIDEBAR_GET_INVOLVED": "Взяти участь", + "I18N_SIDEBAR_HOME": "Домашня", + "I18N_SIDEBAR_LEARN": "Вчити", "I18N_SIDEBAR_LIBRARY_LINK": "Бібліотека", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "Математичні основи", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "Дружні уроки для початківців, які допоможуть вам розпочати роботу з математики.", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Фонд «Oppia»", "I18N_SIDEBAR_PARTNERSHIPS": "Партнерства", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "Забезпечте якісну освіту студентам у вашому регіоні.", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "Додавання і віднімання", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "Бібліотека спільноти", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "Додаткові ресурси, створені спільнотою.", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "Множення", + "I18N_SIDEBAR_SUBMENU_PLACE_VALUES": "Значення місць", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "Переглянути усі уроки", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Вивчайте з Oppia", "I18N_SIDEBAR_VOLUNTEER": 
"Волонтер", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "Долучайтесь до міжнародної команди аби створювати та покращувати уроки.", "I18N_SIGNIN_LOADING": "Вхід", "I18N_SIGNIN_PAGE_TITLE": "Увійти", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Встановивши прапорець ліворуч від цього тексту, ви погоджуєтесь дотримуватись умов використання <[sitename]>, наведених тут.", @@ -711,7 +784,8 @@ "I18N_START_HERE": "Натисніть тут, щоб почати!", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> — Завершено!", "I18N_SUBSCRIBE_BUTTON_TEXT": "Підписатися", - "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Наступний навик", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "Наступна навичка:", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "Попередня навичка:", "I18N_TEACH_BENEFITS_ONE": "Ефективне, якісне навчання для всіх віків", "I18N_TEACH_BENEFITS_THREE": "Завжди безкоштовний та простий у використанні", "I18N_TEACH_BENEFITS_TITLE": "Наші переваги", @@ -737,10 +811,14 @@ "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 урок} other{# уроків}}", "I18N_TOPIC_VIEWER_CHAPTER": "Глава", "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{1 глава} other{# глав}}", + "I18N_TOPIC_VIEWER_COMING_SOON": "Незабаром!", + "I18N_TOPIC_VIEWER_COMING_SOON_LESSONS": "Поверніться пізніше, коли будуть доступні уроки з цієї теми.", + "I18N_TOPIC_VIEWER_COMING_SOON_PRACTICE": "Поверніться пізніше, коли будуть доступні практичні запитання для цієї теми.", "I18N_TOPIC_VIEWER_DESCRIPTION": "Опис", "I18N_TOPIC_VIEWER_LESSON": "Урок", "I18N_TOPIC_VIEWER_LESSONS": "Уроки", "I18N_TOPIC_VIEWER_MASTER_SKILLS": "Майстерні навички для <[topicName]>", + "I18N_TOPIC_VIEWER_NO_QUESTION_WARNING": "Для вибраних підтем ще не створено жодних запитань.", "I18N_TOPIC_VIEWER_PRACTICE": "Практика", "I18N_TOPIC_VIEWER_REVISION": "Редакція", "I18N_TOPIC_VIEWER_SELECT_SKILLS": "Виберіть навички з уроків <[topicName]>, які б ви хотіли відпрацювати.", @@ -748,6 +826,7 @@ "I18N_TOPIC_VIEWER_SKILLS": "Навички", "I18N_TOPIC_VIEWER_START_PRACTICE": 
"Початок", "I18N_TOPIC_VIEWER_STORIES": "Розповіді", + "I18N_TOPIC_VIEWER_STORIES_HEAD": "Історії, які можна відтворити", "I18N_TOPIC_VIEWER_STORY": "Розповідь", "I18N_TOPIC_VIEWER_STUDY_SKILLS": "Навчання навикам для <[topicName]>", "I18N_TOPIC_VIEWER_STUDY_SKILLS_SUBTITLE": "Використовуйте наведені нижче картки огляду, щоб допомогти вам вивчити навички щодо <[topicName]>.", @@ -758,27 +837,38 @@ "I18N_TOPNAV_ADMIN_PAGE": "Сторінка адміністратора", "I18N_TOPNAV_BLOG": "Блог", "I18N_TOPNAV_BLOG_DASHBOARD": "Інформаційна панель блогу", - "I18N_TOPNAV_CLASSROOM": "Клас", - "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Основи математики", + "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "Математичні основи", "I18N_TOPNAV_CONTACT_US": "Зв'язатися з нами", + "I18N_TOPNAV_CONTACT_US_DESCRIPTION": "Ми тут, щоб допомогти із будь-якими запитаннями.", "I18N_TOPNAV_CONTRIBUTOR_DASHBOARD": "Інформаційна панель автора", "I18N_TOPNAV_CREATOR_DASHBOARD": "Інформаційна панель для творців", "I18N_TOPNAV_DONATE": "Пожертвувати", + "I18N_TOPNAV_DONATE_DESCRIPTION": "Ваш внесок допомагає забезпечити якісну освіту для всіх.", "I18N_TOPNAV_FORUM": "Форум", "I18N_TOPNAV_GET_INVOLVED": "Взяти участь", "I18N_TOPNAV_GET_STARTED": "Розпочати", + "I18N_TOPNAV_HOME": "Домашня", + "I18N_TOPNAV_LEARN": "Навчання", "I18N_TOPNAV_LEARNER_DASHBOARD": "Інформаційна панель для учнів", - "I18N_TOPNAV_LIBRARY": "Бібліотека", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "Дружні уроки для початківців, які допоможуть вам розпочати роботу з математики.", + "I18N_TOPNAV_LEARN_HEADING": "Способи навчитися більше", + "I18N_TOPNAV_LEARN_LINK_1": "Переглянути усі уроки", + "I18N_TOPNAV_LEARN_LINK_2": "Продовжити навчання", + "I18N_TOPNAV_LIBRARY": "Бібліотека спільноти", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "Додаткові ресурси, створені спільнотою, щоб допомогти вам навчатися більше.", "I18N_TOPNAV_LOGOUT": "Вийти", "I18N_TOPNAV_MODERATOR_PAGE": "Сторінка модератора", "I18N_TOPNAV_OPPIA_FOUNDATION": "Фонд «Oppia»", 
"I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "Зошит участі", - "I18N_TOPNAV_PARTNERSHIPS": "Партнерства", + "I18N_TOPNAV_PARTNERSHIPS": "Школи та організації", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "Ставайте партнером і принесіть Oppia у свою школу, громаду чи місцевість.", "I18N_TOPNAV_PREFERENCES": "Налаштування", "I18N_TOPNAV_SIGN_IN": "Увійти", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "Увійти за допомогою Google", "I18N_TOPNAV_TEACH_WITH_OPPIA": "Вивчайте з Oppia", "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "Інформаційна панель тем і навичок", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "Спробуйте це сьогодні!", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "Долучайтесь до міжнародної команди аби створювати та покращувати уроки.", "I18N_TOTAL_SUBSCRIBERS_TEXT": "Всього у вас є <[totalSubscribers]>.", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "Відписатися", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "Волонтер", diff --git a/assets/i18n/vi.json b/assets/i18n/vi.json index a02e345403ee..f58d85eea10b 100644 --- a/assets/i18n/vi.json +++ b/assets/i18n/vi.json @@ -42,10 +42,8 @@ "I18N_DASHBOARD_STATS_TOTAL_PLAYS": "Tổng số vở", "I18N_DASHBOARD_STATS_TOTAL_SUBSCRIBERS": "Người đăng ký", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "Kéo bức hình tới đây", - "I18N_DONATE_PAGE_TITLE": "Quyên góp cho
Oppia Foundation", + "I18N_DONATE_PAGE_TITLE": "Quyên góp cho Oppia Foundation", "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "Lắng nghe từ cộng đồng Oppia của chúng tôi", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "Năm 2012, Oppia bắt đầu với một ý tưởng đơn giản: cải thiện nền giáo dục của sinh viên trên toàn thế giới đồng thời nâng cao chất lượng giảng dạy. Từ đó, tầm nhìn này đã trở thành một nền tảng giáo dục với hơn 11.000 khám phá đã được sử dụng bởi hơn 430.000 người dùng trên toàn thế giới.", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "Hãy quyên góp cho The Oppia Foundation, một tổ chức phi lợi nhuận 501(c)(3) đã đăng ký, và cùng chúng tôi mang lại niềm vui dạy và học cho mọi người ở khắp mọi nơi.", "I18N_ERROR_DISABLED_EXPLORATION": "Vô hiệu hóa cuộc hành trình", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "Xin lỗi bạn, nhưng cuộc hành trình này đang bị khóa. Hãy thử lại sau ít phút nữa.", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "Vô hiệu hóa cuộc hành trình này - Oppia", @@ -260,7 +258,6 @@ "I18N_SIDEBAR_CONTACT_US": "Liên lạc", "I18N_SIDEBAR_DONATE": "Giúp đỡ", "I18N_SIDEBAR_FORUM": "Diễn Đàn", - "I18N_SIDEBAR_GET_STARTED": "Bắt đầu", "I18N_SIDEBAR_LIBRARY_LINK": "Thư viện", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "Cùng Oppia dạy học", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "Bấm chuột nếu bạn đồng ý với điều kiện sử dụng của <[sitename]>, ở đây.", @@ -311,7 +308,8 @@ "I18N_TOPNAV_DONATE": "Đóng góp", "I18N_TOPNAV_FORUM": "Diễn Đàn", "I18N_TOPNAV_GET_STARTED": "Bắt đầu", - "I18N_TOPNAV_LIBRARY": "Thư Viện", + "I18N_TOPNAV_LEARN": "Tìm hiểu", + "I18N_TOPNAV_LIBRARY": "Thư Viện Cộng đồng", "I18N_TOPNAV_LOGOUT": "Đăng Xuất", "I18N_TOPNAV_MODERATOR_PAGE": "Trang dàng cho moderator(điều phối viên)", "I18N_TOPNAV_OPPIA_FOUNDATION": "Tổ chức Oppia", diff --git a/assets/i18n/zh-hans.json b/assets/i18n/zh-hans.json index fb82cae7cf1d..27bee2b750c3 100644 --- a/assets/i18n/zh-hans.json +++ b/assets/i18n/zh-hans.json @@ -1,4 +1,6 @@ { + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": 
"关于Oppia基金会", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "关于Oppia基金会 | Oppia", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "创建一个探险", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "关于您关心的话题。", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "获得反馈", @@ -64,13 +66,20 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "教师指南", "I18N_ACTION_TIPS_FOR_PARENTS": "给父母和监护人的提示", "I18N_ACTION_VISIT_CLASSROOM": "拜访教室", + "I18N_ADD_SYLLABUS_SEARCH_PLACEHOLDER": "搜索例如故事,物理,英语", "I18N_ATTRIBUTION_HTML_STEP_ONE": "复制并粘贴HTML", "I18N_ATTRIBUTION_HTML_STEP_TWO": "请确保链接显示为“<[linkText]>”", + "I18N_ATTRIBUTION_HTML_TITLE": "HTML 中的属性", + "I18N_ATTRIBUTION_PRINT_STEP_ONE": "复制并粘贴学分", "I18N_ATTRIBUTION_PRINT_STEP_TWO": "附上“<[link]>”的副本", + "I18N_ATTRIBUTION_PRINT_TITLE": "打印属性", "I18N_BLOG_CARD_PREVIEW_CONTEXT": "这是你的博客卡片如何将出现在你的主页面和你的作者档案。", "I18N_BLOG_CARD_PREVIEW_HEADING": "博客卡片预览", "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "创建新的博客文章", - "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "看起来你还没有创建你的新的故事!", + "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "看起来你还没有创建任何博客文章!", + "I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "最新帖子", + "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "草稿", + "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "已发布", "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "添加一个缩放图片", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "正文", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "取消", @@ -90,18 +99,31 @@ "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "错误:不能读取图片文件。", "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "添加一个缩放图", "I18N_BLOG_POST_UNTITLED_HEADING": "无标题", + "I18N_CARD_CONTENT_LIMIT_ERROR_MESSAGE": "此卡片内容过长。 请保持在 4500 个字符以下以保存。", "I18N_CARD_HEIGHT_LIMIT_WARNING_MESSAGE": "这卡片太长了,可能会让学生失去兴趣。请考虑缩短,或者拆分成两个卡片。", "I18N_CHAPTER_COMPLETION": "恭喜完成章节!", "I18N_CLASSROOM_CALLOUT_BUTTON": "探索", "I18N_CLASSROOM_CALLOUT_HEADING_1": "数学基础", "I18N_CLASSROOM_CALLOUT_HEADING_2": "介绍:Oppia教室", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "查看全新Oppia教室中的第一门综合课程!精选课程——由教育工作者审核——以便您掌握从位值到乘除法等主题的基本数学技能。", + "I18N_CLASSROOM_MATH_TITLE": "数学公式", "I18N_CLASSROOM_PAGE_COMING_SOON": "即将推出", 
"I18N_CLASSROOM_PAGE_COURSE_DETAILS": "课程详情", "I18N_CLASSROOM_PAGE_HEADING": "Oppia教室", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "探索更多社区制作的课程", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "搜索我们的社区图书馆", + "I18N_CLASSROOM_PAGE_TITLE": "和Oppia一起学习<[classroomName]> | Oppia", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "涵盖的主题", + "I18N_COLLECTION_EDITOR_PAGE_TITLE": "<[collectionTitle]> - Oppia编辑器", + "I18N_COLLECTION_EDITOR_UNTITLED_COLLECTION_PAGE_TITLE": "无标题 - Oppia编辑器", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "开始", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "继续", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "将鼠标悬停在图标上以预览探索。", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "<[collectionTitle]> - Oppia", + "I18N_COMING_SOON": "即将推出!", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "收藏", + "I18N_COMPLETED_STORY": "已完成<[story]>", + "I18N_COMPLETE_CHAPTER": "完成在<[topicName]>的一个章节", "I18N_CONTACT_PAGE_BREADCRUMB": "联络", "I18N_CONTACT_PAGE_HEADING": "参与!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "感谢您对帮助Oppia项目感兴趣!", @@ -130,9 +152,13 @@ "I18N_CONTACT_PAGE_PARAGRAPH_8": "所以,如果您想为世界各地的学生创造免费、有用的课程,您就来对地方了。我们欢迎您查看我们的创作者教程现有课程,以及开始创建自己的课程。此外,如果您想确保您的课程具有重大影响力,请考虑申请我们的与Oppia共同教学计划,我们将帮助您创建,测试,并改善您的探索来获得最佳影响力。", "I18N_CONTACT_PAGE_PARAGRAPH_9": "喜欢现有的探索,但发现了还能更好的地方?您可以直接从探索页面提出对任何探索的改良建议。 只需点击右上角的铅笔图标,即可分享您认为可以改进的内容。课程的创建者将收到您的建议,并有机会将他们融入探索。这是一种非常有价值的贡献方式,特别是如果您可以将您的建议建立在学生的体验的基础上进行探索。", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "改进现有的探险", + "I18N_CONTACT_PAGE_TITLE": "联系方式 | Oppia", "I18N_CONTINUE_REGISTRATION": "继续注册", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "确定", "I18N_COOKIE_BANNER_EXPLANATION": "本网站使用cookie和类似技术来支持核心功能、确保网站安全与分析网站流量。在隐私政策中了解更多信息。", + "I18N_CORRECT_FEEDBACK": "正确!", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "你的群组连接", + "I18N_CREATE_ACCOUNT": "创建账号", "I18N_CREATE_ACTIVITY_QUESTION": "您想创建什么?", "I18N_CREATE_ACTIVITY_TITLE": "创建活动", "I18N_CREATE_COLLECTION": "创建收藏", @@ -141,6 +167,7 @@ "I18N_CREATE_EXPLORATION_QUESTION": "您是否希望创建探险?", "I18N_CREATE_EXPLORATION_TITLE": "创建一个探险", 
"I18N_CREATE_EXPLORATION_UPLOAD": "上传", + "I18N_CREATE_LEARNER_GROUP_PAGE_TITLE": "创建学习者组|奥皮亚", "I18N_CREATE_NO_THANKS": "不用了,谢谢", "I18N_CREATE_YES_PLEASE": "是的,请!", "I18N_CREATOR_IMPACT": "影响", @@ -182,19 +209,41 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_1_4": "有关用户创建的探索与收藏的统计信息", "I18N_DELETE_ACCOUNT_PAGE_LIST_1_5": "对任何探索的草稿编辑", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_1": "反馈意见与建议", - "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "用户统计", + "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "对有其他所有者的公共探索和收藏的承诺", + "I18N_DELETE_ACCOUNT_PAGE_LIST_2_3": "对于主题、故事、技能和问题的提交", "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "为了确认删除,请在下面的字段中输入您的用户名,然后按“删除我的帐户”按钮。 此操作不能撤消。", - "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "该行动会删除本用户与所有与他相关的隐私信息。为了使信息无法与本用户相联,已经公开的信息会被转为匿名。其中一些数据的所有权可能会转移到社群。", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "该行动会删除本用户与所有与他相关的隐私信息。为了使信息无法与本用户相联,已经公开的信息会被转为匿名,备份数据(将会保存6个月)除外。下面提到的某些分类可能不适用于您的账户。", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "概览", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "以下是将被删除的数据类型:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "以下是将被匿名化的数据类型:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "此外,没有其他所有者的已发布探索和收藏将转换为社区所有权。", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "如果您对账户删除有疑问或担心,请联系privacy@oppia.org。", + "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "这将带您到一个页面,您可以在其中删除您的 Oppia 帐户。", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "删除账号 | Oppia", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "将文件拖放到此区域", "I18N_DIRECTIVES_UPLOAD_A_FILE": "上传一个文件", "I18N_DONATE_PAGE_BREADCRUMB": "捐赠", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "捐款 | 做出正面影响 | Oppia", + "I18N_DONATE_PAGE_BUDGET_HEADING": "你的钱去哪里了?", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "维护", + "I18N_DONATE_PAGE_FAQ_ANSWER_10": "有关 Oppia 的一般问题,请联系 contact@oppia.org。", + "I18N_DONATE_PAGE_FAQ_QUESTION_4": "Oppia与其他在线教育平台有何不同?", + "I18N_DONATE_PAGE_FAQ_QUESTION_6": "我如何通过支票付款?", + "I18N_DONATE_PAGE_HEADING_2": "高质量和引人入胜的教育。", "I18N_DONATE_PAGE_IMAGE_TITLE": "您的慷慨捐赠资金:", - "I18N_DONATE_PAGE_TITLE": "捐款给
Oppia基金会", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "来自印度", + "I18N_DONATE_PAGE_STATISTIC_2": "我们虚拟图书馆的课程", + "I18N_DONATE_PAGE_STATISTIC_4": "来自世界各地的志愿者", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_HEADING": "感谢订阅!", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_1": "您很快就会在收件箱中收到更新。我们承诺不会发送垃圾邮件,您也可以随时退订。", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_2": "在我们社区(包括您)的帮助和支持下,Oppia 已经为世界上资源最匮乏的学习者提供服务,并将持续提供该服务。", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "电邮地址", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "感谢捐赠!", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_3": "如有任何疑问,请随时与我们联系。", + "I18N_DONATE_PAGE_TITLE": "捐款给Oppia基金会", + "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "听取我们 Oppia 社区的意见", + "I18N_EMPTY_LEARNER_GROUPS_MESSAGE": "您还没有任何群组", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "这是您接下来可以做的!", "I18N_ERROR_DISABLED_EXPLORATION": "禁用的探险", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "对不起,您点击的探险目前已被禁用。请稍后重试。", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "禁用的探险 - Oppia", @@ -207,13 +256,19 @@ "I18N_ERROR_MESSAGE_404": "对不起,我们看了一遍又一遍,但我们找不到那个页面。", "I18N_ERROR_MESSAGE_500": "发生了可怕的错误。但这不怪您。发生了内部错误。", "I18N_ERROR_NEXT_STEPS": "现在应该做的最好的事情可能是返回\">首页。然而,如果该错误仍然发生,并且您认为它不该发生的话,请在我们的\" target=\"_blank\">问题追踪器上让我们知道这个错误。不便之处敬请谅解。", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "错误<[statusCode]> | Oppia", + "I18N_ERROR_PAGE_TITLE": "错误<[statusCode]> - Oppia", "I18N_ERROR_PAGE_TITLE_400": "错误400 - Oppia", "I18N_ERROR_PAGE_TITLE_401": "错误401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "错误404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "错误500 - Oppia", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": "准备好获得更多纸杯蛋糕了吗?参加这个简短的测验,看看您对目前所学知识的理解!", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "什么是比率?", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE": "什么是分区?", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "匿名", "I18N_FOOTER_ABOUT": "关于", "I18N_FOOTER_ABOUT_ALL_CAPS": "关于OPPIA", + "I18N_FOOTER_ANDROID_APP": "安卓应用", "I18N_FOOTER_AUTHOR_PROFILES": "作者详情", "I18N_FOOTER_BROWSE_LIBRARY": "浏览图书馆", "I18N_FOOTER_CONTACT_US": 
"联系我们", @@ -276,7 +331,7 @@ "I18N_INTERACTIONS_GRAPH_UPDATE_WEIGHT": "更新重量", "I18N_INTERACTIONS_IMAGE_CLICK_SELECT": "[选择一个图片以显示]", "I18N_INTERACTIONS_ITEM_SELECTION_MORE": "您可以选择更多选项。", - "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, =1{请选择一个或多个选择。} other{请选择#个或多个选择。}}", + "I18N_INTERACTIONS_ITEM_SELECTION_NOT_ENOUGH": "{minChoiceNumber, plural, =1{请选择所有正确的选项。} other{请选择#个或多个选项。}}", "I18N_INTERACTIONS_ITEM_SELECTION_PREVENT_MORE": "{maxAllowableSelectionCount, plural, =1{最多可以选择1个选项。} other{最多可以选择#个选项。}}", "I18N_INTERACTIONS_MUSIC_CLEAR": "清空", "I18N_INTERACTIONS_MUSIC_PLAY": "播放", @@ -341,7 +396,7 @@ "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "正在发送...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "白银", "I18N_LEARNER_DASHBOARD_SKILLS": "技巧", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "技巧熟练度", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "技能进步", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "订阅", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "进度:", "I18N_LEARNER_DASHBOARD_SUGGESTION_CURRENT_STATE_CONTENT_HEADER": "当前:", @@ -351,6 +406,15 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "建议", "I18N_LEARNER_DASHBOARD_TOOLTIP": "收藏是多个计划按序列完成的相关探险。", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "查看建议", + "I18N_LEARNER_GROUPS_SECTION_TITLE": "你的学习者群组", + "I18N_LEARNER_GROUP_ADD_TO_SYLLABUS": "添加到教学大纲", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "下一个", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "上一步", + "I18N_LEARNER_GROUP_DETAILS_GROUP_DESCRIPTION": "小组描述(用 2-4 行描述小组目标)", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "组标题", + "I18N_LEARNER_GROUP_INVITE_LIST_TEXT": "邀请列表", + "I18N_LEARNER_GROUP_ITEM_ADDED_TO_SYLLABUS": "以添加", + "I18N_LEARNER_GROUP_STORY_VIEW_DETAILS": "查看详情", "I18N_LIBRARY_ACTIVITY_COMPLETED_ICON": "您已完成了这个", "I18N_LIBRARY_ACTIVITY_IN_LEARNER_PLAYLIST": "已添加到播放列表", "I18N_LIBRARY_ADD_TO_LEARNER_PLAYLIST": "添加到“稍后播放”列表", @@ -406,7 +470,7 @@ "I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS": 
"最受欢迎的探险", "I18N_LIBRARY_INCOMPLETE_ACTIVITY_ICON": "您在这个活动中完成了一半任务。", "I18N_LIBRARY_LAST_UPDATED": "最后更新", - "I18N_LIBRARY_LOADING": "正在载入", + "I18N_LIBRARY_LOADING": "正在加载", "I18N_LIBRARY_MAIN_HEADER": "想象您今天可以学到什么...", "I18N_LIBRARY_N/A": "不可用", "I18N_LIBRARY_NO_EXPLORATIONS": "天哪,这里没有可以显示的探险。", @@ -418,7 +482,7 @@ "I18N_LIBRARY_PAGE_TITLE": "社区图书馆 | Oppia", "I18N_LIBRARY_RATINGS_TOOLTIP": "评分", "I18N_LIBRARY_SEARCH_PLACEHOLDER": "您对什么比较好奇?", - "I18N_LIBRARY_SUB_HEADER": "通过浏览我们的探险,开始您的探险经历。", + "I18N_LIBRARY_SUB_HEADER": "浏览由社区创建的全套课程", "I18N_LIBRARY_VIEWS_TOOLTIP": "查看", "I18N_LIBRARY_VIEW_ALL": "查看全部", "I18N_LICENSE_PAGE_LICENSE_HEADING": "许可协议", @@ -432,7 +496,7 @@ "I18N_PARTNERSHIPS_PAGE_BREADCRUMB": "合作伙伴", "I18N_PENDING_ACCOUNT_DELETION_PAGE_HEADING": "将要被删除的账户", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1": "您的用户计划于在24小时内被删除。完成删除后将通过电子邮箱通知您。", - "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2": "该行动会删除本用户与所有与他相关的隐私信息。为了使信息无法与本用户相联,已经公开的信息会被转为匿名。其中一些数据的所有权可能会转移到社群。", + "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2": "此操作将会删除该用户账号与所有关联该账号的隐私数据。已公开的数据因为无法与账号关联,因此会匿名处理。一些已公开数据的所有权则可能会转移到社区。", "I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_2_HEADING": "删除详情", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_HEADING": "社区指导", "I18N_PLAYBOOK_HEADING": "创始人指引", @@ -474,8 +538,8 @@ "I18N_PLAYER_IS_PRIVATE": "这项探索是私人的。", "I18N_PLAYER_LAST_UPDATED_TOOLTIP": "最后更新", "I18N_PLAYER_LEARN_AGAIN_BUTTON": "再次学习", - "I18N_PLAYER_LEAVE_FEEDBACK": "为作者留下反馈...", - "I18N_PLAYER_LOADING": "正在载入...", + "I18N_PLAYER_LEAVE_FEEDBACK": "给作者留下反馈。(提交后,会一同包括您目前所探索卡片的参考。)", + "I18N_PLAYER_LOADING": "正在加载…", "I18N_PLAYER_NEXT_LESSON": "下一节课", "I18N_PLAYER_NO_OBJECTIVE": "未指定目标。", "I18N_PLAYER_NO_TAGS": "未指定标签。", @@ -511,6 +575,7 @@ "I18N_PLAYER_THANK_FEEDBACK": "感谢您的反馈!", "I18N_PLAYER_UNRATED": "未评级", "I18N_PLAYER_VIEWS_TOOLTIP": "查看", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "添加语言", "I18N_PREFERENCES_BIO": "个人简介", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "此字段可选。您在这里写下的任何东西都是公开的,全世界人都可以看到的。", 
"I18N_PREFERENCES_BREADCRUMB": "参数设置", @@ -525,7 +590,7 @@ "I18N_PREFERENCES_EMAIL_SIGNUP_TEXT": "我们无法自动将您加入我们的邮件列表。请访问下面的链接以注册到我们的邮件列表:", "I18N_PREFERENCES_HEADING": "参数设置", "I18N_PREFERENCES_HEADING_SUBTEXT": "在此页面做出的任何更改都将被自动保存。", - "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "您尚未订阅任何创建者。", + "I18N_PREFERENCES_NO_SUBSCRIPTIONS": "您尚未订阅任何创作者。通过点击作者个人资料页面中的“订阅”按钮,随时订阅您喜欢的作者。通过订阅作者,当作者发布新课程时,您将收到电子邮件通知。", "I18N_PREFERENCES_PAGE_TITLE": "更改您的个人设置 - Oppia", "I18N_PREFERENCES_PICTURE": "图片", "I18N_PREFERENCES_PREFERRED_AUDIO_LANGUAGE": "首选音频语言", @@ -541,6 +606,7 @@ "I18N_PREFERENCES_PROFILE_PICTURE_DRAG": "拖放以裁剪并调整大小:", "I18N_PREFERENCES_PROFILE_PICTURE_ERROR": "错误:不能读取图片文件。", "I18N_PREFERENCES_PROFILE_PICTURE_UPLOAD": "上传个人照片", + "I18N_PREFERENCES_SEARCH_LABEL": "搜索", "I18N_PREFERENCES_SELECT_EXPLORATION_LANGUAGE": "选择首选语言...", "I18N_PREFERENCES_SUBJECT_INTERESTS": "学科兴趣", "I18N_PREFERENCES_SUBJECT_INTERESTS_HELP_BLOCK": "例如:数学、计算机科学、艺术等...", @@ -557,12 +623,11 @@ "I18N_SAVE_PROGRESS": "请登录或注册来保存您的进度并完成下一课。", "I18N_SHARE_LESSON": "分享此课程", "I18N_SHOW_SOLUTION_BUTTON": "显示解法", - "I18N_SIDEBAR_ABOUT_LINK": "关于Oppia", + "I18N_SIDEBAR_ABOUT_LINK": "关于我们", "I18N_SIDEBAR_BLOG": "博客", "I18N_SIDEBAR_CONTACT_US": "联系我们", "I18N_SIDEBAR_DONATE": "捐款", "I18N_SIDEBAR_FORUM": "论坛", - "I18N_SIDEBAR_GET_STARTED": "入门", "I18N_SIDEBAR_LIBRARY_LINK": "图书馆", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Oppia基金会", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "通过Oppia教学", @@ -585,7 +650,7 @@ "I18N_SIGNUP_FIELD_REQUIRED": "此字段是必需的。", "I18N_SIGNUP_LICENSE_NOTE": "请注意,通过同意我们的使用条款,您同意任何您在我们网站做出的内容或贡献都将会以知识共享 署名-相同方式共享 4.0版(CC-BY-SA v.4.0)许可协议授权,并放弃署名(BY)要求。请查阅我们的使用条款以了解更多有关许可协议授权的信息。有关更多关于CC-BY-SA的新鲜,请点此。", "I18N_SIGNUP_LICENSE_OBJECTIVE": "使用<[licenselink]>许可协议允许探险内容被自由复制、复用、修改并再分发。主要限制是如果有人以素材为基础修改、转移或构造,其也必须按照相同的自由许可协议分发其作品。", - "I18N_SIGNUP_LOADING": "正在载入", + "I18N_SIGNUP_LOADING": "正在加载", "I18N_SIGNUP_PAGE_TITLE": "加入社群 - Oppia", "I18N_SIGNUP_REGISTRATION": "注册", "I18N_SIGNUP_SEND_ME_NEWS": "发送我有关网站的新闻和更新", @@ 
-605,13 +670,13 @@ "I18N_SPLASH_JAVASCRIPT_ERROR_THANKS": "感谢您。", "I18N_SPLASH_JAVASCRIPT_ERROR_TITLE": "我们需要您的浏览器启用JavaScript", "I18N_SPLASH_LEARN_MORE": "了解更多", - "I18N_SPLASH_PAGE_TITLE": "Oppia:教学、学习、探索", + "I18N_SPLASH_PAGE_TITLE": "Oppia | 所有人都可免费使用的在线互动课程", "I18N_SPLASH_SECOND_EXPLORATION_DESCRIPTION": "创建探险很简单。这很容易适应以个别学生的反馈为基础,加上您的课程在全世界的经验趋势的环境。", "I18N_SPLASH_SITE_FEEDBACK": "网站反馈", "I18N_SPLASH_START_CONTRIBUTING": "开始贡献", "I18N_SPLASH_START_LEARNING": "开始学习", "I18N_SPLASH_START_TEACHING": "开始教学", - "I18N_SPLASH_SUBTITLE": "Oppia使创建交互式课程来从事教育,并与其建立关系变得容易。", + "I18N_SPLASH_SUBTITLE": "适用于每一位的迷人、有效果的优质教育", "I18N_SPLASH_TEACHERS_TITLE": "简单地分享您的知识", "I18N_SPLASH_THIRD_EXPLORATION_DESCRIPTION": "Oppia让您创建分享范围广泛的学科探险,只因您的想象而不同。", "I18N_SPLASH_TITLE": "人人享有的免费教育", @@ -619,11 +684,16 @@ "I18N_START_HERE": "点此开始!", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - 己完成!", "I18N_SUBSCRIBE_BUTTON_TEXT": "订阅", - "I18N_TEACH_PAGE_HEADING": "帮助全世界的学生", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "下一个技能:", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "以前的技能:", + "I18N_TEACH_PAGE_HEADING": "给家长、教师、监护人的Oppia", "I18N_TOPIC_VIEWER_DESCRIPTION": "描述", "I18N_TOPIC_VIEWER_LESSON": "会话", "I18N_TOPIC_VIEWER_LESSONS": "会话", "I18N_TOPIC_VIEWER_PRACTICE": "练习", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_MESSAGE": "练习功能只能在未发布版本里用英语使用。想要继续吗?", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_NOTICE_MODAL_TITLE": "\n确认练习语言", + "I18N_TOPIC_VIEWER_PRACTICE_SESSION_BETA_TAG": "未发布版本", "I18N_TOPIC_VIEWER_REVISION": "修订版本", "I18N_TOPIC_VIEWER_START_PRACTICE": "开始", "I18N_TOPIC_VIEWER_STORIES": "故事", @@ -633,26 +703,29 @@ "I18N_TOPNAV_ABOUT": "关于", "I18N_TOPNAV_ABOUT_OPPIA": "关于Oppia", "I18N_TOPNAV_ADMIN_PAGE": "管理页面", + "I18N_TOPNAV_ANDROID_APP_HEADING": "安卓应用", "I18N_TOPNAV_BLOG": "博客", "I18N_TOPNAV_CLASSROOM_BASIC_MATHS": "基础数学", "I18N_TOPNAV_CONTACT_US": "联系我们", "I18N_TOPNAV_CREATOR_DASHBOARD": "创建者面板", "I18N_TOPNAV_DONATE": "捐款", + "I18N_TOPNAV_FACILITATOR_DASHBOARD": "主持人仪表板", 
"I18N_TOPNAV_FORUM": "论坛", "I18N_TOPNAV_GET_INVOLVED": "参与", "I18N_TOPNAV_GET_STARTED": "入门", "I18N_TOPNAV_LEARNER_DASHBOARD": "学习者面板", - "I18N_TOPNAV_LIBRARY": "图书馆", + "I18N_TOPNAV_LIBRARY": "社群图书馆", "I18N_TOPNAV_LOGOUT": "退出", "I18N_TOPNAV_MODERATOR_PAGE": "版主页面", "I18N_TOPNAV_OPPIA_FOUNDATION": "Oppia基金会", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "合作伙伴Playbook", - "I18N_TOPNAV_PARTNERSHIPS": "合作关系", + "I18N_TOPNAV_PARTNERSHIPS": "学校和组织", "I18N_TOPNAV_PREFERENCES": "参数设置", "I18N_TOPNAV_SIGN_IN": "登录", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "使用Google登录", "I18N_TOPNAV_TEACH_WITH_OPPIA": "通过Oppia教学", "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "主题和技能仪表板", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "今天就试试吧!", "I18N_TOTAL_SUBSCRIBERS_TEXT": "您共有<[totalSubscribers]>位订阅者。", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "取消订阅", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "志愿者", diff --git a/assets/i18n/zh-hant.json b/assets/i18n/zh-hant.json index 756ac243dc45..817ea4c2a923 100644 --- a/assets/i18n/zh-hant.json +++ b/assets/i18n/zh-hant.json @@ -1,6 +1,7 @@ { - "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "關於基金會", - "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "創建一個探索", + "I18N_ABOUT_FOUNDATION_PAGE_BREADCRUMB": "關於 Oppia 基金會", + "I18N_ABOUT_FOUNDATION_PAGE_TITLE": "關於 Oppia 基金會 | Oppia", + "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE": "建立探索", "I18N_ABOUT_PAGE_ABOUT_TAB_CREATE_TEXT": "關於您關注的主題。", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK": "取得意見回饋", "I18N_ABOUT_PAGE_ABOUT_TAB_FEEDBACK_TEXT": "來改進您的探索。", @@ -54,6 +55,7 @@ "I18N_ABOUT_PAGE_TITLE": "關於 | Oppia", "I18N_ABOUT_PAGE_TITLE_SECTION_ONE": "與 Oppia 一起開始", "I18N_ABOUT_PAGE_WIFI_FEATURE": "低頻寬要求", + "I18N_ACTION_ACCESS_ANDROID_APP": "訪問 Android app", "I18N_ACTION_APPLY_TO_TEACH_WITH_OPPIA": "申請以 Oppia 教學", "I18N_ACTION_BROWSE_EXPLORATIONS": "瀏覽我們的探索", "I18N_ACTION_BROWSE_LESSONS": "瀏覽我們的課程", @@ -65,6 +67,27 @@ "I18N_ACTION_GUIDE_FOR_TEACHERS": "導師指南", "I18N_ACTION_TIPS_FOR_PARENTS": "給家長與監護人的提示", "I18N_ACTION_VISIT_CLASSROOM": "參訪教室", + "I18N_ADD_NEW_SYLLABUS_CANCEL_BUTTON_TEXT": 
"取消", + "I18N_ADD_NEW_SYLLABUS_DONE_BUTTON_TEXT": "完成", + "I18N_ADD_NEW_SYLLABUS_ITEMS": "新教學大綱項目", + "I18N_ADD_SYLLABUS_SEARCH_PLACEHOLDER": "搜索如故事、物理、英語", + "I18N_ANDROID_PAGE_AVAILABLE_FOR_DOWNLOAD_TEXT": "可供下載。", + "I18N_ANDROID_PAGE_CONSENT_CHECKBOX_LABEL": "您確認您已年滿 18 歲,或著是您已獲得法定父母或監護人的同意和許可。", + "I18N_ANDROID_PAGE_EMAIL_FIELD_LABEL": "電子郵件地址", + "I18N_ANDROID_PAGE_FEATURES_SECTION_HEADER": "給予世上任何人 教育。", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_2": "即使沒有連接網際網路也能遊玩課程。", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_3": "該應用程序提供英語巴西葡萄牙語 版本。", + "I18N_ANDROID_PAGE_FEATURE_SUBTEXT_4": "不久將會添加更多語言!", + "I18N_ANDROID_PAGE_FEATURE_TEXT_1": "透過引人入勝的故事來學習", + "I18N_ANDROID_PAGE_FEATURE_TEXT_2": "在任何時間地點學習", + "I18N_ANDROID_PAGE_FEATURE_TEXT_3": "以您的語言來學習", + "I18N_ANDROID_PAGE_FEATURE_TEXT_4": "在學習者之間切換", + "I18N_ANDROID_PAGE_NAME_FIELD_LABEL": "名稱", + "I18N_ANDROID_PAGE_TITLE": "Android | Oppia", + "I18N_ANDROID_PAGE_UPDATES_MAIN_TEXT": "訂閱以接收 Oppia Android 應用程序的更新", + "I18N_ANDROID_PAGE_UPDATES_SUBMIT_BUTTON_TEXT": "提醒我", + "I18N_ANSWER_MISSPELLED_RESPONSE_TEXT_2": "請重新檢查您的拼寫。", + "I18N_ASSIGNED_STORIES_AND_SKILLS": "分配的故事和技能", "I18N_ATTRIBUTION_HTML_STEP_ONE": "複製並貼上 HTML", "I18N_ATTRIBUTION_HTML_STEP_TWO": "確認連結是顯示成「<[linkText]>」", "I18N_ATTRIBUTION_HTML_TITLE": "在 HTML 的歸屬", @@ -72,20 +95,34 @@ "I18N_ATTRIBUTION_PRINT_STEP_TWO": "附上「<[link]>」的副本", "I18N_ATTRIBUTION_PRINT_TITLE": "在印刷品的歸屬", "I18N_ATTRIBUTION_TITLE": "如何在分享或重複使用時歸屬此課程", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_BREADCRUMB": "作者個人檔案", + "I18N_BLOG_AUTHOR_PROFILE_PAGE_TITLE": "部落格 | 作者 | Oppia", "I18N_BLOG_CARD_PREVIEW_CONTEXT": "這是部落格卡片在首頁與您的作者個人檔案上的顯示方式。", "I18N_BLOG_CARD_PREVIEW_HEADING": "部落格卡片預覽", + "I18N_BLOG_DASHBOARD_AUTHOR_BIO_HEADING": "簡介", + "I18N_BLOG_DASHBOARD_AUTHOR_DETAILS_EDITOR_HEADING": "編輯您的作者名稱和簡介", + "I18N_BLOG_DASHBOARD_AUTHOR_NAME_HEADING": "名稱", "I18N_BLOG_DASHBOARD_CREATE_NEW_BLOG_POST_BUTTON": "建立新的部落格文章", "I18N_BLOG_DASHBOARD_INTRO_MESSAGE": "看起來您尚未創建任何故事!", 
"I18N_BLOG_DASHBOARD_PAGE_NEW_POST_BUTTON": "新文章", + "I18N_BLOG_DASHBOARD_PAGE_SAVE_BUTTON": "儲存", "I18N_BLOG_DASHBOARD_TAB_DRAFTS": "草稿", "I18N_BLOG_DASHBOARD_TAB_PUBLISHED": "已發布", + "I18N_BLOG_HOME_PAGE_BREADCRUMB": "部落格", + "I18N_BLOG_HOME_PAGE_POSTS_HEADING": "最新的帖子", + "I18N_BLOG_HOME_PAGE_QUERY_SEARCH_HEADING": "關鍵字", + "I18N_BLOG_HOME_PAGE_TAGS_SEARCH_HEADING": "標籤", + "I18N_BLOG_HOME_PAGE_TAG_FILTER_HOLDER_TEXT": "選擇標籤", + "I18N_BLOG_HOME_PAGE_TITLE": "Oppia 部落格| Oppia", + "I18N_BLOG_HOME_PAGE_WELCOME_HEADING": "歡迎來到 Oppia 部落格!", + "I18N_BLOG_HOME_SEARCH_PAGE_POSTS_HEADING": "顯示搜尋結果", "I18N_BLOG_POST_EDITOR_ADD_THUMBNAIL_TEXT": "增加縮圖圖片", "I18N_BLOG_POST_EDITOR_BODY_HEADING": "正文", "I18N_BLOG_POST_EDITOR_CANCEL_BUTTON_TEXT": "取消", "I18N_BLOG_POST_EDITOR_DELETE_BUTTON": "刪除", "I18N_BLOG_POST_EDITOR_EDIT_THUMBNAIL_TEXT": "編輯縮圖圖片", "I18N_BLOG_POST_EDITOR_LAST_SAVED_PREFIX": "上一次儲存於", - "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "發布", + "I18N_BLOG_POST_EDITOR_PUBLISH_BUTTON": "發佈", "I18N_BLOG_POST_EDITOR_SAVE_CONTENT_BUTTON_TEXT": "完成", "I18N_BLOG_POST_EDITOR_SAVE_DRAFT_BUTTON": "儲存成草稿", "I18N_BLOG_POST_EDITOR_SHOW_PREVIEW_HEADING": "預覽", @@ -94,6 +131,10 @@ "I18N_BLOG_POST_EDITOR_TAGS_LIMIT_SPECIFICATION_SUFFIX": "還可增加更多標籤。", "I18N_BLOG_POST_EDITOR_THUMBNAIL_HEADING": "縮圖", "I18N_BLOG_POST_EDITOR_TITLE_HEADING": "標題", + "I18N_BLOG_POST_PAGE_RECOMMENDATION_SECTON_HEADING": "為您推薦。", + "I18N_BLOG_POST_PAGE_TAGS_HEADING": "標籤", + "I18N_BLOG_POST_PAGE_TITLE": "<[blogPostTitle]> | 部落格 | Oppia", + "I18N_BLOG_POST_THUMBNAIL_ALLOWED_EXTENSIONS_PREFIX": "允許的圖片副檔名:", "I18N_BLOG_POST_THUMBNAIL_PICTURE_DRAG": "挑選檔案或拖拉於此", "I18N_BLOG_POST_THUMBNAIL_PICTURE_ERROR": "錯誤:不能讀取圖片檔案。", "I18N_BLOG_POST_THUMBNAIL_PICTURE_UPLOAD": "增加縮圖", @@ -105,12 +146,46 @@ "I18N_CLASSROOM_CALLOUT_HEADING_1": "數學基礎", "I18N_CLASSROOM_CALLOUT_HEADING_2": "介紹:Oppia 教室", "I18N_CLASSROOM_CALLOUT_PARAGRAPH_1": "在全新的 Oppia 教室試試第一門綜合課程!策劃課程 — 是由教育工作者檢驗 — 因此您可以精通在廣泛主題裡乘法與除法位值的基本數學技巧。", + 
"I18N_CLASSROOM_MATH_TITLE": "數學", + "I18N_CLASSROOM_PAGE_ALREADY_KNOW_SOME_MATH_HEADING": "已經知道一些數學?", + "I18N_CLASSROOM_PAGE_BEGIN_WITH_FIRST_TOPIC_BUTTON": "從<[firstTopic]>開始", "I18N_CLASSROOM_PAGE_COMING_SOON": "即將到來", "I18N_CLASSROOM_PAGE_COURSE_DETAILS": "課程詳細內容", "I18N_CLASSROOM_PAGE_HEADING": "Oppia 教室", + "I18N_CLASSROOM_PAGE_NEW_TO_MATH_HEADING": "數學新手?", "I18N_CLASSROOM_PAGE_SEARCH_BAR_HEADING": "瀏覽更多由社群做出的課程", "I18N_CLASSROOM_PAGE_SEARCH_BAR_SUBHEADING": "透過我們的社群圖書館搜尋", + "I18N_CLASSROOM_PAGE_TITLE": "和 Oppia 一起學習<[classroomName]>|Oppia", "I18N_CLASSROOM_PAGE_TOPICS_COVERED": "涵蓋主題", + "I18N_COLLECTION_EDITOR_PAGE_TITLE": "<[collectionTitle]> - Oppia 編輯器", + "I18N_COLLECTION_EDITOR_UNTITLED_COLLECTION_PAGE_TITLE": "無標題 - Oppia 編輯器", + "I18N_COLLECTION_PLAYER_PAGE_BEGIN": "開始", + "I18N_COLLECTION_PLAYER_PAGE_CONTINUE": "繼續", + "I18N_COLLECTION_PLAYER_PAGE_FINISHED": "您已完成收藏!可隨時重新遊玩下面的任何探索。", + "I18N_COLLECTION_PLAYER_PAGE_HOVER_MESSAGE": "將游標懸停在圖標上來預覽探索。", + "I18N_COLLECTION_PLAYER_PAGE_NO_EXPLORATION": "此收藏裡未添加任何探索。", + "I18N_COLLECTION_PLAYER_PAGE_TITLE": "<[collectionTitle]> - Oppia", + "I18N_COMING_SOON": "即將推出", + "I18N_COMMUNITY_LIBRARY_PAGE_COLLECTION": "收藏", + "I18N_COMPLETED_STORY": "完成<[story]>", + "I18N_COMPLETE_CHAPTER": "完成在<[topicName]>的一個章節", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_FIRST_3": "一個完美的開始!繼續保持!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_1": "你完成了一個關卡!做得好!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_2": "了不起,你完成了一個關卡!繼續加油!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_GENERIC_3": "太棒了,你剛剛完成了一個關卡!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_1": "你已經完成了一半,很快就會完成的!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_2": "你剛剛完成了一半!做得好!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_MIDWAY_3": "哇!你已經完成了課程一半的內容!成果驚人!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_1": "只剩下一個了,喔喔!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_2": "上吧!只剩下一個了!", + 
"I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_ONE_REMAINING_3": "你做得很好,就只剩下一個了!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_1": "你進步的非常快!繼續保持!", + "I18N_CONGRATULATORY_CHECKPOINT_MESSAGE_SECOND_2": "太驚人了!你剛剛完成了第二個關卡!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_1": "好耶!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_2": "好棒!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_4": "幹得好!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_5": "做得好!", + "I18N_CONGRATULATORY_CHECKPOINT_TITLE_6": "不錯嘛!", "I18N_CONTACT_PAGE_BREADCRUMB": "聯絡", "I18N_CONTACT_PAGE_HEADING": "參與!", "I18N_CONTACT_PAGE_PARAGRAPH_1": "感謝您關注於協助 Oppia 專案!", @@ -133,29 +208,33 @@ "I18N_CONTACT_PAGE_PARAGRAPH_4_HEADING": "您可幫助的方式", "I18N_CONTACT_PAGE_PARAGRAPH_5": "在公佈探索之前,我們要確保它內容是有效且令人愉快的。這代表著學生的技能通過遊玩探索而有所改善,即使在沒有任何外部壓力的情況下,學生也可以完成探索裡的任務。", "I18N_CONTACT_PAGE_PARAGRAPH_5_HEADING": "測試現有的探索", - "I18N_CONTACT_PAGE_PARAGRAPH_6": "要查看哪些內容有效及哪些不行方面,透過用戶研究學習可提供很大的幫助。這涉及與學生坐在一起,來觀察他們完成探索的情況、寫下關於學生會覺得混淆部份的備註、過程中他們會覺得不夠有趣、覺得會另人沮喪的時候、或是發現到一個讓人不清楚的解釋。任一探索每次一同與 2 至 3 名的學生來實作即可 — 而您將會在過程中學到很多東西!您可以之後寄發這些備註內容給探索創建者,或是直接建議更新,而這將會幫助改進給予全世界學生們來使用的探索。", + "I18N_CONTACT_PAGE_PARAGRAPH_6": "要查看哪些內容有效及哪些不行方面,透過用戶研究學習可提供很大的幫助。這涉及與學生坐在一起,來觀察他們完成探索的情況、寫下關於學生會覺得混淆部份的備註、過程中他們會覺得不夠有趣、覺得會另人沮喪的時候、或是發現到一個讓人不清楚的解釋。任一探索每次一同與 2 至 3 名的學生來實作即可 — 而您將會在過程中學到很多東西!您可以之後寄發這些備註內容給探索創作者,或是直接建議更新,而這將會幫助改進給予全世界學生們來使用的探索。", "I18N_CONTACT_PAGE_PARAGRAPH_7": "Oppia 是建立於能讓各地優良導師前來的點子。您也許是位喜歡向同儕釋深澀概念的學生、或是一個想分享豐富人生經驗的退休人士、個別導師、課堂教師、助教或教授、或是一名想尋求以有意義方式渡過時間的生意人。只要您是個能創建有助於學生探索的人士,您的背景來歷完全不構成問題。", - "I18N_CONTACT_PAGE_PARAGRAPH_7_HEADING": "創建新的探索", - "I18N_CONTACT_PAGE_PARAGRAPH_8": "因此,如果您想為全世界各地的學生創建免費、有效的課程,那麼您來對了地方。我們鼓勵您查看我們的創建者指導現有課程、以及開始創建自己的課程。另外,如果您想確認您的課程能夠擁有相當影響力,請考慮申請我們的以 Oppia 課程教學,我們將協助您創建、測試,並改善您的探索以獲得到最佳影響。", + "I18N_CONTACT_PAGE_PARAGRAPH_7_HEADING": "新增探索", + "I18N_CONTACT_PAGE_PARAGRAPH_8": "因此,如果您想為全世界各地的學生創建免費、有效的課程,那麼您來對了地方。我們鼓勵您查看我們的創作者入門現有課程、以及開始創建自己的課程。另外,如果您想確認您的課程能夠擁有相當影響力,請考慮申請我們的以 Oppia 課程教學,我們將協助您創建、測試,並改善您的探索以獲得到最佳影響。", 
"I18N_CONTACT_PAGE_PARAGRAPH_9": "一旦發現更好的東西,現有的探索又該如何呢?您可以直接從探索頁面向任何探索提出建議更改。只需要點擊右上方的鉛筆圖示,並分享出您認為可以改善的內容。課程的創建者將收到您的建議,並有機會將其內容合併到現有探索中。這是一個非常有意義的貢獻方式,特別是您可基於您的建議從學生體驗中進行探索。", "I18N_CONTACT_PAGE_PARAGRAPH_9_HEADING": "改善現有的探索", + "I18N_CONTACT_PAGE_TITLE": "聯絡 | Oppia", "I18N_CONTINUE_REGISTRATION": "繼續註冊", "I18N_COOKIE_BANNER_ACKNOWLEDGE": "好的", "I18N_COOKIE_BANNER_EXPLANATION": "本網站使用 cookie 和類似技術來支援核心功能、確保網站資安、並分析我們的網站流量。在我們的隱私政策裡有更多相關資訊。", "I18N_CORRECT_FEEDBACK": "正確!", + "I18N_CREATED_LEARNER_GROUP_LINK_MESSAGE": "您的群組連結", + "I18N_CREATE_ACCOUNT": "建立帳號", "I18N_CREATE_ACTIVITY_QUESTION": "您想建立什麼?", "I18N_CREATE_ACTIVITY_TITLE": "建立活動", - "I18N_CREATE_COLLECTION": "創建收藏", - "I18N_CREATE_EXPLORATION": "創建探索", + "I18N_CREATE_COLLECTION": "建立收藏", + "I18N_CREATE_EXPLORATION": "建立探索", "I18N_CREATE_EXPLORATION_CREATE": "建立", "I18N_CREATE_EXPLORATION_QUESTION": "您是否希望建立一個探索?", - "I18N_CREATE_EXPLORATION_TITLE": "創建一個探索", + "I18N_CREATE_EXPLORATION_TITLE": "建立探索", "I18N_CREATE_EXPLORATION_UPLOAD": "上傳", + "I18N_CREATE_LEARNER_GROUP": "建立群組", "I18N_CREATE_NO_THANKS": "不用了,謝謝", "I18N_CREATE_YES_PLEASE": "是的,謝謝!", "I18N_CREATOR_IMPACT": "影響", "I18N_DASHBOARD_COLLECTIONS": "收藏", - "I18N_DASHBOARD_CREATOR_DASHBOARD": "創建者控制面版", + "I18N_DASHBOARD_CREATOR_DASHBOARD": "創作者面板", "I18N_DASHBOARD_EXPLORATIONS": "探索", "I18N_DASHBOARD_EXPLORATIONS_EMPTY_MESSAGE": "看起來你還沒有創造任何探索。讓我們開始吧!", "I18N_DASHBOARD_EXPLORATIONS_SORT_BY": "排序依", @@ -182,7 +261,7 @@ "I18N_DASHBOARD_TABLE_HEADING_PLAYS": "遊玩數", "I18N_DASHBOARD_TABLE_HEADING_RATING": "評價", "I18N_DASHBOARD_TABLE_HEADING_UNRESOLVED_ANSWERS": "未解答問題", - "I18N_DASHBOARD_TOPICS_AND_SKILLS_DASHBOARD": "主題與技能控制面板", + "I18N_DASHBOARD_TOPICS_AND_SKILLS_DASHBOARD": "主題與技能面板", "I18N_DELETE_ACCOUNT_PAGE_BREADCRUMB": "刪除帳號", "I18N_DELETE_ACCOUNT_PAGE_BUTTON": "刪除我的帳號", "I18N_DELETE_ACCOUNT_PAGE_HEADING": "刪除帳號", @@ -195,21 +274,65 @@ "I18N_DELETE_ACCOUNT_PAGE_LIST_2_2": "被其他擁有者擁有的公開探索與收藏的提交", "I18N_DELETE_ACCOUNT_PAGE_LIST_2_3": 
"對於主題、故事、技能、與問題的提交", "I18N_DELETE_ACCOUNT_PAGE_MODAL_TEXT": "為了確認刪除,請在下面的字段中輸入您的用戶名,然後按“刪除我的帳戶”按鈕。 此操作不能撤消。", - "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "此操作將會刪除該用戶帳號,與所有關聯該帳號的私有資料。已公開的資料因為無法與帳號關聯,因此會匿名處理。以下提及的一些分類可能不適用於您的帳號。", + "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1": "此操作將會刪除該用戶帳號,與所有關聯該帳號的私有資料。已公開的資料因為無法與帳號關聯(會存儲 6 個月的備份資料除外),因此會匿名處理。以下提及的一些分類可能不適用於您的帳號。", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_1_HEADING": "概要", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_2": "此為將會被刪除掉的資料類型:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_3": "此為會匿名化的資料類型:", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_4": "另外,已不被其他人所擁有的已發布探索和收藏,將會被轉換為社群所擁有。", "I18N_DELETE_ACCOUNT_PAGE_PARAGRAPH_5": "如果您有關於帳號移除流程的任何相關問題或疑慮,請發送電子郵件到 privacy@oppia.org。", "I18N_DELETE_ACCOUNT_PAGE_REDIRECT_INFO": "這將會帶您到可以刪除您的 Oppia 帳號的頁面。", + "I18N_DELETE_ACCOUNT_PAGE_TITLE": "刪除帳號 | Oppia", + "I18N_DELETE_LEARNER_GROUP": "刪除群組", + "I18N_DELETE_LEARNER_GROUP_MODAL_BODY_TEXT": "您確定要刪除<[groupName]>學習者群組嗎?", + "I18N_DIAGNOSTIC_TEST_RESULT_GO_TO_CLASSROOM_BUTTON_TEXT": "前往教室", "I18N_DIRECTIVES_DRAG_IMAGE_HERE": "把圖片拖進此區域", "I18N_DIRECTIVES_UPLOAD_A_FILE": "上傳檔案", "I18N_DONATE_PAGE_BREADCRUMB": "捐款", - "I18N_DONATE_PAGE_IMAGE_TITLE": "您的慷慨捐贈資金:", - "I18N_DONATE_PAGE_TITLE": "捐款給
Oppia 基金會", + "I18N_DONATE_PAGE_BROWSER_TAB_TITLE": "捐款 | 做出正面影響 | Oppia", + "I18N_DONATE_PAGE_BUDGET_HEADING": "您的錢會如何使用?", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_3": "維護", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_2": "您的捐款能幫助我們提升全世界對 Oppia 的認識。", + "I18N_DONATE_PAGE_BUDGET_SUBHEADING_EXPLANATION_3": "捐款能使得 Oppia 平台與伺服器平穩可靠地運作。", + "I18N_DONATE_PAGE_FAQ_ANSWER_10": "有關 Oppia 的一般問題,請聯絡 contact@oppia.org。", + "I18N_DONATE_PAGE_FAQ_ANSWER_2": "Oppia 的存在,是為了幫助改善公平的教育機會。許多線上平台(例如大學層級的 MOOC 課程)偏向已經接受過基礎教育的學習者。這些平台另外要求學習者擁有網際網路連線、自主學習的動力、良好的英語知識、和基本的讀寫能力等等。然而,這對於欠缺\n服務的社區與地區的學生來說並非如此,並且可能會增加社會經濟上的差距。Oppia 藉由包含專門針對資源貧乏社區的功能,來縮小這樣的差距。", + "I18N_DONATE_PAGE_FAQ_ANSWER_5": "是的,我們以美國 501(c)(3) 組織的形式運作。在美國法律下,您的捐贈可以申報免稅。", + "I18N_DONATE_PAGE_FAQ_ANSWER_6": "請勿用支票捐款,而是藉信用卡或Paypal捐款。", + "I18N_DONATE_PAGE_FAQ_ANSWER_8": "我們不接受股票或匯款捐贈。請考慮藉由信用卡或是透過 Paypal 捐款。", + "I18N_DONATE_PAGE_FAQ_HEADING_TEXT": "常見問題", + "I18N_DONATE_PAGE_FAQ_QUESTION_1": "什麼是 Oppia?", + "I18N_DONATE_PAGE_FAQ_QUESTION_2": "Oppia 為何存在?", + "I18N_DONATE_PAGE_FAQ_QUESTION_3": "Oppia 如何衡量影響,且此平台到目前為止取得了什麼樣的成就?", + "I18N_DONATE_PAGE_FAQ_QUESTION_4": "Oppia 與其他線上教育平台有何不同?", + "I18N_DONATE_PAGE_FAQ_QUESTION_5": "這個捐款可以抵稅嗎?", + "I18N_DONATE_PAGE_FAQ_QUESTION_6": "我如何透過支票捐款?", + "I18N_DONATE_PAGE_FAQ_QUESTION_8": "我可以用股票或是透過電匯進行捐贈嗎?", + "I18N_DONATE_PAGE_IMAGE_TITLE": "您的慷慨捐贈資金將會幫助到:", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_2": "來自巴勒斯坦", + "I18N_DONATE_PAGE_LEARNER_FEEDBACK_PLACE_TEXT_3": "來自印度", + "I18N_DONATE_PAGE_READ_BLOG_BUTTON_TEXT": "閱讀我們的部落格", + "I18N_DONATE_PAGE_STATISTIC_2": "在我們虛擬圖書館裡的課程", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_HEADING": "感謝訂閱!", + "I18N_DONATE_PAGE_SUBSCRIBE_MODAL_TEXT_2": "在我們社群(包括您!)的幫助和支持下,Oppia 已經進行並將持續為世界上教育資源最匱乏的學習者提供服務。", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_EMAIL_LABEL_TEXT": "電子郵件地址", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_NAME_LABEL_TEXT": "名稱(選填)", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_SUBSCRIBE_LABEL_TEXT": "現在訂閱", + "I18N_DONATE_PAGE_SUBSCRIBE_SECTION_TEXT_1": 
"今天就加入我們!", + "I18N_DONATE_PAGE_THANKS_MODAL_HEADING": "感謝捐款!", + "I18N_DONATE_PAGE_THANKS_MODAL_TEXT_1": "在您的幫助和支持下,Oppia 將能夠繼續為世界上教育資源最匱乏的學習者提供服務。", + "I18N_DONATE_PAGE_TITLE": "捐款給 Oppia 基金會", "I18N_DONATE_PAGE_VIDEO_SUBTITLE": "聽取來自我們 Oppia 社群的發聲", - "I18N_DONATE_PAGE_VISION_TEXT_ONE": "Oppia 是在 2012 年,從一個簡單的想法開始起步:改善全世界學生的教育,同時增進教學的品質。此願景現已轉變成一個被全球超過 430,000 位使用者利用、超過 11,000 個探索內容的教學平台。", - "I18N_DONATE_PAGE_VISION_TEXT_TWO": "歡迎捐款給 Oppia 基金會,我們是一家已註冊的 501(c)(3) 非營利組織,來與我們一起為世界各地的人們帶來教學的樂趣。", + "I18N_DONATE_PAGE_WATCH_VIDEO_BUTTON_TEXT": "觀看影片", + "I18N_EDIT_LEARNER_GROUP_PAGE_TITLE": "編輯學習者群組 | Oppia", + "I18N_EMPTY_LEARNER_GROUPS_MESSAGE": "您還沒有任何群組", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_1": "您剛剛完成了第 1 章!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_2": "您剛剛完成了第 5 章!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_3": "您剛剛完成了第 10 章!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_4": "您剛剛完成了第 25 章!", + "I18N_END_CHAPTER_MILESTONE_MESSAGE_5": "您剛剛完成了第 50 章!", + "I18N_END_CHAPTER_NEXT_CHAPTER_TEXT": "到下一個課程!", + "I18N_END_CHAPTER_PRACTICE_SESSION_TEXT": "練習您新獲得的技能!", + "I18N_END_CHAPTER_REVISION_TAB_TEXT": "複習您到目前為止學到的東西!", + "I18N_END_EXPLORATION_RECOMMENDATION_MESSAGE": "這是您接下來可以做的!", "I18N_ERROR_DISABLED_EXPLORATION": "探索已停用", "I18N_ERROR_DISABLED_EXPLORATION_MESSAGE": "對不起,您目前選擇的探索目前已被停用,請稍後再試。", "I18N_ERROR_DISABLED_EXPLORATION_PAGE_TITLE": "探索已停用 - Oppia", @@ -222,10 +345,56 @@ "I18N_ERROR_MESSAGE_404": "抱歉,我們找了很久但是還是找不到那個頁面", "I18N_ERROR_MESSAGE_500": "有東西發生了可怕的錯誤。但這不是您的錯。發生了內部錯誤。", "I18N_ERROR_NEXT_STEPS": "現在最好返回\">首頁。然而,如果您認為該錯誤不應該發生,請在我們的\" target=\"_blank\">問題追蹤器上告訴我們。非常抱歉。", + "I18N_ERROR_PAGE_ROOT_BROWSER_TAB_TITLE": "錯誤<[statusCode]> | Oppia", + "I18N_ERROR_PAGE_TITLE": "錯誤<[statusCode]> - Oppia", "I18N_ERROR_PAGE_TITLE_400": "錯誤 400 - Oppia", "I18N_ERROR_PAGE_TITLE_401": "錯誤401 - Oppia", "I18N_ERROR_PAGE_TITLE_404": "錯誤404 - Oppia", "I18N_ERROR_PAGE_TITLE_500": "錯誤500 - Oppia", + "I18N_EXPLORATION_-tMgcP1i_4au_DESCRIPTION": 
"準備好更多的紙杯蛋糕了嗎?參加這個簡短的測驗,看看你對目前為止所學知識的理解!", + "I18N_EXPLORATION_0FBWxCE5egOw_DESCRIPTION": "一個分數有可能變相成別的分數嗎?讓我們看看當馬修第二次見到克拉姆時會發生什麼事。", + "I18N_EXPLORATION_1904tpP0CYwY_TITLE": "從 1 到 5 的單位數表達式", + "I18N_EXPLORATION_2mzzFVDLuAj8_TITLE": "什麼是比例?", + "I18N_EXPLORATION_40a3vjmZ7Fwu_DESCRIPTION": "Nina 與她的母親遇到一位經營一間水果攤的朋友,來一起加入 Nina 透過除法為她們的朋友解決問題!", + "I18N_EXPLORATION_53Ka3mQ6ra5A_TITLE": "添加更大的數字", + "I18N_EXPLORATION_5NWuolNcwH6e_TITLE": "順序的重要性", + "I18N_EXPLORATION_670bU6d9JGBh_DESCRIPTION": "幫助馬修為貝克先生的一位客人解決一個問題,因為他學習了帶分數和數線。來遊玩這個課程開始吧!", + "I18N_EXPLORATION_670bU6d9JGBh_TITLE": "帶分數和數線 1", + "I18N_EXPLORATION_6Q6IyIDkjpYC_DESCRIPTION": "貝克先生有一筆非常大的訂單進來,他需要馬修幫助來購買更多的原料。你能用分數找出他們需要的是什麼嗎?", + "I18N_EXPLORATION_8HTzQQUPiK5i_DESCRIPTION": "加入妮娜和她的母親前往市場。使用除法來幫助她們找出她們會需要多少個袋子來裝雜貨!", + "I18N_EXPLORATION_8HTzQQUPiK5i_TITLE": "什麼是除法?", + "I18N_EXPLORATION_9DITEN8BUEHw_TITLE": "來加減幾個數字", + "I18N_EXPLORATION_BJd7yHIxpqkq_TITLE": "加法基礎", + "I18N_EXPLORATION_K645IfRNzpKy_TITLE": "什麼是位值", + "I18N_EXPLORATION_K89Hgj2qRSzw_TITLE": "分配律", + "I18N_EXPLORATION_Knvx24p24qPO_TITLE": "找出數字的值", + "I18N_EXPLORATION_OKxYhsWONHZV_TITLE": "什麼是加法?", + "I18N_EXPLORATION_PLAYER_PAGE_TITLE": "<[explorationTitle]> - Oppia", + "I18N_EXPLORATION_PsfDKdhd6Esz_TITLE": "減掉大數字,第 2 部分", + "I18N_EXPLORATION_RvopsvVdIb0J_DESCRIPTION": "該是時候讓詹姆士賣掉他的新冰沙了!他和貝瑞叔叔一起擺攤。他們能算出每個人應該得到多少錢嗎?", + "I18N_EXPLORATION_STARTING_FROM_BEGINNING": "恭喜您完成本課程!課程會在您下次回來時從頭開始。", + "I18N_EXPLORATION_VKXd8qHsxLml_DESCRIPTION": "瑪雅、歐瑪與馬利克發現到他們的一些原料變質了。你可以用減法來幫助他們算出他們還剩多少嗎?", + "I18N_EXPLORATION_VKXd8qHsxLml_TITLE": "減掉大數字,第 1 部分", + "I18N_EXPLORATION_W0xq3jW5GzDF_DESCRIPTION": "瑪雅、歐瑪與馬利克在嘗試做第二份披薩時,意想不到的事發生了!", + "I18N_EXPLORATION_W0xq3jW5GzDF_TITLE": "什麼是減法?", + "I18N_EXPLORATION_WwqLmeQEn9NK_TITLE": "四捨五入,第 2 部分", + "I18N_EXPLORATION_Xa3B_io-2WI5_DESCRIPTION": "一起加入馬修來幫助貝克先生修復東西,同時了解如何添加分數。", + "I18N_EXPLORATION_aAkDKVDR53cG_TITLE": "比較數字", + "I18N_EXPLORATION_aHikhPlxYgOH_DESCRIPTION": "加入馬修來一起了解帶分數只是種變相的普通分數。", + 
"I18N_EXPLORATION_avwshGklKLJE_TITLE": "四捨五入,第 1 部分", + "I18N_EXPLORATION_cQDibOXQbpi7_DESCRIPTION": "艾莉亞準備在她的花園裡種些更大的蔬菜!請幫助她種植和澆水,同時與她來一起記住更多的倍數。", + "I18N_EXPLORATION_cQDibOXQbpi7_TITLE": "從 5 到 9 的單位數表達式", + "I18N_EXPLORATION_m1nvGABWeUoh_TITLE": "什麼是平均數?", + "I18N_EXPLORATION_nLmUS6lbmvnl_TITLE": "比較比例", + "I18N_EXPLORATION_rfX8jNkPnA-1_DESCRIPTION": "你能夠幫忙馬修賺些紙杯蛋糕嗎?來參加這個簡短的測驗,看看你對於分數的記憶程度。", + "I18N_EXPLORATION_tIoSb3HZFN6e_DESCRIPTION": "詹姆士學習如何將比例簡化成最簡單的形式,以讓他的計算更加容易。", + "I18N_EXPLORATION_tIoSb3HZFN6e_TITLE": "以最簡單的形式填寫比例", + "I18N_EXPLORATION_umPkwp0L1M0-_TITLE": "什麼是分數?", + "I18N_EXPLORATION_v8fonNnX4Ub1_DESCRIPTION": "Ava 與 Kamal 想繼續幫忙 Plum 夫人處理她的烘培業務,不過在運算式裡有一些未知數。Ava 能夠幫上忙嗎?", + "I18N_EXPLORATION_zIBYaqfDJrJC_TITLE": "乘法是什麼意思", + "I18N_EXPLORATION_zVbqxwck0KaC_TITLE": "比例關係", + "I18N_EXPLORATION_zW39GLG_BdN2_DESCRIPTION": "在馬修學習如何比較分數的大小時,烘培坊發生了一起事故讓貝克先生很生氣。讓我們來看看發生了什麼事!", + "I18N_EXPLORATION_zW39GLG_BdN2_TITLE": "比較分數", "I18N_FEEDBACK_MESSAGE_ANONYMOUS_AUTHOR": "匿名", "I18N_FOOTER_ABOUT": "關於", "I18N_FOOTER_ABOUT_ALL_CAPS": "關於 OPPIA", @@ -240,7 +409,7 @@ "I18N_FOOTER_GET_INVOLVED": "參與", "I18N_FOOTER_GET_STARTED": "入門", "I18N_FOOTER_OPPIA_FOUNDATION": "Oppia 基金會", - "I18N_FOOTER_PRIVACY_POLICY": "隱私政策", + "I18N_FOOTER_PRIVACY_POLICY": "隱私權政策", "I18N_FOOTER_TEACH": "以 Oppia 教學", "I18N_FOOTER_TEACH_LEARN_ALL_CAPS": "教學/學習", "I18N_FOOTER_TEACH_PAGE": "給家長/導師", @@ -248,7 +417,7 @@ "I18N_FORMS_TYPE_NUMBER": "輸入一個數字", "I18N_FORMS_TYPE_NUMBER_AT_LEAST": "請輸入一個至少爲<[minValue]>的數字。", "I18N_FORMS_TYPE_NUMBER_AT_MOST": "請輸入一個最多爲<[maxValue]>的數字。", - "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "請輸入一個有效十進位數字。", + "I18N_FORMS_TYPE_NUMBER_INVALID_DECIMAL": "請輸入有效十進數", "I18N_GENERATE_ATTRIBUTION": "產生歸屬", "I18N_GET_STARTED_PAGE_BREADCRUMB": "入門", "I18N_GET_STARTED_PAGE_HEADING": "入門!", @@ -259,7 +428,7 @@ "I18N_GET_STARTED_PAGE_PARAGRAPH_2": "而您所需要開始的便是您想教學的主題。您可以創建任意大小內容的探索,探索主題內容的理想大小是您可在單一課程裡便全包含著。您還可以額外創建多個需按順序完成的相關探索,此種則被稱之為集合。", 
+ "I18N_INTERACTIONS_DRAG_AND_DROP_INSTRUCTION": "拖放項目",
+ "I18N_INTERACTIONS_MUSIC_INSTRUCTION": "拖拉音符到五線譜來形成模進",
+ "I18N_INTERACTIONS_RATIO_INVALID_COLONS": "您的答案含有多個相鄰的冒號(:)。",
+527,13 @@ "I18N_LEARNER_DASHBOARD_EMPTY_CONTINUE_WHERE_YOU_LEFT_OFF_SECTION_SET_A_GOAL": "設定一個目標!", "I18N_LEARNER_DASHBOARD_EMPTY_CURRENT_GOALS_SECTION": "請從下方選擇一個目標來開始學習課程!", "I18N_LEARNER_DASHBOARD_EMPTY_EXPLORATION_PLAYLIST": "在您的「稍後遊玩」清單裡似乎沒有任何探索。請前往圖書館來製作您個人策劃的遊玩清單吧!", - "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "您還沒有任何活躍的反饋線程。您的反饋有助於提高我們的課程質量。您可以通過開始我們的任何課程並提交您寶貴的反饋來做到這一點!", + "I18N_LEARNER_DASHBOARD_EMPTY_FEEDBACK_THREADS": "您還沒有任何有效的回饋討論。您的回饋有助於提高我們的課程品質。您可以透過開始我們的任何課程,並提交您寶貴的回饋來做到這一點!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_COLLECTIONS": "看起來似乎您目前尚未有部份完整的收藏,來前往圖書館開始進行令人興奮的新收藏吧!", "I18N_LEARNER_DASHBOARD_EMPTY_INCOMPLETE_EXPLORATIONS": "看起來似乎您目前尚未有部份完整的探索,來前往圖書館開始進行一場令人興奮的新探索吧!", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY": "開始使用 ", - "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "設定目標可以讓 Oppia 在您的儀表板中為您提供更好的建議,從而有助於您的學習之旅。", + "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_REASON_FOR_SETTING_A_GOAL": "設定目標可以讓 Oppia 在您的面板中為您提供更好的建議,從而有助於您的學習之旅。", "I18N_LEARNER_DASHBOARD_EMPTY_SKILL_PROFICIENCY_SET_A_GOAL": "設定目標! 
", - "I18N_LEARNER_DASHBOARD_EMPTY_SUBSCRIPTIONS": "看起來似乎您尚未向任何創建者做出訂閱,來前往圖書館開始查找新的創建者、以及他們令人驚豔的探索吧!", + "I18N_LEARNER_DASHBOARD_EMPTY_SUBSCRIPTIONS": "看起來似乎您尚未向任何創作者做出訂閱,來前往圖書館開始查找新的創作者、以及他們令人驚豔的探索吧!", "I18N_LEARNER_DASHBOARD_EMPTY_SUGGESTED_FOR_YOU_SECTION": "哇!您完成了我們主題課程的所有內容!歡迎隨時回到我們的社群課程頁面來查看別的探索", "I18N_LEARNER_DASHBOARD_EVENING_GREETING": "晚安", "I18N_LEARNER_DASHBOARD_EXPLORATIONS_SORT_BY_LAST_PLAYED": "上一次遊玩", @@ -345,10 +543,14 @@ "I18N_LEARNER_DASHBOARD_FEEDBACK_THREAD_WARNING": "在此討論為公開時避免洩露任何個人訊息。", "I18N_LEARNER_DASHBOARD_GOALS_SECTION": "目標", "I18N_LEARNER_DASHBOARD_GOLD_BADGE": "金牌", - "I18N_LEARNER_DASHBOARD_HOME_SECTION": "主頁", - "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "進行中", + "I18N_LEARNER_DASHBOARD_HOME_SECTION": "首頁", + "I18N_LEARNER_DASHBOARD_INCOMPLETE": "不完整", + "I18N_LEARNER_DASHBOARD_INCOMPLETE_SECTION": "未完成", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_ONE": "您似乎尚未嘗試過我們的探索。", "I18N_LEARNER_DASHBOARD_INTRO_MESSAGE_PART_TWO": "讓我們開始一趟興奮的旅程!", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION": "學習者群組", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_GROUPS": "您的群組", + "I18N_LEARNER_DASHBOARD_LEARNER_GROUPS_SECTION_INVITATIONS": "您的邀請", "I18N_LEARNER_DASHBOARD_LEARN_SOMETHING_NEW_SECTION": "學習新事物", "I18N_LEARNER_DASHBOARD_MORNING_GREETING": "早安", "I18N_LEARNER_DASHBOARD_NEW_STORY_CONTENT": "有新的故事內容可用", @@ -358,8 +560,9 @@ "I18N_LEARNER_DASHBOARD_NONEXISTENT_EXPLORATIONS_FROM_PLAYLIST": "{numberDeleted, plural, =1{1 個在您「稍後遊玩」清單的探索已不可用。對此我們深表歉意} other{# 個在您「稍後遊玩」清單的探索已不可用。對此我們深表歉意}}", "I18N_LEARNER_DASHBOARD_NONEXISTENT_INCOMPLETE_COLLECTIONS": "{numberDeleted, plural, =1{1 個在進行中的收藏已不可用。對此我們深表歉意} other{# 個在進行中的收藏已不可用。對此我們深表歉意}}", "I18N_LEARNER_DASHBOARD_NONEXISTENT_INCOMPLETE_EXPLORATIONS": "{numberDeleted, plural, =1{1 個在進行中的探索已不可用。對此我們深表歉意} other{# 個在進行中的探索已不可用。對此我們深表歉意}}", - "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_COLLECTION": "看起來似乎您尚未開始任何收藏,來前往圖書館開始進行令人興奮的新收藏吧!", - "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": 
"看起來似乎您尚未開始任何探索,來前往圖書館開始進行一場令人興奮的新探索吧!", + "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_COLLECTION": "您看來尚未開始任何收藏,去圖書館開始令人興奮的新收藏吧!", + "I18N_LEARNER_DASHBOARD_NO_ACTIVITY_IN_EXPLORATION": "您看來尚未開始任何探索,去圖書館開始刺激的新探索吧!", + "I18N_LEARNER_DASHBOARD_PAGE_TITLE": "學習者面板 | Oppia", "I18N_LEARNER_DASHBOARD_PLAYLIST_SECTION": "稍後遊玩", "I18N_LEARNER_DASHBOARD_PROGRESS_SECTION": "進度", "I18N_LEARNER_DASHBOARD_REARRANGE_LEARNER_PLAYLIST_MESSAGE": "在您所想要遊玩的順序裡拖拉並重新排列行動!", @@ -372,7 +575,7 @@ "I18N_LEARNER_DASHBOARD_SEND_FEEDBACK_THREAD_MESSAGE_IN_PROGRESS": "傳送中...", "I18N_LEARNER_DASHBOARD_SILVER_BADGE": "銀牌", "I18N_LEARNER_DASHBOARD_SKILLS": "技能", - "I18N_LEARNER_DASHBOARD_SKILL_PROFICIENCY_SECTION": "技能熟練度", + "I18N_LEARNER_DASHBOARD_SKILL_PROGRESS_SECTION": "技能進步", "I18N_LEARNER_DASHBOARD_STORIES_COMPLETED_SECTION": "故事已完成", "I18N_LEARNER_DASHBOARD_SUBSCRIPTIONS_SECTION": "訂閱", "I18N_LEARNER_DASHBOARD_SUBTOPIC_PROGRESS": "進度:", @@ -382,7 +585,26 @@ "I18N_LEARNER_DASHBOARD_SUGGESTION_SUGGESTED_STATE_CONTENT_HEADER": "已建議:", "I18N_LEARNER_DASHBOARD_SUGGESTION_TEXT": "建議", "I18N_LEARNER_DASHBOARD_TOOLTIP": "收藏是多個按順序完成的相關探索", + "I18N_LEARNER_DASHBOARD_VIEW": "檢視", "I18N_LEARNER_DASHBOARD_VIEW_SUGGESTION": "檢視建議", + "I18N_LEARNER_GROUP_CREATED_TITLE": "您的群組<[groupName]>已建立。", + "I18N_LEARNER_GROUP_CREATION_NEXT_BUTTON_TEXT": "下一步", + "I18N_LEARNER_GROUP_CREATION_PREV_BUTTON_TEXT": "上一步", + "I18N_LEARNER_GROUP_DESCRIPTION_LABEL": "描述", + "I18N_LEARNER_GROUP_DETAILS_GROUP_TITLE": "群組標題", + "I18N_LEARNER_GROUP_DETAILS_MODAL_DESCRIPTION": "群組描述", + "I18N_LEARNER_GROUP_DETAILS_TITLE": "詳細資料", + "I18N_LEARNER_GROUP_INVITE_LEARNERS": "邀請學習者", + "I18N_LEARNER_GROUP_INVITE_LEARNERS_BY_USERNAME": "以使用者名稱來邀請學習者", + "I18N_LEARNER_GROUP_NO_RESULTS_FOUND": "查無結果。", + "I18N_LEARNER_GROUP_PROGRESS_SHARING_OPTION_TRUE": "是的,我想分享我的進度", + "I18N_LEARNER_GROUP_SEARCH_BY_USERNAME": "按使用者名稱搜尋", + "I18N_LEARNER_GROUP_SKILLS_ANALYSIS_SECTION": "技能分析", + "I18N_LEARNER_GROUP_SYLLABUS_COMPLETION": "完成", + 
"I18N_PENDING_ACCOUNT_DELETION_PAGE_PARAGRAPH_1": "您的帳號已被排定刪除,並將在 24 小時後左右進行。當帳號刪除完畢後,您會收到電子郵件的通知。",
"歡迎任何人遊玩已發佈的探索並給予意見回饋。在大眾的幫助之下,我們可以持續改進網站上的課程並盡其可能使它們有效。", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_2": "使用良好鑑定來發佈探索。探索應包含有意義的教育價值,而不能摻有廣告、垃圾訊息、破壞或濫用等內容。", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_GUIDELINE_3": "做個好公民。 創建多重帳號、濫用意見回饋系統、利用探索內容來欺騙用戶、或其它類似的反社會行為皆不被允許;並且可能會導致帳號停用。", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_HEADING": "社群指南", "I18N_PLAYBOOK_COMMUNITY_GUIDELINES_TEXT": "若您需要任何有關指南方針的說明,請在您方便時刻來詢問我們的論壇。", - "I18N_PLAYBOOK_HEADING": "創建者指南", + "I18N_PLAYBOOK_HEADING": "創作者指南", + "I18N_PLAYBOOK_PAGE_TITLE": "創作者指南 | Oppia", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_HEADING": "讓您的探索可供出版", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_1": "教導有意義的事物 - 為目標讀者呈現新知— 不要只是在測試他們已吸收過的知識。另外如果您想要教導的主題;已有被現存的探索給涵蓋到,請改試著對於目前的探索提供意見回饋—如此一來會更容易!", "I18N_PLAYBOOK_PUBLICATION_INSTRUCTIONS_INSTRUCTION_2": "教學不僅是在陳述 - 當挑選主題時,嘗試挑選一個有著些微細別具深度的複雜概念,或是一整個有相關性、具趣味的各項事物。而能帶有進步感與挑戰感也是很好的方式,這可讓學習者有機會套用他們剛了解的概念到未知狀況裡。", @@ -528,7 +757,7 @@ "I18N_PLAYER_LEARN_AGAIN_BUTTON": "再次學習", "I18N_PLAYER_LEAVE_FEEDBACK": "給作者留下意見回饋(當提交後,會一同包括您目前所探索卡片的參考。)", "I18N_PLAYER_LOADING": "載入中...", - "I18N_PLAYER_NEXT_LESSON": "下一個課程", + "I18N_PLAYER_NEXT_LESSON": "下一課程", "I18N_PLAYER_NO_OBJECTIVE": "未指明對象。", "I18N_PLAYER_NO_TAGS": "未指明標籤。", "I18N_PLAYER_PLAY_EXPLORATION": "遊玩探索", @@ -564,6 +793,9 @@ "I18N_PLAYER_UNRATED": "未評級", "I18N_PLAYER_VIEWS_TOOLTIP": "瀏覽次數", "I18N_PRACTICE_SESSION_PAGE_BREADCRUMB": "練習課程", + "I18N_PRACTICE_SESSION_PAGE_TITLE": "練習活動:<[topicName]> - Oppia", + "I18N_PRACTICE_SESSION_START_BUTTON_TEXT": "開始練習", + "I18N_PREFERENCES_AUDIO_LANGUAGE_LABEL": "音訊語言", "I18N_PREFERENCES_BIO": "個人簡介", "I18N_PREFERENCES_BIO_EXPLAIN_TEXT": "此為可選項目。任何您編寫於此的內容為公開且全世界皆可見。", "I18N_PREFERENCES_BREADCRUMB": "偏好設定", @@ -575,20 +807,24 @@ "I18N_PREFERENCES_EMAIL_RECEIVE_EDIT_RIGHTS_NEWS": "當有人給予您編輯探索的權限時,接收相關電子郵件訊息", "I18N_PREFERENCES_EMAIL_RECEIVE_FEEDBACK_NEWS": "當有人寄給您有關探索的意見回饋時,接收相關電子郵件訊息", "I18N_PREFERENCES_EMAIL_RECEIVE_NEWS": "接收有關此網站的新聞和更新", - "I18N_PREFERENCES_EMAIL_RECEIVE_SUBSCRIPTION_NEWS": 
+ "I18N_PREFERENCES_EXPORT_ACCOUNT_INFO_TEXT": "這會將您的 Oppia 帳號資料下載成一個 JSON 格式文字檔案。",
+ "I18N_SAVE_EXPLORATION_PROGRESS_TEXT_5": "寫下或複製下方連結",
"請登入或註冊來保存您的進度,並遊玩下一個課程。", + "I18N_SAVE_PROGRESS_BUTTON_TEXT": "複製", + "I18N_SAVE_PROGRESS_COPY_TOOLTIP": "已複製!", + "I18N_SAVE_PROGRESS_TEXT": "保存進度", "I18N_SHARE_LESSON": "分享這個課程", + "I18N_SHOW_LESS": "顯示較少", + "I18N_SHOW_MORE": "顯示更多", "I18N_SHOW_SOLUTION_BUTTON": "顯示解決方式", - "I18N_SIDEBAR_ABOUT_LINK": "關於 Oppia", + "I18N_SIDEBAR_ABOUT_LINK": "關於我們", + "I18N_SIDEBAR_ABOUT_OPPIA_FOUNDATION": "關於 Oppia 基金會", "I18N_SIDEBAR_BLOG": "部落格", "I18N_SIDEBAR_CLASSROOM": "教室", "I18N_SIDEBAR_CLASSROOM_BASIC_MATHS": "基礎數學", "I18N_SIDEBAR_CONTACT_US": "聯絡我們", + "I18N_SIDEBAR_CONTACT_US_DESCRIPTION": "我們在此處理您的任何問題。", "I18N_SIDEBAR_DONATE": "贊助", + "I18N_SIDEBAR_DONATE_DESCRIPTION": "您的貢獻幫助了為所有人提供優質教育。", "I18N_SIDEBAR_FORUM": "論壇", - "I18N_SIDEBAR_GET_STARTED": "入門", + "I18N_SIDEBAR_GET_INVOLVED": "參與", + "I18N_SIDEBAR_HOME": "首頁", + "I18N_SIDEBAR_LEARN": "學習", "I18N_SIDEBAR_LIBRARY_LINK": "圖書館", + "I18N_SIDEBAR_MATH_FOUNDATIONS": "數學基礎", + "I18N_SIDEBAR_MATH_FOUNDATIONS_DESCRIPTION": "友善的初學者課程可幫助您開始學習數學。", "I18N_SIDEBAR_OPPIA_FOUNDATION": "Oppia 基金會", "I18N_SIDEBAR_PARTNERSHIPS": "合作夥伴", + "I18N_SIDEBAR_PARTNERSHIPS_DESCRIPTION": "為您所在區域的學生帶來優質教育。", + "I18N_SIDEBAR_SUBMENU_ADDITION_AND_SUBTRACTION": "加減", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY": "社群圖書館", + "I18N_SIDEBAR_SUBMENU_COMMUNITY_LIBRARY_DESCRIPTION": "由社群提供的額外資源。", + "I18N_SIDEBAR_SUBMENU_MULTIPLICATION": "乘法", + "I18N_SIDEBAR_SUBMENU_PLACE_VALUES": "位值", + "I18N_SIDEBAR_SUBMENU_SEE_ALL_LESSONS": "查看所有課程", "I18N_SIDEBAR_TEACH_WITH_OPPIA": "以 Oppia 教學", "I18N_SIDEBAR_VOLUNTEER": "志工", + "I18N_SIDEBAR_VOLUNTEER_DESCRIPTION": "加入我們的全球團隊來建立與改善課程。", "I18N_SIGNIN_LOADING": "正在登入", "I18N_SIGNIN_PAGE_TITLE": "登入", "I18N_SIGNUP_AGREE_LICENSE_DESCRIPTION": "根據勾選此文字左邊的框,代表您承認、同意、接受可於此找到的<[sitename]>使用條款要求。", @@ -659,10 +928,11 @@ "I18N_SIGNUP_LOADING": "載入中", "I18N_SIGNUP_PAGE_TITLE": "加入社群 - Oppia", "I18N_SIGNUP_REGISTRATION": "註冊", + "I18N_SIGNUP_SECTION_DONT_ASK_ME_AGAIN": "不用再詢問我", "I18N_SIGNUP_SEND_ME_NEWS": 
"寄給我關於網站的新聞和更新訊息", "I18N_SIGNUP_SITE_DESCRIPTION": "<[sitename]>是一個開放共享學習資源網站。此網站上所有用具皆為可自由重複使用和分享。", "I18N_SIGNUP_SITE_OBJECTIVE": "<[sitename]>存在的目的是促進創造、和持續改進一個任何人可自由使用的高品質學習資源集合。", - "I18N_SIGNUP_UPDATE_WARNING": "請注意我們剛剛更新我們的使用條款。", + "I18N_SIGNUP_UPDATE_WARNING": "請注意,我們更新了使用條款。", "I18N_SIGNUP_USERNAME": "用戶名稱", "I18N_SIGNUP_USERNAME_EXPLANATION": "您的用戶名稱將會顯示在您的貢獻之後。", "I18N_SIGNUP_WHY_LICENSE": "為什麼採用創用CC-姓名標示-相同方式分享?", @@ -675,9 +945,9 @@ "I18N_SPLASH_BENEFITS_TITLE": "我們的優勢", "I18N_SPLASH_BENEFITS_TWO": "故事形式的課程", "I18N_SPLASH_FIRST_EXPLORATION_DESCRIPTION": "Oppia 的課程;也稱之為探索,提供比固定的影片或文字更身臨其境的體驗,幫助用戶靠行動學習。", - "I18N_SPLASH_FOR_STUDENTS": "給學生們", - "I18N_SPLASH_FOR_TEACHERS": "給導師們", - "I18N_SPLASH_FOR_VOLUNTEERS": "給志工們", + "I18N_SPLASH_FOR_STUDENTS": "給學生", + "I18N_SPLASH_FOR_TEACHERS": "給導師", + "I18N_SPLASH_FOR_VOLUNTEERS": "給志工", "I18N_SPLASH_ICON_ONE_TEXT": "1 百萬名以上使用者", "I18N_SPLASH_ICON_THREE_TEXT": "<[lessonCount]> 種策劃課程", "I18N_SPLASH_ICON_TWO_TEXT": "在 <[languageCount]>+ 種以上語言可用", @@ -697,7 +967,7 @@ "I18N_SPLASH_STUDENT_DETAILS_2": "- 印度學生 Dheeraj", "I18N_SPLASH_STUDENT_DETAILS_3": "- 巴勒斯坦學生 Sama", "I18N_SPLASH_STUDENT_DETAILS_4": "- 印度學生 Gaurav", - "I18N_SPLASH_SUBTITLE": "適用於每一位的迷人、有效果的品質教育", + "I18N_SPLASH_SUBTITLE": "適用於每一位的迷人、有效果的優質教育", "I18N_SPLASH_TEACHERS_CONTENT": "藉由 Oppia 的內容建立系統,您可以輕鬆地為您的學生定義課程形式。建立並分享您感興趣的主題課程。", "I18N_SPLASH_TEACHERS_TITLE": "容易地來分享您的知識", "I18N_SPLASH_TESTIMONIAL_1": "「我享受著遊玩許多課程,而且一點也不無聊,我覺得現在我已經精通了負數」", @@ -709,9 +979,31 @@ "I18N_SPLASH_VOLUNTEERS_CONTENT": "無論您是誰,您都可以在 Oppia 找到自己的家。我們永遠需要更多人透過提出問題、貢獻圖表、或是翻譯課程來改善課程。", "I18N_SPLASH_VOLUNTEERS_TITLE": "透過社群運作", "I18N_START_HERE": "在此點擊來開始!", + "I18N_STORY_3M5VBajMccXO_DESCRIPTION": "在這個故事中,我們將和馬修一起去烘培坊買蛋糕。但糟糕的是,他沒有足夠的錢買一塊完整的蛋糕。因此貝克先生幫助馬修,把馬修所選的蛋糕切分成他買得起的小塊。接下來會發生什麼事呢?來遊玩課程找出答案!", + "I18N_STORY_3M5VBajMccXO_TITLE": "馬修參訪烘培坊", + "I18N_STORY_JhiDkq01dqgC_TITLE": "在遊樂園的一天", + "I18N_STORY_Qu6THxP29tOy_TITLE": "瑪雅、歐瑪與馬利克在做披薩!", + "I18N_STORY_RRVMHsZ5Mobh_DESCRIPTION": 
"在這個故事中,我們將跟著傑米和他的妹妹妮可學習如何表示和閱讀數字的值。", + "I18N_STORY_RRVMHsZ5Mobh_TITLE": "傑米的街機遊戲冒險", "I18N_STORY_VIEWER_COMPLETED_CHAPTER": "<[title]> - 完成!", + "I18N_STORY_VIEWER_PAGE_TITLE": "學習<[topicName]> | <[storyTitle]> | Oppia", + "I18N_STORY_ialKSV0VYV0B_DESCRIPTION": "來與詹姆士及他的叔叔見面,和他們找出如何使用比例來調出美味的飲料!", + "I18N_STORY_ialKSV0VYV0B_TITLE": "詹姆士的冰沙歷險記", + "I18N_STORY_rqnxwceQyFnv_TITLE": "參觀市場的妮娜", + "I18N_STORY_vfJDB3JAdwIx_TITLE": "艾莉雅想種一個花園", "I18N_SUBSCRIBE_BUTTON_TEXT": "訂閱", - "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "下一個技能", + "I18N_SUBTOPIC_0abdeaJhmfPm_mixed-numbers_TITLE": "帶分數", + "I18N_SUBTOPIC_0abdeaJhmfPm_what-is-a-fraction_TITLE": "什麼是分數?", + "I18N_SUBTOPIC_5g0nxGUmx5J5_what-is-a-ratio_TITLE": "什麼是比例?", + "I18N_SUBTOPIC_C4fqwrvqWpRm_basic-concepts_TITLE": "乘法的基本概念", + "I18N_SUBTOPIC_C4fqwrvqWpRm_multiplication-techniques_TITLE": "乘法技巧", + "I18N_SUBTOPIC_VIEWER_NEXT_SKILL": "下一技能:", + "I18N_SUBTOPIC_VIEWER_PREVIOUS_SKILL": "之前的技能:", + "I18N_SUBTOPIC_iX9kYCjnouWN_rounding-numbers_TITLE": "四捨五入", + "I18N_SUBTOPIC_qW12maD4hiA8_basic-concepts_TITLE": "除法的基本概念", + "I18N_SUBTOPIC_qW12maD4hiA8_techniques-of-division_TITLE": "除法技巧", + "I18N_SUBTOPIC_sWBXKH4PZcK6_addition-subtraction_TITLE": "加法與減法之間的關係", + "I18N_SYLLABUS_SKILL_TITLE": "技能", "I18N_TEACH_BENEFITS_ONE": "適合所有年齡層的有效、高品質學習", "I18N_TEACH_BENEFITS_THREE": "永遠免費,而且容易上手", "I18N_TEACH_BENEFITS_TITLE": "我們的優勢", @@ -720,12 +1012,13 @@ "I18N_TEACH_PAGE_CLASSROOM_BUTTON": "參觀教室", "I18N_TEACH_PAGE_CLASSROOM_CONTENT": "在教室裡,您可以找到由 Oppia 團隊設計和測試的一組課程,來確認它們對所有學習者都有效且有趣。所有的課程都有經過教師和專家們的審核,因此您可以放心,您的學生會在按照自己進度學習當中,同時獲得有效的教育。", "I18N_TEACH_PAGE_CLASSROOM_TITLE": "從經過驗證的 Oppia 課程中學習", - "I18N_TEACH_PAGE_CONTENT": "Oppia 是一種吸引人的線上學習新方式,專注在確保每個人都能獲得高品質教育。", + "I18N_TEACH_PAGE_CONTENT": "Oppia 是一種吸引人的線上學習新方式,專注在確保每個人都能獲得優質教育。", "I18N_TEACH_PAGE_HEADING": "給家長、教師、監護人的 Oppia", "I18N_TEACH_PAGE_LIBRARY_BUTTON": "瀏覽圖書館", "I18N_TEACH_PAGE_LIBRARY_CONTENT": "來自世界各地的教育工作者和社群成員使用 Oppia 課程創建平台,來作為建立和分享課程的一種方式。在我們的探索圖書館中,您可以找到針對 17 種不同主題的 
20000 個以上課程,另外您也可能會因此受到啟發,來打造出自己的課程!", "I18N_TEACH_PAGE_LIBRARY_TITLE": "瀏覽由社群做出的課程", "I18N_TEACH_PAGE_SIX_TITLE": "今天立刻學習", + "I18N_TEACH_PAGE_TITLE": "家長與導師的 Oppia 指南 | Oppia", "I18N_TEACH_STUDENT_DETAILS_1": "Riya Sogani", "I18N_TEACH_STUDENT_DETAILS_2": "Wala Awad", "I18N_TEACH_STUDENT_DETAILS_3": "Himanshu Taneja,Kurukshetra,印度", @@ -733,56 +1026,93 @@ "I18N_TEACH_TESTIMONIAL_1": "「我很高興有機會教育弱勢的印度孩子,並填補他們對批判性數學概念理解方面上的差距。看著這些學生們的自信心增加,這些額外幾個小時時間的學習都是值得的。」", "I18N_TEACH_TESTIMONIAL_2": "「Oppia 是第一個有這樣功能的網站!它能以吸引人的參與方式,來協助學生學習他們感興趣的特定主題,另外也能鼓勵學生使用智慧型設備來學習知識。」", "I18N_TEACH_TESTIMONIAL_3": "「我從沒想過學生能如此快速地學習技術和數學課程。這是他們第一次接觸智慧型技術,在一開始,他們對這些東西還很陌生。到了現在,我很高興看到在我進教室之前,他們就已經先上好 Oppia 的課程!」", + "I18N_TERMS_PAGE_TITLE": "使用條款 | Oppia", "I18N_THANKS_PAGE_BREADCRUMB": "感謝", + "I18N_THANKS_PAGE_TITLE": "感謝 | Oppia", + "I18N_TIME_FOR_BREAK_BODY_1": "您似乎沒特別思考就直接提交答案。您有點累了嗎?", + "I18N_TIME_FOR_BREAK_BODY_2": "如果是這樣的話,請休息一下!您可以稍後回來。", + "I18N_TIME_FOR_BREAK_FOOTER": "我準備好繼續課程", + "I18N_TIME_FOR_BREAK_TITLE": "要休息一下嗎?", + "I18N_TOPIC_0abdeaJhmfPm_TITLE": "分數", + "I18N_TOPIC_C4fqwrvqWpRm_DESCRIPTION": "如果一盒裡有 5 個蛋糕,然後你買了 60 盒,這樣你一共有多少個蛋糕?在本主題中,您將會學習如何使用乘法來解決此類問題(讓您不必每次都得相加大量數字!)。", + "I18N_TOPIC_C4fqwrvqWpRm_TITLE": "乘法", + "I18N_TOPIC_LANDING_PAGE_TITLE": "<[topicTitle]> | <[topicTagline]> | Oppia", + "I18N_TOPIC_LEARN": "學習", "I18N_TOPIC_SUMMARY_TILE_LESSONS": "{lessonCount, plural, =1{1 個課程} other{# 個課程}}", + "I18N_TOPIC_TITLE": "主題", "I18N_TOPIC_VIEWER_CHAPTER": "章節", "I18N_TOPIC_VIEWER_CHAPTERS": "{count, plural, one{1 個章節} other{# 個章節}}", + "I18N_TOPIC_VIEWER_COMING_SOON": "即將到來!", "I18N_TOPIC_VIEWER_DESCRIPTION": "描述", "I18N_TOPIC_VIEWER_LESSON": "課程", "I18N_TOPIC_VIEWER_LESSONS": "課程", "I18N_TOPIC_VIEWER_MASTER_SKILLS": "<[topicName]>的主要技能", + "I18N_TOPIC_VIEWER_NO_QUESTION_WARNING": "已選擇的子話題中還沒有問題建立。", + "I18N_TOPIC_VIEWER_PAGE_TITLE": "<[topicName]> | <[pageTitleFragment]> | Oppia", "I18N_TOPIC_VIEWER_PRACTICE": "練習", + 
+ "I18N_TOPIC_iX9kYCjnouWN_DESCRIPTION": "您知道所有可能的事物數字都可以僅用十個數字(0、1、2、3、…、9)來表示嗎?在本主題中,我們將學習如何使用位值來做到這一點,並了解為什麼「5」在「25」和「2506」中具有不同的值。",
"學習者面板", + "I18N_TOPNAV_LEARN_DESCRIPTION_1": "數學入門的基礎課程。", + "I18N_TOPNAV_LEARN_HEADING": "學習更多的方式", + "I18N_TOPNAV_LEARN_LINK_1": "查看所有課程", + "I18N_TOPNAV_LEARN_LINK_2": "繼續學習", + "I18N_TOPNAV_LIBRARY": "社群圖書館", + "I18N_TOPNAV_LIBRARY_DESCRIPTION": "由社群提供的額外資源能幫助您學習更多。", "I18N_TOPNAV_LOGOUT": "登出", "I18N_TOPNAV_MODERATOR_PAGE": "版主頁面", "I18N_TOPNAV_OPPIA_FOUNDATION": "Oppia 基金會", "I18N_TOPNAV_PARTICIPATION_PLAYBOOK": "參與規範", - "I18N_TOPNAV_PARTNERSHIPS": "合作夥伴", + "I18N_TOPNAV_PARTNERSHIPS": "學校與組織", + "I18N_TOPNAV_PARTNERSHIPS_DESCRIPTION": "一同合作並將 Oppia 帶到您的學校、社區、地區。", "I18N_TOPNAV_PREFERENCES": "偏好設定", "I18N_TOPNAV_SIGN_IN": "登入", "I18N_TOPNAV_SIGN_IN_WITH_GOOGLE": "使用 Google 帳號登入", "I18N_TOPNAV_TEACH_WITH_OPPIA": "以 Oppia 教學", - "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "主題與技能控制面板", + "I18N_TOPNAV_TOPICS_AND_SKILLS_DASHBOARD": "主題與技能面板", + "I18N_TOPNAV_TRY_ANDROID_APP_TEXT": "今天就試試吧!", + "I18N_TOPNAV_VOLUNTEER_DESCRIPTION": "加入我們的全球團隊來建立與改善課程。", "I18N_TOTAL_SUBSCRIBERS_TEXT": "您一共擁有<[totalSubscribers]>位訂閱者。", "I18N_UNSUBSCRIBE_BUTTON_TEXT": "取消訂閱", + "I18N_VIEW_ALL_TOPICS": "檢視所有<[classroomName]>主題", "I18N_VOLUNTEER_PAGE_BREADCRUMB": "志工", + "I18N_VOLUNTEER_PAGE_TITLE": "志工 | Oppia", "I18N_WARNING_MODAL_DESCRIPTION": "這會顯示出完整的解決方式,您確定嗎?", "I18N_WARNING_MODAL_TITLE": "注意!", - "I18N_WORKED_EXAMPLE": "處理範例" + "I18N_WORKED_EXAMPLE": "處理範例", + "I18N_YES": "是" } diff --git a/assets/images/about/background_dsk_1.svg b/assets/images/about/background_dsk_1.svg index c8a40d29c283..0f6115c07d74 100644 --- a/assets/images/about/background_dsk_1.svg +++ b/assets/images/about/background_dsk_1.svg @@ -1,2 +1 @@ - - + \ No newline at end of file diff --git a/assets/images/about/background_dsk_2.svg b/assets/images/about/background_dsk_2.svg index 47e25ee99348..8f25793c8553 100644 --- a/assets/images/about/background_dsk_2.svg +++ b/assets/images/about/background_dsk_2.svg @@ -1,2 +1 @@ - - + \ No newline at end of file diff --git 
a/assets/images/about/background_mobile_1.svg b/assets/images/about/background_mobile_1.svg index 9395b9411676..44dc9a03bef2 100644 --- a/assets/images/about/background_mobile_1.svg +++ b/assets/images/about/background_mobile_1.svg @@ -1,2 +1 @@ - - + \ No newline at end of file diff --git a/assets/images/about/background_mobile_2.svg b/assets/images/about/background_mobile_2.svg index ca1726f29b06..7285f7eeb899 100644 --- a/assets/images/about/background_mobile_2.svg +++ b/assets/images/about/background_mobile_2.svg @@ -1,2 +1 @@ - - + \ No newline at end of file diff --git a/assets/images/about/language_icon.svg b/assets/images/about/language_icon.svg index 4496921a0f25..7a1a1830c4fc 100644 --- a/assets/images/about/language_icon.svg +++ b/assets/images/about/language_icon.svg @@ -1,2 +1 @@ - - + \ No newline at end of file diff --git a/assets/images/about/lesson_icon.svg b/assets/images/about/lesson_icon.svg index 924d1d42933b..88a4924c5e7d 100644 --- a/assets/images/about/lesson_icon.svg +++ b/assets/images/about/lesson_icon.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/assets/images/activity/collection.svg b/assets/images/activity/collection.svg index f895c20b107d..3ecedcceef19 100644 --- a/assets/images/activity/collection.svg +++ b/assets/images/activity/collection.svg @@ -1 +1 @@ -collection \ No newline at end of file + \ No newline at end of file diff --git a/assets/images/activity/exploration.svg b/assets/images/activity/exploration.svg index e335b43ef3c8..1aff1ee4e3d2 100644 --- a/assets/images/activity/exploration.svg +++ b/assets/images/activity/exploration.svg @@ -1 +1 @@ -exploration \ No newline at end of file + \ No newline at end of file diff --git a/assets/images/android/android-landing-0.svg b/assets/images/android/android-landing-0.svg new file mode 100644 index 000000000000..81279f2a70a4 --- /dev/null +++ b/assets/images/android/android-landing-0.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/assets/images/android/android-landing-1.svg b/assets/images/android/android-landing-1.svg new file mode 100644 index 000000000000..e46c18687527 --- /dev/null +++ b/assets/images/android/android-landing-1.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/assets/images/android/android-landing-2.svg b/assets/images/android/android-landing-2.svg new file mode 100644 index 000000000000..c8c9f58e5d34 --- /dev/null +++ b/assets/images/android/android-landing-2.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/assets/images/android/android-landing-3.svg b/assets/images/android/android-landing-3.svg new file mode 100644 index 000000000000..93996d8acf8a --- /dev/null +++ b/assets/images/android/android-landing-3.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/android/android-landing-4.svg b/assets/images/android/android-landing-4.svg new file mode 100644 index 000000000000..a0c14742b5f7 --- /dev/null +++ b/assets/images/android/android-landing-4.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/assets/images/android/android-landing-google-play.svg b/assets/images/android/android-landing-google-play.svg new file mode 100644 index 000000000000..b08a053236eb --- /dev/null +++ b/assets/images/android/android-landing-google-play.svg @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/assets/images/android/android-landing-speech-bubble.svg b/assets/images/android/android-landing-speech-bubble.svg new file mode 100644 index 000000000000..584b9459a248 --- /dev/null +++ b/assets/images/android/android-landing-speech-bubble.svg @@ -0,0 +1,3 @@ + + + diff --git a/assets/images/avatar/oppia_avatar_100px.svg b/assets/images/avatar/oppia_avatar_100px.svg index 42220addbaa9..f2c47e3d4a96 100644 --- a/assets/images/avatar/oppia_avatar_100px.svg +++ b/assets/images/avatar/oppia_avatar_100px.svg @@ -1,3 +1 @@ - - - + \ No newline at end of file diff --git 
a/assets/images/avatar/oppia_avatar_large_100px.svg b/assets/images/avatar/oppia_avatar_large_100px.svg new file mode 100644 index 000000000000..0414dcea6ebd --- /dev/null +++ b/assets/images/avatar/oppia_avatar_large_100px.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/assets/images/avatar/user_blue_150px.png b/assets/images/avatar/user_blue_150px.png new file mode 100644 index 000000000000..4b2dcc778d02 Binary files /dev/null and b/assets/images/avatar/user_blue_150px.png differ diff --git a/assets/images/avatar/user_blue_72px.svg b/assets/images/avatar/user_blue_72px.svg index cc50c20273d7..c6d4b6e983e1 100644 --- a/assets/images/avatar/user_blue_72px.svg +++ b/assets/images/avatar/user_blue_72px.svg @@ -1,36 +1 @@ - - - - UserIcon - Created with Sketch. - - - - - - - - - - \ No newline at end of file + \ No newline at end of file diff --git a/assets/images/background/bannerA.svg b/assets/images/background/bannerA.svg index 89c33291b5f6..b6858b49cc5a 100644 --- a/assets/images/background/bannerA.svg +++ b/assets/images/background/bannerA.svg @@ -1,2306 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/background/bannerB.svg b/assets/images/background/bannerB.svg index ca6176cf1dea..f6b3f179af23 100644 --- a/assets/images/background/bannerB.svg +++ b/assets/images/background/bannerB.svg @@ -1,2473 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/background/bannerC.svg b/assets/images/background/bannerC.svg index 8ea31704a3f5..97df0f36312a 100644 --- a/assets/images/background/bannerC.svg +++ b/assets/images/background/bannerC.svg @@ -1,1292 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/background/bannerD.svg b/assets/images/background/bannerD.svg index fe73016e1398..0a38155684ef 100644 --- a/assets/images/background/bannerD.svg +++ b/assets/images/background/bannerD.svg @@ -1,1306 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/contributor_dashboard/locked-badge.svg b/assets/images/contributor_dashboard/locked-badge.svg new file mode 100644 index 000000000000..22cb5e575983 --- /dev/null +++ b/assets/images/contributor_dashboard/locked-badge.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/assets/images/contributor_dashboard/oppia-logo.jpg 
b/assets/images/contributor_dashboard/oppia-logo.jpg new file mode 100644 index 000000000000..e95acf7876bc Binary files /dev/null and b/assets/images/contributor_dashboard/oppia-logo.jpg differ diff --git a/assets/images/contributor_dashboard/question-correction-badge.component.svg b/assets/images/contributor_dashboard/question-correction-badge.component.svg new file mode 100644 index 000000000000..70d2cec0f786 --- /dev/null +++ b/assets/images/contributor_dashboard/question-correction-badge.component.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/contributor_dashboard/question-review-badge.component.svg b/assets/images/contributor_dashboard/question-review-badge.component.svg new file mode 100644 index 000000000000..24699d963bfc --- /dev/null +++ b/assets/images/contributor_dashboard/question-review-badge.component.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/contributor_dashboard/question-submission-badge.component.svg b/assets/images/contributor_dashboard/question-submission-badge.component.svg new file mode 100644 index 000000000000..e224758ebc5f --- /dev/null +++ b/assets/images/contributor_dashboard/question-submission-badge.component.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/contributor_dashboard/translation-correction-badge.component.svg b/assets/images/contributor_dashboard/translation-correction-badge.component.svg new file mode 100644 index 000000000000..e37be2fe8183 --- /dev/null +++ b/assets/images/contributor_dashboard/translation-correction-badge.component.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/contributor_dashboard/translation-review-badge.component.svg b/assets/images/contributor_dashboard/translation-review-badge.component.svg new file mode 100644 index 000000000000..c27b69d96879 --- /dev/null +++ b/assets/images/contributor_dashboard/translation-review-badge.component.svg @@ -0,0 +1 @@ + \ No newline at end of file diff 
--git a/assets/images/contributor_dashboard/translation-submission-badge.component.svg b/assets/images/contributor_dashboard/translation-submission-badge.component.svg new file mode 100644 index 000000000000..b2f084eb388d --- /dev/null +++ b/assets/images/contributor_dashboard/translation-submission-badge.component.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/donate/donate-page-banner.jpg b/assets/images/donate/donate-page-banner.jpg new file mode 100644 index 000000000000..fa616679f4f3 Binary files /dev/null and b/assets/images/donate/donate-page-banner.jpg differ diff --git a/assets/images/donate/donate-page-banner.webp b/assets/images/donate/donate-page-banner.webp new file mode 100644 index 000000000000..873a66c665e4 Binary files /dev/null and b/assets/images/donate/donate-page-banner.webp differ diff --git a/assets/images/donate/oppia-donate-learner15x.png b/assets/images/donate/oppia-donate-learner15x.png new file mode 100644 index 000000000000..6b52316fa5ec Binary files /dev/null and b/assets/images/donate/oppia-donate-learner15x.png differ diff --git a/assets/images/donate/oppia-donate-learner15x.webp b/assets/images/donate/oppia-donate-learner15x.webp new file mode 100644 index 000000000000..de6bb4529d26 Binary files /dev/null and b/assets/images/donate/oppia-donate-learner15x.webp differ diff --git a/assets/images/donate/oppia-donate-learner1x.png b/assets/images/donate/oppia-donate-learner1x.png new file mode 100644 index 000000000000..dfe361263a49 Binary files /dev/null and b/assets/images/donate/oppia-donate-learner1x.png differ diff --git a/assets/images/donate/oppia-donate-learner1x.webp b/assets/images/donate/oppia-donate-learner1x.webp new file mode 100644 index 000000000000..f32ce349ec4a Binary files /dev/null and b/assets/images/donate/oppia-donate-learner1x.webp differ diff --git a/assets/images/donate/oppia-donate-learner2x.png b/assets/images/donate/oppia-donate-learner2x.png new file mode 100644 index 
000000000000..a80a2a11e2d2 Binary files /dev/null and b/assets/images/donate/oppia-donate-learner2x.png differ diff --git a/assets/images/donate/oppia-donate-learner2x.webp b/assets/images/donate/oppia-donate-learner2x.webp new file mode 100644 index 000000000000..d8ee1c94c172 Binary files /dev/null and b/assets/images/donate/oppia-donate-learner2x.webp differ diff --git a/assets/images/donate/oppia-donate-maintenance.svg b/assets/images/donate/oppia-donate-maintenance.svg new file mode 100644 index 000000000000..77fce5c1b104 --- /dev/null +++ b/assets/images/donate/oppia-donate-maintenance.svg @@ -0,0 +1,3 @@ + + + diff --git a/assets/images/donate/oppia-donate-outreach.svg b/assets/images/donate/oppia-donate-outreach.svg new file mode 100644 index 000000000000..ee430f1170dc --- /dev/null +++ b/assets/images/donate/oppia-donate-outreach.svg @@ -0,0 +1,3 @@ + + + diff --git a/assets/images/donate/oppia-donate-spread-word.svg b/assets/images/donate/oppia-donate-spread-word.svg new file mode 100644 index 000000000000..ad14e5f5532e --- /dev/null +++ b/assets/images/donate/oppia-donate-spread-word.svg @@ -0,0 +1,3 @@ + + + diff --git a/assets/images/donate/thanks-for-donating.jpg b/assets/images/donate/thanks-for-donating.jpg new file mode 100644 index 000000000000..452c9b7cb50a Binary files /dev/null and b/assets/images/donate/thanks-for-donating.jpg differ diff --git a/assets/images/donate/thanks-for-donating.webp b/assets/images/donate/thanks-for-donating.webp new file mode 100644 index 000000000000..e04494a5efee Binary files /dev/null and b/assets/images/donate/thanks-for-donating.webp differ diff --git a/assets/images/donate/thanks-for-subscribing.jpg b/assets/images/donate/thanks-for-subscribing.jpg new file mode 100644 index 000000000000..5f5271c60c7d Binary files /dev/null and b/assets/images/donate/thanks-for-subscribing.jpg differ diff --git a/assets/images/donate/thanks-for-subscribing.webp b/assets/images/donate/thanks-for-subscribing.webp new file mode 
100644 index 000000000000..bfaf7792318b Binary files /dev/null and b/assets/images/donate/thanks-for-subscribing.webp differ diff --git a/assets/images/favicon_alert/favicon_alert.ico b/assets/images/favicon_alert/favicon_alert.ico new file mode 100644 index 000000000000..6fb0d62bc25f Binary files /dev/null and b/assets/images/favicon_alert/favicon_alert.ico differ diff --git a/assets/images/general/apple.svg b/assets/images/general/apple.svg index cefa1fe9620c..65d39418b697 100644 --- a/assets/images/general/apple.svg +++ b/assets/images/general/apple.svg @@ -1,13 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/general/collection_corner.svg b/assets/images/general/collection_corner.svg index 1c0d754515ee..89f46bca921d 100644 --- a/assets/images/general/collection_corner.svg +++ b/assets/images/general/collection_corner.svg @@ -1,10 +1 @@ - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/general/collection_mascot.svg b/assets/images/general/collection_mascot.svg index eac9d78b9b6b..36174460a71d 100644 --- a/assets/images/general/collection_mascot.svg +++ b/assets/images/general/collection_mascot.svg @@ -1,43 +1 @@ - - - -image/svg+xml + \ No newline at end of file diff --git a/assets/images/general/collection_paw.svg b/assets/images/general/collection_paw.svg index 1c2496d6e70d..8b1189aeb603 100644 --- a/assets/images/general/collection_paw.svg +++ b/assets/images/general/collection_paw.svg @@ -1,96 +1 @@ - - - -image/svg+xml \ No newline at end of file + \ No newline at end of file diff --git a/assets/images/general/collection_start_here_arrow.svg b/assets/images/general/collection_start_here_arrow.svg index 429bce49f821..01344e0625da 100644 --- a/assets/images/general/collection_start_here_arrow.svg +++ b/assets/images/general/collection_start_here_arrow.svg @@ -1,11 +1 @@ - - - - - - - + \ No newline at end of file diff --git a/assets/images/general/congrats.svg b/assets/images/general/congrats.svg index 
14831ff2664f..3c04558b912f 100644 --- a/assets/images/general/congrats.svg +++ b/assets/images/general/congrats.svg @@ -1,4 +1 @@ - - - - \ No newline at end of file + \ No newline at end of file diff --git a/assets/images/general/editor_welcome.svg b/assets/images/general/editor_welcome.svg index e251bc73caf2..f3ae1e72c6b0 100644 --- a/assets/images/general/editor_welcome.svg +++ b/assets/images/general/editor_welcome.svg @@ -1,4 +1 @@ - - - - \ No newline at end of file + \ No newline at end of file diff --git a/assets/images/general/empty_dashboard.svg b/assets/images/general/empty_dashboard.svg index 8cf26440e879..8765dbf59cc5 100644 --- a/assets/images/general/empty_dashboard.svg +++ b/assets/images/general/empty_dashboard.svg @@ -1,2 +1 @@ - - + \ No newline at end of file diff --git a/assets/images/general/milestone-message-star-icon.svg b/assets/images/general/milestone-message-star-icon.svg new file mode 100644 index 000000000000..caba18658e94 --- /dev/null +++ b/assets/images/general/milestone-message-star-icon.svg @@ -0,0 +1 @@ +Milestone message star icon diff --git a/assets/images/general/mobile_path_segment.svg b/assets/images/general/mobile_path_segment.svg index fff49c69850f..1605bb0f3a5c 100644 --- a/assets/images/general/mobile_path_segment.svg +++ b/assets/images/general/mobile_path_segment.svg @@ -1,185 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/google_signin_buttons/google_signin.svg b/assets/images/google_signin_buttons/google_signin.svg index acf7fa7569a7..e079b6f65e47 100644 --- a/assets/images/google_signin_buttons/google_signin.svg +++ b/assets/images/google_signin_buttons/google_signin.svg @@ -1,10 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/info_24px.svg b/assets/images/icons/info_24px.svg index c6255a26e911..f1a4417ce9bf 100644 --- a/assets/images/icons/info_24px.svg +++ 
b/assets/images/icons/info_24px.svg @@ -1,4 +1 @@ - - - - + \ No newline at end of file diff --git a/assets/images/icons/osk-delete.svg b/assets/images/icons/osk-delete.svg index 91b42c2918b2..58a450c1c46a 100644 --- a/assets/images/icons/osk-delete.svg +++ b/assets/images/icons/osk-delete.svg @@ -1,36 +1 @@ - - - - 7D43BFD7-E509-4B6F-B2CC-826C6D5E292D - Created with sketchtool. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/osk-exponent-2.svg b/assets/images/icons/osk-exponent-2.svg index 68f8b352950f..2d68364f758d 100644 --- a/assets/images/icons/osk-exponent-2.svg +++ b/assets/images/icons/osk-exponent-2.svg @@ -1,38 +1 @@ - - - - F920A12E-7090-48CB-9225-7B2EE1F67311 - Created with sketchtool. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/osk-exponent-3.svg b/assets/images/icons/osk-exponent-3.svg index d673b41bd96b..726f6391d898 100644 --- a/assets/images/icons/osk-exponent-3.svg +++ b/assets/images/icons/osk-exponent-3.svg @@ -1,38 +1 @@ - - - - AFB00068-CF58-4234-981A-27B3D3522C77 - Created with sketchtool. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/osk-exponent.svg b/assets/images/icons/osk-exponent.svg index b610e374cbde..7aede8934417 100644 --- a/assets/images/icons/osk-exponent.svg +++ b/assets/images/icons/osk-exponent.svg @@ -1,38 +1 @@ - - - - E26A7B0C-2446-4BCD-822A-3E8737D8FBC3 - Created with sketchtool. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/osk-fraction.svg b/assets/images/icons/osk-fraction.svg index b677ed25cd14..fe08105501cc 100644 --- a/assets/images/icons/osk-fraction.svg +++ b/assets/images/icons/osk-fraction.svg @@ -1,42 +1 @@ - - - - 3A1C17C5-47F6-48A0-8859-8FA964A9E255 - Created with sketchtool. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/osk-radical-2.svg b/assets/images/icons/osk-radical-2.svg index 95643b58b6b7..993e12e031d4 100644 --- a/assets/images/icons/osk-radical-2.svg +++ b/assets/images/icons/osk-radical-2.svg @@ -1,36 +1 @@ - - - - CF438228-E29F-4221-8EDF-FA6F6DCC67C8 - Created with sketchtool. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/play_icon_24px.svg b/assets/images/icons/play_icon_24px.svg index d0d1dd7e01ae..7d9589463342 100644 --- a/assets/images/icons/play_icon_24px.svg +++ b/assets/images/icons/play_icon_24px.svg @@ -1,19 +1 @@ - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/review_icon_24px.svg b/assets/images/icons/review_icon_24px.svg index 1d99910c64fe..5dfea263da86 100644 --- a/assets/images/icons/review_icon_24px.svg +++ b/assets/images/icons/review_icon_24px.svg @@ -1,31 +1 @@ - - - -review_icon_24px - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/rewind-five.svg b/assets/images/icons/rewind-five.svg index 78279e0920d4..11b18e2d0525 100644 --- a/assets/images/icons/rewind-five.svg +++ b/assets/images/icons/rewind-five.svg @@ -1,33 +1 @@ - - - - -Created by potrace 1.14, written by Peter Selinger 2001-2017 - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/time_machine.svg b/assets/images/icons/time_machine.svg index 702829682195..d3cf11087365 100644 --- a/assets/images/icons/time_machine.svg +++ b/assets/images/icons/time_machine.svg @@ -1,19 +1 @@ - - - - 51EA3A70-CB55-468B-A003-EDCD03143B0F - Created with sketchtool. 
- - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/icons/train_icon_24px.svg b/assets/images/icons/train_icon_24px.svg index 769633c0f141..bb667b17d233 100644 --- a/assets/images/icons/train_icon_24px.svg +++ b/assets/images/icons/train_icon_24px.svg @@ -1,17 +1 @@ - - - - - train_icon_24px - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Algebra.svg b/assets/images/inverted_subjects/Algebra.svg index 86fcec473027..3902b2147963 100644 --- a/assets/images/inverted_subjects/Algebra.svg +++ b/assets/images/inverted_subjects/Algebra.svg @@ -1,15 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Algorithms.svg b/assets/images/inverted_subjects/Algorithms.svg index 9a1589ccda10..61322e4d0b5d 100644 --- a/assets/images/inverted_subjects/Algorithms.svg +++ b/assets/images/inverted_subjects/Algorithms.svg @@ -1,21 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Architecture.svg b/assets/images/inverted_subjects/Architecture.svg index 57d2fa3b73fc..5c98bdb0060a 100644 --- a/assets/images/inverted_subjects/Architecture.svg +++ b/assets/images/inverted_subjects/Architecture.svg @@ -1,20 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Arithmetic.svg b/assets/images/inverted_subjects/Arithmetic.svg index 45293f3f6708..d7a44de10966 100644 --- a/assets/images/inverted_subjects/Arithmetic.svg +++ b/assets/images/inverted_subjects/Arithmetic.svg @@ -1,32 +1 @@ - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Art.svg b/assets/images/inverted_subjects/Art.svg index 70f313774995..732816ca12ca 100644 --- a/assets/images/inverted_subjects/Art.svg +++ b/assets/images/inverted_subjects/Art.svg @@ -1,21 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git 
a/assets/images/inverted_subjects/Astronomy.svg b/assets/images/inverted_subjects/Astronomy.svg index 825c55a4fbd6..b9f5ea394a1e 100644 --- a/assets/images/inverted_subjects/Astronomy.svg +++ b/assets/images/inverted_subjects/Astronomy.svg @@ -1,13 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Biology.svg b/assets/images/inverted_subjects/Biology.svg index 079f3494a7e7..92124a0b919d 100644 --- a/assets/images/inverted_subjects/Biology.svg +++ b/assets/images/inverted_subjects/Biology.svg @@ -1,18 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Business.svg b/assets/images/inverted_subjects/Business.svg index 071b6d878e9a..54670d8ae5e7 100644 --- a/assets/images/inverted_subjects/Business.svg +++ b/assets/images/inverted_subjects/Business.svg @@ -1,35 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Calculus.svg b/assets/images/inverted_subjects/Calculus.svg index 70cc844331da..278bc1ebef3c 100644 --- a/assets/images/inverted_subjects/Calculus.svg +++ b/assets/images/inverted_subjects/Calculus.svg @@ -1,22 +1 @@ - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Chemistry.svg b/assets/images/inverted_subjects/Chemistry.svg index fe48823fd455..8ed4cffc4743 100644 --- a/assets/images/inverted_subjects/Chemistry.svg +++ b/assets/images/inverted_subjects/Chemistry.svg @@ -1,23 +1 @@ - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Chess.svg b/assets/images/inverted_subjects/Chess.svg index 1b47ecc2537a..a5ad3acba077 100644 --- a/assets/images/inverted_subjects/Chess.svg +++ b/assets/images/inverted_subjects/Chess.svg @@ -1,19 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Combinatorics.svg b/assets/images/inverted_subjects/Combinatorics.svg 
index ebf0a026a58e..551808fd2690 100644 --- a/assets/images/inverted_subjects/Combinatorics.svg +++ b/assets/images/inverted_subjects/Combinatorics.svg @@ -1,20 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Computing.svg b/assets/images/inverted_subjects/Computing.svg index f13cd5e3f119..f818eafb45c8 100644 --- a/assets/images/inverted_subjects/Computing.svg +++ b/assets/images/inverted_subjects/Computing.svg @@ -1,18 +1 @@ - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Cooking.svg b/assets/images/inverted_subjects/Cooking.svg index 73191d3d5c8e..de54976ba83d 100644 --- a/assets/images/inverted_subjects/Cooking.svg +++ b/assets/images/inverted_subjects/Cooking.svg @@ -1,24 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Creativity.svg b/assets/images/inverted_subjects/Creativity.svg index 8e01cd2b1023..1cdf42cf77ab 100644 --- a/assets/images/inverted_subjects/Creativity.svg +++ b/assets/images/inverted_subjects/Creativity.svg @@ -1,32 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Economics.svg b/assets/images/inverted_subjects/Economics.svg index f34e33520070..c4badbf5eee9 100644 --- a/assets/images/inverted_subjects/Economics.svg +++ b/assets/images/inverted_subjects/Economics.svg @@ -1,27 +1 @@ - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Education.svg b/assets/images/inverted_subjects/Education.svg index a11741d723a0..eb29c9063692 100644 --- a/assets/images/inverted_subjects/Education.svg +++ b/assets/images/inverted_subjects/Education.svg @@ -1,18 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Engineering.svg b/assets/images/inverted_subjects/Engineering.svg index c830d55e445c..e9aa4f9701c8 100644 --- 
a/assets/images/inverted_subjects/Engineering.svg +++ b/assets/images/inverted_subjects/Engineering.svg @@ -1,33 +1 @@ - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/English.svg b/assets/images/inverted_subjects/English.svg index 4b87af033943..440323e2d67c 100644 --- a/assets/images/inverted_subjects/English.svg +++ b/assets/images/inverted_subjects/English.svg @@ -1,39 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Environment.svg b/assets/images/inverted_subjects/Environment.svg index 079f3494a7e7..92124a0b919d 100644 --- a/assets/images/inverted_subjects/Environment.svg +++ b/assets/images/inverted_subjects/Environment.svg @@ -1,18 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Gaulish.svg b/assets/images/inverted_subjects/Gaulish.svg index 886fa61b698f..2abc5f1e5bba 100644 --- a/assets/images/inverted_subjects/Gaulish.svg +++ b/assets/images/inverted_subjects/Gaulish.svg @@ -1,52 +1 @@ - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Genetics.svg b/assets/images/inverted_subjects/Genetics.svg index e18e134e8f84..8fae3adcd8a1 100644 --- a/assets/images/inverted_subjects/Genetics.svg +++ b/assets/images/inverted_subjects/Genetics.svg @@ -1,23 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Geography.svg b/assets/images/inverted_subjects/Geography.svg index b1fd31735e15..949ce19b9ffd 100644 --- a/assets/images/inverted_subjects/Geography.svg +++ b/assets/images/inverted_subjects/Geography.svg @@ -1,21 +1 @@ - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Geometry.svg b/assets/images/inverted_subjects/Geometry.svg index bf0d5317d762..f2350a118734 100644 --- a/assets/images/inverted_subjects/Geometry.svg +++ 
b/assets/images/inverted_subjects/Geometry.svg @@ -1,11 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Government.svg b/assets/images/inverted_subjects/Government.svg index 89264b535f4b..fe29436f0e02 100644 --- a/assets/images/inverted_subjects/Government.svg +++ b/assets/images/inverted_subjects/Government.svg @@ -1,16 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/GraphTheory.svg b/assets/images/inverted_subjects/GraphTheory.svg index faf0d48e0720..38abe0a29a99 100644 --- a/assets/images/inverted_subjects/GraphTheory.svg +++ b/assets/images/inverted_subjects/GraphTheory.svg @@ -1,26 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/History.svg b/assets/images/inverted_subjects/History.svg index d7bee1044bc8..6da1a1303a55 100644 --- a/assets/images/inverted_subjects/History.svg +++ b/assets/images/inverted_subjects/History.svg @@ -1,32 +1 @@ - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Humor.svg b/assets/images/inverted_subjects/Humor.svg index d44c5a3036d4..05ac20116299 100644 --- a/assets/images/inverted_subjects/Humor.svg +++ b/assets/images/inverted_subjects/Humor.svg @@ -1,13 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Language.svg b/assets/images/inverted_subjects/Language.svg index dc4135afa3a6..c0c43e682736 100644 --- a/assets/images/inverted_subjects/Language.svg +++ b/assets/images/inverted_subjects/Language.svg @@ -1,11 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Languages.svg b/assets/images/inverted_subjects/Languages.svg index dc4135afa3a6..c0c43e682736 100644 --- a/assets/images/inverted_subjects/Languages.svg +++ b/assets/images/inverted_subjects/Languages.svg @@ -1,11 +1 @@ - - - - - - - - + \ No newline at end of file diff --git 
a/assets/images/inverted_subjects/Latin.svg b/assets/images/inverted_subjects/Latin.svg index 18cdbd7f6a01..2aaf6a2ded12 100644 --- a/assets/images/inverted_subjects/Latin.svg +++ b/assets/images/inverted_subjects/Latin.svg @@ -1,41 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Law.svg b/assets/images/inverted_subjects/Law.svg index b0b95e48161e..61f14ddf82bc 100644 --- a/assets/images/inverted_subjects/Law.svg +++ b/assets/images/inverted_subjects/Law.svg @@ -1,24 +1 @@ - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Lightbulb.svg b/assets/images/inverted_subjects/Lightbulb.svg index 204a910740cf..75927a21cc2c 100644 --- a/assets/images/inverted_subjects/Lightbulb.svg +++ b/assets/images/inverted_subjects/Lightbulb.svg @@ -1,28 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Logic.svg b/assets/images/inverted_subjects/Logic.svg index 8f36fb8eb860..d7624c2eec31 100644 --- a/assets/images/inverted_subjects/Logic.svg +++ b/assets/images/inverted_subjects/Logic.svg @@ -1,32 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Mathematics.svg b/assets/images/inverted_subjects/Mathematics.svg index 1331db3cd842..c277d80ea138 100644 --- a/assets/images/inverted_subjects/Mathematics.svg +++ b/assets/images/inverted_subjects/Mathematics.svg @@ -1,19 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Medicine.svg b/assets/images/inverted_subjects/Medicine.svg index 0a1ed43f3eff..49f66fc2e884 100644 --- a/assets/images/inverted_subjects/Medicine.svg +++ b/assets/images/inverted_subjects/Medicine.svg @@ -1,57 +1 @@ - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Microbiology.svg b/assets/images/inverted_subjects/Microbiology.svg index a77dcff503e6..7ded206c84b9 100644 
--- a/assets/images/inverted_subjects/Microbiology.svg +++ b/assets/images/inverted_subjects/Microbiology.svg @@ -1,15 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Music.svg b/assets/images/inverted_subjects/Music.svg index 28b1c3a235ae..0b5863949082 100644 --- a/assets/images/inverted_subjects/Music.svg +++ b/assets/images/inverted_subjects/Music.svg @@ -1,13 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Philosophy.svg b/assets/images/inverted_subjects/Philosophy.svg index 7933a6d3f226..0045f302bd7c 100644 --- a/assets/images/inverted_subjects/Philosophy.svg +++ b/assets/images/inverted_subjects/Philosophy.svg @@ -1,28 +1 @@ - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Physics.svg b/assets/images/inverted_subjects/Physics.svg index e93c43a39162..6f2226440663 100644 --- a/assets/images/inverted_subjects/Physics.svg +++ b/assets/images/inverted_subjects/Physics.svg @@ -1,34 +1 @@ - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Poetry.svg b/assets/images/inverted_subjects/Poetry.svg index 196120f7534c..92de0e3c31e9 100644 --- a/assets/images/inverted_subjects/Poetry.svg +++ b/assets/images/inverted_subjects/Poetry.svg @@ -1,20 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Probability.svg b/assets/images/inverted_subjects/Probability.svg index 2a61fecc7efe..1791fa43f9d4 100644 --- a/assets/images/inverted_subjects/Probability.svg +++ b/assets/images/inverted_subjects/Probability.svg @@ -1,28 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Programming.svg b/assets/images/inverted_subjects/Programming.svg index 14ca240cc4e2..6766b11280d9 100644 --- a/assets/images/inverted_subjects/Programming.svg +++ b/assets/images/inverted_subjects/Programming.svg @@ -1,139 
+1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Puzzles.svg b/assets/images/inverted_subjects/Puzzles.svg index 7c95b209ff47..71e073d80c2c 100644 --- a/assets/images/inverted_subjects/Puzzles.svg +++ b/assets/images/inverted_subjects/Puzzles.svg @@ -1,21 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Reading.svg b/assets/images/inverted_subjects/Reading.svg index d75793c29a87..32f0e79ac4bb 100644 --- a/assets/images/inverted_subjects/Reading.svg +++ b/assets/images/inverted_subjects/Reading.svg @@ -1,51 +1 @@ - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Religion.svg b/assets/images/inverted_subjects/Religion.svg index 78204fcf0481..3e738b15bcd8 100644 --- a/assets/images/inverted_subjects/Religion.svg +++ b/assets/images/inverted_subjects/Religion.svg @@ -1,17 +1 @@ - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Space.svg b/assets/images/inverted_subjects/Space.svg index 43be3dadf965..45b0fcb42718 100644 --- a/assets/images/inverted_subjects/Space.svg +++ b/assets/images/inverted_subjects/Space.svg @@ -1,15 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Spanish.svg b/assets/images/inverted_subjects/Spanish.svg index 1e82017effde..5d282f29b26c 100644 --- a/assets/images/inverted_subjects/Spanish.svg +++ b/assets/images/inverted_subjects/Spanish.svg @@ -1,30 +1 @@ - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Sport.svg b/assets/images/inverted_subjects/Sport.svg index 9a6a3566368e..c616d282ce6f 100644 --- a/assets/images/inverted_subjects/Sport.svg +++ b/assets/images/inverted_subjects/Sport.svg @@ -1,35 +1 @@ - - - - - - + \ No newline at end of file diff --git 
a/assets/images/inverted_subjects/Statistics.svg b/assets/images/inverted_subjects/Statistics.svg index 7eaf6cc6f3f0..a4bc419e6b70 100644 --- a/assets/images/inverted_subjects/Statistics.svg +++ b/assets/images/inverted_subjects/Statistics.svg @@ -1,10 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Trigonometry.svg b/assets/images/inverted_subjects/Trigonometry.svg index 41421f839bce..6321c3b5c1e0 100644 --- a/assets/images/inverted_subjects/Trigonometry.svg +++ b/assets/images/inverted_subjects/Trigonometry.svg @@ -1,13 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/inverted_subjects/Welcome.svg b/assets/images/inverted_subjects/Welcome.svg index dbe46d5b98fe..20b304b225d1 100644 --- a/assets/images/inverted_subjects/Welcome.svg +++ b/assets/images/inverted_subjects/Welcome.svg @@ -1,33 +1 @@ - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/learner_dashboard/dropdown.svg b/assets/images/learner_dashboard/dropdown.svg index 61f88e2e5b6b..5c82cf9a30f8 100644 --- a/assets/images/learner_dashboard/dropdown.svg +++ b/assets/images/learner_dashboard/dropdown.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/assets/images/learner_dashboard/home.svg b/assets/images/learner_dashboard/home.svg index 15d2b5ef006a..5dd2f3d4c20b 100644 --- a/assets/images/learner_dashboard/home.svg +++ b/assets/images/learner_dashboard/home.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/assets/images/learner_dashboard/info.svg b/assets/images/learner_dashboard/info.svg index f62076c6958d..2ec70a89d7d8 100644 --- a/assets/images/learner_dashboard/info.svg +++ b/assets/images/learner_dashboard/info.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/assets/images/learner_dashboard/paw.svg b/assets/images/learner_dashboard/paw.svg index 74fb9698bf4a..d80b1571789d 100644 --- a/assets/images/learner_dashboard/paw.svg +++ 
b/assets/images/learner_dashboard/paw.svg @@ -1,19 +1 @@ - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/learner_dashboard/progress.svg b/assets/images/learner_dashboard/progress.svg index 42106daff671..b283f23e542f 100644 --- a/assets/images/learner_dashboard/progress.svg +++ b/assets/images/learner_dashboard/progress.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/assets/images/learner_dashboard/star.svg b/assets/images/learner_dashboard/star.svg index a7aee44ccdb7..16678edd8048 100644 --- a/assets/images/learner_dashboard/star.svg +++ b/assets/images/learner_dashboard/star.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/assets/images/learner_dashboard/todolist.svg b/assets/images/learner_dashboard/todolist.svg index ac6a787e1838..e37e3bb3754e 100644 --- a/assets/images/learner_dashboard/todolist.svg +++ b/assets/images/learner_dashboard/todolist.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/assets/images/learner_dashboard/up.svg b/assets/images/learner_dashboard/up.svg index edcfd437eed3..b05d0fdf380b 100644 --- a/assets/images/learner_dashboard/up.svg +++ b/assets/images/learner_dashboard/up.svg @@ -1 +1 @@ - + \ No newline at end of file diff --git a/assets/images/library/banner1.svg b/assets/images/library/banner1.svg index 21bbaf432164..9c81c1aab105 100644 --- a/assets/images/library/banner1.svg +++ b/assets/images/library/banner1.svg @@ -1,218 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/library/banner2.svg b/assets/images/library/banner2.svg index c2ec8ab2ecbe..9b823e37190a 100644 --- a/assets/images/library/banner2.svg +++ b/assets/images/library/banner2.svg @@ -1,128 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git 
a/assets/images/library/banner3.svg b/assets/images/library/banner3.svg index 2732a45a596e..00770488294b 100644 --- a/assets/images/library/banner3.svg +++ b/assets/images/library/banner3.svg @@ -1,98 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/library/banner4.svg b/assets/images/library/banner4.svg index 6aeba0fdd1cb..8559d49465ac 100644 --- a/assets/images/library/banner4.svg +++ b/assets/images/library/banner4.svg @@ -1,150 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/logo/48x48_logo_mint.svg b/assets/images/logo/48x48_logo_mint.svg new file mode 100644 index 000000000000..94586d9a0353 --- /dev/null +++ b/assets/images/logo/48x48_logo_mint.svg @@ -0,0 +1,3 @@ + + + diff --git a/assets/images/sidebar/learn-math-logo.png b/assets/images/sidebar/learn-math-logo.png new file mode 100644 index 000000000000..cb234e1f5287 Binary files /dev/null and b/assets/images/sidebar/learn-math-logo.png differ diff --git a/assets/images/sidebar/learn-math-logo.webp b/assets/images/sidebar/learn-math-logo.webp new file mode 100644 index 000000000000..fd177504e907 Binary files /dev/null and b/assets/images/sidebar/learn-math-logo.webp differ diff --git a/assets/images/splash/Dheeraj_3.png b/assets/images/splash/Dheeraj15x.png similarity index 100% rename from assets/images/splash/Dheeraj_3.png rename to assets/images/splash/Dheeraj15x.png diff --git a/assets/images/splash/Dheeraj15x.webp b/assets/images/splash/Dheeraj15x.webp new file mode 100644 index 000000000000..78e404688655 Binary files /dev/null and b/assets/images/splash/Dheeraj15x.webp differ diff --git a/assets/images/splash/Dheeraj1x.png b/assets/images/splash/Dheeraj1x.png new file mode 100644 index 000000000000..5a283e12187a Binary files /dev/null and b/assets/images/splash/Dheeraj1x.png differ 
diff --git a/assets/images/splash/Dheeraj1x.webp b/assets/images/splash/Dheeraj1x.webp new file mode 100644 index 000000000000..d0a252515e47 Binary files /dev/null and b/assets/images/splash/Dheeraj1x.webp differ diff --git a/assets/images/splash/Dheeraj2x.png b/assets/images/splash/Dheeraj2x.png new file mode 100644 index 000000000000..bf87ea0edf76 Binary files /dev/null and b/assets/images/splash/Dheeraj2x.png differ diff --git a/assets/images/splash/Dheeraj2x.webp b/assets/images/splash/Dheeraj2x.webp new file mode 100644 index 000000000000..78e404688655 Binary files /dev/null and b/assets/images/splash/Dheeraj2x.webp differ diff --git a/assets/images/splash/Dheeraj_3.webp b/assets/images/splash/Dheeraj_3.webp deleted file mode 100644 index 376b2769bcb5..000000000000 Binary files a/assets/images/splash/Dheeraj_3.webp and /dev/null differ diff --git a/assets/images/splash/Gaurav_2.png b/assets/images/splash/Gaurav15x.png similarity index 100% rename from assets/images/splash/Gaurav_2.png rename to assets/images/splash/Gaurav15x.png diff --git a/assets/images/splash/Gaurav15x.webp b/assets/images/splash/Gaurav15x.webp new file mode 100644 index 000000000000..476893c2e70b Binary files /dev/null and b/assets/images/splash/Gaurav15x.webp differ diff --git a/assets/images/splash/Gaurav1x.png b/assets/images/splash/Gaurav1x.png new file mode 100644 index 000000000000..0be1cf1694e8 Binary files /dev/null and b/assets/images/splash/Gaurav1x.png differ diff --git a/assets/images/splash/Gaurav1x.webp b/assets/images/splash/Gaurav1x.webp new file mode 100644 index 000000000000..a373744a4d47 Binary files /dev/null and b/assets/images/splash/Gaurav1x.webp differ diff --git a/assets/images/splash/Gaurav2x.png b/assets/images/splash/Gaurav2x.png new file mode 100644 index 000000000000..33535e756068 Binary files /dev/null and b/assets/images/splash/Gaurav2x.png differ diff --git a/assets/images/splash/Gaurav2x.webp b/assets/images/splash/Gaurav2x.webp new file mode 100644 index 
000000000000..476893c2e70b Binary files /dev/null and b/assets/images/splash/Gaurav2x.webp differ diff --git a/assets/images/splash/Gaurav_2.webp b/assets/images/splash/Gaurav_2.webp deleted file mode 100644 index e0a1cc4d44ba..000000000000 Binary files a/assets/images/splash/Gaurav_2.webp and /dev/null differ diff --git a/assets/images/splash/books.svg b/assets/images/splash/books.svg index 35b88a651d58..008e85aabb2e 100644 --- a/assets/images/splash/books.svg +++ b/assets/images/splash/books.svg @@ -1,242 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/splash/books_dsk.png b/assets/images/splash/books_dsk.png deleted file mode 100644 index ad3f95a8a368..000000000000 Binary files a/assets/images/splash/books_dsk.png and /dev/null differ diff --git a/assets/images/splash/books_dsk.svg b/assets/images/splash/books_dsk.svg new file mode 100644 index 000000000000..d355f7119b71 --- /dev/null +++ b/assets/images/splash/books_dsk.svg @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + diff --git a/assets/images/splash/books_dsk.webp b/assets/images/splash/books_dsk.webp deleted file mode 100644 index 549cd24098ea..000000000000 Binary files a/assets/images/splash/books_dsk.webp and /dev/null differ diff --git a/assets/images/splash/bullet1.svg b/assets/images/splash/bullet1.svg index 03431290e2b6..65910eb2dd1f 100644 --- a/assets/images/splash/bullet1.svg +++ b/assets/images/splash/bullet1.svg @@ -1,4 +1 @@ - - - - + \ No newline at end of file diff --git a/assets/images/splash/bullet1icon.svg b/assets/images/splash/bullet1icon.svg index a3d9ea552b37..ce8c21b81ab7 100644 --- a/assets/images/splash/bullet1icon.svg +++ b/assets/images/splash/bullet1icon.svg @@ -1,136 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + 
\ No newline at end of file diff --git a/assets/images/splash/bullet2.svg b/assets/images/splash/bullet2.svg index d645dd280d84..4d5f2cfe444b 100644 --- a/assets/images/splash/bullet2.svg +++ b/assets/images/splash/bullet2.svg @@ -1,6 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/splash/bullet2icon.svg b/assets/images/splash/bullet2icon.svg index b4cabe5277f9..d23bd6126b4e 100644 --- a/assets/images/splash/bullet2icon.svg +++ b/assets/images/splash/bullet2icon.svg @@ -1,89 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/splash/bullet3.svg b/assets/images/splash/bullet3.svg index 94621eb6f760..35da65d23ef9 100644 --- a/assets/images/splash/bullet3.svg +++ b/assets/images/splash/bullet3.svg @@ -1,8 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/splash/bullet3icon.svg b/assets/images/splash/bullet3icon.svg index de437fff64ba..c182cd4d59fb 100644 --- a/assets/images/splash/bullet3icon.svg +++ b/assets/images/splash/bullet3icon.svg @@ -1,77 +1 @@ - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/splash/dsk_community_background.webp b/assets/images/splash/dsk_community_background.webp new file mode 100644 index 000000000000..7b64f10a7e73 Binary files /dev/null and b/assets/images/splash/dsk_community_background.webp differ diff --git a/assets/images/splash/dsk_testimonial_background.webp b/assets/images/splash/dsk_testimonial_background.webp new file mode 100644 index 000000000000..13dc098d3291 Binary files /dev/null and b/assets/images/splash/dsk_testimonial_background.webp differ diff --git a/assets/images/splash/languageIcon.svg b/assets/images/splash/languageIcon.svg index 35698e7d2cc3..a69efb797c3d 100644 --- a/assets/images/splash/languageIcon.svg +++ b/assets/images/splash/languageIcon.svg @@ -1,3 +1 @@ - - - + \ No newline at end of file diff --git 
a/assets/images/splash/lessonIcon.svg b/assets/images/splash/lessonIcon.svg index a7daac4f4fd9..c85957d05bc5 100644 --- a/assets/images/splash/lessonIcon.svg +++ b/assets/images/splash/lessonIcon.svg @@ -1,37 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/splash/m_community_background.webp b/assets/images/splash/m_community_background.webp new file mode 100644 index 000000000000..ba34ee3e7b5d Binary files /dev/null and b/assets/images/splash/m_community_background.webp differ diff --git a/assets/images/splash/m_testimonial_background.webp b/assets/images/splash/m_testimonial_background.webp new file mode 100644 index 000000000000..6e332d32e0ae Binary files /dev/null and b/assets/images/splash/m_testimonial_background.webp differ diff --git a/assets/images/splash/matthew_fractions.png b/assets/images/splash/matthew_fractions.png deleted file mode 100644 index 4932c7e76c63..000000000000 Binary files a/assets/images/splash/matthew_fractions.png and /dev/null differ diff --git a/assets/images/splash/matthew_fractions.svg b/assets/images/splash/matthew_fractions.svg new file mode 100644 index 000000000000..9865133f68a7 --- /dev/null +++ b/assets/images/splash/matthew_fractions.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/assets/images/splash/matthew_fractions.webp b/assets/images/splash/matthew_fractions.webp deleted file mode 100644 index e197aa26172a..000000000000 Binary files a/assets/images/splash/matthew_fractions.webp and /dev/null differ diff --git a/assets/images/splash/mira.webp b/assets/images/splash/mira.webp deleted file mode 100644 index ab8509447601..000000000000 Binary files a/assets/images/splash/mira.webp and /dev/null differ diff --git a/assets/images/splash/mira.png b/assets/images/splash/mira15x.png similarity index 100% rename from assets/images/splash/mira.png rename to assets/images/splash/mira15x.png diff --git 
a/assets/images/splash/mira15x.webp b/assets/images/splash/mira15x.webp new file mode 100644 index 000000000000..d09529ce134c Binary files /dev/null and b/assets/images/splash/mira15x.webp differ diff --git a/assets/images/splash/mira1x.png b/assets/images/splash/mira1x.png new file mode 100644 index 000000000000..3f02b3bf4bd5 Binary files /dev/null and b/assets/images/splash/mira1x.png differ diff --git a/assets/images/splash/mira1x.webp b/assets/images/splash/mira1x.webp new file mode 100644 index 000000000000..9a46ac74d8df Binary files /dev/null and b/assets/images/splash/mira1x.webp differ diff --git a/assets/images/splash/mira2x.png b/assets/images/splash/mira2x.png new file mode 100644 index 000000000000..7cffbd5e7634 Binary files /dev/null and b/assets/images/splash/mira2x.png differ diff --git a/assets/images/splash/mira2x.webp b/assets/images/splash/mira2x.webp new file mode 100644 index 000000000000..d09529ce134c Binary files /dev/null and b/assets/images/splash/mira2x.webp differ diff --git a/assets/images/splash/sama.webp b/assets/images/splash/sama.webp deleted file mode 100644 index 6f97b59c0185..000000000000 Binary files a/assets/images/splash/sama.webp and /dev/null differ diff --git a/assets/images/splash/sama.png b/assets/images/splash/sama15x.png similarity index 100% rename from assets/images/splash/sama.png rename to assets/images/splash/sama15x.png diff --git a/assets/images/splash/sama15x.webp b/assets/images/splash/sama15x.webp new file mode 100644 index 000000000000..92584100826b Binary files /dev/null and b/assets/images/splash/sama15x.webp differ diff --git a/assets/images/splash/sama1x.png b/assets/images/splash/sama1x.png new file mode 100644 index 000000000000..51004ef920c6 Binary files /dev/null and b/assets/images/splash/sama1x.png differ diff --git a/assets/images/splash/sama1x.webp b/assets/images/splash/sama1x.webp new file mode 100644 index 000000000000..85f68fca7e52 Binary files /dev/null and b/assets/images/splash/sama1x.webp 
differ diff --git a/assets/images/splash/sama2x.png b/assets/images/splash/sama2x.png new file mode 100644 index 000000000000..2e9f0e6e0c67 Binary files /dev/null and b/assets/images/splash/sama2x.png differ diff --git a/assets/images/splash/sama2x.webp b/assets/images/splash/sama2x.webp new file mode 100644 index 000000000000..92584100826b Binary files /dev/null and b/assets/images/splash/sama2x.webp differ diff --git a/assets/images/splash/sara.png b/assets/images/splash/sara.png deleted file mode 100644 index ccec1e5e1090..000000000000 Binary files a/assets/images/splash/sara.png and /dev/null differ diff --git a/assets/images/splash/splashMainDesktop.webp b/assets/images/splash/splashMainDesktop.webp deleted file mode 100644 index 2c0ec180b6c2..000000000000 Binary files a/assets/images/splash/splashMainDesktop.webp and /dev/null differ diff --git a/assets/images/splash/splashMainDesktop15x.png b/assets/images/splash/splashMainDesktop15x.png new file mode 100644 index 000000000000..36f1a0bcc9f1 Binary files /dev/null and b/assets/images/splash/splashMainDesktop15x.png differ diff --git a/assets/images/splash/splashMainDesktop15x.webp b/assets/images/splash/splashMainDesktop15x.webp new file mode 100644 index 000000000000..bc37926e3195 Binary files /dev/null and b/assets/images/splash/splashMainDesktop15x.webp differ diff --git a/assets/images/splash/splashMainDesktop1x.png b/assets/images/splash/splashMainDesktop1x.png new file mode 100644 index 000000000000..72979ef9638b Binary files /dev/null and b/assets/images/splash/splashMainDesktop1x.png differ diff --git a/assets/images/splash/splashMainDesktop1x.webp b/assets/images/splash/splashMainDesktop1x.webp new file mode 100644 index 000000000000..c605fa525d4d Binary files /dev/null and b/assets/images/splash/splashMainDesktop1x.webp differ diff --git a/assets/images/splash/splashMainDesktop.png b/assets/images/splash/splashMainDesktop2x.png similarity index 100% rename from 
assets/images/splash/splashMainDesktop.png rename to assets/images/splash/splashMainDesktop2x.png diff --git a/assets/images/splash/splashMainDesktop2x.webp b/assets/images/splash/splashMainDesktop2x.webp new file mode 100644 index 000000000000..92d5091ac2a6 Binary files /dev/null and b/assets/images/splash/splashMainDesktop2x.webp differ diff --git a/assets/images/splash/splashMainMobile.webp b/assets/images/splash/splashMainMobile.webp index 4bcfd7bee19a..59df5bfe076f 100644 Binary files a/assets/images/splash/splashMainMobile.webp and b/assets/images/splash/splashMainMobile.webp differ diff --git a/assets/images/splash/student_desk.png b/assets/images/splash/student_desk.png deleted file mode 100644 index e8bf6da1d860..000000000000 Binary files a/assets/images/splash/student_desk.png and /dev/null differ diff --git a/assets/images/splash/student_desk.webp b/assets/images/splash/student_desk.webp deleted file mode 100644 index 10c81029a929..000000000000 Binary files a/assets/images/splash/student_desk.webp and /dev/null differ diff --git a/assets/images/splash/student_desk15x.png b/assets/images/splash/student_desk15x.png new file mode 100644 index 000000000000..e49e80e2dca2 Binary files /dev/null and b/assets/images/splash/student_desk15x.png differ diff --git a/assets/images/splash/student_desk15x.webp b/assets/images/splash/student_desk15x.webp new file mode 100644 index 000000000000..bec2133777c5 Binary files /dev/null and b/assets/images/splash/student_desk15x.webp differ diff --git a/assets/images/splash/student_desk1x.png b/assets/images/splash/student_desk1x.png new file mode 100644 index 000000000000..93b6f5ed6520 Binary files /dev/null and b/assets/images/splash/student_desk1x.png differ diff --git a/assets/images/splash/student_desk1x.webp b/assets/images/splash/student_desk1x.webp new file mode 100644 index 000000000000..f4095273d47c Binary files /dev/null and b/assets/images/splash/student_desk1x.webp differ diff --git 
a/assets/images/splash/student_desk2x.png b/assets/images/splash/student_desk2x.png new file mode 100644 index 000000000000..eda5baae1b6e Binary files /dev/null and b/assets/images/splash/student_desk2x.png differ diff --git a/assets/images/splash/student_desk2x.webp b/assets/images/splash/student_desk2x.webp new file mode 100644 index 000000000000..88aa116fb920 Binary files /dev/null and b/assets/images/splash/student_desk2x.webp differ diff --git a/assets/images/splash/student_desk_mobile.webp b/assets/images/splash/student_desk_mobile.webp index be42d87e6c02..9758035f6226 100644 Binary files a/assets/images/splash/student_desk_mobile.webp and b/assets/images/splash/student_desk_mobile.webp differ diff --git a/assets/images/splash/userIcon.svg b/assets/images/splash/userIcon.svg index 6e709489bce8..cc417cdb6819 100644 --- a/assets/images/splash/userIcon.svg +++ b/assets/images/splash/userIcon.svg @@ -1,3 +1 @@ - - - + \ No newline at end of file diff --git a/assets/images/splash/volunteer_section.png b/assets/images/splash/volunteer_section.png deleted file mode 100644 index 16d4860f35df..000000000000 Binary files a/assets/images/splash/volunteer_section.png and /dev/null differ diff --git a/assets/images/splash/volunteer_section.webp b/assets/images/splash/volunteer_section.webp deleted file mode 100644 index a94ad957e99b..000000000000 Binary files a/assets/images/splash/volunteer_section.webp and /dev/null differ diff --git a/assets/images/splash/volunteer_section15x.png b/assets/images/splash/volunteer_section15x.png new file mode 100644 index 000000000000..c19f8f5815ac Binary files /dev/null and b/assets/images/splash/volunteer_section15x.png differ diff --git a/assets/images/splash/volunteer_section15x.webp b/assets/images/splash/volunteer_section15x.webp new file mode 100644 index 000000000000..78e771e51908 Binary files /dev/null and b/assets/images/splash/volunteer_section15x.webp differ diff --git a/assets/images/splash/volunteer_section1x.png 
b/assets/images/splash/volunteer_section1x.png new file mode 100644 index 000000000000..c625e4d0adaf Binary files /dev/null and b/assets/images/splash/volunteer_section1x.png differ diff --git a/assets/images/splash/volunteer_section1x.webp b/assets/images/splash/volunteer_section1x.webp new file mode 100644 index 000000000000..58fea8c6ab66 Binary files /dev/null and b/assets/images/splash/volunteer_section1x.webp differ diff --git a/assets/images/splash/volunteer_section2x.png b/assets/images/splash/volunteer_section2x.png new file mode 100644 index 000000000000..94243235868c Binary files /dev/null and b/assets/images/splash/volunteer_section2x.png differ diff --git a/assets/images/splash/volunteer_section2x.webp b/assets/images/splash/volunteer_section2x.webp new file mode 100644 index 000000000000..41e82bcb9e01 Binary files /dev/null and b/assets/images/splash/volunteer_section2x.webp differ diff --git a/assets/images/subjects/Algebra.svg b/assets/images/subjects/Algebra.svg index 91824d6d9867..e559780d933e 100644 --- a/assets/images/subjects/Algebra.svg +++ b/assets/images/subjects/Algebra.svg @@ -1,13 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Algorithms.svg b/assets/images/subjects/Algorithms.svg index c74c5d220b0c..902624b22e8a 100644 --- a/assets/images/subjects/Algorithms.svg +++ b/assets/images/subjects/Algorithms.svg @@ -1,19 +1 @@ - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Architecture.svg b/assets/images/subjects/Architecture.svg index a53fac6f0d93..34aee8d6318a 100644 --- a/assets/images/subjects/Architecture.svg +++ b/assets/images/subjects/Architecture.svg @@ -1,15 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Arithmetic.svg b/assets/images/subjects/Arithmetic.svg index 31d6fc2f43ff..7f9c2cc3758b 100644 --- a/assets/images/subjects/Arithmetic.svg +++ b/assets/images/subjects/Arithmetic.svg @@ -1,30 +1 @@ - - - - - - - - - - 
- - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Art.svg b/assets/images/subjects/Art.svg index 5f4e09136f7a..ffe7ffd261f7 100644 --- a/assets/images/subjects/Art.svg +++ b/assets/images/subjects/Art.svg @@ -1,24 +1 @@ - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Astronomy.svg b/assets/images/subjects/Astronomy.svg index 14dd608b57d5..eb2afcd835bb 100644 --- a/assets/images/subjects/Astronomy.svg +++ b/assets/images/subjects/Astronomy.svg @@ -1,9 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Biology.svg b/assets/images/subjects/Biology.svg index ec334e16aa3e..83b8b29663b9 100644 --- a/assets/images/subjects/Biology.svg +++ b/assets/images/subjects/Biology.svg @@ -1,14 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Business.svg b/assets/images/subjects/Business.svg index 51c67d6b5aab..ee0018306cdd 100644 --- a/assets/images/subjects/Business.svg +++ b/assets/images/subjects/Business.svg @@ -1,28 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Calculus.svg b/assets/images/subjects/Calculus.svg index de171e3169fa..32f3e4f20ef6 100644 --- a/assets/images/subjects/Calculus.svg +++ b/assets/images/subjects/Calculus.svg @@ -1,14 +1 @@ - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Chemistry.svg b/assets/images/subjects/Chemistry.svg index 4dbb07f700c6..746f6e7dec77 100644 --- a/assets/images/subjects/Chemistry.svg +++ b/assets/images/subjects/Chemistry.svg @@ -1,18 +1 @@ - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Chess.svg b/assets/images/subjects/Chess.svg index f9569044bf38..8d7d29131841 100644 --- a/assets/images/subjects/Chess.svg +++ b/assets/images/subjects/Chess.svg @@ -1,15 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Combinatorics.svg 
b/assets/images/subjects/Combinatorics.svg index 9db23358054c..7f5f7e64895d 100644 --- a/assets/images/subjects/Combinatorics.svg +++ b/assets/images/subjects/Combinatorics.svg @@ -1,18 +1 @@ - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Computing.svg b/assets/images/subjects/Computing.svg index ae0bee1a518f..42fb46cd76ce 100644 --- a/assets/images/subjects/Computing.svg +++ b/assets/images/subjects/Computing.svg @@ -1,16 +1 @@ - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Cooking.svg b/assets/images/subjects/Cooking.svg index d9ee0f3e3d3b..d22a242073d2 100644 --- a/assets/images/subjects/Cooking.svg +++ b/assets/images/subjects/Cooking.svg @@ -1,19 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Creativity.svg b/assets/images/subjects/Creativity.svg index ab6beb8443c4..15d55c2beccf 100644 --- a/assets/images/subjects/Creativity.svg +++ b/assets/images/subjects/Creativity.svg @@ -1,27 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Economics.svg b/assets/images/subjects/Economics.svg index 0878af963379..eff8749a496c 100644 --- a/assets/images/subjects/Economics.svg +++ b/assets/images/subjects/Economics.svg @@ -1,23 +1 @@ - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Education.svg b/assets/images/subjects/Education.svg index 2495d1658384..680a9fa27f18 100644 --- a/assets/images/subjects/Education.svg +++ b/assets/images/subjects/Education.svg @@ -1,13 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Engineering.svg b/assets/images/subjects/Engineering.svg index 9867417cc3f6..aa3f95ee8834 100644 --- a/assets/images/subjects/Engineering.svg +++ b/assets/images/subjects/Engineering.svg @@ -1,27 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/English.svg 
b/assets/images/subjects/English.svg index f5eeceab60d8..bc6e6cafafa4 100644 --- a/assets/images/subjects/English.svg +++ b/assets/images/subjects/English.svg @@ -1,41 +1 @@ - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Environment.svg b/assets/images/subjects/Environment.svg index ec334e16aa3e..83b8b29663b9 100644 --- a/assets/images/subjects/Environment.svg +++ b/assets/images/subjects/Environment.svg @@ -1,14 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Gaulish.svg b/assets/images/subjects/Gaulish.svg index a73f596cf383..f9d1dcc37f0f 100644 --- a/assets/images/subjects/Gaulish.svg +++ b/assets/images/subjects/Gaulish.svg @@ -1,54 +1 @@ - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Genetics.svg b/assets/images/subjects/Genetics.svg index 07f9104bed13..5c52b681334d 100644 --- a/assets/images/subjects/Genetics.svg +++ b/assets/images/subjects/Genetics.svg @@ -1,25 +1 @@ - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Geography.svg b/assets/images/subjects/Geography.svg index 2c7c4a3642bd..bff34faccf52 100644 --- a/assets/images/subjects/Geography.svg +++ b/assets/images/subjects/Geography.svg @@ -1,20 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Geometry.svg b/assets/images/subjects/Geometry.svg index ebe40460cbc9..60a16a8856fe 100644 --- a/assets/images/subjects/Geometry.svg +++ b/assets/images/subjects/Geometry.svg @@ -1,11 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Government.svg b/assets/images/subjects/Government.svg index 27e5691c6a12..4e99bb161056 100644 --- a/assets/images/subjects/Government.svg +++ b/assets/images/subjects/Government.svg @@ -1,15 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/GraphTheory.svg 
b/assets/images/subjects/GraphTheory.svg index f18bc4206535..fc97d0a7da34 100644 --- a/assets/images/subjects/GraphTheory.svg +++ b/assets/images/subjects/GraphTheory.svg @@ -1,32 +1 @@ - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/History.svg b/assets/images/subjects/History.svg index e92f7059b46e..388d470b822b 100644 --- a/assets/images/subjects/History.svg +++ b/assets/images/subjects/History.svg @@ -1,31 +1 @@ - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Humor.svg b/assets/images/subjects/Humor.svg index 907cc7449b0c..9f7efec0c244 100644 --- a/assets/images/subjects/Humor.svg +++ b/assets/images/subjects/Humor.svg @@ -1,19 +1 @@ - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Language.svg b/assets/images/subjects/Language.svg index b3764af4382d..399f67d8cf76 100644 --- a/assets/images/subjects/Language.svg +++ b/assets/images/subjects/Language.svg @@ -1,11 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Languages.svg b/assets/images/subjects/Languages.svg index b3764af4382d..399f67d8cf76 100644 --- a/assets/images/subjects/Languages.svg +++ b/assets/images/subjects/Languages.svg @@ -1,11 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Latin.svg b/assets/images/subjects/Latin.svg index e8f95eba8619..b3b733640c43 100644 --- a/assets/images/subjects/Latin.svg +++ b/assets/images/subjects/Latin.svg @@ -1,48 +1 @@ - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Law.svg b/assets/images/subjects/Law.svg index b1023ea82023..2b19028bfecf 100644 --- a/assets/images/subjects/Law.svg +++ b/assets/images/subjects/Law.svg @@ -1,30 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Lightbulb.svg b/assets/images/subjects/Lightbulb.svg index 
ab6beb8443c4..15d55c2beccf 100644 --- a/assets/images/subjects/Lightbulb.svg +++ b/assets/images/subjects/Lightbulb.svg @@ -1,27 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Logic.svg b/assets/images/subjects/Logic.svg index 33c6d9c0c493..aee33cc5337e 100644 --- a/assets/images/subjects/Logic.svg +++ b/assets/images/subjects/Logic.svg @@ -1,30 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Mathematics.svg b/assets/images/subjects/Mathematics.svg index f4a01de8f8f4..8afc310b80f3 100644 --- a/assets/images/subjects/Mathematics.svg +++ b/assets/images/subjects/Mathematics.svg @@ -1,18 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Medicine.svg b/assets/images/subjects/Medicine.svg index 2cef8667eb3f..8d8c84f8eafe 100644 --- a/assets/images/subjects/Medicine.svg +++ b/assets/images/subjects/Medicine.svg @@ -1,48 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Microbiology.svg b/assets/images/subjects/Microbiology.svg index 10028256bcce..d9bfb40bbbf5 100644 --- a/assets/images/subjects/Microbiology.svg +++ b/assets/images/subjects/Microbiology.svg @@ -1,14 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Music.svg b/assets/images/subjects/Music.svg index 6cbfe3007252..73719905e892 100644 --- a/assets/images/subjects/Music.svg +++ b/assets/images/subjects/Music.svg @@ -1,12 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Philosophy.svg b/assets/images/subjects/Philosophy.svg index c6bbdc1dd870..f891076a912b 100644 --- a/assets/images/subjects/Philosophy.svg +++ b/assets/images/subjects/Philosophy.svg @@ -1,24 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Physics.svg b/assets/images/subjects/Physics.svg index dc5e21360a68..c28a8702c1cc 100644 --- 
a/assets/images/subjects/Physics.svg +++ b/assets/images/subjects/Physics.svg @@ -1,43 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Poetry.svg b/assets/images/subjects/Poetry.svg index 111ab44ecbbc..84fd0e460370 100644 --- a/assets/images/subjects/Poetry.svg +++ b/assets/images/subjects/Poetry.svg @@ -1,20 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Probability.svg b/assets/images/subjects/Probability.svg index 773c6c209a1a..415b6fc2e57a 100644 --- a/assets/images/subjects/Probability.svg +++ b/assets/images/subjects/Probability.svg @@ -1,29 +1 @@ - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Programming.svg b/assets/images/subjects/Programming.svg index 6b8d5916f1cb..a6e3872dd64d 100644 --- a/assets/images/subjects/Programming.svg +++ b/assets/images/subjects/Programming.svg @@ -1,127 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Puzzles.svg b/assets/images/subjects/Puzzles.svg index 03b764337e35..fcdf81703775 100644 --- a/assets/images/subjects/Puzzles.svg +++ b/assets/images/subjects/Puzzles.svg @@ -1,19 +1 @@ - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Reading.svg b/assets/images/subjects/Reading.svg index 91b675933d0e..2e43beca8eca 100644 --- a/assets/images/subjects/Reading.svg +++ b/assets/images/subjects/Reading.svg @@ -1,42 +1 @@ - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Religion.svg b/assets/images/subjects/Religion.svg index af2392d4c8ae..ced3863d4754 100644 --- a/assets/images/subjects/Religion.svg +++ b/assets/images/subjects/Religion.svg @@ -1,19 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Space.svg b/assets/images/subjects/Space.svg index 4285768893e2..41693a7d56ac 100644 --- 
a/assets/images/subjects/Space.svg +++ b/assets/images/subjects/Space.svg @@ -1,17 +1 @@ - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Spanish.svg b/assets/images/subjects/Spanish.svg index d9fdf9f8419e..a2e9abaac98b 100644 --- a/assets/images/subjects/Spanish.svg +++ b/assets/images/subjects/Spanish.svg @@ -1,32 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Sport.svg b/assets/images/subjects/Sport.svg index bc87875e0b50..beff8abe9c68 100644 --- a/assets/images/subjects/Sport.svg +++ b/assets/images/subjects/Sport.svg @@ -1,43 +1 @@ - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Statistics.svg b/assets/images/subjects/Statistics.svg index 7171dcd9bc1a..3d149d38128b 100644 --- a/assets/images/subjects/Statistics.svg +++ b/assets/images/subjects/Statistics.svg @@ -1,9 +1 @@ - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Trigonometry.svg b/assets/images/subjects/Trigonometry.svg index 1b945f058de6..d75b9a9024e4 100644 --- a/assets/images/subjects/Trigonometry.svg +++ b/assets/images/subjects/Trigonometry.svg @@ -1,15 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/subjects/Welcome.svg b/assets/images/subjects/Welcome.svg index e6e1ca35b3ae..7c4d14b3ef65 100644 --- a/assets/images/subjects/Welcome.svg +++ b/assets/images/subjects/Welcome.svg @@ -1,30 +1 @@ - - - - - - - - - - - - + \ No newline at end of file diff --git a/assets/images/teach/student.jpg b/assets/images/teach/student.jpg new file mode 100644 index 000000000000..726942b3624d Binary files /dev/null and b/assets/images/teach/student.jpg differ diff --git a/assets/images/teach/student.png b/assets/images/teach/student.png deleted file mode 100644 index 76b9b2564e2d..000000000000 Binary files a/assets/images/teach/student.png and /dev/null differ diff --git a/assets/images/teach/student.webp 
b/assets/images/teach/student.webp index 0ae6f9b4633c..e0f141fef19f 100644 Binary files a/assets/images/teach/student.webp and b/assets/images/teach/student.webp differ diff --git a/assets/images/topnav/learn.png b/assets/images/topnav/learn.png new file mode 100644 index 000000000000..3289045469b0 Binary files /dev/null and b/assets/images/topnav/learn.png differ diff --git a/assets/images/topnav/learn.webp b/assets/images/topnav/learn.webp new file mode 100644 index 000000000000..5283bee674ba Binary files /dev/null and b/assets/images/topnav/learn.webp differ diff --git a/assets/release_constants.json b/assets/release_constants.json index 714665206951..01966de040d9 100644 --- a/assets/release_constants.json +++ b/assets/release_constants.json @@ -1,58 +1,41 @@ { "APP_DEV_YAML_PATH": "app_dev.yaml", "APP_YAML_PATH": "app.yaml", - "BLOCKING_BUG_MILESTONE_NUMBER": 39, - "BRANCH_TYPE_HOTFIX": "hotfix", - "BRANCH_TYPE_RELEASE": "release", "BUILD_MODULE_TARGET": "scripts.build", - "CHANGELOG_HEADER": "### Changelog:\n", - "COMMIT_HISTORY_HEADER": "### Commit History:\n", "CONSTANTS_PATH": "assets/constants.ts", - "CREDITS_FORM_URL": "https://docs.google.com/forms/d/1yH6ZO2UiD_VspgKJR40byRSjUP1AaBF9ARSe814p8K0/edit#responses", "CRON_YAML_PATH": "cron.yaml", "CRITICAL_USER_JOURNEYS_DOC": "http://docs.google.com/document/d/1s3MG2MVh_7m7B0wIlZb7sAcoyUdY0zq7a1JEFtwYBjI", "CUT_BRANCH_MODULE_TARGET": "scripts.release_scripts.cut_release_or_hotfix_branch", - "EMAIL_HEADER": "### Email C&P Blurbs about authors:\n", - "EXISTING_AUTHORS_HEADER": "### Existing Authors:\n", - "GAE_DIR": "../oppia_tools/google-cloud-sdk-335.0.0/google-cloud-sdk/platform/google_appengine", - "GCLOUD_PATH": "../oppia_tools/google-cloud-sdk-335.0.0/google-cloud-sdk/bin/gcloud", + "DEPLOYMENT_CONFIG_PATH": "core/feconf.py", + "GCLOUD_PATH": "../oppia_tools/google-cloud-sdk-364.0.0/google-cloud-sdk/bin/gcloud", "GITHUB_RELEASE_TAB_URL": "https://github.com/oppia/oppia/releases", "INDEX_YAML_PATH": 
"index.yaml", "INSTALL_THIRD_PARTY_MODULE_TARGET": "scripts.install_third_party_libs", - "INVALID_EMAIL_SUFFIX": "users.noreply.github.com", "ISSUE_FILING_URL": "https://github.com/oppia/oppia/milestone/39", - "ISSUE_URL_FORMAT_STRING": "https://github.com/oppia/oppia/issues/%s", - "ISSUES_HEADER": "### Issues mentioned in commits:\n", "JOBS_FORM_URL": "https://goo.gl/forms/XIj00RJ2h5L55XzU2", "JOBS_SPREADSHEETS_URL": "https://docs.google.com/spreadsheets/d/1Wegd0rZhVOm3Q3VCIw0xMbLC7IWtRyrEahiPn61Fhoo/edit#gid=948463314", "LABEL_FOR_CURRENT_RELEASE_PRS": "PR: for current release", "LABEL_FOR_RELEASED_PRS": "PR: released", - "NEW_AUTHORS_HEADER": "### New Authors:\n", - "NEW_CONTRIBUTORS_HEADER": "### New Contributors:\n", "NEW_RELEASE_URL": "https://github.com/oppia/oppia/releases/new", "OPPIA_DEV_GROUP_URL": "https://groups.google.com/forum/#!forum/oppia-dev", "PY_GITHUB_PATH": "../oppia_tools/PyGithub-1.45", "RELEASE_DRIVE_URL": "https://drive.google.com/drive/folders/0B9KSjiibL_WDNjJyYlEtbTNvY3c", "RELEASE_NOTES_EXAMPLE_URL": "https://docs.google.com/document/d/1OUwgMPNORABJAz7DS0iuDUr5A2FxcXg4Y5-qUEdgo-M", - "RELEASE_NOTES_TEMPLATE_URL": "https://docs.google.com/document/d/1VBa3pdRLnvobNlfmZB6-uRYJHBz_Gc-6eN_ilSoVlhE", - "RELEASE_NOTES_URL": "https://docs.google.com/document/d/1pmcDNfM2KtmkZeYipuInC48RE5JfkSJWQYdIQAkD0hQ", - "RELEASE_ROTA_URL": "https://github.com/oppia/oppia/wiki/Release-schedule-and-other-information#release-coordinators-and-qa-coordinators-for-upcoming-releases", - "RELEASE_SUMMARY_FILEPATH": "../release_summary.md", - "REMOTE_URL": "git@github.com:oppia/oppia.git", - "REPO_SPECIFIC_CHANGE_MODULE_TARGET": "scripts.release_scripts.repo_specific_changes_fetcher", - "DEPLOYMENT_CONFIG_PATH": "core/feconf.py", + "REMOTE_URLS": ["git@github.com:oppia/oppia.git", "https://github.com/oppia/oppia.git"], + "SERVER_ERROR_PLAYBOOK_URL": "https://docs.google.com/document/d/1nCEDVGpU_jfdb8oP5HuOGigJqwlxrd6FyqWaffaWUXo/edit#heading=h.dugm7khzkjay", 
"SCHEMA_VERSIONS_TO_JOBS_MAPPING": { - "CURRENT_COLLECTION_SCHEMA_VERSION": ["CollectionMigrationOneOffJob"], + "CURRENT_COLLECTION_SCHEMA_VERSION": ["MigrateCollectionJob"], "CURRENT_STATE_SCHEMA_VERSION": [ - "ExplorationMigrationJobManager", "QuestionMigrationOneOffJob", + "MigrateExplorationJob", "MigrateQuestionJob", "QuestionSuggestionMigrationJobManager"], - "CURRENT_SKILL_CONTENTS_SCHEMA_VERSION": ["SkillMigrationOneOffJob"], - "CURRENT_MISCONCEPTIONS_SCHEMA_VERSION": ["SkillMigrationOneOffJob"], - "CURRENT_RUBRIC_SCHEMA_VERSION": ["SkillMigrationOneOffJob"], - "CURRENT_STORY_CONTENTS_SCHEMA_VERSION": ["StoryMigrationOneOffJob"], - "CURRENT_SUBTOPIC_SCHEMA_VERSION": ["TopicMigrationOneOffJob"], - "CURRENT_STORY_REFERENCE_SCHEMA_VERSION": ["TopicMigrationOneOffJob"] + "CURRENT_SKILL_CONTENTS_SCHEMA_VERSION": ["MigrateSkillJob"], + "CURRENT_MISCONCEPTIONS_SCHEMA_VERSION": ["MigrateSkillJob"], + "CURRENT_RUBRIC_SCHEMA_VERSION": ["MigrateSkillJob"], + "CURRENT_STORY_CONTENTS_SCHEMA_VERSION": ["MigrateStoryJob"], + "CURRENT_SUBTOPIC_SCHEMA_VERSION": ["MigrateTopicJob"], + "CURRENT_STORY_REFERENCE_SCHEMA_VERSION": ["MigrateTopicJob"] }, "THIRD_PARTY_DIR": "third_party", - "UPDATE_CONFIGS_MODULE_TARGET": "scripts.release_scripts.update_configs" + "UPDATE_CONFIGS_MODULE_TARGET": "scripts.release_scripts.update_configs", + "GCLOUDIGNORE_PATH": ".gcloudignore" } diff --git a/assets/rich_text_components_definitions.ts b/assets/rich_text_components_definitions.ts index 1a15e7f85fdd..8410e0abe96c 100644 --- a/assets/rich_text_components_definitions.ts +++ b/assets/rich_text_components_definitions.ts @@ -74,7 +74,7 @@ export default { "type": "unicode", "validators": [{ "id": "has_length_at_most", - "max_value": 160 + "max_value": 500 }] }, "default_value": "" diff --git a/assets/scripts/embedding_tests_dev_0.0.1.html b/assets/scripts/embedding_tests_dev_0.0.1.html index 3735b6858702..35ae835df72c 100644 --- a/assets/scripts/embedding_tests_dev_0.0.1.html +++ 
b/assets/scripts/embedding_tests_dev_0.0.1.html @@ -15,27 +15,27 @@

Iframe embedding

-
+

Standard embedding of the latest version

-
+

Standard embedding of version 1 of the exploration with deferred loading

-
+

ERROR: No oppia id specified

-
+

ERROR: 404 error

-
+

ERROR: 404 error with deferred loading

diff --git a/assets/scripts/embedding_tests_dev_0.0.1.min.html b/assets/scripts/embedding_tests_dev_0.0.1.min.html index f4cf5d1abf57..e069f7cad753 100644 --- a/assets/scripts/embedding_tests_dev_0.0.1.min.html +++ b/assets/scripts/embedding_tests_dev_0.0.1.min.html @@ -7,7 +7,7 @@

v0.0.1 (minified)

@@ -16,37 +16,37 @@

v0.0.1 (minified)

- - + + -
+

Iframe embedding

-
+

Standard embedding of the latest version

-
+

Standard embedding of version 2 of the exploration with deferred loading

-
+

ERROR: No oppia id specified

-
+

ERROR: 404 error

-
+

ERROR: 404 error with deferred loading

@@ -57,7 +57,7 @@

ERROR: 404 error with deferred loading

var placeholderExplorationId = 'idToBeReplaced'; var onChangeExpId = function() { var newExplorationId = $( - '.protractor-test-exploration-id-input-field').val(); + '.e2e-test-exploration-id-input-field').val(); $('div > iframe').each(function(index) { var src = $(this).attr('src'); src = src.replace(placeholderExplorationId, newExplorationId); @@ -71,6 +71,6 @@

ERROR: 404 error with deferred loading

}); // Show the contents. - $('.protractor-test-results').show(); + $('.e2e-test-results').show(); }; diff --git a/assets/scripts/embedding_tests_dev_0.0.2.min.html b/assets/scripts/embedding_tests_dev_0.0.2.min.html index d5ce9365592f..33b86aeab7f6 100644 --- a/assets/scripts/embedding_tests_dev_0.0.2.min.html +++ b/assets/scripts/embedding_tests_dev_0.0.2.min.html @@ -7,7 +7,7 @@

v0.0.2 (minified)

@@ -16,32 +16,32 @@

v0.0.2 (minified)

- - + + -
+

Iframe embedding

-
+

Standard embedding of the latest version

-
+

Standard embedding of version 2 of the exploration

-
+

ERROR: No oppia id specified

-
+

ERROR: 404 error

@@ -52,7 +52,7 @@

ERROR: 404 error

var placeholderExplorationId = 'idToBeReplaced'; var onChangeExpId = function() { var newExplorationId = $( - '.protractor-test-exploration-id-input-field').val(); + '.e2e-test-exploration-id-input-field').val(); $('div > iframe').each(function(index) { var src = $(this).attr('src'); src = src.replace(placeholderExplorationId, newExplorationId); @@ -66,6 +66,6 @@

ERROR: 404 error

}); // Show the contents - $('.protractor-test-results').show(); + $('.e2e-test-results').show(); }; diff --git a/assets/scripts/embedding_tests_dev_0.0.3.min.html b/assets/scripts/embedding_tests_dev_0.0.3.min.html index 55e348b3e97c..98cc06ddfc5d 100644 --- a/assets/scripts/embedding_tests_dev_0.0.3.min.html +++ b/assets/scripts/embedding_tests_dev_0.0.3.min.html @@ -15,22 +15,22 @@

Iframe embedding

-
+

Standard embedding of the latest version

-
+

Standard embedding of version 1 of the exploration

-
+

ERROR: No oppia id specified

-
+

ERROR: 404 error

diff --git a/assets/scripts/embedding_tests_dev_i18n_0.0.1.html b/assets/scripts/embedding_tests_dev_i18n_0.0.1.html index aaf55f061cd2..c8b3ca796516 100644 --- a/assets/scripts/embedding_tests_dev_i18n_0.0.1.html +++ b/assets/scripts/embedding_tests_dev_i18n_0.0.1.html @@ -2,7 +2,7 @@

v0.0.1 (unminified)

For instructions: see the README file in /assets/scripts.

@@ -11,13 +11,13 @@

v0.0.1 (unminified)

- - + + -
-
+
+

Standard embedding of the latest version

- +
@@ -26,7 +26,7 @@

Standard embedding of the latest version

var placeholderExplorationId = 'idToBeReplaced'; var onChangeExpId = function() { var newExplorationId = $( - '.protractor-test-exploration-id-input-field').val(); + '.e2e-test-exploration-id-input-field').val(); $('div > iframe').each(function(index) { var src = $(this).attr('src'); src = src.replace(placeholderExplorationId, newExplorationId); @@ -35,6 +35,6 @@

Standard embedding of the latest version

placeholderExplorationId = newExplorationId; // Show the contents. - $('.protractor-test-results').show(); + $('.e2e-test-results').show(); }; diff --git a/assets/scripts/embedding_tests_jsdelivr_0.0.1.min.html b/assets/scripts/embedding_tests_jsdelivr_0.0.1.min.html index a0c68046c069..42f8dad0a78b 100644 --- a/assets/scripts/embedding_tests_jsdelivr_0.0.1.min.html +++ b/assets/scripts/embedding_tests_jsdelivr_0.0.1.min.html @@ -13,27 +13,27 @@

v0.0.1 (minified) from the JSDelivr CDN

-
+

Standard embedding of the latest version

-
+

Standard embedding of version 1 of the exploration with deferred loading

-
+

ERROR: No oppia id specified

-
+

ERROR: 404 error

-
+

ERROR: 404 error with deferred loading

diff --git a/assets/scripts/embedding_tests_jsdelivr_0.0.2.min.html b/assets/scripts/embedding_tests_jsdelivr_0.0.2.min.html index 662a1e4d48a0..62c38e9b1ac9 100644 --- a/assets/scripts/embedding_tests_jsdelivr_0.0.2.min.html +++ b/assets/scripts/embedding_tests_jsdelivr_0.0.2.min.html @@ -13,27 +13,27 @@

v0.0.2 (minified) from the JSDelivr CDN

-
+

Standard embedding of the latest version

-
+

Standard embedding of version 1 of the exploration with deferred loading

-
+

ERROR: No oppia id specified

-
+

ERROR: 404 error

-
+

ERROR: 404 error with deferred loading

diff --git a/assets/scripts/oppia-player-0.0.1.js b/assets/scripts/oppia-player-0.0.1.js index e49af433957d..aa6a0de289d6 100644 --- a/assets/scripts/oppia-player-0.0.1.js +++ b/assets/scripts/oppia-player-0.0.1.js @@ -59,10 +59,29 @@ * location hash. This defends against fraudulent messages being sent to the * child iframe by other code within the parent page. */ + + /** + * Generate a 0-1 random number using a crytographically secure method + * without using division, which results in biased random numbers. Reference: + * https://thecompetentdev.com/weeklyjstips/tips/73_generate_secure_randoms/ + * @returns The random number between 0 and 1. + */ + const random = () => { + var buffer = new ArrayBuffer(8); + var ints = new Int8Array(buffer); + window.crypto.getRandomValues(ints); + + ints[7] = 63; + ints[6] |= 0xf0; + + var float = new DataView(buffer).getFloat64(0, true) - 1; + return float; + } + var SECRET_LENGTH = 64; var secret = ''; for (var i = 0; i < SECRET_LENGTH; i++) { - secret += String.fromCharCode(65 + Math.floor(Math.random() * 26)); + secret += String.fromCharCode(65 + Math.floor(random() * 26)); } var OppiaEmbed = (function() { @@ -187,9 +206,11 @@ var VERSION_KEY = 'version='; var SECRET_KEY = 'secret='; var versionString = explorationVersion ? '&v=' + explorationVersion : ''; + var langCode = this.oppiaNode.getAttribute('exp-language') || ''; + var languageString = langCode ? 
'&lang=' + langCode : ''; this.iframe.src = encodeURI( - this.targetDomain + '/explore/' + this.oppiaNode.getAttribute('oppia-id') + - '?iframed=true&locale=en' + versionString + + this.targetDomain + '/embed/exploration/' + this.oppiaNode.getAttribute('oppia-id') + + '?iframed=true&locale=en' + versionString + languageString + '#' + VERSION_KEY + OPPIA_EMBED_GLOBALS.version + '&' + SECRET_KEY + secret); diff --git a/assets/scripts/oppia-player-0.0.2.js b/assets/scripts/oppia-player-0.0.2.js index caa8f15cab6f..48658ddcdea9 100644 --- a/assets/scripts/oppia-player-0.0.2.js +++ b/assets/scripts/oppia-player-0.0.2.js @@ -56,10 +56,29 @@ * location hash. This defends against fraudulent messages being sent to the * child iframe by other code within the parent page. */ + + /** + * Generate a 0-1 random number using a crytographically secure method + * without using division, which results in biased random numbers. Reference: + * https://thecompetentdev.com/weeklyjstips/tips/73_generate_secure_randoms/ + * @returns The random number between 0 and 1. + */ + const random = () => { + var buffer = new ArrayBuffer(8); + var ints = new Int8Array(buffer); + window.crypto.getRandomValues(ints); + + ints[7] = 63; + ints[6] |= 0xf0; + + var float = new DataView(buffer).getFloat64(0, true) - 1; + return float; + } + var SECRET_LENGTH = 64; var secret = ''; for (var i = 0; i < SECRET_LENGTH; i++) { - secret += String.fromCharCode(65 + Math.floor(Math.random() * 26)); + secret += String.fromCharCode(65 + Math.floor(random() * 26)); } var OppiaEmbed = (function() { diff --git a/assets/scripts/oppia-player-0.0.3.js b/assets/scripts/oppia-player-0.0.3.js index f34ba118e886..6d7671f62065 100644 --- a/assets/scripts/oppia-player-0.0.3.js +++ b/assets/scripts/oppia-player-0.0.3.js @@ -56,10 +56,29 @@ * location hash. This defends against fraudulent messages being sent to the * child iframe by other code within the parent page. 
*/ + + /** + * Generate a 0-1 random number using a crytographically secure method + * without using division, which results in biased random numbers. Reference: + * https://thecompetentdev.com/weeklyjstips/tips/73_generate_secure_randoms/ + * @returns The random number between 0 and 1. + */ + const random = () => { + var buffer = new ArrayBuffer(8); + var ints = new Int8Array(buffer); + window.crypto.getRandomValues(ints); + + ints[7] = 63; + ints[6] |= 0xf0; + + var float = new DataView(buffer).getFloat64(0, true) - 1; + return float; + } + var SECRET_LENGTH = 64; var secret = ''; for (var i = 0; i < SECRET_LENGTH; i++) { - secret += String.fromCharCode(65 + Math.floor(Math.random() * 26)); + secret += String.fromCharCode(65 + Math.floor(random() * 26)); } var OppiaEmbed = (function() { diff --git a/assets/security.txt b/assets/security.txt deleted file mode 100644 index d047ebb4fcb0..000000000000 --- a/assets/security.txt +++ /dev/null @@ -1,4 +0,0 @@ -Contact: admin@oppia.org -Preferred-Languages: en -Canonical: https://www.oppia.org/security.txt -Acknowledgements: https://www.oppia.org/about#credits diff --git a/assets/sitemap.xml b/assets/sitemap.xml index db799ee45705..d37733ae5e81 100644 --- a/assets/sitemap.xml +++ b/assets/sitemap.xml @@ -3,6 +3,9 @@ https://www.oppia.org/ + + https://www.oppia.org/android + https://www.oppia.org/donate @@ -27,6 +30,9 @@ https://www.oppia.org/community-library + + https://www.oppia.org/blog + https://www.oppia.org/contact diff --git a/core/android_validation_constants.py b/core/android_validation_constants.py index 029ebcbe2c74..55e06e4a6c26 100644 --- a/core/android_validation_constants.py +++ b/core/android_validation_constants.py @@ -41,7 +41,7 @@ SUPPORTED_LANGUAGES = ['en'] # This is linked to VALID_RTE_COMPONENTS_FOR_ANDROID in constants.ts. 
-VALID_RTE_COMPONENTS = ['image', 'link', 'math', 'skillreview'] +VALID_RTE_COMPONENTS = ['image', 'math', 'skillreview'] # If any of the following values are changed, edit the corresponding value in # app.constants.ts as well. @@ -49,8 +49,11 @@ MAX_CHARS_IN_ABBREV_TOPIC_NAME = 12 MAX_CHARS_IN_TOPIC_DESCRIPTION = 240 MAX_CHARS_IN_SUBTOPIC_TITLE = 64 +MAX_CHARS_IN_SUBTOPIC_URL_FRAGMENT = 25 +SUBTOPIC_URL_FRAGMENT_REGEXP = '^[a-z]+(-[a-z]+)*$' MAX_CHARS_IN_SKILL_DESCRIPTION = 100 MAX_CHARS_IN_STORY_TITLE = 39 +MAX_CHARS_IN_STORY_DESCRIPTION = 1000 MAX_CHARS_IN_EXPLORATION_TITLE = 36 MAX_CHARS_IN_CHAPTER_DESCRIPTION = 152 MAX_CHARS_IN_MISCONCEPTION_NAME = 100 diff --git a/core/android_validation_constants_test.py b/core/android_validation_constants_test.py index 8e372aabd3e7..b2e1159972bc 100644 --- a/core/android_validation_constants_test.py +++ b/core/android_validation_constants_test.py @@ -54,6 +54,9 @@ def test_that_character_limits_in_both_files_are_equal(self) -> None: self.assertEqual( android_validation_constants.MAX_CHARS_IN_MISCONCEPTION_NAME, constants.MAX_CHARS_IN_MISCONCEPTION_NAME) + self.assertEqual( + android_validation_constants.MAX_CHARS_IN_STORY_DESCRIPTION, + constants.MAX_CHARS_IN_STORY_DESCRIPTION) def test_exploration_constants_in_both_files_are_equal(self) -> None: interaction_ids_in_constants = [] @@ -67,12 +70,12 @@ def test_exploration_constants_in_both_files_are_equal(self) -> None: for obj in constants_languages_list: language_ids_in_constants.append(obj['code']) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( interaction_ids_in_constants, android_validation_constants.VALID_INTERACTION_IDS) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( constants.VALID_RTE_COMPONENTS_FOR_ANDROID, android_validation_constants.VALID_RTE_COMPONENTS) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( language_ids_in_constants, 
android_validation_constants.SUPPORTED_LANGUAGES) diff --git a/core/constants.py b/core/constants.py index ec312aa5f49e..b99c7480561b 100644 --- a/core/constants.py +++ b/core/constants.py @@ -18,16 +18,17 @@ from __future__ import annotations +import io import json +import os +import pkgutil import re -from core import python_utils +from typing import Any, Dict, Literal, Union, overload -from typing import Any, Dict - -# Here we use Dict[str, Any] as return type because we need to parse and return -# generic JSON objects. +# Here we use type Any because we need to parse and return the generic JSON +# objects and these JSON objects are of type Dict[str, Any]. def parse_json_from_ts(ts_file_contents: str) -> Dict[str, Any]: """Extracts JSON object from TS file. @@ -42,6 +43,8 @@ def parse_json_from_ts(ts_file_contents: str) -> Dict[str, Any]: json_start = text_without_comments.find('{\n') # Add 1 to index returned because the '}' is part of the JSON object. json_end = text_without_comments.rfind('}') + 1 + # Here we use type Any because 'json_dict' is a generic JSON object and + # generic JSON objects are of type Dict[str, Any]. json_dict: Dict[str, Any] = ( json.loads(text_without_comments[json_start:json_end])) return json_dict @@ -59,27 +62,90 @@ def remove_comments(text: str) -> str: return re.sub(r' //.*\n', r'', text) +# This function could be in utils but a race conditions happens because of +# the chronology of our files execution. utils imports constants and constants +# need utils.get_package_file_contents but it does not have it loaded to memory +# yet. If called from utils we get error as `module has no attribute`. +@overload +def get_package_file_contents( + package: str, filepath: str, *, binary_mode: Literal[True] +) -> bytes: ... + + +@overload +def get_package_file_contents(package: str, filepath: str) -> str: ... + + +@overload +def get_package_file_contents( + package: str, filepath: str, *, binary_mode: Literal[False] +) -> str: ... 
+ + +def get_package_file_contents( + package: str, filepath: str, *, binary_mode: bool = False +) -> Union[str, bytes]: + """Open file and return its contents. This needs to be used for files that + are loaded by the Python code directly, like constants.ts or + rich_text_components.json. This function is needed to make loading these + files work even when Oppia is packaged. + + Args: + package: str. The package where the file is located. + For Oppia the package is usually the folder in the root folder, + like 'core' or 'extensions'. + filepath: str. The path to the file in the package. + binary_mode: bool. True when we want to read file in binary mode. + + Returns: + str. The contents of the file. + + Raises: + FileNotFoundError. The file does not exist. + """ + try: + if binary_mode: + with io.open( + os.path.join(package, filepath), 'rb', encoding=None + ) as binary_file: + read_binary_mode_data: bytes = binary_file.read() + return read_binary_mode_data + with io.open( + os.path.join(package, filepath), 'r', encoding='utf-8' + ) as file: + return file.read() + except FileNotFoundError as e: + file_data = pkgutil.get_data(package, filepath) + if file_data is None: + raise e + if binary_mode: + return file_data + return file_data.decode('utf-8') + + +# Here we use MyPy ignore because the flag 'disallow-any-generics' is disabled +# in MyPy settings and this flag does not allow generic types to be defined +# without type parameters, but here to transform dicts to objects, we are +# inheriting from dict type without providing type parameters which cause MyPy +# to throw an error. Thus, to avoid the error, we used ignore here. 
class Constants(dict): # type: ignore[type-arg] """Transforms dict to object, attributes can be accessed by dot notation.""" - # Here `value` has the type Any because it parses and stores the values of - # contants defined in constants.ts file and we cannot define a single type + # Here we use type Any because this method parses and stores the values of + # constants defined in constants.ts file and we cannot define a single type # which works for all of them. def __setattr__(self, name: str, value: Any) -> None: self[name] = value - # The return value here refers to the `value` in the above method, hence the - # type Any is used for it. + # Here we use type Any because the return value here refers to the `value` + # in the __setattr__ method, hence the type Any is used for it. def __getattr__(self, name: str) -> Any: return self[name] constants = Constants(parse_json_from_ts( # pylint:disable=invalid-name - python_utils.get_package_file_contents('assets', 'constants.ts'))) + get_package_file_contents('assets', 'constants.ts'))) release_constants = Constants( # pylint:disable=invalid-name - json.loads( - python_utils.get_package_file_contents( - 'assets', 'release_constants.json') - ) + json.loads(get_package_file_contents('assets', 'release_constants.json')) ) diff --git a/core/constants_test.py b/core/constants_test.py index a68f6b5dd2a8..ea87589dcec9 100644 --- a/core/constants_test.py +++ b/core/constants_test.py @@ -17,10 +17,11 @@ from __future__ import annotations import os +import pkgutil from core import constants from core import feconf -from core import python_utils +from core import utils from core.tests import test_utils @@ -33,13 +34,72 @@ def test_constants_file_is_existing(self) -> None: def test_constants_file_contains_valid_json(self) -> None: """Test if the constants file is valid json file.""" - with python_utils.open_file( # type: ignore[no-untyped-call] + with utils.open_file( os.path.join('assets', 'constants.ts'), 'r' ) as f: json = 
constants.parse_json_from_ts(f.read()) self.assertTrue(isinstance(json, dict)) self.assertEqual(json['TESTING_CONSTANT'], 'test') + def test_loading_non_existing_file_throws_error(self) -> None: + """Test get_package_file_contents with imaginary file.""" + with self.swap_to_always_raise( + pkgutil, + 'get_data', + FileNotFoundError( + 'No such file or directory: \'assets/non_exist.xy\'' + ) + ): + with self.assertRaisesRegex( + FileNotFoundError, + 'No such file or directory: \'assets/non_exist.xy\'' + ): + constants.get_package_file_contents( + 'assets', 'non_exist.xy', binary_mode=False) + + def test_loading_binary_file_in_package_returns_the_content(self) -> None: + """Test get_package_file_contents with imaginary binary file.""" + with self.swap_to_always_return(pkgutil, 'get_data', 'File data'): + self.assertEqual( + constants.get_package_file_contents( + 'assets', 'non_exist.xy', binary_mode=True), 'File data' + ) + + def test_loading_binary_file_returns_the_content(self) -> None: + """Test get_package_file_contents with binary file.""" + with utils.open_file( + os.path.join( + 'assets', 'images', 'avatar', 'user_blue_150px.png'), + 'rb', + encoding=None + ) as f: + raw_image_png = f.read() + default_image_path = os.path.join( + 'images', 'avatar', 'user_blue_150px.png') + self.assertEqual( + constants.get_package_file_contents( + 'assets', default_image_path, binary_mode=True), raw_image_png + ) + + def test_loading_file_in_package_returns_the_content(self) -> None: + """Test get_package_file_contents with imaginary file.""" + with self.swap_to_always_return(pkgutil, 'get_data', b'File data'): + self.assertEqual( + constants.get_package_file_contents('assets', 'non_exist.xy'), + 'File data' + ) + + def test_loading_file_in_non_existent_package_throws_error(self) -> None: + """Test get_package_file_contents with imaginary file.""" + with self.swap_to_always_return(pkgutil, 'get_data', None): + with self.assertRaisesRegex( + FileNotFoundError, + 'No such file 
or directory: \'assets/non_exist.xy\'' + ): + constants.get_package_file_contents('assets', 'non_exist.xy') + constants.get_package_file_contents( + 'assets', 'non_exist.xy', binary_mode=True) + def test_difficulty_values_are_matched(self) -> None: """Tests that the difficulty values and strings are matched in the various constants. @@ -49,13 +109,13 @@ def test_difficulty_values_are_matched(self) -> None: constants.constants.SKILL_DIFFICULTY_EASY, constants.constants.SKILL_DIFFICULTY_MEDIUM, constants.constants.SKILL_DIFFICULTY_HARD]) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( list(constants.constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.keys()), constants.constants.SKILL_DIFFICULTIES) self.assertEqual( constants.constants.DEFAULT_SKILL_DIFFICULTY, constants.constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT[ - constants.constants.SKILL_DIFFICULTY_EASY]) + constants.constants.SKILL_DIFFICULTY_MEDIUM]) def test_constants_and_feconf_are_consistent(self) -> None: """Test if constants that are related are consistent between feconf and @@ -71,7 +131,7 @@ def test_all_comments_are_removed_from_json_text(self) -> None: """Tests if comments are removed from json text.""" dummy_constants_filepath = os.path.join( feconf.TESTS_DATA_DIR, 'dummy_constants.js') - with python_utils.open_file(dummy_constants_filepath, 'r') as f: # type: ignore[no-untyped-call] + with utils.open_file(dummy_constants_filepath, 'r') as f: actual_text_without_comments = constants.remove_comments(f.read()) expected_text_without_comments = ( 'var dummy_constants = {\n' @@ -122,3 +182,8 @@ def test_language_constants_are_in_sync(self) -> None: set(rtl_audio_languages) & set(ltr_content_languages) ) self.assertFalse(conflicts) + + def test_constants_can_be_set(self) -> None: + """Test __setattr__ to see if constants can be set as needed.""" + with self.swap(constants.constants, 'TESTING_CONSTANT', 'test_2'): + self.assertEqual(constants.constants.TESTING_CONSTANT, 'test_2') diff 
--git a/core/controllers/access_validators.py b/core/controllers/access_validators.py index 7f3e861ce5ce..0cc28b4ff040 100644 --- a/core/controllers/access_validators.py +++ b/core/controllers/access_validators.py @@ -17,29 +17,41 @@ from __future__ import annotations from core import feconf +from core.constants import constants from core.controllers import acl_decorators from core.controllers import base +from core.domain import blog_services from core.domain import classroom_services +from core.domain import config_domain +from core.domain import learner_group_services from core.domain import user_services -from typing import Any, Dict # isort: skip +from typing import Dict, TypedDict # TODO(#13605): Refactor access validation handlers to follow a single handler # pattern. -class ClassroomAccessValidationHandler(base.BaseHandler): +class ClassroomAccessValidationHandlerNormalizedRequestDict(TypedDict): + """Dict representation of ClassroomAccessValidationHandler's + normalized_request dictionary. + """ + + classroom_url_fragment: str + + +class ClassroomAccessValidationHandler( + base.BaseHandler[ + Dict[str, str], ClassroomAccessValidationHandlerNormalizedRequestDict + ] +): """Validates whether request made to /learn route. """ GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - # Type[str, Any] is used to match the type defined for this attribute in - # its parent class `base.BaseHandler`. - URL_PATH_ARGS_SCHEMAS: Dict[str, Any] = {} - # Type[str, Any] is used to match the type defined for this attribute in - # its parent class `base.BaseHandler`. - HANDLER_ARGS_SCHEMAS: Dict[str, Any] = { + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS = { 'GET': { 'classroom_url_fragment': { 'schema': { @@ -49,97 +61,209 @@ class ClassroomAccessValidationHandler(base.BaseHandler): } } - # Using type ignore[misc] here because untyped decorator makes function - # "get" also untyped. 
- @acl_decorators.open_access # type: ignore[misc] + @acl_decorators.open_access def get(self) -> None: - # Please use type casting here instead of type ignore[union-attr] once - # this attribute `normalized_request` has been type annotated in the - # parent class BaseHandler. - classroom_url_fragment = self.normalized_request.get( # type: ignore[union-attr] - 'classroom_url_fragment') - classroom = classroom_services.get_classroom_by_url_fragment( # type: ignore[no-untyped-call] + assert self.normalized_request is not None + classroom_url_fragment = self.normalized_request[ + 'classroom_url_fragment' + ] + classroom = classroom_services.get_classroom_by_url_fragment( classroom_url_fragment) if not classroom: raise self.PageNotFoundException -class ManageOwnAccountValidationHandler(base.BaseHandler): +class ManageOwnAccountValidationHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): """Validates access to preferences page. """ GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - # Type[str, Any] is used to match the type defined for this attribute in - # its parent class `base.BaseHandler`. - URL_PATH_ARGS_SCHEMAS: Dict[str, Any] = {} - - # Type[str, Any] is used to match the type defined for this attribute in - # its parent class `base.BaseHandler`. - HANDLER_ARGS_SCHEMAS: Dict[str, Any] = { + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = { 'GET': {} } - # Using type ignore[misc] here because untyped decorator makes function - # "get" also untyped. 
- @acl_decorators.can_manage_own_account # type: ignore[misc] + @acl_decorators.can_manage_own_account def get(self) -> None: pass -class ProfileExistsValidationHandler(base.BaseHandler): +class ProfileExistsValidationHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): """The world-viewable profile page.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - # Type[str, Any] is used to match the type defined for this attribute in - # its parent class `base.BaseHandler`. - URL_PATH_ARGS_SCHEMAS: Dict[str, Any] = { + URL_PATH_ARGS_SCHEMAS = { 'username': { 'schema': { 'type': 'basestring' } } } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} - # Type[str, Any] is used to match the type defined for this attribute in - # its parent class `base.BaseHandler`. - HANDLER_ARGS_SCHEMAS: Dict[str, Any] = { - 'GET': {} - } - - # Using type ignore[misc] here because untyped decorator makes function - # "get" also untyped. - @acl_decorators.open_access # type: ignore[misc] + @acl_decorators.open_access def get(self, username: str) -> None: """Validates access to profile page.""" - user_settings = user_services.get_user_settings_from_username( # type: ignore[no-untyped-call] + user_settings = user_services.get_user_settings_from_username( username) if not user_settings: raise self.PageNotFoundException -class ReleaseCoordinatorAccessValidationHandler(base.BaseHandler): +class ReleaseCoordinatorAccessValidationHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): """Validates access to release coordinator page.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - # Type[str, Any] is used to match the type defined for this attribute in - # its parent class `base.BaseHandler`. - URL_PATH_ARGS_SCHEMAS: Dict[str, Any] = {} - - # Type[str, Any] is used to match the type defined for this attribute in - # its parent class `base.BaseHandler`. 
- HANDLER_ARGS_SCHEMAS: Dict[str, Any] = { + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = { 'GET': {} } - # Using type ignore[misc] here because untyped decorator makes function - # "get" also untyped. - @acl_decorators.can_access_release_coordinator_page # type: ignore[misc] + @acl_decorators.can_access_release_coordinator_page def get(self) -> None: """Handles GET requests.""" pass + + +class ViewLearnerGroupPageAccessValidationHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): + """Validates access to view learner group page.""" + + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + + URL_PATH_ARGS_SCHEMAS = { + 'learner_group_id': { + 'schema': { + 'type': 'basestring', + 'validators': [{ + 'id': 'is_regex_matched', + 'regex_pattern': constants.LEARNER_GROUP_ID_REGEX + }] + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = { + 'GET': {} + } + + @acl_decorators.can_access_learner_groups + def get(self, learner_group_id: str) -> None: + """Handles GET requests.""" + assert self.user_id is not None + if not config_domain.LEARNER_GROUPS_ARE_ENABLED.value: + raise self.PageNotFoundException + + is_valid_request = learner_group_services.is_user_learner( + self.user_id, learner_group_id) + + if not is_valid_request: + raise self.PageNotFoundException + + +class BlogHomePageAccessValidationHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): + """Validates access to blog home page.""" + + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = { + 'GET': {} + } + + @acl_decorators.open_access + def get(self) -> None: + """Validates access to blog home page.""" + pass + + +class BlogPostPageAccessValidationHandlerNormalizedRequestDict(TypedDict): + """Dict representation of BlogPostPageAccessValidationHandler's + normalized_request dictionary. 
+ """ + + blog_post_url_fragment: str + + +class BlogPostPageAccessValidationHandler( + base.BaseHandler[ + Dict[str, str], BlogPostPageAccessValidationHandlerNormalizedRequestDict + ] +): + """Validates whether request made to correct blog post route.""" + + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS = { + 'GET': { + 'blog_post_url_fragment': { + 'schema': { + 'type': 'basestring' + } + } + } + } + + @acl_decorators.open_access + def get(self) -> None: + assert self.normalized_request is not None + blog_post_url_fragment = self.normalized_request[ + 'blog_post_url_fragment'] + blog_post = blog_services.get_blog_post_by_url_fragment( + blog_post_url_fragment) + + if not blog_post: + raise self.PageNotFoundException + + +class BlogAuthorProfilePageAccessValidationHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): + """Validates access to blog author profile page.""" + + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + + URL_PATH_ARGS_SCHEMAS = { + 'author_username': { + 'schema': { + 'type': 'basestring' + }, + 'validators': [{ + 'id': 'has_length_at_most', + 'max_value': constants.MAX_AUTHOR_NAME_LENGTH + }] + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = { + 'GET': {} + } + + @acl_decorators.open_access + def get(self, author_username: str) -> None: + author_settings = ( + user_services.get_user_settings_from_username(author_username)) + + if author_settings is None: + raise self.PageNotFoundException( + 'User with given username does not exist' + ) + + if not user_services.is_user_blog_post_author(author_settings.user_id): + raise self.PageNotFoundException( + 'User with given username is not a blog post author.' 
+ ) diff --git a/core/controllers/access_validators_test.py b/core/controllers/access_validators_test.py index 89818ad1734f..be75fe9ecf4a 100644 --- a/core/controllers/access_validators_test.py +++ b/core/controllers/access_validators_test.py @@ -16,25 +16,41 @@ from __future__ import annotations +import datetime + from core import feconf from core.domain import config_services +from core.domain import learner_group_fetchers +from core.domain import learner_group_services +from core.platform import models +from core.storage.blog import gae_models as blog_models from core.tests import test_utils -ACCESS_VALIDATION_HANDLER_PREFIX = feconf.ACCESS_VALIDATION_HANDLER_PREFIX +from typing import Final + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import blog_models + +(blog_models,) = models.Registry.import_models([models.Names.BLOG]) + +ACCESS_VALIDATION_HANDLER_PREFIX: Final = ( + feconf.ACCESS_VALIDATION_HANDLER_PREFIX +) class ClassroomPageAccessValidationHandlerTests(test_utils.GenericTestBase): def setUp(self) -> None: - super(ClassroomPageAccessValidationHandlerTests, self).setUp() + super().setUp() self.signup( self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) # type: ignore[no-untyped-call] + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.user_id_admin = ( - self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) # type: ignore[no-untyped-call] + self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) - self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) # type: ignore[no-untyped-call] - config_services.set_property( # type: ignore[no-untyped-call] + self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) + config_services.set_property( self.user_id_admin, 'classroom_pages_data', [{ 'name': 'math', 'url_fragment': 'math', @@ -45,7 +61,7 @@ def setUp(self) -> None: def 
test_validation_returns_true_if_classroom_is_available(self) -> None: self.login(self.EDITOR_EMAIL) - self.get_html_response( # type: ignore[no-untyped-call] + self.get_html_response( '%s/can_access_classroom_page?classroom_url_fragment=%s' % (ACCESS_VALIDATION_HANDLER_PREFIX, 'math')) @@ -63,7 +79,7 @@ class ReleaseCoordinatorAccessValidationHandlerTests( def setUp(self) -> None: """Complete the signup process for self.RELEASE_COORDINATOR_EMAIL.""" - super(ReleaseCoordinatorAccessValidationHandlerTests, self).setUp() + super().setUp() self.signup( self.RELEASE_COORDINATOR_EMAIL, self.RELEASE_COORDINATOR_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) @@ -86,7 +102,7 @@ def test_exploration_editor_does_not_pass_validation(self) -> None: def test_release_coordinator_passes_validation(self) -> None: self.login(self.RELEASE_COORDINATOR_EMAIL) - self.get_html_response( # type: ignore[no-untyped-call] + self.get_html_response( '%s/can_access_release_coordinator_page' % ACCESS_VALIDATION_HANDLER_PREFIX) @@ -94,7 +110,7 @@ def test_release_coordinator_passes_validation(self) -> None: class ProfileExistsValidationHandlerTests(test_utils.GenericTestBase): def setUp(self) -> None: - super(ProfileExistsValidationHandlerTests, self).setUp() + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) @@ -103,7 +119,7 @@ def test_profile_validation_returns_true_if_user_views_other_profile( ) -> None: # Viewer looks at editor's profile page. self.login(self.VIEWER_EMAIL) - self.get_html_response( # type: ignore[no-untyped-call] + self.get_html_response( '%s/does_profile_exist/%s' % ( ACCESS_VALIDATION_HANDLER_PREFIX, self.EDITOR_USERNAME)) self.logout() @@ -113,7 +129,7 @@ def test_profile_validation_returns_true_if_user_views_own_profile( ) -> None: # Editor looks at their own profile page. 
self.login(self.EDITOR_EMAIL) - self.get_html_response( # type: ignore[no-untyped-call] + self.get_html_response( '%s/does_profile_exist/%s' % ( ACCESS_VALIDATION_HANDLER_PREFIX, self.EDITOR_USERNAME)) self.logout() @@ -138,10 +154,10 @@ class ManageOwnAccountValidationHandlerTests(test_utils.GenericTestBase): user_email = 'user@example.com' def setUp(self) -> None: - super(ManageOwnAccountValidationHandlerTests, self).setUp() + super().setUp() self.signup(self.banned_user_email, self.banned_user) self.signup(self.user_email, self.username) - self.mark_user_banned(self.banned_user) # type: ignore[no-untyped-call] + self.mark_user_banned(self.banned_user) def test_banned_user_cannot_manage_account(self) -> None: self.login(self.banned_user_email) @@ -151,6 +167,225 @@ def test_banned_user_cannot_manage_account(self) -> None: def test_normal_user_can_manage_account(self) -> None: self.login(self.user_email) - self.get_html_response( # type: ignore[no-untyped-call] + self.get_html_response( '%s/can_manage_own_account' % ACCESS_VALIDATION_HANDLER_PREFIX) self.logout() + + +class ViewLearnerGroupPageAccessValidationHandlerTests( + test_utils.GenericTestBase +): + + def setUp(self) -> None: + super().setUp() + self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME) + self.signup( + self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + + self.facilitator_id = self.get_user_id_from_email( + self.CURRICULUM_ADMIN_EMAIL) + self.learner_id = self.get_user_id_from_email(self.NEW_USER_EMAIL) + + self.LEARNER_GROUP_ID = ( + learner_group_fetchers.get_new_learner_group_id() + ) + learner_group_services.create_learner_group( + self.LEARNER_GROUP_ID, 'Learner Group Title', 'Description', + [self.facilitator_id], [self.learner_id], + ['subtopic_id_1'], ['story_id_1']) + + self.login(self.NEW_USER_EMAIL) + + def test_validation_returns_false_with_learner_groups_feature_disabled( + self + ) -> None: + config_services.set_property( + 'admin', 'learner_groups_are_enabled', 
False) + self.get_json( + '%s/does_learner_group_exist/%s' % ( + ACCESS_VALIDATION_HANDLER_PREFIX, self.LEARNER_GROUP_ID), + expected_status_int=404) + self.logout() + + def test_validation_returns_false_with_user_not_being_a_learner( + self + ) -> None: + config_services.set_property( + 'admin', 'learner_groups_are_enabled', True) + self.get_json( + '%s/does_learner_group_exist/%s' % ( + ACCESS_VALIDATION_HANDLER_PREFIX, self.LEARNER_GROUP_ID), + expected_status_int=404) + self.logout() + + def test_validation_returns_true_for_valid_learner(self) -> None: + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, self.learner_id, False) + config_services.set_property( + 'admin', 'learner_groups_are_enabled', True) + self.get_html_response( + '%s/does_learner_group_exist/%s' % ( + ACCESS_VALIDATION_HANDLER_PREFIX, self.LEARNER_GROUP_ID)) + + +class BlogHomePageAccessValidationHandlerTests(test_utils.GenericTestBase): + """Checks the access to the blog home page and its rendering.""" + + def test_blog_home_page_access_without_logging_in(self) -> None: + self.get_html_response( + '%s/can_access_blog_home_page' % + ACCESS_VALIDATION_HANDLER_PREFIX, expected_status_int=200) + + def test_blog_home_page_access_without_having_rights(self) -> None: + self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) + self.login(self.VIEWER_EMAIL) + self.get_html_response( + '%s/can_access_blog_home_page' % + ACCESS_VALIDATION_HANDLER_PREFIX, expected_status_int=200) + self.logout() + + def test_blog_home_page_access_as_blog_admin(self) -> None: + self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME) + self.add_user_role( + self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN) + self.login(self.BLOG_ADMIN_EMAIL) + self.get_html_response( + '%s/can_access_blog_home_page' % + ACCESS_VALIDATION_HANDLER_PREFIX, expected_status_int=200) + self.logout() + + def test_blog_home_page_access_as_blog_post_editor(self) -> None: + self.signup(self.BLOG_EDITOR_EMAIL, 
self.BLOG_EDITOR_USERNAME) + self.add_user_role( + self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR) + self.login(self.BLOG_EDITOR_EMAIL) + self.get_html_response( + '%s/can_access_blog_home_page' % + ACCESS_VALIDATION_HANDLER_PREFIX, expected_status_int=200) + self.logout() + + +class BlogPostPageAccessValidationHandlerTests(test_utils.GenericTestBase): + """Checks the access to the blog post page and its rendering.""" + + def setUp(self) -> None: + super().setUp() + blog_post_model = blog_models.BlogPostModel( + id='blog_one', + author_id='user_1', + content='content', + title='title', + published_on=datetime.datetime.utcnow(), + url_fragment='sample-url', + tags=['news'], + thumbnail_filename='thumbnail.svg', + ) + blog_post_model.update_timestamps() + blog_post_model.put() + + def test_blog_post_page_access_without_logging_in(self) -> None: + self.get_html_response( + '%s/can_access_blog_post_page?blog_post_url_fragment=sample-url' % + ACCESS_VALIDATION_HANDLER_PREFIX, expected_status_int=200) + + def test_blog_post_page_access_without_having_rights(self) -> None: + self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) + self.login(self.VIEWER_EMAIL) + self.get_html_response( + '%s/can_access_blog_post_page?blog_post_url_fragment=sample-url' % + ACCESS_VALIDATION_HANDLER_PREFIX, expected_status_int=200) + self.logout() + + def test_blog_post_page_access_as_blog_admin(self) -> None: + self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME) + self.add_user_role( + self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN) + self.login(self.BLOG_ADMIN_EMAIL) + self.get_html_response( + '%s/can_access_blog_post_page?blog_post_url_fragment=sample-url' % + ACCESS_VALIDATION_HANDLER_PREFIX, expected_status_int=200) + self.logout() + + def test_blog_post_page_access_as_blog_post_editor(self) -> None: + self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) + self.add_user_role( + self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR) + 
self.login(self.BLOG_EDITOR_EMAIL) + self.get_html_response( + '%s/can_access_blog_post_page?blog_post_url_fragment=sample-url' % + ACCESS_VALIDATION_HANDLER_PREFIX, expected_status_int=200) + self.logout() + + def test_validation_returns_false_if_blog_post_is_not_available( + self + ) -> None: + self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) + self.add_user_role( + self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR) + self.login(self.BLOG_EDITOR_EMAIL) + + self.get_json( + '%s/can_access_blog_post_page?blog_post_url_fragment=invalid-url' % + ACCESS_VALIDATION_HANDLER_PREFIX, expected_status_int=404) + self.logout() + + +class BlogAuthorProfilePageAccessValidationHandlerTests( + test_utils.GenericTestBase): + """Checks the access to the blog author profile page and its rendering.""" + + def setUp(self) -> None: + super().setUp() + self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME) + self.add_user_role( + self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN) + + def test_blog_author_profile_page_access_without_logging_in(self) -> None: + self.get_html_response( + '%s/can_access_blog_author_profile_page/%s' % ( + ACCESS_VALIDATION_HANDLER_PREFIX, self.BLOG_ADMIN_USERNAME + ), expected_status_int=200 + ) + + def test_blog_author_profile_page_access_after_logging_in(self) -> None: + self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) + self.login(self.VIEWER_EMAIL) + self.get_html_response( + '%s/can_access_blog_author_profile_page/%s' % ( + ACCESS_VALIDATION_HANDLER_PREFIX, self.BLOG_ADMIN_USERNAME + ), expected_status_int=200 + ) + self.logout() + + def test_blog_author_profile_page_access_as_blog_admin(self) -> None: + self.login(self.BLOG_ADMIN_EMAIL) + self.get_html_response( + '%s/can_access_blog_author_profile_page/%s' % ( + ACCESS_VALIDATION_HANDLER_PREFIX, self.BLOG_ADMIN_USERNAME + ), expected_status_int=200 + ) + self.logout() + + def test_validation_returns_false_if_given_user_is_not_blog_post_author( + self + ) -> None: + 
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) + self.login(self.VIEWER_EMAIL) + self.get_json( + '%s/can_access_blog_author_profile_page/%s' % ( + ACCESS_VALIDATION_HANDLER_PREFIX, self.VIEWER_USERNAME + ), expected_status_int=404 + ) + self.logout() + + def test_validation_returns_false_if_given_user_is_non_existent( + self + ) -> None: + self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) + self.login(self.VIEWER_EMAIL) + self.get_json( + '%s/can_access_blog_author_profile_page/invalid_username' % ( + ACCESS_VALIDATION_HANDLER_PREFIX + ), expected_status_int=404 + ) + self.logout() diff --git a/core/controllers/acl_decorators.py b/core/controllers/acl_decorators.py index 2df53f50d069..43f715f12196 100644 --- a/core/controllers/acl_decorators.py +++ b/core/controllers/acl_decorators.py @@ -30,6 +30,7 @@ from core.domain import blog_services from core.domain import classifier_services from core.domain import classroom_services +from core.domain import email_manager from core.domain import feedback_services from core.domain import question_services from core.domain import rights_manager @@ -45,11 +46,23 @@ from core.domain import topic_services from core.domain import user_services -from typing import Any, Callable # isort: skip +from typing import Any, Callable, Dict, List, Optional, Type, TypeVar + +# Note: '_SelfBaseHandlerType' is a private type variable because it is only +# supposed to denote the 'self' argument of the handler function that the +# decorator is decorating. So, do not make it public type variable in future. +_SelfBaseHandlerType = Type[base.BaseHandler] +# Note: '_GenericHandlerFunctionReturnType' is a private type variable because +# it is only supposed to denote the return type of handler function that the +# decorator is decorating. So, do not make it public type variable in future. 
+_GenericHandlerFunctionReturnType = TypeVar('_GenericHandlerFunctionReturnType') def _redirect_based_on_return_type( - handler, redirection_url, expected_return_type): + handler: _SelfBaseHandlerType, + redirection_url: str, + expected_return_type: str +) -> None: """Redirects to the provided URL if the handler type is not JSON. Args: @@ -63,11 +76,13 @@ def _redirect_based_on_return_type( """ if expected_return_type == feconf.HANDLER_TYPE_JSON: raise handler.PageNotFoundException - else: - handler.redirect(redirection_url) + + handler.redirect(redirection_url) -def open_access(handler): +def open_access( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to give access to everyone. Args: @@ -78,7 +93,14 @@ def open_access(handler): everyone. """ - def test_can_access(self, *args, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access( + self: _SelfBaseHandlerType, + *args: Any, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Gives access to everyone. Args: @@ -89,12 +111,13 @@ def test_can_access(self, *args, **kwargs): *. The return value of the decorated function. """ return handler(self, *args, **kwargs) - test_can_access.__wrapped__ = True return test_can_access -def is_source_mailchimp(handler): +def is_source_mailchimp( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the request was generated from Mailchimp. Args: @@ -104,7 +127,12 @@ def is_source_mailchimp(handler): function. The newly decorated function. """ - def test_is_source_mailchimp(self, secret, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_is_source_mailchimp( + self: _SelfBaseHandlerType, secret: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the request was generated from Mailchimp. Args: @@ -115,21 +143,18 @@ def test_is_source_mailchimp(self, secret, **kwargs): Returns: *. The return value of the decorated function. """ - if feconf.MAILCHIMP_WEBHOOK_SECRET is None: + if not email_manager.verify_mailchimp_secret(secret): + logging.error('Received invalid Mailchimp webhook secret') raise self.PageNotFoundException - elif secret != feconf.MAILCHIMP_WEBHOOK_SECRET: - logging.error( - 'Invalid Mailchimp webhook request received with secret: %s' - % secret) - raise self.PageNotFoundException - else: - return handler(self, secret, **kwargs) - test_is_source_mailchimp.__wrapped__ = True + + return handler(self, secret, **kwargs) return test_is_source_mailchimp -def does_classroom_exist(handler): +def does_classroom_exist( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether classroom exists. Args: @@ -139,7 +164,14 @@ def does_classroom_exist(handler): function. The newly decorated function. """ - def test_does_classroom_exist(self, classroom_url_fragment, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_does_classroom_exist( + self: _SelfBaseHandlerType, + classroom_url_fragment: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if classroom url fragment provided is valid. If so, return handler or else redirect to the correct classroom. @@ -149,6 +181,10 @@ def test_does_classroom_exist(self, classroom_url_fragment, **kwargs): Returns: handler. function. The newly decorated function. + + Raises: + Exception. This decorator is not expected to be used with other + handler types. 
""" classroom = classroom_services.get_classroom_by_url_fragment( classroom_url_fragment) @@ -160,20 +196,21 @@ def test_does_classroom_exist(self, classroom_url_fragment, **kwargs): # the access validation handler endpoint. if self.GET_HANDLER_ERROR_RETURN_TYPE == feconf.HANDLER_TYPE_JSON: raise self.PageNotFoundException - else: - # As this decorator is not expected to be used with other - # handler types, raising an error here. - raise Exception( - 'does_classroom_exist decorator is only expected to ' - 'be used with json return type handlers.') + + # As this decorator is not expected to be used with other + # handler types, raising an error here. + raise Exception( + 'does_classroom_exist decorator is only expected to ' + 'be used with json return type handlers.') return handler(self, classroom_url_fragment, **kwargs) - test_does_classroom_exist.__wrapped__ = True return test_does_classroom_exist -def can_play_exploration(handler): +def can_play_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can play given exploration. Args: @@ -184,7 +221,62 @@ def can_play_exploration(handler): play a given exploration. """ - def test_can_play(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_play( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: + """Checks if the user can play the exploration. + + Args: + exploration_id: str. The exploration id. + **kwargs: *. Keyword arguments. + + Returns: + *. The return value of the decorated function. + + Raises: + PageNotFoundException. The page is not found. 
+ """ + if exploration_id in feconf.DISABLED_EXPLORATION_IDS: + raise self.PageNotFoundException + + exploration_rights = rights_manager.get_exploration_rights( + exploration_id, strict=False) + + if exploration_rights is None: + raise self.PageNotFoundException + + if rights_manager.check_can_access_activity( + self.user, exploration_rights): + return handler(self, exploration_id, **kwargs) + else: + raise self.PageNotFoundException + + return test_can_play + + +def can_play_exploration_as_logged_in_user( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: + """Decorator to check whether user can play given exploration if the user + is logged in. + + Args: + handler: function. The function to be decorated. + + Returns: + function. The newly decorated function that now can check if users can + play a given exploration if the user is logged in. + """ + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_play( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can play the exploration. Args: @@ -196,7 +288,11 @@ def test_can_play(self, exploration_id, **kwargs): Raises: PageNotFoundException. The page is not found. + NotLoggedInException. The user is not logged in. 
""" + if self.user_id is None: + raise self.NotLoggedInException + if exploration_id in feconf.DISABLED_EXPLORATION_IDS: raise self.PageNotFoundException @@ -211,12 +307,13 @@ def test_can_play(self, exploration_id, **kwargs): return handler(self, exploration_id, **kwargs) else: raise self.PageNotFoundException - test_can_play.__wrapped__ = True return test_can_play -def can_view_skills(handler): +def can_view_skills( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can view multiple given skills. Args: @@ -227,12 +324,18 @@ def can_view_skills(handler): can view multiple given skills. """ - def test_can_view(self, comma_separated_skill_ids, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_view( + self: _SelfBaseHandlerType, + selected_skill_ids: List[str], + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can view the skills. Args: - comma_separated_skill_ids: str. The skill ids - separated by commas. + selected_skill_ids: list(str). List of skill ids. **kwargs: *. Keyword arguments. Returns: @@ -244,26 +347,26 @@ def test_can_view(self, comma_separated_skill_ids, **kwargs): # This is a temporary check, since a decorator is required for every # method. Once skill publishing is done, whether given skill is # published should be checked here. 
- skill_ids = comma_separated_skill_ids.split(',') try: - for skill_id in skill_ids: + for skill_id in selected_skill_ids: skill_domain.Skill.require_valid_skill_id(skill_id) - except utils.ValidationError: - raise self.InvalidInputException + except utils.ValidationError as e: + raise self.InvalidInputException(e) try: - skill_fetchers.get_multi_skills(skill_ids) + skill_fetchers.get_multi_skills(selected_skill_ids) except Exception as e: raise self.PageNotFoundException(e) - return handler(self, comma_separated_skill_ids, **kwargs) - test_can_view.__wrapped__ = True + return handler(self, selected_skill_ids, **kwargs) return test_can_view -def can_play_collection(handler): +def can_play_collection( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can play given collection. Args: @@ -274,7 +377,12 @@ def can_play_collection(handler): play a given collection. """ - def test_can_play(self, collection_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_play( + self: _SelfBaseHandlerType, collection_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can play the collection. Args: @@ -298,12 +406,13 @@ def test_can_play(self, collection_id, **kwargs): return handler(self, collection_id, **kwargs) else: raise self.PageNotFoundException - test_can_play.__wrapped__ = True return test_can_play -def can_download_exploration(handler): +def can_download_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can download given exploration. If a user is authorized to play given exploration, they can download it. @@ -315,7 +424,12 @@ def can_download_exploration(handler): has permission to download a given exploration. 
""" - def test_can_download(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_download( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can download the exploration. Args: @@ -342,12 +456,13 @@ def test_can_download(self, exploration_id, **kwargs): return handler(self, exploration_id, **kwargs) else: raise self.PageNotFoundException - test_can_download.__wrapped__ = True return test_can_download -def can_view_exploration_stats(handler): +def can_view_exploration_stats( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can view exploration stats. If a user is authorized to play given exploration, they can view its stats. @@ -359,7 +474,12 @@ def can_view_exploration_stats(handler): has permission to view exploration stats. """ - def test_can_view_stats(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_view_stats( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can view the exploration stats. Args: @@ -386,12 +506,13 @@ def test_can_view_stats(self, exploration_id, **kwargs): return handler(self, exploration_id, **kwargs) else: raise base.UserFacingExceptions.PageNotFoundException - test_can_view_stats.__wrapped__ = True return test_can_view_stats -def can_edit_collection(handler): +def can_edit_collection( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can edit collection. 
Args: @@ -402,7 +523,12 @@ def can_edit_collection(handler): permission to edit a given collection. """ - def test_can_edit(self, collection_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_edit( + self: _SelfBaseHandlerType, collection_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can edit the collection. Args: @@ -431,12 +557,13 @@ def test_can_edit(self, collection_id, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to edit this collection.') - test_can_edit.__wrapped__ = True return test_can_edit -def can_manage_email_dashboard(handler): +def can_manage_email_dashboard( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can access email dashboard. Args: @@ -447,7 +574,12 @@ def can_manage_email_dashboard(handler): permission to access the email dashboard. """ - def test_can_manage_emails(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_manage_emails( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can access email dashboard. Args: @@ -469,12 +601,13 @@ def test_can_manage_emails(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to access email dashboard.') - test_can_manage_emails.__wrapped__ = True return test_can_manage_emails -def can_access_blog_admin_page(handler): +def can_access_blog_admin_page( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can access blog admin page. 
Args: @@ -485,7 +618,12 @@ def can_access_blog_admin_page(handler): permission to access the blog admin page. """ - def test_can_access_blog_admin_page(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access_blog_admin_page( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can access blog admin page. Args: @@ -507,12 +645,13 @@ def test_can_access_blog_admin_page(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to access blog admin page.') - test_can_access_blog_admin_page.__wrapped__ = True return test_can_access_blog_admin_page -def can_manage_blog_post_editors(handler): +def can_manage_blog_post_editors( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can add and remove users as blog post editors. @@ -524,7 +663,12 @@ def can_manage_blog_post_editors(handler): permission to manage blog post editors. """ - def test_can_manage_blog_post_editors(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_manage_blog_post_editors( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can add and remove users as blog post editors. 
@@ -547,12 +691,13 @@ def test_can_manage_blog_post_editors(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to add or remove blog post editors.') - test_can_manage_blog_post_editors.__wrapped__ = True return test_can_manage_blog_post_editors -def can_access_blog_dashboard(handler): +def can_access_blog_dashboard( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can access blog dashboard. Args: @@ -562,7 +707,13 @@ def can_access_blog_dashboard(handler): function. The newly decorated function that now checks if the user has permission to access the blog dashboard. """ - def test_can_access_blog_dashboard(self, **kwargs): + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access_blog_dashboard( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can access blog dashboard. Args: @@ -584,12 +735,13 @@ def test_can_access_blog_dashboard(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to access blog dashboard page.') - test_can_access_blog_dashboard.__wrapped__ = True return test_can_access_blog_dashboard -def can_delete_blog_post(handler): +def can_delete_blog_post( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can delete blog post. Args: @@ -599,7 +751,13 @@ def can_delete_blog_post(handler): function. The newly decorated function that checks if a user has permission to delete a given blog post. """ - def test_can_delete(self, blog_post_id, **kwargs): + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_delete( + self: _SelfBaseHandlerType, blog_post_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can delete the blog post. Args: @@ -632,12 +790,13 @@ def test_can_delete(self, blog_post_id, **kwargs): raise base.UserFacingExceptions.UnauthorizedUserException( 'User %s does not have permissions to delete blog post %s' % (self.user_id, blog_post_id)) - test_can_delete.__wrapped__ = True return test_can_delete -def can_edit_blog_post(handler): +def can_edit_blog_post( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can edit blog post. Args: @@ -647,7 +806,13 @@ def can_edit_blog_post(handler): function. The newly decorated function that checks if a user has permission to edit a given blog post. """ - def test_can_edit(self, blog_post_id, **kwargs): + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_edit( + self: _SelfBaseHandlerType, blog_post_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can edit the blog post. Args: @@ -680,12 +845,13 @@ def test_can_edit(self, blog_post_id, **kwargs): raise base.UserFacingExceptions.UnauthorizedUserException( 'User %s does not have permissions to edit blog post %s' % (self.user_id, blog_post_id)) - test_can_edit.__wrapped__ = True return test_can_edit -def can_access_moderator_page(handler): +def can_access_moderator_page( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can access moderator page. Args: @@ -696,7 +862,12 @@ def can_access_moderator_page(handler): permission to access the moderator page. 
""" - def test_can_access_moderator_page(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access_moderator_page( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can access moderator page. Args: @@ -718,12 +889,13 @@ def test_can_access_moderator_page(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to access moderator page.') - test_can_access_moderator_page.__wrapped__ = True return test_can_access_moderator_page -def can_access_release_coordinator_page(handler): +def can_access_release_coordinator_page( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can access release coordinator page. Args: @@ -734,7 +906,12 @@ def can_access_release_coordinator_page(handler): permission to access the release coordinator page. """ - def test_can_access_release_coordinator_page(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access_release_coordinator_page( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can access release coordinator page. 
@@ -758,12 +935,13 @@ def test_can_access_release_coordinator_page(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to access release coordinator page.') - test_can_access_release_coordinator_page.__wrapped__ = True return test_can_access_release_coordinator_page -def can_manage_memcache(handler): +def can_manage_memcache( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can can manage memcache. Args: @@ -774,7 +952,12 @@ def can_manage_memcache(handler): permission to manage memcache. """ - def test_can_manage_memcache(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_manage_memcache( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can manage memcache. Args: @@ -796,12 +979,13 @@ def test_can_manage_memcache(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to manage memcache.') - test_can_manage_memcache.__wrapped__ = True return test_can_manage_memcache -def can_run_any_job(handler: Callable[..., None]) -> Callable[..., None]: +def can_run_any_job( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can can run any job. Args: @@ -812,9 +996,12 @@ def can_run_any_job(handler: Callable[..., None]) -> Callable[..., None]: permission to run any job. """ + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) def test_can_run_any_job( - self: base.BaseHandler, *args: Any, **kwargs: Any - ) -> None: + self: _SelfBaseHandlerType, *args: Any, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can run any job. Args: @@ -837,12 +1024,13 @@ def test_can_run_any_job( raise self.UnauthorizedUserException( 'You do not have credentials to run jobs.') - setattr(test_can_run_any_job, '__wrapped__', True) return test_can_run_any_job -def can_send_moderator_emails(handler): +def can_send_moderator_emails( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can send moderator emails. Args: @@ -853,7 +1041,12 @@ def can_send_moderator_emails(handler): has permission to send moderator emails. """ - def test_can_send_moderator_emails(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_send_moderator_emails( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can send moderator emails. Args: @@ -875,12 +1068,13 @@ def test_can_send_moderator_emails(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to send moderator emails.') - test_can_send_moderator_emails.__wrapped__ = True return test_can_send_moderator_emails -def can_manage_own_account(handler): +def can_manage_own_account( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can manage their account. Args: @@ -891,7 +1085,12 @@ def can_manage_own_account(handler): has permission to manage their account. 
""" - def test_can_manage_account(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_manage_account( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and can manage their account. Args: @@ -913,12 +1112,13 @@ def test_can_manage_account(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to manage account or preferences.') - test_can_manage_account.__wrapped__ = True return test_can_manage_account -def can_access_admin_page(handler): +def can_access_admin_page( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator that checks if the current user is a super admin. Args: @@ -929,7 +1129,12 @@ def can_access_admin_page(handler): is a super admin. """ - def test_super_admin(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_super_admin( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and is a super admin. Args: @@ -950,12 +1155,13 @@ def test_super_admin(self, **kwargs): raise self.UnauthorizedUserException( '%s is not a super admin of this application' % self.user_id) return handler(self, **kwargs) - test_super_admin.__wrapped__ = True return test_super_admin -def can_access_contributor_dashboard_admin_page(handler): +def can_access_contributor_dashboard_admin_page( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator that checks if the user can access the contributor dashboard admin page. @@ -967,7 +1173,12 @@ def can_access_contributor_dashboard_admin_page(handler): access the contributor dashboard admin page. 
""" - def test_can_access_contributor_dashboard_admin_page(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access_contributor_dashboard_admin_page( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can access the contributor dashboard admin page. Args: @@ -992,12 +1203,12 @@ def test_can_access_contributor_dashboard_admin_page(self, **kwargs): 'You do not have credentials to access contributor dashboard ' 'admin page.') - test_can_access_contributor_dashboard_admin_page.__wrapped__ = True - return test_can_access_contributor_dashboard_admin_page -def can_manage_contributors_role(handler): +def can_manage_contributors_role( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator that checks if the current user can modify contributor's role for the contributor dashboard page. @@ -1009,7 +1220,12 @@ def can_manage_contributors_role(handler): can modify contributor's role for the contributor dashboard page. """ - def test_can_manage_contributors_role(self, category, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_manage_contributors_role( + self: _SelfBaseHandlerType, category: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can modify contributor's role for the contributor dashboard page. 
@@ -1045,12 +1261,13 @@ def test_can_manage_contributors_role(self, category, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to modify contributor\'s role.') - test_can_manage_contributors_role.__wrapped__ = True return test_can_manage_contributors_role -def can_delete_any_user(handler): +def can_delete_any_user( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator that checks if the current user can delete any user. Args: @@ -1061,7 +1278,12 @@ def can_delete_any_user(handler): can delete any user. """ - def test_primary_admin(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_primary_admin( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user is logged in and is a primary admin e.g. user with email address equal to feconf.SYSTEM_EMAIL_ADDRESS. @@ -1085,12 +1307,13 @@ def test_primary_admin(self, **kwargs): '%s cannot delete any user.' % self.user_id) return handler(self, **kwargs) - test_primary_admin.__wrapped__ = True return test_primary_admin -def can_upload_exploration(handler): +def can_upload_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator that checks if the current user can upload exploration. Args: @@ -1101,7 +1324,12 @@ def can_upload_exploration(handler): has permission to upload an exploration. """ - def test_can_upload(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_upload( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can upload exploration. 
Args: @@ -1122,12 +1350,13 @@ def test_can_upload(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to upload explorations.') return handler(self, **kwargs) - test_can_upload.__wrapped__ = True return test_can_upload -def can_create_exploration(handler): +def can_create_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can create an exploration. Args: @@ -1138,7 +1367,12 @@ def can_create_exploration(handler): has permission to create an exploration. """ - def test_can_create(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_create( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can create an exploration. Args: @@ -1160,12 +1394,13 @@ def test_can_create(self, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to create an exploration.') - test_can_create.__wrapped__ = True return test_can_create -def can_create_collection(handler): +def can_create_collection( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can create a collection. Args: @@ -1176,7 +1411,12 @@ def can_create_collection(handler): has permission to create a collection. """ - def test_can_create(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_create( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can create a collection. 
Args: @@ -1198,12 +1438,13 @@ def test_can_create(self, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to create a collection.') - test_can_create.__wrapped__ = True return test_can_create -def can_access_creator_dashboard(handler): +def can_access_creator_dashboard( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can access creator dashboard page. Args: @@ -1214,7 +1455,12 @@ def can_access_creator_dashboard(handler): user has permission to access the creator dashboard page. """ - def test_can_access(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can access the creator dashboard page. Args: @@ -1236,12 +1482,13 @@ def test_can_access(self, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to access creator dashboard.') - test_can_access.__wrapped__ = True return test_can_access -def can_create_feedback_thread(handler): +def can_create_feedback_thread( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can create a feedback thread. Args: @@ -1252,7 +1499,12 @@ def can_create_feedback_thread(handler): has permission to create a feedback thread. """ - def test_can_access(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_access( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can create a feedback thread. Args: @@ -1279,12 +1531,13 @@ def test_can_access(self, exploration_id, **kwargs): else: raise self.UnauthorizedUserException( 'You do not have credentials to create exploration feedback.') - test_can_access.__wrapped__ = True return test_can_access -def can_view_feedback_thread(handler): +def can_view_feedback_thread( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can view a feedback thread. Args: @@ -1295,7 +1548,12 @@ def can_view_feedback_thread(handler): has permission to view a feedback thread. """ - def test_can_access(self, thread_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access( + self: _SelfBaseHandlerType, thread_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can view a feedback thread. Args: @@ -1337,12 +1595,13 @@ def test_can_access(self, thread_id, **kwargs): else: raise self.UnauthorizedUserException( 'You do not have credentials to view exploration feedback.') - test_can_access.__wrapped__ = True return test_can_access -def can_comment_on_feedback_thread(handler): +def can_comment_on_feedback_thread( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can comment on feedback thread. Args: @@ -1353,7 +1612,12 @@ def can_comment_on_feedback_thread(handler): has permission to comment on a given feedback thread. """ - def test_can_access(self, thread_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_access( + self: _SelfBaseHandlerType, thread_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can comment on the feedback thread. Args: @@ -1395,12 +1659,13 @@ def test_can_access(self, thread_id, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to comment on exploration' ' feedback.') - test_can_access.__wrapped__ = True return test_can_access -def can_rate_exploration(handler): +def can_rate_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can give rating to given exploration. @@ -1412,7 +1677,12 @@ def can_rate_exploration(handler): has permission to rate a given exploration. """ - def test_can_rate(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_rate( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can rate the exploration. Args: @@ -1432,12 +1702,13 @@ def test_can_rate(self, exploration_id, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to give ratings to explorations.') - test_can_rate.__wrapped__ = True return test_can_rate -def can_flag_exploration(handler): +def can_flag_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can flag given exploration. Args: @@ -1448,7 +1719,12 @@ def can_flag_exploration(handler): a user can flag a given exploration. """ - def test_can_flag(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_flag( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can flag the exploration. Args: @@ -1467,12 +1743,13 @@ def test_can_flag(self, exploration_id, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to flag explorations.') - test_can_flag.__wrapped__ = True return test_can_flag -def can_subscribe_to_users(handler): +def can_subscribe_to_users( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can subscribe/unsubscribe a creator. Args: @@ -1483,7 +1760,12 @@ def can_subscribe_to_users(handler): has permission to subscribe/unsubscribe a creator. """ - def test_can_subscribe(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_subscribe( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can subscribe/unsubscribe a creator. Args: @@ -1501,12 +1783,13 @@ def test_can_subscribe(self, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to manage subscriptions.') - test_can_subscribe.__wrapped__ = True return test_can_subscribe -def can_edit_exploration(handler): +def can_edit_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can edit given exploration. Args: @@ -1517,7 +1800,15 @@ def can_edit_exploration(handler): a user has permission to edit a given exploration. """ - def test_can_edit(self, exploration_id, *args, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_edit( + self: _SelfBaseHandlerType, + exploration_id: str, + *args: Any, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can edit the exploration. Args: @@ -1549,12 +1840,13 @@ def test_can_edit(self, exploration_id, *args, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to edit this exploration.') - test_can_edit.__wrapped__ = True return test_can_edit -def can_voiceover_exploration(handler): +def can_voiceover_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can voiceover given exploration. Args: @@ -1565,7 +1857,12 @@ def can_voiceover_exploration(handler): has permission to voiceover a given exploration. """ - def test_can_voiceover(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_voiceover( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can voiceover the exploration. Args: @@ -1595,24 +1892,34 @@ def test_can_voiceover(self, exploration_id, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to voiceover this exploration.') - test_can_voiceover.__wrapped__ = True return test_can_voiceover -def can_manage_voice_artist(handler): - """Decorator to check whether the user can manage voice artist. +def can_add_voice_artist( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: + """Decorator to check whether the user can add voice artist to + the given activity. Args: handler: function. The function to be decorated. Returns: function. 
The newly decorated function that now also checks if a user - has permission to manage voice artist. + has permission to add voice artist. """ - def test_can_manage_voice_artist(self, entity_type, entity_id, **kwargs): - """Checks if the user can manage a voice artist for the given entity. + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_add_voice_artist( + self: _SelfBaseHandlerType, + entity_type: str, + entity_id: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: + """Checks if the user can add a voice artist for the given entity. Args: entity_type: str. The type of entity. @@ -1626,6 +1933,7 @@ def test_can_manage_voice_artist(self, entity_type, entity_id, **kwargs): NotLoggedInException. The user is not logged in. InvalidInputException. The given entity type is not supported. PageNotFoundException. The page is not found. + InvalidInputException. The given exploration is private. UnauthorizedUserException. The user does not have the credentials to manage voice artist. 
""" @@ -1641,18 +1949,84 @@ def test_can_manage_voice_artist(self, entity_type, entity_id, **kwargs): if exploration_rights is None: raise base.UserFacingExceptions.PageNotFoundException + if exploration_rights.is_private(): + raise base.UserFacingExceptions.InvalidInputException( + 'Could not assign voice artist to private activity.') if rights_manager.check_can_manage_voice_artist_in_activity( self.user, exploration_rights): return handler(self, entity_type, entity_id, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to manage voice artists.') - test_can_manage_voice_artist.__wrapped__ = True - return test_can_manage_voice_artist + return test_can_add_voice_artist + +def can_remove_voice_artist( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: + """Decorator to check whether the user can remove voice artist + from the given activity. -def can_save_exploration(handler): + Args: + handler: function. The function to be decorated. + + Returns: + function. The newly decorated function that now also checks if a user + has permission to remove voice artist. + """ + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_remove_voice_artist( + self: _SelfBaseHandlerType, + entity_type: str, + entity_id: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: + """Checks if the user can remove a voice artist for the given entity. + + Args: + entity_type: str. The type of entity. + entity_id: str. The Id of the entity. + **kwargs: dict(str: *). Keyword arguments. + + Returns: + *. The return value of the decorated function. + + Raises: + NotLoggedInException. The user is not logged in. + InvalidInputException. The given entity type is not supported. + PageNotFoundException. The page is not found. + UnauthorizedUserException. 
The user does not have the credentials + to manage voice artist. + """ + if not self.user_id: + raise base.UserFacingExceptions.NotLoggedInException + + if entity_type != feconf.ENTITY_TYPE_EXPLORATION: + raise self.InvalidInputException( + 'Unsupported entity_type: %s' % entity_type) + + exploration_rights = rights_manager.get_exploration_rights( + entity_id, strict=False) + if exploration_rights is None: + raise base.UserFacingExceptions.PageNotFoundException + + if rights_manager.check_can_manage_voice_artist_in_activity( + self.user, exploration_rights): + return handler(self, entity_type, entity_id, **kwargs) + else: + raise base.UserFacingExceptions.UnauthorizedUserException( + 'You do not have credentials to manage voice artists.') + + return test_can_remove_voice_artist + + +def can_save_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can save exploration. Args: @@ -1663,7 +2037,14 @@ def can_save_exploration(handler): a user has permission to save a given exploration. """ - def test_can_save(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_save( + self: _SelfBaseHandlerType, + exploration_id: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can save the exploration. Args: @@ -1695,12 +2076,12 @@ def test_can_save(self, exploration_id, **kwargs): raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have permissions to save this exploration.') - test_can_save.__wrapped__ = True - return test_can_save -def can_delete_exploration(handler): +def can_delete_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can delete exploration. 
Args: @@ -1711,7 +2092,12 @@ def can_delete_exploration(handler): permission to delete a given exploration. """ - def test_can_delete(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_delete( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can delete the exploration. Args: @@ -1739,12 +2125,13 @@ def test_can_delete(self, exploration_id, **kwargs): raise base.UserFacingExceptions.UnauthorizedUserException( 'User %s does not have permissions to delete exploration %s' % (self.user_id, exploration_id)) - test_can_delete.__wrapped__ = True return test_can_delete -def can_suggest_changes_to_exploration(handler): +def can_suggest_changes_to_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether a user can make suggestions to an exploration. @@ -1756,7 +2143,12 @@ def can_suggest_changes_to_exploration(handler): has permission to make suggestions to an exploration. """ - def test_can_suggest(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_suggest( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can make suggestions to an exploration. 
Args: @@ -1776,12 +2168,13 @@ def test_can_suggest(self, exploration_id, **kwargs): raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to give suggestions to this ' 'exploration.') - test_can_suggest.__wrapped__ = True return test_can_suggest -def can_suggest_changes(handler): +def can_suggest_changes( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether a user can make suggestions. Args: @@ -1792,7 +2185,12 @@ def can_suggest_changes(handler): has permission to make suggestions. """ - def test_can_suggest(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_suggest( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can make suggestions to an exploration. Args: @@ -1810,15 +2208,21 @@ def test_can_suggest(self, **kwargs): else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to make suggestions.') - test_can_suggest.__wrapped__ = True return test_can_suggest -def can_resubmit_suggestion(handler): +def can_resubmit_suggestion( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether a user can resubmit a suggestion.""" - def test_can_resubmit_suggestion(self, suggestion_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_resubmit_suggestion( + self: _SelfBaseHandlerType, suggestion_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can edit the given suggestion. Args: @@ -1832,23 +2236,26 @@ def test_can_resubmit_suggestion(self, suggestion_id, **kwargs): UnauthorizedUserException. 
The user does not have credentials to edit this suggestion. """ - suggestion = suggestion_services.get_suggestion_by_id(suggestion_id) - if not suggestion: + suggestion = suggestion_services.get_suggestion_by_id( + suggestion_id, strict=False + ) + if suggestion is None: raise self.InvalidInputException( 'No suggestion found with given suggestion id') - if suggestion_services.check_can_resubmit_suggestion( + if self.user_id and suggestion_services.check_can_resubmit_suggestion( suggestion_id, self.user_id): return handler(self, suggestion_id, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to resubmit this suggestion.') - test_can_resubmit_suggestion.__wrapped__ = True return test_can_resubmit_suggestion -def can_publish_exploration(handler): +def can_publish_exploration( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can publish exploration. Args: @@ -1859,7 +2266,15 @@ def can_publish_exploration(handler): has permission to publish an exploration. """ - def test_can_publish(self, exploration_id, *args, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_publish( + self: _SelfBaseHandlerType, + exploration_id: str, + *args: Any, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can publish the exploration. 
Args: @@ -1887,12 +2302,13 @@ def test_can_publish(self, exploration_id, *args, **kwargs): raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to publish this exploration.') - test_can_publish.__wrapped__ = True return test_can_publish -def can_publish_collection(handler): +def can_publish_collection( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can publish collection. Args: @@ -1903,7 +2319,12 @@ def can_publish_collection(handler): has permission to publish a collection. """ - def test_can_publish_collection(self, collection_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_publish_collection( + self: _SelfBaseHandlerType, collection_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can publish the collection. Args: @@ -1929,12 +2350,13 @@ def test_can_publish_collection(self, collection_id, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to publish this collection.') - test_can_publish_collection.__wrapped__ = True return test_can_publish_collection -def can_unpublish_collection(handler): +def can_unpublish_collection( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can unpublish a given collection. @@ -1946,7 +2368,12 @@ def can_unpublish_collection(handler): the user has permission to unpublish a collection. """ - def test_can_unpublish_collection(self, collection_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_unpublish_collection( + self: _SelfBaseHandlerType, collection_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can unpublish the collection. Args: @@ -1972,12 +2399,13 @@ def test_can_unpublish_collection(self, collection_id, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to unpublish this collection.') - test_can_unpublish_collection.__wrapped__ = True return test_can_unpublish_collection -def can_modify_exploration_roles(handler): +def can_modify_exploration_roles( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorators to check whether user can manage rights related to an exploration. @@ -1990,7 +2418,12 @@ def can_modify_exploration_roles(handler): exploration. """ - def test_can_modify(self, exploration_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_modify( + self: _SelfBaseHandlerType, exploration_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can modify the rights related to an exploration. Args: @@ -2014,12 +2447,13 @@ def test_can_modify(self, exploration_id, **kwargs): raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to change rights for this ' 'exploration.') - test_can_modify.__wrapped__ = True return test_can_modify -def can_perform_tasks_in_taskqueue(handler): +def can_perform_tasks_in_taskqueue( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to ensure that the handler is being called by task scheduler or by a superadmin of the application. @@ -2032,7 +2466,12 @@ def can_perform_tasks_in_taskqueue(handler): a superadmin of the application. 
""" - def test_can_perform(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_perform( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the handler is called by task scheduler or by a superadmin of the application. @@ -2054,14 +2493,15 @@ def test_can_perform(self, **kwargs): not self.current_user_is_super_admin): raise self.UnauthorizedUserException( 'You do not have the credentials to access this page.') - else: - return handler(self, **kwargs) - test_can_perform.__wrapped__ = True + + return handler(self, **kwargs) return test_can_perform -def can_perform_cron_tasks(handler): +def can_perform_cron_tasks( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to ensure that the handler is being called by cron or by a superadmin of the application. @@ -2074,7 +2514,12 @@ def can_perform_cron_tasks(handler): a superadmin of the application. """ - def test_can_perform(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_perform( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the handler is called by cron or by a superadmin of the application. @@ -2097,12 +2542,13 @@ def test_can_perform(self, **kwargs): 'You do not have the credentials to access this page.') return handler(self, **kwargs) - test_can_perform.__wrapped__ = True return test_can_perform -def can_access_learner_dashboard(handler): +def can_access_learner_dashboard( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check access to learner dashboard. 
Args: @@ -2113,7 +2559,12 @@ def can_access_learner_dashboard(handler): one can access the learner dashboard. """ - def test_can_access(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can access the learner dashboard. Args: @@ -2124,17 +2575,68 @@ def test_can_access(self, **kwargs): Raises: NotLoggedInException. The user is not logged in. + UnauthorizedUserException. The user does not have + credentials to access the page. """ + if not self.user_id: + raise base.UserFacingExceptions.NotLoggedInException + if role_services.ACTION_ACCESS_LEARNER_DASHBOARD in self.user.actions: return handler(self, **kwargs) else: - raise self.NotLoggedInException - test_can_access.__wrapped__ = True + raise self.UnauthorizedUserException( + 'You do not have the credentials to access this page.') return test_can_access -def can_manage_question_skill_status(handler): +def can_access_learner_groups( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: + """Decorator to check access to learner groups. + + Args: + handler: function. The function to be decorated. + + Returns: + function. The newly decorated function that now also checks if + one can access the learner groups. + """ + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: + """Checks if the user can access the learner groups. + + Args: + **kwargs: *. Keyword arguments. + + Returns: + *. The return value of the decorated function. + + Raises: + NotLoggedInException. The user is not logged in. + UnauthorizedUserException. 
The user does not have + credentials to access the page. + """ + if not self.user_id: + raise base.UserFacingExceptions.NotLoggedInException + + if role_services.ACTION_ACCESS_LEARNER_GROUPS in self.user.actions: + return handler(self, **kwargs) + else: + raise self.UnauthorizedUserException( + 'You do not have the credentials to access this page.') + + return test_can_access + + +def can_manage_question_skill_status( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can publish a question and link it to a skill. @@ -2147,7 +2649,12 @@ def can_manage_question_skill_status(handler): to a skill. """ - def test_can_manage_question_skill_status(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_manage_question_skill_status( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can publish a question directly. Args: @@ -2171,12 +2678,13 @@ def test_can_manage_question_skill_status(self, **kwargs): else: raise self.UnauthorizedUserException( 'You do not have credentials to publish a question.') - test_can_manage_question_skill_status.__wrapped__ = True return test_can_manage_question_skill_status -def require_user_id_else_redirect_to_homepage(handler): +def require_user_id_else_redirect_to_homepage( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., Optional[_GenericHandlerFunctionReturnType]]: """Decorator that checks if a user_id is associated with the current session. If not, the user is redirected to the main page. Note that the user may not yet have registered. @@ -2190,7 +2698,12 @@ def require_user_id_else_redirect_to_homepage(handler): session. 
""" - def test_login(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_login( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> Optional[_GenericHandlerFunctionReturnType]: """Checks if the user for the current session is logged in. If not, redirects the user to the home page. @@ -2202,17 +2715,23 @@ def test_login(self, **kwargs): """ if not self.user_id: self.redirect('/') - return + return None return handler(self, **kwargs) - test_login.__wrapped__ = True return test_login -def can_edit_topic(handler): +def can_edit_topic( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can edit given topic.""" - def test_can_edit(self, topic_id, *args, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_edit( + self: _SelfBaseHandlerType, topic_id: str, *args: Any, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can edit a given topic. Args: @@ -2247,12 +2766,13 @@ def test_can_edit(self, topic_id, *args, **kwargs): else: raise self.UnauthorizedUserException( 'You do not have credentials to edit this topic.') - test_can_edit.__wrapped__ = True return test_can_edit -def can_edit_question(handler): +def can_edit_question( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can edit given question. Args: @@ -2263,7 +2783,12 @@ def can_edit_question(handler): whether the user has permission to edit a given question. """ - def test_can_edit(self, question_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_edit( + self: _SelfBaseHandlerType, question_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can edit the given question. Args: @@ -2291,12 +2816,13 @@ def test_can_edit(self, question_id, **kwargs): else: raise self.UnauthorizedUserException( 'You do not have credentials to edit this question.') - test_can_edit.__wrapped__ = True return test_can_edit -def can_play_question(handler): +def can_play_question( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can play given question. Args: @@ -2306,7 +2832,13 @@ def can_play_question(handler): function. The newly decorated function that now also checks whether the user can play a given question. """ - def test_can_play_question(self, question_id, **kwargs): + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_play_question( + self: _SelfBaseHandlerType, question_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can play the given question. Args: @@ -2324,11 +2856,13 @@ def test_can_play_question(self, question_id, **kwargs): if question is None: raise self.PageNotFoundException return handler(self, question_id, **kwargs) - test_can_play_question.__wrapped__ = True + return test_can_play_question -def can_view_question_editor(handler): +def can_view_question_editor( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can view any question editor. Args: @@ -2339,7 +2873,12 @@ def can_view_question_editor(handler): if the user has permission to view any question editor. 
""" - def test_can_view_question_editor(self, question_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_view_question_editor( + self: _SelfBaseHandlerType, question_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can view the question editor. Args: @@ -2369,12 +2908,13 @@ def test_can_view_question_editor(self, question_id, **kwargs): raise self.UnauthorizedUserException( '%s does not have enough rights to access the questions editor' % self.user_id) - test_can_view_question_editor.__wrapped__ = True return test_can_view_question_editor -def can_delete_question(handler): +def can_delete_question( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can delete a question. Args: @@ -2385,7 +2925,12 @@ def can_delete_question(handler): if the user has permission to delete a question. """ - def test_can_delete_question(self, question_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_delete_question( + self: _SelfBaseHandlerType, question_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can delete a given question. Args: @@ -2412,12 +2957,13 @@ def test_can_delete_question(self, question_id, **kwargs): raise self.UnauthorizedUserException( '%s does not have enough rights to delete the' ' question.' 
% self.user_id) - test_can_delete_question.__wrapped__ = True return test_can_delete_question -def can_add_new_story_to_topic(handler): +def can_add_new_story_to_topic( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can add a story to a given topic. Args: @@ -2428,7 +2974,12 @@ def can_add_new_story_to_topic(handler): if the user has permission to add a story to a given topic. """ - def test_can_add_story(self, topic_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_add_story( + self: _SelfBaseHandlerType, topic_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can add a story to a given topic. @@ -2463,12 +3014,13 @@ def test_can_add_story(self, topic_id, **kwargs): else: raise self.UnauthorizedUserException( 'You do not have credentials to add a story to this topic.') - test_can_add_story.__wrapped__ = True return test_can_add_story -def can_edit_story(handler): +def can_edit_story( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can edit a story belonging to a given topic. @@ -2480,7 +3032,12 @@ def can_edit_story(handler): a user has permission to edit a story for a given topic. """ - def test_can_edit_story(self, story_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_edit_story( + self: _SelfBaseHandlerType, story_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can edit a story belonging to a given topic. 
@@ -2520,12 +3077,13 @@ def test_can_edit_story(self, story_id, **kwargs): else: raise self.UnauthorizedUserException( 'You do not have credentials to edit this story.') - test_can_edit_story.__wrapped__ = True return test_can_edit_story -def can_edit_skill(handler): +def can_edit_skill( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can edit a skill, which can be independent or belong to a topic. @@ -2536,7 +3094,13 @@ def can_edit_skill(handler): function. The newly decorated function that now also checks if the user has permission to edit a skill. """ - def test_can_edit_skill(self, skill_id, **kwargs): + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_edit_skill( + self: _SelfBaseHandlerType, skill_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Test to see if user can edit a given skill by checking if logged in and using can_user_edit_skill. @@ -2562,11 +3126,58 @@ def test_can_edit_skill(self, skill_id, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to edit this skill.') - test_can_edit_skill.__wrapped__ = True return test_can_edit_skill -def can_delete_skill(handler): +def can_submit_images_to_questions( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: + """Decorator to check whether the user can submit images to questions. + + Args: + handler: function. The function to be decorated. + + Returns: + function. The newly decorated function that now also checks if + the user has permission to submit a question. + """ + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_submit_images_to_questions( + self: _SelfBaseHandlerType, skill_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: + """Test to see if user can submit images to questions. + + Args: + skill_id: str. The skill ID. + **kwargs: *. Keyword arguments. + + Returns: + *. The return value of the decorated function. + + Raises: + NotLoggedInException. The user is not logged in. + PageNotFoundException. The given page cannot be found. + UnauthorizedUserException. The user does not have the + credentials to edit the given skill. + """ + if not self.user_id: + raise base.UserFacingExceptions.NotLoggedInException + + if role_services.ACTION_SUGGEST_CHANGES in self.user.actions: + return handler(self, skill_id, **kwargs) + else: + raise self.UnauthorizedUserException( + 'You do not have credentials to submit images to questions.') + + return test_can_submit_images_to_questions + + +def can_delete_skill( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can delete a skill. Args: @@ -2577,7 +3188,12 @@ def can_delete_skill(handler): if the user can delete a skill. """ - def test_can_delete_skill(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_delete_skill( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can delete a skill. 
Args: @@ -2601,11 +3217,12 @@ def test_can_delete_skill(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to delete the skill.') - test_can_delete_skill.__wrapped__ = True return test_can_delete_skill -def can_create_skill(handler): +def can_create_skill( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can create a skill, which can be independent or added to a topic. @@ -2616,7 +3233,13 @@ def can_create_skill(handler): function. The newly decorated function that now also checks if the user has permission to create a skill. """ - def test_can_create_skill(self, **kwargs): + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_create_skill( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can create a skill, which can be independent or belong to a topic. @@ -2641,11 +3264,12 @@ def test_can_create_skill(self, **kwargs): raise self.UnauthorizedUserException( 'You do not have credentials to create a skill.') - test_can_create_skill.__wrapped__ = True return test_can_create_skill -def can_delete_story(handler): +def can_delete_story( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can delete a story in a given topic. @@ -2658,7 +3282,12 @@ def can_delete_story(handler): given topic. """ - def test_can_delete_story(self, story_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_delete_story( + self: _SelfBaseHandlerType, story_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can delete a story in a given topic. @@ -2692,12 +3321,13 @@ def test_can_delete_story(self, story_id, **kwargs): else: raise self.UnauthorizedUserException( 'You do not have credentials to delete this story.') - test_can_delete_story.__wrapped__ = True return test_can_delete_story -def can_delete_topic(handler): +def can_delete_topic( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can delete a topic. Args: @@ -2708,7 +3338,12 @@ def can_delete_topic(handler): checks if the user can delete a given topic. """ - def test_can_delete_topic(self, topic_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_delete_topic( + self: _SelfBaseHandlerType, topic_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can delete a given topic. Args: @@ -2739,12 +3374,13 @@ def test_can_delete_topic(self, topic_id, **kwargs): raise self.UnauthorizedUserException( '%s does not have enough rights to delete the' ' topic.' % self.user_id) - test_can_delete_topic.__wrapped__ = True return test_can_delete_topic -def can_create_topic(handler): +def can_create_topic( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can create a topic. Args: @@ -2755,7 +3391,12 @@ def can_create_topic(handler): if the user can create a topic. """ - def test_can_create_topic(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) + def test_can_create_topic( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can create a topic. Args: @@ -2780,12 +3421,13 @@ def test_can_create_topic(self, **kwargs): raise self.UnauthorizedUserException( '%s does not have enough rights to create a' ' topic.' % self.user_id) - test_can_create_topic.__wrapped__ = True return test_can_create_topic -def can_access_topics_and_skills_dashboard(handler): +def can_access_topics_and_skills_dashboard( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can access the topics and skills dashboard. @@ -2797,7 +3439,12 @@ def can_access_topics_and_skills_dashboard(handler): the user can access the topics and skills dashboard. """ - def test_can_access_topics_and_skills_dashboard(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access_topics_and_skills_dashboard( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can access the topics and skills dashboard. @@ -2826,12 +3473,13 @@ def test_can_access_topics_and_skills_dashboard(self, **kwargs): raise self.UnauthorizedUserException( '%s does not have enough rights to access the topics and skills' ' dashboard.' % self.user_id) - test_can_access_topics_and_skills_dashboard.__wrapped__ = True return test_can_access_topics_and_skills_dashboard -def can_view_any_topic_editor(handler): +def can_view_any_topic_editor( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can view any topic editor. Args: @@ -2842,7 +3490,12 @@ def can_view_any_topic_editor(handler): if the user can view any topic editor. 
""" - def test_can_view_any_topic_editor(self, topic_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_view_any_topic_editor( + self: _SelfBaseHandlerType, topic_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can view any topic editor. Args: @@ -2874,12 +3527,13 @@ def test_can_view_any_topic_editor(self, topic_id, **kwargs): raise self.UnauthorizedUserException( '%s does not have enough rights to view any topic editor.' % self.user_id) - test_can_view_any_topic_editor.__wrapped__ = True return test_can_view_any_topic_editor -def can_manage_rights_for_topic(handler): +def can_manage_rights_for_topic( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can manage a topic's rights. Args: @@ -2890,7 +3544,12 @@ def can_manage_rights_for_topic(handler): if the user can manage a given topic's rights. """ - def test_can_manage_topic_rights(self, topic_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_manage_topic_rights( + self: _SelfBaseHandlerType, topic_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can manage a topic's rights. Args: @@ -2918,12 +3577,13 @@ def test_can_manage_topic_rights(self, topic_id, **kwargs): raise self.UnauthorizedUserException( '%s does not have enough rights to assign roles for the ' 'topic.' 
% self.user_id) - test_can_manage_topic_rights.__wrapped__ = True return test_can_manage_topic_rights -def can_change_topic_publication_status(handler): +def can_change_topic_publication_status( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the user can publish or unpublish a topic. Args: @@ -2934,7 +3594,12 @@ def can_change_topic_publication_status(handler): if the user can publish or unpublish a topic. """ - def test_can_change_topic_publication_status(self, topic_id, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_change_topic_publication_status( + self: _SelfBaseHandlerType, topic_id: str, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can can publish or unpublish a topic. Args: @@ -2967,12 +3632,13 @@ def test_can_change_topic_publication_status(self, topic_id, **kwargs): raise self.UnauthorizedUserException( '%s does not have enough rights to publish or unpublish the ' 'topic.' % self.user_id) - test_can_change_topic_publication_status.__wrapped__ = True return test_can_change_topic_publication_status -def can_access_topic_viewer_page(handler): +def can_access_topic_viewer_page( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., Optional[_GenericHandlerFunctionReturnType]]: """Decorator to check whether user can access topic viewer page. Args: @@ -2983,8 +3649,15 @@ def can_access_topic_viewer_page(handler): if the user can access the given topic viewer page. """ + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. 
+ @functools.wraps(handler) def test_can_access( - self, classroom_url_fragment, topic_url_fragment, **kwargs): + self: _SelfBaseHandlerType, + classroom_url_fragment: str, + topic_url_fragment: str, + **kwargs: Any + ) -> Optional[_GenericHandlerFunctionReturnType]: """Checks if the user can access topic viewer page. Args: @@ -2997,6 +3670,8 @@ def test_can_access( Raises: PageNotFoundException. The given page cannot be found. + EntityNotFoundError. The TopicRights with ID topic_id was not + found in the datastore. """ if topic_url_fragment != topic_url_fragment.lower(): _redirect_based_on_return_type( @@ -3004,7 +3679,7 @@ def test_can_access( classroom_url_fragment, topic_url_fragment.lower()), self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None topic = topic_fetchers.get_topic_by_url_fragment( topic_url_fragment) @@ -3013,7 +3688,7 @@ def test_can_access( _redirect_based_on_return_type( self, '/learn/%s' % classroom_url_fragment, self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None verified_classroom_url_fragment = ( classroom_services.get_classroom_url_fragment_for_topic_id( @@ -3025,11 +3700,11 @@ def test_can_access( verified_classroom_url_fragment, url_substring), self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None topic_id = topic.id topic_rights = topic_fetchers.get_topic_rights( - topic_id, strict=False) + topic_id, strict=True) user_actions_info = user_services.get_user_actions_info(self.user_id) if ( @@ -3039,12 +3714,13 @@ def test_can_access( return handler(self, topic.name, **kwargs) else: raise self.PageNotFoundException - test_can_access.__wrapped__ = True return test_can_access -def can_access_story_viewer_page(handler): +def can_access_story_viewer_page( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., Optional[_GenericHandlerFunctionReturnType]]: """Decorator to check whether user can access story viewer page. 
Args: @@ -3055,9 +3731,17 @@ def can_access_story_viewer_page(handler): if the user can access the given story viewer page. """ + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) def test_can_access( - self, classroom_url_fragment, topic_url_fragment, - story_url_fragment, *args, **kwargs): + self: _SelfBaseHandlerType, + classroom_url_fragment: str, + topic_url_fragment: str, + story_url_fragment: str, + *args: Any, + **kwargs: Any + ) -> Optional[_GenericHandlerFunctionReturnType]: """Checks if the user can access story viewer page. Args: @@ -3081,7 +3765,7 @@ def test_can_access( topic_url_fragment, story_url_fragment.lower()), self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None story = story_fetchers.get_story_by_url_fragment(story_url_fragment) @@ -3091,7 +3775,7 @@ def test_can_access( '/learn/%s/%s/story' % (classroom_url_fragment, topic_url_fragment), self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None story_is_published = False topic_is_published = False @@ -3108,7 +3792,7 @@ def test_can_access( topic.url_fragment, story_url_fragment), self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None verified_classroom_url_fragment = ( classroom_services.get_classroom_url_fragment_for_topic_id( @@ -3121,7 +3805,7 @@ def test_can_access( verified_classroom_url_fragment, url_substring), self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None topic_rights = topic_fetchers.get_topic_rights(topic_id) topic_is_published = topic_rights.topic_is_published all_story_references = topic.get_all_story_references() @@ -3136,12 +3820,126 @@ def test_can_access( return handler(self, story_id, *args, **kwargs) else: raise self.PageNotFoundException - test_can_access.__wrapped__ = True return test_can_access -def can_access_subtopic_viewer_page(handler): +def can_access_story_viewer_page_as_logged_in_user( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> 
Callable[..., Optional[_GenericHandlerFunctionReturnType]]: + """Decorator to check whether the user can access story viewer page + if the user is logged in. + + Args: + handler: function. The function to be decorated. + + Returns: + function. The newly decorated function that now checks + if the user can access the given story viewer page if the + user is logged in. + """ + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access( + self: _SelfBaseHandlerType, + classroom_url_fragment: str, + topic_url_fragment: str, + story_url_fragment: str, + *args: Any, + **kwargs: Any + ) -> Optional[_GenericHandlerFunctionReturnType]: + """Checks if the user can access story viewer page. + + Args: + classroom_url_fragment: str. The classroom url fragment. + topic_url_fragment: str. The url fragment of the topic + associated with the story. + story_url_fragment: str. The story url fragment. + *args: list(*). A list of arguments from the calling function. + **kwargs: *. Keyword arguments. + + Returns: + *. The return value of the decorated function. + + Raises: + NotLoggedInException. The user is not logged in. + PageNotFoundException. The given page cannot be found. 
+ """ + if self.user_id is None: + raise self.NotLoggedInException + + if story_url_fragment != story_url_fragment.lower(): + _redirect_based_on_return_type( + self, '/learn/%s/%s/story/%s' % ( + classroom_url_fragment, + topic_url_fragment, + story_url_fragment.lower()), + self.GET_HANDLER_ERROR_RETURN_TYPE) + return None + + story = story_fetchers.get_story_by_url_fragment(story_url_fragment) + + if story is None: + _redirect_based_on_return_type( + self, + '/learn/%s/%s/story' % + (classroom_url_fragment, topic_url_fragment), + self.GET_HANDLER_ERROR_RETURN_TYPE) + return None + + story_is_published = False + topic_is_published = False + topic_id = story.corresponding_topic_id + story_id = story.id + user_actions_info = user_services.get_user_actions_info(self.user_id) + if topic_id: + topic = topic_fetchers.get_topic_by_id(topic_id) + if topic.url_fragment != topic_url_fragment: + _redirect_based_on_return_type( + self, + '/learn/%s/%s/story/%s' % ( + classroom_url_fragment, + topic.url_fragment, + story_url_fragment), + self.GET_HANDLER_ERROR_RETURN_TYPE) + return None + + verified_classroom_url_fragment = ( + classroom_services.get_classroom_url_fragment_for_topic_id( + topic.id)) + if classroom_url_fragment != verified_classroom_url_fragment: + url_substring = '%s/story/%s' % ( + topic_url_fragment, story_url_fragment) + _redirect_based_on_return_type( + self, '/learn/%s/%s' % ( + verified_classroom_url_fragment, + url_substring), + self.GET_HANDLER_ERROR_RETURN_TYPE) + return None + topic_rights = topic_fetchers.get_topic_rights(topic_id) + topic_is_published = topic_rights.topic_is_published + all_story_references = topic.get_all_story_references() + for reference in all_story_references: + if reference.story_id == story_id: + story_is_published = reference.story_is_published + + if ( + (story_is_published and topic_is_published) or + role_services.ACTION_VISIT_ANY_TOPIC_EDITOR_PAGE in + user_actions_info.actions + ): + return handler(self, story_id, 
*args, **kwargs) + else: + raise self.PageNotFoundException + + return test_can_access + + +def can_access_subtopic_viewer_page( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., Optional[_GenericHandlerFunctionReturnType]]: """Decorator to check whether user can access subtopic page viewer. Args: @@ -3152,9 +3950,16 @@ def can_access_subtopic_viewer_page(handler): if the user can access the given subtopic viewer page. """ - def test_can_access( - self, classroom_url_fragment, topic_url_fragment, - subtopic_url_fragment, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_access( # pylint: disable=too-many-return-statements + self: _SelfBaseHandlerType, + classroom_url_fragment: str, + topic_url_fragment: str, + subtopic_url_fragment: str, + **kwargs: Any + ) -> Optional[_GenericHandlerFunctionReturnType]: """Checks if the user can access subtopic viewer page. 
Args: @@ -3177,7 +3982,7 @@ def test_can_access( topic_url_fragment, subtopic_url_fragment.lower()), self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None topic = topic_fetchers.get_topic_by_url_fragment(topic_url_fragment) subtopic_id = None @@ -3186,7 +3991,7 @@ def test_can_access( _redirect_based_on_return_type( self, '/learn/%s' % classroom_url_fragment, self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None user_actions_info = user_services.get_user_actions_info(self.user_id) topic_rights = topic_fetchers.get_topic_rights(topic.id) @@ -3198,7 +4003,7 @@ def test_can_access( _redirect_based_on_return_type( self, '/learn/%s' % classroom_url_fragment, self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None for subtopic in topic.subtopics: if subtopic.url_fragment == subtopic_url_fragment: @@ -3210,7 +4015,7 @@ def test_can_access( '/learn/%s/%s/revision' % (classroom_url_fragment, topic_url_fragment), self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None verified_classroom_url_fragment = ( classroom_services.get_classroom_url_fragment_for_topic_id( @@ -3223,7 +4028,7 @@ def test_can_access( verified_classroom_url_fragment, url_substring), self.GET_HANDLER_ERROR_RETURN_TYPE) - return + return None subtopic_page = subtopic_page_services.get_subtopic_page_by_id( topic.id, subtopic_id, strict=False) @@ -3233,14 +4038,16 @@ def test_can_access( '/learn/%s/%s/revision' % ( classroom_url_fragment, topic_url_fragment), self.GET_HANDLER_ERROR_RETURN_TYPE) + return None else: return handler(self, topic.name, subtopic_id, **kwargs) - test_can_access.__wrapped__ = True return test_can_access -def get_decorator_for_accepting_suggestion(decorator): +def get_decorator_for_accepting_suggestion( + decorator: Callable[[Callable[..., None]], Callable[..., None]] +) -> Callable[[Callable[..., None]], Callable[..., None]]: """Function that takes a decorator as an argument and then applies some common checks and then checks the permissions specified by the passed in 
decorator. @@ -3258,7 +4065,9 @@ def get_decorator_for_accepting_suggestion(decorator): - Any user with edit permissions to the target entity can accept/reject suggestions for that entity. """ - def generate_decorator_for_handler(handler): + def generate_decorator_for_handler( + handler: Callable[..., None] + ) -> Callable[..., None]: """Function that generates a decorator for a given handler. Args: @@ -3271,8 +4080,16 @@ def generate_decorator_for_handler(handler): Raises: NotLoggedInException. The user is not logged in. """ + + # Here we use type Any because this method can accept arbitrary number + # of arguments with different types. + @functools.wraps(handler) def test_can_accept_suggestion( - self, target_id, suggestion_id, **kwargs): + self: _SelfBaseHandlerType, + target_id: str, + suggestion_id: str, + **kwargs: Any + ) -> None: """Returns a (possibly-decorated) handler to test whether a suggestion can be accepted based on the user actions and roles. @@ -3301,7 +4118,9 @@ def test_can_accept_suggestion( 'Invalid format for suggestion_id.' ' It must contain 3 parts separated by \'.\'') - suggestion = suggestion_services.get_suggestion_by_id(suggestion_id) + suggestion = suggestion_services.get_suggestion_by_id( + suggestion_id, strict=False + ) if suggestion is None: raise self.PageNotFoundException @@ -3326,13 +4145,14 @@ def test_can_accept_suggestion( return decorator(handler)(self, target_id, suggestion_id, **kwargs) - test_can_accept_suggestion.__wrapped__ = True return test_can_accept_suggestion return generate_decorator_for_handler -def can_view_reviewable_suggestions(handler): +def can_view_reviewable_suggestions( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., Optional[_GenericHandlerFunctionReturnType]]: """Decorator to check whether user can view the list of suggestions that they are allowed to review. @@ -3343,8 +4163,16 @@ def can_view_reviewable_suggestions(handler): function. 
The newly decorated function that now checks if the user can view reviewable suggestions. """ + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) def test_can_view_reviewable_suggestions( - self, target_type, suggestion_type, **kwargs): + self: _SelfBaseHandlerType, + target_type: str, + suggestion_type: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the user can view reviewable suggestions. Args: @@ -3357,6 +4185,8 @@ def test_can_view_reviewable_suggestions( Raises: PageNotFoundException. The given page cannot be found. + Exception. User is not allowed to review translation suggestions. + Exception. User is not allowed to review question suggestions. """ if not self.user_id: raise base.UserFacingExceptions.NotLoggedInException @@ -3364,19 +4194,29 @@ def test_can_view_reviewable_suggestions( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT): if user_services.can_review_translation_suggestions(self.user_id): return handler(self, target_type, suggestion_type, **kwargs) + else: + raise Exception( + 'User with user_id: %s is not allowed to review ' + 'translation suggestions.' % self.user_id + ) elif suggestion_type == ( feconf.SUGGESTION_TYPE_ADD_QUESTION): if user_services.can_review_question_suggestions(self.user_id): return handler(self, target_type, suggestion_type, **kwargs) + else: + raise Exception( + 'User with user_id: %s is not allowed to review question ' + 'suggestions.' % self.user_id + ) else: raise self.PageNotFoundException - test_can_view_reviewable_suggestions.__wrapped__ = True - return test_can_view_reviewable_suggestions -def can_edit_entity(handler): +def can_edit_entity( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can edit entity. Args: @@ -3386,7 +4226,16 @@ def can_edit_entity(handler): function. 
The newly decorated function that now checks if the user can edit the entity. """ - def test_can_edit_entity(self, entity_type, entity_id, **kwargs): + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_edit_entity( + self: _SelfBaseHandlerType, + entity_type: str, + entity_id: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can edit entity. Args: @@ -3407,29 +4256,41 @@ def test_can_edit_entity(self, entity_type, entity_id, **kwargs): # for the corresponding decorators. reduced_handler = functools.partial( arg_swapped_handler, entity_type) - if entity_type == feconf.ENTITY_TYPE_EXPLORATION: - return can_edit_exploration(reduced_handler)( - self, entity_id, **kwargs) - elif entity_type == feconf.ENTITY_TYPE_QUESTION: - return can_edit_question(reduced_handler)(self, entity_id, **kwargs) - elif entity_type == feconf.ENTITY_TYPE_TOPIC: - return can_edit_topic(reduced_handler)(self, entity_id, **kwargs) - elif entity_type == feconf.ENTITY_TYPE_SKILL: - return can_edit_skill(reduced_handler)(self, entity_id, **kwargs) - elif entity_type == feconf.ENTITY_TYPE_STORY: - return can_edit_story(reduced_handler)(self, entity_id, **kwargs) - elif entity_type == feconf.ENTITY_TYPE_BLOG_POST: - return ( - can_edit_blog_post(reduced_handler)(self, entity_id, **kwargs)) - else: + functions: ( + Dict[str, Callable[[str], _GenericHandlerFunctionReturnType]] + ) = { + feconf.ENTITY_TYPE_EXPLORATION: lambda entity_id: ( + can_edit_exploration(reduced_handler)( + self, entity_id, **kwargs)), + feconf.ENTITY_TYPE_QUESTION: lambda entity_id: ( + can_edit_question(reduced_handler)( + self, entity_id, **kwargs)), + feconf.ENTITY_TYPE_TOPIC: lambda entity_id: ( + can_edit_topic(reduced_handler)( + self, entity_id, **kwargs)), + feconf.ENTITY_TYPE_SKILL: lambda entity_id: ( + can_edit_skill(reduced_handler)( + self, entity_id, **kwargs)), + 
feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS: lambda entity_id: ( + can_submit_images_to_questions(reduced_handler)( + self, entity_id, **kwargs)), + feconf.ENTITY_TYPE_STORY: lambda entity_id: ( + can_edit_story(reduced_handler)( + self, entity_id, **kwargs)), + feconf.ENTITY_TYPE_BLOG_POST: lambda entity_id: ( + can_edit_blog_post(reduced_handler)( + self, entity_id, **kwargs)) + } + if entity_type not in dict.keys(functions): raise self.PageNotFoundException - - test_can_edit_entity.__wrapped__ = True + return functions[entity_type](entity_id) return test_can_edit_entity -def can_play_entity(handler): +def can_play_entity( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether user can play entity. Args: @@ -3439,7 +4300,16 @@ def can_play_entity(handler): function. The newly decorated function that now checks if the user can play the entity. """ - def test_can_play_entity(self, entity_type, entity_id, **kwargs): + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_play_entity( + self: _SelfBaseHandlerType, + entity_type: str, + entity_id: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the user can play entity. Args: @@ -3472,12 +4342,12 @@ def test_can_play_entity(self, entity_type, entity_id, **kwargs): else: raise self.PageNotFoundException - test_can_play_entity.__wrapped__ = True - return test_can_play_entity -def is_from_oppia_ml(handler): +def is_from_oppia_ml( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the incoming request is from a valid Oppia-ML VM instance. @@ -3488,7 +4358,14 @@ def is_from_oppia_ml(handler): function. The newly decorated function that now can check if incoming request is from a valid VM instance. 
""" - def test_request_originates_from_valid_oppia_ml_instance(self, **kwargs): + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_request_originates_from_valid_oppia_ml_instance( + self: base.OppiaMLVMHandler[Dict[str, str], Dict[str, str]], + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks if the incoming request is from a valid Oppia-ML VM instance. @@ -3512,12 +4389,12 @@ def test_request_originates_from_valid_oppia_ml_instance(self, **kwargs): return handler(self, **kwargs) - test_request_originates_from_valid_oppia_ml_instance.__wrapped__ = True - return test_request_originates_from_valid_oppia_ml_instance -def can_update_suggestion(handler): +def can_update_suggestion( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the current user can update suggestions. Args: @@ -3535,8 +4412,15 @@ def can_update_suggestion(handler): PageNotFoundException. A suggestion is not found with the given suggestion id. """ + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) def test_can_update_suggestion( - self, suggestion_id, **kwargs): + self: _SelfBaseHandlerType, + suggestion_id: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Returns a handler to test whether a suggestion can be updated based on the user's roles. @@ -3564,7 +4448,9 @@ def test_can_update_suggestion( 'Invalid format for suggestion_id.' 
+ ' It must contain 3 parts separated by \'.\'') - suggestion = suggestion_services.get_suggestion_by_id(suggestion_id) + suggestion = suggestion_services.get_suggestion_by_id( + suggestion_id, strict=False + ) if suggestion is None: raise self.PageNotFoundException @@ -3595,11 +4481,129 @@ def test_can_update_suggestion( raise base.UserFacingExceptions.UnauthorizedUserException( 'You are not allowed to update the suggestion.') - test_can_update_suggestion.__wrapped__ = True return test_can_update_suggestion -def is_from_oppia_android(handler): +def can_fetch_contributor_dashboard_stats( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: + """Decorator to check whether the current user can fetch contributor + dashboard stats. + + Args: + handler: function. The function to be decorated. + + Returns: + function. The newly decorated function that now checks + if the user can fetch stats. + + Raises: + NotLoggedInException. The user is not logged in. + UnauthorizedUserException. The user does not have credentials to + fetch stats for the given username. + """ + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_fetch_contributor_dashboard_stats( + self: _SelfBaseHandlerType, + contribution_type: str, + contribution_subtype: str, + username: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: + """Returns a handler to test whether stats can be fetched based + on the logged in user. + + Args: + contribution_type: str. The type of the contribution that the stats + are requested. + contribution_subtype: str. The subtype of the contribution that the + stats are requested. + username: str. The provided username. + **kwargs: *. Keyword arguments. + + Returns: + function. The handler for fetching stats. + + Raises: + NotLoggedInException. The user is not logged in. + UnauthorizedUserException. 
The user does not have credentials to + fetch stats for the given username. + """ + if not self.user_id: + raise base.UserFacingExceptions.NotLoggedInException + + if user_services.get_username(self.user_id) != username: + raise base.UserFacingExceptions.UnauthorizedUserException( + 'The user %s is not allowed to fetch the stats of other ' + 'users.' % (user_services.get_username(self.user_id))) + + return handler( + self, contribution_type, contribution_subtype, username, **kwargs) + + return test_can_fetch_contributor_dashboard_stats + + +def can_fetch_all_contributor_dashboard_stats( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: + """Decorator to check whether the current user can fetch contributor + dashboard stats. + + Args: + handler: function. The function to be decorated. + + Returns: + function. The newly decorated function that now checks + if the user can fetch stats. + + Raises: + NotLoggedInException. The user is not logged in. + UnauthorizedUserException. The user does not have credentials to + fetch stats for the given username. + """ + + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_can_fetch_all_contributor_dashboard_stats( + self: _SelfBaseHandlerType, + username: str, + **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: + """Returns a handler to test whether stats can be fetched based + on the logged in user. + + Args: + username: str. The provided username. + **kwargs: *. Keyword arguments. + + Returns: + function. The handler for fetching stats. + + Raises: + NotLoggedInException. The user is not logged in. + UnauthorizedUserException. The user does not have credentials to + fetch stats for the given username. 
+ """ + if not self.user_id: + raise base.UserFacingExceptions.NotLoggedInException + + if user_services.get_username(self.user_id) != username: + raise base.UserFacingExceptions.UnauthorizedUserException( + 'The user %s is not allowed to fetch the stats of other ' + 'users.' % (user_services.get_username(self.user_id))) + + return handler(self, username, **kwargs) + + return test_can_fetch_all_contributor_dashboard_stats + + +def is_from_oppia_android( + handler: Callable[..., _GenericHandlerFunctionReturnType] +) -> Callable[..., _GenericHandlerFunctionReturnType]: """Decorator to check whether the request was sent from Oppia Android. Args: @@ -3609,7 +4613,12 @@ def is_from_oppia_android(handler): function. The newly decorated function. """ - def test_is_from_oppia_android(self, **kwargs): + # Here we use type Any because this method can accept arbitrary number of + # arguments with different types. + @functools.wraps(handler) + def test_is_from_oppia_android( + self: _SelfBaseHandlerType, **kwargs: Any + ) -> _GenericHandlerFunctionReturnType: """Checks whether the request was sent from Oppia Android. 
Args: @@ -3643,6 +4652,4 @@ def test_is_from_oppia_android(self, **kwargs): 'The incoming request is not a valid Oppia Android request.') return handler(self, **kwargs) - test_is_from_oppia_android.__wrapped__ = True - return test_is_from_oppia_android diff --git a/core/controllers/acl_decorators_test.py b/core/controllers/acl_decorators_test.py index 494e882628db..edd00f924128 100644 --- a/core/controllers/acl_decorators_test.py +++ b/core/controllers/acl_decorators_test.py @@ -25,7 +25,7 @@ from core.constants import constants from core.controllers import acl_decorators from core.controllers import base -from core.domain import app_feedback_report_domain +from core.controllers import incoming_app_feedback_report from core.domain import blog_services from core.domain import classifier_domain from core.domain import classifier_services @@ -46,22 +46,215 @@ from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services +from core.domain import translation_domain from core.domain import user_services +from core.platform import models from core.tests import test_utils +from typing import Dict, Final, List, TypedDict, Union import webapp2 import webtest +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import secrets_services -class PlayExplorationDecoratorTests(test_utils.GenericTestBase): - """Tests for play exploration decorator.""" +datastore_services = models.Registry.import_datastore_services() +secrets_services = models.Registry.import_secrets_services() + + +class OpenAccessDecoratorTests(test_utils.GenericTestBase): + """Tests for open access decorator.""" + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.open_access + def get(self) -> None: + 
self.render_json({'success': True}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route('/mock', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_access_with_logged_in_user(self) -> None: + self.login(self.VIEWER_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock') + self.assertTrue(response['success']) + self.logout() + + def test_access_with_guest_user(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock') + self.assertTrue(response['success']) + + +class IsSourceMailChimpDecoratorTests(test_utils.GenericTestBase): + """Tests for is_source_mailchimp decorator.""" + + user_email = 'user@example.com' + username = 'user' + secret = 'webhook_secret' + invalid_secret = 'invalid' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'secret': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.is_source_mailchimp + def get(self, secret: str) -> None: + self.render_json({'secret': secret}) + + def setUp(self) -> None: + super().setUp() + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route('/mock_secret_page/', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_error_when_mailchimp_webhook_secret_is_none(self) -> None: + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + swap_api_key_secrets_return_none = self.swap_with_checks( + secrets_services, + 'get_secret', + lambda _: None, + expected_args=[ + ('MAILCHIMP_WEBHOOK_SECRET',), + ] + ) + + with testapp_swap: + with swap_api_key_secrets_return_none: + response = self.get_json( + '/mock_secret_page/%s' % self.secret, + expected_status_int=404 + ) + + 
error_msg = ( + 'Could not find the page http://localhost' + '/mock_secret_page/%s.' % self.secret + ) + self.assertEqual(response['error'], error_msg) + self.assertEqual(response['status_code'], 404) + + def test_error_when_given_webhook_secret_is_invalid(self) -> None: + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + mailchimp_swap = self.swap_to_always_return( + secrets_services, 'get_secret', self.secret) + + with testapp_swap, mailchimp_swap: + response = self.get_json( + '/mock_secret_page/%s' % self.invalid_secret, + expected_status_int=404 + ) + + error_msg = ( + 'Could not find the page http://localhost' + '/mock_secret_page/%s.' % self.invalid_secret + ) + self.assertEqual(response['error'], error_msg) + self.assertEqual(response['status_code'], 404) + + def test_no_error_when_given_webhook_secret_is_valid(self) -> None: + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + mailchimp_swap = self.swap_to_always_return( + secrets_services, 'get_secret', self.secret) + + with testapp_swap, mailchimp_swap: + response = self.get_json( + '/mock_secret_page/%s' % self.secret, + expected_status_int=200 + ) + + self.assertEqual(response['secret'], self.secret) + + +class ViewSkillsDecoratorTests(test_utils.GenericTestBase): + """Tests for can_view_skills decorator.""" + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + REQUIRE_PAYLOAD_CSRF_CHECK = False + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'selected_skill_ids': { + 'schema': { + 'type': 'custom', + 'obj_type': 'JsonEncodedInString' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_view_skills + def get(self, selected_skill_ids: List[str]) -> None: + self.render_json({'selected_skill_ids': selected_skill_ids}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + 
self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.admin = user_services.get_user_actions_info(self.admin_id) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_view_skills/', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_can_view_skill_with_valid_skill_id(self) -> None: + skill_id = skill_services.get_new_skill_id() + self.save_new_skill(skill_id, self.admin_id, description='Description') + skill_ids = [skill_id] + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_skills/%s' % json.dumps(skill_ids)) + self.assertEqual(response['selected_skill_ids'], skill_ids) + + def test_invalid_input_exception_with_invalid_skill_ids(self) -> None: + skill_ids = ['abcd1234'] + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_skills/%s' % json.dumps(skill_ids), + expected_status_int=400) + self.assertEqual(response['error'], 'Invalid skill id.') + + def test_page_not_found_exception_with_invalid_skill_ids(self) -> None: + skill_ids = ['invalid_id12', 'invalid_id13'] + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_skills/%s' % json.dumps(skill_ids), + expected_status_int=404) + error_msg = ( + 'Could not find the page http://localhost/mock_view_skills/' + '%5B%22invalid_id12%22,%20%22invalid_id13%22%5D.' 
+ ) + self.assertEqual(response['error'], error_msg) + + +class DownloadExplorationDecoratorTests(test_utils.GenericTestBase): + """Tests for download exploration decorator.""" user_email = 'user@example.com' username = 'user' published_exp_id = 'exp_id_1' private_exp_id = 'exp_id_2' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -70,14 +263,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} - @acl_decorators.can_play_exploration - def get(self, exploration_id): - return self.render_json({'exploration_id': exploration_id}) + @acl_decorators.can_download_exploration + def get(self, exploration_id: str) -> None: + self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(PlayExplorationDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) @@ -86,7 +279,8 @@ def setUp(self): self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( - '/mock_play_exploration/', self.MockHandler)], + '/mock_download_exploration/', + self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( @@ -95,76 +289,119 @@ def setUp(self): self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) - def test_can_not_access_exploration_with_disabled_exploration_ids(self): + def test_cannot_download_exploration_with_disabled_exploration_ids( + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): - self.get_json( - '/mock_play_exploration/%s' - % (feconf.DISABLED_EXPLORATION_IDS[0]), 
expected_status_int=404) + response = self.get_json( + '/mock_download_exploration/%s' % ( + feconf.DISABLED_EXPLORATION_IDS[0]), + expected_status_int=404 + ) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_download_exploration/%s.' % ( + feconf.DISABLED_EXPLORATION_IDS[0] + ) + ) + self.assertEqual(response['error'], error_msg) - def test_guest_can_access_published_exploration(self): + def test_guest_can_download_published_exploration(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( - '/mock_play_exploration/%s' % self.published_exp_id) + '/mock_download_exploration/%s' % self.published_exp_id) self.assertEqual(response['exploration_id'], self.published_exp_id) - def test_guest_cannot_access_private_exploration(self): + def test_guest_cannot_download_private_exploration(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): - self.get_json( - '/mock_play_exploration/%s' % self.private_exp_id, + response = self.get_json( + '/mock_download_exploration/%s' % self.private_exp_id, expected_status_int=404) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_download_exploration/%s.' 
% ( + self.private_exp_id + ) + ) + self.assertEqual(response['error'], error_msg) - def test_moderator_can_access_private_exploration(self): + def test_moderator_can_download_private_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( - '/mock_play_exploration/%s' % self.private_exp_id) + '/mock_download_exploration/%s' % self.private_exp_id) self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_owner_can_access_private_exploration(self): + def test_owner_can_download_private_exploration(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( - '/mock_play_exploration/%s' % self.private_exp_id) + '/mock_download_exploration/%s' % self.private_exp_id) self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_logged_in_user_cannot_access_not_owned_exploration(self): + def test_logged_in_user_cannot_download_unowned_exploration(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): - self.get_json( - '/mock_play_exploration/%s' % self.private_exp_id, + response = self.get_json( + '/mock_download_exploration/%s' % self.private_exp_id, + expected_status_int=404) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_download_exploration/%s.' 
% ( + self.private_exp_id + ) + ) + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_page_not_found_exception_when_exploration_rights_is_none( + self + ) -> None: + self.login(self.user_email) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + exp_rights_swap = self.swap_to_always_return( + rights_manager, 'get_exploration_rights', value=None) + with testapp_swap, exp_rights_swap: + response = self.get_json( + '/mock_download_exploration/%s' % self.published_exp_id, expected_status_int=404) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_download_exploration/%s.' % ( + self.published_exp_id + ) + ) + self.assertEqual(response['error'], error_msg) self.logout() -class PlayCollectionDecoratorTests(test_utils.GenericTestBase): - """Tests for play collection decorator.""" +class ViewExplorationStatsDecoratorTests(test_utils.GenericTestBase): + """Tests for view exploration stats decorator.""" user_email = 'user@example.com' username = 'user' published_exp_id = 'exp_id_1' private_exp_id = 'exp_id_2' - published_col_id = 'col_id_1' - private_col_id = 'col_id_2' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { - 'collection_id': { + 'exploration_id': { 'schema': { 'type': 'basestring' } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} - @acl_decorators.can_play_collection - def get(self, collection_id): - return self.render_json({'collection_id': collection_id}) + @acl_decorators.can_view_exploration_stats + def get(self, exploration_id: str) -> None: + self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(PlayCollectionDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, 
self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) @@ -173,189 +410,535 @@ def setUp(self): self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( - '/mock_play_collection/', self.MockHandler)], + '/mock_view_exploration_stats/', + self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) - self.save_new_valid_collection( - self.published_col_id, self.owner_id, - exploration_id=self.published_col_id) - self.save_new_valid_collection( - self.private_col_id, self.owner_id, - exploration_id=self.private_col_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) - rights_manager.publish_collection(self.owner, self.published_col_id) - def test_guest_can_access_published_collection(self): + def test_cannot_view_exploration_stats_with_disabled_exploration_ids( + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( - '/mock_play_collection/%s' % self.published_col_id) - self.assertEqual(response['collection_id'], self.published_col_id) + '/mock_view_exploration_stats/%s' % ( + feconf.DISABLED_EXPLORATION_IDS[0]), + expected_status_int=404 + ) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_view_exploration_stats/%s.' 
% ( + feconf.DISABLED_EXPLORATION_IDS[0] + ) + ) + self.assertEqual(response['error'], error_msg) - def test_guest_cannot_access_private_collection(self): + def test_guest_can_view_published_exploration_stats(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): - self.get_json( - '/mock_play_collection/%s' % self.private_col_id, + response = self.get_json( + '/mock_view_exploration_stats/%s' % self.published_exp_id) + self.assertEqual(response['exploration_id'], self.published_exp_id) + + def test_guest_cannot_view_private_exploration_stats(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_exploration_stats/%s' % self.private_exp_id, expected_status_int=404) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_view_exploration_stats/%s.' % ( + self.private_exp_id + ) + ) + self.assertEqual(response['error'], error_msg) - def test_moderator_can_access_private_collection(self): + def test_moderator_can_view_private_exploration_stats(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( - '/mock_play_collection/%s' % self.private_col_id) - self.assertEqual(response['collection_id'], self.private_col_id) + '/mock_view_exploration_stats/%s' % self.private_exp_id) + self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_owner_can_access_private_collection(self): + def test_owner_can_view_private_exploration_stats(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( - '/mock_play_collection/%s' % self.private_col_id) - self.assertEqual(response['collection_id'], self.private_col_id) + '/mock_view_exploration_stats/%s' % self.private_exp_id) + self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_logged_in_user_cannot_access_not_owned_private_collection(self): + def 
test_logged_in_user_cannot_view_unowned_exploration_stats(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): - self.get_json( - '/mock_play_collection/%s' % self.private_col_id, + response = self.get_json( + '/mock_view_exploration_stats/%s' % self.private_exp_id, expected_status_int=404) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_view_exploration_stats/%s.' % ( + self.private_exp_id + ) + ) + self.assertEqual(response['error'], error_msg) self.logout() - def test_cannot_access_collection_with_invalid_collection_id(self): - self.login(self.OWNER_EMAIL) - with self.swap(self, 'testapp', self.mock_testapp): - self.get_json( - '/mock_play_collection/invalid_collection_id', + def test_page_not_found_exception_when_exploration_rights_is_none( + self + ) -> None: + self.login(self.user_email) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + exp_rights_swap = self.swap_to_always_return( + rights_manager, 'get_exploration_rights', value=None) + with testapp_swap, exp_rights_swap: + response = self.get_json( + '/mock_view_exploration_stats/%s' % self.published_exp_id, expected_status_int=404) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_view_exploration_stats/%s.' 
% ( + self.published_exp_id + ) + ) + self.assertEqual(response['error'], error_msg) self.logout() -class EditCollectionDecoratorTests(test_utils.GenericTestBase): - """Tests for can_edit_collection decorator.""" +class RequireUserIdElseRedirectToHomepageTests(test_utils.GenericTestBase): + """Tests for require_user_id_else_redirect_to_homepage decorator.""" + + username = 'user' + user_email = 'user@example.com' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_HTML + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.require_user_id_else_redirect_to_homepage + def get(self) -> None: + self.redirect('/access_page') + + def setUp(self) -> None: + super().setUp() + self.signup(self.user_email, self.username) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route('/mock/', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_logged_in_user_is_redirected_to_access_page(self) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_html_response('/mock/', expected_status_int=302) + self.assertEqual( + 'http://localhost/access_page', response.headers['location']) + self.logout() + + def test_guest_user_is_redirected_to_homepage(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_html_response('/mock/', expected_status_int=302) + self.assertEqual( + 'http://localhost/', response.headers['location']) + + +class PlayExplorationDecoratorTests(test_utils.GenericTestBase): + """Tests for play exploration decorator.""" user_email = 'user@example.com' username = 'user' published_exp_id = 'exp_id_1' private_exp_id = 'exp_id_2' - published_col_id = 'col_id_1' - private_col_id = 'col_id_2' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): 
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { - 'collection_id': { + 'exploration_id': { 'schema': { 'type': 'basestring' } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} - @acl_decorators.can_edit_collection - def get(self, collection_id): - return self.render_json({'collection_id': collection_id}) + @acl_decorators.can_play_exploration + def get(self, exploration_id: str) -> None: + self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(EditCollectionDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) - self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) - self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.set_moderators([self.MODERATOR_USERNAME]) - self.set_collection_editors([self.OWNER_USERNAME]) self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( - '/mock_edit_collection/', self.MockHandler)], + '/mock_play_exploration/', self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) - self.save_new_valid_collection( - self.published_col_id, self.owner_id, - exploration_id=self.published_col_id) - self.save_new_valid_collection( - self.private_col_id, self.owner_id, - exploration_id=self.private_col_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) - rights_manager.publish_collection(self.owner, self.published_col_id) - def test_can_not_edit_collection_with_invalid_collection_id(self): - self.login(self.OWNER_EMAIL) + def 
test_cannot_access_exploration_with_disabled_exploration_ids( + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( - '/mock_edit_collection/invalid_col_id', expected_status_int=404) - self.logout() + '/mock_play_exploration/%s' + % (feconf.DISABLED_EXPLORATION_IDS[0]), expected_status_int=404) - def test_guest_cannot_edit_collection_via_json_handler(self): + def test_guest_can_access_published_exploration(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): - self.get_json( - '/mock_edit_collection/%s' % self.published_col_id, - expected_status_int=401) - - def test_guest_is_redirected_when_using_html_handler(self): - with self.swap( - self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE', - feconf.HANDLER_TYPE_HTML): - response = self.mock_testapp.get( - '/mock_edit_collection/%s' % self.published_col_id, - expect_errors=True) - self.assertEqual(response.status_int, 302) + response = self.get_json( + '/mock_play_exploration/%s' % self.published_exp_id) + self.assertEqual(response['exploration_id'], self.published_exp_id) - def test_normal_user_cannot_edit_collection(self): - self.login(self.user_email) + def test_guest_cannot_access_private_exploration(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( - '/mock_edit_collection/%s' % self.private_col_id, - expected_status_int=401) - self.logout() - - def test_owner_can_edit_owned_collection(self): - self.login(self.OWNER_EMAIL) - with self.swap(self, 'testapp', self.mock_testapp): - response = self.get_json( - '/mock_edit_collection/%s' % self.private_col_id) - self.assertEqual(response['collection_id'], self.private_col_id) - self.logout() + '/mock_play_exploration/%s' % self.private_exp_id, + expected_status_int=404) - def test_moderator_can_edit_private_collection(self): + def test_moderator_can_access_private_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = 
self.get_json( - '/mock_edit_collection/%s' % self.private_col_id) - - self.assertEqual(response['collection_id'], self.private_col_id) + '/mock_play_exploration/%s' % self.private_exp_id) + self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_moderator_can_edit_public_collection(self): - self.login(self.MODERATOR_EMAIL) + def test_owner_can_access_private_exploration(self) -> None: + self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( - '/mock_edit_collection/%s' % self.published_col_id) - self.assertEqual(response['collection_id'], self.published_col_id) + '/mock_play_exploration/%s' % self.private_exp_id) + self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_admin_can_edit_any_private_collection(self): - self.login(self.CURRICULUM_ADMIN_EMAIL) + def test_logged_in_user_cannot_access_not_owned_exploration(self) -> None: + self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): - response = self.get_json( - '/mock_edit_collection/%s' % self.private_col_id) - self.assertEqual(response['collection_id'], self.private_col_id) + self.get_json( + '/mock_play_exploration/%s' % self.private_exp_id, + expected_status_int=404) + self.logout() + + +class PlayExplorationAsLoggedInUserTests(test_utils.GenericTestBase): + """Tests for can_play_exploration_as_logged_in_user decorator.""" + + user_email = 'user@example.com' + username = 'user' + published_exp_id = 'exp_id_1' + private_exp_id = 'exp_id_2' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'exploration_id': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_play_exploration_as_logged_in_user + def get(self, exploration_id: str) -> None: + 
self.render_json({'exploration_id': exploration_id}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) + self.signup(self.user_email, self.username) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + self.set_moderators([self.MODERATOR_USERNAME]) + self.owner = user_services.get_user_actions_info(self.owner_id) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_play_exploration/', self.MockHandler)], + debug=feconf.DEBUG, + )) + self.save_new_valid_exploration( + self.published_exp_id, self.owner_id) + self.save_new_valid_exploration( + self.private_exp_id, self.owner_id) + rights_manager.publish_exploration(self.owner, self.published_exp_id) + + def test_cannot_access_explorations_with_disabled_exploration_ids( + self + ) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + ( + '/mock_play_exploration/%s' % + feconf.DISABLED_EXPLORATION_IDS[0] + ), + expected_status_int=404 + ) + self.logout() + + def test_moderator_user_can_access_private_exploration(self) -> None: + self.login(self.MODERATOR_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_play_exploration/%s' % self.private_exp_id + ) + self.assertEqual(response['exploration_id'], self.private_exp_id) + self.logout() + + def test_exp_owner_can_access_private_exploration(self) -> None: + self.login(self.OWNER_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_play_exploration/%s' % self.private_exp_id + ) + self.assertEqual(response['exploration_id'], self.private_exp_id) + self.logout() + + def test_logged_in_user_cannot_access_not_owned_exploration(self) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_play_exploration/%s' % 
self.private_exp_id, + expected_status_int=404 + ) + self.logout() + + def test_invalid_exploration_id_raises_error(self) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_play_exploration/%s' % 'invalid_exp_id', + expected_status_int=404 + ) + self.logout() + + +class PlayCollectionDecoratorTests(test_utils.GenericTestBase): + """Tests for play collection decorator.""" + + user_email = 'user@example.com' + username = 'user' + published_exp_id = 'exp_id_1' + private_exp_id = 'exp_id_2' + published_col_id = 'col_id_1' + private_col_id = 'col_id_2' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'collection_id': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_play_collection + def get(self, collection_id: str) -> None: + self.render_json({'collection_id': collection_id}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) + self.signup(self.user_email, self.username) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + self.set_moderators([self.MODERATOR_USERNAME]) + self.owner = user_services.get_user_actions_info(self.owner_id) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_play_collection/', self.MockHandler)], + debug=feconf.DEBUG, + )) + self.save_new_valid_exploration( + self.published_exp_id, self.owner_id) + self.save_new_valid_exploration( + self.private_exp_id, self.owner_id) + self.save_new_valid_collection( + self.published_col_id, self.owner_id, + exploration_id=self.published_col_id) + self.save_new_valid_collection( + self.private_col_id, self.owner_id, + exploration_id=self.private_col_id) + 
rights_manager.publish_exploration(self.owner, self.published_exp_id) + rights_manager.publish_collection(self.owner, self.published_col_id) + + def test_guest_can_access_published_collection(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_play_collection/%s' % self.published_col_id) + self.assertEqual(response['collection_id'], self.published_col_id) + + def test_guest_cannot_access_private_collection(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_play_collection/%s' % self.private_col_id, + expected_status_int=404) + + def test_moderator_can_access_private_collection(self) -> None: + self.login(self.MODERATOR_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_play_collection/%s' % self.private_col_id) + self.assertEqual(response['collection_id'], self.private_col_id) + self.logout() + + def test_owner_can_access_private_collection(self) -> None: + self.login(self.OWNER_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_play_collection/%s' % self.private_col_id) + self.assertEqual(response['collection_id'], self.private_col_id) + self.logout() + + def test_logged_in_user_cannot_access_not_owned_private_collection( + self + ) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_play_collection/%s' % self.private_col_id, + expected_status_int=404) + self.logout() + + def test_cannot_access_collection_with_invalid_collection_id(self) -> None: + self.login(self.OWNER_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_play_collection/invalid_collection_id', + expected_status_int=404) + self.logout() + + +class EditCollectionDecoratorTests(test_utils.GenericTestBase): + """Tests for can_edit_collection decorator.""" + + user_email = 'user@example.com' + username = 'user' + 
published_col_id = 'col_id_1' + private_col_id = 'col_id_2' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'collection_id': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_edit_collection + def get(self, collection_id: str) -> None: + self.render_json({'collection_id': collection_id}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) + self.signup(self.user_email, self.username) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + self.set_moderators([self.MODERATOR_USERNAME]) + self.set_collection_editors([self.OWNER_USERNAME]) + self.owner = user_services.get_user_actions_info(self.owner_id) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_edit_collection/', self.MockHandler)], + debug=feconf.DEBUG, + )) + self.save_new_valid_collection( + self.published_col_id, self.owner_id, + exploration_id=self.published_col_id) + self.save_new_valid_collection( + self.private_col_id, self.owner_id, + exploration_id=self.private_col_id) + rights_manager.publish_collection(self.owner, self.published_col_id) + + def test_cannot_edit_collection_with_invalid_collection_id(self) -> None: + self.login(self.OWNER_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_edit_collection/invalid_col_id', expected_status_int=404) + self.logout() + + def test_guest_cannot_edit_collection_via_json_handler(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_edit_collection/%s' % self.published_col_id, + 
expected_status_int=401) + + def test_guest_is_redirected_when_using_html_handler(self) -> None: + with self.swap( + self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE', + feconf.HANDLER_TYPE_HTML): + response = self.mock_testapp.get( + '/mock_edit_collection/%s' % self.published_col_id, + expect_errors=True) + self.assertEqual(response.status_int, 302) + + def test_normal_user_cannot_edit_collection(self) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_edit_collection/%s' % self.private_col_id, + expected_status_int=401) + self.logout() + + def test_owner_can_edit_owned_collection(self) -> None: + self.login(self.OWNER_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_edit_collection/%s' % self.private_col_id) + self.assertEqual(response['collection_id'], self.private_col_id) + self.logout() + + def test_moderator_can_edit_private_collection(self) -> None: + self.login(self.MODERATOR_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_edit_collection/%s' % self.private_col_id) + + self.assertEqual(response['collection_id'], self.private_col_id) + self.logout() + + def test_moderator_can_edit_public_collection(self) -> None: + self.login(self.MODERATOR_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_edit_collection/%s' % self.published_col_id) + self.assertEqual(response['collection_id'], self.published_col_id) + self.logout() + + def test_admin_can_edit_any_private_collection(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_edit_collection/%s' % self.private_col_id) + self.assertEqual(response['collection_id'], self.private_col_id) self.logout() class ClassroomExistDecoratorTests(test_utils.GenericTestBase): """Tests for does_classroom_exist decorator""" - 
class MockDataHandler(base.BaseHandler): + class MockDataHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'classroom_url_fragment': { @@ -364,15 +947,13 @@ class MockDataHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = { - 'GET': {} - } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.does_classroom_exist - def get(self, _): + def get(self, _: str) -> None: self.render_json({'success': True}) - class MockPageHandler(base.BaseHandler): + class MockPageHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): URL_PATH_ARGS_SCHEMAS = { 'classroom_url_fragment': { 'schema': { @@ -380,16 +961,14 @@ class MockPageHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = { - 'GET': {} - } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.does_classroom_exist - def get(self, _): + def get(self, _: str) -> None: self.render_json('oppia-root.mainpage.html') - def setUp(self): - super(ClassroomExistDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup( self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) @@ -416,17 +995,18 @@ def setUp(self): debug=feconf.DEBUG )) - def test_any_user_can_access_a_valid_classroom(self): + def test_any_user_can_access_a_valid_classroom(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_classroom_data/math', expected_status_int=200) def test_redirects_user_to_default_classroom_if_given_not_available( - self): + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_classroom_data/invalid', expected_status_int=404) - def test_raises_error_if_return_type_is_not_json(self): + def test_raises_error_if_return_type_is_not_json(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_html_response( 
'/mock_classroom_page/invalid', expected_status_int=500) @@ -438,17 +1018,17 @@ class CreateExplorationDecoratorTests(test_utils.GenericTestBase): username = 'banneduser' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_create_exploration - def get(self): + def get(self) -> None: self.render_json({'success': True}) - def setUp(self): - super(CreateExplorationDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.user_email, self.username) self.mark_user_banned(self.username) @@ -457,24 +1037,24 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_banned_user_cannot_create_exploration(self): + def test_banned_user_cannot_create_exploration(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/create', expected_status_int=401) self.logout() - def test_normal_user_can_create_exploration(self): + def test_normal_user_can_create_exploration(self) -> None: self.login(self.EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/create') - self.assertEqual(response['success'], True) + self.assertTrue(response['success']) self.logout() - def test_guest_cannot_create_exploration_via_json_handler(self): + def test_guest_cannot_create_exploration_via_json_handler(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/create', expected_status_int=401) - def test_guest_is_redirected_when_using_html_handler(self): + def test_guest_is_redirected_when_using_html_handler(self) -> None: with self.swap( self.MockHandler, 
'GET_HANDLER_ERROR_RETURN_TYPE', feconf.HANDLER_TYPE_HTML): @@ -488,17 +1068,17 @@ class CreateCollectionDecoratorTests(test_utils.GenericTestBase): username = 'collectioneditor' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_create_collection - def get(self): + def get(self) -> None: self.render_json({'success': True}) - def setUp(self): - super(CreateCollectionDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.user_email, self.username) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -509,28 +1089,28 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_guest_cannot_create_collection_via_json_handler(self): + def test_guest_cannot_create_collection_via_json_handler(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/create', expected_status_int=401) - def test_guest_is_redirected_when_using_html_handler(self): + def test_guest_is_redirected_when_using_html_handler(self) -> None: with self.swap( self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE', feconf.HANDLER_TYPE_HTML): response = self.mock_testapp.get('/mock/create', expect_errors=True) self.assertEqual(response.status_int, 302) - def test_normal_user_cannot_create_collection(self): + def test_normal_user_cannot_create_collection(self) -> None: self.login(self.EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/create', expected_status_int=401) self.logout() - def test_collection_editor_can_create_collection(self): + def test_collection_editor_can_create_collection(self) -> None: 
self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/create') - self.assertEqual(response['success'], True) + self.assertTrue(response['success']) self.logout() @@ -540,17 +1120,17 @@ class AccessCreatorDashboardTests(test_utils.GenericTestBase): username = 'banneduser' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_creator_dashboard - def get(self): + def get(self) -> None: self.render_json({'success': True}) - def setUp(self): - super(AccessCreatorDashboardTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.user_email, self.username) self.mark_user_banned(self.username) @@ -559,17 +1139,24 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_banned_user_cannot_access_editor_dashboard(self): + def test_banned_user_cannot_access_editor_dashboard(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/access', expected_status_int=401) self.logout() - def test_normal_user_can_access_editor_dashboard(self): + def test_normal_user_can_access_editor_dashboard(self) -> None: self.login(self.EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/access') - self.assertEqual(response['success'], True) + self.assertTrue(response['success']) + self.logout() + + def test_guest_user_cannot_access_editor_dashboard(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/access', expected_status_int=401) + error_msg = 'You must be logged in to 
access this resource.' + self.assertEqual(response['error'], error_msg) class CommentOnFeedbackThreadTests(test_utils.GenericTestBase): @@ -580,7 +1167,7 @@ class CommentOnFeedbackThreadTests(test_utils.GenericTestBase): viewer_username = 'viewer' viewer_email = 'viewer@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'thread_id': { @@ -589,14 +1176,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_comment_on_feedback_thread - def get(self, thread_id): + def get(self, thread_id: str) -> None: self.render_json({'thread_id': thread_id}) - def setUp(self): - super(CommentOnFeedbackThreadTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -615,10 +1202,11 @@ def setUp(self): self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) - rights_manager.publish_exploration(self.owner, self.published_exp_id) - def test_can_not_comment_on_feedback_threads_with_disabled_exp_id(self): + def test_cannot_comment_on_feedback_threads_with_disabled_exp_id( + self + ) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -627,7 +1215,9 @@ def test_can_not_comment_on_feedback_threads_with_disabled_exp_id(self): expected_status_int=404) self.logout() - def test_viewer_cannot_comment_on_feedback_for_private_exploration(self): + def test_viewer_cannot_comment_on_feedback_for_private_exploration( + self + ) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = 
self.get_json( @@ -638,7 +1228,9 @@ def test_viewer_cannot_comment_on_feedback_for_private_exploration(self): 'exploration feedback.') self.logout() - def test_can_not_comment_on_feedback_threads_with_invalid_thread_id(self): + def test_cannot_comment_on_feedback_threads_with_invalid_thread_id( + self + ) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -647,7 +1239,9 @@ def test_can_not_comment_on_feedback_threads_with_invalid_thread_id(self): self.assertEqual(response['error'], 'Not a valid thread id.') self.logout() - def test_guest_cannot_comment_on_feedback_threads_via_json_handler(self): + def test_guest_cannot_comment_on_feedback_threads_via_json_handler( + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_comment_on_feedback_thread/exploration.%s.thread1' @@ -656,7 +1250,7 @@ def test_guest_cannot_comment_on_feedback_threads_via_json_handler(self): '/mock_comment_on_feedback_thread/exploration.%s.thread1' % (self.published_exp_id), expected_status_int=401) - def test_guest_is_redirected_when_using_html_handler(self): + def test_guest_is_redirected_when_using_html_handler(self) -> None: with self.swap( self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE', feconf.HANDLER_TYPE_HTML): @@ -669,7 +1263,9 @@ def test_guest_is_redirected_when_using_html_handler(self): % (self.published_exp_id), expect_errors=True) self.assertEqual(response.status_int, 302) - def test_owner_can_comment_on_feedback_for_private_exploration(self): + def test_owner_can_comment_on_feedback_for_private_exploration( + self + ) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -677,7 +1273,9 @@ def test_owner_can_comment_on_feedback_for_private_exploration(self): % (self.private_exp_id)) self.logout() - def test_moderator_can_comment_on_feeback_for_public_exploration(self): + def 
test_moderator_can_comment_on_feeback_for_public_exploration( + self + ) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -685,7 +1283,9 @@ def test_moderator_can_comment_on_feeback_for_public_exploration(self): % (self.published_exp_id)) self.logout() - def test_moderator_can_comment_on_feeback_for_private_exploration(self): + def test_moderator_can_comment_on_feeback_for_private_exploration( + self + ) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -702,7 +1302,7 @@ class CreateFeedbackThreadTests(test_utils.GenericTestBase): viewer_username = 'viewer' viewer_email = 'viewer@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -711,14 +1311,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_create_feedback_thread - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(CreateFeedbackThreadTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -737,16 +1337,17 @@ def setUp(self): self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) - rights_manager.publish_exploration(self.owner, self.published_exp_id) - def test_can_not_create_feedback_threads_with_disabled_exp_id(self): + def test_cannot_create_feedback_threads_with_disabled_exp_id(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): 
self.get_json( '/mock_create_feedback_thread/%s' % (feconf.DISABLED_EXPLORATION_IDS[0]), expected_status_int=404) - def test_viewer_cannot_create_feedback_for_private_exploration(self): + def test_viewer_cannot_create_feedback_for_private_exploration( + self + ) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -757,26 +1358,28 @@ def test_viewer_cannot_create_feedback_for_private_exploration(self): 'exploration feedback.') self.logout() - def test_guest_can_create_feedback_threads_for_public_exploration(self): + def test_guest_can_create_feedback_threads_for_public_exploration( + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_create_feedback_thread/%s' % self.published_exp_id) - def test_owner_cannot_create_feedback_for_private_exploration(self): + def test_owner_cannot_create_feedback_for_private_exploration(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_create_feedback_thread/%s' % self.private_exp_id) self.logout() - def test_moderator_can_create_feeback_for_public_exploration(self): + def test_moderator_can_create_feeback_for_public_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_create_feedback_thread/%s' % self.published_exp_id) self.logout() - def test_moderator_can_create_feeback_for_private_exploration(self): + def test_moderator_can_create_feeback_for_private_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -792,7 +1395,7 @@ class ViewFeedbackThreadTests(test_utils.GenericTestBase): viewer_username = 'viewer' viewer_email = 'viewer@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON 
URL_PATH_ARGS_SCHEMAS = { 'thread_id': { @@ -801,14 +1404,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_view_feedback_thread - def get(self, thread_id): + def get(self, thread_id: str) -> None: self.render_json({'thread_id': thread_id}) - def setUp(self): - super(ViewFeedbackThreadTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -838,13 +1441,13 @@ def setUp(self): rights_manager.publish_exploration(self.owner, self.published_exp_id) - def test_can_not_view_feedback_threads_with_disabled_exp_id(self): + def test_cannot_view_feedback_threads_with_disabled_exp_id(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_view_feedback_thread/%s' % self.disabled_exp_thread_id, expected_status_int=404) - def test_viewer_cannot_view_feedback_for_private_exploration(self): + def test_viewer_cannot_view_feedback_for_private_exploration(self) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -855,7 +1458,9 @@ def test_viewer_cannot_view_feedback_for_private_exploration(self): 'exploration feedback.') self.logout() - def test_viewer_cannot_view_feedback_threads_with_invalid_thread_id(self): + def test_viewer_cannot_view_feedback_threads_with_invalid_thread_id( + self + ) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -864,33 +1469,35 @@ def test_viewer_cannot_view_feedback_threads_with_invalid_thread_id(self): self.assertEqual(response['error'], 'Not a valid thread id.') self.logout() - def test_viewer_can_view_non_exploration_related_feedback(self): + def 
test_viewer_can_view_non_exploration_related_feedback(self) -> None: self.login(self.viewer_email) skill_thread_id = feedback_services.create_thread( 'skill', 'skillid1', None, 'unused subject', 'unused text') with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_view_feedback_thread/%s' % skill_thread_id) - def test_guest_can_view_feedback_threads_for_public_exploration(self): + def test_guest_can_view_feedback_threads_for_public_exploration( + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_view_feedback_thread/%s' % self.public_exp_thread_id) - def test_owner_cannot_view_feedback_for_private_exploration(self): + def test_owner_cannot_view_feedback_for_private_exploration(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_view_feedback_thread/%s' % self.private_exp_thread_id) self.logout() - def test_moderator_can_view_feeback_for_public_exploration(self): + def test_moderator_can_view_feeback_for_public_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_view_feedback_thread/%s' % self.public_exp_thread_id) self.logout() - def test_moderator_can_view_feeback_for_private_exploration(self): + def test_moderator_can_view_feeback_for_private_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -903,7 +1510,7 @@ class ManageEmailDashboardTests(test_utils.GenericTestBase): query_id = 'query_id' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'query_id': { @@ -913,22 +1520,21 @@ class MockHandler(base.BaseHandler): 'default_value': None } } - HANDLER_ARGS_SCHEMAS = { + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = { 'GET': {}, 'PUT': {} } 
@acl_decorators.can_manage_email_dashboard - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) @acl_decorators.can_manage_email_dashboard - def put(self, query_id): + def put(self, query_id: str) -> None: return self.render_json({'query_id': query_id}) - def setUp(self): - - super(ManageEmailDashboardTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.set_moderators([self.MODERATOR_USERNAME]) @@ -940,13 +1546,13 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_moderator_cannot_access_email_dashboard(self): + def test_moderator_cannot_access_email_dashboard(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) self.logout() - def test_super_admin_can_access_email_dashboard(self): + def test_super_admin_can_access_email_dashboard(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') @@ -957,6 +1563,12 @@ def test_super_admin_can_access_email_dashboard(self): self.assertEqual(response.status_int, 200) self.logout() + def test_error_when_user_is_not_logged_in(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + class RateExplorationTests(test_utils.GenericTestBase): """Tests for can_rate_exploration decorator.""" @@ -965,7 +1577,7 @@ class RateExplorationTests(test_utils.GenericTestBase): user_email = 'user@example.com' exp_id = 'exp_id' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -974,26 +1586,26 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_rate_exploration - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(RateExplorationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/', self.MockHandler)], debug=feconf.DEBUG, )) - def test_guest_cannot_give_rating(self): + def test_guest_cannot_give_rating(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.exp_id, expected_status_int=401) - def test_normal_user_can_give_rating(self): + def test_normal_user_can_give_rating(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.exp_id) @@ -1002,20 +1614,22 @@ def test_normal_user_can_give_rating(self): class AccessModeratorPageTests(test_utils.GenericTestBase): + """Tests for can_access_moderator_page decorator.""" + username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - 
HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_moderator_page - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(AccessModeratorPageTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) self.set_moderators([self.MODERATOR_USERNAME]) @@ -1024,19 +1638,25 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_normal_user_cannot_access_moderator_page(self): + def test_normal_user_cannot_access_moderator_page(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) self.logout() - def test_moderator_can_access_moderator_page(self): + def test_moderator_can_access_moderator_page(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') self.assertEqual(response['success'], 1) self.logout() + def test_guest_cannot_access_moderator_page(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + class FlagExplorationTests(test_utils.GenericTestBase): """Tests for can_flag_exploration decorator.""" @@ -1045,7 +1665,7 @@ class FlagExplorationTests(test_utils.GenericTestBase): user_email = 'user@example.com' exp_id = 'exp_id' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -1054,26 +1674,26 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_flag_exploration - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(FlagExplorationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/', self.MockHandler)], debug=feconf.DEBUG, )) - def test_guest_cannot_flag_exploration(self): + def test_guest_cannot_flag_exploration(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.exp_id, expected_status_int=401) - def test_normal_user_can_flag_exploration(self): + def test_normal_user_can_flag_exploration(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.exp_id) @@ -1087,51 +1707,52 @@ class SubscriptionToUsersTests(test_utils.GenericTestBase): username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + 
HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_subscribe_to_users - def get(self): + def get(self) -> None: self.render_json({'success': True}) - def setUp(self): - super(SubscriptionToUsersTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/', self.MockHandler)], debug=feconf.DEBUG, )) - def test_guest_cannot_subscribe_to_users(self): + def test_guest_cannot_subscribe_to_users(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) - def test_normal_user_can_subscribe_to_users(self): + def test_normal_user_can_subscribe_to_users(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') - self.assertEqual(response['success'], True) + self.assertTrue(response['success']) self.logout() class SendModeratorEmailsTests(test_utils.GenericTestBase): + """Tests for can_send_moderator_emails decorator.""" username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_send_moderator_emails - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(SendModeratorEmailsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) self.set_moderators([self.MODERATOR_USERNAME]) @@ -1140,36 +1761,43 @@ def setUp(self): debug=feconf.DEBUG, )) - 
def test_normal_user_cannot_send_moderator_emails(self): + def test_normal_user_cannot_send_moderator_emails(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) self.logout() - def test_moderator_can_send_moderator_emails(self): + def test_moderator_can_send_moderator_emails(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') self.assertEqual(response['success'], 1) self.logout() + def test_guest_cannot_send_moderator_emails(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' + self.assertEqual(response['error'], error_msg) + class CanAccessReleaseCoordinatorPageDecoratorTests(test_utils.GenericTestBase): + """Tests for can_access_release_coordinator_page decorator.""" username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_release_coordinator_page - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(CanAccessReleaseCoordinatorPageDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(feconf.SYSTEM_EMAIL_ADDRESS, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.user_email, self.username) @@ -1185,7 +1813,7 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_normal_user_cannot_access_release_coordinator_page(self): + def 
test_normal_user_cannot_access_release_coordinator_page(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1196,7 +1824,7 @@ def test_normal_user_cannot_access_release_coordinator_page(self): 'You do not have credentials to access release coordinator page.') self.logout() - def test_guest_user_cannot_access_release_coordinator_page(self): + def test_guest_user_cannot_access_release_coordinator_page(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/release-coordinator', expected_status_int=401) @@ -1206,7 +1834,7 @@ def test_guest_user_cannot_access_release_coordinator_page(self): 'You must be logged in to access this resource.') self.logout() - def test_super_admin_cannot_access_release_coordinator_page(self): + def test_super_admin_cannot_access_release_coordinator_page(self) -> None: self.login(feconf.SYSTEM_EMAIL_ADDRESS) with self.swap(self, 'testapp', self.mock_testapp): @@ -1218,7 +1846,9 @@ def test_super_admin_cannot_access_release_coordinator_page(self): 'You do not have credentials to access release coordinator page.') self.logout() - def test_release_coordinator_can_access_release_coordinator_page(self): + def test_release_coordinator_can_access_release_coordinator_page( + self + ) -> None: self.login(self.RELEASE_COORDINATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1229,23 +1859,22 @@ def test_release_coordinator_can_access_release_coordinator_page(self): class CanAccessBlogAdminPageDecoratorTests(test_utils.GenericTestBase): + """Tests for can_access_blog_admin_page decorator.""" username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = { - 'GET': {}, - } + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + 
HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_blog_admin_page - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(CanAccessBlogAdminPageDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME) @@ -1259,7 +1888,7 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_normal_user_cannot_access_blog_admin_page(self): + def test_normal_user_cannot_access_blog_admin_page(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1270,7 +1899,7 @@ def test_normal_user_cannot_access_blog_admin_page(self): 'You do not have credentials to access blog admin page.') self.logout() - def test_guest_user_cannot_access_blog_admin_page(self): + def test_guest_user_cannot_access_blog_admin_page(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/blog-admin', expected_status_int=401) @@ -1280,7 +1909,7 @@ def test_guest_user_cannot_access_blog_admin_page(self): 'You must be logged in to access this resource.') self.logout() - def test_blog_post_editor_cannot_access_blog_admin_page(self): + def test_blog_post_editor_cannot_access_blog_admin_page(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1291,7 +1920,7 @@ def test_blog_post_editor_cannot_access_blog_admin_page(self): 'You do not have credentials to access blog admin page.') self.logout() - def test_blog_admin_can_access_blog_admin_page(self): + def test_blog_admin_can_access_blog_admin_page(self) -> None: self.login(self.BLOG_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1302,23 +1931,22 @@ def 
test_blog_admin_can_access_blog_admin_page(self): class CanManageBlogPostEditorsDecoratorTests(test_utils.GenericTestBase): + """Tests for can_manage_blog_post_editors decorator.""" username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = { - 'GET': {}, - } + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_manage_blog_post_editors - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(CanManageBlogPostEditorsDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME) self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) @@ -1333,7 +1961,7 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_normal_user_cannot_manage_blog_post_editors(self): + def test_normal_user_cannot_manage_blog_post_editors(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1344,7 +1972,7 @@ def test_normal_user_cannot_manage_blog_post_editors(self): 'You do not have credentials to add or remove blog post editors.') self.logout() - def test_guest_user_cannot_manage_blog_post_editors(self): + def test_guest_user_cannot_manage_blog_post_editors(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/blogadminrolehandler', expected_status_int=401) @@ -1354,7 +1982,7 @@ def test_guest_user_cannot_manage_blog_post_editors(self): 'You must be logged in to access this resource.') self.logout() - def test_blog_post_editors_cannot_manage_blog_post_editors(self): + def 
test_blog_post_editors_cannot_manage_blog_post_editors(self) -> None: self.login(self.BLOG_EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1365,7 +1993,7 @@ def test_blog_post_editors_cannot_manage_blog_post_editors(self): 'You do not have credentials to add or remove blog post editors.') self.logout() - def test_blog_admin_can_manage_blog_editors(self): + def test_blog_admin_can_manage_blog_editors(self) -> None: self.login(self.BLOG_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1376,23 +2004,22 @@ def test_blog_admin_can_manage_blog_editors(self): class CanAccessBlogDashboardDecoratorTests(test_utils.GenericTestBase): + """Tests for can_access_blog_dashboard decorator.""" username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = { - 'GET': {}, - } + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_blog_dashboard - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(CanAccessBlogDashboardDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) @@ -1409,7 +2036,7 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_normal_user_cannot_access_blog_dashboard(self): + def test_normal_user_cannot_access_blog_dashboard(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1420,7 +2047,7 @@ def test_normal_user_cannot_access_blog_dashboard(self): 'You do not have credentials to access blog dashboard page.') self.logout() - def 
test_guest_user_cannot_access_blog_dashboard(self): + def test_guest_user_cannot_access_blog_dashboard(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/blog-dashboard', expected_status_int=401) @@ -1430,7 +2057,7 @@ def test_guest_user_cannot_access_blog_dashboard(self): 'You must be logged in to access this resource.') self.logout() - def test_blog_editors_can_access_blog_dashboard(self): + def test_blog_editors_can_access_blog_dashboard(self) -> None: self.login(self.BLOG_EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1439,7 +2066,7 @@ def test_blog_editors_can_access_blog_dashboard(self): self.assertEqual(response['success'], 1) self.logout() - def test_blog_admins_can_access_blog_dashboard(self): + def test_blog_admins_can_access_blog_dashboard(self) -> None: self.login(self.BLOG_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1455,7 +2082,7 @@ class CanDeleteBlogPostTests(test_utils.GenericTestBase): username = 'userone' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'blog_post_id': { @@ -1464,16 +2091,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = { - 'GET': {}, - } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_delete_blog_post - def get(self, blog_post_id): + def get(self, blog_post_id: str) -> None: self.render_json({'blog_id': blog_post_id}) - def setUp(self): - super(CanDeleteBlogPostTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) @@ -1496,7 +2121,7 @@ def setUp(self): blog_post = blog_services.create_new_blog_post(self.blog_editor_id) self.blog_post_id = blog_post.id - def 
test_guest_can_not_delete_blog_post(self): + def test_guest_cannot_delete_blog_post(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_delete_blog_post/%s' % self.blog_post_id, @@ -1505,7 +2130,7 @@ def test_guest_can_not_delete_blog_post(self): response['error'], 'You must be logged in to access this resource.') - def test_blog_editor_can_delete_owned_blog_post(self): + def test_blog_editor_can_delete_owned_blog_post(self) -> None: self.login(self.BLOG_EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1513,7 +2138,7 @@ def test_blog_editor_can_delete_owned_blog_post(self): self.assertEqual(response['blog_id'], self.blog_post_id) self.logout() - def test_blog_admin_can_delete_any_blog_post(self): + def test_blog_admin_can_delete_any_blog_post(self) -> None: self.login(self.BLOG_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1521,7 +2146,7 @@ def test_blog_admin_can_delete_any_blog_post(self): self.assertEqual(response['blog_id'], self.blog_post_id) self.logout() - def test_blog_editor_cannot_delete_not_owned_blog_post(self): + def test_blog_editor_cannot_delete_not_owned_blog_post(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1533,6 +2158,22 @@ def test_blog_editor_cannot_delete_not_owned_blog_post(self): % (self.user_id, self.blog_post_id)) self.logout() + def test_error_with_invalid_blog_post_id(self) -> None: + self.login(self.user_email) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + blog_post_rights_swap = self.swap_to_always_return( + blog_services, 'get_blog_post_rights', value=None) + with testapp_swap, blog_post_rights_swap: + response = self.get_json( + '/mock_delete_blog_post/%s' % self.blog_post_id, + expected_status_int=404) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_delete_blog_post/%s.' 
% self.blog_post_id + ) + self.assertEqual(response['error'], error_msg) + self.logout() + class CanEditBlogPostTests(test_utils.GenericTestBase): """Tests for can_edit_blog_post decorator.""" @@ -1540,7 +2181,7 @@ class CanEditBlogPostTests(test_utils.GenericTestBase): username = 'userone' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'blog_post_id': { @@ -1549,16 +2190,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = { - 'GET': {}, - } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_edit_blog_post - def get(self, blog_post_id): + def get(self, blog_post_id: str) -> None: self.render_json({'blog_id': blog_post_id}) - def setUp(self): - super(CanEditBlogPostTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup( self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME) @@ -1576,13 +2215,13 @@ def setUp(self): debug=feconf.DEBUG, )) - self.blog_editor_id = ( - self.get_user_id_from_email(self.BLOG_EDITOR_EMAIL)) + self.blog_editor_id = self.get_user_id_from_email( + self.BLOG_EDITOR_EMAIL) self.user_id = self.get_user_id_from_email(self.user_email) blog_post = blog_services.create_new_blog_post(self.blog_editor_id) self.blog_post_id = blog_post.id - def test_guest_can_not_edit_blog_post(self): + def test_guest_cannot_edit_blog_post(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_blog_post/%s' % self.blog_post_id, @@ -1591,7 +2230,7 @@ def test_guest_can_not_edit_blog_post(self): response['error'], 'You must be logged in to access this resource.') - def test_blog_editor_can_edit_owned_blog_post(self): + def test_blog_editor_can_edit_owned_blog_post(self) -> None: self.login(self.BLOG_EDITOR_EMAIL) 
with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1599,7 +2238,7 @@ def test_blog_editor_can_edit_owned_blog_post(self): self.assertEqual(response['blog_id'], self.blog_post_id) self.logout() - def test_blog_admin_can_edit_any_blog_post(self): + def test_blog_admin_can_edit_any_blog_post(self) -> None: self.login(self.BLOG_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1607,7 +2246,7 @@ def test_blog_admin_can_edit_any_blog_post(self): self.assertEqual(response['blog_id'], self.blog_post_id) self.logout() - def test_blog_editor_cannot_edit_not_owned_blog_post(self): + def test_blog_editor_cannot_edit_not_owned_blog_post(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1619,23 +2258,40 @@ def test_blog_editor_cannot_edit_not_owned_blog_post(self): % (self.user_id, self.blog_post_id)) self.logout() + def test_error_with_invalid_blog_post_id(self) -> None: + self.login(self.user_email) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + blog_post_rights_swap = self.swap_to_always_return( + blog_services, 'get_blog_post_rights', value=None) + with testapp_swap, blog_post_rights_swap: + response = self.get_json( + '/mock_edit_blog_post/%s' % self.blog_post_id, + expected_status_int=404) + error_msg = ( + 'Could not find the page ' + 'http://localhost/mock_edit_blog_post/%s.' 
% self.blog_post_id + ) + self.assertEqual(response['error'], error_msg) + self.logout() + class CanRunAnyJobDecoratorTests(test_utils.GenericTestBase): + """Tests for can_run_any_job decorator.""" username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_run_any_job - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(CanRunAnyJobDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(feconf.SYSTEM_EMAIL_ADDRESS, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.user_email, self.username) @@ -1651,7 +2307,7 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_normal_user_cannot_access_release_coordinator_page(self): + def test_normal_user_cannot_access_release_coordinator_page(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/run-anny-job', expected_status_int=401) @@ -1661,7 +2317,7 @@ def test_normal_user_cannot_access_release_coordinator_page(self): 'You do not have credentials to run jobs.') self.logout() - def test_guest_user_cannot_access_release_coordinator_page(self): + def test_guest_user_cannot_access_release_coordinator_page(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/run-anny-job', expected_status_int=401) @@ -1670,7 +2326,7 @@ def test_guest_user_cannot_access_release_coordinator_page(self): 'You must be logged in to access this resource.') self.logout() - def test_super_admin_cannot_access_release_coordinator_page(self): + def 
test_super_admin_cannot_access_release_coordinator_page(self) -> None: self.login(feconf.SYSTEM_EMAIL_ADDRESS) with self.swap(self, 'testapp', self.mock_testapp): @@ -1681,7 +2337,7 @@ def test_super_admin_cannot_access_release_coordinator_page(self): 'You do not have credentials to run jobs.') self.logout() - def test_release_coordinator_can_run_any_job(self): + def test_release_coordinator_can_run_any_job(self) -> None: self.login(self.RELEASE_COORDINATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1692,21 +2348,22 @@ def test_release_coordinator_can_run_any_job(self): class CanManageMemcacheDecoratorTests(test_utils.GenericTestBase): + """Tests for can_manage_memcache decorator.""" username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_manage_memcache - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(CanManageMemcacheDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(feconf.SYSTEM_EMAIL_ADDRESS, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.user_email, self.username) @@ -1722,7 +2379,7 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_normal_user_cannot_access_release_coordinator_page(self): + def test_normal_user_cannot_access_release_coordinator_page(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1733,7 +2390,7 @@ def test_normal_user_cannot_access_release_coordinator_page(self): 'You do not have credentials to manage memcache.') self.logout() - def 
test_guest_user_cannot_access_release_coordinator_page(self): + def test_guest_user_cannot_access_release_coordinator_page(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/manage-memcache', expected_status_int=401) @@ -1743,7 +2400,7 @@ def test_guest_user_cannot_access_release_coordinator_page(self): 'You must be logged in to access this resource.') self.logout() - def test_super_admin_cannot_access_release_coordinator_page(self): + def test_super_admin_cannot_access_release_coordinator_page(self) -> None: self.login(feconf.SYSTEM_EMAIL_ADDRESS) with self.swap(self, 'testapp', self.mock_testapp): @@ -1755,7 +2412,7 @@ def test_super_admin_cannot_access_release_coordinator_page(self): 'You do not have credentials to manage memcache.') self.logout() - def test_release_coordinator_can_run_any_job(self): + def test_release_coordinator_can_run_any_job(self) -> None: self.login(self.RELEASE_COORDINATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1766,15 +2423,16 @@ def test_release_coordinator_can_run_any_job(self): class CanManageContributorsRoleDecoratorTests(test_utils.GenericTestBase): + """Tests for can_manage_contributors_role decorator.""" username = 'user' user_email = 'user@example.com' - QUESTION_ADMIN_EMAIL = 'questionExpert@app.com' - QUESTION_ADMIN_USERNAME = 'questionExpert' - TRANSLATION_ADMIN_EMAIL = 'translatorExpert@app.com' - TRANSLATION_ADMIN_USERNAME = 'translationExpert' + QUESTION_ADMIN_EMAIL: Final = 'questionExpert@app.com' + QUESTION_ADMIN_USERNAME: Final = 'questionExpert' + TRANSLATION_ADMIN_EMAIL: Final = 'translatorExpert@app.com' + TRANSLATION_ADMIN_USERNAME: Final = 'translationExpert' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'category': { @@ -1783,16 +2441,14 @@ class MockHandler(base.BaseHandler): } } } - 
HANDLER_ARGS_SCHEMAS = { - 'GET': {} - } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_manage_contributors_role - def get(self, unused_category): - return self.render_json({'success': 1}) + def get(self, unused_category: str) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(CanManageContributorsRoleDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.signup( @@ -1810,7 +2466,7 @@ def setUp(self): '/can_manage_contributors_role/', self.MockHandler) ], debug=feconf.DEBUG)) - def test_normal_user_cannot_access_release_coordinator_page(self): + def test_normal_user_cannot_access_release_coordinator_page(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -1822,7 +2478,7 @@ def test_normal_user_cannot_access_release_coordinator_page(self): 'You do not have credentials to modify contributor\'s role.') self.logout() - def test_guest_user_cannot_manage_contributors_role(self): + def test_guest_user_cannot_manage_contributors_role(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/can_manage_contributors_role/translation', @@ -1833,7 +2489,7 @@ def test_guest_user_cannot_manage_contributors_role(self): 'You must be logged in to access this resource.') self.logout() - def test_translation_admin_can_manage_translation_role(self): + def test_translation_admin_can_manage_translation_role(self) -> None: self.login(self.TRANSLATION_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1843,7 +2499,7 @@ def test_translation_admin_can_manage_translation_role(self): self.assertEqual(response['success'], 1) self.logout() - def test_translation_admin_cannot_manage_question_role(self): + def test_translation_admin_cannot_manage_question_role(self) -> None: self.login(self.TRANSLATION_ADMIN_EMAIL) with self.swap(self, 'testapp', 
self.mock_testapp): @@ -1856,7 +2512,7 @@ def test_translation_admin_cannot_manage_question_role(self): 'You do not have credentials to modify contributor\'s role.') self.logout() - def test_question_admin_can_manage_question_role(self): + def test_question_admin_can_manage_question_role(self) -> None: self.login(self.QUESTION_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1866,7 +2522,7 @@ def test_question_admin_can_manage_question_role(self): self.assertEqual(response['success'], 1) self.logout() - def test_question_admin_cannot_manage_translation_role(self): + def test_question_admin_cannot_manage_translation_role(self) -> None: self.login(self.QUESTION_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1879,7 +2535,7 @@ def test_question_admin_cannot_manage_translation_role(self): 'You do not have credentials to modify contributor\'s role.') self.logout() - def test_invalid_category_raise_error(self): + def test_invalid_category_raise_error(self) -> None: self.login(self.QUESTION_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): @@ -1892,21 +2548,22 @@ def test_invalid_category_raise_error(self): class DeleteAnyUserTests(test_utils.GenericTestBase): + """Tests for can_delete_any_user decorator.""" username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_delete_any_user - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) - def setUp(self): - super(DeleteAnyUserTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(feconf.SYSTEM_EMAIL_ADDRESS, 
self.CURRICULUM_ADMIN_USERNAME) self.signup(self.user_email, self.username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( @@ -1914,17 +2571,17 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_normal_user_cannot_delete_any_user(self): + def test_normal_user_cannot_delete_any_user(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) self.logout() - def test_not_logged_user_cannot_delete_any_user(self): + def test_not_logged_user_cannot_delete_any_user(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) - def test_primary_admin_can_delete_any_user(self): + def test_primary_admin_can_delete_any_user(self) -> None: self.login(feconf.SYSTEM_EMAIL_ADDRESS) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') @@ -1945,7 +2602,7 @@ class VoiceoverExplorationTests(test_utils.GenericTestBase): private_exp_id_1 = 'exp_3' private_exp_id_2 = 'exp_4' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -1954,14 +2611,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_voiceover_exploration - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(VoiceoverExplorationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -2001,28 +2658,28 @@ def setUp(self): self.voiceover_admin, self.published_exp_id_1, 
self.voice_artist_id, self.role) - def test_banned_user_cannot_voiceover_exploration(self): + def test_banned_user_cannot_voiceover_exploration(self) -> None: self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.private_exp_id_1, expected_status_int=401) self.logout() - def test_owner_can_voiceover_exploration(self): + def test_owner_can_voiceover_exploration(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id_1) self.assertEqual(response['exploration_id'], self.private_exp_id_1) self.logout() - def test_moderator_can_voiceover_public_exploration(self): + def test_moderator_can_voiceover_public_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.published_exp_id_1) self.assertEqual(response['exploration_id'], self.published_exp_id_1) self.logout() - def test_moderator_can_voiceover_private_exploration(self): + def test_moderator_can_voiceover_private_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id_1) @@ -2030,14 +2687,16 @@ def test_moderator_can_voiceover_private_exploration(self): self.assertEqual(response['exploration_id'], self.private_exp_id_1) self.logout() - def test_admin_can_voiceover_private_exploration(self): + def test_admin_can_voiceover_private_exploration(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id_1) self.assertEqual(response['exploration_id'], self.private_exp_id_1) self.logout() - def test_voice_artist_can_only_voiceover_assigned_public_exploration(self): + def test_voice_artist_can_only_voiceover_assigned_public_exploration( + self + ) -> 
None: self.login(self.VOICE_ARTIST_EMAIL) # Checking voice artist can voiceover assigned public exploration. with self.swap(self, 'testapp', self.mock_testapp): @@ -2051,22 +2710,45 @@ def test_voice_artist_can_only_voiceover_assigned_public_exploration(self): '/mock/%s' % self.published_exp_id_2, expected_status_int=401) self.logout() - def test_user_without_voice_artist_role_of_exploration_cannot_voiceover_public_exploration(self): # pylint: disable=line-too-long + def test_user_without_voice_artist_role_of_exploration_cannot_voiceover_public_exploration( # pylint: disable=line-too-long + self + ) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.published_exp_id_1, expected_status_int=401) self.logout() - def test_user_without_voice_artist_role_of_exploration_cannot_voiceover_private_exploration(self): # pylint: disable=line-too-long + def test_user_without_voice_artist_role_of_exploration_cannot_voiceover_private_exploration( # pylint: disable=line-too-long + self + ) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.private_exp_id_1, expected_status_int=401) self.logout() + def test_guest_cannot_voiceover_exploration(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock/%s' % self.private_exp_id_1, expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' + self.assertEqual(response['error'], error_msg) + + def test_error_with_invalid_voiceover_exploration_id(self) -> None: + self.login(self.user_email) + invalid_id = 'invalid' + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock/%s' % invalid_id, expected_status_int=404) + error_msg = ( + 'Could not find the page http://localhost/mock/%s.' 
% invalid_id) + self.assertEqual(response['error'], error_msg) + self.logout() + class VoiceArtistManagementTests(test_utils.GenericTestBase): + """Tests for can_add_voice_artist and can_remove_voice_artist decorator.""" role = rights_domain.ROLE_VOICE_ARTIST username = 'user' @@ -2078,7 +2760,7 @@ class VoiceArtistManagementTests(test_utils.GenericTestBase): private_exp_id_1 = 'exp_3' private_exp_id_2 = 'exp_4' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'entity_type': { @@ -2092,16 +2774,27 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'POST': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = { + 'POST': {}, + 'DELETE': {} + } + + @acl_decorators.can_add_voice_artist + def post(self, entity_type: str, entity_id: str) -> None: + self.render_json({ + 'entity_type': entity_type, + 'entity_id': entity_id + }) - @acl_decorators.can_manage_voice_artist - def post(self, entity_type, entity_id): + @acl_decorators.can_remove_voice_artist + def delete(self, entity_type: str, entity_id: str) -> None: self.render_json({ 'entity_type': entity_type, - 'entity_id': entity_id}) + 'entity_id': entity_id + }) - def setUp(self): - super(VoiceArtistManagementTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -2141,7 +2834,7 @@ def setUp(self): self.voiceover_admin, self.published_exp_id_1, self.voice_artist_id, self.role) - def test_voiceover_admin_can_manage_voice_artist_in_public_exp(self): + def test_voiceover_admin_can_add_voice_artist_to_public_exp(self) -> None: self.login(self.VOICEOVER_ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() with self.swap(self, 'testapp', self.mock_testapp): @@ -2150,57 
+2843,119 @@ def test_voiceover_admin_can_manage_voice_artist_in_public_exp(self): {}, csrf_token=csrf_token) self.logout() - def test_assigning_voice_artist_for_unsupported_entity_type_raise_400(self): + def test_voiceover_admin_can_remove_voice_artist_from_public_exp( + self + ) -> None: + self.login(self.VOICEOVER_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + self.delete_json( + '/mock/exploration/%s' % self.published_exp_id_1, {}) + self.logout() + + def test_adding_voice_artist_to_unsupported_entity_type_raises_400( + self + ) -> None: unsupported_entity_type = 'topic' self.login(self.VOICEOVER_ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() with self.swap(self, 'testapp', self.mock_testapp): response = self.post_json( - '/mock/%s/%s' % ( - unsupported_entity_type, self.published_exp_id_1), + '/mock/%s/abc' % unsupported_entity_type, {}, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'Unsupported entity_type: topic') self.logout() - def test_voiceover_admin_cannot_assign_voice_artist_in_private_exp(self): + def test_removing_voice_artist_from_unsupported_entity_type_raises_400( + self + ) -> None: + unsupported_entity_type = 'topic' + self.login(self.VOICEOVER_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.delete_json( + '/mock/%s/abc' % unsupported_entity_type, + {}, expected_status_int=400 + ) + self.assertEqual( + response['error'], + 'Unsupported entity_type: topic') + self.logout() + + def test_voiceover_admin_cannot_add_voice_artist_to_private_exp( + self + ) -> None: self.login(self.VOICEOVER_ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() with self.swap(self, 'testapp', self.mock_testapp): response = self.post_json( '/mock/exploration/%s' % self.private_exp_id_1, {}, - csrf_token=csrf_token, expected_status_int=401) + csrf_token=csrf_token, expected_status_int=400 + ) self.assertEqual( response['error'], - 'You do not have credentials to manage 
voice artists.') + 'Could not assign voice artist to private activity.') + self.logout() + + def test_voiceover_admin_can_remove_voice_artist_from_private_exp( + self + ) -> None: + self.login(self.VOICEOVER_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + self.delete_json('/mock/exploration/%s' % self.private_exp_id_1, {}) self.logout() - def test_owner_cannot_assign_voice_artist_in_public_exp(self): + def test_owner_cannot_add_voice_artist_to_public_exp(self) -> None: self.login(self.OWNER_EMAIL) csrf_token = self.get_new_csrf_token() with self.swap(self, 'testapp', self.mock_testapp): response = self.post_json( - '/mock/exploration/%s' % self.private_exp_id_1, {}, + '/mock/exploration/%s' % self.published_exp_id_1, {}, csrf_token=csrf_token, expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to manage voice artists.') self.logout() - def test_random_user_cannot_assign_voice_artist_in_public_exp(self): + def test_owner_cannot_remove_voice_artist_in_public_exp(self) -> None: + self.login(self.OWNER_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.delete_json( + '/mock/exploration/%s' % self.private_exp_id_1, {}, + expected_status_int=401) + self.assertEqual( + response['error'], + 'You do not have credentials to manage voice artists.') + self.logout() + + def test_random_user_cannot_add_voice_artist_to_public_exp(self) -> None: self.login(self.user_email) csrf_token = self.get_new_csrf_token() with self.swap(self, 'testapp', self.mock_testapp): response = self.post_json( - '/mock/exploration/%s' % self.private_exp_id_1, {}, + '/mock/exploration/%s' % self.published_exp_id_1, {}, csrf_token=csrf_token, expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to manage voice artists.') self.logout() - def test_voiceover_admin_cannot_assign_voice_artist_in_invalid_exp(self): + def test_random_user_cannot_remove_voice_artist_from_public_exp( + 
self + ) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.delete_json( + '/mock/exploration/%s' % self.published_exp_id_1, {}, + expected_status_int=401) + self.assertEqual( + response['error'], + 'You do not have credentials to manage voice artists.') + self.logout() + + def test_voiceover_admin_cannot_add_voice_artist_to_invalid_exp( + self + ) -> None: self.login(self.VOICEOVER_ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() with self.swap(self, 'testapp', self.mock_testapp): @@ -2209,13 +2964,33 @@ def test_voiceover_admin_cannot_assign_voice_artist_in_invalid_exp(self): csrf_token=csrf_token, expected_status_int=404) self.logout() - def test_voiceover_admin_cannot_assign_voice_artist_without_login(self): + def test_voiceover_admin_cannot_remove_voice_artist_to_invalid_exp( + self + ) -> None: + self.login(self.VOICEOVER_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + self.delete_json( + '/mock/exploration/invalid_exp_id', {}, + expected_status_int=404) + self.logout() + + def test_voiceover_admin_cannot_add_voice_artist_without_login( + self + ) -> None: csrf_token = self.get_new_csrf_token() with self.swap(self, 'testapp', self.mock_testapp): self.post_json( '/mock/exploration/%s' % self.private_exp_id_1, {}, csrf_token=csrf_token, expected_status_int=401) + def test_voiceover_admin_cannot_remove_voice_artist_without_login( + self + ) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + self.delete_json( + '/mock/exploration/%s' % self.private_exp_id_1, {}, + expected_status_int=401) + class EditExplorationTests(test_utils.GenericTestBase): """Tests for can_edit_exploration decorator.""" @@ -2225,7 +3000,7 @@ class EditExplorationTests(test_utils.GenericTestBase): published_exp_id = 'exp_0' private_exp_id = 'exp_1' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = 
feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -2234,14 +3009,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_edit_exploration - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(EditExplorationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -2263,7 +3038,7 @@ def setUp(self): self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) - def test_can_not_edit_exploration_with_invalid_exp_id(self): + def test_cannot_edit_exploration_with_invalid_exp_id(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -2271,7 +3046,7 @@ def test_can_not_edit_exploration_with_invalid_exp_id(self): expected_status_int=404) self.logout() - def test_banned_user_cannot_edit_exploration(self): + def test_banned_user_cannot_edit_exploration(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -2279,7 +3054,7 @@ def test_banned_user_cannot_edit_exploration(self): expected_status_int=401) self.logout() - def test_owner_can_edit_exploration(self): + def test_owner_can_edit_exploration(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2287,7 +3062,7 @@ def test_owner_can_edit_exploration(self): self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_moderator_can_edit_public_exploration(self): + def test_moderator_can_edit_public_exploration(self) -> None: 
self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2295,7 +3070,7 @@ def test_moderator_can_edit_public_exploration(self): self.assertEqual(response['exploration_id'], self.published_exp_id) self.logout() - def test_moderator_can_edit_private_exploration(self): + def test_moderator_can_edit_private_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2304,7 +3079,7 @@ def test_moderator_can_edit_private_exploration(self): self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_admin_can_edit_private_exploration(self): + def test_admin_can_edit_private_exploration(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2312,6 +3087,14 @@ def test_admin_can_edit_private_exploration(self): self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() + def test_guest_cannot_cannot_edit_exploration(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_edit_exploration/%s' % self.private_exp_id, + expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + class ManageOwnAccountTests(test_utils.GenericTestBase): """Tests for decorator can_manage_own_account.""" @@ -2321,17 +3104,64 @@ class ManageOwnAccountTests(test_utils.GenericTestBase): username = 'user' user_email = 'user@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_manage_own_account - def get(self): - return self.render_json({'success': 1}) + def get(self) -> None: + self.render_json({'success': 1}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.banned_user_email, self.banned_user) + self.signup(self.user_email, self.username) + self.mark_user_banned(self.banned_user) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route('/mock/', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_banned_user_cannot_update_preferences(self) -> None: + self.login(self.banned_user_email) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json('/mock/', expected_status_int=401) + self.logout() + + def test_normal_user_can_manage_preferences(self) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/') + self.assertEqual(response['success'], 1) + self.logout() + + def test_guest_cannot_update_preferences(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + - def setUp(self): - super(ManageOwnAccountTests, self).setUp() +class AccessAdminPageTests(test_utils.GenericTestBase): + """Tests for decorator can_access_admin_page.""" + + banned_user = 'banneduser' + banned_user_email = 'banned@example.com' + username = 'user' + user_email = 'user@example.com' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_access_admin_page + def get(self) -> None: + self.render_json({'success': 1}) + + def setUp(self) -> None: + super().setUp() self.signup(self.banned_user_email, self.banned_user) self.signup(self.user_email, self.username) self.mark_user_banned(self.banned_user) @@ -2340,34 +3170,124 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_banned_user_cannot_update_preferences(self): + def test_banned_user_cannot_access_admin_page(self) -> None: self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) self.logout() - def test_normal_user_can_manage_preferences(self): + def test_normal_user_cannot_access_admin_page(self) -> None: + self.login(self.user_email) + user_id = user_services.get_user_id_from_username(self.username) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = '%s is not a super admin of this application' % user_id + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_super_admin_can_access_admin_page(self) -> None: + self.login(self.user_email, is_super_admin=True) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/') + self.assertEqual(response['success'], 1) + self.logout() + + def test_guest_cannot_access_admin_page(self) -> None: 
+ with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' + self.assertEqual(response['error'], error_msg) + + +class AccessContributorDashboardAdminPageTests(test_utils.GenericTestBase): + """Tests for decorator can_access_contributor_dashboard_admin_page.""" + + banned_user = 'banneduser' + banned_user_email = 'banned@example.com' + username = 'user' + user_email = 'user@example.com' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_access_contributor_dashboard_admin_page + def get(self) -> None: + self.render_json({'success': 1}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.banned_user_email, self.banned_user) + self.signup(self.user_email, self.username) + self.mark_user_banned(self.banned_user) + self.user = user_services.get_user_actions_info( + user_services.get_user_id_from_username(self.username)) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route('/mock/', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_banned_user_cannot_access_contributor_dashboard_admin_page( + self + ) -> None: + self.login(self.banned_user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = ( + 'You do not have credentials to access contributor dashboard ' + 'admin page.' 
+ ) + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_question_admin_can_access_contributor_dashboard_admin_page( + self + ) -> None: + self.add_user_role( + self.username, feconf.ROLE_ID_QUESTION_ADMIN) self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') self.assertEqual(response['success'], 1) self.logout() + def test_guest_cannot_access_contributor_dashboard_admin_page( + self + ) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' + self.assertEqual(response['error'], error_msg) + + def test_normal_user_cannot_access_contributor_dashboard_admin_page( + self + ) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = ( + 'You do not have credentials to access contributor dashboard ' + 'admin page.' 
+ ) + self.assertEqual(response['error'], error_msg) + self.logout() + class UploadExplorationTests(test_utils.GenericTestBase): """Tests for can_upload_exploration decorator.""" - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_upload_exploration - def get(self): - return self.render_json({}) + def get(self) -> None: + self.render_json({}) - def setUp(self): - super(UploadExplorationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( @@ -2375,13 +3295,13 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_super_admin_can_upload_explorations(self): + def test_super_admin_can_upload_explorations(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_upload_exploration/') self.logout() - def test_normal_user_cannot_upload_explorations(self): + def test_normal_user_cannot_upload_explorations(self) -> None: self.login(self.EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2391,7 +3311,7 @@ def test_normal_user_cannot_upload_explorations(self): 'You do not have credentials to upload explorations.') self.logout() - def test_guest_cannot_upload_explorations(self): + def test_guest_cannot_upload_explorations(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_upload_exploration/', expected_status_int=401) @@ -2406,7 +3326,7 @@ class DeleteExplorationTests(test_utils.GenericTestBase): 
private_exp_id = 'exp_0' published_exp_id = 'exp_1' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -2415,14 +3335,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_delete_exploration - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(DeleteExplorationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.set_moderators([self.MODERATOR_USERNAME]) @@ -2440,7 +3360,7 @@ def setUp(self): self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) - def test_guest_can_not_delete_exploration(self): + def test_guest_cannot_delete_exploration(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_delete_exploration/%s' % self.private_exp_id, @@ -2449,7 +3369,7 @@ def test_guest_can_not_delete_exploration(self): response['error'], 'You must be logged in to access this resource.') - def test_owner_can_delete_owned_private_exploration(self): + def test_owner_can_delete_owned_private_exploration(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2457,7 +3377,7 @@ def test_owner_can_delete_owned_private_exploration(self): self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_moderator_can_delete_published_exploration(self): + def test_moderator_can_delete_published_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', 
self.mock_testapp): response = self.get_json( @@ -2465,7 +3385,7 @@ def test_moderator_can_delete_published_exploration(self): self.assertEqual(response['exploration_id'], self.published_exp_id) self.logout() - def test_owner_cannot_delete_published_exploration(self): + def test_owner_cannot_delete_published_exploration(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2477,7 +3397,7 @@ def test_owner_cannot_delete_published_exploration(self): % (self.owner_id, self.published_exp_id)) self.logout() - def test_moderator_can_delete_private_exploration(self): + def test_moderator_can_delete_private_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2496,7 +3416,7 @@ class SuggestChangesToExplorationTests(test_utils.GenericTestBase): banned_user_email = 'banned@example.com' exploration_id = 'exp_id' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -2505,14 +3425,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_suggest_changes_to_exploration - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(SuggestChangesToExplorationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.signup(self.banned_user_email, self.banned_username) self.mark_user_banned(self.banned_username) @@ -2521,14 +3441,14 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_banned_user_cannot_suggest_changes(self): + def test_banned_user_cannot_suggest_changes(self) -> None: 
self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.exploration_id, expected_status_int=401) self.logout() - def test_normal_user_can_suggest_changes(self): + def test_normal_user_can_suggest_changes(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.exploration_id) @@ -2545,17 +3465,17 @@ class SuggestChangesDecoratorsTests(test_utils.GenericTestBase): banned_user_email = 'banned@example.com' exploration_id = 'exp_id' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_suggest_changes - def get(self): + def get(self) -> None: self.render_json({}) - def setUp(self): - super(SuggestChangesDecoratorsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.username) self.signup(self.banned_user_email, self.banned_username) self.mark_user_banned(self.banned_username) @@ -2564,13 +3484,13 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_banned_user_cannot_suggest_changes(self): + def test_banned_user_cannot_suggest_changes(self) -> None: self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock', expected_status_int=401) self.logout() - def test_normal_user_can_suggest_changes(self): + def test_normal_user_can_suggest_changes(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock') @@ -2586,8 +3506,8 @@ class ResubmitSuggestionDecoratorsTests(test_utils.GenericTestBase): author_email = 'author@example.com' username = 'user' user_email = 'user@example.com' - 
TARGET_TYPE = 'exploration' - SUGGESTION_TYPE = 'edit_exploration_state_content' + TARGET_TYPE: Final = 'exploration' + SUGGESTION_TYPE: Final = 'edit_exploration_state_content' exploration_id = 'exp_id' target_version_id = 1 change_dict = { @@ -2597,7 +3517,7 @@ class ResubmitSuggestionDecoratorsTests(test_utils.GenericTestBase): 'new_value': '' } - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'suggestion_id': { @@ -2606,14 +3526,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_resubmit_suggestion - def get(self, suggestion_id): + def get(self, suggestion_id: str) -> None: self.render_json({'suggestion_id': suggestion_id}) - def setUp(self): - super(ResubmitSuggestionDecoratorsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.author_email, self.author_username) self.signup(self.user_email, self.username) self.signup(self.owner_email, self.owner_username) @@ -2634,40 +3554,60 @@ def setUp(self): ('target_id', self.exploration_id)])[0] self.suggestion_id = suggestion.suggestion_id - def test_author_can_resubmit_suggestion(self): + def test_author_can_resubmit_suggestion(self) -> None: self.login(self.author_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.suggestion_id) self.assertEqual(response['suggestion_id'], self.suggestion_id) self.logout() - def test_non_author_cannot_resubmit_suggestion(self): + def test_non_author_cannot_resubmit_suggestion(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.suggestion_id, expected_status_int=401) self.logout() + def test_error_with_invalid_suggestion_id(self) -> None: + invalid_id = 'invalid' + 
self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock/%s' % invalid_id, expected_status_int=400) + error_msg = 'No suggestion found with given suggestion id' + self.assertEqual(response['error'], error_msg) + self.logout() + class DecoratorForAcceptingSuggestionTests(test_utils.GenericTestBase): """Tests for get_decorator_for_accepting_suggestion decorator.""" - AUTHOR_USERNAME = 'author' - AUTHOR_EMAIL = 'author@example.com' - VIEWER_USERNAME = 'user' - VIEWER_EMAIL = 'user@example.com' - TARGET_TYPE = 'exploration' - SUGGESTION_TYPE = 'edit_exploration_state_content' - EXPLORATION_ID = 'exp_id' - TARGET_VERSION_ID = 1 - CHANGE_DICT = { + AUTHOR_USERNAME: Final = 'author' + AUTHOR_EMAIL: Final = 'author@example.com' + TARGET_TYPE: Final = feconf.ENTITY_TYPE_EXPLORATION + SUGGESTION_TYPE_1: Final = feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT + SUGGESTION_TYPE_2: Final = feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT + SUGGESTION_TYPE_3: Final = feconf.SUGGESTION_TYPE_ADD_QUESTION + EXPLORATION_ID: Final = 'exp_id' + SKILL_ID: Final = 'skill_id' + TARGET_VERSION_ID: Final = 1 + CHANGE_DICT_1: Final = { 'cmd': 'edit_state_property', 'property_name': 'content', 'state_name': 'Introduction', 'new_value': '' } + CHANGE_DICT_2: Final = { + 'cmd': 'add_written_translation', + 'state_name': 'Introduction', + 'language_code': constants.DEFAULT_LANGUAGE_CODE, + 'content_id': 'content_0', + 'content_html': '', + 'translation_html': '', + 'data_format': 'html' + } - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'suggestion_id': { @@ -2681,21 +3621,25 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.get_decorator_for_accepting_suggestion( - 
acl_decorators.can_edit_exploration) - def get(self, target_id, suggestion_id): + acl_decorators.open_access) + def get(self, target_id: str, suggestion_id: str) -> None: self.render_json({ 'target_id': target_id, 'suggestion_id': suggestion_id }) - def setUp(self): - super(DecoratorForAcceptingSuggestionTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, self.AUTHOR_USERNAME) self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) + self.signup( + self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.owner = user_services.get_user_actions_info(self.owner_id) @@ -2705,71 +3649,317 @@ def setUp(self): self.MockHandler)], debug=feconf.DEBUG, )) + content_id_generator = translation_domain.ContentIdGenerator() + change_dict: Dict[ + str, Union[str, question_domain.QuestionDict, float] + ] = { + 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, + 'question_dict': { + 'question_state_data': ( + self._create_valid_question_data( + 'default_state', content_id_generator).to_dict() + ), + 'language_code': 'en', + 'question_state_data_schema_version': ( + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'linked_skill_ids': ['skill_1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index), + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'version': 44, + 'id': '' + }, + 'skill_id': self.SKILL_ID, + 'skill_difficulty': 0.3 + } self.save_new_default_exploration(self.EXPLORATION_ID, self.owner_id) rights_manager.publish_exploration(self.owner, self.EXPLORATION_ID) - suggestion_services.create_suggestion( - self.SUGGESTION_TYPE, self.TARGET_TYPE, + self.save_new_skill(self.SKILL_ID, 
self.author_id) + self.suggestion_1 = suggestion_services.create_suggestion( + self.SUGGESTION_TYPE_1, self.TARGET_TYPE, self.EXPLORATION_ID, self.TARGET_VERSION_ID, self.author_id, - self.CHANGE_DICT, '') - suggestion = suggestion_services.query_suggestions( - [('author_id', self.author_id), - ('target_id', self.EXPLORATION_ID)])[0] - self.suggestion_id = suggestion.suggestion_id + self.CHANGE_DICT_1, '') + self.suggestion_2 = suggestion_services.create_suggestion( + self.SUGGESTION_TYPE_2, self.TARGET_TYPE, + self.EXPLORATION_ID, self.TARGET_VERSION_ID, + self.author_id, + self.CHANGE_DICT_2, '') + self.suggestion_3 = suggestion_services.create_suggestion( + self.SUGGESTION_TYPE_3, self.TARGET_TYPE, + self.EXPLORATION_ID, self.TARGET_VERSION_ID, + self.author_id, + change_dict, '') + self.suggestion_id_1 = self.suggestion_1.suggestion_id + self.suggestion_id_2 = self.suggestion_2.suggestion_id + self.suggestion_id_3 = self.suggestion_3.suggestion_id - def test_guest_cannot_accept_suggestion(self): + def test_guest_cannot_accept_suggestion(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_accept_suggestion/%s/%s' - % (self.EXPLORATION_ID, self.suggestion_id), + % (self.EXPLORATION_ID, self.suggestion_id_1), expected_status_int=401) self.assertEqual( response['error'], 'You must be logged in to access this resource.') - def test_owner_can_accept_suggestion(self): + def test_owner_can_accept_suggestion(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_accept_suggestion/%s/%s' - % (self.EXPLORATION_ID, self.suggestion_id)) - self.assertEqual(response['suggestion_id'], self.suggestion_id) + % (self.EXPLORATION_ID, self.suggestion_id_1)) + self.assertEqual(response['suggestion_id'], self.suggestion_id_1) self.assertEqual(response['target_id'], self.EXPLORATION_ID) self.logout() - def test_viewer_cannot_accept_suggestion(self): - 
self.login(self.VIEWER_EMAIL) + def test_user_with_review_rights_can_accept_suggestion(self) -> None: + self.login(self.EDITOR_EMAIL) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + review_swap = self.swap_to_always_return( + suggestion_services, 'can_user_review_category', value=True) + with testapp_swap, review_swap: + response = self.get_json( + '/mock_accept_suggestion/%s/%s' + % (self.EXPLORATION_ID, self.suggestion_id_1)) + self.assertEqual(response['suggestion_id'], self.suggestion_id_1) + self.assertEqual(response['target_id'], self.EXPLORATION_ID) + self.logout() + + def test_user_with_review_rights_can_accept_translation_suggestion( + self + ) -> None: + self.login(self.EDITOR_EMAIL) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + translation_review_swap = self.swap_to_always_return( + user_services, 'can_review_translation_suggestions', value=True) + with testapp_swap, translation_review_swap: + response = self.get_json( + '/mock_accept_suggestion/%s/%s' + % (self.EXPLORATION_ID, self.suggestion_id_2)) + self.assertEqual(response['suggestion_id'], self.suggestion_id_2) + self.assertEqual(response['target_id'], self.EXPLORATION_ID) + self.logout() + + def test_user_with_review_rights_can_accept_question_suggestion( + self + ) -> None: + self.login(self.EDITOR_EMAIL) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + question_review_swap = self.swap_to_always_return( + user_services, 'can_review_question_suggestions', value=True) + with testapp_swap, question_review_swap: + response = self.get_json( + '/mock_accept_suggestion/%s/%s' + % (self.EXPLORATION_ID, self.suggestion_id_3)) + self.assertEqual(response['suggestion_id'], self.suggestion_id_3) + self.assertEqual(response['target_id'], self.EXPLORATION_ID) + self.logout() + + def test_curriculum_admin_can_accept_suggestions(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): - self.get_json( + 
response = self.get_json( '/mock_accept_suggestion/%s/%s' - % (self.EXPLORATION_ID, self.suggestion_id), - expected_status_int=401) + % (self.EXPLORATION_ID, self.suggestion_id_1)) + self.assertEqual(response['suggestion_id'], self.suggestion_id_1) + self.assertEqual(response['target_id'], self.EXPLORATION_ID) self.logout() + def test_error_when_format_of_suggestion_id_is_invalid(self) -> None: + self.login(self.OWNER_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_accept_suggestion/%s/%s' + % (self.EXPLORATION_ID, 'invalid_suggestion_id'), + expected_status_int=400) + error_msg = ( + 'Invalid format for suggestion_id.' + ' It must contain 3 parts separated by \'.\'' + ) + self.assertEqual(response['error'], error_msg) + + def test_page_not_found_exception_when_suggestion_id_is_invalid( + self + ) -> None: + self.login(self.OWNER_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_accept_suggestion/%s/%s' + % (self.EXPLORATION_ID, 'invalid.suggestion.id'), + expected_status_int=404) + -class PublishExplorationTests(test_utils.GenericTestBase): - """Tests for can_publish_exploration decorator.""" +class ViewReviewableSuggestionsTests(test_utils.GenericTestBase): + """Tests for can_view_reviewable_suggestions decorator.""" - private_exp_id = 'exp_0' - public_exp_id = 'exp_1' + TARGET_TYPE = feconf.ENTITY_TYPE_EXPLORATION - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { - 'exploration_id': { + 'target_type': { 'schema': { 'type': 'basestring' } - } - } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + }, + 'suggestion_type': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_view_reviewable_suggestions + def get(self, target_type: str, suggestion_type: str) -> None: + 
self.render_json({ + 'target_type': target_type, + 'suggestion_type': suggestion_type + }) + + def setUp(self) -> None: + super().setUp() + self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) + self.signup( + self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_review_suggestion//', + self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_guest_cannot_review_suggestion(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_review_suggestion/%s/%s' % ( + self.TARGET_TYPE, feconf.SUGGESTION_TYPE_ADD_QUESTION), + expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' + self.assertEqual(response['error'], error_msg) + + def test_error_when_suggestion_type_is_invalid(self) -> None: + self.login(self.VIEWER_EMAIL) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + with testapp_swap: + response = self.get_json( + '/mock_review_suggestion/%s/%s' % ( + self.TARGET_TYPE, 'invalid'), + expected_status_int=404) + error_msg = ( + 'Could not find the page http://localhost/' + 'mock_review_suggestion/%s/%s.' 
% (self.TARGET_TYPE, 'invalid') + ) + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_user_with_review_rights_can_review_translation_suggestions( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + translation_review_swap = self.swap_to_always_return( + user_services, 'can_review_translation_suggestions', value=True) + with testapp_swap, translation_review_swap: + response = self.get_json( + '/mock_review_suggestion/%s/%s' % ( + self.TARGET_TYPE, feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT)) + self.assertEqual(response['target_type'], self.TARGET_TYPE) + self.assertEqual( + response['suggestion_type'], + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT + ) + self.logout() + + def test_user_with_review_rights_can_review_question_suggestions( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + question_review_swap = self.swap_to_always_return( + user_services, 'can_review_question_suggestions', value=True) + with testapp_swap, question_review_swap: + response = self.get_json( + '/mock_review_suggestion/%s/%s' % ( + self.TARGET_TYPE, feconf.SUGGESTION_TYPE_ADD_QUESTION)) + self.assertEqual(response['target_type'], self.TARGET_TYPE) + self.assertEqual( + response['suggestion_type'], + feconf.SUGGESTION_TYPE_ADD_QUESTION + ) + self.logout() + + def test_user_without_review_rights_cannot_review_question_suggestions( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + user_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + question_review_swap = self.swap_to_always_return( + user_services, 'can_review_question_suggestions', value=False) + with testapp_swap, question_review_swap: + response = self.get_json( + '/mock_review_suggestion/%s/%s' % ( + self.TARGET_TYPE, feconf.SUGGESTION_TYPE_ADD_QUESTION + ), + 
expected_status_int=500 + ) + self.assertEqual( + 'User with user_id: %s is not allowed to review ' + 'question suggestions.' % user_id, + response['error'] + ) + self.logout() + + def test_user_without_review_rights_cannot_review_translation_suggestions( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + user_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + translation_review_swap = self.swap_to_always_return( + user_services, 'can_review_translation_suggestions', value=False) + with testapp_swap, translation_review_swap: + response = self.get_json( + '/mock_review_suggestion/%s/%s' % ( + self.TARGET_TYPE, feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT + ), + expected_status_int=500 + ) + self.assertEqual( + 'User with user_id: %s is not allowed to review ' + 'translation suggestions.' % user_id, + response['error'] + ) + self.logout() + + +class PublishExplorationTests(test_utils.GenericTestBase): + """Tests for can_publish_exploration decorator.""" + + private_exp_id = 'exp_0' + public_exp_id = 'exp_1' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'exploration_id': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_publish_exploration - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(PublishExplorationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -2789,7 +3979,7 @@ def setUp(self): self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, 
self.public_exp_id) - def test_cannot_publish_exploration_with_invalid_exp_id(self): + def test_cannot_publish_exploration_with_invalid_exp_id(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -2797,7 +3987,7 @@ def test_cannot_publish_exploration_with_invalid_exp_id(self): expected_status_int=404) self.logout() - def test_owner_can_publish_owned_exploration(self): + def test_owner_can_publish_owned_exploration(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2805,7 +3995,7 @@ def test_owner_can_publish_owned_exploration(self): self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_already_published_exploration_cannot_be_published(self): + def test_already_published_exploration_cannot_be_published(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -2813,7 +4003,7 @@ def test_already_published_exploration_cannot_be_published(self): expected_status_int=401) self.logout() - def test_moderator_cannot_publish_private_exploration(self): + def test_moderator_cannot_publish_private_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -2821,7 +4011,7 @@ def test_moderator_cannot_publish_private_exploration(self): expected_status_int=401) self.logout() - def test_admin_can_publish_any_exploration(self): + def test_admin_can_publish_any_exploration(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2833,8 +4023,10 @@ class ModifyExplorationRolesTests(test_utils.GenericTestBase): """Tests for can_modify_exploration_roles decorator.""" private_exp_id = 'exp_0' + banned_user = 'banneduser' + banned_user_email = 'banned@example.com' - class MockHandler(base.BaseHandler): + 
class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -2843,17 +4035,19 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_modify_exploration_roles - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(ModifyExplorationRolesTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.banned_user_email, self.banned_user) + self.mark_user_banned(self.banned_user) self.set_moderators([self.MODERATOR_USERNAME]) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) @@ -2864,20 +4058,32 @@ def setUp(self): self.save_new_valid_exploration( self.private_exp_id, self.owner_id) - def test_owner_can_modify_exploration_roles(self): + def test_banned_user_cannot_modify_exploration_roles(self) -> None: + self.login(self.banned_user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock/%s' % self.private_exp_id, expected_status_int=401) + error_msg = ( + 'You do not have credentials to change rights ' + 'for this exploration.' 
+ ) + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_owner_can_modify_exploration_roles(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id) self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() - def test_moderator_can_modify_roles_of_unowned_exploration(self): + def test_moderator_can_modify_roles_of_unowned_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/%s' % self.private_exp_id) self.logout() - def test_admin_can_modify_roles_of_any_exploration(self): + def test_admin_can_modify_roles_of_any_exploration(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id) @@ -2895,7 +4101,7 @@ class CollectionPublishStatusTests(test_utils.GenericTestBase): published_col_id = 'col_id_1' private_col_id = 'col_id_2' - class MockPublishHandler(base.BaseHandler): + class MockPublishHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'collection_id': { @@ -2904,13 +4110,15 @@ class MockPublishHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_publish_collection - def get(self, collection_id): + def get(self, collection_id: str) -> None: return self.render_json({'collection_id': collection_id}) - class MockUnpublishHandler(base.BaseHandler): + class MockUnpublishHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] + ): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'collection_id': { @@ -2919,14 +4127,14 @@ class MockUnpublishHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + 
HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_unpublish_collection - def get(self, collection_id): + def get(self, collection_id: str) -> None: return self.render_json({'collection_id': collection_id}) - def setUp(self): - super(CollectionPublishStatusTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) @@ -2960,7 +4168,7 @@ def setUp(self): rights_manager.publish_exploration(self.owner, self.published_exp_id) rights_manager.publish_collection(self.owner, self.published_col_id) - def test_cannot_publish_collection_with_invalid_exp_id(self): + def test_cannot_publish_collection_with_invalid_exp_id(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -2968,7 +4176,7 @@ def test_cannot_publish_collection_with_invalid_exp_id(self): expected_status_int=404) self.logout() - def test_cannot_unpublish_collection_with_invalid_exp_id(self): + def test_cannot_unpublish_collection_with_invalid_exp_id(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -2976,7 +4184,7 @@ def test_cannot_unpublish_collection_with_invalid_exp_id(self): expected_status_int=404) self.logout() - def test_owner_can_publish_collection(self): + def test_owner_can_publish_collection(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -2984,7 +4192,7 @@ def test_owner_can_publish_collection(self): self.assertEqual(response['collection_id'], self.private_col_id) self.logout() - def test_owner_cannot_unpublish_public_collection(self): + def test_owner_cannot_unpublish_public_collection(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): 
self.get_json( @@ -2992,7 +4200,7 @@ def test_owner_cannot_unpublish_public_collection(self): expected_status_int=401) self.logout() - def test_moderator_can_unpublish_public_collection(self): + def test_moderator_can_unpublish_public_collection(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -3000,7 +4208,7 @@ def test_moderator_can_unpublish_public_collection(self): self.assertEqual(response['collection_id'], self.published_col_id) self.logout() - def test_admin_can_publish_any_collection(self): + def test_admin_can_publish_any_collection(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -3008,7 +4216,7 @@ def test_admin_can_publish_any_collection(self): self.assertEqual(response['collection_id'], self.private_col_id) self.logout() - def test_admin_cannot_publish_already_published_collection(self): + def test_admin_cannot_publish_already_published_collection(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -3025,17 +4233,17 @@ class AccessLearnerDashboardDecoratorTests(test_utils.GenericTestBase): banned_user = 'banneduser' banned_user_email = 'banned@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_learner_dashboard - def get(self): - return self.render_json({}) + def get(self) -> None: + self.render_json({'success': True}) - def setUp(self): - super(AccessLearnerDashboardDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.user_email, self.user) 
self.signup(self.banned_user_email, self.banned_user) self.mark_user_banned(self.banned_user) @@ -3044,18 +4252,76 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_banned_user_is_redirected(self): + def test_banned_user_cannot_access_learner_dashboard(self) -> None: self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): - self.get_json('/mock/', expected_status_int=401) + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You do not have the credentials to access this page.' + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_exploration_editor_can_access_learner_dashboard(self) -> None: + self.login(self.user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/') + self.assertTrue(response['success']) + self.logout() + + def test_guest_user_cannot_access_learner_dashboard(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + + +class AccessLearnerGroupsDecoratorTests(test_utils.GenericTestBase): + """Tests the decorator can_access_learner_groups.""" + + user = 'user' + user_email = 'user@example.com' + banned_user = 'banneduser' + banned_user_email = 'banned@example.com' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_access_learner_groups + def get(self) -> None: + self.render_json({'success': True}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.user_email, self.user) + self.signup(self.banned_user_email, self.banned_user) + self.mark_user_banned(self.banned_user) + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route('/mock/', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_banned_user_cannot_access_teacher_dashboard(self) -> None: + self.login(self.banned_user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You do not have the credentials to access this page.' + self.assertEqual(response['error'], error_msg) self.logout() - def test_exploration_editor_can_access_learner_dashboard(self): + def test_exploration_editor_can_access_learner_groups(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): - self.get_json('/mock/') + response = self.get_json('/mock/') + self.assertTrue(response['success']) self.logout() + def test_guest_user_cannot_access_teacher_dashboard(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock/', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + class EditTopicDecoratorTests(test_utils.GenericTestBase): """Tests the decorator can_edit_topic.""" @@ -3066,7 +4332,7 @@ class EditTopicDecoratorTests(test_utils.GenericTestBase): viewer_email = 'viewer@example.com' topic_id = 'topic_1' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_id': { @@ -3075,67 +4341,219 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_edit_topic - def get(self, topic_id): + def get(self, topic_id: str) -> None: self.render_json({'topic_id': topic_id}) - def setUp(self): - super(EditTopicDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.manager_email, self.manager_username) self.signup(self.viewer_email, self.viewer_username) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.manager_id = self.get_user_id_from_email(self.manager_email) self.viewer_id = self.get_user_id_from_email(self.viewer_email) - self.admin = user_services.get_user_actions_info(self.admin_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock_edit_topic/', self.MockHandler)], debug=feconf.DEBUG, )) self.topic_id = topic_fetchers.get_new_topic_id() - self.save_new_topic( - self.topic_id, self.viewer_id, name='Name', - description='Description', canonical_story_ids=[], - additional_story_ids=[], uncategorized_skill_ids=[], - subtopics=[], next_subtopic_id=1) + self.save_new_topic(self.topic_id, self.viewer_id) topic_services.create_new_topic_rights(self.topic_id, self.admin_id) - 
self.set_topic_managers([self.manager_username], self.topic_id) - self.manager = user_services.get_user_actions_info(self.manager_id) - def test_can_not_edit_topic_with_invalid_topic_id(self): + def test_cannot_edit_topic_with_invalid_topic_id(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_topic/invalid_topic_id', expected_status_int=404) self.logout() - def test_admin_can_edit_topic(self): + def test_admin_can_edit_topic(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_topic/%s' % self.topic_id) self.assertEqual(response['topic_id'], self.topic_id) self.logout() - def test_topic_manager_can_edit_topic(self): + def test_topic_manager_can_edit_topic(self) -> None: self.login(self.manager_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_topic/%s' % self.topic_id) self.assertEqual(response['topic_id'], self.topic_id) self.logout() - def test_normal_user_cannot_edit_topic(self): + def test_normal_user_cannot_edit_topic(self) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_topic/%s' % self.topic_id, expected_status_int=401) self.logout() + def test_guest_user_cannot_edit_topic(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_edit_topic/%s' % self.topic_id, expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + + +class DeleteTopicDecoratorTests(test_utils.GenericTestBase): + """Tests the decorator can_delete_topic.""" + + viewer_username = 'viewer' + viewer_email = 'viewer@example.com' + topic_id = 'topic_1' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'topic_id': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_delete_topic + def get(self, topic_id: str) -> None: + self.render_json({'topic_id': topic_id}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.viewer_email, self.viewer_username) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.viewer_id = self.get_user_id_from_email(self.viewer_email) + + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route('/mock_delete_topic/', self.MockHandler)], + debug=feconf.DEBUG, + )) + self.topic_id = topic_fetchers.get_new_topic_id() + self.save_new_topic(self.topic_id, self.viewer_id) + topic_services.create_new_topic_rights(self.topic_id, self.admin_id) + + def test_cannot_delete_topic_with_invalid_topic_id(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_delete_topic/invalid_topic_id', expected_status_int=404) + self.logout() + + def test_admin_can_delete_topic(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock_delete_topic/%s' % self.topic_id) + self.assertEqual(response['topic_id'], self.topic_id) + self.logout() + + def test_normal_user_cannot_delete_topic(self) -> None: + 
self.login(self.viewer_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_delete_topic/%s' % self.topic_id, + expected_status_int=401) + error_msg = ( + '%s does not have enough rights to delete the' + ' topic.' % self.viewer_id + ) + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_guest_user_cannot_delete_topic(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_delete_topic/%s' % self.topic_id, + expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' + self.assertEqual(response['error'], error_msg) + + +class ViewAnyTopicEditorDecoratorTests(test_utils.GenericTestBase): + """Tests the decorator can_view_any_topic_editor.""" + + viewer_username = 'viewer' + viewer_email = 'viewer@example.com' + topic_id = 'topic_1' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'topic_id': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_view_any_topic_editor + def get(self, topic_id: str) -> None: + self.render_json({'topic_id': topic_id}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.viewer_email, self.viewer_username) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.viewer_id = self.get_user_id_from_email(self.viewer_email) + + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_view_topic_editor/', self.MockHandler)], + debug=feconf.DEBUG, + )) + self.topic_id = topic_fetchers.get_new_topic_id() + self.save_new_topic(self.topic_id, self.viewer_id) + 
topic_services.create_new_topic_rights(self.topic_id, self.admin_id) + + def test_cannot_delete_topic_with_invalid_topic_id(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_view_topic_editor/invalid_topic_id', + expected_status_int=404) + self.logout() + + def test_admin_can_view_topic_editor(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock_view_topic_editor/%s' % ( + self.topic_id)) + self.assertEqual(response['topic_id'], self.topic_id) + self.logout() + + def test_normal_user_cannot_view_topic_editor(self) -> None: + self.login(self.viewer_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_topic_editor/%s' % self.topic_id, + expected_status_int=401) + error_msg = ( + '%s does not have enough rights to view any' + ' topic editor.' % self.viewer_id + ) + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_guest_user_cannot_view_topic_editor(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_topic_editor/%s' % self.topic_id, + expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + class EditStoryDecoratorTests(test_utils.GenericTestBase): """Tests the decorator can_edit_story.""" @@ -3145,7 +4563,7 @@ class EditStoryDecoratorTests(test_utils.GenericTestBase): viewer_username = 'viewer' viewer_email = 'viewer@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'story_id': { @@ -3154,19 +4572,18 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_edit_story - def get(self, story_id): + def get(self, story_id: str) -> None: self.render_json({'story_id': story_id}) - def setUp(self): - super(EditStoryDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.admin = user_services.get_user_actions_info(self.admin_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock_edit_story/', self.MockHandler)], @@ -3175,21 +4592,18 @@ def setUp(self): self.story_id = story_services.get_new_story_id() self.topic_id = topic_fetchers.get_new_topic_id() self.save_new_story(self.story_id, self.admin_id, self.topic_id) - self.save_new_topic( - self.topic_id, self.admin_id, name='Name', - description='Description', canonical_story_ids=[self.story_id], - additional_story_ids=[], uncategorized_skill_ids=[], - subtopics=[], next_subtopic_id=1) + self.topic = self.save_new_topic( + self.topic_id, self.admin_id, canonical_story_ids=[self.story_id]) topic_services.create_new_topic_rights(self.topic_id, self.admin_id) - def test_can_not_edit_story_with_invalid_story_id(self): + def 
test_cannot_edit_story_with_invalid_story_id(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_story/story_id_new', expected_status_int=404) self.logout() - def test_can_not_edit_story_with_invalid_topic_id(self): + def test_cannot_edit_story_with_invalid_topic_id(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) story_id = story_services.get_new_story_id() topic_id = topic_fetchers.get_new_topic_id() @@ -3199,14 +4613,29 @@ def test_can_not_edit_story_with_invalid_topic_id(self): '/mock_edit_story/%s' % story_id, expected_status_int=404) self.logout() - def test_admin_can_edit_story(self): + def test_cannot_edit_story_with_invalid_canonical_story_ids(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + canonical_story_ids_swap = self.swap_to_always_return( + topic_domain.Topic, 'get_canonical_story_ids', value=[]) + with testapp_swap, canonical_story_ids_swap: + response = self.get_json( + '/mock_edit_story/%s' % self.story_id, expected_status_int=404) + error_msg = ( + 'Could not find the page http://localhost/mock_edit_story/%s.' 
% ( + self.story_id) + ) + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_admin_can_edit_story(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_story/%s' % self.story_id) self.assertEqual(response['story_id'], self.story_id) self.logout() - def test_topic_manager_can_edit_story(self): + def test_topic_manager_can_edit_story(self) -> None: self.signup(self.manager_email, self.manager_username) self.set_topic_managers([self.manager_username], self.topic_id) @@ -3216,7 +4645,7 @@ def test_topic_manager_can_edit_story(self): self.assertEqual(response['story_id'], self.story_id) self.logout() - def test_normal_user_cannot_edit_story(self): + def test_normal_user_cannot_edit_story(self) -> None: self.signup(self.viewer_email, self.viewer_username) self.login(self.viewer_email) @@ -3225,60 +4654,227 @@ def test_normal_user_cannot_edit_story(self): '/mock_edit_story/%s' % self.story_id, expected_status_int=401) self.logout() + def test_guest_user_cannot_edit_story(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_edit_story/%s' % self.story_id, expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + -class AddStoryToTopicTests(test_utils.GenericTestBase): - """Tests for decorator can_add_new_story_to_topic.""" +class DeleteStoryDecoratorTests(test_utils.GenericTestBase): + """Tests the decorator can_delete_story.""" manager_username = 'topicmanager' manager_email = 'topicmanager@example.com' viewer_username = 'viewer' viewer_email = 'viewer@example.com' - topic_id = 'topic_1' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { - 'topic_id': { + 'story_id': { 'schema': { 'type': 'basestring' } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} - @acl_decorators.can_add_new_story_to_topic - def get(self, topic_id): - self.render_json({'topic_id': topic_id}) + @acl_decorators.can_delete_story + def get(self, story_id: str) -> None: + self.render_json({'story_id': story_id}) - def setUp(self): - super(AddStoryToTopicTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - self.signup(self.manager_email, self.manager_username) - self.signup(self.viewer_email, self.viewer_username) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.manager_id = self.get_user_id_from_email(self.manager_email) - self.admin = user_services.get_user_actions_info(self.admin_id) - self.viewer_id = self.get_user_id_from_email(self.viewer_email) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( - [webapp2.Route( - '/mock_add_story_to_topic/', self.MockHandler)], + [webapp2.Route('/mock_delete_story/', self.MockHandler)], debug=feconf.DEBUG, )) + self.story_id = story_services.get_new_story_id() self.topic_id = topic_fetchers.get_new_topic_id() - self.save_new_topic( - 
self.topic_id, self.viewer_id, name='Name', - description='Description', canonical_story_ids=[], - additional_story_ids=[], uncategorized_skill_ids=[], - subtopics=[], next_subtopic_id=1) + self.save_new_story(self.story_id, self.admin_id, self.topic_id) + self.topic = self.save_new_topic( + self.topic_id, self.admin_id, canonical_story_ids=[self.story_id]) topic_services.create_new_topic_rights(self.topic_id, self.admin_id) + def test_cannot_delete_story_with_invalid_story_id(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_delete_story/story_id_new', expected_status_int=404) + self.logout() + + def test_cannot_delete_story_with_invalid_topic_id(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + story_id = story_services.get_new_story_id() + topic_id = topic_fetchers.get_new_topic_id() + self.save_new_story(story_id, self.admin_id, topic_id) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_delete_story/%s' % story_id, expected_status_int=404) + self.logout() + + def test_admin_can_delete_story(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock_delete_story/%s' % self.story_id) + self.assertEqual(response['story_id'], self.story_id) + self.logout() + + def test_topic_manager_can_delete_story(self) -> None: + self.signup(self.manager_email, self.manager_username) self.set_topic_managers([self.manager_username], self.topic_id) - self.manager = user_services.get_user_actions_info(self.manager_id) - def test_can_not_add_story_to_topic_with_invalid_topic_id(self): + self.login(self.manager_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock_delete_story/%s' % self.story_id) + self.assertEqual(response['story_id'], self.story_id) + self.logout() + + def test_normal_user_cannot_delete_story(self) -> None: + 
self.signup(self.viewer_email, self.viewer_username) + + self.login(self.viewer_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_delete_story/%s' % self.story_id, + expected_status_int=401) + error_msg = 'You do not have credentials to delete this story.' + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_guest_user_cannot_delete_story(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_delete_story/%s' % self.story_id, + expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' + self.assertEqual(response['error'], error_msg) + + +class AccessTopicsAndSkillsDashboardDecoratorTests(test_utils.GenericTestBase): + """Tests the decorator can_access_topics_and_skills_dashboard.""" + + manager_username = 'topicmanager' + manager_email = 'topicmanager@example.com' + viewer_username = 'viewer' + viewer_email = 'viewer@example.com' + topic_id = 'topic_1' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_access_topics_and_skills_dashboard + def get(self) -> None: + self.render_json({'success': True}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.manager_email, self.manager_username) + self.signup(self.viewer_email, self.viewer_username) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.viewer_id = self.get_user_id_from_email(self.viewer_email) + + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_access_dashboard/', self.MockHandler)], + debug=feconf.DEBUG, + )) + self.topic_id = 
topic_fetchers.get_new_topic_id() + self.save_new_topic(self.topic_id, self.viewer_id) + topic_services.create_new_topic_rights(self.topic_id, self.admin_id) + self.set_topic_managers([self.manager_username], self.topic_id) + + def test_admin_can_access_dashboard(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock_access_dashboard/') + self.assertTrue(response['success']) + self.logout() + + def test_topic_manager_can_access_dashboard(self) -> None: + self.login(self.manager_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock_access_dashboard/') + self.assertTrue(response['success']) + self.logout() + + def test_normal_user_cannot_access_dashboard(self) -> None: + self.login(self.viewer_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_access_dashboard/', expected_status_int=401) + error_msg = ( + '%s does not have enough rights to access the topics and skills' + ' dashboard.' % self.viewer_id + ) + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_guest_user_cannot_access_dashboard(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_access_dashboard/', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + + +class AddStoryToTopicTests(test_utils.GenericTestBase): + """Tests for decorator can_add_new_story_to_topic.""" + + manager_username = 'topicmanager' + manager_email = 'topicmanager@example.com' + viewer_username = 'viewer' + viewer_email = 'viewer@example.com' + topic_id = 'topic_1' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'topic_id': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_add_new_story_to_topic + def get(self, topic_id: str) -> None: + self.render_json({'topic_id': topic_id}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.manager_email, self.manager_username) + self.signup(self.viewer_email, self.viewer_username) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.viewer_id = self.get_user_id_from_email(self.viewer_email) + + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_add_story_to_topic/', self.MockHandler)], + debug=feconf.DEBUG, + )) + self.topic_id = topic_fetchers.get_new_topic_id() + self.save_new_topic(self.topic_id, self.viewer_id) + topic_services.create_new_topic_rights(self.topic_id, self.admin_id) + self.set_topic_managers([self.manager_username], self.topic_id) + + def test_cannot_add_story_to_topic_with_invalid_topic_id(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -3286,7 +4882,7 @@ def test_can_not_add_story_to_topic_with_invalid_topic_id(self): expected_status_int=404) self.logout() - def test_admin_can_add_story_to_topic(self): + def 
test_admin_can_add_story_to_topic(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -3295,7 +4891,8 @@ def test_admin_can_add_story_to_topic(self): self.logout() def test_topic_manager_cannot_add_story_to_topic_with_invalid_topic_id( - self): + self + ) -> None: self.login(self.manager_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -3303,7 +4900,7 @@ def test_topic_manager_cannot_add_story_to_topic_with_invalid_topic_id( expected_status_int=404) self.logout() - def test_topic_manager_can_add_story_to_topic(self): + def test_topic_manager_can_add_story_to_topic(self) -> None: self.login(self.manager_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -3311,7 +4908,7 @@ def test_topic_manager_can_add_story_to_topic(self): self.assertEqual(response['topic_id'], self.topic_id) self.logout() - def test_normal_user_cannot_add_story_to_topic(self): + def test_normal_user_cannot_add_story_to_topic(self) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -3322,7 +4919,7 @@ def test_normal_user_cannot_add_story_to_topic(self): 'You do not have credentials to add a story to this topic.') self.logout() - def test_guest_cannot_add_story_to_topic(self): + def test_guest_cannot_add_story_to_topic(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_add_story_to_topic/%s' % self.topic_id, @@ -3332,13 +4929,213 @@ def test_guest_cannot_add_story_to_topic(self): 'You must be logged in to access this resource.') +class StoryViewerAsLoggedInUserTests(test_utils.GenericTestBase): + """Tests for decorator can_access_story_viewer_page_as_logged_in_user.""" + + user_email = 'user@example.com' + username = 'user' + banned_user = 'banneduser' + banned_user_email = 'banned@example.com' + + class 
MockDataHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'topic_url_fragment': { + 'schema': { + 'type': 'basestring' + } + }, + 'story_url_fragment': { + 'schema': { + 'type': 'basestring' + } + }, + 'classroom_url_fragment': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_access_story_viewer_page_as_logged_in_user + def get(self, story_url_fragment: str) -> None: + self.render_json({'story_url_fragment': story_url_fragment}) + + class MockPageHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + URL_PATH_ARGS_SCHEMAS = { + 'topic_url_fragment': { + 'schema': { + 'type': 'basestring' + } + }, + 'story_url_fragment': { + 'schema': { + 'type': 'basestring' + } + }, + 'classroom_url_fragment': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_access_story_viewer_page_as_logged_in_user + def get(self, _: str) -> None: + self.render_template('oppia-root.mainpage.html') + + def setUp(self) -> None: + super().setUp() + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.user_email, self.username) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.admin = user_services.get_user_actions_info(self.admin_id) + self.signup(self.banned_user_email, self.banned_user) + self.mark_user_banned(self.banned_user) + story_data_url = ( + '/mock_story_data//' + '/') + story_page_url = ( + '/mock_story_page//' + '/story/') + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [ + webapp2.Route(story_data_url, self.MockDataHandler), + webapp2.Route(story_page_url, self.MockPageHandler) + ], + debug=feconf.DEBUG, + )) + + self.topic_id = 
topic_fetchers.get_new_topic_id() + self.story_id = story_services.get_new_story_id() + self.story_url_fragment = 'story-frag' + self.save_new_story( + self.story_id, self.admin_id, self.topic_id, + url_fragment=self.story_url_fragment) + subtopic_1 = topic_domain.Subtopic.create_default_subtopic( + 1, 'Subtopic Title 1', 'url-frag-one') + subtopic_1.skill_ids = ['skill_id_1'] + subtopic_1.url_fragment = 'sub-one-frag' + self.save_new_topic( + self.topic_id, self.admin_id, name='Name', + description='Description', canonical_story_ids=[self.story_id], + additional_story_ids=[], uncategorized_skill_ids=[], + subtopics=[subtopic_1], next_subtopic_id=2) + self.login(self.user_email) + + def test_user_cannot_access_non_existent_story(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_story_data/staging/topic/non-existent-frag', + expected_status_int=404) + + def test_user_cannot_access_story_when_topic_is_not_published(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_story_data/staging/topic/%s' + % self.story_url_fragment, + expected_status_int=404) + + def test_user_cannot_access_story_when_story_is_not_published(self) -> None: + topic_services.publish_topic(self.topic_id, self.admin_id) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_story_data/staging/topic/%s' + % self.story_url_fragment, + expected_status_int=404) + + def test_user_can_access_story_when_story_and_topic_are_published( + self + ) -> None: + topic_services.publish_topic(self.topic_id, self.admin_id) + topic_services.publish_story( + self.topic_id, self.story_id, self.admin_id) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json( + '/mock_story_data/staging/topic/%s' + % self.story_url_fragment, + expected_status_int=200) + + def test_user_can_access_story_when_all_url_fragments_are_valid( + self + ) -> None: + topic_services.publish_topic(self.topic_id, 
self.admin_id) + topic_services.publish_story( + self.topic_id, self.story_id, self.admin_id) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_html_response( + '/mock_story_page/staging/topic/story/%s' + % self.story_url_fragment, + expected_status_int=200) + + def test_user_redirect_to_story_page_if_story_url_fragment_is_invalid( + self + ) -> None: + topic_services.publish_topic(self.topic_id, self.admin_id) + topic_services.publish_story( + self.topic_id, self.story_id, self.admin_id) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_html_response( + '/mock_story_page/staging/topic/story/000', + expected_status_int=302) + self.assertEqual( + 'http://localhost/learn/staging/topic/story', + response.headers['location']) + + def test_user_redirect_to_correct_url_if_abbreviated_topic_is_invalid( + self + ) -> None: + topic_services.publish_topic(self.topic_id, self.admin_id) + topic_services.publish_story( + self.topic_id, self.story_id, self.admin_id) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_html_response( + '/mock_story_page/staging/invalid-topic/story/%s' + % self.story_url_fragment, + expected_status_int=302) + self.assertEqual( + 'http://localhost/learn/staging/topic/story/%s' + % self.story_url_fragment, + response.headers['location']) + + def test_user_redirect_with_correct_classroom_name_in_url(self) -> None: + topic_services.publish_topic(self.topic_id, self.admin_id) + topic_services.publish_story( + self.topic_id, self.story_id, self.admin_id) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_html_response( + '/mock_story_page/math/topic/story/%s' + % self.story_url_fragment, + expected_status_int=302) + self.assertEqual( + 'http://localhost/learn/staging/topic/story/%s' + % self.story_url_fragment, + response.headers['location']) + + def test_user_redirect_to_lowercase_story_url_fragment(self) -> None: + topic_services.publish_topic(self.topic_id, 
self.admin_id) + topic_services.publish_story( + self.topic_id, self.story_id, self.admin_id) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_html_response( + '/mock_story_page/staging/topic/story/Story-frag', + expected_status_int=302) + self.assertEqual( + 'http://localhost/learn/staging/topic/story/story-frag', + response.headers['location']) + + class StoryViewerTests(test_utils.GenericTestBase): """Tests for decorator can_access_story_viewer_page.""" banned_user = 'banneduser' banned_user_email = 'banned@example.com' - class MockDataHandler(base.BaseHandler): + class MockDataHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { @@ -3357,13 +5154,13 @@ class MockDataHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_story_viewer_page - def get(self, story_url_fragment): + def get(self, story_url_fragment: str) -> None: self.render_json({'story_url_fragment': story_url_fragment}) - class MockPageHandler(base.BaseHandler): + class MockPageHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { 'schema': { @@ -3381,14 +5178,14 @@ class MockPageHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_story_viewer_page - def get(self, _): + def get(self, _: str) -> None: self.render_template('oppia-root.mainpage.html') - def setUp(self): - super(StoryViewerTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) @@ -3417,7 +5214,7 @@ def setUp(self): self.story_id, self.admin_id, self.topic_id, 
url_fragment=self.story_url_fragment) subtopic_1 = topic_domain.Subtopic.create_default_subtopic( - 1, 'Subtopic Title 1') + 1, 'Subtopic Title 1', 'url-frag-one') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' self.save_new_topic( @@ -3426,20 +5223,20 @@ def setUp(self): additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[subtopic_1], next_subtopic_id=2) - def test_cannot_access_non_existent_story(self): + def test_cannot_access_non_existent_story(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_story_data/staging/topic/non-existent-frag', expected_status_int=404) - def test_cannot_access_story_when_topic_is_not_published(self): + def test_cannot_access_story_when_topic_is_not_published(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_story_data/staging/topic/%s' % self.story_url_fragment, expected_status_int=404) - def test_cannot_access_story_when_story_is_not_published(self): + def test_cannot_access_story_when_story_is_not_published(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -3447,7 +5244,7 @@ def test_cannot_access_story_when_story_is_not_published(self): % self.story_url_fragment, expected_status_int=404) - def test_can_access_story_when_story_and_topic_are_published(self): + def test_can_access_story_when_story_and_topic_are_published(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) @@ -3457,7 +5254,7 @@ def test_can_access_story_when_story_and_topic_are_published(self): % self.story_url_fragment, expected_status_int=200) - def test_can_access_story_when_all_url_fragments_are_valid(self): + def test_can_access_story_when_all_url_fragments_are_valid(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) 
topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) @@ -3467,7 +5264,9 @@ def test_can_access_story_when_all_url_fragments_are_valid(self): % self.story_url_fragment, expected_status_int=200) - def test_redirect_to_story_page_if_story_url_fragment_is_invalid(self): + def test_redirect_to_story_page_if_story_url_fragment_is_invalid( + self + ) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) @@ -3479,7 +5278,9 @@ def test_redirect_to_story_page_if_story_url_fragment_is_invalid(self): 'http://localhost/learn/staging/topic/story', response.headers['location']) - def test_redirect_to_correct_url_if_abbreviated_topic_is_invalid(self): + def test_redirect_to_correct_url_if_abbreviated_topic_is_invalid( + self + ) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) @@ -3493,7 +5294,7 @@ def test_redirect_to_correct_url_if_abbreviated_topic_is_invalid(self): % self.story_url_fragment, response.headers['location']) - def test_redirect_with_correct_classroom_name_in_url(self): + def test_redirect_with_correct_classroom_name_in_url(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) @@ -3507,7 +5308,7 @@ def test_redirect_with_correct_classroom_name_in_url(self): % self.story_url_fragment, response.headers['location']) - def test_redirect_lowercase_story_url_fragment(self): + def test_redirect_lowercase_story_url_fragment(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) @@ -3526,7 +5327,7 @@ class SubtopicViewerTests(test_utils.GenericTestBase): banned_user = 'banneduser' banned_user_email = 'banned@example.com' - class MockDataHandler(base.BaseHandler): + class 
MockDataHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { @@ -3545,13 +5346,17 @@ class MockDataHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_subtopic_viewer_page - def get(self, unused_topic_url_fragment, subtopic_url_fragment): + def get( + self, + unused_topic_url_fragment: str, + subtopic_url_fragment: str + ) -> None: self.render_json({'subtopic_url_fragment': subtopic_url_fragment}) - class MockPageHandler(base.BaseHandler): + class MockPageHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { 'schema': { @@ -3569,14 +5374,18 @@ class MockPageHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_subtopic_viewer_page - def get(self, unused_topic_url_fragment, unused_subtopic_url_fragment): + def get( + self, + unused_topic_url_fragment: str, + unused_subtopic_url_fragment: str + ) -> None: self.render_template('subtopic-viewer-page.mainpage.html') - def setUp(self): - super(SubtopicViewerTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) @@ -3600,11 +5409,11 @@ def setUp(self): self.topic_id = topic_fetchers.get_new_topic_id() subtopic_1 = topic_domain.Subtopic.create_default_subtopic( - 1, 'Subtopic Title 1') + 1, 'Subtopic Title 1', 'url-frag-one') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' subtopic_2 = topic_domain.Subtopic.create_default_subtopic( - 2, 'Subtopic Title 2') + 2, 'Subtopic Title 2', 'url-frag-two') subtopic_2.skill_ids = ['skill_id_2'] subtopic_2.url_fragment = 'sub-two-frag' 
self.subtopic_page_1 = ( @@ -3615,7 +5424,8 @@ def setUp(self): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'sample-fragment' })] ) self.save_new_topic( @@ -3625,33 +5435,45 @@ def setUp(self): subtopics=[subtopic_1, subtopic_2], next_subtopic_id=3, url_fragment='topic-frag') - def test_cannot_access_non_existent_subtopic(self): + def test_cannot_access_non_existent_subtopic(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_subtopic_data/staging/topic-frag/non-existent-frag', expected_status_int=404) - def test_cannot_access_subtopic_when_topic_is_not_published(self): + def test_cannot_access_subtopic_when_topic_is_not_published(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_subtopic_data/staging/topic-frag/sub-one-frag', expected_status_int=404) - def test_can_access_subtopic_when_topic_is_published(self): + def test_can_access_subtopic_when_topic_is_published(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_subtopic_data/staging/topic-frag/sub-one-frag', expected_status_int=200) - def test_can_access_subtopic_when_all_url_fragments_are_valid(self): + def test_redirect_to_classroom_if_user_is_banned(self) -> None: + self.login(self.banned_user_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_html_response( + '/mock_subtopic_page/staging/topic-frag/revision/000', + expected_status_int=302) + self.assertEqual( + response.headers['location'], 'http://localhost/learn/staging') + self.logout() + + def test_can_access_subtopic_when_all_url_fragments_are_valid(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_html_response( 
'/mock_subtopic_page/staging/topic-frag/revision/sub-one-frag', expected_status_int=200) - def test_fall_back_to_revision_page_if_subtopic_url_frag_is_invalid(self): + def test_fall_back_to_revision_page_if_subtopic_url_frag_is_invalid( + self + ) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( @@ -3661,7 +5483,24 @@ def test_fall_back_to_revision_page_if_subtopic_url_frag_is_invalid(self): 'http://localhost/learn/staging/topic-frag/revision', response.headers['location']) - def test_redirect_to_classroom_if_abbreviated_topic_is_invalid(self): + def test_fall_back_to_revision_page_when_subtopic_page_does_not_exist( + self + ) -> None: + topic_services.publish_topic(self.topic_id, self.admin_id) + testapp_swap = self.swap(self, 'testapp', self.mock_testapp) + subtopic_swap = self.swap_to_always_return( + subtopic_page_services, 'get_subtopic_page_by_id', None) + with testapp_swap, subtopic_swap: + response = self.get_html_response( + '/mock_subtopic_page/staging/topic-frag/revision/sub-one-frag', + expected_status_int=302) + self.assertEqual( + 'http://localhost/learn/staging/topic-frag/revision', + response.headers['location']) + + def test_redirect_to_classroom_if_abbreviated_topic_is_invalid( + self + ) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( @@ -3671,7 +5510,7 @@ def test_redirect_to_classroom_if_abbreviated_topic_is_invalid(self): 'http://localhost/learn/math', response.headers['location']) - def test_redirect_with_correct_classroom_name_in_url(self): + def test_redirect_with_correct_classroom_name_in_url(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( @@ -3682,7 +5521,7 @@ def test_redirect_with_correct_classroom_name_in_url(self): 
'/sub-one-frag', response.headers['location']) - def test_redirect_with_lowercase_subtopic_url_fragment(self): + def test_redirect_with_lowercase_subtopic_url_fragment(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( @@ -3700,7 +5539,7 @@ class TopicViewerTests(test_utils.GenericTestBase): banned_user = 'banneduser' banned_user_email = 'banned@example.com' - class MockDataHandler(base.BaseHandler): + class MockDataHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { @@ -3714,13 +5553,13 @@ class MockDataHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_topic_viewer_page - def get(self, topic_name): + def get(self, topic_name: str) -> None: self.render_json({'topic_name': topic_name}) - class MockPageHandler(base.BaseHandler): + class MockPageHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { 'schema': { @@ -3733,14 +5572,14 @@ class MockPageHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_topic_viewer_page - def get(self, unused_topic_name): + def get(self, unused_topic_name: str) -> None: self.render_template('topic-viewer-page.mainpage.html') - def setUp(self): - super(TopicViewerTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) @@ -3762,7 +5601,7 @@ def setUp(self): self.topic_id = topic_fetchers.get_new_topic_id() subtopic_1 = topic_domain.Subtopic.create_default_subtopic( - 1, 'Subtopic Title 1') + 1, 'Subtopic Title 
1', 'url-frag-one') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' self.save_new_topic( @@ -3771,33 +5610,35 @@ def setUp(self): additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[subtopic_1], next_subtopic_id=2) - def test_cannot_access_non_existent_topic(self): + def test_cannot_access_non_existent_topic(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_topic_data/staging/invalid-topic', expected_status_int=404) - def test_cannot_access_unpublished_topic(self): + def test_cannot_access_unpublished_topic(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_topic_data/staging/topic', expected_status_int=404) - def test_can_access_published_topic(self): + def test_can_access_published_topic(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_topic_data/staging/topic', expected_status_int=200) - def test_can_access_topic_when_all_url_fragments_are_valid(self): + def test_can_access_topic_when_all_url_fragments_are_valid(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_html_response( '/mock_topic_page/staging/topic', expected_status_int=200) - def test_redirect_to_classroom_if_abbreviated_topic_is_invalid(self): + def test_redirect_to_classroom_if_abbreviated_topic_is_invalid( + self + ) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( @@ -3807,7 +5648,7 @@ def test_redirect_to_classroom_if_abbreviated_topic_is_invalid(self): 'http://localhost/learn/math', response.headers['location']) - def test_redirect_with_correct_classroom_name_in_url(self): + def test_redirect_with_correct_classroom_name_in_url(self) -> None: topic_services.publish_topic(self.topic_id, 
self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( @@ -3817,7 +5658,7 @@ def test_redirect_with_correct_classroom_name_in_url(self): 'http://localhost/learn/staging/topic', response.headers['location']) - def test_redirect_with_lowercase_topic_url_fragment(self): + def test_redirect_with_lowercase_topic_url_fragment(self) -> None: topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( @@ -3834,17 +5675,17 @@ class CreateSkillTests(test_utils.GenericTestBase): banned_user = 'banneduser' banned_user_email = 'banned@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_create_skill - def get(self): + def get(self) -> None: self.render_json({}) - def setUp(self): - super(CreateSkillTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) @@ -3858,13 +5699,13 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_admin_can_create_skill(self): + def test_admin_can_create_skill(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_create_skill') self.logout() - def test_banned_user_cannot_create_skill(self): + def test_banned_user_cannot_create_skill(self) -> None: self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -3874,7 +5715,7 @@ def test_banned_user_cannot_create_skill(self): 'You do not have credentials to create a skill.') 
self.logout() - def test_guest_cannot_add_create_skill(self): + def test_guest_cannot_add_create_skill(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_create_skill', expected_status_int=401) @@ -3891,7 +5732,7 @@ class ManageQuestionSkillStatusTests(test_utils.GenericTestBase): viewer_email = 'viewer@example.com' skill_id = '1' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'skill_id': { @@ -3900,20 +5741,18 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_manage_question_skill_status - def get(self, skill_id): + def get(self, skill_id: str) -> None: self.render_json({'skill_id': skill_id}) - def setUp(self): - super(ManageQuestionSkillStatusTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.viewer_email, self.viewer_username) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) - self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.admin = user_services.get_user_actions_info(self.admin_id) - self.signup(self.viewer_email, self.viewer_username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( @@ -3922,13 +5761,16 @@ def setUp(self): debug=feconf.DEBUG, )) self.question_id = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.question = self.save_new_question( self.question_id, self.admin_id, - self._create_valid_question_data('ABC'), [self.skill_id]) + self._create_valid_question_data('ABC', content_id_generator), + [self.skill_id], + content_id_generator.next_content_id_index) question_services.create_new_question_skill_link( 
self.admin_id, self.question_id, self.skill_id, 0.5) - def test_admin_can_manage_question_skill_status(self): + def test_admin_can_manage_question_skill_status(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -3936,7 +5778,7 @@ def test_admin_can_manage_question_skill_status(self): self.assertEqual(response['skill_id'], self.skill_id) self.logout() - def test_viewer_cannot_manage_question_skill_status(self): + def test_viewer_cannot_manage_question_skill_status(self) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -3947,7 +5789,7 @@ def test_viewer_cannot_manage_question_skill_status(self): 'You do not have credentials to publish a question.') self.logout() - def test_guest_cannot_manage_question_skill_status(self): + def test_guest_cannot_manage_question_skill_status(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_manage_question_skill_status/%s' % self.skill_id, @@ -3963,23 +5805,20 @@ class CreateTopicTests(test_utils.GenericTestBase): banned_user = 'banneduser' banned_user_email = 'banned@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_create_topic - def get(self): + def get(self) -> None: self.render_json({}) - def setUp(self): - super(CreateTopicTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) - - self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.admin 
= user_services.get_user_actions_info(self.admin_id) self.signup(self.banned_user_email, self.banned_user) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.mark_user_banned(self.banned_user) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( @@ -3987,13 +5826,13 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_admin_can_create_topic(self): + def test_admin_can_create_topic(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_create_topic') self.logout() - def test_banned_user_cannot_create_topic(self): + def test_banned_user_cannot_create_topic(self) -> None: self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -4003,7 +5842,7 @@ def test_banned_user_cannot_create_topic(self): response['error']) self.logout() - def test_guest_cannot_create_topic(self): + def test_guest_cannot_create_topic(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_create_topic', expected_status_int=401) @@ -4019,7 +5858,7 @@ class ManageRightsForTopicTests(test_utils.GenericTestBase): banned_user_email = 'banned@example.com' topic_id = 'topic_1' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_id': { @@ -4028,22 +5867,19 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_manage_rights_for_topic - def get(self, topic_id): + def get(self, topic_id: str) -> None: self.render_json({'topic_id': topic_id}) - def setUp(self): - super(ManageRightsForTopicTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - 
self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) - - self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.banned_user_email, self.banned_user) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.mark_user_banned(self.banned_user) - + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_manage_rights_for_topic/', self.MockHandler)], @@ -4051,13 +5887,13 @@ def setUp(self): )) topic_services.create_new_topic_rights(self.topic_id, self.admin_id) - def test_admin_can_manage_rights(self): + def test_admin_can_manage_rights(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_manage_rights_for_topic/%s' % self.topic_id) self.logout() - def test_banned_user_cannot_manage_rights(self): + def test_banned_user_cannot_manage_rights(self) -> None: self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -4068,7 +5904,7 @@ def test_banned_user_cannot_manage_rights(self): response['error']) self.logout() - def test_guest_cannot_manage_rights(self): + def test_guest_cannot_manage_rights(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_manage_rights_for_topic/%s' % self.topic_id, @@ -4084,7 +5920,7 @@ class ChangeTopicPublicationStatusTests(test_utils.GenericTestBase): banned_user = 'banneduser' banned_user_email = 'banned@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_id': { @@ -4093,30 +5929,23 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: 
Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_change_topic_publication_status - def get(self, topic_id): + def get(self, topic_id: str) -> None: self.render_json({ topic_id: topic_id }) - def setUp(self): - super(ChangeTopicPublicationStatusTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) - - self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.banned_user_email, self.banned_user) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.mark_user_banned(self.banned_user) - + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.topic_id = topic_fetchers.get_new_topic_id() - self.save_new_topic( - self.topic_id, self.admin_id, name='Name1', - description='Description', canonical_story_ids=[], - additional_story_ids=[], uncategorized_skill_ids=[], - subtopics=[], next_subtopic_id=1) + self.save_new_topic(self.topic_id, self.admin_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( @@ -4125,14 +5954,15 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_admin_can_change_topic_publication_status(self): + def test_admin_can_change_topic_publication_status(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_change_publication_status/%s' % self.topic_id) self.logout() - def test_can_not_change_topic_publication_status_with_invalid_topic_id( - self): + def test_cannot_change_topic_publication_status_with_invalid_topic_id( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -4140,7 +5970,7 @@ def test_can_not_change_topic_publication_status_with_invalid_topic_id( expected_status_int=404) 
self.logout() - def test_banned_user_cannot_change_topic_publication_status(self): + def test_banned_user_cannot_change_topic_publication_status(self) -> None: self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -4151,7 +5981,7 @@ def test_banned_user_cannot_change_topic_publication_status(self): 'topic.', response['error']) self.logout() - def test_guest_cannot_change_topic_publication_status(self): + def test_guest_cannot_change_topic_publication_status(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_change_publication_status/%s' % self.topic_id, @@ -4167,21 +5997,18 @@ class PerformTasksInTaskqueueTests(test_utils.GenericTestBase): viewer_username = 'viewer' viewer_email = 'viewer@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_perform_tasks_in_taskqueue - def get(self): + def get(self) -> None: self.render_json({}) - def setUp(self): - super(PerformTasksInTaskqueueTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - - self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.viewer_email, self.viewer_username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( @@ -4190,13 +6017,13 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_super_admin_can_perform_tasks_in_taskqueue(self): + def test_super_admin_can_perform_tasks_in_taskqueue(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) with self.swap(self, 
'testapp', self.mock_testapp): self.get_json('/mock_perform_tasks_in_taskqueue') self.logout() - def test_normal_user_cannot_perform_tasks_in_taskqueue(self): + def test_normal_user_cannot_perform_tasks_in_taskqueue(self) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -4207,7 +6034,8 @@ def test_normal_user_cannot_perform_tasks_in_taskqueue(self): self.logout() def test_request_with_appropriate_header_can_perform_tasks_in_taskqueue( - self): + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_perform_tasks_in_taskqueue', @@ -4220,22 +6048,19 @@ class PerformCronTaskTests(test_utils.GenericTestBase): viewer_username = 'viewer' viewer_email = 'viewer@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_perform_cron_tasks - def get(self): + def get(self) -> None: self.render_json({}) - def setUp(self): - super(PerformCronTaskTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) - - self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.viewer_email, self.viewer_username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( @@ -4243,13 +6068,13 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_super_admin_can_perform_cron_tasks(self): + def test_super_admin_can_perform_cron_tasks(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) with self.swap(self, 'testapp', 
self.mock_testapp): self.get_json('/mock_perform_cron_task') self.logout() - def test_normal_user_cannot_perform_cron_tasks(self): + def test_normal_user_cannot_perform_cron_tasks(self) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -4259,7 +6084,9 @@ def test_normal_user_cannot_perform_cron_tasks(self): 'You do not have the credentials to access this page.') self.logout() - def test_request_with_appropriate_header_can_perform_cron_tasks(self): + def test_request_with_appropriate_header_can_perform_cron_tasks( + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_perform_cron_task', headers={'X-AppEngine-Cron': 'true'}) @@ -4268,15 +6095,13 @@ def test_request_with_appropriate_header_can_perform_cron_tasks(self): class EditSkillDecoratorTests(test_utils.GenericTestBase): """Tests permissions for accessing the skill editor.""" - second_admin_username = 'adm2' - second_admin_email = 'adm2@example.com' manager_username = 'topicmanager' manager_email = 'topicmanager@example.com' viewer_username = 'viewer' viewer_email = 'viewer@example.com' skill_id = '1' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'skill_id': { @@ -4285,38 +6110,23 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_edit_skill - def get(self, skill_id): + def get(self, skill_id: str) -> None: self.render_json({'skill_id': skill_id}) - def setUp(self): - super(EditSkillDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - self.signup(self.second_admin_email, self.second_admin_username) self.signup(self.manager_email, 
self.manager_username) self.signup(self.viewer_email, self.viewer_username) - self.set_curriculum_admins( - [self.CURRICULUM_ADMIN_USERNAME, self.second_admin_username]) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.second_admin_id = self.get_user_id_from_email( - self.second_admin_email) - self.manager_id = self.get_user_id_from_email(self.manager_email) - self.admin = user_services.get_user_actions_info(self.admin_id) - self.manager = user_services.get_user_actions_info(self.manager_id) self.topic_id = topic_fetchers.get_new_topic_id() - subtopic_1 = topic_domain.Subtopic.create_default_subtopic( - 1, 'Subtopic Title 1') - subtopic_1.skill_ids = ['skill_id_1'] - subtopic_1.url_fragment = 'sub-one-frag' - self.save_new_topic( - self.topic_id, self.admin_id, name='Name', - description='Description', canonical_story_ids=[], - additional_story_ids=[], uncategorized_skill_ids=[], - subtopics=[subtopic_1], next_subtopic_id=2) + self.save_new_topic(self.topic_id, self.admin_id) self.set_topic_managers([self.manager_username], self.topic_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( @@ -4324,47 +6134,100 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_cannot_edit_skill_with_invalid_skill_id(self): + def test_cannot_edit_skill_with_invalid_skill_id(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_custom_response( '/mock_edit_skill/', 'text/plain', expected_status_int=404) self.logout() - def test_admin_can_edit_skill(self): + def test_admin_can_edit_skill(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_skill/%s' % self.skill_id) self.assertEqual(response['skill_id'], self.skill_id) self.logout() - def test_admin_can_edit_other_public_skill(self): - self.login(self.second_admin_email) - with 
self.swap(self, 'testapp', self.mock_testapp): - response = self.get_json('/mock_edit_skill/%s' % self.skill_id) - self.assertEqual(response['skill_id'], self.skill_id) - self.logout() - - def test_topic_manager_can_edit_public_skill(self): + def test_topic_manager_can_edit_public_skill(self) -> None: self.login(self.manager_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_skill/%s' % self.skill_id) self.assertEqual(response['skill_id'], self.skill_id) self.logout() - def test_normal_user_can_not_edit_public_skill(self): + def test_normal_user_cannot_edit_public_skill(self) -> None: self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_skill/%s' % self.skill_id, expected_status_int=401) + self.logout() + + def test_guest_cannot_edit_public_skill(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_edit_skill/%s' % self.skill_id, expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) + + +class DeleteSkillDecoratorTests(test_utils.GenericTestBase): + """Tests the decorator can_delete_skill.""" + + viewer_username = 'viewer' + viewer_email = 'viewer@example.com' + skill_id = '1' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_delete_skill + def get(self) -> None: + self.render_json({'success': True}) + + def setUp(self) -> None: + super().setUp() + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.viewer_email, self.viewer_username) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route('/mock_delete_skill', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_admin_can_delete_skill(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock_delete_skill') + self.assertTrue(response['success']) + self.logout() + + def test_normal_user_cannot_delete_public_skill(self) -> None: + self.login(self.viewer_email) + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json('/mock_delete_skill', expected_status_int=401) + self.logout() + + def test_guest_cannot_delete_public_skill(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_delete_skill', expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' 
+ self.assertEqual(response['error'], error_msg) class EditQuestionDecoratorTests(test_utils.GenericTestBase): """Tests the decorator can_edit_question.""" question_id = 'question_id' + user_a = 'A' + user_a_email = 'a@example.com' + user_b = 'B' + user_b_email = 'b@example.com' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'question_id': { @@ -4373,47 +6236,33 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_edit_question - def get(self, question_id): + def get(self, question_id: str) -> None: self.render_json({'question_id': question_id}) - def setUp(self): - super(EditQuestionDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) - self.signup('a@example.com', 'A') - self.signup('b@example.com', 'B') - - self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) - self.user_id_admin = ( - self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) - self.user_id_a = self.get_user_id_from_email('a@example.com') - self.user_id_b = self.get_user_id_from_email('b@example.com') + self.signup(self.user_a_email, self.user_a) + self.signup(self.user_b_email, self.user_b) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.manager_id = self.get_user_id_from_email('a@example.com') - self.question_id = 'question_id' - + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.topic_id = topic_fetchers.get_new_topic_id() - subtopic_1 = topic_domain.Subtopic.create_default_subtopic( - 1, 'Subtopic Title 1') - subtopic_1.skill_ids = ['skill_id_1'] - subtopic_1.url_fragment = 
'sub-one-frag' - self.save_new_topic( - self.topic_id, self.admin_id, name='Name', - description='Description', canonical_story_ids=[], - additional_story_ids=[], uncategorized_skill_ids=[], - subtopics=[subtopic_1], next_subtopic_id=2) + self.save_new_topic(self.topic_id, self.admin_id) + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.question_id, self.owner_id, - self._create_valid_question_data('ABC'), ['skill_1']) - self.set_topic_managers( - [user_services.get_username(self.user_id_a)], self.topic_id) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) + self.set_topic_managers([self.user_a], self.topic_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( @@ -4421,7 +6270,7 @@ def setUp(self): debug=feconf.DEBUG, )) - def test_guest_cannot_edit_question(self): + def test_guest_cannot_edit_question(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_question/%s' % self.question_id, @@ -4430,7 +6279,7 @@ def test_guest_cannot_edit_question(self): response['error'], 'You must be logged in to access this resource.') - def test_cannot_edit_question_with_invalid_question_id(self): + def test_cannot_edit_question_with_invalid_question_id(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -4438,7 +6287,7 @@ def test_cannot_edit_question_with_invalid_question_id(self): expected_status_int=404) self.logout() - def test_admin_can_edit_question(self): + def test_admin_can_edit_question(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -4446,16 +6295,16 @@ def test_admin_can_edit_question(self): self.assertEqual(response['question_id'], self.question_id) self.logout() - def test_topic_manager_can_edit_question(self): - 
self.login('a@example.com') + def test_topic_manager_can_edit_question(self) -> None: + self.login(self.user_a_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_question/%s' % self.question_id) self.assertEqual(response['question_id'], self.question_id) self.logout() - def test_any_user_cannot_edit_question(self): - self.login('b@example.com') + def test_any_user_cannot_edit_question(self) -> None: + self.login(self.user_b_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_question/%s' % self.question_id, @@ -4463,12 +6312,202 @@ def test_any_user_cannot_edit_question(self): self.logout() +class ViewQuestionEditorDecoratorTests(test_utils.GenericTestBase): + """Tests the decorator can_view_question_editor.""" + + question_id = 'question_id' + user_a = 'A' + user_a_email = 'a@example.com' + user_b = 'B' + user_b_email = 'b@example.com' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'question_id': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_view_question_editor + def get(self, question_id: str) -> None: + self.render_json({'question_id': question_id}) + + def setUp(self) -> None: + super().setUp() + + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.signup(self.user_a_email, self.user_a) + self.signup(self.user_b_email, self.user_b) + + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + self.topic_id = topic_fetchers.get_new_topic_id() + self.save_new_topic(self.topic_id, self.admin_id) + content_id_generator = 
translation_domain.ContentIdGenerator() + self.save_new_question( + self.question_id, self.owner_id, + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) + self.set_topic_managers([self.user_a], self.topic_id) + + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_view_question_editor/', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_guest_cannot_view_question_editor(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_question_editor/%s' % self.question_id, + expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' + self.assertEqual(response['error'], error_msg) + + def test_cannot_view_question_editor_with_invalid_question_id( + self + ) -> None: + invalid_id = 'invalid_question_id' + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_question_editor/%s' % invalid_id, + expected_status_int=404) + error_msg = ( + 'Could not find the page http://localhost/' + 'mock_view_question_editor/%s.' 
% invalid_id + ) + self.assertEqual(response['error'], error_msg) + self.logout() + + def test_curriculum_admin_can_view_question_editor(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_question_editor/%s' % self.question_id) + self.assertEqual(response['question_id'], self.question_id) + self.logout() + + def test_topic_manager_can_view_question_editor(self) -> None: + self.login(self.user_a_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_question_editor/%s' % self.question_id) + self.assertEqual(response['question_id'], self.question_id) + self.logout() + + def test_normal_user_cannot_view_question_editor(self) -> None: + self.login(self.user_b_email) + user_id_b = self.get_user_id_from_email(self.user_b_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_view_question_editor/%s' % self.question_id, + expected_status_int=401) + error_msg = ( + '%s does not have enough rights to access the questions editor' + % user_id_b) + self.assertEqual(response['error'], error_msg) + self.logout() + + +class DeleteQuestionDecoratorTests(test_utils.GenericTestBase): + """Tests the decorator can_delete_question.""" + + question_id = 'question_id' + user_a = 'A' + user_a_email = 'a@example.com' + user_b = 'B' + user_b_email = 'b@example.com' + + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + URL_PATH_ARGS_SCHEMAS = { + 'question_id': { + 'schema': { + 'type': 'basestring' + } + } + } + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + @acl_decorators.can_delete_question + def get(self, question_id: str) -> None: + self.render_json({'question_id': question_id}) + + def setUp(self) -> None: + super().setUp() + + self.signup(self.CURRICULUM_ADMIN_EMAIL, 
self.CURRICULUM_ADMIN_USERNAME) + self.signup(self.user_a_email, self.user_a) + self.signup(self.user_b_email, self.user_b) + + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.manager_id = self.get_user_id_from_email(self.user_a_email) + + self.topic_id = topic_fetchers.get_new_topic_id() + self.save_new_topic(self.topic_id, self.admin_id) + self.set_topic_managers([self.user_a], self.topic_id) + + self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( + [webapp2.Route( + '/mock_delete_question/', self.MockHandler)], + debug=feconf.DEBUG, + )) + + def test_guest_cannot_delete_question(self) -> None: + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_delete_question/%s' % self.question_id, + expected_status_int=401) + error_msg = 'You must be logged in to access this resource.' + self.assertEqual(response['error'], error_msg) + + def test_curriculum_admin_can_delete_question(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_delete_question/%s' % self.question_id) + self.assertEqual(response['question_id'], self.question_id) + self.logout() + + def test_topic_manager_can_delete_question(self) -> None: + self.login(self.user_a_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_delete_question/%s' % self.question_id) + self.assertEqual(response['question_id'], self.question_id) + self.logout() + + def test_normal_user_cannot_delete_question(self) -> None: + self.login(self.user_b_email) + user_id_b = self.get_user_id_from_email(self.user_b_email) + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json( + '/mock_delete_question/%s' % self.question_id, + expected_status_int=401) + error_msg = ( + '%s does not have enough rights to delete the question.' 
+ % user_id_b) + self.assertEqual(response['error'], error_msg) + self.logout() + + class PlayQuestionDecoratorTests(test_utils.GenericTestBase): """Tests the decorator can_play_question.""" question_id = 'question_id' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'question_id': { @@ -4477,14 +6516,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_play_question - def get(self, question_id): + def get(self, question_id: str) -> None: self.render_json({'question_id': question_id}) - def setUp(self): - super(PlayQuestionDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( @@ -4492,11 +6531,14 @@ def setUp(self): '/mock_play_question/', self.MockHandler)], debug=feconf.DEBUG, )) + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.question_id, self.owner_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) - def test_can_play_question_with_valid_question_id(self): + def test_can_play_question_with_valid_question_id(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_play_question/%s' % ( self.question_id)) @@ -4504,14 +6546,14 @@ def test_can_play_question_with_valid_question_id(self): class PlayEntityDecoratorTests(test_utils.GenericTestBase): - """Test the decorator can_play_entity.""" + """Tests the decorator can_play_entity.""" user_email = 'user@example.com' username = 'user' published_exp_id 
= 'exp_id_1' private_exp_id = 'exp_id_2' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'entity_type': { @@ -4525,15 +6567,15 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_play_entity - def get(self, entity_type, entity_id): + def get(self, entity_type: str, entity_id: str) -> None: self.render_json( {'entity_type': entity_type, 'entity_id': entity_id}) - def setUp(self): - super(PlayEntityDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.user_email, self.username) @@ -4547,22 +6589,27 @@ def setUp(self): debug=feconf.DEBUG, )) self.question_id = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.question_id, self.owner_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) - def test_cannot_play_exploration_on_disabled_exploration_ids(self): + def test_cannot_play_exploration_on_disabled_exploration_ids(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_EXPLORATION, feconf.DISABLED_EXPLORATION_IDS[0]), expected_status_int=404) - def test_guest_can_play_exploration_on_published_exploration(self): + def 
test_guest_can_play_exploration_on_published_exploration( + self + ) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_EXPLORATION, self.published_exp_id)) @@ -4571,20 +6618,20 @@ def test_guest_can_play_exploration_on_published_exploration(self): self.assertEqual( response['entity_id'], self.published_exp_id) - def test_guest_cannot_play_exploration_on_private_exploration(self): + def test_guest_cannot_play_exploration_on_private_exploration(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_EXPLORATION, self.private_exp_id), expected_status_int=404) - def test_cannot_play_exploration_with_none_exploration_rights(self): + def test_cannot_play_exploration_with_none_exploration_rights(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_play_entity/%s/%s' % (feconf.ENTITY_TYPE_EXPLORATION, 'fake_exp_id'), expected_status_int=404) - def test_can_play_question_for_valid_question_id(self): + def test_can_play_question_for_valid_question_id(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_QUESTION, self.question_id)) @@ -4593,25 +6640,27 @@ def test_can_play_question_for_valid_question_id(self): self.assertEqual(response['entity_id'], self.question_id) self.assertEqual(response['entity_type'], 'question') - def test_cannot_play_question_invalid_question_id(self): + def test_cannot_play_question_invalid_question_id(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_QUESTION, 'question_id'), expected_status_int=404) - def test_cannot_play_entity_for_invalid_entity(self): + def test_cannot_play_entity_for_invalid_entity(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): 
self.get_json('/mock_play_entity/%s/%s' % ( 'fake_entity_type', 'fake_entity_id'), expected_status_int=404) class EditEntityDecoratorTests(test_utils.GenericTestBase): + """Tests the decorator can_edit_entity.""" + username = 'banneduser' user_email = 'user@example.com' published_exp_id = 'exp_0' private_exp_id = 'exp_1' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'entity_type': { @@ -4625,15 +6674,15 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_edit_entity - def get(self, entity_type, entity_id): + def get(self, entity_type: str, entity_id: str) -> None: return self.render_json( {'entity_type': entity_type, 'entity_id': entity_id}) - def setUp(self): - super(EditEntityDecoratorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -4654,16 +6703,19 @@ def setUp(self): debug=feconf.DEBUG, )) self.question_id = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.question_id, self.owner_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) - def test_can_edit_exploration_with_valid_exp_id(self): + def test_can_edit_exploration_with_valid_exp_id(self) -> None: self.login(self.OWNER_EMAIL) with 
self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -4675,7 +6727,7 @@ def test_can_edit_exploration_with_valid_exp_id(self): response['entity_id'], self.published_exp_id) self.logout() - def test_cannot_edit_exploration_with_invalid_exp_id(self): + def test_cannot_edit_exploration_with_invalid_exp_id(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -4683,7 +6735,7 @@ def test_cannot_edit_exploration_with_invalid_exp_id(self): expected_status_int=404) self.logout() - def test_banned_user_cannot_edit_exploration(self): + def test_banned_user_cannot_edit_exploration(self) -> None: self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -4692,7 +6744,7 @@ def test_banned_user_cannot_edit_exploration(self): expected_status_int=401) self.logout() - def test_can_edit_question_with_valid_question_id(self): + def test_can_edit_question_with_valid_question_id(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_entity/%s/%s' % ( @@ -4701,7 +6753,7 @@ def test_can_edit_question_with_valid_question_id(self): self.assertEqual(response['entity_type'], 'question') self.logout() - def test_can_edit_topic(self): + def test_can_edit_topic(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) topic_id = topic_fetchers.get_new_topic_id() self.save_new_topic( @@ -4716,7 +6768,7 @@ def test_can_edit_topic(self): self.assertEqual(response['entity_type'], 'topic') self.logout() - def test_cannot_edit_topic_with_invalid_topic_id(self): + def test_cannot_edit_topic_with_invalid_topic_id(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) topic_id = 'incorrect_id' with self.swap(self, 'testapp', self.mock_testapp): @@ -4726,7 +6778,7 @@ def test_cannot_edit_topic_with_invalid_topic_id(self): expected_status_int=404) self.logout() - def test_can_edit_skill(self): + def 
test_can_edit_skill(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) skill_id = skill_services.get_new_skill_id() self.save_new_skill(skill_id, self.admin_id, description='Description') @@ -4737,7 +6789,43 @@ def test_can_edit_skill(self): self.assertEqual(response['entity_type'], 'skill') self.logout() - def test_can_edit_blog_post(self): + def test_can_submit_images_to_questions(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL) + skill_id = skill_services.get_new_skill_id() + self.save_new_skill(skill_id, self.admin_id, description='Description') + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock_edit_entity/%s/%s' % ( + feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS, skill_id)) + self.assertEqual(response['entity_id'], skill_id) + self.assertEqual(response['entity_type'], 'question_suggestions') + self.logout() + + def test_unauthenticated_users_cannot_submit_images_to_questions( + self + ) -> None: + skill_id = skill_services.get_new_skill_id() + self.save_new_skill(skill_id, self.admin_id, description='Description') + with self.swap(self, 'testapp', self.mock_testapp): + self.get_json('/mock_edit_entity/%s/%s' % ( + feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS, skill_id), + expected_status_int=401) + + def test_cannot_submit_images_to_questions_without_having_permissions( + self + ) -> None: + self.login(self.user_email) + skill_id = skill_services.get_new_skill_id() + self.save_new_skill(skill_id, self.admin_id, description='Description') + with self.swap(self, 'testapp', self.mock_testapp): + response = self.get_json('/mock_edit_entity/%s/%s' % ( + feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS, skill_id), + expected_status_int=401) + self.assertEqual( + response['error'], 'You do not have credentials to submit' + ' images to questions.') + self.logout() + + def test_can_edit_blog_post(self) -> None: self.login(self.BLOG_ADMIN_EMAIL) blog_admin_id = ( self.get_user_id_from_email(self.BLOG_ADMIN_EMAIL)) @@ -4750,7 +6838,7 
@@ def test_can_edit_blog_post(self): self.assertEqual(response['entity_type'], 'blog_post') self.logout() - def test_can_edit_story(self): + def test_can_edit_story(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) story_id = story_services.get_new_story_id() topic_id = topic_fetchers.get_new_topic_id() @@ -4767,7 +6855,7 @@ def test_can_edit_story(self): self.assertEqual(response['entity_type'], 'story') self.logout() - def test_cannot_edit_entity_invalid_entity(self): + def test_cannot_edit_entity_invalid_entity(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_edit_entity/%s/%s' % ( 'invalid_entity_type', 'q_id'), expected_status_int=404) @@ -4786,7 +6874,7 @@ class SaveExplorationTests(test_utils.GenericTestBase): private_exp_id_1 = 'exp_3' private_exp_id_2 = 'exp_4' - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { @@ -4795,14 +6883,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_save_exploration - def get(self, exploration_id): + def get(self, exploration_id: str) -> None: self.render_json({'exploration_id': exploration_id}) - def setUp(self): - super(SaveExplorationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -4843,40 +6931,40 @@ def setUp(self): self.voiceover_admin, self.published_exp_id_1, self.voice_artist_id, self.role) - def test_unautheticated_user_cannot_save_exploration(self): + def test_unautheticated_user_cannot_save_exploration(self) -> None: with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % 
self.private_exp_id_1, expected_status_int=401) - def test_can_not_save_exploration_with_invalid_exp_id(self): + def test_cannot_save_exploration_with_invalid_exp_id(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/invalid_exp_id', expected_status_int=404) self.logout() - def test_banned_user_cannot_save_exploration(self): + def test_banned_user_cannot_save_exploration(self) -> None: self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.private_exp_id_1, expected_status_int=401) self.logout() - def test_owner_can_save_exploration(self): + def test_owner_can_save_exploration(self) -> None: self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id_1) self.assertEqual(response['exploration_id'], self.private_exp_id_1) self.logout() - def test_moderator_can_save_public_exploration(self): + def test_moderator_can_save_public_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.published_exp_id_1) self.assertEqual(response['exploration_id'], self.published_exp_id_1) self.logout() - def test_moderator_can_save_private_exploration(self): + def test_moderator_can_save_private_exploration(self) -> None: self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id_1) @@ -4884,14 +6972,14 @@ def test_moderator_can_save_private_exploration(self): self.assertEqual(response['exploration_id'], self.private_exp_id_1) self.logout() - def test_admin_can_save_private_exploration(self): + def test_admin_can_save_private_exploration(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % 
self.private_exp_id_1) self.assertEqual(response['exploration_id'], self.private_exp_id_1) self.logout() - def test_voice_artist_can_only_save_assigned_exploration(self): + def test_voice_artist_can_only_save_assigned_exploration(self) -> None: self.login(self.VOICE_ARTIST_EMAIL) # Checking voice artist can only save assigned public exploration. with self.swap(self, 'testapp', self.mock_testapp): @@ -4906,13 +6994,25 @@ def test_voice_artist_can_only_save_assigned_exploration(self): self.logout() +class MockHandlerNormalizedPayloadDict(TypedDict): + """Type for the MockHandler's normalized_payload dictionary.""" + + signature: str + vm_id: str + message: bytes + + class OppiaMLAccessDecoratorTest(test_utils.GenericTestBase): """Tests for oppia_ml_access decorator.""" - class MockHandler(base.OppiaMLVMHandler): + class MockHandler( + base.OppiaMLVMHandler[ + MockHandlerNormalizedPayloadDict, Dict[str, str] + ] + ): REQUIRE_PAYLOAD_CSRF_CHECK = False GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'POST': { 'vm_id': { @@ -4933,31 +7033,34 @@ class MockHandler(base.OppiaMLVMHandler): } } - def extract_request_message_vm_id_and_signature(self): + def extract_request_message_vm_id_and_signature( + self + ) -> classifier_domain.OppiaMLAuthInfo: """Returns message, vm_id and signature retrived from incoming request. Returns: - tuple(str). Message at index 0, vm_id at index 1 and signature - at index 2. + OppiaMLAuthInfo. Message at index 0, vm_id at index 1 and + signature at index 2. 
""" - signature = self.payload.get('signature') - vm_id = self.payload.get('vm_id') - message = self.payload.get('message') + assert self.normalized_payload is not None + signature = self.normalized_payload['signature'] + vm_id = self.normalized_payload['vm_id'] + message = self.normalized_payload['message'] return classifier_domain.OppiaMLAuthInfo(message, vm_id, signature) @acl_decorators.is_from_oppia_ml - def post(self): + def post(self) -> None: self.render_json({'job_id': 'new_job'}) - def setUp(self): - super(OppiaMLAccessDecoratorTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/ml/nextjobhandler', self.MockHandler)], debug=feconf.DEBUG, )) - def test_unauthorized_vm_cannot_fetch_jobs(self): + def test_unauthorized_vm_cannot_fetch_jobs(self) -> None: payload = {} payload['vm_id'] = 'fake_vm' secret = 'fake_secret' @@ -4972,7 +7075,7 @@ def test_unauthorized_vm_cannot_fetch_jobs(self): '/ml/nextjobhandler', payload, expected_status_int=401) - def test_default_vm_id_raises_exception_in_prod_mode(self): + def test_default_vm_id_raises_exception_in_prod_mode(self) -> None: payload = {} payload['vm_id'] = feconf.DEFAULT_VM_ID secret = feconf.DEFAULT_VM_SHARED_SECRET @@ -4986,7 +7089,7 @@ def test_default_vm_id_raises_exception_in_prod_mode(self): self.post_json( '/ml/nextjobhandler', payload, expected_status_int=401) - def test_that_invalid_signature_raises_exception(self): + def test_that_invalid_signature_raises_exception(self) -> None: payload = {} payload['vm_id'] = feconf.DEFAULT_VM_ID secret = feconf.DEFAULT_VM_SHARED_SECRET @@ -4998,7 +7101,7 @@ def test_that_invalid_signature_raises_exception(self): self.post_json( '/ml/nextjobhandler', payload, expected_status_int=401) - def test_that_no_excpetion_is_raised_when_valid_vm_access(self): + def test_that_no_excpetion_is_raised_when_valid_vm_access(self) -> None: payload = {} payload['vm_id'] = feconf.DEFAULT_VM_ID 
secret = feconf.DEFAULT_VM_SHARED_SECRET @@ -5025,12 +7128,12 @@ class DecoratorForUpdatingSuggestionTests(test_utils.GenericTestBase): en_language_reviewer = 'reviewer2@example.com' username = 'user' user_email = 'user@example.com' - TARGET_TYPE = 'exploration' + TARGET_TYPE: Final = 'exploration' exploration_id = 'exp_id' target_version_id = 1 change_dict = { 'cmd': 'add_written_translation', - 'content_id': 'content', + 'content_id': 'content_0', 'language_code': 'hi', 'content_html': '

old content html

', 'state_name': 'State 1', @@ -5038,7 +7141,7 @@ class DecoratorForUpdatingSuggestionTests(test_utils.GenericTestBase): 'data_format': 'html' } - class MockHandler(base.BaseHandler): + class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'suggestion_id': { @@ -5047,14 +7150,14 @@ class MockHandler(base.BaseHandler): } } } - HANDLER_ARGS_SCHEMAS = {'GET': {}} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_update_suggestion - def get(self, suggestion_id): + def get(self, suggestion_id: str) -> None: self.render_json({'suggestion_id': suggestion_id}) - def setUp(self): - super(DecoratorForUpdatingSuggestionTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.author_email, self.author_username) self.signup(self.user_email, self.username) self.signup(self.curriculum_admin_email, self.curriculum_admin_username) @@ -5088,14 +7191,23 @@ def setUp(self): ['TextInput'], category='Algebra')) self.old_content = state_domain.SubtitledHtml( - 'content', '

old content html

').to_dict() + 'content_0', '

old content html

').to_dict() exploration.states['State 1'].update_content( state_domain.SubtitledHtml.from_dict(self.old_content)) exploration.states['State 2'].update_content( state_domain.SubtitledHtml.from_dict(self.old_content)) exploration.states['State 3'].update_content( state_domain.SubtitledHtml.from_dict(self.old_content)) - exp_services._save_exploration(self.author_id, exploration, '', []) # pylint: disable=protected-access + exp_models = ( + exp_services._compute_models_for_updating_exploration( # pylint: disable=protected-access + self.author_id, + exploration, + '', + [] + ) + ) + datastore_services.update_timestamps_multi(exp_models) + datastore_services.put_multi(exp_models) rights_manager.publish_exploration(self.author, self.exploration_id) @@ -5106,16 +7218,23 @@ def setUp(self): self.save_new_skill('skill_123', self.admin_id) - add_question_change_dict = { + content_id_generator = translation_domain.ContentIdGenerator() + add_question_change_dict: Dict[ + str, Union[str, question_domain.QuestionDict, float] + ] = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index), + 'version': 44, + 'id': '' }, 'skill_id': 'skill_123', 'skill_difficulty': 0.3 @@ -5166,7 +7285,7 @@ def setUp(self): self.question_suggestion_id = question_suggestion.suggestion_id self.edit_state_suggestion_id = edit_state_suggestion.suggestion_id - def test_authors_cannot_update_suggestion_that_they_created(self): + def test_authors_cannot_update_suggestion_that_they_created(self) -> None: 
self.login(self.author_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -5178,7 +7297,7 @@ def test_authors_cannot_update_suggestion_that_they_created(self): 'suggestions.' % self.author_username) self.logout() - def test_admin_can_update_any_given_translation_suggestion(self): + def test_admin_can_update_any_given_translation_suggestion(self) -> None: self.login(self.curriculum_admin_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -5187,14 +7306,14 @@ def test_admin_can_update_any_given_translation_suggestion(self): response['suggestion_id'], self.translation_suggestion_id) self.logout() - def test_admin_can_update_any_given_question_suggestion(self): + def test_admin_can_update_any_given_question_suggestion(self) -> None: self.login(self.curriculum_admin_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.question_suggestion_id) self.assertEqual(response['suggestion_id'], self.question_suggestion_id) self.logout() - def test_reviewer_can_update_translation_suggestion(self): + def test_reviewer_can_update_translation_suggestion(self) -> None: self.login(self.hi_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -5203,7 +7322,7 @@ def test_reviewer_can_update_translation_suggestion(self): response['suggestion_id'], self.translation_suggestion_id) self.logout() - def test_reviewer_can_update_question_suggestion(self): + def test_reviewer_can_update_question_suggestion(self) -> None: self.login(self.hi_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.question_suggestion_id) @@ -5211,7 +7330,7 @@ def test_reviewer_can_update_question_suggestion(self): response['suggestion_id'], self.question_suggestion_id) self.logout() - def test_guest_cannot_update_any_suggestion(self): + def test_guest_cannot_update_any_suggestion(self) -> 
None: with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock/%s' % self.translation_suggestion_id, @@ -5220,7 +7339,9 @@ def test_guest_cannot_update_any_suggestion(self): response['error'], 'You must be logged in to access this resource.') - def test_reviewers_without_permission_cannot_update_any_suggestion(self): + def test_reviewers_without_permission_cannot_update_any_suggestion( + self + ) -> None: self.login(self.en_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -5230,7 +7351,9 @@ def test_reviewers_without_permission_cannot_update_any_suggestion(self): response['error'], 'You are not allowed to update the suggestion.') self.logout() - def test_suggestions_with_invalid_suggestion_id_cannot_be_updated(self): + def test_suggestions_with_invalid_suggestion_id_cannot_be_updated( + self + ) -> None: self.login(self.hi_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -5241,7 +7364,7 @@ def test_suggestions_with_invalid_suggestion_id_cannot_be_updated(self): 'It must contain 3 parts separated by \'.\'') self.logout() - def test_non_existent_suggestions_cannot_be_updated(self): + def test_non_existent_suggestions_cannot_be_updated(self) -> None: self.login(self.hi_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( @@ -5250,7 +7373,7 @@ def test_non_existent_suggestions_cannot_be_updated(self): expected_status_int=404) self.logout() - def test_not_allowed_suggestions_cannot_be_updated(self): + def test_not_allowed_suggestions_cannot_be_updated(self) -> None: self.login(self.en_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( @@ -5264,25 +7387,56 @@ def test_not_allowed_suggestions_cannot_be_updated(self): class OppiaAndroidDecoratorTest(test_utils.GenericTestBase): """Tests for is_from_oppia_android decorator.""" - class MockHandler(base.BaseHandler): + 
class MockHandler(base.BaseHandler[Dict[str, str], Dict[str, str]]): REQUIRE_PAYLOAD_CSRF_CHECK = False GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'POST': { 'report': { 'schema': { - 'type': 'object_dict', - 'object_class': ( - app_feedback_report_domain.AppFeedbackReport) + 'type': 'dict', + 'properties': [{ + 'name': 'platform_type', + 'schema': { + 'type': 'unicode' + } + }, { + 'name': 'android_report_info_schema_version', + 'schema': { + 'type': 'int' + } + }, { + 'name': 'app_context', + 'schema': incoming_app_feedback_report.ANDROID_APP_CONTEXT_DICT_SCHEMA # pylint: disable=line-too-long + }, { + 'name': 'device_context', + 'schema': incoming_app_feedback_report.ANDROID_DEVICE_CONTEXT_DICT_SCHEMA # pylint: disable=line-too-long + }, { + 'name': 'report_submission_timestamp_sec', + 'schema': { + 'type': 'int' + } + }, { + 'name': 'report_submission_utc_offset_hrs', + 'schema': { + 'type': 'int' + } + }, { + 'name': 'system_context', + 'schema': incoming_app_feedback_report.ANDROID_SYSTEM_CONTEXT_DICT_SCHEMA # pylint: disable=line-too-long + }, { + 'name': 'user_supplied_feedback', + 'schema': incoming_app_feedback_report.USER_SUPPLIED_FEEDBACK_DICT_SCHEMA # pylint: disable=line-too-long + }] } } } } @acl_decorators.is_from_oppia_android - def post(self): - return self.render_json({}) + def post(self) -> None: + self.render_json({}) REPORT_JSON = { 'platform_type': 'android', @@ -5329,8 +7483,8 @@ def post(self): ANDROID_APP_VERSION_NAME = '1.0.0-flavor-commithash' ANDROID_APP_VERSION_CODE = '2' - def setUp(self): - super(OppiaAndroidDecoratorTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/appfeedbackreporthandler/incoming_android_report', @@ -5338,7 +7492,9 @@ def setUp(self): debug=feconf.DEBUG, )) - def 
test_that_no_exception_is_raised_when_valid_oppia_android_headers(self): + def test_that_no_exception_is_raised_when_valid_oppia_android_headers( + self + ) -> None: headers = { 'api_key': android_validation_constants.ANDROID_API_KEY, 'app_package_name': ( @@ -5354,7 +7510,7 @@ def test_that_no_exception_is_raised_when_valid_oppia_android_headers(self): '/appfeedbackreporthandler/incoming_android_report', payload, headers=headers) - def test_invalid_api_key_raises_exception(self): + def test_invalid_api_key_raises_exception(self) -> None: invalid_headers = { 'api_key': 'bad_key', 'app_package_name': ( @@ -5370,7 +7526,7 @@ def test_invalid_api_key_raises_exception(self): '/appfeedbackreporthandler/incoming_android_report', payload, headers=invalid_headers, expected_status_int=401) - def test_invalid_package_name_raises_exception(self): + def test_invalid_package_name_raises_exception(self) -> None: invalid_headers = { 'api_key': android_validation_constants.ANDROID_API_KEY, 'app_package_name': 'bad_package_name', @@ -5385,7 +7541,7 @@ def test_invalid_package_name_raises_exception(self): '/appfeedbackreporthandler/incoming_android_report', payload, headers=invalid_headers, expected_status_int=401) - def test_invalid_version_name_raises_exception(self): + def test_invalid_version_name_raises_exception(self) -> None: invalid_headers = { 'api_key': android_validation_constants.ANDROID_API_KEY, 'app_package_name': ( @@ -5401,7 +7557,7 @@ def test_invalid_version_name_raises_exception(self): '/appfeedbackreporthandler/incoming_android_report', payload, headers=invalid_headers, expected_status_int=401) - def test_invalid_version_code_raises_exception(self): + def test_invalid_version_code_raises_exception(self) -> None: invalid_headers = { 'api_key': android_validation_constants.ANDROID_API_KEY, 'app_package_name': ( diff --git a/core/controllers/admin.py b/core/controllers/admin.py index d9416aa9a8b2..a1045b6c6b26 100644 --- a/core/controllers/admin.py +++ 
b/core/controllers/admin.py @@ -28,6 +28,8 @@ from core.controllers import domain_objects_validator as validation_method from core.domain import auth_services from core.domain import blog_services +from core.domain import classroom_config_domain +from core.domain import classroom_config_services from core.domain import collection_services from core.domain import config_domain from core.domain import config_services @@ -55,28 +57,72 @@ from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services +from core.domain import translation_domain from core.domain import user_services from core.domain import wipeout_service +from typing import Dict, List, Optional, TypedDict, Union, cast -class AdminPage(base.BaseHandler): + +class ClassroomPageDataDict(TypedDict): + """Dict representation of classroom page's data dictionary.""" + + course_details: str + name: str + topic_ids: List[str] + topic_list_intro: str + url_fragment: str + + +AllowedAdminConfigPropertyValueTypes = Union[ + str, bool, float, Dict[str, str], List[str], ClassroomPageDataDict +] + + +class AdminPage( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): """Admin page shown in the App Engine admin console.""" - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_admin_page - def get(self): + def get(self) -> None: """Handles GET requests.""" self.render_template('admin-page.mainpage.html') -class AdminHandler(base.BaseHandler): +class AdminHandlerNormalizePayloadDict(TypedDict): + """Dict representation of AdminHandler's normalized_payload + dictionary. 
+ """ + + action: Optional[str] + exploration_id: Optional[str] + collection_id: Optional[str] + num_dummy_exps_to_generate: Optional[int] + num_dummy_exps_to_publish: Optional[int] + new_config_property_values: Optional[ + Dict[str, AllowedAdminConfigPropertyValueTypes] + ] + config_property_id: Optional[str] + data: Optional[str] + topic_id: Optional[str] + feature_name: Optional[str] + commit_message: Optional[str] + new_rules: Optional[List[parameter_domain.PlatformParameterRule]] + exp_id: Optional[str] + + +class AdminHandler( + base.BaseHandler[AdminHandlerNormalizePayloadDict, Dict[str, str]] +): """Handler for the admin page.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'GET': {}, 'POST': { @@ -88,10 +134,12 @@ class AdminHandler(base.BaseHandler): 'generate_dummy_explorations', 'clear_search_index', 'generate_dummy_new_structures_data', 'generate_dummy_new_skill_data', + 'generate_dummy_classroom', 'save_config_properties', 'revert_config_property', 'upload_topic_similarities', 'regenerate_topic_related_opportunities', - 'update_feature_flag_rules' + 'update_feature_flag_rules', + 'rollback_exploration_to_safe_state' ] }, # TODO(#13331): Remove default_value when it is confirmed that, @@ -170,12 +218,18 @@ class AdminHandler(base.BaseHandler): } }, 'default_value': None + }, + 'exp_id': { + 'schema': { + 'type': 'basestring' + }, + 'default_value': None } } } @acl_decorators.can_access_admin_page - def get(self): + def get(self) -> None: """Handles GET requests.""" demo_exploration_ids = list(feconf.DEMO_EXPLORATIONS.keys()) @@ -210,39 +264,69 @@ def get(self): }) @acl_decorators.can_access_admin_page - def post(self): + def post(self) -> None: """Handles POST requests.""" + assert self.user_id is not None + assert self.normalized_payload is not None action = self.normalized_payload.get('action') try: result = {} if action == 
'reload_exploration': exploration_id = self.normalized_payload.get('exploration_id') + if exploration_id is None: + raise Exception( + 'The \'exploration_id\' must be provided when the' + ' action is reload_exploration.' + ) self._reload_exploration(exploration_id) elif action == 'reload_collection': collection_id = self.normalized_payload.get('collection_id') + if collection_id is None: + raise Exception( + 'The \'collection_id\' must be provided when the' + ' action is reload_collection.' + ) self._reload_collection(collection_id) elif action == 'generate_dummy_explorations': num_dummy_exps_to_generate = self.normalized_payload.get( 'num_dummy_exps_to_generate') + if num_dummy_exps_to_generate is None: + raise Exception( + 'The \'num_dummy_exps_to_generate\' must be provided' + ' when the action is generate_dummy_explorations.' + ) num_dummy_exps_to_publish = self.normalized_payload.get( 'num_dummy_exps_to_publish') + if num_dummy_exps_to_publish is None: + raise Exception( + 'The \'num_dummy_exps_to_publish\' must be provided' + ' when the action is generate_dummy_explorations.' 
+ ) if num_dummy_exps_to_generate < num_dummy_exps_to_publish: raise self.InvalidInputException( 'Generate count cannot be less than publish count') - else: - self._generate_dummy_explorations( - num_dummy_exps_to_generate, num_dummy_exps_to_publish) + + self._generate_dummy_explorations( + num_dummy_exps_to_generate, num_dummy_exps_to_publish) elif action == 'clear_search_index': search_services.clear_collection_search_index() search_services.clear_exploration_search_index() + search_services.clear_blog_post_summaries_search_index() elif action == 'generate_dummy_new_structures_data': self._load_dummy_new_structures_data() elif action == 'generate_dummy_new_skill_data': self._generate_dummy_skill_and_questions() + elif action == 'generate_dummy_classroom': + self._generate_dummy_classroom() elif action == 'save_config_properties': new_config_property_values = self.normalized_payload.get( 'new_config_property_values') + if new_config_property_values is None: + raise Exception( + 'The \'new_config_property_values\' must be provided' + ' when the action is save_config_properties.' + ) logging.info( '[ADMIN] %s saved config property values: %s' % (self.user_id, new_config_property_values)) @@ -251,6 +335,11 @@ def post(self): elif action == 'revert_config_property': config_property_id = self.normalized_payload.get( 'config_property_id') + if config_property_id is None: + raise Exception( + 'The \'config_property_id\' must be provided' + ' when the action is revert_config_property.' + ) logging.info( '[ADMIN] %s reverted config property: %s' % (self.user_id, config_property_id)) @@ -258,9 +347,19 @@ def post(self): self.user_id, config_property_id) elif action == 'upload_topic_similarities': data = self.normalized_payload.get('data') + if data is None: + raise Exception( + 'The \'data\' must be provided when the action' + ' is upload_topic_similarities.' 
+ ) recommendations_services.update_topic_similarities(data) elif action == 'regenerate_topic_related_opportunities': topic_id = self.normalized_payload.get('topic_id') + if topic_id is None: + raise Exception( + 'The \'topic_id\' must be provided when the action' + ' is regenerate_topic_related_opportunities.' + ) opportunities_count = ( opportunity_services .regenerate_opportunities_related_to_topic( @@ -268,19 +367,54 @@ def post(self): result = { 'opportunities_count': opportunities_count } - elif action == 'update_feature_flag_rules': + elif action == 'rollback_exploration_to_safe_state': + exp_id = self.normalized_payload.get('exp_id') + if exp_id is None: + raise Exception( + 'The \'exp_id\' must be provided when the action' + ' is rollback_exploration_to_safe_state.' + ) + version = ( + exp_services.rollback_exploration_to_safe_state(exp_id)) + result = { + 'version': version + } + else: + # The handler schema defines the possible values of 'action'. + # If 'action' has a value other than those defined in the + # schema, a Bad Request error will be thrown. Hence, 'action' + # must be 'update_feature_flag_rules' if this branch is + # executed. + assert action == 'update_feature_flag_rules' feature_name = self.normalized_payload.get('feature_name') - new_rule_dicts = self.normalized_payload.get('new_rules') + if feature_name is None: + raise Exception( + 'The \'feature_name\' must be provided when the action' + ' is update_feature_flag_rules.' + ) + new_rules = self.normalized_payload.get('new_rules') + if new_rules is None: + raise Exception( + 'The \'new_rules\' must be provided when the action' + ' is update_feature_flag_rules.' + ) commit_message = self.normalized_payload.get('commit_message') + if commit_message is None: + raise Exception( + 'The \'commit_message\' must be provided when the ' + 'action is update_feature_flag_rules.' 
+ ) try: feature_services.update_feature_flag_rules( feature_name, self.user_id, commit_message, - new_rule_dicts) + new_rules) except ( utils.ValidationError, feature_services.FeatureFlagNotFoundException) as e: raise self.InvalidInputException(e) + + new_rule_dicts = [rules.to_dict() for rules in new_rules] logging.info( '[ADMIN] %s updated feature %s with new rules: ' '%s.' % (self.user_id, feature_name, new_rule_dicts)) @@ -290,7 +424,7 @@ def post(self): self.render_json({'error': str(e)}) raise e - def _reload_exploration(self, exploration_id): + def _reload_exploration(self, exploration_id: str) -> None: """Reloads the exploration in dev_mode corresponding to the given exploration id. @@ -311,7 +445,11 @@ def _reload_exploration(self, exploration_id): raise Exception('Cannot reload an exploration in production.') def _create_dummy_question( - self, question_id, question_content, linked_skill_ids): + self, + question_id: str, + question_content: str, + linked_skill_ids: List[str] + ) -> question_domain.Question: """Creates a dummy question object with the given question ID. Args: @@ -323,39 +461,47 @@ def _create_dummy_question( Returns: Question. The dummy question with given values. 
""" + content_id_generator = translation_domain.ContentIdGenerator() + state = state_domain.State.create_default_state( - 'ABC', is_initial_state=True) + 'ABC', + content_id_generator.generate( + translation_domain.ContentType.CONTENT), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + is_initial_state=True) + state.update_interaction_id('TextInput') state.update_interaction_customization_args({ 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG + ), 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': { + 'value': False + } }) - state.update_next_content_id_index(1) state.update_linked_skill_id(None) - state.update_content(state_domain.SubtitledHtml('1', question_content)) - recorded_voiceovers = state_domain.RecordedVoiceovers({}) - written_translations = state_domain.WrittenTranslations({}) - recorded_voiceovers.add_content_id_for_voiceover('ca_placeholder_0') - recorded_voiceovers.add_content_id_for_voiceover('1') - recorded_voiceovers.add_content_id_for_voiceover('default_outcome') - written_translations.add_content_id_for_translation('ca_placeholder_0') - written_translations.add_content_id_for_translation('1') - written_translations.add_content_id_for_translation('default_outcome') - - state.update_recorded_voiceovers(recorded_voiceovers) - state.update_written_translations(written_translations) + state.update_content(state_domain.SubtitledHtml( + 'content_0', question_content)) + solution = state_domain.Solution( 'TextInput', False, 'Solution', state_domain.SubtitledHtml( - 'solution', '

This is a solution.

')) + content_id_generator.generate( + translation_domain.ContentType.SOLUTION), + '

This is a solution.

')) hints_list = [ state_domain.Hint( - state_domain.SubtitledHtml('hint_1', '

This is a hint.

') + state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.HINT), + '

This is a hint.

') ) ] @@ -363,18 +509,24 @@ def _create_dummy_question( state.update_interaction_hints(hints_list) state.update_interaction_default_outcome( state_domain.Outcome( - None, state_domain.SubtitledHtml( - 'feedback_id', '

Dummy Feedback

'), + None, None, + state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + '

Dummy Feedback

'), True, [], None, None ) ) question = question_domain.Question( question_id, state, feconf.CURRENT_STATE_SCHEMA_VERSION, - constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, []) + constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, [], + content_id_generator.next_content_id_index) return question - def _create_dummy_skill(self, skill_id, skill_description, explanation): + def _create_dummy_skill( + self, skill_id: str, skill_description: str, explanation: str + ) -> skill_domain.Skill: """Creates a dummy skill object with the given values. Args: @@ -397,7 +549,7 @@ def _create_dummy_skill(self, skill_id, skill_description, explanation): skill.update_explanation(state_domain.SubtitledHtml('1', explanation)) return skill - def _load_dummy_new_structures_data(self): + def _load_dummy_new_structures_data(self) -> None: """Loads the database with two topics (one of which is empty), a story and three skills in the topic (two of them in a subtopic) and a question attached to each skill. @@ -406,6 +558,7 @@ def _load_dummy_new_structures_data(self): Exception. Cannot load new structures data in production mode. Exception. User does not have enough rights to generate data. 
""" + assert self.user_id is not None if constants.DEV_MODE: if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles: raise Exception( @@ -445,15 +598,17 @@ def _load_dummy_new_structures_data(self): self.user_id, question_id_3, skill_id_3, 0.7) topic_1 = topic_domain.Topic.create_default_topic( - topic_id_1, 'Dummy Topic 1', 'dummy-topic-one', 'description') + topic_id_1, 'Dummy Topic 1', 'dummy-topic-one', 'description', + 'fragm') topic_2 = topic_domain.Topic.create_default_topic( - topic_id_2, 'Empty Topic', 'empty-topic', 'description') + topic_id_2, 'Empty Topic', 'empty-topic', 'description', + 'fragm') topic_1.add_canonical_story(story_id) topic_1.add_uncategorized_skill_id(skill_id_1) topic_1.add_uncategorized_skill_id(skill_id_2) topic_1.add_uncategorized_skill_id(skill_id_3) - topic_1.add_subtopic(1, 'Dummy Subtopic Title') + topic_1.add_subtopic(1, 'Dummy Subtopic Title', 'dummysubtopic') topic_1.move_skill_id_to_subtopic(None, 1, skill_id_2) topic_1.move_skill_id_to_subtopic(None, 1, skill_id_3) @@ -462,11 +617,11 @@ def _load_dummy_new_structures_data(self): 1, topic_id_1)) # These explorations were chosen since they pass the validations # for published stories. - self._reload_exploration('15') + self._reload_exploration('6') self._reload_exploration('25') self._reload_exploration('13') exp_services.update_exploration( - self.user_id, '15', [exp_domain.ExplorationChange({ + self.user_id, '6', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True @@ -489,7 +644,7 @@ def _load_dummy_new_structures_data(self): topic_id_1, 'help-jamie-win-arcade') story_node_dicts = [{ - 'exp_id': '15', + 'exp_id': '6', 'title': 'What are the place values?', 'description': 'Jaime learns the place value of each digit ' + 'in a big number.' @@ -505,7 +660,9 @@ def _load_dummy_new_structures_data(self): 'greater than another number.' 
}] - def generate_dummy_story_nodes(node_id, exp_id, title, description): + def generate_dummy_story_nodes( + node_id: int, exp_id: str, title: str, description: str + ) -> None: """Generates and connects sequential story nodes. Args: @@ -514,7 +671,7 @@ def generate_dummy_story_nodes(node_id, exp_id, title, description): title: str. The title of the story node. description: str. The description of the story node. """ - + assert self.user_id is not None story.add_node( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), title) @@ -550,7 +707,8 @@ def generate_dummy_story_nodes(node_id, exp_id, title, description): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Dummy Subtopic Title' + 'title': 'Dummy Subtopic Title', + 'url_fragment': 'dummy-fragment' })] ) @@ -563,7 +721,7 @@ def generate_dummy_story_nodes(node_id, exp_id, title, description): else: raise Exception('Cannot load new structures data in production.') - def _generate_dummy_skill_and_questions(self): + def _generate_dummy_skill_and_questions(self) -> None: """Generate and loads the database with a skill and 15 questions linked to the skill. @@ -571,6 +729,7 @@ def _generate_dummy_skill_and_questions(self): Exception. Cannot load new structures data in production mode. Exception. User does not have enough rights to generate data. """ + assert self.user_id is not None if constants.DEV_MODE: if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles: raise Exception( @@ -594,7 +753,7 @@ def _generate_dummy_skill_and_questions(self): else: raise Exception('Cannot generate dummy skills in production.') - def _reload_collection(self, collection_id): + def _reload_collection(self, collection_id: str) -> None: """Reloads the collection in dev_mode corresponding to the given collection id. @@ -604,6 +763,7 @@ def _reload_collection(self, collection_id): Raises: Exception. Cannot reload a collection in production. 
""" + assert self.user_id is not None if constants.DEV_MODE: logging.info( '[ADMIN] %s reloaded collection %s' % @@ -615,7 +775,8 @@ def _reload_collection(self, collection_id): raise Exception('Cannot reload a collection in production.') def _generate_dummy_explorations( - self, num_dummy_exps_to_generate, num_dummy_exps_to_publish): + self, num_dummy_exps_to_generate: int, num_dummy_exps_to_publish: int + ) -> None: """Generates and publishes the given number of dummy explorations. Args: @@ -627,7 +788,7 @@ def _generate_dummy_explorations( Raises: Exception. Environment is not DEVMODE. """ - + assert self.user_id is not None if constants.DEV_MODE: logging.info( '[ADMIN] %s generated %s number of dummy explorations' % @@ -654,12 +815,313 @@ def _generate_dummy_explorations( else: raise Exception('Cannot generate dummy explorations in production.') + def _generate_dummy_classroom(self) -> None: + """Generate and loads the database with a classroom. + + Raises: + Exception. Cannot generate dummy classroom in production. + Exception. User does not have enough rights to generate data. + """ + assert self.user_id is not None + if constants.DEV_MODE: + if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles: + raise Exception( + 'User does not have enough rights to generate data.') + logging.info( + '[ADMIN] %s generated dummy classroom.' 
% self.user_id) + + topic_id_1 = topic_fetchers.get_new_topic_id() + topic_id_2 = topic_fetchers.get_new_topic_id() + topic_id_3 = topic_fetchers.get_new_topic_id() + topic_id_4 = topic_fetchers.get_new_topic_id() + topic_id_5 = topic_fetchers.get_new_topic_id() + + skill_id_1 = skill_services.get_new_skill_id() + skill_id_2 = skill_services.get_new_skill_id() + skill_id_3 = skill_services.get_new_skill_id() + skill_id_4 = skill_services.get_new_skill_id() + skill_id_5 = skill_services.get_new_skill_id() + + question_id_1 = question_services.get_new_question_id() + question_id_2 = question_services.get_new_question_id() + question_id_3 = question_services.get_new_question_id() + question_id_4 = question_services.get_new_question_id() + question_id_5 = question_services.get_new_question_id() + question_id_6 = question_services.get_new_question_id() + question_id_7 = question_services.get_new_question_id() + question_id_8 = question_services.get_new_question_id() + question_id_9 = question_services.get_new_question_id() + question_id_10 = question_services.get_new_question_id() + question_id_11 = question_services.get_new_question_id() + question_id_12 = question_services.get_new_question_id() + question_id_13 = question_services.get_new_question_id() + question_id_14 = question_services.get_new_question_id() + question_id_15 = question_services.get_new_question_id() + + question_1 = self._create_dummy_question( + question_id_1, 'Question 1', [skill_id_1]) + question_2 = self._create_dummy_question( + question_id_2, 'Question 2', [skill_id_1]) + question_3 = self._create_dummy_question( + question_id_3, 'Question 3', [skill_id_1]) + question_4 = self._create_dummy_question( + question_id_4, 'Question 4', [skill_id_2]) + question_5 = self._create_dummy_question( + question_id_5, 'Question 5', [skill_id_2]) + question_6 = self._create_dummy_question( + question_id_6, 'Question 6', [skill_id_2]) + question_7 = self._create_dummy_question( + question_id_7, 'Question 7', 
[skill_id_3]) + question_8 = self._create_dummy_question( + question_id_8, 'Question 8', [skill_id_3]) + question_9 = self._create_dummy_question( + question_id_9, 'Question 9', [skill_id_3]) + question_10 = self._create_dummy_question( + question_id_10, 'Question 10', [skill_id_4]) + question_11 = self._create_dummy_question( + question_id_11, 'Question 11', [skill_id_4]) + question_12 = self._create_dummy_question( + question_id_12, 'Question 12', [skill_id_4]) + question_13 = self._create_dummy_question( + question_id_13, 'Question 13', [skill_id_5]) + question_14 = self._create_dummy_question( + question_id_14, 'Question 14', [skill_id_5]) + question_15 = self._create_dummy_question( + question_id_15, 'Question 15', [skill_id_5]) + + topic_1 = topic_domain.Topic.create_default_topic( + topic_id_1, 'Addition', 'add', 'description', 'fragm') + topic_1.skill_ids_for_diagnostic_test = [skill_id_1] + topic_1.thumbnail_filename = 'thumbnail.svg' + topic_1.thumbnail_bg_color = '#C6DCDA' + topic_1.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', [skill_id_1], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-three')] + topic_1.next_subtopic_id = 2 + + topic_2 = topic_domain.Topic.create_default_topic( + topic_id_2, 'Subtraction', 'subtraction', + 'description', 'fragm' + ) + topic_2.skill_ids_for_diagnostic_test = [skill_id_2] + topic_2.thumbnail_filename = 'thumbnail.svg' + topic_2.thumbnail_bg_color = '#C6DCDA' + topic_2.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', [skill_id_2], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-three')] + topic_2.next_subtopic_id = 2 + + topic_3 = topic_domain.Topic.create_default_topic( + topic_id_3, 'Multiplication', 'multiplication', + 'description', 'fragm' + ) + topic_3.skill_ids_for_diagnostic_test = [skill_id_3] + topic_3.thumbnail_filename = 'thumbnail.svg' + topic_3.thumbnail_bg_color = '#C6DCDA' + topic_3.subtopics = [ + 
topic_domain.Subtopic( + 1, 'Title', [skill_id_3], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-three')] + topic_3.next_subtopic_id = 2 + + topic_4 = topic_domain.Topic.create_default_topic( + topic_id_4, 'Division', 'division', 'description', 'fragm') + topic_4.skill_ids_for_diagnostic_test = [skill_id_4] + topic_4.thumbnail_filename = 'thumbnail.svg' + topic_4.thumbnail_bg_color = '#C6DCDA' + topic_4.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', [skill_id_4], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-three')] + topic_4.next_subtopic_id = 2 + + topic_5 = topic_domain.Topic.create_default_topic( + topic_id_5, 'Fraction', 'fraction', 'description', 'fragm') + topic_5.skill_ids_for_diagnostic_test = [skill_id_5] + topic_5.thumbnail_filename = 'thumbnail.svg' + topic_5.thumbnail_bg_color = '#C6DCDA' + topic_5.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', [skill_id_5], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-three')] + topic_5.next_subtopic_id = 2 + + skill_1 = self._create_dummy_skill( + skill_id_1, 'Skill1', '

Dummy Explanation 1

') + skill_2 = self._create_dummy_skill( + skill_id_2, 'Skill2', '

Dummy Explanation 2

') + skill_3 = self._create_dummy_skill( + skill_id_3, 'Skill3', '

Dummy Explanation 3

') + skill_4 = self._create_dummy_skill( + skill_id_4, 'Skill4', '

Dummy Explanation 4

') + skill_5 = self._create_dummy_skill( + skill_id_5, 'Skill5', '

Dummy Explanation 5

') + + question_services.add_question(self.user_id, question_1) + question_services.add_question(self.user_id, question_2) + question_services.add_question(self.user_id, question_3) + question_services.add_question(self.user_id, question_4) + question_services.add_question(self.user_id, question_5) + question_services.add_question(self.user_id, question_6) + question_services.add_question(self.user_id, question_7) + question_services.add_question(self.user_id, question_8) + question_services.add_question(self.user_id, question_9) + question_services.add_question(self.user_id, question_10) + question_services.add_question(self.user_id, question_11) + question_services.add_question(self.user_id, question_12) + question_services.add_question(self.user_id, question_13) + question_services.add_question(self.user_id, question_14) + question_services.add_question(self.user_id, question_15) + + skill_services.save_new_skill(self.user_id, skill_1) + skill_services.save_new_skill(self.user_id, skill_2) + skill_services.save_new_skill(self.user_id, skill_3) + skill_services.save_new_skill(self.user_id, skill_4) + skill_services.save_new_skill(self.user_id, skill_5) + + topic_services.save_new_topic(self.user_id, topic_1) + topic_services.publish_topic(topic_id_1, self.user_id) + + topic_services.save_new_topic(self.user_id, topic_2) + topic_services.publish_topic(topic_id_2, self.user_id) + + topic_services.save_new_topic(self.user_id, topic_3) + topic_services.publish_topic(topic_id_3, self.user_id) + + topic_services.save_new_topic(self.user_id, topic_4) + topic_services.publish_topic(topic_id_4, self.user_id) + + topic_services.save_new_topic(self.user_id, topic_5) + topic_services.publish_topic(topic_id_5, self.user_id) -class AdminRoleHandler(base.BaseHandler): + question_services.create_new_question_skill_link( + self.user_id, question_id_1, skill_id_1, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_2, skill_id_1, 0.5) + 
question_services.create_new_question_skill_link( + self.user_id, question_id_3, skill_id_1, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_4, skill_id_2, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_5, skill_id_2, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_6, skill_id_2, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_7, skill_id_3, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_8, skill_id_3, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_9, skill_id_3, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_10, skill_id_4, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_11, skill_id_4, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_12, skill_id_4, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_13, skill_id_5, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_14, skill_id_5, 0.5) + question_services.create_new_question_skill_link( + self.user_id, question_id_15, skill_id_5, 0.5) + + classroom_id_1 = classroom_config_services.get_new_classroom_id() + + classroom_name_1 = 'Math' + + classroom_url_fragment_1 = 'math' + + topic_dependency_for_classroom_1: Dict[str, list[str]] = { + topic_id_1: [], + topic_id_2: [topic_id_1], + topic_id_3: [topic_id_1], + topic_id_4: [topic_id_2], + topic_id_5: [topic_id_2, topic_id_3] + } + + classroom_dict_1: classroom_config_domain.ClassroomDict = { + 'classroom_id': classroom_id_1, + 'name': classroom_name_1, + 'url_fragment': classroom_url_fragment_1, + 'course_details': '', + 'topic_list_intro': '', + 'topic_id_to_prerequisite_topic_ids': ( + topic_dependency_for_classroom_1) + } + + classroom_1 = 
classroom_config_domain.Classroom.from_dict( + classroom_dict_1) + + classroom_config_services.update_or_create_classroom_model( + classroom_1) + + classroom_pages_data = [{ + 'name': 'math', + 'url_fragment': 'math', + 'course_details': '', + 'topic_list_intro': '', + 'topic_ids': [ + topic_id_1, + topic_id_2, + topic_id_3, + topic_id_4, + topic_id_5 + ], + }] + config_services.set_property( + self.user_id, 'classroom_pages_data', classroom_pages_data) + else: + raise Exception('Cannot generate dummy classroom in production.') + + +class AdminRoleHandlerNormalizedGetRequestDict(TypedDict): + """Dict representation of AdminRoleHandler's GET normalized_request + dictionary. + """ + + filter_criterion: str + role: Optional[str] + username: Optional[str] + + +class AdminRoleHandlerNormalizedDeleteRequestDict(TypedDict): + """Dict representation of AdminRoleHandler's DELETE normalized_request + dictionary. + """ + + role: str + username: str + + +class AdminRoleHandlerNormalizedPayloadDict(TypedDict): + """Dict representation of AdminRoleHandler's normalized_payload + dictionary. + """ + + role: str + username: str + + +class AdminRoleHandler( + base.BaseHandler[ + AdminRoleHandlerNormalizedPayloadDict, + Union[ + AdminRoleHandlerNormalizedGetRequestDict, + AdminRoleHandlerNormalizedDeleteRequestDict + ] + ] +): """Handler for roles tab of admin page. 
Used to view and update roles.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'filter_criterion': { @@ -714,22 +1176,50 @@ class AdminRoleHandler(base.BaseHandler): } @acl_decorators.can_access_admin_page - def get(self): - filter_criterion = self.normalized_request.get( - 'filter_criterion') + def get(self) -> None: + assert self.user_id is not None + # Here we use cast because we are narrowing down the type of + # 'normalized_request' from Union of request TypedDicts to a + # particular TypedDict that was defined according to the schemas. + # So that the type of fetched values is not considered as Any type. + request_data = cast( + AdminRoleHandlerNormalizedGetRequestDict, + self.normalized_request + ) + filter_criterion = request_data['filter_criterion'] if filter_criterion == feconf.USER_FILTER_CRITERION_ROLE: - role = self.normalized_request.get( - feconf.USER_FILTER_CRITERION_ROLE) + role = request_data.get(feconf.USER_FILTER_CRITERION_ROLE) + if role is None: + raise Exception( + 'The role must be provided when the filter criterion ' + 'is \'role\'.' + ) role_services.log_role_query( self.user_id, feconf.ROLE_ACTION_VIEW_BY_ROLE, role=role) self.render_json({ - 'usernames': user_services.get_usernames_by_role(role) + 'usernames': ( + user_services.get_usernames_by_role(role) if role else [] + ) }) - elif filter_criterion == feconf.USER_FILTER_CRITERION_USERNAME: - username = self.normalized_request.get( + else: + # The handler schema defines the possible values of + # 'filter_criterion'. If 'filter_criterion' has a value other than + # those defined in the schema, a Bad Request error will be thrown. + # Hence, 'filter_criterion' must be + # 'feconf.USER_FILTER_CRITERION_USERNAME' if this branch is + # executed. 
+ assert filter_criterion == ( feconf.USER_FILTER_CRITERION_USERNAME) - user_id = user_services.get_user_id_from_username(username) + username = request_data.get(feconf.USER_FILTER_CRITERION_USERNAME) + if username is None: + raise Exception( + 'The username must be provided when the filter criterion ' + 'is \'username\'.' + ) + user_id = ( + user_services.get_user_id_from_username(username) + ) role_services.log_role_query( self.user_id, feconf.ROLE_ACTION_VIEW_BY_USERNAME, username=username) @@ -752,9 +1242,10 @@ def get(self): self.render_json(user_roles_dict) @acl_decorators.can_access_admin_page - def put(self): - username = self.payload.get('username') - role = self.payload.get('role') + def put(self) -> None: + assert self.normalized_payload is not None + username = self.normalized_payload['username'] + role = self.normalized_payload['role'] user_settings = user_services.get_user_settings_from_username(username) if user_settings is None: @@ -772,9 +1263,17 @@ def put(self): self.render_json({}) @acl_decorators.can_access_admin_page - def delete(self): - username = self.request.get('username') - role = self.request.get('role') + def delete(self) -> None: + # Here we use cast because we are narrowing down the type of + # 'normalized_request' from Union of request TypedDicts to a + # particular TypedDict that was defined according to the schemas. + # So that the type of fetched values is not considered as Any type. + request_data = cast( + AdminRoleHandlerNormalizedDeleteRequestDict, + self.normalized_request + ) + username = request_data['username'] + role = request_data['role'] user_id = user_services.get_user_id_from_username(username) if user_id is None: @@ -789,11 +1288,25 @@ def delete(self): self.render_json({}) -class TopicManagerRoleHandler(base.BaseHandler): +class TopicManagerRoleHandlerNormalizedPayloadDict(TypedDict): + """Dict representation of TopicManagerRoleHandler's normalized_payload + dictionary. 
+ """ + + username: str + action: str + topic_id: str + + +class TopicManagerRoleHandler( + base.BaseHandler[ + TopicManagerRoleHandlerNormalizedPayloadDict, Dict[str, str] + ] +): """Handler to assign or deassigning manager to a topic.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'username': { @@ -816,10 +1329,11 @@ class TopicManagerRoleHandler(base.BaseHandler): } @acl_decorators.can_access_admin_page - def put(self): - username = self.normalized_payload.get('username') - action = self.normalized_payload.get('action') - topic_id = self.normalized_payload.get('topic_id') + def put(self) -> None: + assert self.normalized_payload is not None + username = self.normalized_payload['username'] + action = self.normalized_payload['action'] + topic_id = self.normalized_payload['topic_id'] user_settings = user_services.get_user_settings_from_username(username) @@ -837,22 +1351,51 @@ def put(self): topic_services.assign_role( user_services.get_system_user(), topic_manager, topic_domain.ROLE_MANAGER, topic_id) - elif action == 'deassign': + else: + # The handler schema defines the possible values of 'action'. + # If 'action' has a value other than those defined in the schema, + # a Bad Request error will be thrown. Hence, 'action' must be + # 'deassign' if this branch is executed. + assert action == 'deassign' topic_services.deassign_manager_role_from_topic( user_services.get_system_user(), user_id, topic_id) - if not topic_fetchers.get_topic_rights_with_user(user_id): - user_services.remove_user_role( - user_id, feconf.ROLE_ID_TOPIC_MANAGER) + # The case where user does not have manager rights it will be + # caught before in topic_services.deassign_manager_role_from_topic + # method. 
+ assert not topic_fetchers.get_topic_rights_with_user(user_id) + user_services.remove_user_role( + user_id, feconf.ROLE_ID_TOPIC_MANAGER) self.render_json({}) -class BannedUsersHandler(base.BaseHandler): +class BannedUsersHandlerNormalizedPayloadDict(TypedDict): + """Dict representation of BannedUsersHandler's normalized_payload + dictionary. + """ + + username: str + + +class BannedUsersHandlerNormalizedRequestDict(TypedDict): + """Dict representation of BannedUsersHandler's normalized_request + dictionary. + """ + + username: str + + +class BannedUsersHandler( + base.BaseHandler[ + BannedUsersHandlerNormalizedPayloadDict, + BannedUsersHandlerNormalizedRequestDict + ] +): """Handler to ban and unban users.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'username': { @@ -871,8 +1414,9 @@ class BannedUsersHandler(base.BaseHandler): } @acl_decorators.can_access_admin_page - def put(self): - username = self.normalized_payload.get('username') + def put(self) -> None: + assert self.normalized_payload is not None + username = self.normalized_payload['username'] user_id = user_services.get_user_id_from_username(username) if user_id is None: @@ -884,8 +1428,9 @@ def put(self): self.render_json({}) @acl_decorators.can_access_admin_page - def delete(self): - username = self.normalized_request.get('username') + def delete(self) -> None: + assert self.normalized_request is not None + username = self.normalized_request['username'] user_id = user_services.get_user_id_from_username(username) if user_id is None: @@ -896,12 +1441,33 @@ def delete(self): self.render_json({}) -class AdminSuperAdminPrivilegesHandler(base.BaseHandler): +class AdminSuperAdminPrivilegesHandlerNormalizedPayloadDict(TypedDict): + """Dict representation of AdminSuperAdminPrivilegesHandler's + normalized_payload dictionary. 
+ """ + + username: str + + +class AdminSuperAdminPrivilegesHandlerNormalizedRequestDict(TypedDict): + """Dict representation of AdminSuperAdminPrivilegesHandler's + normalized_request dictionary. + """ + + username: str + + +class AdminSuperAdminPrivilegesHandler( + base.BaseHandler[ + AdminSuperAdminPrivilegesHandlerNormalizedPayloadDict, + AdminSuperAdminPrivilegesHandlerNormalizedRequestDict + ] +): """Handler for granting a user super admin privileges.""" PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'username': { @@ -920,12 +1486,12 @@ class AdminSuperAdminPrivilegesHandler(base.BaseHandler): } @acl_decorators.can_access_admin_page - def put(self): + def put(self) -> None: + assert self.normalized_payload is not None if self.email != feconf.ADMIN_EMAIL_ADDRESS: raise self.UnauthorizedUserException( 'Only the default system admin can manage super admins') - - username = self.normalized_payload.get('username') + username = self.normalized_payload['username'] user_id = user_services.get_user_id_from_username(username) if user_id is None: @@ -935,12 +1501,12 @@ def put(self): self.render_json(self.values) @acl_decorators.can_access_admin_page - def delete(self): + def delete(self) -> None: + assert self.normalized_request is not None if self.email != feconf.ADMIN_EMAIL_ADDRESS: raise self.UnauthorizedUserException( 'Only the default system admin can manage super admins') - - username = self.normalized_request.get('username') + username = self.normalized_request['username'] user_settings = user_services.get_user_settings_from_username(username) if user_settings is None: @@ -954,15 +1520,17 @@ def delete(self): self.render_json(self.values) -class AdminTopicsCsvFileDownloader(base.BaseHandler): +class AdminTopicsCsvFileDownloader( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): 
"""Retrieves topic similarity data for download.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_DOWNLOADABLE - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_admin_page - def get(self): + def get(self) -> None: topic_similarities = ( recommendations_services.get_topic_similarities_as_csv() ) @@ -975,11 +1543,26 @@ def get(self): ) -class DataExtractionQueryHandler(base.BaseHandler): +class DataExtractionQueryHandlerNormalizedRequestDict(TypedDict): + """Dict representation of DataExtractionQueryHandler's + normalized_request dictionary. + """ + + exp_id: str + exp_version: int + state_name: str + num_answers: int + + +class DataExtractionQueryHandler( + base.BaseHandler[ + Dict[str, str], DataExtractionQueryHandlerNormalizedRequestDict + ] +): """Handler for data extraction query.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'exp_id': { @@ -1006,9 +1589,10 @@ class DataExtractionQueryHandler(base.BaseHandler): } @acl_decorators.can_access_admin_page - def get(self): - exp_id = self.normalized_request.get('exp_id') - exp_version = self.normalized_request.get('exp_version') + def get(self) -> None: + assert self.normalized_request is not None + exp_id = self.normalized_request['exp_id'] + exp_version = self.normalized_request['exp_version'] exploration = exp_fetchers.get_exploration_by_id( exp_id, strict=False, version=exp_version) @@ -1017,8 +1601,8 @@ def get(self): 'Entity for exploration with id %s and version %s not found.' 
% (exp_id, exp_version)) - state_name = self.normalized_request.get('state_name') - num_answers = self.normalized_request.get('num_answers') + state_name = self.normalized_request['state_name'] + num_answers = self.normalized_request['num_answers'] if state_name not in exploration.states: raise self.InvalidInputException( @@ -1027,6 +1611,13 @@ def get(self): state_answers = stats_services.get_state_answers( exp_id, exp_version, state_name) + if state_answers is None: + raise Exception( + 'No state answer exists for the given exp_id: %s,' + ' exp_version: %s and state_name: %s' % + (exp_id, exp_version, state_name) + + ) extracted_answers = state_answers.get_submitted_answer_dict_list() if num_answers > 0: @@ -1038,15 +1629,18 @@ def get(self): self.render_json(response) -class SendDummyMailToAdminHandler(base.BaseHandler): +class SendDummyMailToAdminHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): """This function handles sending test emails.""" - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'POST': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'POST': {}} @acl_decorators.can_access_admin_page - def post(self): + def post(self) -> None: username = self.username + assert username is not None if feconf.CAN_SEND_EMAILS: email_manager.send_dummy_mail_to_admin(username) self.render_json({}) @@ -1054,10 +1648,23 @@ def post(self): raise self.InvalidInputException('This app cannot send emails.') -class UpdateUsernameHandler(base.BaseHandler): +class UpdateUsernameHandlerNormalizedPayloadDict(TypedDict): + """Dict representation of UpdateUsernameHandler's + normalized_payload dictionary. 
+ """ + + old_username: str + new_username: str + + +class UpdateUsernameHandler( + base.BaseHandler[ + UpdateUsernameHandlerNormalizedPayloadDict, Dict[str, str] + ] +): """Handler for renaming usernames.""" - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'old_username': { @@ -1078,9 +1685,11 @@ class UpdateUsernameHandler(base.BaseHandler): } @acl_decorators.can_access_admin_page - def put(self): - old_username = self.normalized_payload.get('old_username') - new_username = self.normalized_payload.get('new_username') + def put(self) -> None: + assert self.user_id is not None + assert self.normalized_payload is not None + old_username = self.normalized_payload['old_username'] + new_username = self.normalized_payload['new_username'] user_id = user_services.get_user_id_from_username(old_username) if user_id is None: @@ -1096,28 +1705,42 @@ def put(self): self.render_json({}) -class NumberOfDeletionRequestsHandler(base.BaseHandler): +class NumberOfDeletionRequestsHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): """Handler for getting the number of pending deletion requests via admin page. """ GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} @acl_decorators.can_access_admin_page - def get(self): + def get(self) -> None: self.render_json({ 'number_of_pending_deletion_models': ( wipeout_service.get_number_of_pending_deletion_requests()) }) -class VerifyUserModelsDeletedHandler(base.BaseHandler): +class VerifyUserModelsDeletedHandlerNormalizedRequestDict(TypedDict): + """Dict representation of VerifyUserModelsDeletedHandler's + normalized_request dictionary. 
+ """ + + user_id: str + + +class VerifyUserModelsDeletedHandler( + base.BaseHandler[ + Dict[str, str], VerifyUserModelsDeletedHandlerNormalizedRequestDict + ] +): """Handler for getting whether any models exist for specific user ID.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'user_id': { @@ -1129,18 +1752,32 @@ class VerifyUserModelsDeletedHandler(base.BaseHandler): } @acl_decorators.can_access_admin_page - def get(self): - user_id = self.normalized_request.get('user_id') + def get(self) -> None: + assert self.normalized_request is not None + user_id = self.normalized_request['user_id'] user_is_deleted = wipeout_service.verify_user_deleted( user_id, include_delete_at_end_models=True) self.render_json({'related_models_exist': not user_is_deleted}) -class DeleteUserHandler(base.BaseHandler): +class DeleteUserHandlerNormalizedRequestDict(TypedDict): + """Dict representation of DeleteUserHandler's + normalized_request dictionary. 
+ """ + + user_id: str + username: str + + +class DeleteUserHandler( + base.BaseHandler[ + Dict[str, str], DeleteUserHandlerNormalizedRequestDict + ] +): """Handler for deleting a user with specific ID.""" - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'DELETE': { 'user_id': { @@ -1157,9 +1794,10 @@ class DeleteUserHandler(base.BaseHandler): } @acl_decorators.can_delete_any_user - def delete(self): - user_id = self.normalized_request.get('user_id') - username = self.normalized_request.get('username') + def delete(self) -> None: + assert self.normalized_request is not None + user_id = self.normalized_request['user_id'] + username = self.normalized_request['username'] user_id_from_username = ( user_services.get_user_id_from_username(username)) @@ -1176,11 +1814,25 @@ def delete(self): self.render_json({'success': True}) -class UpdateBlogPostHandler(base.BaseHandler): +class UpdateBlogPostHandlerNormalizedPayloadDict(TypedDict): + """Dict representation of UpdateBlogPostHandler's + normalized_payload dictionary. 
+ """ + + blog_post_id: str + author_username: str + published_on: str + + +class UpdateBlogPostHandler( + base.BaseHandler[ + UpdateBlogPostHandlerNormalizedPayloadDict, Dict[str, str] + ] +): """Handler for changing author ids and published on date in blog posts.""" - URL_PATH_ARGS_SCHEMAS = {} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'blog_post_id': { @@ -1206,10 +1858,11 @@ class UpdateBlogPostHandler(base.BaseHandler): } @acl_decorators.can_access_admin_page - def put(self): - blog_post_id = self.normalized_payload.get('blog_post_id') - author_username = self.normalized_payload.get('author_username') - published_on = self.normalized_payload.get('published_on') + def put(self) -> None: + assert self.normalized_payload is not None + blog_post_id = self.normalized_payload['blog_post_id'] + author_username = self.normalized_payload['author_username'] + published_on = self.normalized_payload['published_on'] author_id = user_services.get_user_id_from_username(author_username) if author_id is None: diff --git a/core/controllers/admin_test.py b/core/controllers/admin_test.py index c9b1d36a7864..6774f5660c5e 100644 --- a/core/controllers/admin_test.py +++ b/core/controllers/admin_test.py @@ -17,13 +17,14 @@ from __future__ import annotations import datetime +import enum import logging from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.domain import blog_services +from core.domain import classroom_config_services from core.domain import collection_services from core.domain import config_domain from core.domain import config_services @@ -52,33 +53,48 @@ from core.platform.auth import firebase_auth_services from core.tests import test_utils +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import audit_models + from mypy_imports import blog_models + from mypy_imports import exp_models + from mypy_imports import opportunity_models + from mypy_imports 
import user_models + ( audit_models, blog_models, exp_models, opportunity_models, user_models ) = models.Registry.import_models([ - models.NAMES.audit, models.NAMES.blog, models.NAMES.exploration, - models.NAMES.opportunity, models.NAMES.user + models.Names.AUDIT, models.Names.BLOG, models.Names.EXPLORATION, + models.Names.OPPORTUNITY, models.Names.USER ]) BOTH_MODERATOR_AND_ADMIN_EMAIL = 'moderator.and.admin@example.com' BOTH_MODERATOR_AND_ADMIN_USERNAME = 'moderatorandadm1n' -PARAM_NAMES = python_utils.create_enum('test_feature_1') # pylint: disable=invalid-name -FEATURE_STAGES = platform_parameter_domain.FEATURE_STAGES + +class ParamNames(enum.Enum): + """Enum for parameter names.""" + + TEST_FEATURE_1 = 'test_feature_1' + + +FeatureStages = platform_parameter_domain.FeatureStages class AdminIntegrationTest(test_utils.GenericTestBase): """Server integration tests for operations on the admin page.""" - def setUp(self): + def setUp(self) -> None: """Complete the signup process for self.CURRICULUM_ADMIN_EMAIL.""" - super(AdminIntegrationTest, self).setUp() + super().setUp() self.signup(feconf.ADMIN_EMAIL_ADDRESS, 'testsuper') self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.prod_mode_swap = self.swap(constants, 'DEV_MODE', False) - def test_admin_page_rights(self): + def test_admin_page_rights(self) -> None: """Test access rights to the admin page.""" self.get_html_response('/admin', expected_status_int=302) @@ -93,7 +109,7 @@ def test_admin_page_rights(self): self.get_html_response('/admin') self.logout() - def test_promo_bar_configuration_not_present_to_admin(self): + def test_promo_bar_configuration_not_present_to_admin(self) -> None: """Test that promo bar configuration is not presentd in admin page.""" self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) @@ -106,7 +122,7 @@ def 
test_promo_bar_configuration_not_present_to_admin(self): self.assertNotIn('promo_bar_enabled', response_config_properties) self.assertNotIn('promo_bar_message', response_config_properties) - def test_change_configuration_property(self): + def test_change_configuration_property(self) -> None: """Test that configuration properties can be changed.""" self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) @@ -138,40 +154,293 @@ def test_change_configuration_property(self): self.logout() - def test_cannot_reload_exploration_in_production_mode(self): + def test_cannot_reload_exploration_in_production_mode(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() - prod_mode_swap = self.swap(constants, 'DEV_MODE', False) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'Cannot reload an exploration in production.') - with assert_raises_regexp_context_manager, prod_mode_swap: + with assert_raises_regexp_context_manager, self.prod_mode_swap: self.post_json( '/adminhandler', { 'action': 'reload_exploration', - 'exploration_id': '2' + 'exploration_id': '3' }, csrf_token=csrf_token) self.logout() - def test_cannot_load_new_structures_data_in_production_mode(self): + def test_without_exp_id_reload_exp_action_is_not_performed(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() - prod_mode_swap = self.swap(constants, 'DEV_MODE', False) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'exploration_id\' must be provided when the action ' + 'is reload_exploration.' 
+ ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'reload_exploration', + 'exploration_id': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_collection_id_reload_collection_action_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'collection_id\' must be provided when the action ' + 'is reload_collection.' + ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'reload_collection', + 'collection_id': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_num_dummy_exps_generate_dummy_exp_action_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'num_dummy_exps_to_generate\' must be provided when the ' + 'action is generate_dummy_explorations.' + ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'generate_dummy_explorations', + 'num_dummy_exps_to_generate': None, + 'num_dummy_exps_to_publish': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_num_dummy_exps_to_publish_action_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'num_dummy_exps_to_publish\' must be provided when the ' + 'action is generate_dummy_explorations.' 
+ ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'generate_dummy_explorations', + 'num_dummy_exps_to_generate': 5, + 'num_dummy_exps_to_publish': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_new_config_property_values_action_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'new_config_property_values\' must be provided when the ' + 'action is save_config_properties.' + ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'save_config_properties', + 'new_config_property_values': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_config_property_id_action_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'config_property_id\' must be provided when the action ' + 'is revert_config_property.' + ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'revert_config_property', + 'config_property_id': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_data_action_upload_topic_similarities_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'data\' must be provided when the action is ' + 'upload_topic_similarities.' 
+ ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'upload_topic_similarities', + 'data': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_topic_id_action_regenerate_topic_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'topic_id\' must be provided when the action is ' + 'regenerate_topic_related_opportunities.' + ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'regenerate_topic_related_opportunities', + 'topic_id': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_exp_id_action_rollback_exploration_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'exp_id\' must be provided when the action is ' + 'rollback_exploration_to_safe_state.' + ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'rollback_exploration_to_safe_state', + 'exp_id': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_feature_name_action_update_feature_flag_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'feature_name\' must be provided when the action is ' + 'update_feature_flag_rules.' 
+ ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'update_feature_flag_rules', + 'feature_name': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_new_rules_action_update_feature_flag_is_not_performed( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'new_rules\' must be provided when the action is ' + 'update_feature_flag_rules.' + ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'update_feature_flag_rules', + 'feature_name': 'new_feature', + 'new_rules': None + }, csrf_token=csrf_token) + + self.logout() + + def test_without_commit_message_action_update_feature_flag_is_not_performed( + self + ) -> None: + new_rule_dicts = [ + { + 'filters': [ + { + 'type': 'server_mode', + 'conditions': [['=', 'dev']] + } + ], + 'value_when_matched': True + } + ] + + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, + 'The \'commit_message\' must be provided when the action is ' + 'update_feature_flag_rules.' 
+ ) + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'update_feature_flag_rules', + 'feature_name': 'new_feature', + 'new_rules': new_rule_dicts, + 'commit_message': None + }, csrf_token=csrf_token) + + self.logout() + + def test_cannot_load_new_structures_data_in_production_mode(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'Cannot load new structures data in production.') - with assert_raises_regexp_context_manager, prod_mode_swap: + with assert_raises_regexp_context_manager, self.prod_mode_swap: self.post_json( '/adminhandler', { 'action': 'generate_dummy_new_structures_data' }, csrf_token=csrf_token) self.logout() - def test_non_admins_cannot_load_new_structures_data(self): + def test_non_admins_cannot_load_new_structures_data(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() - assert_raises_regexp = self.assertRaisesRegexp( + assert_raises_regexp = self.assertRaisesRegex( Exception, 'User does not have enough rights to generate data.') with assert_raises_regexp: self.post_json( @@ -180,24 +449,36 @@ def test_non_admins_cannot_load_new_structures_data(self): }, csrf_token=csrf_token) self.logout() - def test_cannot_generate_dummy_skill_data_in_production_mode(self): + def test_cannot_generate_dummy_skill_data_in_production_mode(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() - prod_mode_swap = self.swap(constants, 'DEV_MODE', False) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'Cannot generate dummy skills in production.') - with assert_raises_regexp_context_manager, prod_mode_swap: + with 
assert_raises_regexp_context_manager, self.prod_mode_swap: self.post_json( '/adminhandler', { 'action': 'generate_dummy_new_skill_data' }, csrf_token=csrf_token) self.logout() - def test_non_admins_cannot_generate_dummy_skill_data(self): + def test_cannot_generate_classroom_data_in_production_mode(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() - assert_raises_regexp = self.assertRaisesRegexp( + + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, 'Cannot generate dummy classroom in production.') + with assert_raises_regexp_context_manager, self.prod_mode_swap: + self.post_json( + '/adminhandler', { + 'action': 'generate_dummy_classroom' + }, csrf_token=csrf_token) + self.logout() + + def test_non_admins_cannot_generate_dummy_skill_data(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + assert_raises_regexp = self.assertRaisesRegex( Exception, 'User does not have enough rights to generate data.') with assert_raises_regexp: self.post_json( @@ -206,14 +487,25 @@ def test_non_admins_cannot_generate_dummy_skill_data(self): }, csrf_token=csrf_token) self.logout() - def test_cannot_reload_collection_in_production_mode(self): + def test_non_admins_cannot_generate_dummy_classroom_data(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() + assert_raises_regexp = self.assertRaisesRegex( + Exception, 'User does not have enough rights to generate data.') + with assert_raises_regexp: + self.post_json( + '/adminhandler', { + 'action': 'generate_dummy_classroom' + }, csrf_token=csrf_token) + self.logout() - prod_mode_swap = self.swap(constants, 'DEV_MODE', False) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( + def test_cannot_reload_collection_in_production_mode(self) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + 
csrf_token = self.get_new_csrf_token() + + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'Cannot reload a collection in production.') - with assert_raises_regexp_context_manager, prod_mode_swap: + with assert_raises_regexp_context_manager, self.prod_mode_swap: self.post_json( '/adminhandler', { 'action': 'reload_collection', @@ -222,10 +514,10 @@ def test_cannot_reload_collection_in_production_mode(self): self.logout() - def test_reload_collection(self): + def test_reload_collection(self) -> None: observed_log_messages = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.info().""" observed_log_messages.append(msg % args) @@ -257,7 +549,7 @@ def _mock_logging_function(msg, *args): self.logout() - def test_load_new_structures_data(self): + def test_load_new_structures_data(self) -> None: self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -293,7 +585,7 @@ def test_load_new_structures_data(self): self.assertEqual(len(translation_opportunities), 3) self.logout() - def test_generate_dummy_skill_and_questions_data(self): + def test_generate_dummy_skill_and_questions_data(self) -> None: self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -310,7 +602,19 @@ def test_generate_dummy_skill_and_questions_data(self): self.assertEqual(len(questions), 15) self.logout() - def test_regenerate_topic_related_opportunities_action(self): + def test_generate_dummy_classroom_data(self) -> None: + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + self.post_json( + '/adminhandler', { + 'action': 'generate_dummy_classroom' + }, csrf_token=csrf_token) + 
classrooms = classroom_config_services.get_all_classrooms() + self.assertEqual(len(classrooms), 1) + self.logout() + + def test_regenerate_topic_related_opportunities_action(self) -> None: self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) @@ -324,7 +628,7 @@ def test_regenerate_topic_related_opportunities_action(self): self.publish_exploration(owner_id, '0') topic = topic_domain.Topic.create_default_topic( - topic_id, 'topic', 'abbrev', 'description') + topic_id, 'topic', 'abbrev', 'description', 'fragm') topic.thumbnail_filename = 'thumbnail.svg' topic.thumbnail_bg_color = '#C6DCDA' topic.subtopics = [ @@ -333,6 +637,7 @@ def test_regenerate_topic_related_opportunities_action(self): constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-three')] topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] topic_services.save_new_topic(owner_id, topic) topic_services.publish_topic(topic_id, self.admin_id) @@ -385,7 +690,107 @@ def test_regenerate_topic_related_opportunities_action(self): self.assertLess(old_creation_time, new_creation_time) - def test_admin_topics_csv_download_handler(self): + def test_rollback_exploration_to_safe_state_action(self) -> None: + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + + owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.save_new_valid_exploration( + '0', owner_id, title='title', end_state_name='End State', + correctness_feedback_enabled=True) + exp_services.update_exploration( + owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 1') + exp_services.update_exploration( + owner_id, '0', 
[exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 2') + exp_services.update_exploration( + owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 3') + exp_services.update_exploration( + owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 4') + + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + csrf_token = self.get_new_csrf_token() + + result = self.post_json( + '/adminhandler', { + 'action': 'rollback_exploration_to_safe_state', + 'exp_id': '0' + }, csrf_token=csrf_token) + + self.assertEqual( + result, { + 'version': 5 + }) + + snapshot_content_model = ( + exp_models.ExplorationSnapshotContentModel.get( + '0-5', strict=True)) + snapshot_content_model.delete() + snapshot_metadata_model = ( + exp_models.ExplorationSnapshotMetadataModel.get( + '0-4', strict=True)) + snapshot_metadata_model.delete() + + result = self.post_json( + '/adminhandler', { + 'action': 'rollback_exploration_to_safe_state', + 'exp_id': '0' + }, csrf_token=csrf_token) + + self.assertEqual( + result, { + 'version': 3 + }) + + def test_admin_topics_csv_download_handler(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) response = self.get_custom_response( '/admintopicscsvdownloadhandler', 'text/csv') @@ -404,10 +809,10 @@ def 
test_admin_topics_csv_download_handler(self): self.logout() - def test_revert_config_property(self): + def test_revert_config_property(self) -> None: observed_log_messages = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.info().""" observed_log_messages.append(msg % args) @@ -432,7 +837,7 @@ def _mock_logging_function(msg, *args): self.logout() - def test_upload_topic_similarities(self): + def test_upload_topic_similarities(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -463,14 +868,14 @@ def test_upload_topic_similarities(self): self.logout() - def test_get_handler_includes_all_feature_flags(self): + def test_get_handler_includes_all_feature_flags(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) feature = platform_parameter_registry.Registry.create_feature_flag( - PARAM_NAMES.test_feature_1, 'feature for test.', FEATURE_STAGES.dev) + ParamNames.TEST_FEATURE_1, 'feature for test.', FeatureStages.DEV) feature_list_ctx = self.swap( platform_feature_services, 'ALL_FEATURES_LIST', - [getattr(PARAM_NAMES, feature.name)]) + [ParamNames.TEST_FEATURE_1]) feature_set_ctx = self.swap( platform_feature_services, 'ALL_FEATURES_NAMES_SET', set([feature.name])) @@ -483,12 +888,12 @@ def test_get_handler_includes_all_feature_flags(self): feature.name) self.logout() - def test_post_with_flag_changes_updates_feature_flags(self): + def test_post_with_flag_changes_updates_feature_flags(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() feature = platform_parameter_registry.Registry.create_feature_flag( - PARAM_NAMES.test_feature_1, 'feature for test.', FEATURE_STAGES.dev) + ParamNames.TEST_FEATURE_1, 'feature for test.', FeatureStages.DEV) new_rule_dicts = [ { 'filters': [ @@ -503,7 +908,7 @@ def 
test_post_with_flag_changes_updates_feature_flags(self): feature_list_ctx = self.swap( platform_feature_services, 'ALL_FEATURES_LIST', - [getattr(PARAM_NAMES, feature.name)]) + [ParamNames.TEST_FEATURE_1]) feature_set_ctx = self.swap( platform_feature_services, 'ALL_FEATURES_NAMES_SET', set([feature.name])) @@ -527,12 +932,14 @@ def test_post_with_flag_changes_updates_feature_flags(self): feature.name) self.logout() - def test_post_flag_changes_correctly_updates_flags_returned_by_getter(self): + def test_post_flag_changes_correctly_updates_flags_returned_by_getter( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() feature = platform_parameter_registry.Registry.create_feature_flag( - PARAM_NAMES.test_feature_1, 'feature for test.', FEATURE_STAGES.dev) + ParamNames.TEST_FEATURE_1, 'feature for test.', FeatureStages.DEV) new_rule_dicts = [ { 'filters': [ @@ -547,7 +954,7 @@ def test_post_flag_changes_correctly_updates_flags_returned_by_getter(self): feature_list_ctx = self.swap( platform_feature_services, 'ALL_FEATURES_LIST', - [getattr(PARAM_NAMES, feature.name)]) + [ParamNames.TEST_FEATURE_1]) feature_set_ctx = self.swap( platform_feature_services, 'ALL_FEATURES_NAMES_SET', set([feature.name])) @@ -572,12 +979,12 @@ def test_post_flag_changes_correctly_updates_flags_returned_by_getter(self): feature.name) self.logout() - def test_update_flag_rules_with_invalid_rules_returns_400(self): + def test_update_flag_rules_with_invalid_rules_returns_400(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() feature = platform_parameter_registry.Registry.create_feature_flag( - PARAM_NAMES.test_feature_1, 'feature for test.', FEATURE_STAGES.dev) + ParamNames.TEST_FEATURE_1, 'feature for test.', FeatureStages.DEV) new_rule_dicts = [ { 'filters': [ @@ -592,7 +999,7 @@ def test_update_flag_rules_with_invalid_rules_returns_400(self): feature_list_ctx = 
self.swap( platform_feature_services, 'ALL_FEATURES_LIST', - [getattr(PARAM_NAMES, feature.name)]) + [ParamNames.TEST_FEATURE_1]) feature_set_ctx = self.swap( platform_feature_services, 'ALL_FEATURES_NAMES_SET', set([feature.name])) @@ -616,7 +1023,9 @@ def test_update_flag_rules_with_invalid_rules_returns_400(self): feature.name) self.logout() - def test_update_flag_rules_with_unknown_feature_name_returns_400(self): + def test_update_flag_rules_with_unknown_feature_name_returns_400( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -654,7 +1063,8 @@ def test_update_flag_rules_with_unknown_feature_name_returns_400(self): self.logout() def test_update_flag_rules_with_feature_name_of_non_string_type_returns_400( - self): + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -676,7 +1086,8 @@ def test_update_flag_rules_with_feature_name_of_non_string_type_returns_400( self.logout() def test_update_flag_rules_with_message_of_non_string_type_returns_400( - self): + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -697,7 +1108,9 @@ def test_update_flag_rules_with_message_of_non_string_type_returns_400( self.logout() - def test_update_flag_rules_with_rules_of_non_list_type_returns_400(self): + def test_update_flag_rules_with_rules_of_non_list_type_returns_400( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -719,7 +1132,8 @@ def test_update_flag_rules_with_rules_of_non_list_type_returns_400(self): self.logout() def test_update_flag_rules_with_rules_of_non_list_of_dict_type_returns_400( - self): + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -740,12 +1154,14 @@ def 
test_update_flag_rules_with_rules_of_non_list_of_dict_type_returns_400( self.logout() - def test_update_flag_rules_with_unexpected_exception_returns_500(self): + def test_update_flag_rules_with_unexpected_exception_returns_500( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() feature = platform_parameter_registry.Registry.create_feature_flag( - PARAM_NAMES.test_feature_1, 'feature for test.', FEATURE_STAGES.dev) + ParamNames.TEST_FEATURE_1, 'feature for test.', FeatureStages.DEV) new_rule_dicts = [ { 'filters': [ @@ -760,14 +1176,16 @@ def test_update_flag_rules_with_unexpected_exception_returns_500(self): feature_list_ctx = self.swap( platform_feature_services, 'ALL_FEATURES_LIST', - [getattr(PARAM_NAMES, feature.name)]) + [ParamNames.TEST_FEATURE_1]) feature_set_ctx = self.swap( platform_feature_services, 'ALL_FEATURES_NAMES_SET', set([feature.name])) - # Replace the stored instance with None in order to trigger unexpected - # exception during update. + # Here we use MyPy ignore because we are assigning a None value + # where instance of 'PlatformParameter' is expected, and this is + # done to replace the stored instance with None in order to + # trigger the unexpected exception during update. 
platform_parameter_registry.Registry.parameter_registry[ - feature.name] = None + feature.name] = None # type: ignore[assignment] with feature_list_ctx, feature_set_ctx: response = self.post_json( '/adminhandler', { @@ -787,7 +1205,7 @@ def test_update_flag_rules_with_unexpected_exception_returns_500(self): feature.name) self.logout() - def test_grant_super_admin_privileges(self): + def test_grant_super_admin_privileges(self) -> None: self.login(feconf.ADMIN_EMAIL_ADDRESS, is_super_admin=True) grant_super_admin_privileges_stub = self.swap_with_call_counter( @@ -803,7 +1221,9 @@ def test_grant_super_admin_privileges(self): self.assertEqual(call_counter.times_called, 1) self.assertNotIn('error', response) - def test_grant_super_admin_privileges_requires_system_default_admin(self): + def test_grant_super_admin_privileges_requires_system_default_admin( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) grant_super_admin_privileges_stub = self.swap_with_call_counter( @@ -821,7 +1241,7 @@ def test_grant_super_admin_privileges_requires_system_default_admin(self): response['error'], 'Only the default system admin can manage super admins') - def test_grant_super_admin_privileges_fails_without_username(self): + def test_grant_super_admin_privileges_fails_without_username(self) -> None: self.login(feconf.ADMIN_EMAIL_ADDRESS, is_super_admin=True) response = self.put_json( @@ -831,7 +1251,9 @@ def test_grant_super_admin_privileges_fails_without_username(self): error_msg = 'Missing key in handler args: username.' 
self.assertEqual(response['error'], error_msg) - def test_grant_super_admin_privileges_fails_with_invalid_username(self): + def test_grant_super_admin_privileges_fails_with_invalid_username( + self + ) -> None: self.login(feconf.ADMIN_EMAIL_ADDRESS, is_super_admin=True) response = self.put_json( @@ -840,7 +1262,7 @@ def test_grant_super_admin_privileges_fails_with_invalid_username(self): self.assertEqual(response['error'], 'No such user exists') - def test_revoke_super_admin_privileges(self): + def test_revoke_super_admin_privileges(self) -> None: self.login(feconf.ADMIN_EMAIL_ADDRESS, is_super_admin=True) revoke_super_admin_privileges_stub = self.swap_with_call_counter( @@ -855,7 +1277,9 @@ def test_revoke_super_admin_privileges(self): self.assertEqual(call_counter.times_called, 1) self.assertNotIn('error', response) - def test_revoke_super_admin_privileges_requires_system_default_admin(self): + def test_revoke_super_admin_privileges_requires_system_default_admin( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) revoke_super_admin_privileges_stub = self.swap_with_call_counter( @@ -872,7 +1296,7 @@ def test_revoke_super_admin_privileges_requires_system_default_admin(self): response['error'], 'Only the default system admin can manage super admins') - def test_revoke_super_admin_privileges_fails_without_username(self): + def test_revoke_super_admin_privileges_fails_without_username(self) -> None: self.login(feconf.ADMIN_EMAIL_ADDRESS, is_super_admin=True) response = self.delete_json( @@ -881,7 +1305,9 @@ def test_revoke_super_admin_privileges_fails_without_username(self): error_msg = 'Missing key in handler args: username.' 
self.assertEqual(response['error'], error_msg) - def test_revoke_super_admin_privileges_fails_with_invalid_username(self): + def test_revoke_super_admin_privileges_fails_with_invalid_username( + self + ) -> None: self.login(feconf.ADMIN_EMAIL_ADDRESS, is_super_admin=True) response = self.delete_json( @@ -890,7 +1316,9 @@ def test_revoke_super_admin_privileges_fails_with_invalid_username(self): self.assertEqual(response['error'], 'No such user exists') - def test_revoke_super_admin_privileges_fails_for_default_admin(self): + def test_revoke_super_admin_privileges_fails_for_default_admin( + self + ) -> None: self.login(feconf.ADMIN_EMAIL_ADDRESS, is_super_admin=True) response = self.delete_json( @@ -905,11 +1333,11 @@ def test_revoke_super_admin_privileges_fails_for_default_admin(self): class GenerateDummyExplorationsTest(test_utils.GenericTestBase): """Test the conditions for generation of dummy explorations.""" - def setUp(self): - super(GenerateDummyExplorationsTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - def test_generate_count_greater_than_publish_count(self): + def test_generate_count_greater_than_publish_count(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() self.post_json( @@ -923,7 +1351,7 @@ def test_generate_count_greater_than_publish_count(self): self.assertEqual(len(generated_exps), 10) self.assertEqual(len(published_exps), 3) - def test_generate_count_equal_to_publish_count(self): + def test_generate_count_equal_to_publish_count(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() self.post_json( @@ -937,7 +1365,7 @@ def test_generate_count_equal_to_publish_count(self): self.assertEqual(len(generated_exps), 2) self.assertEqual(len(published_exps), 2) - def test_generate_count_less_than_publish_count(self): + def 
test_generate_count_less_than_publish_count(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() generated_exps_response = self.post_json( @@ -953,7 +1381,9 @@ def test_generate_count_less_than_publish_count(self): self.assertEqual(len(generated_exps), 0) self.assertEqual(len(published_exps), 0) - def test_handler_raises_error_with_non_int_num_dummy_exps_to_generate(self): + def test_handler_raises_error_with_non_int_num_dummy_exps_to_generate( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -975,7 +1405,9 @@ def test_handler_raises_error_with_non_int_num_dummy_exps_to_generate(self): self.logout() - def test_handler_raises_error_with_non_int_num_dummy_exps_to_publish(self): + def test_handler_raises_error_with_non_int_num_dummy_exps_to_publish( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -997,12 +1429,12 @@ def test_handler_raises_error_with_non_int_num_dummy_exps_to_publish(self): self.logout() - def test_cannot_generate_dummy_explorations_in_prod_mode(self): + def test_cannot_generate_dummy_explorations_in_prod_mode(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() prod_mode_swap = self.swap(constants, 'DEV_MODE', False) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'Cannot generate dummy explorations in production.') with assert_raises_regexp_context_manager, prod_mode_swap: @@ -1024,14 +1456,14 @@ def test_cannot_generate_dummy_explorations_in_prod_mode(self): class AdminRoleHandlerTest(test_utils.GenericTestBase): """Checks the user role handling on the admin page.""" - def setUp(self): + def setUp(self) -> None: """Complete the signup process for self.CURRICULUM_ADMIN_EMAIL.""" - 
super(AdminRoleHandlerTest, self).setUp() + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - def test_view_and_update_role(self): + def test_view_and_update_role(self) -> None: user_email = 'user1@example.com' username = 'user1' @@ -1070,7 +1502,36 @@ def test_view_and_update_role(self): }) self.logout() - def test_invalid_username_in_filter_criterion_and_update_role(self): + def test_if_filter_criterion_is_username_and_username_is_not_provided( + self + ) -> None: + + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + with self.assertRaisesRegex( + Exception, + 'The username must be provided when the filter criterion ' + 'is \'username\'.' + ): + self.get_json( + feconf.ADMIN_ROLE_HANDLER_URL, + params={'filter_criterion': 'username'} + ) + + def test_if_filter_criterion_is_role_and_role_is_not_provided( + self + ) -> None: + + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + with self.assertRaisesRegex( + Exception, + 'The role must be provided when the filter criterion is \'role\'.' 
+ ): + self.get_json( + feconf.ADMIN_ROLE_HANDLER_URL, + params={'filter_criterion': 'role'} + ) + + def test_invalid_username_in_filter_criterion_and_update_role(self) -> None: username = 'myinvaliduser' self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) @@ -1089,7 +1550,7 @@ def test_invalid_username_in_filter_criterion_and_update_role(self): csrf_token=csrf_token, expected_status_int=400) - def test_removing_role_with_invalid_username(self): + def test_removing_role_with_invalid_username(self) -> None: username = 'invaliduser' self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) @@ -1102,7 +1563,7 @@ def test_removing_role_with_invalid_username(self): self.assertEqual( response['error'], 'User with given username does not exist.') - def test_cannot_view_role_with_invalid_view_filter_criterion(self): + def test_cannot_view_role_with_invalid_view_filter_criterion(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) response = self.get_json( feconf.ADMIN_ROLE_HANDLER_URL, @@ -1114,7 +1575,7 @@ def test_cannot_view_role_with_invalid_view_filter_criterion(self): '[\'role\', \'username\']') self.assertEqual(response['error'], error_msg) - def test_replacing_user_role_from_topic_manager_to_moderator(self): + def test_replacing_user_role_from_topic_manager_to_moderator(self) -> None: user_email = 'user1@example.com' username = 'user1' @@ -1122,7 +1583,7 @@ def test_replacing_user_role_from_topic_manager_to_moderator(self): topic_id = topic_fetchers.get_new_topic_id() subtopic_1 = topic_domain.Subtopic.create_default_subtopic( - 1, 'Subtopic Title 1') + 1, 'Subtopic Title 1', 'url-frag-one') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' self.save_new_topic( @@ -1173,7 +1634,7 @@ def test_replacing_user_role_from_topic_manager_to_moderator(self): self.logout() - def test_removing_moderator_role_from_user_roles(self): + def test_removing_moderator_role_from_user_roles(self) -> None: user_email = 
'user1@example.com' username = 'user1' @@ -1215,7 +1676,8 @@ def test_removing_moderator_role_from_user_roles(self): self.logout() def test_general_role_handler_does_not_support_assigning_topic_manager( - self): + self + ) -> None: user_email = 'user1@example.com' username = 'user1' self.signup(user_email, username) @@ -1232,7 +1694,8 @@ def test_general_role_handler_does_not_support_assigning_topic_manager( response['error'], 'Unsupported role for this handler.') def test_general_role_handler_supports_unassigning_topic_manager( - self): + self + ) -> None: user_email = 'user1@example.com' username = 'user1' @@ -1285,11 +1748,11 @@ def test_general_role_handler_supports_unassigning_topic_manager( class TopicManagerRoleHandlerTest(test_utils.GenericTestBase): """Tests for TopicManagerRoleHandler.""" - def setUp(self): - super(TopicManagerRoleHandlerTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.admin_id = self.get_user_id_from_email(self.SUPER_ADMIN_EMAIL) - def test_handler_with_invalid_username(self): + def test_handler_with_invalid_username(self) -> None: username = 'invaliduser' topic_id = topic_fetchers.get_new_topic_id() self.save_new_topic( @@ -1311,7 +1774,7 @@ def test_handler_with_invalid_username(self): self.assertEqual( response['error'], 'User with given username does not exist.') - def test_adding_topic_manager_role_to_user(self): + def test_adding_topic_manager_role_to_user(self) -> None: user_email = 'user1@example.com' username = 'user1' @@ -1361,7 +1824,7 @@ def test_adding_topic_manager_role_to_user(self): }) self.logout() - def test_adding_new_topic_manager_to_a_topic(self): + def test_adding_new_topic_manager_to_a_topic(self) -> None: user_email = 'user1@example.com' username = 'user1' self.signup(user_email, username) @@ -1431,11 +1894,11 @@ def test_adding_new_topic_manager_to_a_topic(self): class BannedUsersHandlerTest(test_utils.GenericTestBase): """Tests for BannedUsersHandler.""" - def setUp(self): - 
super(BannedUsersHandlerTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.admin_id = self.get_user_id_from_email(self.SUPER_ADMIN_EMAIL) - def test_mark_a_user_ban(self): + def test_mark_a_user_ban(self) -> None: user_email = 'user1@example.com' username = 'user1' self.signup(user_email, username) @@ -1471,7 +1934,9 @@ def test_mark_a_user_ban(self): 'managed_topic_ids': [] }) - def test_banning_a_topic_manger_should_remove_user_from_topics(self): + def test_banning_topic_manager_should_remove_user_from_topics( + self + ) -> None: user_email = 'user1@example.com' username = 'user1' self.signup(user_email, username) @@ -1523,7 +1988,7 @@ def test_banning_a_topic_manger_should_remove_user_from_topics(self): 'managed_topic_ids': [] }) - def test_ban_user_with_invalid_username(self): + def test_ban_user_with_invalid_username(self) -> None: self.login(self.SUPER_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() response_dict = self.put_json( @@ -1534,7 +1999,7 @@ def test_ban_user_with_invalid_username(self): self.assertEqual( response_dict['error'], 'User with given username does not exist.') - def test_unmark_a_banned_user(self): + def test_unmark_a_banned_user(self) -> None: user_email = 'user1@example.com' username = 'user1' self.signup(user_email, username) @@ -1570,7 +2035,7 @@ def test_unmark_a_banned_user(self): 'managed_topic_ids': [] }) - def test_unban_user_with_invalid_username(self): + def test_unban_user_with_invalid_username(self) -> None: self.login(self.SUPER_ADMIN_EMAIL, is_super_admin=True) response_dict = self.delete_json( '/bannedusershandler', @@ -1586,9 +2051,9 @@ class DataExtractionQueryHandlerTests(test_utils.GenericTestBase): EXP_ID = 'exp' - def setUp(self): + def setUp(self) -> None: """Complete the signup process for self.CURRICULUM_ADMIN_EMAIL.""" - super(DataExtractionQueryHandlerTests, self).setUp() + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) 
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) @@ -1611,7 +2076,7 @@ def setUp(self): 0, exp_domain.EXPLICIT_CLASSIFICATION, {}, 'a_session_id_val', 1.0)) - def test_data_extraction_handler(self): + def test_data_extraction_handler(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) # Test that it returns all answers when 'num_answers' is 0. @@ -1643,7 +2108,33 @@ def test_data_extraction_handler(self): self.assertEqual(len(extracted_answers), 1) self.assertEqual(extracted_answers[0]['answer'], 'first answer') - def test_handler_when_exp_version_is_not_int_throws_exception(self): + def test_raises_error_if_no_state_answer_exists_while_data_extraction( + self + ) -> None: + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + payload = { + 'exp_id': self.EXP_ID, + 'exp_version': self.exploration.version, + 'state_name': self.exploration.init_state_name, + 'num_answers': 0 + } + + swap_state_answers = self.swap_to_always_return( + stats_services, 'get_state_answers', None + ) + with swap_state_answers: + response = self.get_json( + '/explorationdataextractionhandler', + params=payload, + expected_status_int=500 + ) + self.assertEqual( + response['error'], + 'No state answer exists for the given exp_id: exp, ' + 'exp_version: 1 and state_name: Introduction' + ) + + def test_handler_when_exp_version_is_not_int_throws_exception(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) # Test that it returns all answers when 'num_answers' is 0. 
@@ -1663,7 +2154,7 @@ def test_handler_when_exp_version_is_not_int_throws_exception(self): expected_status_int=400) self.assertEqual(response['error'], error_msg) - def test_that_handler_raises_exception(self): + def test_that_handler_raises_exception(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) payload = { 'exp_id': self.EXP_ID, @@ -1680,7 +2171,7 @@ def test_that_handler_raises_exception(self): response['error'], 'Exploration \'exp\' does not have \'state name\' state.') - def test_handler_raises_error_with_invalid_exploration_id(self): + def test_handler_raises_error_with_invalid_exploration_id(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) payload = { 'exp_id': 'invalid_exp_id', @@ -1698,7 +2189,9 @@ def test_handler_raises_error_with_invalid_exploration_id(self): 'Entity for exploration with id invalid_exp_id and version 1 not ' 'found.') - def test_handler_raises_error_with_invalid_exploration_version(self): + def test_handler_raises_error_with_invalid_exploration_version( + self + ) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) payload = { 'exp_id': self.EXP_ID, @@ -1720,7 +2213,7 @@ def test_handler_raises_error_with_invalid_exploration_version(self): class ClearSearchIndexTest(test_utils.GenericTestBase): """Tests that search index gets cleared.""" - def test_clear_search_index(self): + def test_clear_search_index(self) -> None: exp_services.load_demo('0') result_explorations = search_services.search_explorations( 'Welcome', [], [], 2)[0] @@ -1731,6 +2224,19 @@ def test_clear_search_index(self): self.assertEqual(result_collections, ['0']) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + user_id_a = self.get_user_id_from_email( + self.CURRICULUM_ADMIN_EMAIL + ) + blog_post = blog_services.create_new_blog_post(user_id_a) + change_dict: blog_services.BlogPostChangeDict = { + 'title': 
'Welcome to Oppia', + 'thumbnail_filename': 'thumbnail.svg', + 'content': 'Hello Blog Authors', + 'tags': ['Math', 'Science'] + } + blog_services.update_blog_post(blog_post.id, change_dict) + blog_services.publish_blog_post(blog_post.id) + csrf_token = self.get_new_csrf_token() generated_exps_response = self.post_json( '/adminhandler', { @@ -1744,16 +2250,20 @@ def test_clear_search_index(self): result_collections = search_services.search_collections( 'Welcome', [], [], 2)[0] self.assertEqual(result_collections, []) + result_blog_posts = ( + search_services.search_blog_post_summaries('Welcome', [], 2)[0] + ) + self.assertEqual(result_blog_posts, []) class SendDummyMailTest(test_utils.GenericTestBase): """"Tests for sending test mails to admin.""" - def setUp(self): - super(SendDummyMailTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - def test_send_dummy_mail(self): + def test_send_dummy_mail(self) -> None: self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() @@ -1777,12 +2287,12 @@ class UpdateUsernameHandlerTest(test_utils.GenericTestBase): OLD_USERNAME = 'oldUsername' NEW_USERNAME = 'newUsername' - def setUp(self): - super(UpdateUsernameHandlerTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.OLD_USERNAME) self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) - def test_update_username_with_none_new_username(self): + def test_update_username_with_none_new_username(self) -> None: csrf_token = self.get_new_csrf_token() response = self.put_json( @@ -1795,7 +2305,7 @@ def test_update_username_with_none_new_username(self): error_msg = 'Missing key in handler args: new_username.' 
self.assertEqual(response['error'], error_msg) - def test_update_username_with_none_old_username(self): + def test_update_username_with_none_old_username(self) -> None: csrf_token = self.get_new_csrf_token() response = self.put_json( @@ -1808,7 +2318,7 @@ def test_update_username_with_none_old_username(self): error_msg = 'Missing key in handler args: old_username.' self.assertEqual(response['error'], error_msg) - def test_update_username_with_non_string_new_username(self): + def test_update_username_with_non_string_new_username(self) -> None: csrf_token = self.get_new_csrf_token() response = self.put_json( @@ -1822,7 +2332,7 @@ def test_update_username_with_non_string_new_username(self): response['error'], 'Schema validation for \'new_username\' failed:' ' Expected string, received 123') - def test_update_username_with_non_string_old_username(self): + def test_update_username_with_non_string_old_username(self) -> None: csrf_token = self.get_new_csrf_token() response = self.put_json( @@ -1837,7 +2347,7 @@ def test_update_username_with_non_string_old_username(self): ' string, received 123') self.assertEqual(response['error'], error_msg) - def test_update_username_with_long_new_username(self): + def test_update_username_with_long_new_username(self) -> None: long_username = 'a' * (constants.MAX_USERNAME_LENGTH + 1) csrf_token = self.get_new_csrf_token() @@ -1854,7 +2364,7 @@ def test_update_username_with_long_new_username(self): % (constants.MAX_USERNAME_LENGTH, long_username)) self.assertEqual(response['error'], error_msg) - def test_update_username_with_nonexistent_old_username(self): + def test_update_username_with_nonexistent_old_username(self) -> None: non_existent_username = 'invalid' csrf_token = self.get_new_csrf_token() @@ -1867,7 +2377,7 @@ def test_update_username_with_nonexistent_old_username(self): expected_status_int=400) self.assertEqual(response['error'], 'Invalid username: invalid') - def test_update_username_with_new_username_already_taken(self): + 
def test_update_username_with_new_username_already_taken(self) -> None: csrf_token = self.get_new_csrf_token() response = self.put_json( @@ -1879,7 +2389,7 @@ def test_update_username_with_new_username_already_taken(self): expected_status_int=400) self.assertEqual(response['error'], 'Username already taken.') - def test_update_username(self): + def test_update_username(self) -> None: user_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() @@ -1891,7 +2401,7 @@ def test_update_username(self): csrf_token=csrf_token) self.assertEqual(user_services.get_username(user_id), self.NEW_USERNAME) - def test_update_username_creates_audit_model(self): + def test_update_username_creates_audit_model(self) -> None: user_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() @@ -1932,16 +2442,16 @@ def test_update_username_creates_audit_model(self): class NumberOfDeletionRequestsHandlerTest(test_utils.GenericTestBase): """Tests NumberOfDeletionRequestsHandler.""" - def setUp(self): - super(NumberOfDeletionRequestsHandlerTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) - def test_get_with_no_deletion_request_returns_zero(self): + def test_get_with_no_deletion_request_returns_zero(self) -> None: response = self.get_json('/numberofdeletionrequestshandler') self.assertEqual(response['number_of_pending_deletion_models'], 0) - def test_get_with_two_deletion_request_returns_two(self): + def test_get_with_two_deletion_request_returns_two(self) -> None: user_models.PendingDeletionRequestModel( id='id1', email='id1@email.com').put() user_models.PendingDeletionRequestModel( @@ -1954,23 +2464,23 @@ def test_get_with_two_deletion_request_returns_two(self): class VerifyUserModelsDeletedHandlerTest(test_utils.GenericTestBase): """Tests 
VerifyUserModelsDeletedHandler.""" - def setUp(self): - super(VerifyUserModelsDeletedHandlerTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) self.admin_user_id = ( self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) - def test_get_without_user_id_raises_error(self): + def test_get_without_user_id_raises_error(self) -> None: self.get_json( '/verifyusermodelsdeletedhandler', expected_status_int=400) - def test_get_with_nonexistent_user_id_returns_true(self): + def test_get_with_nonexistent_user_id_returns_true(self) -> None: response = self.get_json( '/verifyusermodelsdeletedhandler', params={'user_id': 'aaa'}) self.assertFalse(response['related_models_exist']) - def test_get_with_existing_user_id_returns_true(self): + def test_get_with_existing_user_id_returns_true(self) -> None: response = self.get_json( '/verifyusermodelsdeletedhandler', params={'user_id': self.admin_user_id} @@ -1981,8 +2491,8 @@ def test_get_with_existing_user_id_returns_true(self): class DeleteUserHandlerTest(test_utils.GenericTestBase): """Tests DeleteUserHandler.""" - def setUp(self): - super(DeleteUserHandlerTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME) self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL) self.signup(feconf.SYSTEM_EMAIL_ADDRESS, self.CURRICULUM_ADMIN_USERNAME) @@ -1990,19 +2500,19 @@ def setUp(self): self.admin_user_id = self.get_user_id_from_email( feconf.SYSTEM_EMAIL_ADDRESS) - def test_delete_without_user_id_raises_error(self): + def test_delete_without_user_id_raises_error(self) -> None: self.delete_json( '/deleteuserhandler', params={'username': 'someusername'}, expected_status_int=400) - def test_delete_without_username_raises_error(self): + def test_delete_without_username_raises_error(self) -> None: self.delete_json( 
'/deleteuserhandler', params={'user_id': 'aa'}, expected_status_int=400) - def test_delete_with_wrong_username_raises_error(self): + def test_delete_with_wrong_username_raises_error(self) -> None: self.delete_json( '/deleteuserhandler', params={ @@ -2011,7 +2521,9 @@ def test_delete_with_wrong_username_raises_error(self): }, expected_status_int=400) - def test_delete_with_differing_user_id_and_username_raises_error(self): + def test_delete_with_differing_user_id_and_username_raises_error( + self + ) -> None: self.delete_json( '/deleteuserhandler', params={ @@ -2020,7 +2532,9 @@ def test_delete_with_differing_user_id_and_username_raises_error(self): }, expected_status_int=400) - def test_delete_with_correct_user_id_andusername_returns_true(self): + def test_delete_with_correct_user_id_andusername_returns_true( + self + ) -> None: response = self.delete_json( '/deleteuserhandler', params={ @@ -2035,8 +2549,8 @@ def test_delete_with_correct_user_id_andusername_returns_true(self): class UpdateBlogPostHandlerTest(test_utils.GenericTestBase): """Tests UpdateBlogPostHandler.""" - def setUp(self): - super(UpdateBlogPostHandlerTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME) self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL) self.signup(feconf.SYSTEM_EMAIL_ADDRESS, self.CURRICULUM_ADMIN_USERNAME) @@ -2063,7 +2577,7 @@ def setUp(self): self.login(feconf.SYSTEM_EMAIL_ADDRESS, is_super_admin=True) - def test_update_blog_post_without_blog_post_id_raises_error(self): + def test_update_blog_post_without_blog_post_id_raises_error(self) -> None: csrf_token = self.get_new_csrf_token() self.put_json( @@ -2075,7 +2589,9 @@ def test_update_blog_post_without_blog_post_id_raises_error(self): csrf_token=csrf_token, expected_status_int=400) - def test_update_blog_post_without_author_username_raises_error(self): + def test_update_blog_post_without_author_username_raises_error( + self + ) -> None: 
csrf_token = self.get_new_csrf_token() self.put_json( @@ -2087,7 +2603,7 @@ def test_update_blog_post_without_author_username_raises_error(self): csrf_token=csrf_token, expected_status_int=400) - def test_update_blog_post_without_published_on_raises_error(self): + def test_update_blog_post_without_published_on_raises_error(self) -> None: csrf_token = self.get_new_csrf_token() self.put_json( @@ -2099,7 +2615,7 @@ def test_update_blog_post_without_published_on_raises_error(self): csrf_token=csrf_token, expected_status_int=400) - def test_update_blog_post_with_wrong_username_raises_error(self): + def test_update_blog_post_with_wrong_username_raises_error(self) -> None: csrf_token = self.get_new_csrf_token() response = self.put_json( @@ -2115,7 +2631,9 @@ def test_update_blog_post_with_wrong_username_raises_error(self): error_msg = ('Invalid username: someusername') self.assertEqual(response['error'], error_msg) - def test_update_blog_post_with_wrong_blog_post_id_raises_error(self): + def test_update_blog_post_with_wrong_blog_post_id_raises_error( + self + ) -> None: csrf_token = self.get_new_csrf_token() self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) self.add_user_role( @@ -2132,7 +2650,7 @@ def test_update_blog_post_with_wrong_blog_post_id_raises_error(self): csrf_token=csrf_token, expected_status_int=404) - def test_update_blog_post_with_user_without_enough_rights(self): + def test_update_blog_post_with_user_without_enough_rights(self) -> None: csrf_token = self.get_new_csrf_token() response = self.put_json( @@ -2148,7 +2666,7 @@ def test_update_blog_post_with_user_without_enough_rights(self): error_msg = ('User does not have enough rights to be blog post author.') self.assertEqual(response['error'], error_msg) - def test_update_blog_post_with_invalid_date_format(self): + def test_update_blog_post_with_invalid_date_format(self) -> None: csrf_token = self.get_new_csrf_token() self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) 
self.add_user_role( @@ -2170,7 +2688,7 @@ def test_update_blog_post_with_invalid_date_format(self): ' format \'%m/%d/%Y, %H:%M:%S:%f\'') self.assertEqual(response['error'], error_msg) - def test_update_blog_post_with_correct_params(self): + def test_update_blog_post_with_correct_params(self) -> None: csrf_token = self.get_new_csrf_token() self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) self.add_user_role( diff --git a/core/controllers/android_e2e_config.py b/core/controllers/android_e2e_config.py index fd385257de07..87fa9bbcf9b8 100644 --- a/core/controllers/android_e2e_config.py +++ b/core/controllers/android_e2e_config.py @@ -19,13 +19,12 @@ import os from core import feconf -from core import python_utils +from core import utils from core.constants import constants from core.controllers import acl_decorators from core.controllers import base from core.domain import exp_domain from core.domain import exp_services -from core.domain import fs_domain from core.domain import fs_services from core.domain import opportunity_services from core.domain import question_domain @@ -41,17 +40,22 @@ from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services +from core.domain import translation_domain from core.domain import user_services +from typing import Dict, List -class InitializeAndroidTestDataHandler(base.BaseHandler): + +class InitializeAndroidTestDataHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] +): """Handler to initialize android specific structures.""" - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'POST': {}} + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'POST': {}} @acl_decorators.open_access - def post(self): + def post(self) -> None: """Generates structures for Android end-to-end tests. 
This handler generates structures for Android end-to-end tests in @@ -80,17 +84,19 @@ def post(self): if not constants.DEV_MODE: raise Exception('Cannot load new structures data in production.') - if topic_services.does_topic_with_name_exist( - 'Android test'): - topic = topic_fetchers.get_topic_by_name('Android test') + if topic_services.does_topic_with_name_exist('Android test'): + topic = topic_fetchers.get_topic_by_name( + 'Android test', strict=True + ) topic_rights = topic_fetchers.get_topic_rights( - topic.id, strict=False) + topic.id, strict=True + ) if topic_rights.topic_is_published: raise self.InvalidInputException( 'The topic is already published.') - else: - raise self.InvalidInputException( - 'The topic exists but is not published.') + + raise self.InvalidInputException( + 'The topic exists but is not published.') exp_id = '26' user_id = feconf.SYSTEM_COMMITTER_ID # Generate new Structure id for topic, story, skill and question. @@ -110,35 +116,36 @@ def post(self): # Create and update topic to validate before publishing. topic = topic_domain.Topic.create_default_topic( - topic_id, 'Android test', 'test-topic-one', 'description') + topic_id, 'Android test', 'test-topic-one', 'description', + 'fragm') topic.update_url_fragment('test-topic') topic.update_meta_tag_content('tag') topic.update_page_title_fragment_for_web('page title for topic') # Save the dummy image to the filesystem to be used as thumbnail. - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_TOPIC, topic_id)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_TOPIC, topic_id) fs.commit( '%s/test_svg.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, mimetype='image/svg+xml') # Update thumbnail properties. 
- topic.update_thumbnail_filename('test_svg.svg') + topic_services.update_thumbnail_filename(topic, 'test_svg.svg') topic.update_thumbnail_bg_color('#C6DCDA') # Add other structures to the topic. topic.add_canonical_story(story_id) topic.add_uncategorized_skill_id(skill_id) - topic.add_subtopic(1, 'Test Subtopic Title') + topic.add_subtopic(1, 'Test Subtopic Title', 'testsubtop') # Update and validate subtopic. - topic.update_subtopic_thumbnail_filename(1, 'test_svg.svg') + topic_services.update_subtopic_thumbnail_filename( + topic, 1, 'test_svg.svg') topic.update_subtopic_thumbnail_bg_color(1, '#FFFFFF') topic.update_subtopic_url_fragment(1, 'suburl') topic.move_skill_id_to_subtopic(None, 1, skill_id) + topic.update_skill_ids_for_diagnostic_test([skill_id]) subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, topic_id)) @@ -174,13 +181,11 @@ def post(self): ) # Save the dummy image to the filesystem to be used as thumbnail. - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_STORY, story_id)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_STORY, story_id) fs.commit( '%s/test_svg.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, mimetype='image/svg+xml') @@ -207,7 +212,8 @@ def post(self): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Dummy Subtopic Title' + 'title': 'Dummy Subtopic Title', + 'url_fragment': 'dummy-fragment' })] ) @@ -225,12 +231,11 @@ def post(self): self._upload_thumbnail(story_id, feconf.ENTITY_TYPE_STORY) self.render_json({}) - def _upload_thumbnail(self, structure_id, structure_type): + def _upload_thumbnail(self, structure_id: str, structure_type: str) -> None: """Uploads images to the local datastore to be fetched using the AssetDevHandler. 
""" - - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: image_content = f.read() @@ -239,7 +244,11 @@ def _upload_thumbnail(self, structure_id, structure_type): image_content, 'thumbnail', False) def _create_dummy_question( - self, question_id, question_content, linked_skill_ids): + self, + question_id: str, + question_content: str, + linked_skill_ids: List[str] + ) -> question_domain.Question: """Creates a dummy question object with the given question ID. Args: @@ -251,39 +260,44 @@ def _create_dummy_question( Returns: Question. The dummy question with given values. """ + content_id_generator = translation_domain.ContentIdGenerator() state = state_domain.State.create_default_state( - 'ABC', is_initial_state=True) + 'ABC', + content_id_generator.generate( + translation_domain.ContentType.CONTENT), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + is_initial_state=True) state.update_interaction_id('TextInput') state.update_interaction_customization_args({ 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG), 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': { + 'value': False + } }) - state.update_next_content_id_index(1) state.update_linked_skill_id(None) - state.update_content(state_domain.SubtitledHtml('1', question_content)) - recorded_voiceovers = state_domain.RecordedVoiceovers({}) - written_translations = state_domain.WrittenTranslations({}) - recorded_voiceovers.add_content_id_for_voiceover('ca_placeholder_0') - recorded_voiceovers.add_content_id_for_voiceover('1') - recorded_voiceovers.add_content_id_for_voiceover('default_outcome') - written_translations.add_content_id_for_translation('ca_placeholder_0') - written_translations.add_content_id_for_translation('1') - 
written_translations.add_content_id_for_translation('default_outcome') - - state.update_recorded_voiceovers(recorded_voiceovers) - state.update_written_translations(written_translations) + state.update_content(state_domain.SubtitledHtml( + state.content.content_id, question_content)) + solution = state_domain.Solution( 'TextInput', False, 'Solution', state_domain.SubtitledHtml( - 'solution', '

This is a solution.

')) + content_id_generator.generate( + translation_domain.ContentType.SOLUTION), + '

This is a solution.

')) hints_list = [ state_domain.Hint( - state_domain.SubtitledHtml('hint_1', '

This is a hint.

') + state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.HINT), + '

This is a hint.

') ) ] @@ -291,18 +305,23 @@ def _create_dummy_question( state.update_interaction_hints(hints_list) state.update_interaction_default_outcome( state_domain.Outcome( - None, state_domain.SubtitledHtml( - 'feedback_id', '

Dummy Feedback

'), + None, None, state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + '

Dummy Feedback

'), True, [], None, None ) ) question = question_domain.Question( question_id, state, feconf.CURRENT_STATE_SCHEMA_VERSION, - constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, []) + constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, [], + content_id_generator.next_content_id_index) return question - def _create_dummy_skill(self, skill_id, skill_description, explanation): + def _create_dummy_skill( + self, skill_id: str, skill_description: str, explanation: str + ) -> skill_domain.Skill: """Creates a dummy skill object with the given values. Args: diff --git a/core/controllers/android_e2e_config_test.py b/core/controllers/android_e2e_config_test.py index 8a939c6ecdfe..0e6b2eee0d99 100644 --- a/core/controllers/android_e2e_config_test.py +++ b/core/controllers/android_e2e_config_test.py @@ -31,38 +31,40 @@ class AndroidConfigTest(test_utils.GenericTestBase): """Server integration tests for operations on the admin page.""" - def test_initialize_in_production_raises_exception(self): + def test_initialize_in_production_raises_exception(self) -> None: prod_mode_swap = self.swap(constants, 'DEV_MODE', False) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'Cannot load new structures data in production.') with assert_raises_regexp_context_manager, prod_mode_swap: self.post_json( '/initialize_android_test_data', {}, use_payload=False, csrf_token=None) - def test_initialize_topic_is_published(self): + def test_initialize_topic_is_published(self) -> None: self.post_json( '/initialize_android_test_data', {}, use_payload=False, csrf_token=None) self.assertTrue(topic_services.does_topic_with_name_exist( 'Android test')) - topic = topic_fetchers.get_topic_by_name('Android test') + topic = topic_fetchers.get_topic_by_name('Android test', strict=True) topic_rights = topic_fetchers.get_topic_rights( - topic.id, strict=False) + topic.id, strict=True) 
self.assertTrue(topic_rights.topic_is_published) - def test_initialize_structures_are_valid(self): + def test_initialize_structures_are_valid(self) -> None: self.post_json( '/initialize_android_test_data', {}, use_payload=False, csrf_token=None) exp_id = '26' - topic = topic_fetchers.get_topic_by_name('Android test') + topic = topic_fetchers.get_topic_by_name('Android test', strict=True) exploration = exp_fetchers.get_exploration_by_id(exp_id) story = story_fetchers.get_story_by_url_fragment( 'android-end-to-end-testing') + assert story is not None skill = skill_fetchers.get_skill_by_description( 'Dummy Skill for Android') + assert skill is not None skill.validate() story.validate() topic.validate(strict=True) @@ -70,15 +72,16 @@ def test_initialize_structures_are_valid(self): for node in story.story_contents.nodes: self.assertEqual(node.exploration_id, exp_id) - def test_initialize_structure_thumbnails_exist(self): + def test_initialize_structure_thumbnails_exist(self) -> None: # To validate the thumbnails for topics ans stories can be fetched # using AssetsDevHandler. 
self.post_json( '/initialize_android_test_data', {}, use_payload=False, csrf_token=None) - topic = topic_fetchers.get_topic_by_name('Android test') + topic = topic_fetchers.get_topic_by_name('Android test', strict=True) story = story_fetchers.get_story_by_url_fragment( 'android-end-to-end-testing') + assert story is not None self.get_custom_response( '/assetsdevhandler/topic/%s/assets/thumbnail/test_svg.svg' % topic.id, 'image/svg+xml') @@ -86,7 +89,7 @@ def test_initialize_structure_thumbnails_exist(self): '/assetsdevhandler/story/%s/assets/thumbnail/test_svg.svg' % story.id, 'image/svg+xml') - def test_exploration_assets_are_loaded(self): + def test_exploration_assets_are_loaded(self) -> None: self.post_json( '/initialize_android_test_data', {}, use_payload=False, csrf_token=None) @@ -99,7 +102,7 @@ def test_exploration_assets_are_loaded(self): '/assetsdevhandler/exploration/26/assets/image/%s' % filename, 'image/png') - def test_initialize_twice_raises_already_published_exception(self): + def test_initialize_twice_raises_already_published_exception(self) -> None: self.post_json( '/initialize_android_test_data', {}, use_payload=False, csrf_token=None) @@ -109,11 +112,11 @@ def test_initialize_twice_raises_already_published_exception(self): self.assertEqual( response['error'], 'The topic is already published.') - def test_initialize_twice_raises_unpublished_topic_exception(self): + def test_initialize_twice_raises_unpublished_topic_exception(self) -> None: self.post_json( '/initialize_android_test_data', {}, use_payload=False, csrf_token=None) - topic = topic_fetchers.get_topic_by_name('Android test') + topic = topic_fetchers.get_topic_by_name('Android test', strict=True) topic_services.unpublish_topic( topic.id, feconf.SYSTEM_COMMITTER_ID) response = self.post_json( diff --git a/core/controllers/base.py b/core/controllers/base.py index db670c275554..3ccbe4e1794b 100755 --- a/core/controllers/base.py +++ b/core/controllers/base.py @@ -16,10 +16,12 @@ from 
__future__ import annotations +import abc import base64 import datetime import functools import hmac +import io import json import logging import os @@ -29,29 +31,37 @@ from core import feconf from core import handler_schema_constants -from core import python_utils from core import utils from core.controllers import payload_validator from core.domain import auth_domain from core.domain import auth_services +from core.domain import classifier_domain from core.domain import config_domain from core.domain import config_services from core.domain import user_services +from typing import ( + Any, Dict, Final, Generic, Mapping, Optional, Sequence, TypedDict, TypeVar, + Union +) + import webapp2 -from typing import Any, Dict, Optional # isort: skip +# Note: These private type variables are only defined to implement the Generic +# typing structure of BaseHandler. So, do not make them public in the future. +_NormalizedRequestDictType = TypeVar('_NormalizedRequestDictType') +_NormalizedPayloadDictType = TypeVar('_NormalizedPayloadDictType') -ONE_DAY_AGO_IN_SECS = -24 * 60 * 60 -DEFAULT_CSRF_SECRET = 'oppia csrf secret' -CSRF_SECRET = config_domain.ConfigProperty( +ONE_DAY_AGO_IN_SECS: Final = -24 * 60 * 60 +DEFAULT_CSRF_SECRET: Final = 'oppia csrf secret' +CSRF_SECRET: Final = config_domain.ConfigProperty( 'oppia_csrf_secret', {'type': 'unicode'}, 'Text used to encrypt CSRF tokens.', DEFAULT_CSRF_SECRET) # NOTE: These handlers manage user sessions and serve auth pages. Thus, we # should never reject or replace them when running in maintenance mode; # otherwise admins will be unable to access the site. -AUTH_HANDLER_PATHS = ( +AUTH_HANDLER_PATHS: Final = ( '/csrfhandler', '/login', '/session_begin', @@ -59,18 +69,37 @@ ) +class ResponseValueDict(TypedDict): + """Dict representation of key-value pairs that will be included in the + response. 
+ """ + + error: str + status_code: int + + @functools.lru_cache(maxsize=128) -def load_template(filename): +def load_template( + filename: str, *, template_is_aot_compiled: bool +) -> str: """Return the HTML file contents at filepath. Args: filename: str. Name of the requested HTML file. + template_is_aot_compiled: bool. Used to determine which bundle to use. Returns: str. The HTML file content. """ - filepath = os.path.join(feconf.FRONTEND_TEMPLATES_DIR, filename) - with python_utils.open_file(filepath, 'r') as f: + filepath = os.path.join( + ( + feconf.FRONTEND_AOT_DIR + if template_is_aot_compiled + else feconf.FRONTEND_TEMPLATES_DIR + ), + filename + ) + with utils.open_file(filepath, 'r') as f: html_text = f.read() return html_text @@ -78,7 +107,7 @@ def load_template(filename): class SessionBeginHandler(webapp2.RequestHandler): """Handler for creating new authentication sessions.""" - def get(self): + def get(self) -> None: """Establishes a new auth session.""" auth_services.establish_auth_session(self.request, self.response) @@ -86,7 +115,7 @@ def get(self): class SessionEndHandler(webapp2.RequestHandler): """Handler for destroying existing authentication sessions.""" - def get(self): + def get(self) -> None: """Destroys an existing auth session.""" auth_services.destroy_auth_session(self.response) @@ -119,20 +148,11 @@ class InternalErrorException(Exception): pass - class TemporaryMaintenanceException(Exception): - """Error class for when the server is currently down for temporary - maintenance (error code 503). - """ - def __init__(self): - super( - UserFacingExceptions.TemporaryMaintenanceException, self - ).__init__( - 'Oppia is currently being upgraded, and the site should be up ' - 'and running again in a few hours. 
Thanks for your patience!') - - -class BaseHandler(webapp2.RequestHandler): +class BaseHandler( + webapp2.RequestHandler, + Generic[_NormalizedPayloadDictType, _NormalizedRequestDictType] +): """Base class for all Oppia handlers.""" # Whether to check POST and PUT payloads for CSRF tokens prior to @@ -150,28 +170,42 @@ class BaseHandler(webapp2.RequestHandler): PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON - # Using Dict[str, Any] here because the following schema can have a - # recursive structure and currently mypy doesn't support recursive type - # currently. See: https://github.com/python/mypy/issues/731 + # Here we use type Any because the sub-classes of BaseHandler can contain + # different schemas with different types of values, like str, complex Dicts + # and etc. URL_PATH_ARGS_SCHEMAS: Optional[Dict[str, Any]] = None - # Using Dict[str, Any] here because the following schema can have a - # recursive structure and currently mypy doesn't support recursive type - # currently. See: https://github.com/python/mypy/issues/731 + # Here we use type Any because the sub-classes of BaseHandler can contain + # different schemas with different types of values, like str, complex Dicts + # and etc. HANDLER_ARGS_SCHEMAS: Optional[Dict[str, Any]] = None - def __init__(self, request, response): # pylint: disable=super-init-not-called + def __init__( # pylint: disable=super-init-not-called + self, request: webapp2.Request, response: webapp2.Response + ) -> None: # Set self.request, self.response and self.app. self.initialize(request, response) self.start_time = datetime.datetime.utcnow() - # Initializes the return dict for the handlers. - self.values = {} + # Here we use type Any because dict 'self.values' is a return dict + # for the handlers, and different handlers can return different + # key-value pairs. So, to allow every type of key-value pair, we + # used Any type here. 
+ self.values: Dict[str, Any] = {} + # This try-catch block is intended to log cases where getting the + # request payload errors with ValueError: Invalid boundary in multipart + # form: b''. This is done to gather sufficient data to help debug the + # error if it arises in the future. + try: + payload_json_string = self.request.get('payload') + except ValueError as e: + logging.error('%s: request %s', e, self.request) + raise e # TODO(#13155): Remove the if-else part once all the handlers have had # schema validation implemented. - if self.request.get('payload'): - self.payload = json.loads(self.request.get('payload')) + if payload_json_string: + self.payload = json.loads(payload_json_string) else: self.payload = None self.iframed = False @@ -182,13 +216,8 @@ def __init__(self, request, response): # pylint: disable=super-init-not-called self.partially_logged_in = False self.user_is_scheduled_for_deletion = False self.current_user_is_super_admin = False - # Once the attribute `normalized_request` is type annotated here, make - # sure to fix all the subclasses using normalized_request.get() method - # by removing their type: ignore[union-attr] and using a type cast - # instead to eliminate the possibility on union types. - # e.g. ClassroomAccessValidationHandler. 
- self.normalized_request = None - self.normalized_payload = None + self.normalized_request: Optional[_NormalizedRequestDictType] = None + self.normalized_payload: Optional[_NormalizedPayloadDictType] = None try: auth_claims = auth_services.get_auth_claims_from_request(request) @@ -196,6 +225,11 @@ def __init__(self, request, response): # pylint: disable=super-init-not-called auth_services.destroy_auth_session(self.response) self.redirect(user_services.create_login_url(self.request.uri)) return + except auth_domain.UserDisabledError: + auth_services.destroy_auth_session(self.response) + self.redirect( + '/logout?redirect_url=%s' % feconf.PENDING_ACCOUNT_DELETION_URL) + return except auth_domain.InvalidAuthSessionError: logging.exception('User session is invalid!') auth_services.destroy_auth_session(self.response) @@ -213,11 +247,17 @@ def __init__(self, request, response): # pylint: disable=super-init-not-called # to signup page create a new user settings. Otherwise logout # the not-fully registered user. email = auth_claims.email + if email is None: + logging.exception( + 'No email address was found for the user.' + ) + auth_services.destroy_auth_session(self.response) + return if 'signup?' 
in self.request.uri: user_settings = ( user_services.create_new_user(auth_id, email)) else: - logging.exception( + logging.error( 'Cannot find user %s with email %s on page %s' % ( auth_id, email, self.request.uri)) auth_services.destroy_auth_session(self.response) @@ -244,9 +284,11 @@ def __init__(self, request, response): # pylint: disable=super-init-not-called user_settings.last_logged_in)): user_services.record_user_logged_in(self.user_id) - self.roles = ( - [feconf.ROLE_ID_GUEST] - if self.user_id is None else user_settings.roles) + self.roles = user_settings.roles + + if self.user_id is None: + self.roles = [feconf.ROLE_ID_GUEST] + self.user = user_services.get_user_actions_info(self.user_id) if not self._is_requested_path_currently_accessible_to_user(): @@ -255,7 +297,7 @@ def __init__(self, request, response): # pylint: disable=super-init-not-called self.values['is_super_admin'] = self.current_user_is_super_admin - def dispatch(self): + def dispatch(self) -> None: """Overrides dispatch method in webapp2 superclass. Raises: @@ -275,8 +317,7 @@ def dispatch(self): return if not self._is_requested_path_currently_accessible_to_user(): - self.handle_exception( - self.TemporaryMaintenanceException(), self.app.debug) + self.render_template('maintenance-page.mainpage.html') return if self.user_is_scheduled_for_deletion: @@ -321,21 +362,22 @@ def dispatch(self): schema_validation_succeeded = True try: self.validate_and_normalize_args() - except self.InvalidInputException as e: - self.handle_exception(e, self.app.debug) - schema_validation_succeeded = False - # TODO(#13155): Remove this clause once all the handlers have had - # schema validation implemented. - except NotImplementedError as e: + + # TODO(#13155): Remove NotImplementedError once all the handlers + # have had schema validation implemented. 
+ except ( + NotImplementedError, + self.InternalErrorException, + self.InvalidInputException + ) as e: self.handle_exception(e, self.app.debug) schema_validation_succeeded = False - if not schema_validation_succeeded: return - super(BaseHandler, self).dispatch() + super().dispatch() - def validate_and_normalize_args(self): + def validate_and_normalize_args(self) -> None: """Validates schema for controller layer handler class arguments. Raises: @@ -344,11 +386,24 @@ def validate_and_normalize_args(self): """ handler_class_name = self.__class__.__name__ request_method = self.request.environ['REQUEST_METHOD'] + + # For HEAD requests, we use the schema of GET handler, + # because HEAD returns just the handlers of the GET request. + if request_method == 'HEAD': + request_method = 'GET' + url_path_args = self.request.route_kwargs - handler_class_names_with_no_schema = ( - handler_schema_constants.HANDLER_CLASS_NAMES_WITH_NO_SCHEMA) - if handler_class_name in handler_class_names_with_no_schema: + if ( + handler_class_name in + handler_schema_constants.HANDLER_CLASS_NAMES_WITH_NO_SCHEMA + ): + # TODO(#13155): Remove this clause once all the handlers have had + # schema validation implemented. + if self.URL_PATH_ARGS_SCHEMAS or self.HANDLER_ARGS_SCHEMAS: + raise self.InternalErrorException( + 'Remove handler class name from ' + 'HANDLER_CLASS_NAMES_WHICH_STILL_NEED_SCHEMAS') return handler_args = {} @@ -358,7 +413,7 @@ def validate_and_normalize_args(self): if arg == 'csrf_token': # 'csrf_token' has been already validated in the # dispatch method. 
- continue + pass elif arg == 'source': source_url = self.request.get('source') regex_pattern = ( @@ -391,7 +446,7 @@ def validate_and_normalize_args(self): schema_for_url_path_args = self.URL_PATH_ARGS_SCHEMAS self.request.route_kwargs, errors = ( - payload_validator.validate( + payload_validator.validate_arguments_against_schema( url_path_args, schema_for_url_path_args, extra_args_are_allowed) ) @@ -411,24 +466,29 @@ def validate_and_normalize_args(self): return try: + if self.HANDLER_ARGS_SCHEMAS is None: + raise Exception( + 'No \'HANDLER_ARGS_SCHEMAS\' Found for the ' + 'handler class: %s' % handler_class_name + ) schema_for_request_method = self.HANDLER_ARGS_SCHEMAS[ request_method] - except Exception: + except Exception as e: raise NotImplementedError( 'Missing schema for %s method in %s handler class.' % ( - request_method, handler_class_name)) + request_method, handler_class_name)) from e allow_string_to_bool_conversion = request_method in ['GET', 'DELETE'] normalized_arg_values, errors = ( - payload_validator.validate( + payload_validator.validate_arguments_against_schema( handler_args, schema_for_request_method, extra_args_are_allowed, allow_string_to_bool_conversion) ) - self.normalized_payload = { + normalized_payload = { arg: normalized_arg_values.get(arg) for arg in payload_arg_keys } - self.normalized_request = { + normalized_request = { arg: normalized_arg_values.get(arg) for arg in request_arg_keys } @@ -443,15 +503,36 @@ def validate_and_normalize_args(self): # execution onwards to the handler. 
for arg in keys_that_correspond_to_default_values: if request_method in ['GET', 'DELETE']: - self.normalized_request[arg] = normalized_arg_values.get(arg) + normalized_request[arg] = normalized_arg_values.get(arg) else: - self.normalized_payload[arg] = normalized_arg_values.get(arg) + normalized_payload[arg] = normalized_arg_values.get(arg) + + # Here we use MyPy ignore because 'normalized_payload' is of + # Dict[str, Any] type, whereas 'self.normalized_payload' is a Generic + # type whose type can be decided while defining sub-classes. So, Due + # to this mismatch in types MyPy throws an error. Thus, to silence the + # error, we used type ignore here. + self.normalized_payload = normalized_payload # type: ignore[assignment] + # Here we use MyPy ignore because 'normalized_request' is of + # Dict[str, Any] type, whereas 'self.normalized_request' is a Generic + # type whose type can be decided while defining sub-classes. So, Due + # to this mismatch in types MyPy throws an error. Thus, to silence the + # error, we used type ignore here. + self.normalized_request = normalized_request # type: ignore[assignment] + + # Here we use MyPy ignore because here we assigning RaiseErrorOnGet's + # instance to a 'get' method, and according to MyPy assignment to a + # method is not allowed. + self.request.get = RaiseErrorOnGet( # type: ignore[assignment] + 'Use self.normalized_request instead of self.request.').get + self.payload = RaiseErrorOnGet( + 'Use self.normalized_payload instead of self.payload.') if errors: raise self.InvalidInputException('\n'.join(errors)) @property - def current_user_is_site_maintainer(self): + def current_user_is_site_maintainer(self) -> bool: """Returns whether the current user is a site maintainer. A super admin or release coordinator is also a site maintainer. 
@@ -463,7 +544,7 @@ def current_user_is_site_maintainer(self): self.current_user_is_super_admin or feconf.ROLE_ID_RELEASE_COORDINATOR in self.roles) - def _is_requested_path_currently_accessible_to_user(self): + def _is_requested_path_currently_accessible_to_user(self) -> bool: """Checks whether the requested path is currently accessible to user. Returns: @@ -474,15 +555,21 @@ def _is_requested_path_currently_accessible_to_user(self): not feconf.ENABLE_MAINTENANCE_MODE or self.current_user_is_site_maintainer) - def get(self, *args, **kwargs): # pylint: disable=unused-argument + # Here we use type Any because the sub-classes of 'Basehandler' can have + # 'get' method with different number of arguments and types. + def get(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=unused-argument """Base method to handle GET requests.""" logging.warning('Invalid URL requested: %s', self.request.uri) self.error(404) - self._render_exception( - 404, { - 'error': 'Could not find the page %s.' % self.request.uri}) + values: ResponseValueDict = { + 'error': 'Could not find the page %s.' % self.request.uri, + 'status_code': 404 + } + self._render_exception(values) - def post(self, *args): # pylint: disable=unused-argument + # Here we use type Any because the sub-classes of 'Basehandler' can have + # 'post' method with different number of arguments and types. + def post(self, *args: Any) -> None: # pylint: disable=unused-argument """Base method to handle POST requests. Raises: @@ -490,7 +577,9 @@ def post(self, *args): # pylint: disable=unused-argument """ raise self.PageNotFoundException - def put(self, *args): # pylint: disable=unused-argument + # Here we use type Any because the sub-classes of 'Basehandler' can have + # 'put' method with different number of arguments and types. + def put(self, *args: Any) -> None: # pylint: disable=unused-argument """Base method to handle PUT requests. 
Raises: @@ -498,7 +587,9 @@ def put(self, *args): # pylint: disable=unused-argument """ raise self.PageNotFoundException - def delete(self, *args): # pylint: disable=unused-argument + # Here we use type Any because the sub-classes of 'Basehandler' can have + # 'delete' method with different number of arguments and types. + def delete(self, *args: Any) -> None: # pylint: disable=unused-argument """Base method to handle DELETE requests. Raises: @@ -506,11 +597,27 @@ def delete(self, *args): # pylint: disable=unused-argument """ raise self.PageNotFoundException - def render_json(self, values: Dict[Any, Any]) -> None: + # Here we use type Any because the sub-classes of 'Basehandler' can have + # 'head' method with different number of arguments and types. + def head(self, *args: Any, **kwargs: Any) -> None: + """Method to handle HEAD requests. The webapp library automatically + makes sure that HEAD only returns the headers of GET request. + """ + return self.get(*args, **kwargs) + + # TODO(#16539): Once all the places are fixed with the type of value + # that is rendered to JSON, then please remove Sequence[Mapping[str, Any]] + # from render_json's argument type. + # Here we use type Any because the argument 'values' can accept various + # kinds of dictionaries that needs to be sent as a JSON response. + def render_json( + self, values: Union[str, Sequence[Mapping[str, Any]], Mapping[str, Any]] + ) -> None: """Prepares JSON response to be sent to the client. Args: - values: dict. The key-value pairs to encode in the JSON response. + values: str|dict. The key-value pairs to encode in the + JSON response. 
""" self.response.content_type = 'application/json; charset=utf-8' self.response.headers['Content-Disposition'] = ( @@ -525,7 +632,9 @@ def render_json(self, values: Dict[Any, Any]) -> None: self.response.write( b'%s%s' % (feconf.XSSI_PREFIX, json_output.encode('utf-8'))) - def render_downloadable_file(self, file, filename, content_type): + def render_downloadable_file( + self, file: io.BytesIO, filename: str, content_type: str + ) -> None: """Prepares downloadable content to be sent to the client. Args: @@ -537,12 +646,23 @@ def render_downloadable_file(self, file, filename, content_type): self.response.headers['Content-Disposition'] = ( 'attachment; filename=%s' % filename) self.response.charset = 'utf-8' - # We use this super in order to bypass the write method - # in webapp2.Response, since webapp2.Response doesn't support writing - # bytes. - super(webapp2.Response, self.response).write(file.getvalue()) # pylint: disable=bad-super-call - - def render_template(self, filepath, iframe_restriction='DENY'): + # Here we use MyPy ignore because according to MyPy super can + # accept 'super class and self' as arguments but here we are passing + # 'webapp2.Response, and self.response' which confuses MyPy about the + # typing of super, and due to this MyPy is unable to recognize the + # 'write' method and throws an error. This change in arguments is + # done because we use 'super' method in order to bypass the write + # method in webapp2.Response, since webapp2.Response doesn't support + # writing bytes. + super(webapp2.Response, self.response).write(file.getvalue()) # type: ignore[misc] # pylint: disable=bad-super-call + + def render_template( + self, + filepath: str, + iframe_restriction: Optional[str] = 'DENY', + *, + template_is_aot_compiled: bool = False + ) -> None: """Prepares an HTML response to be sent to the client. 
Args: @@ -553,6 +673,11 @@ def render_template(self, filepath, iframe_restriction='DENY'): DENY: Strictly prevents the template to load in an iframe. SAMEORIGIN: The template can only be displayed in a frame on the same origin as the page itself. + template_is_aot_compiled: bool. False by default. Use + True when the template is compiled by angular AoT compiler. + + Raises: + Exception. Invalid X-Frame-Options. """ # The 'no-store' must be used to properly invalidate the cache when we @@ -574,10 +699,13 @@ def render_template(self, filepath, iframe_restriction='DENY'): self.response.expires = 'Mon, 01 Jan 1990 00:00:00 GMT' self.response.pragma = 'no-cache' + self.response.write(load_template( + filepath, template_is_aot_compiled=template_is_aot_compiled + )) - self.response.write(load_template(filepath)) - - def _render_exception_json_or_html(self, return_type, values): + def _render_exception_json_or_html( + self, return_type: str, values: ResponseValueDict + ) -> None: """Renders an error page, or an error JSON response. Args: @@ -592,8 +720,6 @@ def _render_exception_json_or_html(self, return_type, values): if self.iframed: self.render_template( 'error-iframed.mainpage.html', iframe_restriction=None) - elif values['status_code'] == 503: - self.render_template('maintenance-page.mainpage.html') elif values['status_code'] == 404: # Only 404 routes can be handled with angular router as it only # has access to the path, not to the status code. @@ -609,18 +735,17 @@ def _render_exception_json_or_html(self, return_type, values): 'Not a recognized return type: defaulting to render JSON.') self.render_json(values) - def _render_exception(self, error_code, values): + def _render_exception( + self, values: ResponseValueDict + ) -> None: """Renders an error page, or an error JSON response. Args: - error_code: int. The HTTP status code (expected to be one of - 400, 401, 404 or 500). values: dict. The key-value pairs to include in the response. 
""" # The error codes here should be in sync with the error pages # generated via webpack.common.config.ts. - assert error_code in [400, 401, 404, 500, 503] - values['status_code'] = error_code + assert values['status_code'] in [400, 401, 404, 500] method = self.request.environ['REQUEST_METHOD'] if method == 'GET': @@ -637,9 +762,13 @@ def _render_exception(self, error_code, values): self.DELETE_HANDLER_ERROR_RETURN_TYPE, values) else: logging.warning('Not a recognized request method.') - self._render_exception_json_or_html(None, values) + self._render_exception_json_or_html( + feconf.HANDLER_TYPE_JSON, values + ) - def handle_exception(self, exception, unused_debug_mode): + def handle_exception( + self, exception: BaseException, unused_debug_mode: bool + ) -> None: """Overwrites the default exception handler. Args: @@ -652,14 +781,26 @@ def handle_exception(self, exception, unused_debug_mode): # For GET requests, there is no payload, so we check against # GET_HANDLER_ERROR_RETURN_TYPE. # Otherwise, we check whether self.payload exists. - if (self.payload is not None or + + # This check is to avoid throwing of 401 when payload doesn't + # exists and self.payload is replaced by RaiseErrorOnGet object. + # TODO(#13155): Change this to self.normalized_payload + # once schema is implemented for all handlers. 
+ payload_exists = ( + self.payload is not None and + not isinstance(self.payload, RaiseErrorOnGet) + ) + if ( + payload_exists or self.GET_HANDLER_ERROR_RETURN_TYPE == - feconf.HANDLER_TYPE_JSON): + feconf.HANDLER_TYPE_JSON + ): self.error(401) - self._render_exception( - 401, { - 'error': ( - 'You must be logged in to access this resource.')}) + values: ResponseValueDict = { + 'error': 'You must be logged in to access this resource.', + 'status_code': 401 + } + self._render_exception(values) else: self.redirect(user_services.create_login_url(self.request.uri)) return @@ -670,61 +811,85 @@ def handle_exception(self, exception, unused_debug_mode): if isinstance(exception, self.PageNotFoundException): logging.warning('Invalid URL requested: %s', self.request.uri) self.error(404) - self._render_exception( - 404, { - 'error': 'Could not find the page %s.' % self.request.uri}) + values = { + 'error': 'Could not find the page %s.' % self.request.uri, + 'status_code': 404 + } + self._render_exception(values) return logging.exception('Exception raised: %s', exception) if isinstance(exception, self.UnauthorizedUserException): self.error(401) - self._render_exception(401, {'error': str(exception)}) + values = { + 'error': str(exception), + 'status_code': 401 + } + self._render_exception(values) return if isinstance(exception, self.InvalidInputException): self.error(400) - self._render_exception(400, {'error': str(exception)}) + values = { + 'error': str(exception), + 'status_code': 400 + } + self._render_exception(values) return if isinstance(exception, self.InternalErrorException): self.error(500) - self._render_exception(500, {'error': str(exception)}) - return - - if isinstance(exception, self.TemporaryMaintenanceException): - self.error(503) - self._render_exception(503, {'error': str(exception)}) + values = { + 'error': str(exception), + 'status_code': 500 + } + self._render_exception(values) return self.error(500) - self._render_exception(500, {'error': 
str(exception)}) + values = { + 'error': str(exception), + 'status_code': 500 + } + self._render_exception(values) InternalErrorException = UserFacingExceptions.InternalErrorException InvalidInputException = UserFacingExceptions.InvalidInputException NotLoggedInException = UserFacingExceptions.NotLoggedInException PageNotFoundException = UserFacingExceptions.PageNotFoundException UnauthorizedUserException = UserFacingExceptions.UnauthorizedUserException - TemporaryMaintenanceException = ( - UserFacingExceptions.TemporaryMaintenanceException) -class Error404Handler(BaseHandler): +class Error404Handler(BaseHandler[Dict[str, str], Dict[str, str]]): """Handles 404 errors.""" pass +class RaiseErrorOnGet: + """Class that will throw a ValueError when the get function is invoked.""" + + def __init__(self, message: str) -> None: + self.error_message = message + + # Here we use type Any because the 'get' method can accept arbitrary number + # of arguments with different types. + def get(self, *args: Any, **kwargs: Any) -> None: + """Raises an error when invoked.""" + raise ValueError(self.error_message) + + class CsrfTokenManager: """Manages page/user tokens in memcache to protect against CSRF.""" # Max age of the token (48 hours). - _CSRF_TOKEN_AGE_SECS = 60 * 60 * 48 + _CSRF_TOKEN_AGE_SECS: Final = 60 * 60 * 48 # Default user id for non-logged-in users. - _USER_ID_DEFAULT = 'non_logged_in_user' + _USER_ID_DEFAULT: Final = 'non_logged_in_user' @classmethod - def init_csrf_secret(cls): + def init_csrf_secret(cls) -> None: """Verify that non-default CSRF secret exists; creates one if not.""" # Any non-default value is fine. @@ -737,7 +902,7 @@ def init_csrf_secret(cls): base64.urlsafe_b64encode(os.urandom(20))) @classmethod - def _create_token(cls, user_id, issued_on): + def _create_token(cls, user_id: Optional[str], issued_on: float) -> str: """Creates a new CSRF token. 
Args: @@ -756,23 +921,26 @@ def _create_token(cls, user_id, issued_on): user_id = cls._USER_ID_DEFAULT # Round time to seconds. - issued_on = str(int(issued_on)) + issued_on_str = str(int(issued_on)) - digester = hmac.new(CSRF_SECRET.value.encode('utf-8')) + digester = hmac.new( + key=CSRF_SECRET.value.encode('utf-8'), + digestmod='md5' + ) digester.update(user_id.encode('utf-8')) digester.update(b':') - digester.update(issued_on.encode('utf-8')) + digester.update(issued_on_str.encode('utf-8')) digest = digester.digest() # The b64encode returns bytes, so we first need to decode the returned # bytes to string. token = '%s/%s' % ( - issued_on, base64.urlsafe_b64encode(digest).decode('utf-8')) + issued_on_str, base64.urlsafe_b64encode(digest).decode('utf-8')) return token @classmethod - def _get_current_time(cls): + def _get_current_time(cls) -> float: """Returns the current server time. Returns: @@ -781,7 +949,7 @@ def _get_current_time(cls): return time.time() @classmethod - def create_csrf_token(cls, user_id): + def create_csrf_token(cls, user_id: Optional[str]) -> str: """Creates a CSRF token for the given user_id. Args: @@ -793,7 +961,7 @@ def create_csrf_token(cls, user_id): return cls._create_token(user_id, cls._get_current_time()) @classmethod - def is_csrf_token_valid(cls, user_id, token): + def is_csrf_token_valid(cls, user_id: Optional[str], token: str) -> bool: """Validates a given CSRF token. 
Args: @@ -822,13 +990,17 @@ def is_csrf_token_valid(cls, user_id, token): return False -class CsrfTokenHandler(BaseHandler): +class CsrfTokenHandler(BaseHandler[Dict[str, str], Dict[str, str]]): """Handles sending CSRF tokens to the frontend.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON REDIRECT_UNFINISHED_SIGNUPS = False + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} - def get(self): + # Here we use MyPy ignore because the signature of 'get' method is not + # compatible with super class's (BaseHandler) 'get' method. + def get(self) -> None: # type: ignore[override] csrf_token = CsrfTokenManager.create_csrf_token( self.user_id) self.render_json({ @@ -836,11 +1008,26 @@ def get(self): }) -class OppiaMLVMHandler(BaseHandler): +class OppiaMLVMHandler( + BaseHandler[_NormalizedPayloadDictType, _NormalizedRequestDictType] +): """Base class for the handlers that communicate with Oppia-ML VM instances. """ - def extract_request_message_vm_id_and_signature(self): + GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON + # Here we use type Any because the sub-classes of OppiaMLVMHandler can + # contain different schemas with different types of values, like str, + # complex Dicts and etc. + URL_PATH_ARGS_SCHEMAS: Dict[str, Any] = {} + # Here we use type Any because the sub-classes of OppiaMLVMHandler can + # contain different schemas with different types of values, like str, + # complex Dicts and etc. + HANDLER_ARGS_SCHEMAS: Dict[str, Any] = {} + + @abc.abstractmethod + def extract_request_message_vm_id_and_signature( + self + ) -> classifier_domain.OppiaMLAuthInfo: """Returns the OppiaMLAuthInfo domain object containing information from the incoming request that is necessary for authentication. 
diff --git a/core/controllers/base_test.py b/core/controllers/base_test.py index fd2af1e42c3d..3b3a59156410 100644 --- a/core/controllers/base_test.py +++ b/core/controllers/base_test.py @@ -30,7 +30,6 @@ from core import feconf from core import handler_schema_constants -from core import python_utils from core import utils from core.constants import constants from core.controllers import acl_decorators @@ -43,30 +42,42 @@ from core.domain import rights_manager from core.domain import taskqueue_services from core.domain import user_services +from core.domain import wipeout_service from core.platform import models from core.tests import test_utils import main +from typing import Dict, Final, FrozenSet, List, Optional, TypedDict import webapp2 +from webapp2_extras import routes import webtest +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import platform_auth_services as auth_services + auth_services = models.Registry.import_auth_services() datastore_services = models.Registry.import_datastore_services() -(user_models,) = models.Registry.import_models([models.NAMES.user]) +secrets_services = models.Registry.import_secrets_services() +(user_models,) = models.Registry.import_models([models.Names.USER]) -FORTY_EIGHT_HOURS_IN_SECS = 48 * 60 * 60 -PADDING = 1 +FORTY_EIGHT_HOURS_IN_SECS: Final = 48 * 60 * 60 +PADDING: Final = 1 class HelperFunctionTests(test_utils.GenericTestBase): - def test_load_template(self): + def test_load_template(self) -> None: oppia_root_path = os.path.join( 'core', 'templates', 'pages', 'oppia-root') with self.swap(feconf, 'FRONTEND_TEMPLATES_DIR', oppia_root_path): self.assertIn( '"Loading | Oppia"', - base.load_template('oppia-root.mainpage.html')) + base.load_template( + 'oppia-root.mainpage.html', + template_is_aot_compiled=False + ) + ) class UniqueTemplateNamesTests(test_utils.GenericTestBase): @@ -77,10 +88,10 @@ class UniqueTemplateNamesTests(test_utils.GenericTestBase): get_filepath_from_filename function in test_utils.py. 
""" - def test_template_filenames_are_unique(self): + def test_template_filenames_are_unique(self) -> None: templates_dir = os.path.join( 'core', 'templates', 'pages') - all_template_names = [] + all_template_names: List[str] = [] for root, _, filenames in os.walk(templates_dir): template_filenames = [ filename for filename in filenames if filename.endswith( @@ -91,55 +102,83 @@ def test_template_filenames_are_unique(self): class BaseHandlerTests(test_utils.GenericTestBase): - TEST_LEARNER_EMAIL = 'test.learner@example.com' - TEST_LEARNER_USERNAME = 'testlearneruser' - TEST_CREATOR_EMAIL = 'test.creator@example.com' - TEST_CREATOR_USERNAME = 'testcreatoruser' - TEST_EDITOR_EMAIL = 'test.editor@example.com' - TEST_EDITOR_USERNAME = 'testeditoruser' - DELETED_USER_EMAIL = 'deleted.user@example.com' - DELETED_USER_USERNAME = 'deleteduser' - PARTIALLY_LOGGED_IN_USER_EMAIL = 'partial@example.com' - - class MockHandlerWithInvalidReturnType(base.BaseHandler): + TEST_LEARNER_EMAIL: Final = 'test.learner@example.com' + TEST_LEARNER_USERNAME: Final = 'testlearneruser' + TEST_CREATOR_EMAIL: Final = 'test.creator@example.com' + TEST_CREATOR_USERNAME: Final = 'testcreatoruser' + TEST_EDITOR_EMAIL: Final = 'test.editor@example.com' + TEST_EDITOR_USERNAME: Final = 'testeditoruser' + DELETED_USER_EMAIL: Final = 'deleted.user@example.com' + DELETED_USER_USERNAME: Final = 'deleteduser' + PARTIALLY_LOGGED_IN_USER_EMAIL: Final = 'partial@example.com' + + class MockHandlerWithInvalidReturnType( + base.BaseHandler[Dict[str, str], Dict[str, str]] + ): GET_HANDLER_ERROR_RETURN_TYPE = 'invalid_type' - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} - - def get(self): + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + # Here we use MyPy ignore because the signature of 'get' method does not + # match with the signature of super class's (BaseHandler) 'get' method, + # and this happens because all handler methods in 
the main codebase have + # decorators which modify the function signature accordingly, but these + # methods in base_test.py do not. + def get(self) -> None: # type: ignore[override] self.render_template('invalid_page.html') - def head(self): - """Do a HEAD request. This is an unrecognized request method in our + def options(self) -> None: + """Do a OPTIONS request. This is an unrecognized request method in our codebase. """ - self.render_template({'invalid_page.html'}) - - class MockHandlerForTestingErrorPageWithIframed(base.BaseHandler): - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} + self.render_template('invalid_page.html') - def get(self): + class MockHandlerForTestingErrorPageWithIframed( + base.BaseHandler[Dict[str, str], Dict[str, str]] + ): + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + # Here we use MyPy ignore because the signature of 'get' method does not + # match with the signature of super class's (BaseHandler) 'get' method, + # and this happens because all handler methods in the main codebase have + # decorators which modify the function signature accordingly, but these + # methods in base_test.py do not. 
+ def get(self) -> None: # type: ignore[override] self.iframed = True self.render_template('invalid_page.html') - class MockHandlerForTestingUiAccessWrapper(base.BaseHandler): - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} - - def get(self): + class MockHandlerForTestingUiAccessWrapper( + base.BaseHandler[Dict[str, str], Dict[str, str]] + ): + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + # Here we use MyPy ignore because the signature of 'get' method does not + # match with the signature of super class's (BaseHandler) 'get' method, + # and this happens because all handler methods in the main codebase have + # decorators which modify the function signature accordingly, but these + # methods in base_test.py do not. + def get(self) -> None: # type: ignore[override] """Handles GET requests.""" pass - class MockHandlerForTestingAuthorizationWrapper(base.BaseHandler): - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'GET': {}} - - def get(self): + class MockHandlerForTestingAuthorizationWrapper( + base.BaseHandler[Dict[str, str], Dict[str, str]] + ): + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'GET': {}} + + # Here we use MyPy ignore because the signature of 'get' method does not + # match with the signature of super class's (BaseHandler) 'get' method, + # and this happens because all handler methods in the main codebase have + # decorators which modify the function signature accordingly, but these + # methods in base_test.py do not. + def get(self) -> None: # type: ignore[override] """Handles GET requests.""" pass - def setUp(self): + def setUp(self) -> None: super(BaseHandlerTests, self).setUp() self.signup('user@example.com', 'user') @@ -157,28 +196,18 @@ def setUp(self): # Create user that is scheduled for deletion. 
self.signup(self.DELETED_USER_EMAIL, self.DELETED_USER_USERNAME) deleted_user_id = self.get_user_id_from_email(self.DELETED_USER_EMAIL) - deleted_user_model = ( - user_models.UserSettingsModel.get_by_id(deleted_user_id)) - deleted_user_model.deleted = True - deleted_user_model.update_timestamps() - deleted_user_model.put() + wipeout_service.pre_delete_user(deleted_user_id) # Create a new user but do not submit their registration form. user_services.create_new_user( self.get_auth_id_from_email(self.PARTIALLY_LOGGED_IN_USER_EMAIL), self.PARTIALLY_LOGGED_IN_USER_EMAIL) - def test_that_no_get_results_in_500_error(self): + def test_that_no_get_results_in_500_error(self) -> None: """Test that no GET request results in a 500 error.""" for route in main.URLS: - # This was needed for the Django tests to pass (at the time we had - # a Django branch of the codebase). - if isinstance(route, tuple): - continue - else: - url = route.template - url = re.sub('<([^/^:]+)>', 'abc123', url) + url = re.sub('<([^/^:]+)>', 'abc123', route.template) # This url is ignored since it is only needed for a protractor test. # The backend tests fetch templates from @@ -190,16 +219,19 @@ def test_that_no_get_results_in_500_error(self): if url == '/console_errors': continue - # Some of these will 404 or 302. This is expected. - self.get_response_without_checking_for_errors( - url, [200, 301, 302, 400, 401, 404]) + with self.swap_to_always_return( + secrets_services, 'get_secret', 'secret' + ): + # Some of these will 404 or 302. This is expected. + self.get_response_without_checking_for_errors( + url, [200, 301, 302, 400, 401, 404]) # TODO(sll): Add similar tests for POST, PUT, DELETE. # TODO(sll): Set a self.payload attr in the BaseHandler for # POST, PUT and DELETE. Something needs to regulate what # the fields in the payload should be. 
- def test_requests_for_missing_csrf_token(self): + def test_requests_for_missing_csrf_token(self) -> None: """Tests request without csrf_token results in 401 error.""" self.post_json( @@ -208,7 +240,7 @@ def test_requests_for_missing_csrf_token(self): self.put_json( '/community-library/any', payload={}, expected_status_int=401) - def test_requests_for_invalid_paths(self): + def test_requests_for_invalid_paths(self) -> None: """Test that requests for invalid paths result in a 404 error.""" user_id = user_services.get_user_id_from_username('learneruser') csrf_token = base.CsrfTokenManager.create_csrf_token(user_id) @@ -229,7 +261,7 @@ def test_requests_for_invalid_paths(self): self.delete_json('/community-library/data', expected_status_int=404) - def test_html_requests_have_no_store_cache_policy(self): + def test_html_requests_have_no_store_cache_policy(self) -> None: response = self.get_html_response('/community-library') # We set 'no-store' and 'must-revalidate', but webapp # adds 'no-cache' since it is basically a subset of 'no-store'. 
@@ -238,19 +270,19 @@ def test_html_requests_have_no_store_cache_policy(self): 'must-revalidate, no-cache, no-store' ) - def test_root_redirect_rules_for_deleted_user_prod_mode(self): + def test_root_redirect_rules_for_deleted_user_prod_mode(self) -> None: with self.swap(constants, 'DEV_MODE', False): self.login(self.DELETED_USER_EMAIL) response = self.get_html_response('/', expected_status_int=302) self.assertIn('pending-account-deletion', response.headers['location']) - def test_root_redirect_rules_for_deleted_user_dev_mode(self): + def test_root_redirect_rules_for_deleted_user_dev_mode(self) -> None: with self.swap(constants, 'DEV_MODE', True): self.login(self.DELETED_USER_EMAIL) response = self.get_html_response('/', expected_status_int=302) self.assertIn('pending-account-deletion', response.headers['location']) - def test_get_with_invalid_return_type_logs_correct_warning(self): + def test_get_with_invalid_return_type_logs_correct_warning(self) -> None: # Modify the testapp to use the mock handler. 
self.testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( @@ -259,8 +291,8 @@ def test_get_with_invalid_return_type_logs_correct_warning(self): debug=feconf.DEBUG, )) - observed_log_messages = [] - def mock_logging_function(msg, *_): + observed_log_messages: List[str] = [] + def mock_logging_function(msg: str) -> None: observed_log_messages.append(msg) with self.swap(logging, 'warning', mock_logging_function): @@ -270,7 +302,7 @@ def mock_logging_function(msg, *_): observed_log_messages[0], 'Not a recognized return type: defaulting to render JSON.') - def test_unrecognized_request_method_logs_correct_warning(self): + def test_unrecognized_request_method_logs_correct_warning(self) -> None: self.testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock', self.MockHandlerWithInvalidReturnType, @@ -278,21 +310,18 @@ def test_unrecognized_request_method_logs_correct_warning(self): debug=feconf.DEBUG, )) - observed_log_messages = [] - def mock_logging_function(msg, *_): + observed_log_messages: List[str] = [] + def mock_logging_function(msg: str) -> None: observed_log_messages.append(msg) with self.swap(logging, 'warning', mock_logging_function): - self.testapp.head('/mock', status=500) - self.assertEqual(len(observed_log_messages), 2) + self.testapp.options('/mock', status=500) + self.assertEqual(len(observed_log_messages), 1) self.assertEqual( observed_log_messages[0], 'Not a recognized request method.') - self.assertEqual( - observed_log_messages[1], - 'Not a recognized return type: defaulting to render JSON.') - def test_renders_error_page_with_iframed(self): + def test_renders_error_page_with_iframed(self) -> None: # Modify the testapp to use the mock handler. 
self.testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( @@ -311,20 +340,20 @@ def test_renders_error_page_with_iframed(self): response.body ) - def test_dev_mode_cannot_be_true_on_production(self): + def test_dev_mode_cannot_be_true_on_production(self) -> None: server_software_swap = self.swap( os, 'environ', {'SERVER_SOFTWARE': 'Production'}) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'DEV_MODE can\'t be true on production.') with assert_raises_regexp_context_manager, server_software_swap: # This reloads the feconf module so that all the checks in # the module are reexecuted. importlib.reload(feconf) # pylint: disable-all - def test_frontend_error_handler(self): - observed_log_messages = [] + def test_frontend_error_handler(self) -> None: + observed_log_messages: List[str] = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.error().""" observed_log_messages.append(msg % args) @@ -333,17 +362,27 @@ def _mock_logging_function(msg, *args): self.assertEqual(observed_log_messages, ['Frontend error: errors']) - def test_redirect_oppia_test_server(self): + def test_redirect_when_user_is_disabled(self) -> None: + get_auth_claims_from_request_swap = self.swap_to_always_raise( + auth_services, + 'get_auth_claims_from_request', + auth_domain.UserDisabledError + ) + with get_auth_claims_from_request_swap: + response = self.get_html_response('/', expected_status_int=302) + self.assertIn( + 'pending-account-deletion', response.headers['location']) + + def test_redirect_oppia_test_server(self) -> None: # The old demo server redirects to the new demo server. 
response = self.get_html_response( 'https://oppiaserver.appspot.com/splash', expected_status_int=301) self.assertEqual( response.headers['Location'], 'https://oppiatestserver.appspot.com') - def test_no_redirection_for_cron_jobs(self): + def test_no_redirection_for_cron_jobs(self) -> None: # Valid URL, where user now has permissions. self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) - admin_user_id = self.get_user_id_from_email('admin@example.com') self.get_json('/cron/models/cleanup', expected_status_int=200) self.logout() @@ -357,7 +396,7 @@ def test_no_redirection_for_cron_jobs(self): 'https://oppiaserver.appspot.com/cron/unknown', expected_status_int=404) - def test_no_redirection_for_tasks(self): + def test_no_redirection_for_tasks(self) -> None: tasks_data = '{"fn_identifier": "%s", "args": [[]], "kwargs": {}}' % ( taskqueue_services.FUNCTION_ID_DELETE_EXPS_FROM_USER_MODELS ) @@ -379,12 +418,12 @@ def test_no_redirection_for_tasks(self): expected_status_int=401 ) - def test_splash_redirect(self): + def test_splash_redirect(self) -> None: # Tests that the old '/splash' URL is redirected to '/'. 
response = self.get_html_response('/splash', expected_status_int=302) self.assertEqual('http://localhost/', response.headers['location']) - def test_partially_logged_in_redirect(self): + def test_partially_logged_in_redirect(self) -> None: login_context = self.login_context( self.PARTIALLY_LOGGED_IN_USER_EMAIL) @@ -395,7 +434,7 @@ def test_partially_logged_in_redirect(self): response.location, 'http://localhost/logout?redirect_url=/splash') - def test_no_partially_logged_in_redirect_from_logout(self): + def test_no_partially_logged_in_redirect_from_logout(self) -> None: login_context = self.login_context( self.PARTIALLY_LOGGED_IN_USER_EMAIL) @@ -403,7 +442,9 @@ def test_no_partially_logged_in_redirect_from_logout(self): response = self.get_html_response( '/logout', expected_status_int=200) - def test_unauthorized_user_exception_raised_when_session_is_stale(self): + def test_unauthorized_user_exception_raised_when_session_is_stale( + self + ) -> None: with contextlib.ExitStack() as exit_stack: call_counter = exit_stack.enter_context(self.swap_with_call_counter( auth_services, 'destroy_auth_session')) @@ -420,7 +461,9 @@ def test_unauthorized_user_exception_raised_when_session_is_stale(self): response.location, 'http://localhost/login?return_url=http%3A%2F%2Flocalhost%2F') - def test_unauthorized_user_exception_raised_when_session_is_invalid(self): + def test_unauthorized_user_exception_raised_when_session_is_invalid( + self + ) -> None: with contextlib.ExitStack() as exit_stack: call_counter = exit_stack.enter_context(self.swap_with_call_counter( auth_services, 'destroy_auth_session')) @@ -438,7 +481,7 @@ def test_unauthorized_user_exception_raised_when_session_is_invalid(self): response.location, 'http://localhost/login?return_url=http%3A%2F%2Flocalhost%2F') - def test_signup_attempt_on_wrong_page_fails(self): + def test_signup_attempt_on_wrong_page_fails(self) -> None: with contextlib.ExitStack() as exit_stack: call_counter = 
exit_stack.enter_context(self.swap_with_call_counter( auth_services, 'destroy_auth_session')) @@ -452,7 +495,7 @@ def test_signup_attempt_on_wrong_page_fails(self): )) response = self.get_html_response('/', expected_status_int=200) self.assertIn( - b'', + b'', response.body ) @@ -460,11 +503,93 @@ def test_signup_attempt_on_wrong_page_fails(self): logs, [ 'Cannot find user auth_id with email %s on ' - 'page http://localhost/\nNoneType: None' % self.NEW_USER_EMAIL + 'page http://localhost/' % self.NEW_USER_EMAIL ] ) self.assertEqual(call_counter.times_called, 1) + def test_user_without_email_id_raises_exception(self) -> None: + with contextlib.ExitStack() as exit_stack: + swap_auth_claim = self.swap_to_always_return( + auth_services, + 'get_auth_claims_from_request', + auth_domain.AuthClaims( + 'auth_id', None, role_is_super_admin=False) + ) + logs = exit_stack.enter_context( + self.capture_logging(min_level=logging.ERROR) + ) + with swap_auth_claim: + self.get_html_response('/') + + self.assert_matches_regexps( + logs, + [ + 'No email address was found for the user.' 
+ ] + ) + + def test_logs_request_with_invalid_payload(self) -> None: + with contextlib.ExitStack() as exit_stack: + logs = exit_stack.enter_context( + self.capture_logging(min_level=logging.ERROR)) + exit_stack.enter_context(self.swap_to_always_raise( + webapp2.Request, 'get', + error=ValueError('uh-oh'))) + self.get_custom_response( + '/', + expected_content_type='text/plain', + params=None, + expected_status_int=500) + + self.assertRegexpMatches( + logs[0], + 'uh-oh: request GET /') + + +class MissingHandlerArgsTests(test_utils.GenericTestBase): + + class MissingArgsHandler( + base.BaseHandler[Dict[str, str], Dict[str, str]] + ): + """Mock handler for testing.""" + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + + # Here we use MyPy ignore because the signature of 'post' method does + # not match with the signature of super class's (BaseHandler) 'post' + # method, and this happens because all handler methods in the main + # codebase have decorators which modify the function signature + # accordingly, but these methods in base_test.py do not. + def post(self) -> None: # type: ignore[override] + """Handles POST requests.""" + self.render_json({}) + + def setUp(self) -> None: + super(MissingHandlerArgsTests, self).setUp() + + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + + # Modify the testapp to use the MissingArgsHandler. + self.testapp = webtest.TestApp(webapp2.WSGIApplication( + [ + webapp2.Route( + '/MissingArgHandler', + self.MissingArgsHandler, + name='MissingArgHandler' + ) + ], + debug=feconf.DEBUG, + )) + + def test_missing_arg_handler_raises_error(self) -> None: + response = self.testapp.post('/MissingArgHandler', status=500) + parsed_response = json.loads(response.body[len(feconf.XSSI_PREFIX):]) + self.assertEqual( + parsed_response['error'], + 'Missing schema for POST method in MissingArgsHandler handler class.' 
+ ) + class MaintenanceModeTests(test_utils.GenericTestBase): """Tests BaseHandler behavior when maintenance mode is enabled. @@ -472,7 +597,7 @@ class MaintenanceModeTests(test_utils.GenericTestBase): Each test case runs within a context where ENABLE_MAINTENANCE_MODE is True. """ - def setUp(self): + def setUp(self) -> None: super(MaintenanceModeTests, self).setUp() self.signup( self.RELEASE_COORDINATOR_EMAIL, self.RELEASE_COORDINATOR_USERNAME) @@ -484,22 +609,24 @@ def setUp(self): self.swap(feconf, 'ENABLE_MAINTENANCE_MODE', True)) self.context_stack = context_stack.pop_all() - def tearDown(self): + def tearDown(self) -> None: self.context_stack.close() super(MaintenanceModeTests, self).tearDown() - def test_html_response_is_rejected(self): + def test_html_response_is_rejected(self) -> None: destroy_auth_session_call_counter = self.context_stack.enter_context( self.swap_with_call_counter(auth_services, 'destroy_auth_session')) response = self.get_html_response( - '/community-library', expected_status_int=503) + '/community-library', expected_status_int=200) self.assertIn(b'', response.body) self.assertNotIn(b'', response.body) self.assertEqual(destroy_auth_session_call_counter.times_called, 1) - def test_html_response_is_not_rejected_when_user_is_super_admin(self): + def test_html_response_is_not_rejected_when_user_is_super_admin( + self + ) -> None: self.context_stack.enter_context(self.super_admin_context()) destroy_auth_session_call_counter = self.context_stack.enter_context( self.swap_with_call_counter(auth_services, 'destroy_auth_session')) @@ -511,7 +638,8 @@ def test_html_response_is_not_rejected_when_user_is_super_admin(self): self.assertEqual(destroy_auth_session_call_counter.times_called, 0) def test_html_response_is_not_rejected_when_user_is_release_coordinator( - self): + self + ) -> None: self.context_stack.enter_context( self.login_context(self.RELEASE_COORDINATOR_EMAIL)) destroy_auth_session_call_counter = self.context_stack.enter_context( @@ 
-523,39 +651,13 @@ def test_html_response_is_not_rejected_when_user_is_release_coordinator( self.assertNotIn(b'', response.body) self.assertEqual(destroy_auth_session_call_counter.times_called, 0) - def test_json_response_is_rejected(self): - destroy_auth_session_call_counter = self.context_stack.enter_context( - self.swap_with_call_counter(auth_services, 'destroy_auth_session')) - - response = self.get_json('/url_handler', expected_status_int=503) - - self.assertIn('error', response) - self.assertEqual( - response['error'], - 'Oppia is currently being upgraded, and the site should be up ' - 'and running again in a few hours. Thanks for your patience!') - self.assertNotIn('login_url', response) - self.assertEqual(destroy_auth_session_call_counter.times_called, 1) - - def test_json_response_is_not_rejected_when_user_is_super_admin(self): - self.context_stack.enter_context(self.super_admin_context()) - destroy_auth_session_call_counter = self.context_stack.enter_context( - self.swap_with_call_counter(auth_services, 'destroy_auth_session')) - - response = self.get_json('/url_handler') - - self.assertIn('login_url', response) - self.assertIsNone(response['login_url']) - self.assertNotIn('error', response) - self.assertEqual(destroy_auth_session_call_counter.times_called, 0) - - def test_csrfhandler_handler_is_not_rejected(self): + def test_csrfhandler_handler_is_not_rejected(self) -> None: response = self.get_json('/csrfhandler') self.assertTrue( base.CsrfTokenManager.is_csrf_token_valid(None, response['token'])) - def test_session_begin_handler_is_not_rejected(self): + def test_session_begin_handler_is_not_rejected(self) -> None: call_counter = self.context_stack.enter_context( self.swap_with_call_counter( auth_services, 'establish_auth_session')) @@ -564,7 +666,7 @@ def test_session_begin_handler_is_not_rejected(self): self.assertEqual(call_counter.times_called, 1) - def test_session_end_handler_is_not_rejected(self): + def 
test_session_end_handler_is_not_rejected(self) -> None: call_counter = self.context_stack.enter_context( self.swap_with_call_counter(auth_services, 'destroy_auth_session')) @@ -572,18 +674,25 @@ def test_session_end_handler_is_not_rejected(self): self.assertEqual(call_counter.times_called, 1) - def test_signup_fails(self): - with self.assertRaisesRegexp(Exception, 'Bad response: 503'): + def test_signup_fails(self) -> None: + with self.assertRaisesRegex( + Exception, '\'\' unexpectedly found in'): self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) - def test_signup_succeeds_when_maintenance_mode_is_disabled(self): + def test_signup_succeeds_when_maintenance_mode_is_disabled(self) -> None: with self.swap(feconf, 'ENABLE_MAINTENANCE_MODE', False): self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) - def test_signup_succeeds_when_user_is_super_admin(self): - self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME, is_super_admin=True) + def test_signup_succeeds_when_user_is_super_admin(self) -> None: + self.signup( + self.CURRICULUM_ADMIN_EMAIL, + self.CURRICULUM_ADMIN_USERNAME, + is_super_admin=True + ) - def test_admin_auth_session_is_preserved_when_in_maintenance_mode(self): + def test_admin_auth_session_is_preserved_when_in_maintenance_mode( + self + ) -> None: # TODO(#12692): Use stateful login sessions to assert the behavior of # logging out, rather than asserting that destroy_auth_session() gets # called. @@ -600,7 +709,9 @@ def test_admin_auth_session_is_preserved_when_in_maintenance_mode(self): self.assertEqual(destroy_auth_session_call_counter.times_called, 0) - def test_non_admin_auth_session_is_destroyed_when_in_maintenance_mode(self): + def test_non_admin_auth_session_is_destroyed_when_in_maintenance_mode( + self + ) -> None: # TODO(#12692): Use stateful login sessions to assert the behavior of # logging out, rather than asserting that destroy_auth_session() gets # called. 
@@ -612,15 +723,15 @@ def test_non_admin_auth_session_is_destroyed_when_in_maintenance_mode(self): self.assertEqual(destroy_auth_session_call_counter.times_called, 0) - with self.assertRaisesRegexp(Exception, 'Bad response: 503'): - self.get_json('/url_handler?current_url=/') + response = self.get_html_response('/url_handler?current_url=/') + self.assertIn(b'', response.body) self.assertEqual(destroy_auth_session_call_counter.times_called, 1) class CsrfTokenManagerTests(test_utils.GenericTestBase): - def test_create_and_validate_token(self): + def test_create_and_validate_token(self) -> None: uid = 'user_id' token = base.CsrfTokenManager.create_csrf_token(uid) @@ -634,16 +745,16 @@ def test_create_and_validate_token(self): self.assertFalse( base.CsrfTokenManager.is_csrf_token_valid(uid, 'new/token')) - def test_nondefault_csrf_secret_is_used(self): + def test_non_default_csrf_secret_is_used(self) -> None: base.CsrfTokenManager.create_csrf_token('uid') self.assertNotEqual(base.CSRF_SECRET.value, base.DEFAULT_CSRF_SECRET) - def test_token_expiry(self): + def test_token_expiry(self) -> None: # This can be any value. 
orig_time = 100.0 current_time = orig_time - def mock_get_current_time(unused_cls): + def mock_get_current_time(unused_cls: str) -> float: return current_time with self.swap( @@ -669,16 +780,21 @@ def mock_get_current_time(unused_cls): class EscapingTests(test_utils.GenericTestBase): - class FakePage(base.BaseHandler): + class FakePage(base.BaseHandler[Dict[str, str], Dict[str, str]]): """Fake page for testing autoescaping.""" - URL_PATH_ARGS_SCHEMAS = {} - HANDLER_ARGS_SCHEMAS = {'POST': {}} - - def post(self): + URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {} + HANDLER_ARGS_SCHEMAS: Dict[str, Dict[str, str]] = {'POST': {}} + + # Here we use MyPy ignore because the signature of 'post' method does + # not match with the signature of super class's (BaseHandler) 'post' + # method, and this happens because all handler methods in the main + # codebase have decorators which modify the function signature + # accordingly, but these methods in base_test.py do not. + def post(self) -> None: # type: ignore[override] """Handles POST requests.""" self.render_json({'big_value': u'\n', }) - logged_errors = [] - - def _log_error_for_tests(error_message): - """Appends the error message to the logged errors list.""" - logged_errors.append(error_message) - - log_new_error_counter = test_utils.CallCounter(_log_error_for_tests) + log_new_error_counter = test_utils.CallCounter(logging.error) log_new_error_ctx = self.swap( - email_manager, 'log_new_error', log_new_error_counter) - - with can_send_emails_ctx, log_new_error_ctx: - self.assertEqual(log_new_error_counter.times_called, 0) - - self.login(self.EDITOR_EMAIL) - self.get_html_response(feconf.SIGNUP_URL + '?return_url=/') - csrf_token = self.get_new_csrf_token() - - # No user-facing error should surface. 
- self.post_json( - feconf.SIGNUP_DATA_URL, { - 'agreed_to_terms': True, - 'username': self.EDITOR_USERNAME - }, csrf_token=csrf_token) + logging, 'error', log_new_error_counter) + + with self.capture_logging(min_level=logging.ERROR) as logs: + with can_send_emails_ctx, log_new_error_ctx: + self.assertEqual(log_new_error_counter.times_called, 0) + + self.login(self.EDITOR_EMAIL) + self.get_html_response(feconf.SIGNUP_URL + '?return_url=/') + csrf_token = self.get_new_csrf_token() + + # No user-facing error should surface. + self.post_json( + feconf.SIGNUP_DATA_URL, + { + 'agreed_to_terms': True, + 'username': self.EDITOR_USERNAME, + 'default_dashboard': constants.DASHBOARD_TYPE_LEARNER, + 'can_receive_email_updates': ( + feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE + ) + }, + csrf_token=csrf_token + ) - # However, an error should be recorded in the logs. - self.assertEqual(log_new_error_counter.times_called, 1) - self.assertTrue(logged_errors[0].startswith( - 'Original email HTML body does not match cleaned HTML body')) + # However, an error should be recorded in the logs. + self.assertEqual(log_new_error_counter.times_called, 1) + self.assertTrue(logs[0].startswith( + 'Original email HTML body does not match cleaned HTML body') + ) - # Check that no email was sent. - messages = self._get_sent_email_messages(self.EDITOR_EMAIL) - self.assertEqual(0, len(messages)) + # Check that no email was sent. 
+ messages = self._get_sent_email_messages(self.EDITOR_EMAIL) + self.assertEqual(0, len(messages)) - def test_contents_of_signup_email_are_correct(self): + def test_contents_of_signup_email_are_correct(self) -> None: with self.swap(feconf, 'CAN_SEND_EMAILS', True): config_services.set_property( self.admin_id, email_manager.EMAIL_FOOTER.name, @@ -753,10 +800,17 @@ def test_contents_of_signup_email_are_correct(self): csrf_token = self.get_new_csrf_token() self.post_json( - feconf.SIGNUP_DATA_URL, { + feconf.SIGNUP_DATA_URL, + { 'agreed_to_terms': True, - 'username': self.EDITOR_USERNAME - }, csrf_token=csrf_token) + 'username': self.EDITOR_USERNAME, + 'default_dashboard': constants.DASHBOARD_TYPE_LEARNER, + 'can_receive_email_updates': ( + feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE + ) + }, + csrf_token=csrf_token + ) # Check that an email was sent with the correct content. messages = self._get_sent_email_messages(self.EDITOR_EMAIL) @@ -770,7 +824,9 @@ def test_contents_of_signup_email_are_correct(self): self.assertEqual(messages[0].body, self.expected_text_email_content) self.assertEqual(messages[0].html, self.expected_html_email_content) - def test_email_only_sent_once_for_repeated_signups_by_same_user(self): + def test_email_only_sent_once_for_repeated_signups_by_same_user( + self + ) -> None: with self.swap(feconf, 'CAN_SEND_EMAILS', True): config_services.set_property( self.admin_id, email_manager.EMAIL_FOOTER.name, @@ -784,10 +840,17 @@ def test_email_only_sent_once_for_repeated_signups_by_same_user(self): csrf_token = self.get_new_csrf_token() self.post_json( - feconf.SIGNUP_DATA_URL, { + feconf.SIGNUP_DATA_URL, + { 'agreed_to_terms': True, - 'username': self.EDITOR_USERNAME - }, csrf_token=csrf_token) + 'username': self.EDITOR_USERNAME, + 'default_dashboard': constants.DASHBOARD_TYPE_LEARNER, + 'can_receive_email_updates': ( + feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE + ) + }, + csrf_token=csrf_token + ) # Check that an email was sent. 
messages = self._get_sent_email_messages(self.EDITOR_EMAIL) @@ -795,16 +858,23 @@ def test_email_only_sent_once_for_repeated_signups_by_same_user(self): # Send a second POST request. self.post_json( - feconf.SIGNUP_DATA_URL, { + feconf.SIGNUP_DATA_URL, + { 'agreed_to_terms': True, - 'username': self.EDITOR_USERNAME - }, csrf_token=csrf_token) + 'username': self.EDITOR_USERNAME, + 'default_dashboard': constants.DASHBOARD_TYPE_LEARNER, + 'can_receive_email_updates': ( + feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE + ) + }, + csrf_token=csrf_token + ) # Check that no new email was sent. messages = self._get_sent_email_messages(self.EDITOR_EMAIL) self.assertEqual(1, len(messages)) - def test_email_only_sent_if_signup_was_successful(self): + def test_email_only_sent_if_signup_was_successful(self) -> None: with self.swap(feconf, 'CAN_SEND_EMAILS', True): config_services.set_property( self.admin_id, email_manager.EMAIL_FOOTER.name, @@ -821,9 +891,15 @@ def test_email_only_sent_if_signup_was_successful(self): feconf.SIGNUP_DATA_URL, { 'agreed_to_terms': True, - 'username': 'BadUsername!!!' + 'username': 'BadUsername!!!', + 'default_dashboard': constants.DASHBOARD_TYPE_LEARNER, + 'can_receive_email_updates': ( + feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE + ) }, - csrf_token=csrf_token, expected_status_int=400) + csrf_token=csrf_token, + expected_status_int=400 + ) # Check that no email was sent. messages = self._get_sent_email_messages(self.EDITOR_EMAIL) @@ -831,16 +907,23 @@ def test_email_only_sent_if_signup_was_successful(self): # Redo the signup process with a good username. self.post_json( - feconf.SIGNUP_DATA_URL, { + feconf.SIGNUP_DATA_URL, + { 'agreed_to_terms': True, - 'username': self.EDITOR_USERNAME - }, csrf_token=csrf_token) + 'username': self.EDITOR_USERNAME, + 'default_dashboard': constants.DASHBOARD_TYPE_LEARNER, + 'can_receive_email_updates': ( + feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE + ) + }, + csrf_token=csrf_token + ) # Check that a new email was sent. 
messages = self._get_sent_email_messages(self.EDITOR_EMAIL) self.assertEqual(1, len(messages)) - def test_record_of_sent_email_is_written_to_datastore(self): + def test_record_of_sent_email_is_written_to_datastore(self) -> None: with self.swap(feconf, 'CAN_SEND_EMAILS', True): config_services.set_property( self.admin_id, email_manager.EMAIL_FOOTER.name, @@ -852,7 +935,9 @@ def test_record_of_sent_email_is_written_to_datastore(self): self.admin_id, email_manager.EMAIL_SENDER_NAME.name, 'Email Sender') - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() self.assertEqual(len(all_models), 0) self.login(self.EDITOR_EMAIL) @@ -862,8 +947,14 @@ def test_record_of_sent_email_is_written_to_datastore(self): self.post_json( feconf.SIGNUP_DATA_URL, { 'agreed_to_terms': True, - 'username': self.EDITOR_USERNAME - }, csrf_token=csrf_token) + 'username': self.EDITOR_USERNAME, + 'default_dashboard': constants.DASHBOARD_TYPE_LEARNER, + 'can_receive_email_updates': ( + feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE + ) + }, + csrf_token=csrf_token + ) # Check that a new email was sent. 
messages = self._get_sent_email_messages(self.EDITOR_EMAIL) @@ -897,8 +988,8 @@ def test_record_of_sent_email_is_written_to_datastore(self): class DuplicateEmailTests(test_utils.EmailTestBase): """Test that duplicate emails are not sent.""" - def setUp(self): - super(DuplicateEmailTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME) self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL) @@ -917,8 +1008,11 @@ def setUp(self): self.new_footer) def _generate_hash_for_tests( - unused_cls, unused_recipient_id, unused_email_subject, - unused_email_body): + unused_cls: Type[test_utils.TestBase], + unused_recipient_id: str, + unused_email_subject: str, + unused_email_body: str + ) -> str: """Returns the generated hash for tests.""" return 'Email Hash' @@ -927,121 +1021,115 @@ def _generate_hash_for_tests( types.MethodType( _generate_hash_for_tests, email_models.SentEmailModel)) - def test_send_email_does_not_resend_if_same_hash_exists(self): + def test_send_email_does_not_resend_if_same_hash_exists(self) -> None: can_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', True) duplicate_email_ctx = self.swap( feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 1000) - logged_errors = [] - - def _log_error_for_tests(error_message): - """Appends the error message to the logged errors list.""" - logged_errors.append(error_message) - - log_new_error_counter = test_utils.CallCounter(_log_error_for_tests) + log_new_error_counter = test_utils.CallCounter(logging.error) log_new_error_ctx = self.swap( - email_manager, 'log_new_error', log_new_error_counter) - - with can_send_emails_ctx, duplicate_email_ctx, log_new_error_ctx: - all_models = email_models.SentEmailModel.get_all().fetch() - self.assertEqual(len(all_models), 0) - - cleaned_html_body = html_cleaner.clean(self.new_email_html_body) - raw_plaintext_body = cleaned_html_body.replace( - '
', '\n').replace('
', '\n').replace( - '
  • ', '
  • - ').replace('

    ', '

    \n

    ') - cleaned_plaintext_body = html_cleaner.strip_html_tags( - raw_plaintext_body) - email_models.SentEmailModel.create( - self.new_user_id, self.NEW_USER_EMAIL, - feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS, - feconf.EMAIL_INTENT_SIGNUP, self.new_email_subject, - cleaned_plaintext_body, datetime.datetime.utcnow()) - - # Check that the content of this email was recorded in - # SentEmailModel. - all_models = email_models.SentEmailModel.get_all().fetch() - self.assertEqual(len(all_models), 1) - - email_manager.send_post_signup_email( - self.new_user_id, test_for_duplicate_email=True) - - # An error should be recorded in the logs. - self.assertEqual(log_new_error_counter.times_called, 1) - self.assertRegexpMatches(logged_errors[0], 'Duplicate email') - - # Check that a new email was not sent. - messages = self._get_sent_email_messages(self.NEW_USER_EMAIL) - self.assertEqual(0, len(messages)) - - # Check that the content of this email was not recorded in - # SentEmailModel. - all_models = email_models.SentEmailModel.get_all().fetch() - self.assertEqual(len(all_models), 1) - - def test_send_email_does_not_resend_within_duplicate_interval(self): + logging, 'error', log_new_error_counter) + + with self.capture_logging(min_level=logging.ERROR) as logs: + with can_send_emails_ctx, duplicate_email_ctx, log_new_error_ctx: + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() + self.assertEqual(len(all_models), 0) + + cleaned_html_body = html_cleaner.clean(self.new_email_html_body) + raw_plaintext_body = cleaned_html_body.replace( + '
    ', '\n').replace('
    ', '\n').replace( + '

  • ', '
  • - ').replace('

    ', '

    \n

    ') + cleaned_plaintext_body = html_cleaner.strip_html_tags( + raw_plaintext_body) + email_models.SentEmailModel.create( + self.new_user_id, self.NEW_USER_EMAIL, + feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS, + feconf.EMAIL_INTENT_SIGNUP, self.new_email_subject, + cleaned_plaintext_body, datetime.datetime.utcnow()) + + # Check that the content of this email was recorded in + # SentEmailModel. + all_models = email_models.SentEmailModel.get_all().fetch() + self.assertEqual(len(all_models), 1) + + email_manager.send_post_signup_email( + self.new_user_id, test_for_duplicate_email=True) + + # An error should be recorded in the logs. + self.assertEqual(log_new_error_counter.times_called, 1) + self.assertRegex(logs[0], 'Duplicate email') + + # Check that a new email was not sent. + messages = self._get_sent_email_messages(self.NEW_USER_EMAIL) + self.assertEqual(0, len(messages)) + + # Check that the content of this email was not recorded in + # SentEmailModel. + all_models = email_models.SentEmailModel.get_all().fetch() + self.assertEqual(len(all_models), 1) + + def test_send_email_does_not_resend_within_duplicate_interval(self) -> None: can_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', True) duplicate_email_ctx = self.swap( feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2) - logged_errors = [] - - def _log_error_for_tests(error_message): - """Appends the error message to the logged errors list.""" - logged_errors.append(error_message) - - log_new_error_counter = test_utils.CallCounter(_log_error_for_tests) + log_new_error_counter = test_utils.CallCounter(logging.error) log_new_error_ctx = self.swap( - email_manager, 'log_new_error', log_new_error_counter) - - with can_send_emails_ctx, duplicate_email_ctx, log_new_error_ctx: - config_services.set_property( - self.admin_id, email_manager.EMAIL_SENDER_NAME.name, - 'Email Sender') - - all_models = email_models.SentEmailModel.get_all().fetch() - self.assertEqual(len(all_models), 0) - - 
email_manager._send_email( # pylint: disable=protected-access - self.new_user_id, feconf.SYSTEM_COMMITTER_ID, - feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body', - feconf.SYSTEM_EMAIL_ADDRESS) - - # Check that a new email was sent. - messages = self._get_sent_email_messages(self.NEW_USER_EMAIL) - self.assertEqual(1, len(messages)) - - # Check that the content of this email was recorded in - # SentEmailModel. - all_models = email_models.SentEmailModel.get_all().fetch() - self.assertEqual(len(all_models), 1) - - # No error should be recorded in the logs. - self.assertEqual(log_new_error_counter.times_called, 0) - - email_manager._send_email( # pylint: disable=protected-access - self.new_user_id, feconf.SYSTEM_COMMITTER_ID, - feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body', - feconf.SYSTEM_EMAIL_ADDRESS) - - # An error should be recorded in the logs. - self.assertEqual(log_new_error_counter.times_called, 1) - self.assertRegexpMatches(logged_errors[0], 'Duplicate email') - - # Check that a new email was not sent. - messages = self._get_sent_email_messages(self.NEW_USER_EMAIL) - self.assertEqual(1, len(messages)) - - # Check that the content of this email was not recorded in - # SentEmailModel. 
- all_models = email_models.SentEmailModel.get_all().fetch() - self.assertEqual(len(all_models), 1) - - def test_sending_email_with_different_recipient_but_same_hash(self): + logging, 'error', log_new_error_counter) + + with self.capture_logging(min_level=logging.ERROR) as logs: + with can_send_emails_ctx, duplicate_email_ctx, log_new_error_ctx: + config_services.set_property( + self.admin_id, email_manager.EMAIL_SENDER_NAME.name, + 'Email Sender') + + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() + self.assertEqual(len(all_models), 0) + + email_manager._send_email( # pylint: disable=protected-access + self.new_user_id, feconf.SYSTEM_COMMITTER_ID, + feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body', + feconf.SYSTEM_EMAIL_ADDRESS) + + # Check that a new email was sent. + messages = self._get_sent_email_messages(self.NEW_USER_EMAIL) + self.assertEqual(1, len(messages)) + + # Check that the content of this email was recorded in + # SentEmailModel. + all_models = email_models.SentEmailModel.get_all().fetch() + self.assertEqual(len(all_models), 1) + + # No error should be recorded in the logs. + self.assertEqual(log_new_error_counter.times_called, 0) + + email_manager._send_email( # pylint: disable=protected-access + self.new_user_id, feconf.SYSTEM_COMMITTER_ID, + feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body', + feconf.SYSTEM_EMAIL_ADDRESS) + + # An error should be recorded in the logs. + self.assertEqual(log_new_error_counter.times_called, 1) + self.assertRegex(logs[0], 'Duplicate email') + + # Check that a new email was not sent. + messages = self._get_sent_email_messages(self.NEW_USER_EMAIL) + self.assertEqual(1, len(messages)) + + # Check that the content of this email was not recorded in + # SentEmailModel. 
+ all_models = email_models.SentEmailModel.get_all().fetch() + self.assertEqual(len(all_models), 1) + + def test_sending_email_with_different_recipient_but_same_hash(self) -> None: """Hash for both messages is same but recipients are different.""" can_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', True) @@ -1050,7 +1138,9 @@ def test_sending_email_with_different_recipient_but_same_hash(self): feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2) with can_send_emails_ctx, duplicate_email_ctx, self.generate_hash_ctx: - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() self.assertEqual(len(all_models), 0) email_models.SentEmailModel.create( @@ -1089,7 +1179,7 @@ def test_sending_email_with_different_recipient_but_same_hash(self): self.assertEqual( sent_email_model1.html_body, sent_email_model2.html_body) - def test_sending_email_with_different_subject_but_same_hash(self): + def test_sending_email_with_different_subject_but_same_hash(self) -> None: """Hash for both messages is same but subjects are different.""" can_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', True) @@ -1098,7 +1188,9 @@ def test_sending_email_with_different_subject_but_same_hash(self): feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2) with can_send_emails_ctx, duplicate_email_ctx, self.generate_hash_ctx: - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() self.assertEqual(len(all_models), 0) email_models.SentEmailModel.create( @@ -1138,7 +1230,7 @@ def test_sending_email_with_different_subject_but_same_hash(self): self.assertEqual( sent_email_model1.html_body, sent_email_model2.html_body) - def test_sending_email_with_different_body_but_same_hash(self): + def test_sending_email_with_different_body_but_same_hash(self) -> None: """Hash for both messages is same but body is 
different.""" can_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', True) @@ -1147,7 +1239,9 @@ def test_sending_email_with_different_body_but_same_hash(self): feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2) with can_send_emails_ctx, duplicate_email_ctx, self.generate_hash_ctx: - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() self.assertEqual(len(all_models), 0) email_models.SentEmailModel.create( @@ -1187,7 +1281,9 @@ def test_sending_email_with_different_body_but_same_hash(self): self.assertNotEqual( sent_email_model1.html_body, sent_email_model2.html_body) - def test_duplicate_emails_are_sent_after_some_time_has_elapsed(self): + def test_duplicate_emails_are_sent_after_some_time_has_elapsed( + self + ) -> None: can_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', True) @@ -1195,7 +1291,9 @@ def test_duplicate_emails_are_sent_after_some_time_has_elapsed(self): feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2) with can_send_emails_ctx, duplicate_email_ctx: - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() self.assertEqual(len(all_models), 0) email_sent_time = ( @@ -1251,8 +1349,8 @@ def test_duplicate_emails_are_sent_after_some_time_has_elapsed(self): class FeedbackMessageBatchEmailTests(test_utils.EmailTestBase): - def setUp(self): - super(FeedbackMessageBatchEmailTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) @@ -1271,8 +1369,8 @@ def setUp(self): self.can_not_send_feedback_email_ctx = self.swap( feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', False) - def test_email_not_sent_if_can_send_emails_is_false(self): - feedback_messages = { + def test_email_not_sent_if_can_send_emails_is_false(self) -> 
None: + feedback_messages: Dict[str, email_manager.FeedbackMessagesDict] = { self.exploration.id: { 'title': self.exploration.title, 'messages': ['Message 1.1', 'Message 1.2', 'Message 1.3']} @@ -1285,8 +1383,10 @@ def test_email_not_sent_if_can_send_emails_is_false(self): messages = self._get_sent_email_messages(self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_email_not_sent_if_can_send_feedback_message_emails_is_false(self): - feedback_messages = { + def test_email_not_sent_if_can_send_feedback_message_emails_is_false( + self + ) -> None: + feedback_messages: Dict[str, email_manager.FeedbackMessagesDict] = { self.exploration.id: { 'title': self.exploration.title, 'messages': ['Message 1.1', 'Message 1.2', 'Message 1.3']} @@ -1299,8 +1399,8 @@ def test_email_not_sent_if_can_send_feedback_message_emails_is_false(self): messages = self._get_sent_email_messages(self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_that_email_not_sent_if_feedback_messages_are_empty(self): - feedback_messages = {} + def test_that_email_not_sent_if_feedback_messages_are_empty(self) -> None: + feedback_messages: Dict[str, email_manager.FeedbackMessagesDict] = {} with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: email_manager.send_feedback_message_email( self.editor_id, feedback_messages) @@ -1309,7 +1409,7 @@ def test_that_email_not_sent_if_feedback_messages_are_empty(self): messages = self._get_sent_email_messages(self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_correct_email_body_is_sent(self): + def test_correct_email_body_is_sent(self) -> None: expected_email_html_body = ( 'Hi editor,
    ' '
    ' @@ -1349,7 +1449,7 @@ def test_correct_email_body_is_sent(self): '\n' 'You can change your email preferences via the Preferences page.') - feedback_messages = { + feedback_messages: Dict[str, email_manager.FeedbackMessagesDict] = { self.exploration.id: { 'title': self.exploration.title, 'messages': ['Message 1.1', 'Message 1.2', 'Message 1.3']} @@ -1366,7 +1466,9 @@ def test_correct_email_body_is_sent(self): self.assertEqual(messages[0].body, expected_email_text_body) # Check that email model is correct. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() self.assertEqual(len(all_models), 1) sent_email_model = all_models[0] @@ -1386,8 +1488,8 @@ def test_correct_email_body_is_sent(self): class SuggestionEmailTests(test_utils.EmailTestBase): - def setUp(self): - super(SuggestionEmailTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) @@ -1408,7 +1510,7 @@ def setUp(self): self.can_not_send_feedback_email_ctx = self.swap( feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', False) - def test_that_email_not_sent_if_can_send_emails_is_false(self): + def test_that_email_not_sent_if_can_send_emails_is_false(self) -> None: with self.can_not_send_emails_ctx: email_manager.send_suggestion_email( self.exploration.title, self.exploration.id, self.new_user_id, @@ -1419,7 +1521,9 @@ def test_that_email_not_sent_if_can_send_emails_is_false(self): self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_email_not_sent_if_can_send_feedback_message_emails_is_false(self): + def test_email_not_sent_if_can_send_feedback_message_emails_is_false( + self + ) -> None: with self.can_send_emails_ctx, self.can_not_send_feedback_email_ctx: email_manager.send_suggestion_email( self.exploration.title, self.exploration.id, self.new_user_id, @@ 
-1430,7 +1534,7 @@ def test_email_not_sent_if_can_send_feedback_message_emails_is_false(self): self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_that_suggestion_emails_are_correct(self): + def test_that_suggestion_emails_are_correct(self) -> None: expected_email_subject = 'New suggestion for "Title"' expected_email_html_body = ( @@ -1473,7 +1577,9 @@ def test_that_suggestion_emails_are_correct(self): self.assertEqual(messages[0].body, expected_email_text_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual(sent_email_model.subject, expected_email_subject) self.assertEqual(sent_email_model.recipient_id, self.editor_id) @@ -1490,8 +1596,8 @@ def test_that_suggestion_emails_are_correct(self): class SubscriptionEmailTests(test_utils.EmailTestBase): - def setUp(self): - super(SubscriptionEmailTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) @@ -1513,7 +1619,7 @@ def setUp(self): self.can_not_send_subscription_email_ctx = self.swap( feconf, 'CAN_SEND_SUBSCRIPTION_EMAILS', False) - def test_that_email_not_sent_if_can_send_emails_is_false(self): + def test_that_email_not_sent_if_can_send_emails_is_false(self) -> None: with self.can_not_send_emails_ctx: email_manager.send_emails_to_subscribers( self.editor_id, self.exploration.id, self.exploration.title) @@ -1521,7 +1627,9 @@ def test_that_email_not_sent_if_can_send_emails_is_false(self): messages = self._get_sent_email_messages(self.NEW_USER_EMAIL) self.assertEqual(len(messages), 0) - def test_that_email_not_sent_if_can_send_subscription_emails_is_false(self): + def test_that_email_not_sent_if_can_send_subscription_emails_is_false( + self + ) -> None: with 
self.can_send_emails_ctx, self.can_not_send_subscription_email_ctx: email_manager.send_emails_to_subscribers( self.editor_id, self.exploration.id, self.exploration.title) @@ -1529,7 +1637,7 @@ def test_that_email_not_sent_if_can_send_subscription_emails_is_false(self): messages = self._get_sent_email_messages(self.NEW_USER_EMAIL) self.assertEqual(len(messages), 0) - def test_that_subscription_emails_are_correct(self): + def test_that_subscription_emails_are_correct(self) -> None: expected_email_subject = 'editor has published a new exploration!' expected_email_html_body = ( @@ -1570,7 +1678,9 @@ def test_that_subscription_emails_are_correct(self): self.assertEqual(messages[0].body, expected_email_text_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual(sent_email_model.subject, expected_email_subject) self.assertEqual(sent_email_model.recipient_id, self.new_user_id) @@ -1587,8 +1697,8 @@ def test_that_subscription_emails_are_correct(self): class FeedbackMessageInstantEmailTests(test_utils.EmailTestBase): - def setUp(self): - super(FeedbackMessageInstantEmailTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) @@ -1609,7 +1719,7 @@ def setUp(self): self.can_not_send_feedback_email_ctx = self.swap( feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', False) - def test_email_not_sent_if_can_send_emails_is_false(self): + def test_email_not_sent_if_can_send_emails_is_false(self) -> None: with self.can_not_send_emails_ctx: email_manager.send_instant_feedback_message_email( self.new_user_id, self.editor_id, 'editor message', @@ -1620,7 +1730,9 @@ def test_email_not_sent_if_can_send_emails_is_false(self): messages = 
self._get_sent_email_messages(self.NEW_USER_EMAIL) self.assertEqual(len(messages), 0) - def test_email_not_sent_if_can_send_feedback_message_emails_is_false(self): + def test_email_not_sent_if_can_send_feedback_message_emails_is_false( + self + ) -> None: with self.can_send_emails_ctx, self.can_not_send_feedback_email_ctx: email_manager.send_instant_feedback_message_email( self.new_user_id, self.editor_id, 'editor message', @@ -1631,7 +1743,7 @@ def test_email_not_sent_if_can_send_feedback_message_emails_is_false(self): messages = self._get_sent_email_messages(self.NEW_USER_EMAIL) self.assertEqual(len(messages), 0) - def test_that_feedback_message_emails_are_correct(self): + def test_that_feedback_message_emails_are_correct(self) -> None: expected_email_subject = 'New Oppia message in "a subject"' expected_email_html_body = ( @@ -1674,7 +1786,9 @@ def test_that_feedback_message_emails_are_correct(self): self.assertEqual(messages[0].body, expected_email_text_body) # Make sure correct email model is stored. 
- all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual(sent_email_model.subject, expected_email_subject) self.assertEqual(sent_email_model.recipient_id, self.new_user_id) @@ -1693,8 +1807,8 @@ def test_that_feedback_message_emails_are_correct(self): class FlagExplorationEmailTest(test_utils.EmailTestBase): """Test that emails are sent to moderators when explorations are flagged.""" - def setUp(self): - super(FlagExplorationEmailTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) @@ -1722,7 +1836,7 @@ def setUp(self): self.can_not_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', False) - def test_that_email_not_sent_if_can_send_emails_is_false(self): + def test_that_email_not_sent_if_can_send_emails_is_false(self) -> None: with self.can_not_send_emails_ctx: email_manager.send_flag_exploration_email( self.exploration.title, self.exploration.id, self.new_user_id, @@ -1732,7 +1846,7 @@ def test_that_email_not_sent_if_can_send_emails_is_false(self): messages = self._get_sent_email_messages(self.MODERATOR_EMAIL) self.assertEqual(len(messages), 0) - def test_that_flag_exploration_emails_are_correct(self): + def test_that_flag_exploration_emails_are_correct(self) -> None: expected_email_subject = 'Exploration flagged by user: "Title"' expected_email_html_body = ( @@ -1780,7 +1894,9 @@ def test_that_flag_exploration_emails_are_correct(self): self.assertEqual(messages[0].body, expected_email_text_body) # Make sure correct email models are stored. 
- all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = next( m for m in all_models if m.recipient_id == self.moderator_id) self.assertEqual(sent_email_model.subject, expected_email_subject) @@ -1810,11 +1926,11 @@ def test_that_flag_exploration_emails_are_correct(self): class OnboardingReviewerInstantEmailTests(test_utils.EmailTestBase): """Test that correct email is sent while onboarding reviewers.""" - REVIEWER_USERNAME = 'reviewer' - REVIEWER_EMAIL = 'reviewer@example.com' + REVIEWER_USERNAME: Final = 'reviewer' + REVIEWER_EMAIL: Final = 'reviewer@example.com' - def setUp(self): - super(OnboardingReviewerInstantEmailTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.REVIEWER_EMAIL, self.REVIEWER_USERNAME) self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL) user_services.update_email_preferences( @@ -1823,7 +1939,7 @@ def setUp(self): self.can_not_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', False) - def test_that_email_not_sent_if_can_send_emails_is_false(self): + def test_that_email_not_sent_if_can_send_emails_is_false(self) -> None: with self.can_not_send_emails_ctx: email_manager.send_mail_to_onboard_new_reviewers( self.reviewer_id, 'Algebra') @@ -1832,7 +1948,7 @@ def test_that_email_not_sent_if_can_send_emails_is_false(self): messages = self._get_sent_email_messages(self.REVIEWER_EMAIL) self.assertEqual(len(messages), 0) - def test_that_correct_completion_email_is_sent(self): + def test_that_correct_completion_email_is_sent(self) -> None: expected_email_subject = 'Invitation to review suggestions' expected_email_html_body = ( 'Hi reviewer,

    ' @@ -1866,7 +1982,9 @@ def test_that_correct_completion_email_is_sent(self): self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual(sent_email_model.subject, expected_email_subject) self.assertEqual(sent_email_model.recipient_id, self.reviewer_id) @@ -1884,11 +2002,11 @@ def test_that_correct_completion_email_is_sent(self): class NotifyReviewerInstantEmailTests(test_utils.EmailTestBase): """Test that correct email is sent while notifying reviewers.""" - REVIEWER_USERNAME = 'reviewer' - REVIEWER_EMAIL = 'reviewer@example.com' + REVIEWER_USERNAME: Final = 'reviewer' + REVIEWER_EMAIL: Final = 'reviewer@example.com' - def setUp(self): - super(NotifyReviewerInstantEmailTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.REVIEWER_EMAIL, self.REVIEWER_USERNAME) self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL) user_services.update_email_preferences( @@ -1897,7 +2015,7 @@ def setUp(self): self.can_not_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', False) - def test_that_email_not_sent_if_can_send_emails_is_false(self): + def test_that_email_not_sent_if_can_send_emails_is_false(self) -> None: with self.can_not_send_emails_ctx: email_manager.send_mail_to_notify_users_to_review( self.reviewer_id, 'Algebra') @@ -1906,7 +2024,7 @@ def test_that_email_not_sent_if_can_send_emails_is_false(self): self.REVIEWER_EMAIL) self.assertEqual(len(messages), 0) - def test_that_correct_completion_email_is_sent(self): + def test_that_correct_completion_email_is_sent(self) -> None: expected_email_subject = 'Notification to review suggestions' expected_email_html_body = ( 'Hi reviewer,

    ' @@ -1932,7 +2050,9 @@ def test_that_correct_completion_email_is_sent(self): self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[email_models.SentEmailModel] = ( + email_models.SentEmailModel.get_all().fetch() + ) sent_email_model = all_models[0] self.assertEqual(sent_email_model.subject, expected_email_subject) self.assertEqual(sent_email_model.recipient_id, self.reviewer_id) @@ -1948,31 +2068,368 @@ def test_that_correct_completion_email_is_sent(self): feconf.EMAIL_INTENT_REVIEW_CREATOR_DASHBOARD_SUGGESTIONS) +class NotifyContributionAchievementEmailTests(test_utils.EmailTestBase): + """Test that correct email is sent when notifying contributor + achievements.""" + + USERNAME: Final = 'user' + USER_EMAIL: Final = 'user@example.com' + + def setUp(self) -> None: + super().setUp() + self.signup(self.USER_EMAIL, self.USERNAME) + self.user_id = self.get_user_id_from_email(self.USER_EMAIL) + user_services.update_email_preferences( + self.user_id, True, False, False, False) + self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True) + self.can_not_send_emails_ctx = self.swap( + feconf, 'CAN_SEND_EMAILS', False) + + def test_that_email_not_sent_if_can_send_emails_is_false(self) -> None: + contributor_ranking_email_info = ( + suggestion_registry.ContributorMilestoneEmailInfo( + self.user_id, 'question', 'edit', None, + 'Initial Contributor' + )) + with self.can_not_send_emails_ctx: + email_manager.send_mail_to_notify_contributor_ranking_achievement( + contributor_ranking_email_info) + + messages = self._get_sent_email_messages( + self.USER_EMAIL) + + self.assertEqual(len(messages), 0) + + def test_that_email_not_sent_if_user_can_not_receive_emails(self) -> None: + user_services.update_email_preferences( + self.user_id, False, False, False, False) + contributor_ranking_email_info = ( + 
suggestion_registry.ContributorMilestoneEmailInfo( + self.user_id, 'question', 'edit', None, + 'Initial Contributor' + )) + with self.can_not_send_emails_ctx: + email_manager.send_mail_to_notify_contributor_ranking_achievement( + contributor_ranking_email_info) + + messages = self._get_sent_email_messages( + self.USER_EMAIL) + self.assertEqual(len(messages), 0) + + def test_that_translation_submitter_acceptance_ranking_email_is_sent( + self) -> None: + expected_email_subject = 'Oppia Translator Rank Achievement!' + expected_email_html_body = ( + 'Hi user,

    ' + 'This is to let you know that you have successfully achieved the ' + 'Initial Contributor rank for submitting translations in हिन्दी ' + '(Hindi). Your efforts help Oppia grow better every day and ' + 'support students around the world.

    ' + 'You can check all the achievements you earned in the ' + '' + 'Contributor Dashboard.

    ' + 'Best wishes and we hope you can continue to contribute!

    ' + 'The Oppia Contributor Dashboard Team') + + contributor_ranking_email_info = ( + suggestion_registry.ContributorMilestoneEmailInfo( + self.user_id, 'translation', 'acceptance', 'hi', + 'Initial Contributor' + )) + with self.can_send_emails_ctx: + email_manager.send_mail_to_notify_contributor_ranking_achievement( + contributor_ranking_email_info) + + # Make sure correct email is sent. + messages = self._get_sent_email_messages(self.USER_EMAIL) + self.assertEqual(len(messages), 1) + self.assertEqual(messages[0].html, expected_email_html_body) + + # Make sure correct email model is stored. + all_models: Sequence[email_models.SentEmailModel] = ( + email_models.SentEmailModel.get_all().fetch() + ) + sent_email_model = all_models[0] + self.assertEqual(sent_email_model.subject, expected_email_subject) + self.assertEqual(sent_email_model.recipient_id, self.user_id) + self.assertEqual( + sent_email_model.recipient_email, self.USER_EMAIL) + self.assertEqual( + sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID) + self.assertEqual( + sent_email_model.sender_email, + 'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS) + self.assertEqual( + sent_email_model.intent, + feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS) + + def test_that_question_submitter_acceptance_ranking_email_is_sent( + self) -> None: + expected_email_subject = 'Oppia Question Submitter Rank Achievement!' + expected_email_html_body = ( + 'Hi user,

    ' + 'This is to let you know that you have successfully achieved the ' + 'Initial Contributor rank for submitting practice questions. Your ' + 'efforts help Oppia grow better every day and support students ' + 'around the world.

    ' + 'You can check all the achievements you earned in the ' + '' + 'Contributor Dashboard.

    ' + 'Best wishes and we hope you can continue to contribute!

    ' + 'The Oppia Contributor Dashboard Team') + + contributor_ranking_email_info = ( + suggestion_registry.ContributorMilestoneEmailInfo( + self.user_id, 'question', 'acceptance', None, + 'Initial Contributor' + )) + with self.can_send_emails_ctx: + email_manager.send_mail_to_notify_contributor_ranking_achievement( + contributor_ranking_email_info) + + # Make sure correct email is sent. + messages = self._get_sent_email_messages(self.USER_EMAIL) + self.assertEqual(len(messages), 1) + self.assertEqual(messages[0].html, expected_email_html_body) + + # Make sure correct email model is stored. + all_models: Sequence[email_models.SentEmailModel] = ( + email_models.SentEmailModel.get_all().fetch() + ) + sent_email_model = all_models[0] + self.assertEqual(sent_email_model.subject, expected_email_subject) + self.assertEqual(sent_email_model.recipient_id, self.user_id) + self.assertEqual( + sent_email_model.recipient_email, self.USER_EMAIL) + self.assertEqual( + sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID) + self.assertEqual( + sent_email_model.sender_email, + 'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS) + self.assertEqual( + sent_email_model.intent, + feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS) + + def test_that_translation_reviewer_review_ranking_email_is_sent( + self) -> None: + expected_email_subject = 'Oppia Translation Reviewer Rank Achievement!' + expected_email_html_body = ( + 'Hi user,

    ' + 'This is to let you know that you have successfully achieved the ' + 'Initial Contributor rank for reviewing translations in हिन्दी ' + '(Hindi). Your efforts help Oppia grow better every day and ' + 'support students around the world.

    ' + 'You can check all the achievements you earned in the ' + '' + 'Contributor Dashboard.

    ' + 'Best wishes and we hope you can continue to contribute!

    ' + 'The Oppia Contributor Dashboard Team') + + contributor_ranking_email_info = ( + suggestion_registry.ContributorMilestoneEmailInfo( + self.user_id, 'translation', 'review', 'hi', + 'Initial Contributor' + )) + with self.can_send_emails_ctx: + email_manager.send_mail_to_notify_contributor_ranking_achievement( + contributor_ranking_email_info) + + # Make sure correct email is sent. + messages = self._get_sent_email_messages(self.USER_EMAIL) + self.assertEqual(len(messages), 1) + self.assertEqual(messages[0].html, expected_email_html_body) + + # Make sure correct email model is stored. + all_models: Sequence[email_models.SentEmailModel] = ( + email_models.SentEmailModel.get_all().fetch() + ) + sent_email_model = all_models[0] + self.assertEqual(sent_email_model.subject, expected_email_subject) + self.assertEqual(sent_email_model.recipient_id, self.user_id) + self.assertEqual( + sent_email_model.recipient_email, self.USER_EMAIL) + self.assertEqual( + sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID) + self.assertEqual( + sent_email_model.sender_email, + 'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS) + self.assertEqual( + sent_email_model.intent, + feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS) + + def test_that_question_reviewer_review_ranking_email_is_sent( + self) -> None: + expected_email_subject = 'Oppia Question Reviewer Rank Achievement!' + expected_email_html_body = ( + 'Hi user,

    ' + 'This is to let you know that you have successfully achieved the ' + 'Initial Contributor rank for reviewing practice questions. Your ' + 'efforts help Oppia grow better every day and support students ' + 'around the world.

    ' + 'You can check all the achievements you earned in the ' + '' + 'Contributor Dashboard.

    ' + 'Best wishes and we hope you can continue to contribute!

    ' + 'The Oppia Contributor Dashboard Team') + + contributor_ranking_email_info = ( + suggestion_registry.ContributorMilestoneEmailInfo( + self.user_id, 'question', 'review', None, + 'Initial Contributor' + )) + with self.can_send_emails_ctx: + email_manager.send_mail_to_notify_contributor_ranking_achievement( + contributor_ranking_email_info) + + # Make sure correct email is sent. + messages = self._get_sent_email_messages(self.USER_EMAIL) + self.assertEqual(len(messages), 1) + self.assertEqual(messages[0].html, expected_email_html_body) + + # Make sure correct email model is stored. + all_models: Sequence[email_models.SentEmailModel] = ( + email_models.SentEmailModel.get_all().fetch() + ) + sent_email_model = all_models[0] + self.assertEqual(sent_email_model.subject, expected_email_subject) + self.assertEqual(sent_email_model.recipient_id, self.user_id) + self.assertEqual( + sent_email_model.recipient_email, self.USER_EMAIL) + self.assertEqual( + sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID) + self.assertEqual( + sent_email_model.sender_email, + 'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS) + self.assertEqual( + sent_email_model.intent, + feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS) + + def test_that_translation_reviewer_edit_ranking_email_is_sent( + self) -> None: + expected_email_subject = 'Oppia Translation Reviewer Rank Achievement!' + expected_email_html_body = ( + 'Hi user,

    ' + 'This is to let you know that you have successfully achieved the ' + 'Initial Contributor rank for correcting translations in हिन्दी ' + '(Hindi). Your efforts help Oppia grow better every day and ' + 'support students around the world.

    ' + 'You can check all the achievements you earned in the ' + '' + 'Contributor Dashboard.

    ' + 'Best wishes and we hope you can continue to contribute!

    ' + 'The Oppia Contributor Dashboard Team') + + contributor_ranking_email_info = ( + suggestion_registry.ContributorMilestoneEmailInfo( + self.user_id, 'translation', 'edit', 'hi', + 'Initial Contributor' + )) + with self.can_send_emails_ctx: + email_manager.send_mail_to_notify_contributor_ranking_achievement( + contributor_ranking_email_info) + + # Make sure correct email is sent. + messages = self._get_sent_email_messages(self.USER_EMAIL) + self.assertEqual(len(messages), 1) + self.assertEqual(messages[0].html, expected_email_html_body) + + # Make sure correct email model is stored. + all_models: Sequence[email_models.SentEmailModel] = ( + email_models.SentEmailModel.get_all().fetch() + ) + sent_email_model = all_models[0] + self.assertEqual(sent_email_model.subject, expected_email_subject) + self.assertEqual(sent_email_model.recipient_id, self.user_id) + self.assertEqual( + sent_email_model.recipient_email, self.USER_EMAIL) + self.assertEqual( + sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID) + self.assertEqual( + sent_email_model.sender_email, + 'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS) + self.assertEqual( + sent_email_model.intent, + feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS) + + def test_that_question_reviewer_edit_ranking_email_is_sent( + self) -> None: + expected_email_subject = 'Oppia Question Reviewer Rank Achievement!' + expected_email_html_body = ( + 'Hi user,

    ' + 'This is to let you know that you have successfully achieved the ' + 'Initial Contributor rank for correcting practice questions. ' + 'Your efforts help Oppia grow better every day and support ' + 'students around the world.

    ' + 'You can check all the achievements you earned in the ' + '' + 'Contributor Dashboard.

    ' + 'Best wishes and we hope you can continue to contribute!

    ' + 'The Oppia Contributor Dashboard Team') + + contributor_ranking_email_info = ( + suggestion_registry.ContributorMilestoneEmailInfo( + self.user_id, 'question', 'edit', None, + 'Initial Contributor' + )) + with self.can_send_emails_ctx: + email_manager.send_mail_to_notify_contributor_ranking_achievement( + contributor_ranking_email_info) + + # Make sure correct email is sent. + messages = self._get_sent_email_messages(self.USER_EMAIL) + self.assertEqual(len(messages), 1) + self.assertEqual(messages[0].html, expected_email_html_body) + + # Make sure correct email model is stored. + all_models: Sequence[email_models.SentEmailModel] = ( + email_models.SentEmailModel.get_all().fetch() + ) + sent_email_model = all_models[0] + self.assertEqual(sent_email_model.subject, expected_email_subject) + self.assertEqual(sent_email_model.recipient_id, self.user_id) + self.assertEqual( + sent_email_model.recipient_email, self.USER_EMAIL) + self.assertEqual( + sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID) + self.assertEqual( + sent_email_model.sender_email, + 'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS) + self.assertEqual( + sent_email_model.intent, + feconf.EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS) + + class NotifyContributionDashboardReviewersEmailTests(test_utils.EmailTestBase): """Tests the send_mail_to_notify_contributor_dashboard_reviewers method, which sends an email to reviewers with information regarding the suggestions that have waited the longest for review. 
""" - target_id = 'exp1' - skill_id = 'skill_123456' - mocked_review_submission_datetime = datetime.datetime(2020, 6, 15, 5) - AUTHOR_USERNAME = 'author' - AUTHOR_EMAIL = 'author@example.com' - REVIEWER_1_USERNAME = 'reviewer1' - REVIEWER_1_EMAIL = 'reviewer1@community.org' - REVIEWER_2_USERNAME = 'reviewer2' - REVIEWER_2_EMAIL = 'reviewer2@community.org' + target_id: str = 'exp1' + skill_id: str = 'skill_123456' + mocked_review_submission_datetime: datetime.datetime = ( + datetime.datetime(2020, 6, 15, 5) + ) + AUTHOR_USERNAME: Final = 'author' + AUTHOR_EMAIL: Final = 'author@example.com' + REVIEWER_1_USERNAME: Final = 'reviewer1' + REVIEWER_1_EMAIL: Final = 'reviewer1@community.org' + REVIEWER_2_USERNAME: Final = 'reviewer2' + REVIEWER_2_EMAIL: Final = 'reviewer2@community.org' def _create_translation_suggestion_in_lang_with_html_and_datetime( - self, language_code, translation_html, submission_datetime): + self, + language_code: str, + translation_html: str, + submission_datetime: datetime.datetime + ) -> suggestion_registry.BaseSuggestion: """Creates a translation suggestion in the given language_code with the given translation html and submission datetime. """ add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID, + 'content_id': 'content_0', 'language_code': language_code, 'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR, 'translation_html': translation_html, @@ -1990,24 +2447,34 @@ def _create_translation_suggestion_in_lang_with_html_and_datetime( return translation_suggestion def _create_question_suggestion_with_question_html_and_datetime( - self, question_html, submission_datetime): + self, + question_html: str, + submission_datetime: datetime.datetime + ) -> suggestion_registry.BaseSuggestion: """Creates a question suggestion with the given question html and submission datetime. 
""" with self.swap( feconf, 'DEFAULT_INIT_STATE_CONTENT_STR', question_html): - add_question_change_dict = { + content_id_generator = translation_domain.ContentIdGenerator() + add_question_change_dict: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': self.skill_id, 'skill_difficulty': 0.3 @@ -2024,7 +2491,9 @@ def _create_question_suggestion_with_question_html_and_datetime( return question_suggestion def _create_reviewable_suggestion_email_infos_from_suggestions( - self, suggestions): + self, + suggestions: List[suggestion_registry.BaseSuggestion] + ) -> List[suggestion_registry.ReviewableSuggestionEmailInfo]: """Creates a list of ReviewableSuggestionEmailInfo objects from the given suggestions. """ @@ -2038,11 +2507,17 @@ def _create_reviewable_suggestion_email_infos_from_suggestions( ] def _assert_email_data_stored_in_sent_email_model_is_correct( - self, expected_email_html_body, reviewer_id, reviewer_email): + self, + expected_email_html_body: str, + reviewer_id: Optional[str], + reviewer_email: str + ) -> None: """Asserts that the created sent email model from the sent email contains the right information. 
""" - sent_email_models = email_models.SentEmailModel.get_all().filter( + sent_email_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().filter( email_models.SentEmailModel.recipient_id == reviewer_id).fetch() self.assertEqual(len(sent_email_models), 1) sent_email_model = sent_email_models[0] @@ -2066,18 +2541,14 @@ def _assert_email_data_stored_in_sent_email_model_is_correct( sent_email_model.intent, feconf.EMAIL_INTENT_REVIEW_CONTRIBUTOR_DASHBOARD_SUGGESTIONS) - def _log_error_for_tests(self, error_message): - """Appends the error message to the logged errors list.""" - self.logged_errors.append(error_message) - - def _mock_logging_info(self, msg, *args): + def _mock_logging_info(self, msg: str, *args: str) -> None: """Mocks logging.info() by appending the log message to the logged info list. """ self.logged_info.append(msg % args) - def setUp(self): - super(NotifyContributionDashboardReviewersEmailTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, self.AUTHOR_USERNAME) self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.signup(self.REVIEWER_1_EMAIL, self.REVIEWER_1_USERNAME) @@ -2092,12 +2563,11 @@ def setUp(self): self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True) self.cannot_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', False) - self.logged_errors = [] self.log_new_error_counter = test_utils.CallCounter( - self._log_error_for_tests) + logging.error) self.log_new_error_ctx = self.swap( - email_manager, 'log_new_error', self.log_new_error_counter) - self.logged_info = [] + logging, 'error', self.log_new_error_counter) + self.logged_info: List[str] = [] self.log_new_info_ctx = self.swap( logging, 'info', self._mock_logging_info) @@ -2112,79 +2582,86 @@ def setUp(self): .create_reviewable_suggestion_email_info_from_suggestion( question_suggestion)) - def test_email_not_sent_if_can_send_emails_is_false(self): + def 
test_email_not_sent_if_can_send_emails_is_false(self) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) - with self.cannot_send_emails_ctx, self.log_new_error_ctx: - email_manager.send_mail_to_notify_contributor_dashboard_reviewers( - [self.reviewer_1_id], [[self.reviewable_suggestion_email_info]] - ) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.cannot_send_emails_ctx, self.log_new_error_ctx: + email_manager.send_mail_to_notify_contributor_dashboard_reviewers( # pylint: disable=line-too-long + [self.reviewer_1_id], + [[self.reviewable_suggestion_email_info]] + ) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], 'This app cannot send emails to users.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], 'This app cannot send emails to users.') - def test_email_not_sent_if_reviewer_notifications_is_disabled(self): + def test_email_not_sent_if_reviewer_notifications_is_disabled(self) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', False) - with self.can_send_emails_ctx, self.log_new_error_ctx: - email_manager.send_mail_to_notify_contributor_dashboard_reviewers( - [self.reviewer_1_id], [[self.reviewable_suggestion_email_info]] - ) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.can_send_emails_ctx, self.log_new_error_ctx: + email_manager.send_mail_to_notify_contributor_dashboard_reviewers( # pylint: disable=line-too-long + [self.reviewer_1_id], + [[self.reviewable_suggestion_email_info]] + ) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - 
self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], - 'The "contributor_dashboard_reviewer_emails_is_enabled" property ' - 'must be enabled on the admin config page in order to send ' - 'reviewers the emails.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], + 'The "contributor_dashboard_reviewer_emails_is_enabled" ' + 'property must be enabled on the admin config page in order ' + 'to send reviewers the emails.') - def test_email_not_sent_if_reviewer_email_does_not_exist(self): + def test_email_not_sent_if_reviewer_email_does_not_exist(self) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) - with self.can_send_emails_ctx, self.log_new_error_ctx: - email_manager.send_mail_to_notify_contributor_dashboard_reviewers( - ['reviewer_id_with_no_email'], - [[self.reviewable_suggestion_email_info]] - ) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.can_send_emails_ctx, self.log_new_error_ctx: + email_manager.send_mail_to_notify_contributor_dashboard_reviewers( # pylint: disable=line-too-long + ['reviewer_id_with_no_email'], + [[self.reviewable_suggestion_email_info]] + ) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], - 'There was no email for the given reviewer id: ' - 'reviewer_id_with_no_email.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], + 'There was no email for the given reviewer id: ' + 'reviewer_id_with_no_email.') - def test_email_not_sent_if_no_reviewers_to_notify(self): + def 
test_email_not_sent_if_no_reviewers_to_notify(self) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) - with self.can_send_emails_ctx, self.log_new_error_ctx: - email_manager.send_mail_to_notify_contributor_dashboard_reviewers( - [], [[self.reviewable_suggestion_email_info]] - ) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.can_send_emails_ctx, self.log_new_error_ctx: + email_manager.send_mail_to_notify_contributor_dashboard_reviewers( # pylint: disable=line-too-long + [], [[self.reviewable_suggestion_email_info]] + ) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], - 'No Contributor Dashboard reviewers to notify.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], + 'No Contributor Dashboard reviewers to notify.') def test_email_not_sent_if_no_suggestions_to_notify_the_reviewer_about( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2202,7 +2679,8 @@ def test_email_not_sent_if_no_suggestions_to_notify_the_reviewer_about( 'id: %s.' 
% self.reviewer_1_id) def test_email_sent_to_reviewer_with_question_waiting_a_day_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2260,7 +2738,8 @@ def test_email_sent_to_reviewer_with_question_waiting_a_day_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_question_waiting_days_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2318,7 +2797,8 @@ def test_email_sent_to_reviewer_with_question_waiting_days_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_question_waiting_an_hour_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2377,7 +2857,8 @@ def test_email_sent_to_reviewer_with_question_waiting_an_hour_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_question_waiting_hours_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2436,7 +2917,8 @@ def test_email_sent_to_reviewer_with_question_waiting_hours_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_question_waiting_a_minute_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2494,7 +2976,8 @@ def test_email_sent_to_reviewer_with_question_waiting_a_minute_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_question_waiting_minutes_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2552,7 +3035,8 @@ def test_email_sent_to_reviewer_with_question_waiting_minutes_for_review( self.REVIEWER_1_EMAIL) def 
test_email_sent_to_reviewer_with_question_waiting_seconds_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2610,7 +3094,8 @@ def test_email_sent_to_reviewer_with_question_waiting_seconds_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_multi_questions_waiting_for_a_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2677,7 +3162,8 @@ def test_email_sent_to_reviewer_with_multi_questions_waiting_for_a_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_multi_reviewers_with_multi_question_suggestions( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2793,7 +3279,8 @@ def test_email_sent_to_multi_reviewers_with_multi_question_suggestions( self.REVIEWER_2_EMAIL) def test_email_sent_to_reviewer_with_translation_waiting_a_day_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2819,8 +3306,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_a_day_for_review( 'Here are some examples of contributions that have been waiting ' 'the longest for review:

    ' '

      ' - '
    • The following Hindi translation suggestion was submitted for ' - 'review 1 day ago:' + '
    • The following हिन्दी (Hindi) translation suggestion was ' + 'submitted for review 1 day ago:' '
      Sample translation

    • ' '

    ' 'Please take some time to review any of the above contributions ' @@ -2852,7 +3339,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_a_day_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_translation_waiting_days_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2876,8 +3364,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_days_for_review( 'Here are some examples of contributions that have been waiting ' 'the longest for review:

    ' '
      ' - '
    • The following Hindi translation suggestion was submitted for ' - 'review 5 days ago:' + '
    • The following हिन्दी (Hindi) translation suggestion was ' + 'submitted for review 5 days ago:' '
      Sample translation

    • ' '

    ' 'Please take some time to review any of the above contributions ' @@ -2910,7 +3398,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_days_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_translation_waiting_an_hour_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2936,8 +3425,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_an_hour_for_review( 'Here are some examples of contributions that have been waiting ' 'the longest for review:

    ' '
      ' - '
    • The following Hindi translation suggestion was submitted for ' - 'review 1 hour ago:' + '
    • The following हिन्दी (Hindi) translation suggestion was ' + 'submitted for review 1 hour ago:' '
      Sample translation

    • ' '

    ' 'Please take some time to review any of the above contributions ' @@ -2970,7 +3459,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_an_hour_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_translation_waiting_hours_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -2996,8 +3486,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_hours_for_review( 'Here are some examples of contributions that have been waiting ' 'the longest for review:

    ' '
      ' - '
    • The following Hindi translation suggestion was submitted for ' - 'review 5 hours ago:' + '
    • The following हिन्दी (Hindi) translation suggestion was ' + 'submitted for review 5 hours ago:' '
      Sample translation

    • ' '

    ' 'Please take some time to review any of the above contributions ' @@ -3030,7 +3520,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_hours_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_translation_waiting_a_min_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -3056,8 +3547,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_a_min_for_review( 'Here are some examples of contributions that have been waiting ' 'the longest for review:

    ' '
      ' - '
    • The following Hindi translation suggestion was submitted for ' - 'review 1 minute ago:' + '
    • The following हिन्दी (Hindi) translation suggestion was ' + 'submitted for review 1 minute ago:' '
      Sample translation

    • ' '

    ' 'Please take some time to review any of the above contributions ' @@ -3090,7 +3581,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_a_min_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_translation_waiting_mins_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -3116,8 +3608,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_mins_for_review( 'Here are some examples of contributions that have been waiting ' 'the longest for review:

    ' '
      ' - '
    • The following Hindi translation suggestion was submitted for ' - 'review 5 minutes ago:' + '
    • The following हिन्दी (Hindi) translation suggestion was ' + 'submitted for review 5 minutes ago:' '
      Sample translation

    • ' '

    ' 'Please take some time to review any of the above contributions ' @@ -3150,7 +3642,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_mins_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_translation_waiting_secs_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -3176,8 +3669,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_secs_for_review( 'Here are some examples of contributions that have been waiting ' 'the longest for review:

    ' '
      ' - '
    • The following Hindi translation suggestion was submitted for ' - 'review 1 minute ago:' + '
    • The following हिन्दी (Hindi) translation suggestion was ' + 'submitted for review 1 minute ago:' '
      Sample translation

    • ' '

    ' 'Please take some time to review any of the above contributions ' @@ -3210,7 +3703,8 @@ def test_email_sent_to_reviewer_with_translation_waiting_secs_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_reviewer_with_multi_translation_waiting_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -3243,8 +3737,8 @@ def test_email_sent_to_reviewer_with_multi_translation_waiting_for_review( '
  • The following English translation suggestion was submitted ' 'for review 1 day ago:' '
    Translation 1

  • ' - '
  • The following French translation suggestion was submitted for ' - 'review 1 hour ago:' + '
  • The following français (French) translation suggestion was ' + 'submitted for review 1 hour ago:' '
    Translation 2

  • ' '
    ' 'Please take some time to review any of the above contributions ' @@ -3277,7 +3771,8 @@ def test_email_sent_to_reviewer_with_multi_translation_waiting_for_review( self.REVIEWER_1_EMAIL) def test_email_sent_to_multi_reviewers_with_multi_translations_suggestions( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -3325,8 +3820,8 @@ def test_email_sent_to_multi_reviewers_with_multi_translations_suggestions( '
  • The following English translation suggestion was submitted ' 'for review 1 day ago:' '
    Translation 1 for reviewer 1

  • ' - '
  • The following French translation suggestion was submitted for ' - 'review 1 hour ago:' + '
  • The following français (French) translation suggestion was ' + 'submitted for review 1 hour ago:' '
    Translation 2 for reviewer 1

  • ' '
    ' 'Please take some time to review any of the above contributions ' @@ -3347,11 +3842,11 @@ def test_email_sent_to_multi_reviewers_with_multi_translations_suggestions( 'Here are some examples of contributions that have been waiting ' 'the longest for review:

    ' '
      ' - '
    • The following Hindi translation suggestion was submitted for ' - 'review 1 minute ago:' + '
    • The following हिन्दी (Hindi) translation suggestion was ' + 'submitted for review 1 minute ago:' '
      Translation 1 for reviewer 2

    • ' - '
    • The following French translation suggestion was submitted for ' - 'review 1 minute ago:' + '
    • The following français (French) translation suggestion was ' + 'submitted for review 1 minute ago:' '
      Translation 2 for reviewer 2

    • ' '

    ' 'Please take some time to review any of the above contributions ' @@ -3392,7 +3887,9 @@ def test_email_sent_to_multi_reviewers_with_multi_translations_suggestions( expected_email_html_body_reviewer_2, self.reviewer_2_id, self.REVIEWER_2_EMAIL) - def test_email_sent_to_multi_reviewers_with_multi_suggestions_waiting(self): + def test_email_sent_to_multi_reviewers_with_multi_suggestions_waiting( + self + ) -> None: config_services.set_property( 'committer_id', 'contributor_dashboard_reviewer_emails_is_enabled', True) @@ -3462,8 +3959,8 @@ def test_email_sent_to_multi_reviewers_with_multi_suggestions_waiting(self): 'Here are some examples of contributions that have been waiting ' 'the longest for review:

    ' '
      ' - '
    • The following French translation suggestion was submitted for ' - 'review 1 minute ago:' + '
    • The following français (French) translation suggestion was ' + 'submitted for review 1 minute ago:' '
      Translation 2

    • ' '
    • The following question suggestion was submitted for ' 'review 1 minute ago:' @@ -3517,25 +4014,31 @@ class NotifyAdminsSuggestionsWaitingTooLongForReviewEmailTests( review on the Contributor Dashboard. """ - target_id = 'exp1' - skill_id = 'skill_123456' - mocked_review_submission_datetime = datetime.datetime(2020, 6, 15, 5) - AUTHOR_USERNAME = 'author' - AUTHOR_EMAIL = 'author@example.com' - CURRICULUM_ADMIN_1_USERNAME = 'user1' - CURRICULUM_ADMIN_1_EMAIL = 'user1@community.org' - CURRICULUM_ADMIN_2_USERNAME = 'user2' - CURRICULUM_ADMIN_2_EMAIL = 'user2@community.org' + target_id: str = 'exp1' + skill_id: str = 'skill_123456' + mocked_review_submission_datetime: datetime.datetime = ( + datetime.datetime(2020, 6, 15, 5) + ) + AUTHOR_USERNAME: Final = 'author' + AUTHOR_EMAIL: Final = 'author@example.com' + CURRICULUM_ADMIN_1_USERNAME: Final = 'user1' + CURRICULUM_ADMIN_1_EMAIL: Final = 'user1@community.org' + CURRICULUM_ADMIN_2_USERNAME: Final = 'user2' + CURRICULUM_ADMIN_2_EMAIL: Final = 'user2@community.org' def _create_translation_suggestion_in_lang_with_html_and_datetime( - self, language_code, translation_html, submission_datetime): + self, + language_code: str, + translation_html: str, + submission_datetime: datetime.datetime + ) -> suggestion_registry.BaseSuggestion: """Creates a translation suggestion in the given language_code with the given translation html and submission datetime. 
""" add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID, + 'content_id': 'content_0', 'language_code': language_code, 'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR, 'translation_html': translation_html, @@ -3553,24 +4056,34 @@ def _create_translation_suggestion_in_lang_with_html_and_datetime( return translation_suggestion def _create_question_suggestion_with_question_html_and_datetime( - self, question_html, submission_datetime): + self, + question_html: str, + submission_datetime: datetime.datetime + ) -> suggestion_registry.BaseSuggestion: """Creates a question suggestion with the given question html and submission datetime. """ with self.swap( feconf, 'DEFAULT_INIT_STATE_CONTENT_STR', question_html): - add_question_change_dict = { + content_id_generator = translation_domain.ContentIdGenerator() + add_question_change_dict: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': self.skill_id, 'skill_difficulty': 0.3 @@ -3587,7 +4100,8 @@ def _create_question_suggestion_with_question_html_and_datetime( return question_suggestion def _create_reviewable_suggestion_email_infos_from_suggestions( - self, suggestions): + self, suggestions: List[suggestion_registry.BaseSuggestion] + ) -> 
List[suggestion_registry.ReviewableSuggestionEmailInfo]: """Creates a list of ReviewableSuggestionEmailInfo objects from the given suggestions. """ @@ -3601,11 +4115,17 @@ def _create_reviewable_suggestion_email_infos_from_suggestions( ] def _assert_email_data_stored_in_sent_email_model_is_correct( - self, expected_email_html_body, admin_id, admin_email): + self, + expected_email_html_body: str, + admin_id: Optional[str], + admin_email: str + ) -> None: """Asserts that the created sent email model from the sent email contains the right information. """ - sent_email_models = email_models.SentEmailModel.get_all().filter( + sent_email_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().filter( email_models.SentEmailModel.recipient_id == admin_id).fetch() self.assertEqual(len(sent_email_models), 1) sent_email_model = sent_email_models[0] @@ -3630,20 +4150,14 @@ def _assert_email_data_stored_in_sent_email_model_is_correct( sent_email_model.intent, feconf.EMAIL_INTENT_ADDRESS_CONTRIBUTOR_DASHBOARD_SUGGESTIONS) - def _log_error_for_tests(self, error_message): - """Appends the error message to the logged errors list.""" - self.logged_errors.append(error_message) - - def _mock_logging_info(self, msg, *args): + def _mock_logging_info(self, msg: str, *args: str) -> None: """Mocks logging.info() by appending the log message to the logged info list. 
""" self.logged_info.append(msg % args) - def setUp(self): - super( - NotifyAdminsSuggestionsWaitingTooLongForReviewEmailTests, - self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, self.AUTHOR_USERNAME) self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.signup( @@ -3658,12 +4172,11 @@ def setUp(self): self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True) self.cannot_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', False) - self.logged_errors = [] self.log_new_error_counter = test_utils.CallCounter( - self._log_error_for_tests) + logging.error) self.log_new_error_ctx = self.swap( - email_manager, 'log_new_error', self.log_new_error_counter) - self.logged_info = [] + logging, 'error', self.log_new_error_counter) + self.logged_info: List[str] = [] self.log_new_info_ctx = self.swap( logging, 'info', self._mock_logging_info) @@ -3678,103 +4191,110 @@ def setUp(self): .create_reviewable_suggestion_email_info_from_suggestion( question_suggestion)) - def test_email_not_sent_if_can_send_emails_is_false(self): + def test_email_not_sent_if_can_send_emails_is_false(self) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) - with self.cannot_send_emails_ctx, self.log_new_error_ctx: - with self.swap( - suggestion_models, - 'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0): - ( - email_manager - .send_mail_to_notify_admins_suggestions_waiting_long( - [self.admin_1_id], - [], - [], - [self.reviewable_suggestion_email_info]) - ) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.cannot_send_emails_ctx, self.log_new_error_ctx: + with self.swap( + suggestion_models, + 'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0): + ( + email_manager + .send_mail_to_notify_admins_suggestions_waiting_long( + [self.admin_1_id], + [], + [], + [self.reviewable_suggestion_email_info]) + ) - messages = 
self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], 'This app cannot send emails to users.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], 'This app cannot send emails to users.') def test_email_not_sent_if_notifying_admins_about_suggestions_is_disabled( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', False) - with self.can_send_emails_ctx, self.log_new_error_ctx: - with self.swap( - suggestion_models, - 'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0): - ( - email_manager - .send_mail_to_notify_admins_suggestions_waiting_long( - [self.admin_1_id], [], [], - [self.reviewable_suggestion_email_info]) - ) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.can_send_emails_ctx, self.log_new_error_ctx: + with self.swap( + suggestion_models, + 'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0): + ( + email_manager + .send_mail_to_notify_admins_suggestions_waiting_long( + [self.admin_1_id], [], [], + [self.reviewable_suggestion_email_info]) + ) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], - 'The "notify_admins_suggestions_waiting_too_long" property ' - 'must be enabled on the admin config page in order to send ' - 'admins the emails.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], + 'The "notify_admins_suggestions_waiting_too_long" property ' + 'must be enabled on the admin config page in order to send ' + 'admins the emails.') 
- def test_email_not_sent_if_admin_email_does_not_exist(self): + def test_email_not_sent_if_admin_email_does_not_exist(self) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) - with self.can_send_emails_ctx, self.log_new_error_ctx: - with self.swap( - suggestion_models, - 'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0): - ( - email_manager - .send_mail_to_notify_admins_suggestions_waiting_long( - ['admin_id_without_email'], [], [], - [self.reviewable_suggestion_email_info]) - ) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.can_send_emails_ctx, self.log_new_error_ctx: + with self.swap( + suggestion_models, + 'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0): + ( + email_manager + .send_mail_to_notify_admins_suggestions_waiting_long( + ['admin_id_without_email'], [], [], + [self.reviewable_suggestion_email_info]) + ) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], - 'There was no email for the given admin id: admin_id_without_email.' - ) + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], + 'There was no email for the given admin id: ' + 'admin_id_without_email.' 
+ ) - def test_email_not_sent_if_no_admins_to_notify(self): + def test_email_not_sent_if_no_admins_to_notify(self) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) - with self.can_send_emails_ctx, self.log_new_error_ctx: - with self.swap( - suggestion_models, - 'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0): - ( - email_manager - .send_mail_to_notify_admins_suggestions_waiting_long( - [], [], [], [self.reviewable_suggestion_email_info]) - ) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.can_send_emails_ctx, self.log_new_error_ctx: + with self.swap( + suggestion_models, + 'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0): + ( + email_manager + .send_mail_to_notify_admins_suggestions_waiting_long( + [], [], [], [self.reviewable_suggestion_email_info]) + ) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], 'There were no admins to notify.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], 'There were no admins to notify.') def test_email_not_sent_if_no_suggestions_to_notify_the_admin_about( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) @@ -3797,7 +4317,8 @@ def test_email_not_sent_if_no_suggestions_to_notify_the_admin_about( 'too long for a review.') def test_email_sent_to_admin_if_question_has_waited_too_long_for_a_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) @@ -3865,7 +4386,8 @@ def test_email_sent_to_admin_if_question_has_waited_too_long_for_a_review( self.CURRICULUM_ADMIN_1_EMAIL) def 
test_email_sent_to_admin_if_multiple_questions_have_waited_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) @@ -3942,7 +4464,8 @@ def test_email_sent_to_admin_if_multiple_questions_have_waited_for_review( self.CURRICULUM_ADMIN_1_EMAIL) def test_email_sent_to_admin_if_translation_has_waited_too_long_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) @@ -3976,8 +4499,8 @@ def test_email_sent_to_admin_if_translation_has_waited_too_long_for_review( 'review:' '

      ' '
        ' - '
      • The following Hindi translation suggestion was submitted for ' - 'review 5 days ago:' + '
      • The following हिन्दी (Hindi) translation suggestion was ' + 'submitted for review 5 days ago:' '
        Sample translation

      • ' '

      ' 'Thanks so much - we appreciate your help!
      ' @@ -4010,7 +4533,8 @@ def test_email_sent_to_admin_if_translation_has_waited_too_long_for_review( self.CURRICULUM_ADMIN_1_EMAIL) def test_email_sent_to_admin_if_multi_translations_have_waited_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) @@ -4053,8 +4577,8 @@ def test_email_sent_to_admin_if_multi_translations_have_waited_for_review( '
    • The following English translation suggestion was submitted ' 'for review 2 days ago:' '
      Translation 1

    • ' - '
    • The following French translation suggestion was submitted for ' - 'review 2 days ago:' + '
    • The following français (French) translation suggestion was ' + 'submitted for review 2 days ago:' '
      Translation 2

    • ' '

    ' 'Thanks so much - we appreciate your help!
    ' @@ -4087,7 +4611,8 @@ def test_email_sent_to_admin_if_multi_translations_have_waited_for_review( self.CURRICULUM_ADMIN_1_EMAIL) def test_email_sent_to_admin_if_multi_suggestion_types_waiting_for_review( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) @@ -4138,8 +4663,8 @@ def test_email_sent_to_admin_if_multi_suggestion_types_waiting_for_review( '
  • The following English translation suggestion was submitted ' 'for review 2 days ago:' '
    Translation 1

  • ' - '
  • The following French translation suggestion was submitted for ' - 'review 2 days ago:' + '
  • The following français (French) translation suggestion was ' + 'submitted for review 2 days ago:' '
    Translation 2

  • ' '
    ' 'Thanks so much - we appreciate your help!
    ' @@ -4168,10 +4693,21 @@ def test_email_sent_to_admin_if_multi_suggestion_types_waiting_for_review( self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. - sent_email_models = email_models.SentEmailModel.get_all().filter( + sent_email_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().filter( email_models.SentEmailModel.recipient_id == self.admin_1_id).fetch() self.assertEqual(len(sent_email_models), 2) - sent_email_models.sort(key=lambda m: m.html_body) + # Here, we are narrowing down the type of 'sent_email_models' from + # Sequence to List. Because Sequence is a broader type and it does not + # contain extra methods (e.g: .sort()), and below we are using .sort() + # method by assuming that 'sent_email_models' is an instance of list. + # So, to avoid error we used assertion here. + assert isinstance(sent_email_models, list) + email_sort_fn: Callable[[email_models.SentEmailModel], str] = ( + lambda m: m.html_body if isinstance(m.html_body, str) else '' + ) + sent_email_models.sort(key=email_sort_fn) sent_email_model = sent_email_models[0] self.assertEqual( sent_email_model.subject, @@ -4193,7 +4729,7 @@ def test_email_sent_to_admin_if_multi_suggestion_types_waiting_for_review( sent_email_model.intent, feconf.EMAIL_INTENT_ADDRESS_CONTRIBUTOR_DASHBOARD_SUGGESTIONS) - def test_email_sent_to_multiple_admins(self): + def test_email_sent_to_multiple_admins(self) -> None: config_services.set_property( 'committer_id', 'notify_admins_suggestions_waiting_too_long_is_enabled', True) @@ -4300,20 +4836,22 @@ class NotifyAdminsContributorDashboardReviewersNeededTests( specific suggestion types. 
""" - CURRICULUM_ADMIN_1_USERNAME = 'user1' - CURRICULUM_ADMIN_1_EMAIL = 'user1@community.org' - CURRICULUM_ADMIN_2_USERNAME = 'user2' - CURRICULUM_ADMIN_2_EMAIL = 'user2@community.org' - AUTHOR_EMAIL = 'author@example.com' - target_id = 'exp1' - skill_id = 'skill_123456' - - def _create_translation_suggestion_with_language_code(self, language_code): + CURRICULUM_ADMIN_1_USERNAME: Final = 'user1' + CURRICULUM_ADMIN_1_EMAIL: Final = 'user1@community.org' + CURRICULUM_ADMIN_2_USERNAME: Final = 'user2' + CURRICULUM_ADMIN_2_EMAIL: Final = 'user2@community.org' + AUTHOR_EMAIL: Final = 'author@example.com' + target_id: str = 'exp1' + skill_id: str = 'skill_123456' + + def _create_translation_suggestion_with_language_code( + self, language_code: str + ) -> suggestion_registry.BaseSuggestion: """Creates a translation suggestion in the given language_code.""" add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID, + 'content_id': 'content_0', 'language_code': language_code, 'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR, 'translation_html': '

    This is the translated content.

    ', @@ -4328,18 +4866,25 @@ def _create_translation_suggestion_with_language_code(self, language_code): 'test description' ) - def _create_question_suggestion(self): + def _create_question_suggestion(self) -> suggestion_registry.BaseSuggestion: """Creates a question suggestion.""" - add_question_change_dict = { + content_id_generator = translation_domain.ContentIdGenerator() + add_question_change_dict: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': self.skill_id, 'skill_difficulty': 0.3 @@ -4354,11 +4899,17 @@ def _create_question_suggestion(self): ) def _assert_email_data_stored_in_sent_email_model_is_correct( - self, expected_email_html_body, admin_id, admin_email): + self, + expected_email_html_body: str, + admin_id: Optional[str], + admin_email: str + ) -> None: """Asserts that the sent email model that was created from the email that was sent contains the right information. 
""" - sent_email_models = email_models.SentEmailModel.get_all().filter( + sent_email_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().filter( email_models.SentEmailModel.recipient_id == admin_id).fetch() self.assertEqual(len(sent_email_models), 1) sent_email_model = sent_email_models[0] @@ -4381,20 +4932,14 @@ def _assert_email_data_stored_in_sent_email_model_is_correct( sent_email_model.intent, feconf.EMAIL_INTENT_ADD_CONTRIBUTOR_DASHBOARD_REVIEWERS) - def _log_error_for_tests(self, error_message): - """Appends the error message to the logged errors list.""" - self.logged_errors.append(error_message) - - def _mock_logging_info(self, msg, *args): + def _mock_logging_info(self, msg: str, *args: str) -> None: """Mocks logging.info() by appending the log message to the logged info list. """ self.logged_info.append(msg % args) - def setUp(self): - super( - NotifyAdminsContributorDashboardReviewersNeededTests, - self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, 'author') self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.signup( @@ -4412,73 +4957,77 @@ def setUp(self): self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True) self.cannot_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', False) - self.logged_errors = [] self.log_new_error_counter = test_utils.CallCounter( - self._log_error_for_tests) + logging.error) self.log_new_error_ctx = self.swap( - email_manager, 'log_new_error', self.log_new_error_counter) - self.logged_info = [] + logging, 'error', self.log_new_error_counter) + self.logged_info: List[str] = [] self.log_new_info_ctx = self.swap( logging, 'info', self._mock_logging_info) - self.suggestion_types_needing_reviewers = { - feconf.SUGGESTION_TYPE_ADD_QUESTION: {} + self.suggestion_types_needing_reviewers: Dict[str, Set[str]] = { + feconf.SUGGESTION_TYPE_ADD_QUESTION: set() } - def test_email_not_sent_if_can_send_emails_is_false(self): + 
def test_email_not_sent_if_can_send_emails_is_false(self) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) - with self.cannot_send_emails_ctx, self.log_new_error_ctx: - email_manager.send_mail_to_notify_admins_that_reviewers_are_needed( - [self.admin_1_id], [], [], - self.suggestion_types_needing_reviewers) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.cannot_send_emails_ctx, self.log_new_error_ctx: + email_manager.send_mail_to_notify_admins_that_reviewers_are_needed( # pylint: disable=line-too-long + [self.admin_1_id], [], [], + self.suggestion_types_needing_reviewers) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], 'This app cannot send emails to users.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], 'This app cannot send emails to users.') def test_email_not_sent_if_notifying_admins_reviewers_needed_is_disabled( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', False) - with self.can_send_emails_ctx, self.log_new_error_ctx: - email_manager.send_mail_to_notify_admins_that_reviewers_are_needed( - [self.admin_1_id], [], [], - self.suggestion_types_needing_reviewers) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.can_send_emails_ctx, self.log_new_error_ctx: + email_manager.send_mail_to_notify_admins_that_reviewers_are_needed( # pylint: disable=line-too-long + [self.admin_1_id], [], [], + self.suggestion_types_needing_reviewers) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - 
self.logged_errors[0], - 'The "enable_admin_notifications_for_reviewer_shortage" ' - 'property must be enabled on the admin config page in order to ' - 'send admins the emails.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], + 'The "enable_admin_notifications_for_reviewer_shortage" ' + 'property must be enabled on the admin config page in order to ' + 'send admins the emails.') - def test_email_not_sent_if_no_admins_to_notify(self): + def test_email_not_sent_if_no_admins_to_notify(self) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) - with self.can_send_emails_ctx, self.log_new_error_ctx: - email_manager.send_mail_to_notify_admins_that_reviewers_are_needed( - [], [], [], - self.suggestion_types_needing_reviewers) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.can_send_emails_ctx, self.log_new_error_ctx: + email_manager.send_mail_to_notify_admins_that_reviewers_are_needed( # pylint: disable=line-too-long + [], [], [], + self.suggestion_types_needing_reviewers) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], 'There were no admins to notify.') + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], 'There were no admins to notify.') def test_email_not_sent_if_no_suggestion_types_that_need_reviewers( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) @@ -4494,26 +5043,29 @@ def test_email_not_sent_if_no_suggestion_types_that_need_reviewers( 'There were no suggestion types that needed more reviewers on 
the ' 'Contributor Dashboard.') - def test_email_not_sent_if_admin_email_does_not_exist(self): + def test_email_not_sent_if_admin_email_does_not_exist(self) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) - with self.can_send_emails_ctx, self.log_new_error_ctx: - email_manager.send_mail_to_notify_admins_that_reviewers_are_needed( - ['admin_id_without_email'], [], [], - self.suggestion_types_needing_reviewers) + with self.capture_logging(min_level=logging.ERROR) as logs: + with self.can_send_emails_ctx, self.log_new_error_ctx: + email_manager.send_mail_to_notify_admins_that_reviewers_are_needed( # pylint: disable=line-too-long + ['admin_id_without_email'], [], [], + self.suggestion_types_needing_reviewers) - messages = self._get_all_sent_email_messages() - self.assertEqual(len(messages), 0) - self.assertEqual(self.log_new_error_counter.times_called, 1) - self.assertEqual( - self.logged_errors[0], - 'There was no email for the given admin id: admin_id_without_email.' - ) + messages = self._get_all_sent_email_messages() + self.assertEqual(len(messages), 0) + self.assertEqual(self.log_new_error_counter.times_called, 1) + self.assertEqual( + logs[0], + 'There was no email for the given admin id: ' + 'admin_id_without_email.' + ) def test_email_sent_to_admin_if_question_suggestions_need_reviewers( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) @@ -4522,7 +5074,7 @@ def test_email_sent_to_admin_if_question_suggestions_need_reviewers( suggestion_services.get_suggestion_types_that_need_reviewers()) self.assertDictEqual( suggestion_types_needing_reviewers, - {feconf.SUGGESTION_TYPE_ADD_QUESTION: {}}) + {feconf.SUGGESTION_TYPE_ADD_QUESTION: set()}) expected_email_html_body = ( 'Hi user1,' '

    ' @@ -4558,7 +5110,8 @@ def test_email_sent_to_admin_if_question_suggestions_need_reviewers( self.CURRICULUM_ADMIN_1_EMAIL) def test_email_sent_to_admins_if_question_suggestions_need_reviewers( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) @@ -4567,7 +5120,7 @@ def test_email_sent_to_admins_if_question_suggestions_need_reviewers( suggestion_services.get_suggestion_types_that_need_reviewers()) self.assertDictEqual( suggestion_types_needing_reviewers, - {feconf.SUGGESTION_TYPE_ADD_QUESTION: {}}) + {feconf.SUGGESTION_TYPE_ADD_QUESTION: set()}) expected_email_html_body_for_admin_1 = ( 'Hi user1,' '

    ' @@ -4627,7 +5180,8 @@ def test_email_sent_to_admins_if_question_suggestions_need_reviewers( self.CURRICULUM_ADMIN_2_EMAIL) def test_admin_email_sent_if_translations_need_reviewers_for_one_lang( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) @@ -4645,9 +5199,9 @@ def test_admin_email_sent_if_translations_need_reviewers_for_one_lang( 'their username(s) and allow reviewing for the suggestion types ' 'that need more reviewers bolded below.' '

    ' - 'There have been Hindi translation suggestions created on ' - 'the Contributor Dashboard page where there ' - 'are not enough reviewers.

    ' + 'There have been हिन्दी (Hindi) translation suggestions ' + 'created on the Contributor Dashboard page where' + ' there are not enough reviewers.

    ' 'Thanks so much - we appreciate your help!

    ' 'Best Wishes!
    ' '- The Oppia Contributor Dashboard Team' % ( @@ -4671,7 +5225,8 @@ def test_admin_email_sent_if_translations_need_reviewers_for_one_lang( self.CURRICULUM_ADMIN_1_EMAIL) def test_admin_emails_sent_if_translations_need_reviewers_for_one_lang( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) @@ -4689,9 +5244,9 @@ def test_admin_emails_sent_if_translations_need_reviewers_for_one_lang( 'their username(s) and allow reviewing for the suggestion types ' 'that need more reviewers bolded below.' '

    ' - 'There have been Hindi translation suggestions created on ' - 'the Contributor Dashboard page where there ' - 'are not enough reviewers.

    ' + 'There have been हिन्दी (Hindi) translation suggestions ' + 'created on the Contributor Dashboard page where' + ' there are not enough reviewers.

    ' 'Thanks so much - we appreciate your help!

    ' 'Best Wishes!
    ' '- The Oppia Contributor Dashboard Team' % ( @@ -4706,9 +5261,9 @@ def test_admin_emails_sent_if_translations_need_reviewers_for_one_lang( 'their username(s) and allow reviewing for the suggestion types ' 'that need more reviewers bolded below.' '

    ' - 'There have been Hindi translation suggestions created on ' - 'the Contributor Dashboard page where there ' - 'are not enough reviewers.

    ' + 'There have been हिन्दी (Hindi) translation suggestions ' + 'created on the Contributor Dashboard page where' + ' there are not enough reviewers.

    ' 'Thanks so much - we appreciate your help!

    ' 'Best Wishes!
    ' '- The Oppia Contributor Dashboard Team' % ( @@ -4738,7 +5293,8 @@ def test_admin_emails_sent_if_translations_need_reviewers_for_one_lang( self.CURRICULUM_ADMIN_2_EMAIL) def test_admin_email_sent_if_translations_need_reviewers_for_multi_lang( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) @@ -4763,8 +5319,8 @@ def test_admin_email_sent_if_translations_need_reviewers_for_multi_lang( 'there are not enough reviewers. The languages that need more ' 'reviewers are:' '
      ' - '
    • French

    • ' - '
    • Hindi

    • ' + '
    • français (French)

    • ' + '
    • हिन्दी (Hindi)

    • ' '

    ' 'Thanks so much - we appreciate your help!

    ' 'Best Wishes!
    ' @@ -4789,7 +5345,8 @@ def test_admin_email_sent_if_translations_need_reviewers_for_multi_lang( self.CURRICULUM_ADMIN_1_EMAIL) def test_admin_emails_sent_if_translations_need_reviewers_for_multi_lang( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) @@ -4814,8 +5371,8 @@ def test_admin_emails_sent_if_translations_need_reviewers_for_multi_lang( 'there are not enough reviewers. The languages that need more ' 'reviewers are:' '
      ' - '
    • French

    • ' - '
    • Hindi

    • ' + '
    • français (French)

    • ' + '
    • हिन्दी (Hindi)

    • ' '

    ' 'Thanks so much - we appreciate your help!

    ' 'Best Wishes!
    ' @@ -4836,8 +5393,8 @@ def test_admin_emails_sent_if_translations_need_reviewers_for_multi_lang( 'there are not enough reviewers. The languages that need more ' 'reviewers are:' '
      ' - '
    • French

    • ' - '
    • Hindi

    • ' + '
    • français (French)

    • ' + '
    • हिन्दी (Hindi)

    • ' '

    ' 'Thanks so much - we appreciate your help!

    ' 'Best Wishes!
    ' @@ -4868,7 +5425,8 @@ def test_admin_emails_sent_if_translations_need_reviewers_for_multi_lang( self.CURRICULUM_ADMIN_2_EMAIL) def test_email_sent_to_admins_if_mutli_suggestion_types_needing_reviewers( - self): + self + ) -> None: config_services.set_property( 'committer_id', 'enable_admin_notifications_for_reviewer_shortage', True) @@ -4882,7 +5440,7 @@ def test_email_sent_to_admins_if_mutli_suggestion_types_needing_reviewers( { feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: { 'fr', 'hi'}, - feconf.SUGGESTION_TYPE_ADD_QUESTION: {} + feconf.SUGGESTION_TYPE_ADD_QUESTION: set() }) expected_email_html_body_for_admin_1 = ( 'Hi user1,' @@ -4897,8 +5455,8 @@ def test_email_sent_to_admins_if_mutli_suggestion_types_needing_reviewers( 'there are not enough reviewers. The languages that need more ' 'reviewers are:' '
      ' - '
    • French

    • ' - '
    • Hindi

    • ' + '
    • français (French)

    • ' + '
    • हिन्दी (Hindi)

    • ' '

    ' 'Thanks so much - we appreciate your help!

    ' 'Best Wishes!
    ' @@ -4919,8 +5477,8 @@ def test_email_sent_to_admins_if_mutli_suggestion_types_needing_reviewers( 'there are not enough reviewers. The languages that need more ' 'reviewers are:' '
      ' - '
    • French

    • ' - '
    • Hindi

    • ' + '
    • français (French)

    • ' + '
    • हिन्दी (Hindi)

    • ' '

    ' 'Thanks so much - we appreciate your help!

    ' 'Best Wishes!
    ' @@ -4944,10 +5502,21 @@ def test_email_sent_to_admins_if_mutli_suggestion_types_needing_reviewers( self.assertEqual(messages[0].html, expected_email_html_body_for_admin_2) # Make sure correct email models are stored. - sent_email_models = email_models.SentEmailModel.get_all().filter( + sent_email_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().filter( email_models.SentEmailModel.recipient_id == self.admin_1_id).fetch() + # Here, we are narrowing down the type of 'sent_email_models' from + # Sequence to List. Because Sequence is a broader type and it does not + # contain extra methods (e.g: .sort()), and below we are using .sort() + # method by assuming that 'sent_email_models' is an instance of list. + # So, to avoid error we used assertion here. + assert isinstance(sent_email_models, list) self.assertEqual(len(sent_email_models), 2) - sent_email_models.sort(key=lambda m: m.html_body) + email_sort_fn: Callable[[email_models.SentEmailModel], str] = ( + lambda m: m.html_body if isinstance(m.html_body, str) else '' + ) + sent_email_models.sort(key=email_sort_fn) sent_email_model = sent_email_models[1] self.assertEqual( sent_email_model.subject, @@ -4978,17 +5547,17 @@ class QueryStatusNotificationEmailTests(test_utils.EmailTestBase): or failed. 
""" - SUBMITTER_USERNAME = 'submit' - SUBMITTER_EMAIL = 'submit@example.com' - SENDER_USERNAME = 'sender' - SENDER_EMAIL = 'sender@example.com' - RECIPIENT_A_EMAIL = 'a@example.com' - RECIPIENT_A_USERNAME = 'usera' - RECIPIENT_B_EMAIL = 'b@example.com' - RECIPIENT_B_USERNAME = 'userb' - - def setUp(self): - super(QueryStatusNotificationEmailTests, self).setUp() + SUBMITTER_USERNAME: Final = 'submit' + SUBMITTER_EMAIL: Final = 'submit@example.com' + SENDER_USERNAME: Final = 'sender' + SENDER_EMAIL: Final = 'sender@example.com' + RECIPIENT_A_EMAIL: Final = 'a@example.com' + RECIPIENT_A_USERNAME: Final = 'usera' + RECIPIENT_B_EMAIL: Final = 'b@example.com' + RECIPIENT_B_USERNAME: Final = 'userb' + + def setUp(self) -> None: + super().setUp() self.signup(self.SUBMITTER_EMAIL, self.SUBMITTER_USERNAME) self.submitter_id = self.get_user_id_from_email(self.SUBMITTER_EMAIL) self.signup(self.SENDER_EMAIL, self.SENDER_USERNAME) @@ -5003,7 +5572,7 @@ def setUp(self): self.RECIPIENT_B_EMAIL) self.recipient_ids = [self.recipient_a_id, self.recipient_b_id] - def test_that_correct_completion_email_is_sent(self): + def test_that_correct_completion_email_is_sent(self) -> None: query_id = 'qid' expected_email_subject = 'Query qid has successfully completed' expected_email_html_body = ( @@ -5044,7 +5613,9 @@ def test_that_correct_completion_email_is_sent(self): self.assertEqual(messages[0].body, expected_email_text_body) # Make sure correct email model is stored. 
- all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, expected_email_subject) @@ -5061,7 +5632,7 @@ def test_that_correct_completion_email_is_sent(self): sent_email_model.intent, feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION) - def test_that_correct_failure_email_is_sent(self): + def test_that_correct_failure_email_is_sent(self) -> None: query_id = 'qid' query_params = { 'key1': 'val1', @@ -5114,7 +5685,9 @@ def test_that_correct_failure_email_is_sent(self): self.assertEqual(messages[0].body, expected_email_text_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, expected_email_subject) @@ -5138,7 +5711,7 @@ def test_that_correct_failure_email_is_sent(self): self.assertEqual( admin_messages[0].body, expected_admin_email_text_body) - def test_send_user_query_email(self): + def test_send_user_query_email(self) -> None: email_subject = 'Bulk Email User Query Subject' email_body = 'Bulk Email User Query Body' email_intent = feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION @@ -5157,13 +5730,13 @@ def test_send_user_query_email(self): self.assertEqual(len(messages_b), 1) # Make sure correct email model is stored. 
- all_models = email_models.BulkEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.BulkEmailModel + ] = email_models.BulkEmailModel.get_all().fetch() self.assertEqual(len(all_models), 1) sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, email_subject) - self.assertEqual( - sent_email_model.recipient_ids, self.recipient_ids) self.assertEqual( sent_email_model.sender_id, self.sender_id) self.assertEqual( @@ -5174,167 +5747,62 @@ def test_send_user_query_email(self): email_intent) -class VoiceoverApplicationEmailUnitTest(test_utils.EmailTestBase): - """Unit test related to voiceover application emails.""" +class AccountDeletionEmailUnitTest(test_utils.EmailTestBase): + """Unit test related to account deletion application emails.""" - APPLICANT_USERNAME = 'applicant' - APPLICANT_EMAIL = 'applicant@example.com' + APPLICANT_USERNAME: Final = 'applicant' + APPLICANT_EMAIL: Final = 'applicant@example.com' - def setUp(self): - super(VoiceoverApplicationEmailUnitTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.APPLICANT_EMAIL, self.APPLICANT_USERNAME) self.applicant_id = self.get_user_id_from_email(self.APPLICANT_EMAIL) - user_services.update_email_preferences( - self.applicant_id, True, False, False, False) self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True) self.can_not_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', False) - def test_that_email_not_sent_if_can_send_emails_is_false(self): + def test_that_email_not_sent_if_can_send_emails_is_false(self) -> None: with self.can_not_send_emails_ctx: - email_manager.send_accepted_voiceover_application_email( - self.applicant_id, 'Lesson to voiceover', 'en') + email_manager.send_account_deleted_email( + self.applicant_id, self.APPLICANT_EMAIL) messages = self._get_sent_email_messages( self.APPLICANT_EMAIL) self.assertEqual(len(messages), 0) - def test_that_correct_accepted_voiceover_application_email_is_sent(self): - 
expected_email_subject = ( - '[Accepted] Updates on submitted voiceover application') - expected_email_html_body = ( - 'Hi applicant,

    ' - 'Congratulations! Your voiceover application for ' - '"Lesson to voiceover" lesson got accepted and you have been ' - 'assigned with a voice artist role in the lesson. Now you will be ' - 'able to add voiceovers to the lesson in English ' - 'language.' - '

    You can check the wiki page to learn' - 'how to voiceover a lesson' - '

    ' - 'Thank you for helping improve Oppia\'s lessons!' - '- The Oppia Team
    ' - '
    ' - 'You can change your email preferences via the ' - 'Preferences page.') + def test_account_deletion_failed_email_is_sent_correctly(self) -> None: + dummy_admin_address = 'admin@system.com' - with self.can_send_emails_ctx: - email_manager.send_accepted_voiceover_application_email( - self.applicant_id, 'Lesson to voiceover', 'en') + admin_email_ctx = self.swap( + feconf, 'ADMIN_EMAIL_ADDRESS', dummy_admin_address) - # Make sure correct email is sent. + with self.can_send_emails_ctx, admin_email_ctx: + # Make sure there are no emails already sent. messages = self._get_sent_email_messages( - self.APPLICANT_EMAIL) - self.assertEqual(len(messages), 1) - self.assertEqual(messages[0].html, expected_email_html_body) - - # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() - sent_email_model = all_models[0] - self.assertEqual( - sent_email_model.subject, expected_email_subject) - self.assertEqual( - sent_email_model.recipient_id, self.applicant_id) - self.assertEqual( - sent_email_model.recipient_email, self.APPLICANT_EMAIL) - self.assertEqual( - sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID) - self.assertEqual( - sent_email_model.sender_email, - 'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS) - self.assertEqual( - sent_email_model.intent, - feconf.EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES) - - def test_that_correct_rejected_voiceover_application_email_is_sent(self): - expected_email_subject = 'Updates on submitted voiceover application' - expected_email_html_body = ( - 'Hi applicant,

    ' - 'Your voiceover application for "Lesson to voiceover" lesson in ' - 'language English got rejected and the reviewer has left a message.' - '

    Review message: A rejection message!

    ' - 'You can create a new voiceover application through the' - '' - 'contributor dashboard page.

    ' - '- The Oppia Team
    ' - '
    ' - 'You can change your email preferences via the ' - 'Preferences page.') + feconf.ADMIN_EMAIL_ADDRESS) + self.assertEqual(messages, []) - with self.can_send_emails_ctx: - email_manager.send_rejected_voiceover_application_email( - self.applicant_id, 'Lesson to voiceover', 'en', - 'A rejection message!') + # Send an account deletion failed email to admin. + email_manager.send_account_deletion_failed_email( + self.applicant_id, self.APPLICANT_EMAIL + ) - # Make sure correct email is sent. + # Make sure emails are sent. messages = self._get_sent_email_messages( - self.APPLICANT_EMAIL) + feconf.ADMIN_EMAIL_ADDRESS) self.assertEqual(len(messages), 1) - self.assertEqual(messages[0].html, expected_email_html_body) - - # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() - sent_email_model = all_models[0] - self.assertEqual( - sent_email_model.subject, expected_email_subject) - self.assertEqual( - sent_email_model.recipient_id, self.applicant_id) - self.assertEqual( - sent_email_model.recipient_email, self.APPLICANT_EMAIL) - self.assertEqual( - sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID) - self.assertEqual( - sent_email_model.sender_email, - 'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS) - self.assertEqual( - sent_email_model.intent, - feconf.EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES) - - def test_can_send_emails_is_false_logs_error(self): - """When feconf.CAN_SEND_EMAILS is false, - send_rejected_voiceover_application_email(*args) should log an error. - """ - observed_log_messages = [] - - def _mock_logging_function(msg, *args): - """Mocks logging.error().""" - observed_log_messages.append(msg % args) - - with self.swap(logging, 'error', _mock_logging_function): - email_manager.send_rejected_voiceover_application_email( - self.applicant_id, 'Lesson to voiceover', 'en', - 'A rejection message!') - - expected_log_message = 'This app cannot send emails to users.' 
+ self.assertEqual(messages[0].to, ['admin@system.com']) self.assertEqual( - observed_log_messages, [expected_log_message]) - - -class AccountDeletionEmailUnitTest(test_utils.EmailTestBase): - """Unit test related to account deletion application emails.""" - - APPLICANT_USERNAME = 'applicant' - APPLICANT_EMAIL = 'applicant@example.com' - - def setUp(self): - super(AccountDeletionEmailUnitTest, self).setUp() - self.signup(self.APPLICANT_EMAIL, self.APPLICANT_USERNAME) - self.applicant_id = self.get_user_id_from_email(self.APPLICANT_EMAIL) - self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True) - self.can_not_send_emails_ctx = self.swap( - feconf, 'CAN_SEND_EMAILS', False) - - def test_that_email_not_sent_if_can_send_emails_is_false(self): - with self.can_not_send_emails_ctx: - email_manager.send_account_deleted_email( - self.applicant_id, self.APPLICANT_EMAIL) - - messages = self._get_sent_email_messages( - self.APPLICANT_EMAIL) - self.assertEqual(len(messages), 0) + messages[0].subject, + 'WIPEOUT: Account deletion failed' + ) + self.assertIn( + 'The Wipeout process failed for the user with ID \'%s\' and ' + 'email \'%s\'.' % (self.applicant_id, self.APPLICANT_EMAIL), + messages[0].html + ) - def test_that_correct_account_deleted_email_is_sent(self): + def test_that_correct_account_deleted_email_is_sent(self) -> None: expected_email_subject = 'Account deleted' expected_email_html_body = ( 'Hi applicant@example.com,

    ' @@ -5352,7 +5820,9 @@ def test_that_correct_account_deleted_email_is_sent(self): self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, expected_email_subject) @@ -5370,17 +5840,17 @@ def test_that_correct_account_deleted_email_is_sent(self): class BulkEmailsTests(test_utils.EmailTestBase): - SENDER_EMAIL = 'sender@example.com' - SENDER_USERNAME = 'sender' - FAKE_SENDER_EMAIL = 'fake@example.com' - FAKE_SENDER_USERNAME = 'fake' - RECIPIENT_A_EMAIL = 'a@example.com' - RECIPIENT_A_USERNAME = 'usera' - RECIPIENT_B_EMAIL = 'b@example.com' - RECIPIENT_B_USERNAME = 'userb' - - def setUp(self): - super(BulkEmailsTests, self).setUp() + SENDER_EMAIL: Final = 'sender@example.com' + SENDER_USERNAME: Final = 'sender' + FAKE_SENDER_EMAIL: Final = 'fake@example.com' + FAKE_SENDER_USERNAME: Final = 'fake' + RECIPIENT_A_EMAIL: Final = 'a@example.com' + RECIPIENT_A_USERNAME: Final = 'usera' + RECIPIENT_B_EMAIL: Final = 'b@example.com' + RECIPIENT_B_USERNAME: Final = 'userb' + + def setUp(self) -> None: + super().setUp() # SENDER is authorised sender. # FAKE_SENDER is unauthorised sender. # A and B are recipients. @@ -5400,7 +5870,7 @@ def setUp(self): self.set_curriculum_admins([self.SENDER_USERNAME]) self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True) - def test_that_correct_email_is_sent(self): + def test_that_correct_email_is_sent(self) -> None: email_subject = 'Dummy subject' email_html_body = 'Dummy email body.
    ' email_text_body = 'Dummy email body.\n' @@ -5423,15 +5893,15 @@ def test_that_correct_email_is_sent(self): self.assertEqual(messages_b[0].body, email_text_body) # Make sure correct email model is stored. - all_models = email_models.BulkEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.BulkEmailModel.get_all().fetch() self.assertEqual(len(all_models), 1) sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, email_subject) self.assertEqual( sent_email_model.html_body, email_html_body) - self.assertEqual( - sent_email_model.recipient_ids, self.recipient_ids) self.assertEqual( sent_email_model.sender_id, self.sender_id) self.assertEqual( @@ -5441,7 +5911,9 @@ def test_that_correct_email_is_sent(self): sent_email_model.intent, feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION) - def test_email_not_sent_if_original_html_not_matches_cleaned_html(self): + def test_email_not_sent_if_original_html_not_matches_cleaned_html( + self + ) -> None: email_subject = 'Dummy Email Subject' email_html_body = 'Dummy email body.' 
@@ -5460,9 +5932,9 @@ def test_email_not_sent_if_original_html_not_matches_cleaned_html(self): self.RECIPIENT_B_EMAIL) self.assertEqual(len(messages_b), 0) - def test_that_exception_is_raised_for_unauthorised_sender(self): + def test_that_exception_is_raised_for_unauthorised_sender(self) -> None: with self.can_send_emails_ctx, ( - self.assertRaisesRegexp( + self.assertRaisesRegex( Exception, 'Invalid sender_id for email')): email_manager.send_user_query_email( self.fake_sender_id, self.recipient_ids, 'email_subject', @@ -5476,10 +5948,12 @@ def test_that_exception_is_raised_for_unauthorised_sender(self): self.RECIPIENT_B_EMAIL) self.assertEqual(len(messages_b), 0) - all_models = email_models.BulkEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.BulkEmailModel + ] = email_models.BulkEmailModel.get_all().fetch() self.assertEqual(len(all_models), 0) - def test_that_test_email_is_sent_for_bulk_emails(self): + def test_that_test_email_is_sent_for_bulk_emails(self) -> None: email_subject = 'Test Subject' email_body = 'Test Body' with self.can_send_emails_ctx: @@ -5492,15 +5966,14 @@ def test_that_test_email_is_sent_for_bulk_emails(self): class EmailPreferencesTests(test_utils.EmailTestBase): - def test_can_users_receive_thread_email(self): + def test_can_users_receive_thread_email(self) -> None: gae_ids = ('someUser1', 'someUser2') exp_id = 'someExploration' usernames = ('username1', 'username2') emails = ('user1@example.com', 'user2@example.com') user_ids = [] - for user_id, username, user_email in python_utils.ZIP( - gae_ids, usernames, emails): + for user_id, username, user_email in zip(gae_ids, usernames, emails): user_settings = user_services.create_new_user(user_id, user_email) user_ids.append(user_settings.user_id) user_services.set_username(user_settings.user_id, username) @@ -5567,13 +6040,11 @@ def test_can_users_receive_thread_email(self): class ModeratorActionEmailsTests(test_utils.EmailTestBase): - MODERATOR_EMAIL = 'moderator@example.com' 
- MODERATOR_USERNAME = 'moderator' - RECIPIENT_EMAIL = 'a@example.com' - RECIPIENT_USERNAME = 'usera' + RECIPIENT_EMAIL: Final = 'a@example.com' + RECIPIENT_USERNAME: Final = 'usera' - def setUp(self): - super(ModeratorActionEmailsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL) self.set_moderators([self.MODERATOR_USERNAME]) @@ -5587,21 +6058,25 @@ def setUp(self): self.can_send_email_moderator_action_ctx = self.swap( feconf, 'REQUIRE_EMAIL_ON_MODERATOR_ACTION', True) - def test_exception_raised_if_email_on_moderator_action_is_false(self): - with self.assertRaisesRegexp( + def test_exception_raised_if_email_on_moderator_action_is_false( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'For moderator emails to be sent, please ensure that ' 'REQUIRE_EMAIL_ON_MODERATOR_ACTION is set to True.'): email_manager.require_moderator_email_prereqs_are_satisfied() - def test_exception_raised_if_can_send_emails_is_false(self): - with self.can_send_email_moderator_action_ctx, self.assertRaisesRegexp( + def test_exception_raised_if_can_send_emails_is_false(self) -> None: + with self.can_send_email_moderator_action_ctx, self.assertRaisesRegex( Exception, 'For moderator emails to be sent, please ensure that ' 'CAN_SEND_EMAILS is set to True.'): email_manager.require_moderator_email_prereqs_are_satisfied() - def test_correct_email_draft_received_on_exploration_unpublish(self): + def test_correct_email_draft_received_on_exploration_unpublish( + self + ) -> None: expected_draft_text_body = ( 'I\'m writing to inform you that ' 'I have unpublished the above exploration.') @@ -5609,13 +6084,15 @@ def test_correct_email_draft_received_on_exploration_unpublish(self): d_text = email_manager.get_moderator_unpublish_exploration_email() self.assertEqual(d_text, expected_draft_text_body) - def 
test_blank_draft_received_exploration_unpublish_exception_raised(self): + def test_blank_draft_received_exploration_unpublish_exception_raised( + self + ) -> None: expected_draft_text_body = '' with self.can_not_send_emails_ctx: d_text = email_manager.get_moderator_unpublish_exploration_email() self.assertEqual(d_text, expected_draft_text_body) - def test_correct_moderator_action_email_sent(self): + def test_correct_moderator_action_email_sent(self) -> None: email_intent = 'unpublish_exploration' exploration_title = 'Title' email_html_body = 'Dummy email body.
    ' @@ -5632,12 +6109,12 @@ def test_correct_moderator_action_email_sent(self): class ContributionReviewerEmailTest(test_utils.EmailTestBase): """Test for assignment and removal of contribution reviewers.""" - TRANSLATION_REVIEWER_EMAIL = 'translationreviewer@example.com' - VOICEOVER_REVIEWER_EMAIL = 'voiceoverreviewer@example.com' - QUESTION_REVIEWER_EMAIL = 'questionreviewer@example.com' + TRANSLATION_REVIEWER_EMAIL: Final = 'translationreviewer@example.com' + VOICEOVER_REVIEWER_EMAIL: Final = 'voiceoverreviewer@example.com' + QUESTION_REVIEWER_EMAIL: Final = 'questionreviewer@example.com' - def setUp(self): - super(ContributionReviewerEmailTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.TRANSLATION_REVIEWER_EMAIL, 'translator') self.signup(self.VOICEOVER_REVIEWER_EMAIL, 'voiceartist') @@ -5662,7 +6139,8 @@ def setUp(self): feconf, 'CAN_SEND_EMAILS', False) def test_assign_translation_reviewer_email_for_can_send_emails_is_false( - self): + self + ) -> None: with self.can_not_send_emails_ctx: email_manager.send_email_to_new_contribution_reviewer( self.translation_reviewer_id, @@ -5673,13 +6151,39 @@ def test_assign_translation_reviewer_email_for_can_send_emails_is_false( self.TRANSLATION_REVIEWER_EMAIL) self.assertEqual(len(messages), 0) + def test_without_language_code_email_not_sent_to_new_translation_reviewer( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'The language_code cannot be None' + ): + with self.can_not_send_emails_ctx: + email_manager.send_email_to_new_contribution_reviewer( + self.translation_reviewer_id, + constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION + ) + + def test_without_language_code_email_not_sent_to_removed_translation_reviewer( # pylint: disable=line-too-long + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'The language_code cannot be None' + ): + with self.can_not_send_emails_ctx: + 
email_manager.send_email_to_removed_contribution_reviewer( + self.translation_reviewer_id, + constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION) + def test_assign_translation_reviewer_email_for_invalid_review_category( - self): - with self.assertRaisesRegexp(Exception, 'Invalid review_category'): + self + ) -> None: + with self.assertRaisesRegex(Exception, 'Invalid review_category'): email_manager.send_email_to_new_contribution_reviewer( self.translation_reviewer_id, 'invalid_category') - def test_schema_of_new_reviewer_email_data_constant(self): + def test_schema_of_new_reviewer_email_data_constant(self) -> None: self.assertEqual(sorted(email_manager.NEW_REVIEWER_EMAIL_DATA.keys()), [ constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION, constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION, @@ -5695,15 +6199,15 @@ def test_schema_of_new_reviewer_email_data_constant(self): 'rights_message_template' in category_details)) self.assertTrue('to_check' in category_details) - def test_send_assigned_translation_reviewer_email(self): + def test_send_assigned_translation_reviewer_email(self) -> None: expected_email_subject = ( 'You have been invited to review Oppia translations') expected_email_html_body = ( 'Hi translator,

    ' 'This is to let you know that the Oppia team has added you as a ' - 'reviewer for Hindi language translations. This allows you to ' - 'review translation suggestions made by contributors in the ' - 'Hindi language.

    ' + 'reviewer for हिन्दी (hindi) language translations. This allows you' + ' to review translation suggestions made by contributors in the ' + 'हिन्दी (hindi) language.

    ' 'You can check the translation suggestions waiting for review in ' 'the ' 'Contributor Dashboard.

    ' @@ -5724,7 +6228,9 @@ def test_send_assigned_translation_reviewer_email(self): self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, expected_email_subject) @@ -5741,15 +6247,15 @@ def test_send_assigned_translation_reviewer_email(self): self.assertEqual( sent_email_model.intent, feconf.EMAIL_INTENT_ONBOARD_REVIEWER) - def test_send_assigned_voiceover_reviewer_email(self): + def test_send_assigned_voiceover_reviewer_email(self) -> None: expected_email_subject = ( 'You have been invited to review Oppia voiceovers') expected_email_html_body = ( 'Hi voiceartist,

    ' 'This is to let you know that the Oppia team has added you as a ' - 'reviewer for Hindi language voiceovers. This allows you to ' - 'review voiceover applications made by contributors in the ' - 'Hindi language.

    ' + 'reviewer for हिन्दी (hindi) language voiceovers. This allows you ' + 'to review voiceover applications made by contributors in the ' + 'हिन्दी (hindi) language.

    ' 'You can check the voiceover applications waiting for review in ' 'the ' 'Contributor Dashboard.

    ' @@ -5770,7 +6276,9 @@ def test_send_assigned_voiceover_reviewer_email(self): self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, expected_email_subject) @@ -5787,7 +6295,7 @@ def test_send_assigned_voiceover_reviewer_email(self): self.assertEqual( sent_email_model.intent, feconf.EMAIL_INTENT_ONBOARD_REVIEWER) - def test_send_assigned_question_reviewer_email(self): + def test_send_assigned_question_reviewer_email(self) -> None: expected_email_subject = ( 'You have been invited to review Oppia questions') expected_email_html_body = ( @@ -5815,7 +6323,9 @@ def test_send_assigned_question_reviewer_email(self): self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. 
- all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, expected_email_subject) @@ -5831,7 +6341,7 @@ def test_send_assigned_question_reviewer_email(self): self.assertEqual( sent_email_model.intent, feconf.EMAIL_INTENT_ONBOARD_REVIEWER) - def test_email_is_not_sent_can_send_emails_is_false(self): + def test_email_is_not_sent_can_send_emails_is_false(self) -> None: with self.can_not_send_emails_ctx: email_manager.send_email_to_removed_contribution_reviewer( self.translation_reviewer_id, @@ -5843,12 +6353,13 @@ def test_email_is_not_sent_can_send_emails_is_false(self): self.assertEqual(len(messages), 0) def test_remove_translation_reviewer_email_for_invalid_review_category( - self): - with self.assertRaisesRegexp(Exception, 'Invalid review_category'): + self + ) -> None: + with self.assertRaisesRegex(Exception, 'Invalid review_category'): email_manager.send_email_to_removed_contribution_reviewer( self.translation_reviewer_id, 'invalid_category') - def test_schema_of_removed_reviewer_email_data_constant(self): + def test_schema_of_removed_reviewer_email_data_constant(self) -> None: self.assertEqual( sorted(email_manager.REMOVED_REVIEWER_EMAIL_DATA.keys()), [ constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION, @@ -5866,16 +6377,16 @@ def test_schema_of_removed_reviewer_email_data_constant(self): 'rights_message_template' in category_details)) self.assertTrue('contribution_allowed' in category_details) - def test_send_removed_translation_reviewer_email(self): + def test_send_removed_translation_reviewer_email(self) -> None: expected_email_subject = ( 'You have been unassigned as a translation reviewer') expected_email_html_body = ( 'Hi translator,

    ' 'The Oppia team has removed you from the translation reviewer role ' - 'in the Hindi language. You won\'t be able to review translation ' - 'suggestions made by contributors in the Hindi language any more, ' - 'but you can still contribute translations through the ' - '' + 'in the हिन्दी (hindi) language. You won\'t be able to review ' + 'translation suggestions made by contributors in the हिन्दी (hindi)' + ' language any more, but you can still contribute translations ' + 'through the ' 'Contributor Dashboard.

    ' 'Thanks, and happy contributing!

    ' 'Best wishes,
    ' @@ -5894,7 +6405,9 @@ def test_send_removed_translation_reviewer_email(self): self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, expected_email_subject) @@ -5911,16 +6424,16 @@ def test_send_removed_translation_reviewer_email(self): self.assertEqual( sent_email_model.intent, feconf.EMAIL_INTENT_REMOVE_REVIEWER) - def test_send_removed_voiceover_reviewer_email(self): + def test_send_removed_voiceover_reviewer_email(self) -> None: expected_email_subject = ( 'You have been unassigned as a voiceover reviewer') expected_email_html_body = ( 'Hi voiceartist,

    ' 'The Oppia team has removed you from the voiceover reviewer role ' - 'in the Hindi language. You won\'t be able to review voiceover ' - 'applications made by contributors in the Hindi language any more, ' - 'but you can still contribute voiceovers through the ' - '' + 'in the हिन्दी (hindi) language. You won\'t be able to review ' + 'voiceover applications made by contributors in the हिन्दी (hindi)' + ' language any more, but you can still contribute voiceovers ' + 'through the ' 'Contributor Dashboard.

    ' 'Thanks, and happy contributing!

    ' 'Best wishes,
    ' @@ -5939,7 +6452,9 @@ def test_send_removed_voiceover_reviewer_email(self): self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. - all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, expected_email_subject) @@ -5955,7 +6470,7 @@ def test_send_removed_voiceover_reviewer_email(self): self.assertEqual( sent_email_model.intent, feconf.EMAIL_INTENT_REMOVE_REVIEWER) - def test_send_removed_question_reviewer_email(self): + def test_send_removed_question_reviewer_email(self) -> None: expected_email_subject = ( 'You have been unassigned as a question reviewer') expected_email_html_body = ( @@ -5982,7 +6497,9 @@ def test_send_removed_question_reviewer_email(self): self.assertEqual(messages[0].html, expected_email_html_body) # Make sure correct email model is stored. 
- all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[ + email_models.SentEmailModel + ] = email_models.SentEmailModel.get_all().fetch() sent_email_model = all_models[0] self.assertEqual( sent_email_model.subject, expected_email_subject) @@ -5997,3 +6514,83 @@ def test_send_removed_question_reviewer_email(self): 'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS) self.assertEqual( sent_email_model.intent, feconf.EMAIL_INTENT_REMOVE_REVIEWER) + + +class NotMergeableChangesEmailUnitTest(test_utils.EmailTestBase): + """Unit test related to not mergeable change list emails sent to admin.""" + + dummy_admin_address: str = 'admin@system.com' + + def setUp(self) -> None: + super().setUp() + self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True) + self.admin_email_ctx = self.swap( + feconf, 'ADMIN_EMAIL_ADDRESS', self.dummy_admin_address) + + def test_not_mergeable_change_list_email_is_sent_correctly(self) -> None: + with self.can_send_emails_ctx, self.admin_email_ctx: + # Make sure there are no emails already sent. + messages = self._get_sent_email_messages( + feconf.ADMIN_EMAIL_ADDRESS) + self.assertEqual(messages, []) + + # Send an account deletion failed email to admin. + email_manager.send_not_mergeable_change_list_to_admin_for_review( + 'testExploration', 1, 2, [{'field1': 'value1'}] + ) + + # Make sure emails are sent. + messages = self._get_sent_email_messages( + feconf.ADMIN_EMAIL_ADDRESS) + self.assertEqual(len(messages), 1) + self.assertEqual(messages[0].to, ['admin@system.com']) + self.assertEqual( + messages[0].subject, + 'Some changes were rejected due to a conflict' + ) + self.assertIn( + 'Hi Admin,

    ' + 'Some draft changes were rejected in exploration ' + 'testExploration because the changes were conflicting and ' + 'could not be saved. Please see the ' + 'rejected change list below:
    ' + 'Discarded change list: [{\'field1\': \'value1\'}]

    ' + 'Frontend Version: 1
    ' + 'Backend Version: 2

    ' + 'Thanks!', + messages[0].html + ) + + +class MailchimpSecretTest(test_utils.GenericTestBase): + """Tests for the verify_mailchimp_secret.""" + + def setUp(self) -> None: + super().setUp() + self.swap_webhook_secrets_return_none = self.swap_to_always_return( + secrets_services, 'get_secret', None) + self.swap_webhook_secrets_return_secret = self.swap_with_checks( + secrets_services, + 'get_secret', + lambda _: 'secret', + expected_args=[ + ('MAILCHIMP_WEBHOOK_SECRET',), + ('MAILCHIMP_WEBHOOK_SECRET',), + ] + ) + + def test_cloud_secrets_return_none_logs_exception(self) -> None: + with self.swap_webhook_secrets_return_none: + with self.capture_logging(min_level=logging.WARNING) as logs: + self.assertFalse( + email_manager.verify_mailchimp_secret('secret')) + self.assertEqual( + ['Mailchimp Webhook secret is not available.'], logs + ) + + def test_cloud_secrets_return_secret_passes(self) -> None: + with self.swap_webhook_secrets_return_secret: + self.assertTrue( + email_manager.verify_mailchimp_secret('secret')) + self.assertFalse( + email_manager.verify_mailchimp_secret('not-secret')) diff --git a/core/domain/email_services.py b/core/domain/email_services.py index 38a89c156f21..48e6f99b5a63 100644 --- a/core/domain/email_services.py +++ b/core/domain/email_services.py @@ -21,11 +21,18 @@ from core import feconf from core.platform import models -(email_models,) = models.Registry.import_models([models.NAMES.email]) -platform_email_services = models.Registry.import_email_services() +from typing import List +(email_models,) = models.Registry.import_models([models.Names.EMAIL]) -def _is_email_valid(email_address): +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import email_services + +email_services = models.Registry.import_email_services() + + +def _is_email_valid(email_address: str) -> bool: """Determines whether an email address is valid. 
Args: @@ -44,10 +51,10 @@ def _is_email_valid(email_address): # Matches any characters before the "@" sign, a series of characters until # a ".", and then a series of characters after the period. regex = r'^.+@[a-zA-Z0-9-.]+\.([a-zA-Z]+|[0-9]+)$' - return re.search(regex, email_address) + return bool(re.search(regex, email_address)) -def _is_sender_email_valid(sender_email): +def _is_sender_email_valid(sender_email: str) -> bool: """Gets the sender_email address and validates that it is of the form 'SENDER_NAME ' or 'email_address'. @@ -69,8 +76,13 @@ def _is_sender_email_valid(sender_email): def send_mail( - sender_email, recipient_email, subject, plaintext_body, - html_body, bcc_admin=False): + sender_email: str, + recipient_email: str, + subject: str, + plaintext_body: str, + html_body: str, + bcc_admin: bool = False +) -> None: """Sends an email. In general this function should only be called from @@ -92,8 +104,8 @@ def send_mail( Raises: Exception. The configuration in feconf.py forbids emails from being sent. - Exception. Any recipient email address is malformed. - Exception. Any sender email address is malformed. + ValueError. Any recipient email address is malformed. + ValueError. Any sender email address is malformed. Exception. The email was not sent correctly. In other words, the send_email_to_recipients() function returned False (signifying API returned bad status code). 
@@ -109,7 +121,7 @@ def send_mail( raise ValueError( 'Malformed sender email address: %s' % sender_email) bcc = [feconf.ADMIN_EMAIL_ADDRESS] if bcc_admin else None - response = platform_email_services.send_email_to_recipients( + response = email_services.send_email_to_recipients( sender_email, [recipient_email], subject, plaintext_body, html_body, bcc, '', None) if not response: @@ -120,7 +132,12 @@ def send_mail( def send_bulk_mail( - sender_email, recipient_emails, subject, plaintext_body, html_body): + sender_email: str, + recipient_emails: List[str], + subject: str, + plaintext_body: str, + html_body: str +) -> None: """Sends emails to all recipients in recipient_emails. In general this function should only be called from @@ -141,8 +158,8 @@ def send_bulk_mail( Raises: Exception. The configuration in feconf.py forbids emails from being sent. - Exception. Any recipient email addresses are malformed. - Exception. Any sender email address is malformed. + ValueError. Any recipient email addresses are malformed. + ValueError. Any sender email address is malformed. Exception. The emails were not sent correctly. In other words, the send_email_to_recipients() function returned False (signifying API returned bad status code). 
@@ -159,7 +176,7 @@ def send_bulk_mail( raise ValueError( 'Malformed sender email address: %s' % sender_email) - response = platform_email_services.send_email_to_recipients( + response = email_services.send_email_to_recipients( sender_email, recipient_emails, subject, plaintext_body, html_body) if not response: raise Exception( diff --git a/core/domain/email_services_test.py b/core/domain/email_services_test.py index 129709becf24..4e4bc98d3f1a 100644 --- a/core/domain/email_services_test.py +++ b/core/domain/email_services_test.py @@ -22,24 +22,24 @@ from core.platform import models from core.tests import test_utils -(email_models,) = models.Registry.import_models([models.NAMES.email]) +(email_models,) = models.Registry.import_models([models.Names.EMAIL]) platform_email_services = models.Registry.import_email_services() class EmailServicesTest(test_utils.EmailTestBase): """Tests for email_services functions.""" - def test_send_mail_raises_exception_for_invalid_permissions(self): + def test_send_mail_raises_exception_for_invalid_permissions(self) -> None: """Tests the send_mail exception raised for invalid user permissions.""" send_email_exception = ( - self.assertRaisesRegexp( + self.assertRaisesRegex( Exception, 'This app cannot send emails to users.')) with send_email_exception, self.swap(constants, 'DEV_MODE', False): email_services.send_mail( feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS, 'subject', 'body', 'html', bcc_admin=False) - def test_send_mail_data_properly_sent(self): + def test_send_mail_data_properly_sent(self) -> None: """Verifies that the data sent in send_mail is correct.""" allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True) @@ -53,7 +53,7 @@ def test_send_mail_data_properly_sent(self): self.assertEqual(messages[0].body, 'body') self.assertEqual(messages[0].html, 'html') - def test_bcc_admin_flag(self): + def test_bcc_admin_flag(self) -> None: """Verifies that the bcc admin flag is working properly in send_mail. 
""" @@ -67,12 +67,12 @@ def test_bcc_admin_flag(self): self.assertEqual(len(messages), 1) self.assertEqual(messages[0].bcc, feconf.ADMIN_EMAIL_ADDRESS) - def test_send_bulk_mail_exception_for_invalid_permissions(self): + def test_send_bulk_mail_exception_for_invalid_permissions(self) -> None: """Tests the send_bulk_mail exception raised for invalid user permissions. """ send_email_exception = ( - self.assertRaisesRegexp( + self.assertRaisesRegex( Exception, 'This app cannot send emails to users.')) with send_email_exception, ( self.swap(constants, 'DEV_MODE', False) @@ -81,7 +81,7 @@ def test_send_bulk_mail_exception_for_invalid_permissions(self): feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS], 'subject', 'body', 'html') - def test_send_bulk_mail_data_properly_sent(self): + def test_send_bulk_mail_data_properly_sent(self) -> None: """Verifies that the data sent in send_bulk_mail is correct for each user in the recipient list. """ @@ -96,24 +96,29 @@ def test_send_bulk_mail_data_properly_sent(self): self.assertEqual(len(messages), 1) self.assertEqual(messages[0].to, recipients) - def test_email_not_sent_if_email_addresses_are_malformed(self): + def test_email_not_sent_if_email_addresses_are_malformed(self) -> None: """Tests that email is not sent if recipient email address is malformed. """ # Case when malformed_recipient_email is None when calling send_mail. malformed_recipient_email = None - email_exception = self.assertRaisesRegexp( + email_exception = self.assertRaisesRegex( ValueError, 'Malformed recipient email address: %s' % malformed_recipient_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. send_mail() method + # doesn't expect recipient_email to be None, and the case when + # the recipient_email is malformed must be tested this is why + # ignore[arg-type] is used here. 
email_services.send_mail( - 'sender@example.com', malformed_recipient_email, + 'sender@example.com', malformed_recipient_email, # type: ignore[arg-type] 'subject', 'body', 'html') # Case when malformed_recipient_email is an empty string when # calling send_mail. malformed_recipient_email = '' - email_exception = self.assertRaisesRegexp( + email_exception = self.assertRaisesRegex( ValueError, 'Malformed recipient email address: %s' % malformed_recipient_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: @@ -123,7 +128,7 @@ def test_email_not_sent_if_email_addresses_are_malformed(self): # Case when sender is malformed for send_mail. malformed_sender_email = 'x@x@x' - email_exception = self.assertRaisesRegexp( + email_exception = self.assertRaisesRegex( ValueError, 'Malformed sender email address: %s' % malformed_sender_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: @@ -134,7 +139,7 @@ def test_email_not_sent_if_email_addresses_are_malformed(self): # Case when the SENDER_EMAIL in brackets of 'SENDER NAME # is malformed when calling send_mail. malformed_sender_email = 'Name ' - email_exception = self.assertRaisesRegexp( + email_exception = self.assertRaisesRegex( ValueError, 'Malformed sender email address: %s' % malformed_sender_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: @@ -144,7 +149,7 @@ def test_email_not_sent_if_email_addresses_are_malformed(self): # Case when sender is malformed when calling send_bulk_mail. malformed_sender_email = 'name email@email.com' - email_exception = self.assertRaisesRegexp( + email_exception = self.assertRaisesRegex( ValueError, 'Malformed sender email address: %s' % malformed_sender_email) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: @@ -154,7 +159,7 @@ def test_email_not_sent_if_email_addresses_are_malformed(self): # Case when sender is malformed when calling send_bulk_mail. 
malformed_recipient_emails = ['a@a.com', 'email.com'] - email_exception = self.assertRaisesRegexp( + email_exception = self.assertRaisesRegex( ValueError, 'Malformed recipient email address: %s' % malformed_recipient_emails[1]) with self.swap(feconf, 'CAN_SEND_EMAILS', True), email_exception: @@ -162,10 +167,10 @@ def test_email_not_sent_if_email_addresses_are_malformed(self): 'sender@example.com', malformed_recipient_emails, 'subject', 'body', 'html') - def test_unsuccessful_status_codes_raises_exception(self): + def test_unsuccessful_status_codes_raises_exception(self) -> None: """Test that unsuccessful status codes returned raises an exception.""" - email_exception = self.assertRaisesRegexp( + email_exception = self.assertRaisesRegex( Exception, 'Bulk email failed to send. Please try again later or' + ' contact us to report a bug at https://www.oppia.org/contact.') allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True) @@ -179,7 +184,7 @@ def test_unsuccessful_status_codes_raises_exception(self): feconf.SYSTEM_EMAIL_ADDRESS, recipients, 'subject', 'body', 'html') - email_exception = self.assertRaisesRegexp( + email_exception = self.assertRaisesRegex( Exception, ( 'Email to %s failed to send. Please try again later or ' + 'contact us to report a bug at ' + diff --git a/core/domain/email_subscription_services.py b/core/domain/email_subscription_services.py index 6d541d7a2b8d..40b0cfe12e04 100644 --- a/core/domain/email_subscription_services.py +++ b/core/domain/email_subscription_services.py @@ -21,7 +21,9 @@ from core.domain import email_manager -def inform_subscribers(creator_id, exploration_id, exploration_title): +def inform_subscribers( + creator_id: str, exploration_id: str, exploration_title: str +) -> None: """Sends an email to all the subscribers of the creators when the creator publishes an exploration. 
diff --git a/core/domain/email_subscription_services_test.py b/core/domain/email_subscription_services_test.py index c079999f47c2..c8f54a442e23 100644 --- a/core/domain/email_subscription_services_test.py +++ b/core/domain/email_subscription_services_test.py @@ -24,8 +24,15 @@ from core.platform import models from core.tests import test_utils +from typing import Final, Sequence + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import email_models + from mypy_imports import user_models + (email_models, user_models) = models.Registry.import_models([ - models.NAMES.email, models.NAMES.user]) + models.Names.EMAIL, models.Names.USER]) class InformSubscribersTest(test_utils.EmailTestBase): @@ -33,14 +40,14 @@ class InformSubscribersTest(test_utils.EmailTestBase): creator. """ - USER_NAME = 'user' - USER_EMAIL = 'user@test.com' + USER_NAME: Final = 'user' + USER_EMAIL: Final = 'user@test.com' - USER_NAME_2 = 'user2' - USER_EMAIL_2 = 'user2@test.com' + USER_NAME_2: Final = 'user2' + USER_EMAIL_2: Final = 'user2@test.com' - def setUp(self): - super(InformSubscribersTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.USER_EMAIL, self.USER_NAME) self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME) @@ -59,7 +66,7 @@ def setUp(self): self.can_send_subscription_email_ctx = self.swap( feconf, 'CAN_SEND_SUBSCRIPTION_EMAILS', True) - def test_inform_subscribers(self): + def test_inform_subscribers(self) -> None: subscription_services.subscribe_to_creator( self.user_id_2, self.editor_id) subscription_services.subscribe_to_creator( @@ -94,7 +101,8 @@ def test_inform_subscribers(self): self.assertEqual(len(messages), 0) # Make sure correct email models are stored. 
- all_models = email_models.SentEmailModel.get_all().fetch() + all_models: Sequence[email_models.SentEmailModel] = ( + email_models.SentEmailModel.get_all().fetch()) self.assertEqual(True, any( model.recipient_id == self.user_id for model in all_models)) self.assertEqual(True, any( diff --git a/core/domain/event_services.py b/core/domain/event_services.py index 9663887f0874..f9d1a0107e0b 100644 --- a/core/domain/event_services.py +++ b/core/domain/event_services.py @@ -21,7 +21,6 @@ import logging from core import feconf -from core import python_utils from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import feedback_services @@ -30,8 +29,17 @@ from core.domain import taskqueue_services from core.platform import models -(feedback_models, stats_models, user_models) = models.Registry.import_models([ - models.NAMES.feedback, models.NAMES.statistics, models.NAMES.user]) +from typing import Any, Callable, Dict, Optional, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import stats_models + from mypy_imports import transaction_services + from mypy_imports import user_models + +(stats_models, user_models) = models.Registry.import_models([ + models.Names.STATISTICS, models.Names.USER +]) transaction_services = models.Registry.import_transaction_services() @@ -41,22 +49,33 @@ class BaseEventHandler: # A string denoting the type of the event. Should be specified by # subclasses and considered immutable. - EVENT_TYPE = None + EVENT_TYPE: Optional[str] = None - @classmethod - def _handle_event(cls, *args, **kwargs): - """Perform in-request processing of an incoming event.""" - raise NotImplementedError( - 'Subclasses of BaseEventHandler should implement the ' - '_handle_event() method, using explicit arguments ' - '(no *args or **kwargs).') + # Here, `_handle_event` is added only to inform MyPy that + # method `_handle_event` is always going to exists and it + # has type Callable[..., None]. 
+ _handle_event: Callable[..., None] + # TODO(#16047): Here we use type Any because in child classes this + # method can be redefined with any number of named and keyword arguments + # with different kinds of types. @classmethod - def record(cls, *args, **kwargs): + def record(cls, *args: Any, **kwargs: Any) -> None: """Process incoming events. Callers of event handlers should call this method, not _handle_event(). + + Raises: + NotImplementedError. The method _handle_event is not implemented in + derived classes. """ + if getattr(cls, '_handle_event', None) is None: + raise NotImplementedError( + 'Subclasses of BaseEventHandler should implement the ' + '_handle_event() method, using explicit arguments ' + '(no *args or **kwargs).' + ) + cls._handle_event(*args, **kwargs) @@ -65,10 +84,10 @@ class StatsEventsHandler(BaseEventHandler): stats data. """ - EVENT_TYPE = feconf.EVENT_TYPE_ALL_STATS + EVENT_TYPE: str = feconf.EVENT_TYPE_ALL_STATS @classmethod - def _is_latest_version(cls, exp_id, exp_version): + def _is_latest_version(cls, exp_id: str, exp_version: int) -> bool: """Verifies whether the exploration version for the stats to be stored corresponds to the latest version of the exploration. """ @@ -76,7 +95,15 @@ def _is_latest_version(cls, exp_id, exp_version): return exploration.version == exp_version @classmethod - def _handle_event(cls, exploration_id, exp_version, aggregated_stats): + def _handle_event( + cls, + exploration_id: str, + exp_version: int, + aggregated_stats: Dict[str, Dict[str, Union[int, str]]] + ) -> None: + """Handle events for incremental update to analytics models using + aggregated stats data. 
+ """ if 'undefined' in aggregated_stats['state_stats_mapping']: logging.error( 'Aggregated stats contains an undefined state name: %s' @@ -93,14 +120,23 @@ def _handle_event(cls, exploration_id, exp_version, aggregated_stats): class AnswerSubmissionEventHandler(BaseEventHandler): """Event handler for recording answer submissions.""" - EVENT_TYPE = feconf.EVENT_TYPE_ANSWER_SUBMITTED + EVENT_TYPE: str = feconf.EVENT_TYPE_ANSWER_SUBMITTED @classmethod def _handle_event( - cls, exploration_id, exploration_version, state_name, - interaction_id, answer_group_index, rule_spec_index, - classification_categorization, session_id, time_spent_in_secs, - params, normalized_answer): + cls, + exploration_id: str, + exploration_version: int, + state_name: str, + interaction_id: str, + answer_group_index: int, + rule_spec_index: int, + classification_categorization: str, + session_id: str, + time_spent_in_secs: float, + params: Dict[str, Union[str, int]], + normalized_answer: str + ) -> None: """Records an event when an answer triggers a rule. The answer recorded here is a Python-representation of the actual answer submitted by the user. @@ -125,11 +161,15 @@ def _handle_event( class ExplorationActualStartEventHandler(BaseEventHandler): """Event handler for recording exploration actual start events.""" - EVENT_TYPE = feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION + EVENT_TYPE: str = feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION @classmethod def _handle_event( - cls, exp_id, exp_version, state_name, session_id): + cls, exp_id: str, exp_version: int, state_name: str, session_id: str + ) -> None: + """Perform in-request processing of recording exploration actual start + events. 
+ """ stats_models.ExplorationActualStartEventLogEntryModel.create( exp_id, exp_version, state_name, session_id) @@ -137,12 +177,18 @@ def _handle_event( class SolutionHitEventHandler(BaseEventHandler): """Event handler for recording solution hit events.""" - EVENT_TYPE = feconf.EVENT_TYPE_SOLUTION_HIT + EVENT_TYPE: str = feconf.EVENT_TYPE_SOLUTION_HIT @classmethod def _handle_event( - cls, exp_id, exp_version, state_name, session_id, - time_spent_in_state_secs): + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + time_spent_in_state_secs: float + ) -> None: + """Perform in-request processing of recording solution hit events.""" stats_models.SolutionHitEventLogEntryModel.create( exp_id, exp_version, state_name, session_id, time_spent_in_state_secs) @@ -151,12 +197,21 @@ def _handle_event( class StartExplorationEventHandler(BaseEventHandler): """Event handler for recording exploration start events.""" - EVENT_TYPE = feconf.EVENT_TYPE_START_EXPLORATION + EVENT_TYPE: str = feconf.EVENT_TYPE_START_EXPLORATION @classmethod def _handle_event( - cls, exp_id, exp_version, state_name, session_id, params, - play_type): + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + params: Dict[str, str], + play_type: str + ) -> None: + """Perform in-request processing of recording exploration start + events. 
+ """ stats_models.StartExplorationEventLogEntryModel.create( exp_id, exp_version, state_name, session_id, params, play_type) @@ -166,12 +221,22 @@ def _handle_event( class MaybeLeaveExplorationEventHandler(BaseEventHandler): """Event handler for recording exploration leave events.""" - EVENT_TYPE = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION + EVENT_TYPE: str = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION @classmethod def _handle_event( - cls, exp_id, exp_version, state_name, session_id, time_spent, - params, play_type): + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + time_spent: float, + params: Dict[str, str], + play_type: str + ) -> None: + """Perform in-request processing of recording exploration leave + events. + """ stats_models.MaybeLeaveExplorationEventLogEntryModel.create( exp_id, exp_version, state_name, session_id, time_spent, params, play_type) @@ -180,12 +245,22 @@ def _handle_event( class CompleteExplorationEventHandler(BaseEventHandler): """Event handler for recording exploration completion events.""" - EVENT_TYPE = feconf.EVENT_TYPE_COMPLETE_EXPLORATION + EVENT_TYPE: str = feconf.EVENT_TYPE_COMPLETE_EXPLORATION @classmethod def _handle_event( - cls, exp_id, exp_version, state_name, session_id, time_spent, - params, play_type): + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + time_spent: float, + params: Dict[str, str], + play_type: str + ) -> None: + """Perform in-request processing of recording exploration completion + events. 
+ """ stats_models.CompleteExplorationEventLogEntryModel.create( exp_id, exp_version, state_name, session_id, time_spent, params, play_type) @@ -194,10 +269,19 @@ def _handle_event( class RateExplorationEventHandler(BaseEventHandler): """Event handler for recording exploration rating events.""" - EVENT_TYPE = feconf.EVENT_TYPE_RATE_EXPLORATION + EVENT_TYPE: str = feconf.EVENT_TYPE_RATE_EXPLORATION @classmethod - def _handle_event(cls, exp_id, user_id, rating, old_rating): + def _handle_event( + cls, + exp_id: str, + user_id: str, + rating: int, + old_rating: int + ) -> None: + """Perform in-request processing of recording exploration rating + events. + """ stats_models.RateExplorationEventLogEntryModel.create( exp_id, user_id, rating, old_rating) handle_exploration_rating(exp_id, rating, old_rating) @@ -206,13 +290,20 @@ def _handle_event(cls, exp_id, user_id, rating, old_rating): class StateHitEventHandler(BaseEventHandler): """Event handler for recording state hit events.""" - EVENT_TYPE = feconf.EVENT_TYPE_STATE_HIT + EVENT_TYPE: str = feconf.EVENT_TYPE_STATE_HIT # TODO(sll): Remove params before sending this event to the jobs taskqueue. 
@classmethod def _handle_event( - cls, exp_id, exp_version, state_name, session_id, - params, play_type): + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + params: Dict[str, str], + play_type: str + ) -> None: + """Perform in-request processing of recording state hit events.""" stats_models.StateHitEventLogEntryModel.create( exp_id, exp_version, state_name, session_id, params, play_type) @@ -221,12 +312,18 @@ def _handle_event( class StateCompleteEventHandler(BaseEventHandler): """Event handler for recording state complete events.""" - EVENT_TYPE = feconf.EVENT_TYPE_STATE_COMPLETED + EVENT_TYPE: str = feconf.EVENT_TYPE_STATE_COMPLETED @classmethod def _handle_event( - cls, exp_id, exp_version, state_name, session_id, - time_spent_in_state_secs): + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + time_spent_in_state_secs: float + ) -> None: + """Perform in-request processing of recording state complete events.""" stats_models.StateCompleteEventLogEntryModel.create( exp_id, exp_version, state_name, session_id, time_spent_in_state_secs) @@ -235,12 +332,21 @@ def _handle_event( class LeaveForRefresherExpEventHandler(BaseEventHandler): """Event handler for recording "leave for refresher exploration" events.""" - EVENT_TYPE = feconf.EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP + EVENT_TYPE: str = feconf.EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP @classmethod def _handle_event( - cls, exp_id, refresher_exp_id, exp_version, state_name, session_id, - time_spent_in_state_secs): + cls, + exp_id: str, + refresher_exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + time_spent_in_state_secs: float + ) -> None: + """Perform in-request processing of recording "leave for refresher + exploration" events. 
+ """ stats_models.LeaveForRefresherExplorationEventLogEntryModel.create( exp_id, refresher_exp_id, exp_version, state_name, session_id, time_spent_in_state_secs) @@ -249,53 +355,73 @@ def _handle_event( class FeedbackThreadCreatedEventHandler(BaseEventHandler): """Event handler for recording new feedback thread creation events.""" - EVENT_TYPE = feconf.EVENT_TYPE_NEW_THREAD_CREATED + EVENT_TYPE: str = feconf.EVENT_TYPE_NEW_THREAD_CREATED @classmethod - def _handle_event(cls, exp_id): + def _handle_event(cls, exp_id: str) -> None: + """Perform in-request processing of recording new feedback thread + creation events. + """ feedback_services.handle_new_thread_created(exp_id) class FeedbackThreadStatusChangedEventHandler(BaseEventHandler): """Event handler for recording reopening feedback thread events.""" - EVENT_TYPE = feconf.EVENT_TYPE_THREAD_STATUS_CHANGED + EVENT_TYPE: str = feconf.EVENT_TYPE_THREAD_STATUS_CHANGED @classmethod - def _handle_event(cls, exp_id, old_status, new_status): + def _handle_event( + cls, + exp_id: str, + old_status: str, + new_status: str + ) -> None: + """Perform in-request processing of recording reopening feedback + thread events. + """ feedback_services.handle_thread_status_changed( exp_id, old_status, new_status) -def handle_exploration_start(exp_id): +def handle_exploration_start(exp_id: str) -> None: """Handles a user's start of an exploration. Args: exp_id: str. The exploration which has been started. """ - exp_summary = exp_fetchers.get_exploration_summary_by_id(exp_id) - if exp_summary: + exp_summary = exp_fetchers.get_exploration_summary_by_id( + exp_id, strict=False + ) + if exp_summary is not None: for user_id in exp_summary.owner_ids: _increment_total_plays_count_transactional(user_id) -def handle_exploration_rating(exp_id, rating, old_rating): +def handle_exploration_rating( + exp_id: str, rating: int, old_rating: Optional[int] +) -> None: """Handles a new rating for an exploration. Args: exp_id: str. 
The exploration which has been rated. rating: int. The new rating of the exploration. - old_rating: int. The old rating of the exploration before - refreshing. + old_rating: int|None. The old rating of the exploration before + refreshing, or None if the exploration hasn't been rated by the user + yet. """ - exp_summary = exp_fetchers.get_exploration_summary_by_id(exp_id) - if exp_summary: + exp_summary = exp_fetchers.get_exploration_summary_by_id( + exp_id, strict=False + ) + if exp_summary is not None: for user_id in exp_summary.owner_ids: _refresh_average_ratings_transactional(user_id, rating, old_rating) @transaction_services.run_in_transaction_wrapper -def _refresh_average_ratings_transactional(user_id, new_rating, old_rating): +def _refresh_average_ratings_transactional( + user_id: str, new_rating: int, old_rating: Optional[int] +) -> None: """Refreshes the average rating for a user. Args: @@ -322,8 +448,7 @@ def _refresh_average_ratings_transactional(user_id, new_rating, old_rating): num_ratings += 1 else: sum_of_ratings -= old_rating - average_ratings = python_utils.divide( - sum_of_ratings, float(num_ratings)) + average_ratings = sum_of_ratings / float(num_ratings) user_stats_model.average_ratings = average_ratings user_stats_model.num_ratings = num_ratings user_stats_model.update_timestamps() @@ -331,7 +456,7 @@ def _refresh_average_ratings_transactional(user_id, new_rating, old_rating): @transaction_services.run_in_transaction_wrapper -def _increment_total_plays_count_transactional(user_id): +def _increment_total_plays_count_transactional(user_id: str) -> None: """Increments the total plays count of the exploration. 
Args: diff --git a/core/domain/event_services_test.py b/core/domain/event_services_test.py index 35a85b31fe57..20186c51f829 100644 --- a/core/domain/event_services_test.py +++ b/core/domain/event_services_test.py @@ -25,16 +25,23 @@ from core import feconf from core.domain import event_services +from core.domain import exp_domain +from core.domain import exp_fetchers +from core.domain import feedback_services +from core.domain import stats_services from core.domain import user_services from core.platform import models from core.tests import test_utils -( - stats_models, exp_models, feedback_models, - user_models -) = models.Registry.import_models([ - models.NAMES.statistics, models.NAMES.exploration, models.NAMES.feedback, - models.NAMES.user +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import feedback_models + from mypy_imports import stats_models + from mypy_imports import user_models + +(stats_models, feedback_models, user_models) = models.Registry.import_models([ + models.Names.STATISTICS, models.Names.FEEDBACK, models.Names.USER ]) datastore_services = models.Registry.import_datastore_services() @@ -46,8 +53,8 @@ class MockNumbersModel(datastore_services.Model): class BaseEventHandlerTests(test_utils.GenericTestBase): - def test_handle_event_raises_not_implemented_error(self): - with self.assertRaisesRegexp( + def test_handle_event_raises_not_implemented_error(self) -> None: + with self.assertRaisesRegex( NotImplementedError, re.escape( 'Subclasses of BaseEventHandler should implement the ' @@ -58,10 +65,9 @@ def test_handle_event_raises_not_implemented_error(self): class ExplorationActualStartEventHandlerTests(test_utils.GenericTestBase): - def test_record_exploration_actual_start_events(self): + def test_record_exploration_actual_start_events(self) -> None: all_models = ( stats_models.ExplorationActualStartEventLogEntryModel.get_all()) - self.assertEqual(all_models.count(), 0) 
event_services.ExplorationActualStartEventHandler.record( @@ -69,11 +75,12 @@ def test_record_exploration_actual_start_events(self): all_models = ( stats_models.ExplorationActualStartEventLogEntryModel.get_all()) - self.assertEqual(all_models.count(), 1) model = all_models.get() + # Ruling out the possibility of None for mypy type checking. + assert model is not None self.assertEqual(model.exp_id, 'exp_id') self.assertEqual(model.state_name, 'state_name') self.assertEqual(model.session_id, 'session_id') @@ -82,10 +89,9 @@ def test_record_exploration_actual_start_events(self): class SolutionHitEventHandlerTests(test_utils.GenericTestBase): - def test_record_solution_hit_events(self): + def test_record_solution_hit_events(self) -> None: all_models = ( stats_models.SolutionHitEventLogEntryModel.get_all()) - self.assertEqual(all_models.count(), 0) event_services.SolutionHitEventHandler.record( @@ -93,11 +99,12 @@ def test_record_solution_hit_events(self): all_models = ( stats_models.SolutionHitEventLogEntryModel.get_all()) - self.assertEqual(all_models.count(), 1) model = all_models.get() + # Ruling out the possibility of None for mypy type checking. 
+ assert model is not None self.assertEqual(model.exp_id, 'exp_id') self.assertEqual(model.state_name, 'state_name') self.assertEqual(model.session_id, 'session_id') @@ -105,12 +112,133 @@ def test_record_solution_hit_events(self): self.assertEqual(model.time_spent_in_state_secs, 2.0) +class StartExplorationEventHandlerTests(test_utils.GenericTestBase): + + def test_recording_exploration_start_events(self) -> None: + + all_models = ( + stats_models.StartExplorationEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 0) + + event_services.StartExplorationEventHandler.record( + 'exp_id', 1, 'state_name', 'session_id', + {}, feconf.PLAY_TYPE_NORMAL) + + all_models = ( + stats_models.StartExplorationEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 1) + + model = all_models.get() + + # Ruling out the possibility of None for mypy type checking. + assert model is not None + self.assertEqual( + model.event_type, + feconf.EVENT_TYPE_START_EXPLORATION) + self.assertEqual(model.exploration_id, 'exp_id') + self.assertEqual(model.exploration_version, 1) + self.assertEqual(model.state_name, 'state_name') + self.assertEqual(model.session_id, 'session_id') + self.assertEqual(model.params, {}) + self.assertEqual(model.play_type, feconf.PLAY_TYPE_NORMAL) + + +class MaybeLeaveExplorationEventHandlerTests(test_utils.GenericTestBase): + + def test_recording_exploration_leave_events(self) -> None: + + all_models = ( + stats_models.MaybeLeaveExplorationEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 0) + + event_services.MaybeLeaveExplorationEventHandler.record( + 'exp_id', 1, 'state_name', 'session_id', 2, + {}, feconf.PLAY_TYPE_NORMAL) + + all_models = ( + stats_models.MaybeLeaveExplorationEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 1) + + model = all_models.get() + + # Ruling out the possibility of None for mypy type checking. 
+ assert model is not None + self.assertEqual( + model.event_type, + feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION) + self.assertEqual(model.exploration_id, 'exp_id') + self.assertEqual(model.exploration_version, 1) + self.assertEqual(model.state_name, 'state_name') + self.assertEqual(model.session_id, 'session_id') + self.assertEqual(model.client_time_spent_in_secs, 2) + self.assertEqual(model.params, {}) + self.assertEqual(model.play_type, feconf.PLAY_TYPE_NORMAL) + + +class CompleteExplorationEventHandlerTests(test_utils.GenericTestBase): + + def test_recording_exploration_leave_events(self) -> None: + + all_models = ( + stats_models.CompleteExplorationEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 0) + + event_services.CompleteExplorationEventHandler.record( + 'exp_id', 1, 'state_name', 'session_id', 2, + {}, feconf.PLAY_TYPE_NORMAL) + + all_models = ( + stats_models.CompleteExplorationEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 1) + + model = all_models.get() + + # Ruling out the possibility of None for mypy type checking. 
+ assert model is not None + self.assertEqual( + model.event_type, + feconf.EVENT_TYPE_COMPLETE_EXPLORATION) + self.assertEqual(model.exploration_id, 'exp_id') + self.assertEqual(model.exploration_version, 1) + self.assertEqual(model.state_name, 'state_name') + self.assertEqual(model.session_id, 'session_id') + self.assertEqual(model.client_time_spent_in_secs, 2) + self.assertEqual(model.params, {}) + self.assertEqual(model.play_type, feconf.PLAY_TYPE_NORMAL) + + +class RateExplorationEventHandlerTests(test_utils.GenericTestBase): + + def test_recording_exploration_rating_events(self) -> None: + + all_models = ( + stats_models.RateExplorationEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 0) + + event_services.RateExplorationEventHandler.record( + 'exp_id', 'user_id', 3, 2) + + all_models = ( + stats_models.RateExplorationEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 1) + + model = all_models.get() + + # Ruling out the possibility of None for mypy type checking. + assert model is not None + self.assertEqual( + model.event_type, + feconf.EVENT_TYPE_RATE_EXPLORATION) + self.assertEqual(model.exploration_id, 'exp_id') + self.assertEqual(model.rating, 3) + self.assertEqual(model.old_rating, 2) + + class StateHitEventHandlerTests(test_utils.GenericTestBase): - def test_record_state_hit_events(self): + def test_record_state_hit_events(self) -> None: all_models = ( stats_models.StateHitEventLogEntryModel.get_all()) - self.assertEqual(all_models.count(), 0) event_services.StateHitEventHandler.record( @@ -119,11 +247,12 @@ def test_record_state_hit_events(self): all_models = ( stats_models.StateHitEventLogEntryModel.get_all()) - self.assertEqual(all_models.count(), 1) model = all_models.get() + # Ruling out the possibility of None for mypy type checking. 
+ assert model is not None self.assertEqual(model.exploration_id, 'exp_id') self.assertEqual(model.state_name, 'state_name') self.assertEqual(model.session_id, 'session_id') @@ -134,10 +263,9 @@ def test_record_state_hit_events(self): class StateCompleteEventHandlerTests(test_utils.GenericTestBase): - def test_record_state_complete_events(self): + def test_record_state_complete_events(self) -> None: all_models = ( stats_models.StateCompleteEventLogEntryModel.get_all()) - self.assertEqual(all_models.count(), 0) event_services.StateCompleteEventHandler.record( @@ -145,11 +273,12 @@ def test_record_state_complete_events(self): all_models = ( stats_models.StateCompleteEventLogEntryModel.get_all()) - self.assertEqual(all_models.count(), 1) model = all_models.get() + # Ruling out the possibility of None for mypy type checking. + assert model is not None self.assertEqual(model.exp_id, 'exp_id') self.assertEqual(model.state_name, 'state_name') self.assertEqual(model.session_id, 'session_id') @@ -159,11 +288,10 @@ def test_record_state_complete_events(self): class LeaveForRefresherExpEventHandlerTests(test_utils.GenericTestBase): - def test_record_leave_for_refresher_exploration_events(self): + def test_record_leave_for_refresher_exploration_events(self) -> None: all_models = ( stats_models.LeaveForRefresherExplorationEventLogEntryModel .get_all()) - self.assertEqual(all_models.count(), 0) event_services.LeaveForRefresherExpEventHandler.record( @@ -172,11 +300,12 @@ def test_record_leave_for_refresher_exploration_events(self): all_models = ( stats_models.LeaveForRefresherExplorationEventLogEntryModel .get_all()) - self.assertEqual(all_models.count(), 1) model = all_models.get() + # Ruling out the possibility of None for mypy type checking. 
+ assert model is not None self.assertEqual(model.exp_id, 'exp_id') self.assertEqual(model.refresher_exp_id, 'refresher_exp_id') self.assertEqual(model.state_name, 'state_name') @@ -185,20 +314,63 @@ def test_record_leave_for_refresher_exploration_events(self): self.assertEqual(model.time_spent_in_state_secs, 2.0) +class FeedbackThreadCreatedEventHandlerTests(test_utils.GenericTestBase): + + def test_new_feedback_thread_creation_events(self) -> None: + + exp_id = 'exp_id' + + event_services.FeedbackThreadCreatedEventHandler.record(exp_id) + thread = feedback_services.get_thread_analytics(exp_id) + self.assertEqual(thread.id, exp_id) + self.assertEqual(thread.num_open_threads, 1) + self.assertEqual(thread.num_total_threads, 1) + + event_services.FeedbackThreadCreatedEventHandler.record(exp_id) + thread = feedback_services.get_thread_analytics(exp_id) + self.assertEqual(thread.id, exp_id) + self.assertEqual(thread.num_open_threads, 2) + self.assertEqual(thread.num_total_threads, 2) + + +class FeedbackThreadStatusChangedEventHandlerTests(test_utils.GenericTestBase): + + def test_recording_reopening_feedback_thread_events(self) -> None: + + exp_id = 'exp_id' + + # Changing Status from closed to open. + event_services.FeedbackThreadStatusChangedEventHandler.record( + exp_id, '', feedback_models.STATUS_CHOICES_OPEN) + + thread = feedback_services.get_thread_analytics(exp_id) + self.assertEqual(thread.id, exp_id) + self.assertEqual(thread.num_open_threads, 1) + + # Changing Status from open to closed. 
+ event_services.FeedbackThreadStatusChangedEventHandler.record( + exp_id, feedback_models.STATUS_CHOICES_OPEN, '') + + thread = feedback_services.get_thread_analytics(exp_id) + self.assertEqual(thread.id, exp_id) + self.assertEqual(thread.num_open_threads, 0) + + class TestEventHandler(event_services.BaseEventHandler): """Mock event class for processing events of type 'test_event'.""" EVENT_TYPE = 'test_event' @classmethod - def _handle_event(cls, number): + def _handle_event(cls, number: int) -> None: + """Mock event handler method to process 'test_event' events.""" MockNumbersModel(number=number).put() class EventHandlerUnitTests(test_utils.GenericTestBase): """Test basic event handler operations.""" - def test_handle_event_method_is_called(self): + def test_handle_event_method_is_called(self) -> None: self.assertEqual(MockNumbersModel.query().count(), 0) TestEventHandler.record(2) self.assertEqual(MockNumbersModel.query().count(), 1) @@ -210,10 +382,10 @@ def test_handle_event_method_is_called(self): class StatsEventsHandlerUnitTests(test_utils.GenericTestBase): """Tests related to the stats events handler.""" - def test_stats_events_with_undefined_state_name_gets_logged(self): + def test_stats_events_with_undefined_state_name_gets_logged(self) -> None: observed_log_messages = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.error().""" observed_log_messages.append(msg % args) @@ -239,10 +411,93 @@ def _mock_logging_function(msg, *args): ] ) + def test_stats_events_successfully_updated(self) -> None: + + all_models = ( + stats_models.ExplorationStatsModel.get_all()) + self.assertEqual(all_models.count(), 0) + + exp_id = 'eid1' + self.save_new_valid_exploration(exp_id, self.OWNER_EMAIL) + exploration = exp_fetchers.get_exploration_by_id(exp_id) + event_services.StatsEventsHandler.record( + exp_id, exploration.version, { + 'state_stats_mapping': { + 'Introduction': {} + } + } + ) + + 
all_models = stats_models.ExplorationStatsModel.get_all() + self.assertEqual(all_models.count(), 1) + model = all_models.get() + # Ruling out the possibility of None for mypy type checking. + assert model is not None + self.assertEqual(model.exp_id, exp_id) + self.assertEqual(model.exp_version, exploration.version) + + +class AnswerSubmissionEventHandlerTests(test_utils.GenericTestBase): + + def test_answer_submission(self) -> None: + all_models = ( + stats_models.AnswerSubmittedEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 0) + + exp_id = 'eid1' + session_id = 'sid1' + category = exp_domain.DEFAULT_OUTCOME_CLASSIFICATION + self.save_new_valid_exploration(exp_id, self.OWNER_EMAIL) + exploration = exp_fetchers.get_exploration_by_id(exp_id) + + event_services.AnswerSubmissionEventHandler.record( + exp_id, + exploration.version, + state_name=feconf.DEFAULT_INIT_STATE_NAME, + interaction_id='TextInput', + answer_group_index=1, + rule_spec_index=1, + classification_categorization=category, + session_id=session_id, + time_spent_in_secs=2, + params={}, + normalized_answer='answer_submitted' + ) + + state_answers = stats_services.get_state_answers( + exp_id, exploration.version, + exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. + assert state_answers is not None + + self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{ + 'answer': 'answer_submitted', + 'time_spent_in_sec': 2.0, + 'answer_group_index': 1, + 'rule_spec_index': 1, + 'classification_categorization': category, + 'session_id': session_id, + 'interaction_id': 'TextInput', + 'params': {}, + 'rule_spec_str': None, + 'answer_str': None, + }]) + + all_models = ( + stats_models.AnswerSubmittedEventLogEntryModel.get_all()) + self.assertEqual(all_models.count(), 1) + + model = all_models.get() + + # Ruling out the possibility of None for mypy type checking. 
+ assert model is not None + self.assertEqual(model.exp_id, exp_id) + self.assertEqual(model.exp_version, exploration.version) + class EventHandlerNameTests(test_utils.GenericTestBase): - def test_event_handler_names(self): + def test_event_handler_names(self) -> None: """This function checks for duplicate event handlers.""" all_python_files = self.get_all_python_files() @@ -282,14 +537,26 @@ def test_event_handler_names(self): class UserStatsEventsFunctionsTests(test_utils.GenericTestBase): - def setUp(self): + def setUp(self) -> None: super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.exploration = ( self.save_new_valid_exploration('exp_id', self.admin_id)) - def test_average_ratings_of_users_exps_are_calculated_correctly(self): + def test_average_ratings_of_users_exps_are_calculated_correctly( + self + ) -> None: + + admin_average_ratings = ( + user_services.get_dashboard_stats(self.admin_id)['average_ratings']) + self.assertIsNone(admin_average_ratings) + + event_services.handle_exploration_rating('exp_id', 5, None) + admin_average_ratings = ( + user_services.get_dashboard_stats(self.admin_id)['average_ratings']) + self.assertEqual(admin_average_ratings, 5) + user_models.UserStatsModel( id=self.admin_id, average_ratings=None, num_ratings=0, total_plays=0 ).put() @@ -313,7 +580,7 @@ def test_average_ratings_of_users_exps_are_calculated_correctly(self): user_services.get_dashboard_stats(self.admin_id)['average_ratings']) self.assertEqual(admin_average_ratings, 1) - def test_total_plays_of_users_exps_are_calculated_correctly(self): + def test_total_plays_of_users_exps_are_calculated_correctly(self) -> None: admin_total_plays = ( user_services.get_dashboard_stats(self.admin_id)['total_plays']) self.assertEqual(admin_total_plays, 0) diff --git a/core/domain/exp_domain.py b/core/domain/exp_domain.py index 1cb72169bf4a..f809435cb5a3 100644 --- 
a/core/domain/exp_domain.py +++ b/core/domain/exp_domain.py @@ -25,23 +25,39 @@ import collections import copy +import datetime import json import re import string from core import feconf -from core import python_utils from core import schema_utils from core import utils from core.constants import constants from core.domain import change_domain -from core.domain import html_cleaner -from core.domain import html_validation_service from core.domain import param_domain from core.domain import state_domain -from core.platform import models +from core.domain import translation_domain +from extensions.objects.models import objects -(exp_models,) = models.Registry.import_models([models.NAMES.exploration]) +import bs4 +from typing import ( + Callable, Dict, Final, List, Literal, Mapping, Optional, Sequence, Set, + Tuple, TypedDict, Union, cast, overload) + +from core.domain import html_cleaner # pylint: disable=invalid-import-from # isort:skip +from core.domain import html_validation_service # pylint: disable=invalid-import-from # isort:skip +from core.domain import interaction_registry # pylint: disable=invalid-import-from # isort:skip +from core.platform import models # pylint: disable=invalid-import-from # isort:skip + +# TODO(#14537): Refactor this file and remove imports marked +# with 'invalid-import-from'. + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) # Do not modify the values of these constants. This is to preserve backwards @@ -49,41 +65,43 @@ # TODO(bhenning): Prior to July 2015, exploration changes involving rules were # logged using the key 'widget_handlers'. These need to be migrated to # 'answer_groups' and 'default_outcome'. 
-STATE_PROPERTY_PARAM_CHANGES = 'param_changes' -STATE_PROPERTY_CONTENT = 'content' -STATE_PROPERTY_SOLICIT_ANSWER_DETAILS = 'solicit_answer_details' -STATE_PROPERTY_CARD_IS_CHECKPOINT = 'card_is_checkpoint' -STATE_PROPERTY_RECORDED_VOICEOVERS = 'recorded_voiceovers' -STATE_PROPERTY_WRITTEN_TRANSLATIONS = 'written_translations' -STATE_PROPERTY_INTERACTION_ID = 'widget_id' -STATE_PROPERTY_NEXT_CONTENT_ID_INDEX = 'next_content_id_index' -STATE_PROPERTY_LINKED_SKILL_ID = 'linked_skill_id' -STATE_PROPERTY_INTERACTION_CUST_ARGS = 'widget_customization_args' -STATE_PROPERTY_INTERACTION_ANSWER_GROUPS = 'answer_groups' -STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME = 'default_outcome' -STATE_PROPERTY_UNCLASSIFIED_ANSWERS = ( +STATE_PROPERTY_PARAM_CHANGES: Final = 'param_changes' +STATE_PROPERTY_CONTENT: Final = 'content' +STATE_PROPERTY_SOLICIT_ANSWER_DETAILS: Final = 'solicit_answer_details' +STATE_PROPERTY_CARD_IS_CHECKPOINT: Final = 'card_is_checkpoint' +STATE_PROPERTY_RECORDED_VOICEOVERS: Final = 'recorded_voiceovers' +DEPRECATED_STATE_PROPERTY_WRITTEN_TRANSLATIONS: Final = 'written_translations' +STATE_PROPERTY_INTERACTION_ID: Final = 'widget_id' +DEPRECATED_STATE_PROPERTY_NEXT_CONTENT_ID_INDEX: Final = 'next_content_id_index' +STATE_PROPERTY_LINKED_SKILL_ID: Final = 'linked_skill_id' +STATE_PROPERTY_INTERACTION_CUST_ARGS: Final = 'widget_customization_args' +STATE_PROPERTY_INTERACTION_ANSWER_GROUPS: Final = 'answer_groups' +STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME: Final = 'default_outcome' +STATE_PROPERTY_UNCLASSIFIED_ANSWERS: Final = ( 'confirmed_unclassified_answers') -STATE_PROPERTY_INTERACTION_HINTS = 'hints' -STATE_PROPERTY_INTERACTION_SOLUTION = 'solution' +STATE_PROPERTY_INTERACTION_HINTS: Final = 'hints' +STATE_PROPERTY_INTERACTION_SOLUTION: Final = 'solution' # Deprecated state properties. -STATE_PROPERTY_CONTENT_IDS_TO_AUDIO_TRANSLATIONS_DEPRECATED = ( +STATE_PROPERTY_CONTENT_IDS_TO_AUDIO_TRANSLATIONS_DEPRECATED: Final = ( # Deprecated in state schema v27. 
'content_ids_to_audio_translations') +STATE_PROPERTY_WRITTEN_TRANSLATIONS_DEPRECATED: Final = 'written_translations' +STATE_PROPERTY_NEXT_CONTENT_ID_INDEX_DEPRECATED: Final = 'next_content_id_index' # These four properties are kept for legacy purposes and are not used anymore. -STATE_PROPERTY_INTERACTION_HANDLERS = 'widget_handlers' -STATE_PROPERTY_INTERACTION_STICKY = 'widget_sticky' -GADGET_PROPERTY_VISIBILITY = 'gadget_visibility' -GADGET_PROPERTY_CUST_ARGS = 'gadget_customization_args' +STATE_PROPERTY_INTERACTION_HANDLERS: Final = 'widget_handlers' +STATE_PROPERTY_INTERACTION_STICKY: Final = 'widget_sticky' +GADGET_PROPERTY_VISIBILITY: Final = 'gadget_visibility' +GADGET_PROPERTY_CUST_ARGS: Final = 'gadget_customization_args' # This takes additional 'title' and 'category' parameters. -CMD_CREATE_NEW = 'create_new' +CMD_CREATE_NEW: Final = 'create_new' # This takes an additional 'state_name' parameter. -CMD_ADD_STATE = 'add_state' +CMD_ADD_STATE: Final = 'add_state' # This takes additional 'old_state_name' and 'new_state_name' parameters. -CMD_RENAME_STATE = 'rename_state' +CMD_RENAME_STATE: Final = 'rename_state' # This takes an additional 'state_name' parameter. -CMD_DELETE_STATE = 'delete_state' +CMD_DELETE_STATE: Final = 'delete_state' # TODO(#12981): Write a one-off job to modify all existing translation # suggestions that use DEPRECATED_CMD_ADD_TRANSLATION to use # CMD_ADD_WRITTEN_TRANSLATION instead. Suggestions in the future will only use @@ -92,23 +110,26 @@ # here to support old suggestions. This takes additional 'state_name', # 'content_id', 'language_code' and 'content_html' and 'translation_html' # parameters. -DEPRECATED_CMD_ADD_TRANSLATION = 'add_translation' +DEPRECATED_CMD_ADD_TRANSLATION: Final = 'add_translation' # This takes additional 'state_name', 'content_id', 'language_code', # 'data_format', 'content_html' and 'translation_html' parameters. 
-CMD_ADD_WRITTEN_TRANSLATION = 'add_written_translation' +CMD_ADD_WRITTEN_TRANSLATION: Final = 'add_written_translation' # This takes additional 'content_id', 'language_code' and 'state_name' # parameters. -CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE = ( +DEPRECATED_CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE: Final = ( 'mark_written_translation_as_needing_update') # This takes additional 'content_id' and 'state_name' parameters. -CMD_MARK_WRITTEN_TRANSLATIONS_AS_NEEDING_UPDATE = ( +DEPRECATED_CMD_MARK_WRITTEN_TRANSLATIONS_AS_NEEDING_UPDATE: Final = ( 'mark_written_translations_as_needing_update') +CMD_MARK_TRANSLATIONS_NEEDS_UPDATE: Final = 'mark_translations_needs_update' +# This takes an additional 'content_id' parameter. +CMD_REMOVE_TRANSLATIONS: Final = 'remove_translations' # This takes additional 'property_name' and 'new_value' parameters. -CMD_EDIT_STATE_PROPERTY = 'edit_state_property' +CMD_EDIT_STATE_PROPERTY: Final = 'edit_state_property' # This takes additional 'property_name' and 'new_value' parameters. -CMD_EDIT_EXPLORATION_PROPERTY = 'edit_exploration_property' +CMD_EDIT_EXPLORATION_PROPERTY: Final = 'edit_exploration_property' # This takes additional 'from_version' and 'to_version' parameters for logging. -CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION = ( +CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION: Final = ( 'migrate_states_schema_to_latest_version') # These are categories to which answers may be classified. These values should @@ -116,24 +137,35 @@ # logs. # Represents answers classified using rules defined as part of an interaction. -EXPLICIT_CLASSIFICATION = 'explicit' +EXPLICIT_CLASSIFICATION: Final = 'explicit' # Represents answers which are contained within the training data of an answer # group. -TRAINING_DATA_CLASSIFICATION = 'training_data_match' +TRAINING_DATA_CLASSIFICATION: Final = 'training_data_match' # Represents answers which were predicted using a statistical training model # from training data within an answer group. 
-STATISTICAL_CLASSIFICATION = 'statistical_classifier' +STATISTICAL_CLASSIFICATION: Final = 'statistical_classifier' # Represents answers which led to the 'default outcome' of an interaction, # rather than belonging to a specific answer group. -DEFAULT_OUTCOME_CLASSIFICATION = 'default_outcome' - -TYPE_INVALID_EXPRESSION = 'Invalid' -TYPE_VALID_ALGEBRAIC_EXPRESSION = 'AlgebraicExpressionInput' -TYPE_VALID_NUMERIC_EXPRESSION = 'NumericExpressionInput' -TYPE_VALID_MATH_EQUATION = 'MathEquationInput' - - -def clean_math_expression(math_expression): +DEFAULT_OUTCOME_CLASSIFICATION: Final = 'default_outcome' + +TYPE_INVALID_EXPRESSION: Final = 'Invalid' +TYPE_VALID_ALGEBRAIC_EXPRESSION: Final = 'AlgebraicExpressionInput' +TYPE_VALID_NUMERIC_EXPRESSION: Final = 'NumericExpressionInput' +TYPE_VALID_MATH_EQUATION: Final = 'MathEquationInput' +MATH_INTERACTION_TYPES: Final = [ + TYPE_VALID_ALGEBRAIC_EXPRESSION, + TYPE_VALID_NUMERIC_EXPRESSION, + TYPE_VALID_MATH_EQUATION +] +ALGEBRAIC_MATH_INTERACTIONS: Final = [ + TYPE_VALID_ALGEBRAIC_EXPRESSION, + TYPE_VALID_MATH_EQUATION +] +MATH_INTERACTION_DEPRECATED_RULES: Final = [ + 'ContainsSomeOf', 'OmitsSomeOf', 'MatchesWithGeneralForm'] + + +def clean_math_expression(math_expression: str) -> str: """Cleans a given math expression and formats it so that it is compatible with the new interactions' validators. 
@@ -214,6 +246,23 @@ def clean_math_expression(math_expression): return math_expression +class MetadataVersionHistoryDict(TypedDict): + """Dictionary representing MetadataVersionHistory object.""" + + last_edited_version_number: Optional[int] + last_edited_committer_id: str + + +class ExplorationVersionHistoryDict(TypedDict): + """Dictionary representing ExplorationVersionHistory object.""" + + exploration_id: str + exploration_version: int + state_version_history: Dict[str, state_domain.StateVersionHistoryDict] + metadata_version_history: MetadataVersionHistoryDict + committer_ids: List[str] + + class ExplorationChange(change_domain.BaseChange): """Domain object class for an exploration change. @@ -244,15 +293,13 @@ class ExplorationChange(change_domain.BaseChange): # The allowed list of state properties which can be used in # edit_state_property command. - STATE_PROPERTIES = ( + STATE_PROPERTIES: List[str] = [ STATE_PROPERTY_PARAM_CHANGES, STATE_PROPERTY_CONTENT, STATE_PROPERTY_SOLICIT_ANSWER_DETAILS, STATE_PROPERTY_CARD_IS_CHECKPOINT, STATE_PROPERTY_RECORDED_VOICEOVERS, - STATE_PROPERTY_WRITTEN_TRANSLATIONS, STATE_PROPERTY_INTERACTION_ID, - STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, STATE_PROPERTY_LINKED_SKILL_ID, STATE_PROPERTY_INTERACTION_CUST_ARGS, STATE_PROPERTY_INTERACTION_STICKY, @@ -263,63 +310,101 @@ class ExplorationChange(change_domain.BaseChange): STATE_PROPERTY_INTERACTION_SOLUTION, STATE_PROPERTY_UNCLASSIFIED_ANSWERS, # Deprecated state properties. - STATE_PROPERTY_CONTENT_IDS_TO_AUDIO_TRANSLATIONS_DEPRECATED) + STATE_PROPERTY_CONTENT_IDS_TO_AUDIO_TRANSLATIONS_DEPRECATED, + STATE_PROPERTY_WRITTEN_TRANSLATIONS_DEPRECATED, + STATE_PROPERTY_NEXT_CONTENT_ID_INDEX_DEPRECATED + ] # The allowed list of exploration properties which can be used in # edit_exploration_property command. 
- EXPLORATION_PROPERTIES = ( + EXPLORATION_PROPERTIES: List[str] = [ 'title', 'category', 'objective', 'language_code', 'tags', 'blurb', 'author_notes', 'param_specs', 'param_changes', - 'init_state_name', 'auto_tts_enabled', 'correctness_feedback_enabled') + 'init_state_name', 'auto_tts_enabled', 'correctness_feedback_enabled', + 'next_content_id_index', 'edits_allowed'] - ALLOWED_COMMANDS = [{ + ALLOWED_COMMANDS: List[feconf.ValidCmdDict] = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['category', 'title'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_ADD_STATE, - 'required_attribute_names': ['state_name'], + 'required_attribute_names': [ + 'state_name', + 'content_id_for_state_content', + 'content_id_for_default_outcome' + ], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_DELETE_STATE, 'required_attribute_names': ['state_name'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_RENAME_STATE, 'required_attribute_names': ['new_state_name', 'old_state_name'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': DEPRECATED_CMD_ADD_TRANSLATION, 'required_attribute_names': [ 'state_name', 'content_id', 'language_code', 'content_html', 'translation_html'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_ADD_WRITTEN_TRANSLATION, 'required_attribute_names': [ 'state_name', 'content_id', 'language_code', 'content_html', 'translation_html', 'data_format'], 'optional_attribute_names': [], - 
'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { - 'name': CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE, + 'name': DEPRECATED_CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE, 'required_attribute_names': [ 'content_id', 'language_code', 'state_name' ], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { - 'name': CMD_MARK_WRITTEN_TRANSLATIONS_AS_NEEDING_UPDATE, + 'name': DEPRECATED_CMD_MARK_WRITTEN_TRANSLATIONS_AS_NEEDING_UPDATE, 'required_attribute_names': ['content_id', 'state_name'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} + }, { + 'name': CMD_MARK_TRANSLATIONS_NEEDS_UPDATE, + 'required_attribute_names': ['content_id'], + 'optional_attribute_names': [], + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} + }, { + 'name': CMD_REMOVE_TRANSLATIONS, + 'required_attribute_names': ['content_id'], + 'optional_attribute_names': [], + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_EDIT_STATE_PROPERTY, 'required_attribute_names': [ @@ -335,34 +420,605 @@ class ExplorationChange(change_domain.BaseChange): 'required_attribute_names': ['property_name', 'new_value'], 'optional_attribute_names': ['old_value'], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': EXPLORATION_PROPERTIES} + 'allowed_values': {'property_name': EXPLORATION_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION, 'required_attribute_names': ['from_version', 'to_version'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': exp_models.ExplorationModel.CMD_REVERT_COMMIT, 
'required_attribute_names': ['version_number'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }] # TODO(#12991): Remove this once once we use the migration jobs to remove # the deprecated commands from the server data. - DEPRECATED_COMMANDS = [ + DEPRECATED_COMMANDS: List[str] = [ 'clone', 'add_gadget', 'edit_gadget_property', 'delete_gadget', 'rename_gadget'] +class CreateNewExplorationCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_CREATE_NEW command. + """ + + category: str + title: str + + +class AddExplorationStateCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_ADD_STATE command. + """ + + state_name: str + content_id_for_state_content: str + content_id_for_default_outcome: str + + +class DeleteExplorationStateCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_DELETE_STATE command. + """ + + state_name: str + + +class RenameExplorationStateCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_RENAME_STATE command. + """ + + new_state_name: str + old_state_name: str + + +class AddWrittenTranslationCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_ADD_WRITTEN_TRANSLATION command. + """ + + state_name: str + content_id: str + language_code: str + content_html: str + translation_html: str + data_format: str + + +class MarkWrittenTranslationAsNeedingUpdateCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE command. + """ + + content_id: str + language_code: str + state_name: str + + +class MarkWrittenTranslationsAsNeedingUpdateCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_MARK_WRITTEN_TRANSLATIONS_AS_NEEDING_UPDATE command. 
+ """ + + content_id: str + state_name: str + + +class EditExpStatePropertyParamChangesCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_PARAM_CHANGES as allowed value. + """ + + property_name: Literal['param_changes'] + state_name: str + new_value: List[param_domain.ParamChangeDict] + old_value: List[param_domain.ParamChangeDict] + + +class EditExpStatePropertyContentCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_CONTENT as allowed value. + """ + + property_name: Literal['content'] + state_name: str + new_value: state_domain.SubtitledHtmlDict + old_value: Optional[state_domain.SubtitledHtmlDict] + + +class EditExpStatePropertySolicitAnswerDetailsCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_SOLICIT_ANSWER_DETAILS as allowed value. + """ + + property_name: Literal['solicit_answer_details'] + state_name: str + new_value: bool + old_value: bool + + +class EditExpStatePropertyCardIsCheckpointCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_CARD_IS_CHECKPOINT as allowed value. + """ + + property_name: Literal['card_is_checkpoint'] + state_name: str + new_value: bool + old_value: bool + + +class EditExpStatePropertyRecordedVoiceoversCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_RECORDED_VOICEOVERS as allowed value. 
+ """ + + property_name: Literal['recorded_voiceovers'] + state_name: str + new_value: state_domain.RecordedVoiceoversDict + old_value: state_domain.RecordedVoiceoversDict + + +class EditExpStatePropertyInteractionIdCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_INTERACTION_ID as allowed value. + """ + + property_name: Literal['widget_id'] + state_name: str + new_value: str + old_value: str + + +class EditExpStatePropertyLinkedSkillIdCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_LINKED_SKILL_ID as allowed value. + """ + + property_name: Literal['linked_skill_id'] + state_name: str + new_value: str + old_value: str + + +class EditExpStatePropertyInteractionCustArgsCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_INTERACTION_CUST_ARGS as allowed value. + """ + + property_name: Literal['widget_customization_args'] + state_name: str + new_value: state_domain.CustomizationArgsDictType + old_value: state_domain.CustomizationArgsDictType + + +class EditExpStatePropertyInteractionStickyCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_INTERACTION_STICKY as allowed value. + """ + + property_name: Literal['widget_sticky'] + state_name: str + new_value: bool + old_value: bool + + +class EditExpStatePropertyInteractionHandlersCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_INTERACTION_HANDLERS as allowed value. 
+ """ + + property_name: Literal['widget_handlers'] + state_name: str + new_value: List[state_domain.AnswerGroupDict] + old_value: List[state_domain.AnswerGroupDict] + + +class EditExpStatePropertyInteractionAnswerGroupsCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_INTERACTION_ANSWER_GROUPS as allowed value. + """ + + property_name: Literal['answer_groups'] + state_name: str + new_value: List[state_domain.AnswerGroupDict] + old_value: List[state_domain.AnswerGroupDict] + + +class EditExpStatePropertyInteractionDefaultOutcomeCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME as allowed value. + """ + + property_name: Literal['default_outcome'] + state_name: str + new_value: state_domain.OutcomeDict + old_value: state_domain.OutcomeDict + + +class EditExpStatePropertyInteractionHintsCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_INTERACTION_HINTS as allowed value. + """ + + property_name: Literal['hints'] + state_name: str + new_value: List[state_domain.HintDict] + old_value: List[state_domain.HintDict] + + +class EditExpStatePropertyInteractionSolutionCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_INTERACTION_SOLUTION as allowed value. + """ + + property_name: Literal['solution'] + state_name: str + new_value: state_domain.SolutionDict + old_value: state_domain.SolutionDict + + +class EditExpStatePropertyUnclassifiedAnswersCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_UNCLASSIFIED_ANSWERS as allowed value. 
+ """ + + property_name: Literal['confirmed_unclassified_answers'] + state_name: str + new_value: List[state_domain.AnswerGroup] + old_value: List[state_domain.AnswerGroup] + + +class EditExpStatePropertyContentIdsToAudioTranslationsDeprecatedCmd( + ExplorationChange +): + """Class representing the ExplorationChange's + CMD_EDIT_STATE_PROPERTY command with + STATE_PROPERTY_CONTENT_IDS_TO_AUDIO_TRANSLATIONS_DEPRECATED + as allowed value. + """ + + property_name: Literal['content_ids_to_audio_translations'] + state_name: str + new_value: Dict[str, Dict[str, state_domain.VoiceoverDict]] + old_value: Dict[str, Dict[str, state_domain.VoiceoverDict]] + + +class EditExplorationPropertyTitleCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'title' as allowed value. + """ + + property_name: Literal['title'] + new_value: str + old_value: str + + +class EditExplorationPropertyCategoryCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'category' as allowed value. + """ + + property_name: Literal['category'] + new_value: str + old_value: str + + +class EditExplorationPropertyObjectiveCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'objective' as allowed value. + """ + + property_name: Literal['objective'] + new_value: str + old_value: str + + +class EditExplorationPropertyLanguageCodeCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'language_code' as allowed value. + """ + + property_name: Literal['language_code'] + new_value: str + old_value: str + + +class EditExplorationPropertyTagsCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'tags' as allowed value. 
+ """ + + property_name: Literal['tags'] + new_value: List[str] + old_value: List[str] + + +class EditExplorationPropertyBlurbCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'blurb' as allowed value. + """ + + property_name: Literal['blurb'] + new_value: str + old_value: str + + +class EditExplorationPropertyAuthorNotesCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'author_notes' as allowed value. + """ + + property_name: Literal['author_notes'] + new_value: str + old_value: str + + +class EditExplorationPropertyParamSpecsCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'param_specs' as allowed value. + """ + + property_name: Literal['param_specs'] + new_value: Dict[str, param_domain.ParamSpecDict] + old_value: Dict[str, param_domain.ParamSpecDict] + + +class EditExplorationPropertyParamChangesCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'param_changes' as allowed value. + """ + + property_name: Literal['param_changes'] + new_value: List[param_domain.ParamChangeDict] + old_value: List[param_domain.ParamChangeDict] + + +class EditExplorationPropertyInitStateNameCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'init_state_name' as allowed value. + """ + + property_name: Literal['init_state_name'] + new_value: str + old_value: str + + +class EditExplorationPropertyAutoTtsEnabledCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'auto_tts_enabled' as allowed value. 
+ """ + + property_name: Literal['auto_tts_enabled'] + new_value: bool + old_value: bool + + +class EditExplorationPropertyCorrectnessFeedbackEnabledCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'correctness_feedback_enabled' as allowed value. + """ + + property_name: Literal['correctness_feedback_enabled'] + new_value: bool + old_value: bool + + +class EditExplorationPropertyNextContentIdIndexCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'next_content_id_index' as allowed value. + """ + + property_name: Literal['next_content_id_index'] + new_value: int + old_value: int + + +class EditExplorationPropertyEditsAllowedCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_EDIT_EXPLORATION_PROPERTY command with + 'edits_allowed' as allowed value. + """ + + property_name: Literal['edits_allowed'] + new_value: bool + old_value: bool + + +class MigrateStatesSchemaToLatestVersionCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION command. + """ + + from_version: str + to_version: str + + +class RevertExplorationCmd(ExplorationChange): + """Class representing the ExplorationChange's + CMD_REVERT_COMMIT command. + """ + + version_number: int + + +class TransientCheckpointUrlDict(TypedDict): + """Dictionary representing the TransientCheckpointUrl object.""" + + exploration_id: str + furthest_reached_checkpoint_state_name: str + furthest_reached_checkpoint_exp_version: int + most_recently_reached_checkpoint_state_name: str + most_recently_reached_checkpoint_exp_version: int + + +class TransientCheckpointUrl: + """Domain object representing the checkpoint progress of a + logged-out user. 
+ """ + + def __init__( + self, + exploration_id: str, + furthest_reached_checkpoint_state_name: str, + furthest_reached_checkpoint_exp_version: int, + most_recently_reached_checkpoint_state_name: str, + most_recently_reached_checkpoint_exp_version: int + ) -> None: + """Initializes a TransientCheckpointUrl domain object. + + Args: + exploration_id: str. Id of the exploration. + furthest_reached_checkpoint_state_name: str. State name of the + furthest reached checkpoint in the exploration. + furthest_reached_checkpoint_exp_version: int. Exploration version + in which the user has completed most checkpoints. + most_recently_reached_checkpoint_state_name: str. State name of + the most recently reached checkpoint in the exploration. + most_recently_reached_checkpoint_exp_version: int. Exploration + version in which a checkpoint was most recently reached. + """ + self.exploration_id = exploration_id + self.furthest_reached_checkpoint_state_name = ( + furthest_reached_checkpoint_state_name) + self.furthest_reached_checkpoint_exp_version = ( + furthest_reached_checkpoint_exp_version) + self.most_recently_reached_checkpoint_state_name = ( + most_recently_reached_checkpoint_state_name) + self.most_recently_reached_checkpoint_exp_version = ( + most_recently_reached_checkpoint_exp_version) + + def to_dict(self) -> TransientCheckpointUrlDict: + """Convert the TransientCheckpointUrl domain instance into a dictionary + form with its keys as the attributes of this class. + + Returns: + dict. A dictionary containing the TransientCheckpointUrl class + information in a dictionary form. 
+ """ + + return { + 'exploration_id': self.exploration_id, + 'furthest_reached_checkpoint_exp_version': ( + self.furthest_reached_checkpoint_exp_version), + 'furthest_reached_checkpoint_state_name': ( + self.furthest_reached_checkpoint_state_name), + 'most_recently_reached_checkpoint_exp_version': ( + self.most_recently_reached_checkpoint_exp_version), + 'most_recently_reached_checkpoint_state_name': ( + self.most_recently_reached_checkpoint_state_name) + } + + def validate(self) -> None: + """Validates properties of the TransientCheckpointUrl object. + + Raises: + ValidationError. One or more attributes of the + TransientCheckpointUrl are invalid. + """ + if not isinstance(self.exploration_id, str): + raise utils.ValidationError( + 'Expected exploration_id to be a str, received %s' + % self.exploration_id) + + if not isinstance(self.furthest_reached_checkpoint_state_name, str): + raise utils.ValidationError( + 'Expected furthest_reached_checkpoint_state_name to be a str,' + 'received %s' % self.furthest_reached_checkpoint_state_name + ) + + if not isinstance(self.furthest_reached_checkpoint_exp_version, int): + raise utils.ValidationError( + 'Expected furthest_reached_checkpoint_exp_version to be an int' + ) + + if not isinstance( + self.most_recently_reached_checkpoint_state_name, str + ): + raise utils.ValidationError( + 'Expected most_recently_reached_checkpoint_state_name to be a' + ' str, received %s' + % self.most_recently_reached_checkpoint_state_name + ) + + if not isinstance( + self.most_recently_reached_checkpoint_exp_version, int + ): + raise utils.ValidationError( + 'Expected most_recently_reached_checkpoint_exp_version' + ' to be an int' + ) + + +class ExplorationCommitLogEntryDict(TypedDict): + """Dictionary representing the ExplorationCommitLogEntry object.""" + + last_updated: float + exploration_id: str + commit_type: str + commit_message: str + version: int + post_commit_status: str + post_commit_community_owned: bool + post_commit_is_private: 
bool + + class ExplorationCommitLogEntry: """Value object representing a commit to an exploration.""" def __init__( - self, created_on, last_updated, user_id, exploration_id, - commit_type, commit_message, commit_cmds, version, - post_commit_status, post_commit_community_owned, - post_commit_is_private): + self, + created_on: datetime.datetime, + last_updated: datetime.datetime, + user_id: str, + exploration_id: str, + commit_type: str, + commit_message: str, + commit_cmds: Sequence[ + Mapping[str, change_domain.AcceptableChangeDictTypes] + ], + version: int, + post_commit_status: str, + post_commit_community_owned: bool, + post_commit_is_private: bool + ) -> None: """Initializes a ExplorationCommitLogEntry domain object. Args: @@ -401,7 +1057,7 @@ def __init__( self.post_commit_community_owned = post_commit_community_owned self.post_commit_is_private = post_commit_is_private - def to_dict(self): + def to_dict(self) -> ExplorationCommitLogEntryDict: """Returns a dict representing this ExplorationCommitLogEntry domain object. This omits created_on, user_id and commit_cmds and adds username (derived from user_id). @@ -423,10 +1079,17 @@ def to_dict(self): } +class ExpVersionReferenceDict(TypedDict): + """Dictionary representing the ExpVersionReference object.""" + + exp_id: str + version: int + + class ExpVersionReference: """Value object representing an exploration ID and a version number.""" - def __init__(self, exp_id, version): + def __init__(self, exp_id: str, version: int) -> None: """Initializes an ExpVersionReference domain object. Args: @@ -437,7 +1100,7 @@ def __init__(self, exp_id, version): self.version = version self.validate() - def to_dict(self): + def to_dict(self) -> ExpVersionReferenceDict: """Returns a dict representing this ExpVersionReference domain object. Returns: @@ -448,7 +1111,7 @@ def to_dict(self): 'version': self.version } - def validate(self): + def validate(self) -> None: """Validates properties of the ExpVersionReference. 
Raises: @@ -483,7 +1146,7 @@ class ExplorationVersionsDiff: It doesn't include the name changes of added/deleted states. """ - def __init__(self, change_list): + def __init__(self, change_list: Sequence[ExplorationChange]) -> None: """Constructs an ExplorationVersionsDiff domain object. Args: @@ -492,9 +1155,9 @@ def __init__(self, change_list): version. """ - added_state_names = [] - deleted_state_names = [] - new_to_old_state_names = {} + added_state_names: List[str] = [] + deleted_state_names: List[str] = [] + new_to_old_state_names: Dict[str, str] = {} for change in change_list: if change.cmd == CMD_ADD_STATE: @@ -534,7 +1197,11 @@ class VersionedExplorationInteractionIdsMapping: in an exploration. """ - def __init__(self, version, state_interaction_ids_dict): + def __init__( + self, + version: int, + state_interaction_ids_dict: Dict[str, str] + ) -> None: """Initialises an VersionedExplorationInteractionIdsMapping domain object. @@ -547,16 +1214,112 @@ def __init__(self, version, state_interaction_ids_dict): self.state_interaction_ids_dict = state_interaction_ids_dict -class Exploration: +class ExplorationDict(TypedDict): + """Dictionary representing the Exploration object.""" + + id: str + title: str + category: str + objective: str + language_code: str + tags: List[str] + blurb: str + author_notes: str + states_schema_version: int + init_state_name: str + states: Dict[str, state_domain.StateDict] + param_specs: Dict[str, param_domain.ParamSpecDict] + param_changes: List[param_domain.ParamChangeDict] + auto_tts_enabled: bool + correctness_feedback_enabled: bool + edits_allowed: bool + next_content_id_index: int + + +class VersionedExplorationDict(ExplorationDict): + """Dictionary representing versioned Exploration object.""" + + schema_version: int + + +class ExplorationPlayerDict(TypedDict): + """Dictionary representing Exploration for learner view.""" + + init_state_name: str + param_changes: List[param_domain.ParamChangeDict] + param_specs: Dict[str, 
param_domain.ParamSpecDict] + states: Dict[str, state_domain.StateDict] + title: str + objective: str + language_code: str + correctness_feedback_enabled: bool + next_content_id_index: int + + +class VersionedExplorationStatesDict(TypedDict): + """Dictionary representing the versioned Exploration state.""" + + states_schema_version: int + states: Dict[str, state_domain.StateDict] + + +class SerializableExplorationDict(ExplorationDict): + """Dictionary representing the serializable Exploration object.""" + + version: int + created_on: str + last_updated: str + + +class RangeVariableDict(TypedDict): + """Dictionary representing the range variable for the NumericInput + interaction. + """ + + ans_group_index: int + rule_spec_index: int + lower_bound: Optional[float] + upper_bound: Optional[float] + lb_inclusive: bool + ub_inclusive: bool + + +class MatchedDenominatorDict(TypedDict): + """Dictionary representing the matched denominator variable for the + FractionInput interaction. + """ + + ans_group_index: int + rule_spec_index: int + denominator: int + + +class Exploration(translation_domain.BaseTranslatableObject): """Domain object for an Oppia exploration.""" def __init__( - self, exploration_id, title, category, objective, - language_code, tags, blurb, author_notes, - states_schema_version, init_state_name, states_dict, - param_specs_dict, param_changes_list, version, - auto_tts_enabled, correctness_feedback_enabled, - created_on=None, last_updated=None): + self, + exploration_id: str, + title: str, + category: str, + objective: str, + language_code: str, + tags: List[str], + blurb: str, + author_notes: str, + states_schema_version: int, + init_state_name: str, + states_dict: Dict[str, state_domain.StateDict], + param_specs_dict: Dict[str, param_domain.ParamSpecDict], + param_changes_list: List[param_domain.ParamChangeDict], + version: int, + auto_tts_enabled: bool, + correctness_feedback_enabled: bool, + next_content_id_index: int, + edits_allowed: bool, + 
created_on: Optional[datetime.datetime] = None, + last_updated: Optional[datetime.datetime] = None + ) -> None: """Initializes an Exploration domain object. Args: @@ -584,6 +1347,9 @@ def __init__( enabled. correctness_feedback_enabled: bool. True if correctness feedback is enabled. + next_content_id_index: int. The next content_id index to use for + generation of new content_ids. + edits_allowed: bool. True when edits to the exploration is allowed. created_on: datetime.datetime. Date and time when the exploration is created. last_updated: datetime.datetime. Date and time when the exploration @@ -600,7 +1366,7 @@ def __init__( self.states_schema_version = states_schema_version self.init_state_name = init_state_name - self.states = {} + self.states: Dict[str, state_domain.State] = {} for (state_name, state_dict) in states_dict.items(): self.states[state_name] = state_domain.State.from_dict(state_dict) @@ -617,14 +1383,39 @@ def __init__( self.last_updated = last_updated self.auto_tts_enabled = auto_tts_enabled self.correctness_feedback_enabled = correctness_feedback_enabled + self.next_content_id_index = next_content_id_index + self.edits_allowed = edits_allowed + + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the exploration. + + Returns: + TranslatableContentsCollection. An instance of + TranslatableContentsCollection class. 
+ """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + for state in self.states.values(): + ( + translatable_contents_collection + .add_fields_from_translatable_object(state) + ) + return translatable_contents_collection @classmethod def create_default_exploration( - cls, exploration_id, title=feconf.DEFAULT_EXPLORATION_TITLE, - init_state_name=feconf.DEFAULT_INIT_STATE_NAME, - category=feconf.DEFAULT_EXPLORATION_CATEGORY, - objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE, - language_code=constants.DEFAULT_LANGUAGE_CODE): + cls, + exploration_id: str, + title: str = feconf.DEFAULT_EXPLORATION_TITLE, + init_state_name: str = feconf.DEFAULT_INIT_STATE_NAME, + category: str = feconf.DEFAULT_EXPLORATION_CATEGORY, + objective: str = feconf.DEFAULT_EXPLORATION_OBJECTIVE, + language_code: str = constants.DEFAULT_LANGUAGE_CODE + ) -> Exploration: """Returns a Exploration domain object with default values. 'title', 'init_state_name', 'category', 'objective' if not provided are @@ -646,8 +1437,14 @@ def create_default_exploration( Exploration. The Exploration domain object with default values. 
""" + content_id_generator = translation_domain.ContentIdGenerator() init_state_dict = state_domain.State.create_default_state( - init_state_name, is_initial_state=True).to_dict() + init_state_name, + content_id_generator.generate( + translation_domain.ContentType.CONTENT), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + is_initial_state=True).to_dict() states_dict = { init_state_name: init_state_dict @@ -657,13 +1454,18 @@ def create_default_exploration( exploration_id, title, category, objective, language_code, [], '', '', feconf.CURRENT_STATE_SCHEMA_VERSION, init_state_name, states_dict, {}, [], 0, - feconf.DEFAULT_AUTO_TTS_ENABLED, False) + feconf.DEFAULT_AUTO_TTS_ENABLED, + feconf.DEFAULT_CORRECTNESS_FEEDBACK_ENABLED, + content_id_generator.next_content_id_index, True) @classmethod def from_dict( - cls, exploration_dict, - exploration_version=0, exploration_created_on=None, - exploration_last_updated=None): + cls, + exploration_dict: ExplorationDict, + exploration_version: int = 0, + exploration_created_on: Optional[datetime.datetime] = None, + exploration_last_updated: Optional[datetime.datetime] = None + ) -> Exploration: """Return a Exploration domain object from a dict. Args: @@ -677,6 +1479,10 @@ def from_dict( Returns: Exploration. The corresponding Exploration domain object. + + Raises: + Exception. Some parameter was used in a state but not declared + in the Exploration dict. """ # NOTE TO DEVELOPERS: It is absolutely ESSENTIAL this conversion to and # from an ExplorationModel/dictionary MUST be exhaustive and complete. 
@@ -692,6 +1498,9 @@ def from_dict(
         exploration.auto_tts_enabled = exploration_dict['auto_tts_enabled']
         exploration.correctness_feedback_enabled = exploration_dict[
             'correctness_feedback_enabled']
+        exploration.next_content_id_index = exploration_dict[
+            'next_content_id_index']
+        exploration.edits_allowed = exploration_dict['edits_allowed']
 
         exploration.param_specs = {
             ps_name: param_domain.ParamSpec.from_dict(ps_val) for
@@ -702,11 +1511,15 @@
             'states_schema_version']
         init_state_name = exploration_dict['init_state_name']
         exploration.rename_state(exploration.init_state_name, init_state_name)
-        exploration.add_states([
-            state_name for state_name in exploration_dict['states']
-            if state_name != init_state_name])
 
         for (state_name, sdict) in exploration_dict['states'].items():
+            if state_name != init_state_name:
+                exploration.add_state(
+                    state_name,
+                    # These are placeholder values which will be replaced with
+                    # correct values below.
+                    '', '')
+
             state = exploration.states[state_name]
 
             state.content = state_domain.SubtitledHtml(
@@ -734,7 +1547,9 @@
 
             solution = (
                 state_domain.Solution.from_dict(idict['id'], idict['solution'])
-                if idict['solution'] else None)
+                if idict['solution'] is not None and idict['id'] is not None
+                else None
+            )
 
             customization_args = (
                 state_domain.InteractionInstance.
@@ -754,12 +1569,6 @@
                 state_domain.RecordedVoiceovers.from_dict(
                     sdict['recorded_voiceovers']))
 
-            state.written_translations = (
-                state_domain.WrittenTranslations.from_dict(
-                    sdict['written_translations']))
-
-            state.next_content_id_index = sdict['next_content_id_index']
-
             state.linked_skill_id = sdict['linked_skill_id']
 
             state.solicit_answer_details = sdict['solicit_answer_details']
@@ -779,7 +1588,7 @@
         return exploration
 
     @classmethod
-    def _validate_state_name(cls, name):
+    def _validate_state_name(cls, name: str) -> None:
         """Validates name string.
Args: @@ -787,7 +1596,7 @@ def _validate_state_name(cls, name): """ utils.require_valid_name(name, 'a state name') - def validate(self, strict=False): + def validate(self, strict: bool = False) -> None: """Validates various properties of the Exploration. Args: @@ -851,6 +1660,7 @@ def validate(self, strict=False): raise utils.ValidationError( 'Adjacent whitespace in tags should be collapsed, ' 'received \'%s\'' % tag) + if len(set(self.tags)) != len(self.tags): raise utils.ValidationError('Some tags duplicate each other') @@ -872,7 +1682,9 @@ def validate(self, strict=False): self._validate_state_name(state_name) state.validate( self.param_specs, - allow_null_interaction=not strict) + allow_null_interaction=not strict, + tagged_skill_misconception_id_required=False, + strict=strict) # The checks below perform validation on the Outcome domain object # that is specific to answer groups in explorations, but not # questions. This logic is here because the validation checks in @@ -886,6 +1698,15 @@ def validate(self, strict=False): raise utils.ValidationError( 'Expected outcome dest to be a string, received %s' % answer_group.outcome.dest) + + outcome = answer_group.outcome + if outcome.dest_if_really_stuck is not None: + if not isinstance(outcome.dest_if_really_stuck, str): + raise utils.ValidationError( + 'Expected dest_if_really_stuck to be a ' + 'string, received %s' % + outcome.dest_if_really_stuck) + if state.interaction.default_outcome is not None: if not state.interaction.default_outcome.dest: raise utils.ValidationError( @@ -895,6 +1716,16 @@ def validate(self, strict=False): 'Expected outcome dest to be a string, received %s' % state.interaction.default_outcome.dest) + interaction_default_outcome = state.interaction.default_outcome + if interaction_default_outcome.dest_if_really_stuck is not None: + if not isinstance( + interaction_default_outcome.dest_if_really_stuck, str + ): + raise utils.ValidationError( + 'Expected dest_if_really_stuck to be a ' + 
'string, received %s' + % interaction_default_outcome.dest_if_really_stuck) + if self.states_schema_version is None: raise utils.ValidationError( 'This exploration has no states schema version.') @@ -922,6 +1753,19 @@ def validate(self, strict=False): 'Expected correctness_feedback_enabled to be a bool, received ' '%s' % self.correctness_feedback_enabled) + if not isinstance(self.next_content_id_index, int): + raise utils.ValidationError( + 'Expected next_content_id_index to be an int, received ' + '%s' % self.next_content_id_index) + + # Validates translatable contents in the exploration. + self.validate_translatable_contents(self.next_content_id_index) + + if not isinstance(self.edits_allowed, bool): + raise utils.ValidationError( + 'Expected edits_allowed to be a bool, received ' + '%s' % self.edits_allowed) + for param_name in self.param_specs: if not isinstance(param_name, str): raise utils.ValidationError( @@ -985,10 +1829,22 @@ def validate(self, strict=False): 'The destination %s is not a valid state.' % default_outcome.dest) + # Check default if-stuck destinations. + if ( + default_outcome.dest_if_really_stuck is not None and + default_outcome.dest_if_really_stuck not in all_state_names + ): + raise utils.ValidationError( + 'The destination for the stuck learner %s ' + 'is not a valid state.' + % default_outcome.dest_if_really_stuck) + # Check that, if the outcome is a non-self-loop, then the # refresher_exploration_id is None. - if (default_outcome.refresher_exploration_id is not None and - default_outcome.dest != state_name): + if ( + default_outcome.refresher_exploration_id is not None and + default_outcome.dest != state_name + ): raise utils.ValidationError( 'The default outcome for state %s has a refresher ' 'exploration ID, but is not a self-loop.' % state_name) @@ -1000,10 +1856,22 @@ def validate(self, strict=False): 'The destination %s is not a valid state.' % group.outcome.dest) + # Check group if-stuck destinations. 
+ if ( + group.outcome.dest_if_really_stuck is not None and + group.outcome.dest_if_really_stuck not in all_state_names + ): + raise utils.ValidationError( + 'The destination for the stuck learner %s ' + 'is not a valid state.' + % group.outcome.dest_if_really_stuck) + # Check that, if the outcome is a non-self-loop, then the # refresher_exploration_id is None. - if (group.outcome.refresher_exploration_id is not None and - group.outcome.dest != state_name): + if ( + group.outcome.refresher_exploration_id is not None and + group.outcome.dest != state_name + ): raise utils.ValidationError( 'The outcome for an answer group in state %s has a ' 'refresher exploration ID, but is not a self-loop.' @@ -1089,6 +1957,10 @@ def validate(self, strict=False): curr_state.interaction.get_all_outcomes()) for outcome in all_outcomes: dest_state = outcome.dest + # Ruling out the possibility of None for mypy type + # checking, because above we are already validating + # if outcome exists then it should have destination. + assert dest_state is not None if self.states[dest_state].interaction.is_terminal: excluded_state_is_bypassable = True break @@ -1132,8 +2004,10 @@ def validate(self, strict=False): if default_outcome is not None: # Check that, if the outcome is a self-loop, then the # outcome is not labelled as correct. - if (default_outcome.dest == state_name and - default_outcome.labelled_as_correct): + if ( + default_outcome.dest == state_name and + default_outcome.labelled_as_correct + ): raise utils.ValidationError( 'The default outcome for state %s is labelled ' 'correct but is a self-loop.' % state_name) @@ -1141,12 +2015,23 @@ def validate(self, strict=False): for group in interaction.answer_groups: # Check that, if the outcome is a self-loop, then the # outcome is not labelled as correct. 
- if (group.outcome.dest == state_name and - group.outcome.labelled_as_correct): + if ( + group.outcome.dest == state_name and + group.outcome.labelled_as_correct + ): raise utils.ValidationError( 'The outcome for an answer group in state %s is ' 'labelled correct but is a self-loop.' % state_name) + if ( + group.outcome.labelled_as_correct and + group.outcome.dest_if_really_stuck is not None + ): + raise utils.ValidationError( + 'The outcome for the state is labelled ' + 'correct but a destination for the stuck learner ' + 'is specified.') + if len(warnings_list) > 0: warning_str = '' for ind, warning in enumerate(warnings_list): @@ -1155,7 +2040,7 @@ def validate(self, strict=False): 'Please fix the following issues before saving this ' 'exploration: %s' % warning_str) - def _verify_all_states_reachable(self): + def _verify_all_states_reachable(self) -> None: """Verifies that all states are reachable from the initial state. Raises: @@ -1179,9 +2064,19 @@ def _verify_all_states_reachable(self): all_outcomes = curr_state.interaction.get_all_outcomes() for outcome in all_outcomes: dest_state = outcome.dest - if (dest_state not in curr_queue and - dest_state not in processed_queue): + dest_if_stuck_state = outcome.dest_if_really_stuck + if ( + dest_state is not None and + dest_state not in curr_queue and + dest_state not in processed_queue + ): curr_queue.append(dest_state) + if ( + dest_if_stuck_state is not None and + dest_if_stuck_state not in curr_queue and + dest_if_stuck_state not in processed_queue + ): + curr_queue.append(dest_if_stuck_state) if len(self.states) != len(processed_queue): unseen_states = list( @@ -1190,7 +2085,7 @@ def _verify_all_states_reachable(self): 'The following states are not reachable from the initial ' 'state: %s' % ', '.join(unseen_states)) - def _verify_no_dead_ends(self): + def _verify_no_dead_ends(self) -> None: """Verifies that all states can reach a terminal state. 
Raises: @@ -1225,11 +2120,15 @@ def _verify_no_dead_ends(self): if len(self.states) != len(processed_queue): dead_end_states = list( set(self.states.keys()) - set(processed_queue)) + sorted_dead_end_states = sorted(dead_end_states) raise utils.ValidationError( 'It is impossible to complete the exploration from the ' - 'following states: %s' % ', '.join(dead_end_states)) + 'following states: %s' % ', '.join(sorted_dead_end_states) + ) - def get_content_html(self, state_name, content_id): + def get_content_html( + self, state_name: str, content_id: str + ) -> Union[str, List[str]]: """Return the content for a given content id of a state. Args: @@ -1250,7 +2149,7 @@ def get_content_html(self, state_name, content_id): # Derived attributes of an exploration. @property - def init_state(self): + def init_state(self) -> state_domain.State: """The state which forms the start of this exploration. Returns: @@ -1259,7 +2158,7 @@ def init_state(self): return self.states[self.init_state_name] @property - def param_specs_dict(self): + def param_specs_dict(self) -> Dict[str, param_domain.ParamSpecDict]: """A dict of param specs, each represented as Python dicts. Returns: @@ -1269,7 +2168,7 @@ def param_specs_dict(self): for (ps_name, ps_val) in self.param_specs.items()} @property - def param_change_dicts(self): + def param_change_dicts(self) -> List[param_domain.ParamChangeDict]: """A list of param changes, represented as JSONifiable Python dicts. Returns: @@ -1278,7 +2177,7 @@ def param_change_dicts(self): return [param_change.to_dict() for param_change in self.param_changes] @classmethod - def is_demo_exploration_id(cls, exploration_id): + def is_demo_exploration_id(cls, exploration_id: str) -> bool: """Whether the given exploration id is a demo exploration. 
Args: @@ -1290,7 +2189,7 @@ def is_demo_exploration_id(cls, exploration_id): return exploration_id in feconf.DEMO_EXPLORATIONS @property - def is_demo(self): + def is_demo(self) -> bool: """Whether the exploration is one of the demo explorations. Returns: @@ -1298,7 +2197,7 @@ def is_demo(self): """ return self.is_demo_exploration_id(self.id) - def has_state_name(self, state_name): + def has_state_name(self, state_name: str) -> bool: """Whether the exploration has a state with the given state name. Args: @@ -1310,18 +2209,20 @@ def has_state_name(self, state_name): state_names = list(self.states.keys()) return state_name in state_names - def get_interaction_id_by_state_name(self, state_name): + def get_interaction_id_by_state_name( + self, state_name: str + ) -> Optional[str]: """Returns the interaction id of the state. Args: state_name: str. The name of the state. Returns: - str or None. The ID of the interaction. + str|None. The ID of the interaction. """ return self.states[state_name].interaction.id - def update_title(self, title): + def update_title(self, title: str) -> None: """Update the exploration title. Args: @@ -1329,7 +2230,7 @@ def update_title(self, title): """ self.title = title - def update_category(self, category): + def update_category(self, category: str) -> None: """Update the exploration category. Args: @@ -1337,7 +2238,7 @@ def update_category(self, category): """ self.category = category - def update_objective(self, objective): + def update_objective(self, objective: str) -> None: """Update the exploration objective. Args: @@ -1345,7 +2246,7 @@ def update_objective(self, objective): """ self.objective = objective - def update_language_code(self, language_code): + def update_language_code(self, language_code: str) -> None: """Update the exploration language code. 
Args: @@ -1353,7 +2254,7 @@ def update_language_code(self, language_code): """ self.language_code = language_code - def update_tags(self, tags): + def update_tags(self, tags: List[str]) -> None: """Update the tags of the exploration. Args: @@ -1361,7 +2262,7 @@ def update_tags(self, tags): """ self.tags = tags - def update_blurb(self, blurb): + def update_blurb(self, blurb: str) -> None: """Update the blurb of the exploration. Args: @@ -1369,7 +2270,7 @@ def update_blurb(self, blurb): """ self.blurb = blurb - def update_author_notes(self, author_notes): + def update_author_notes(self, author_notes: str) -> None: """Update the author notes of the exploration. Args: @@ -1377,7 +2278,9 @@ def update_author_notes(self, author_notes): """ self.author_notes = author_notes - def update_param_specs(self, param_specs_dict): + def update_param_specs( + self, param_specs_dict: Dict[str, param_domain.ParamSpecDict] + ) -> None: """Update the param spec dict. Args: @@ -1390,7 +2293,9 @@ def update_param_specs(self, param_specs_dict): for (ps_name, ps_val) in param_specs_dict.items() } - def update_param_changes(self, param_changes): + def update_param_changes( + self, param_changes: List[param_domain.ParamChange] + ) -> None: """Update the param change dict. Args: @@ -1398,11 +2303,14 @@ def update_param_changes(self, param_changes): """ self.param_changes = param_changes - def update_init_state_name(self, init_state_name): + def update_init_state_name(self, init_state_name: str) -> None: """Update the name for the initial state of the exploration. Args: init_state_name: str. The new name of the initial state. + + Raises: + Exception. Invalid initial state name. 
""" old_init_state_name = self.init_state_name if init_state_name not in self.states: @@ -1415,7 +2323,7 @@ def update_init_state_name(self, init_state_name): self.states[old_init_state_name].card_is_checkpoint = False self.init_state.card_is_checkpoint = True - def update_auto_tts_enabled(self, auto_tts_enabled): + def update_auto_tts_enabled(self, auto_tts_enabled: bool) -> None: """Update whether automatic text-to-speech is enabled. Args: @@ -1424,7 +2332,9 @@ def update_auto_tts_enabled(self, auto_tts_enabled): """ self.auto_tts_enabled = auto_tts_enabled - def update_correctness_feedback_enabled(self, correctness_feedback_enabled): + def update_correctness_feedback_enabled( + self, correctness_feedback_enabled: bool + ) -> None: """Update whether correctness feedback is enabled. Args: @@ -1433,26 +2343,57 @@ def update_correctness_feedback_enabled(self, correctness_feedback_enabled): """ self.correctness_feedback_enabled = correctness_feedback_enabled - # Methods relating to states. - def add_states(self, state_names): - """Adds multiple states to the exploration. + def update_next_content_id_index(self, next_content_id_index: int) -> None: + """Update the interaction next content id index attribute. Args: - state_names: list(str). List of state names to add. + next_content_id_index: int. The new next content id index to set. + """ + self.next_content_id_index = next_content_id_index - Raises: - ValueError. At least one of the new state names already exists in - the states dict. + def add_states(self, state_names: List[str]) -> None: + """Adds new states in the exploration with the given state names. + + Args: + state_names: list(str). The new state name. 
""" + content_id_generator = translation_domain.ContentIdGenerator( + self.next_content_id_index) for state_name in state_names: - if state_name in self.states: - raise ValueError('Duplicate state name %s' % state_name) + self.add_state( + state_name, + content_id_generator.generate( + translation_domain.ContentType.CONTENT), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME)) + self.next_content_id_index = content_id_generator.next_content_id_index + + def add_state( + self, + state_name: str, + content_id_for_state_content: str, + content_id_for_default_outcome: str + ) -> None: + """Adds new state in the exploration with the given state name. - for state_name in state_names: - self.states[state_name] = state_domain.State.create_default_state( - state_name) + Args: + state_name: str. The new state name. + content_id_for_state_content: str. The content_id for the new state + content. + content_id_for_default_outcome: str. The content_id for the default + outcome of the new state. + + Raises: + ValueError. State names cannot be duplicate. + """ + if state_name in self.states: + raise ValueError('Duplicate state name %s' % state_name) - def rename_state(self, old_state_name, new_state_name): + self.states[state_name] = state_domain.State.create_default_state( + state_name, content_id_for_state_content, + content_id_for_default_outcome) + + def rename_state(self, old_state_name: str, new_state_name: str) -> None: """Renames the given state. Args: @@ -1488,7 +2429,7 @@ def rename_state(self, old_state_name, new_state_name): if outcome.dest == old_state_name: outcome.dest = new_state_name - def delete_state(self, state_name): + def delete_state(self, state_name: str) -> None: """Deletes the given state. 
Args: @@ -1512,30 +2453,16 @@ def delete_state(self, state_name): for outcome in all_outcomes: if outcome.dest == state_name: outcome.dest = other_state_name + if outcome and outcome.dest_if_really_stuck == state_name: + outcome.dest_if_really_stuck = other_state_name del self.states[state_name] - def get_translatable_text(self, language_code): - """Returns all the contents which needs translation in the given - language. - - Args: - language_code: str. The language code in which translation is - required. - - Returns: - dict(str, dict(str, str)). A dict where state_name is the key and a - dict with content_id as the key and html content as value. - """ - state_names_to_content_id_mapping = {} - for state_name, state in self.states.items(): - state_names_to_content_id_mapping[state_name] = ( - state.get_content_id_mapping_needing_translations( - language_code)) - - return state_names_to_content_id_mapping - - def get_trainable_states_dict(self, old_states, exp_versions_diff): + def get_trainable_states_dict( + self, + old_states: Dict[str, state_domain.State], + exp_versions_diff: ExplorationVersionsDiff + ) -> Dict[str, List[str]]: """Retrieves the state names of all trainable states in an exploration segregated into state names with changed and unchanged answer groups. In this method, the new_state_name refers to the name of the state in @@ -1552,7 +2479,7 @@ def get_trainable_states_dict(self, old_states, exp_versions_diff): representing state names with changed answer groups and unchanged answer groups respectively. """ - trainable_states_dict = { + trainable_states_dict: Dict[str, List[str]] = { 'state_names_with_changed_answer_groups': [], 'state_names_with_unchanged_answer_groups': [] } @@ -1595,58 +2522,20 @@ def get_trainable_states_dict(self, old_states, exp_versions_diff): return trainable_states_dict - def get_languages_with_complete_translation(self): - """Returns a list of language code in which the exploration translation - is 100%. 
- - Returns: - list(str). A list of language code in which the translation for the - exploration is complete i.e, 100%. - """ - content_count = self.get_content_count() - language_code_list = [] - for language_code, count in self.get_translation_counts().items(): - if count == content_count: - language_code_list.append(language_code) - - return language_code_list - - def get_translation_counts(self): - """Returns a dict representing the number of translations available in a - language for which there exists at least one translation in the - exploration. - - Returns: - dict(str, int). A dict with language code as a key and number of - translation available in that language as the value. - """ - exploration_translation_counts = collections.defaultdict(int) - for state in self.states.values(): - state_translation_counts = state.get_translation_counts() - for language, count in state_translation_counts.items(): - exploration_translation_counts[language] += count - - return dict(exploration_translation_counts) - - def get_content_count(self): - """Returns the total number of distinct content fields available in the - exploration which are user facing and can be translated into - different languages. - - (The content field includes state content, feedback, hints, solutions.) - - Returns: - int. The total number of distinct content fields available inside - the exploration. - """ - content_count = 0 - for state in self.states.values(): - content_count += state.get_translatable_content_count() - - return content_count + def get_metadata(self) -> ExplorationMetadata: + """Gets the ExplorationMetadata domain object for the exploration.""" + return ExplorationMetadata( + self.title, self. 
category, self.objective, self.language_code, + self.tags, self.blurb, self.author_notes, + self.states_schema_version, self.init_state_name, + self.param_specs, self.param_changes, self.auto_tts_enabled, + self.correctness_feedback_enabled, self.edits_allowed + ) @classmethod - def _convert_states_v41_dict_to_v42_dict(cls, states_dict): + def _convert_states_v41_dict_to_v42_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: """Converts from version 41 to 42. Version 42 changes rule input types for DragAndDropSortInput and ItemSelectionInput interactions to better support translations. Specifically, the rule inputs will store content @@ -1663,7 +2552,38 @@ def _convert_states_v41_dict_to_v42_dict(cls, states_dict): dict. The converted states_dict. """ - def migrate_rule_inputs_and_answers(new_type, value, choices): + @overload + def migrate_rule_inputs_and_answers( + new_type: str, + value: str, + choices: List[state_domain.SubtitledHtmlDict] + ) -> str: ... + + @overload + def migrate_rule_inputs_and_answers( + new_type: str, + value: List[str], + choices: List[state_domain.SubtitledHtmlDict] + ) -> List[str]: ... + + @overload + def migrate_rule_inputs_and_answers( + new_type: str, + value: List[List[str]], + choices: List[state_domain.SubtitledHtmlDict] + ) -> List[List[str]]: ... + + # Here we use MyPy ignore because MyPy expects a return value in + # every condition when we define a return type but here we are + # returning only in if-else conditions and we are not returning + # when none of the condition matches which causes MyPy to throw + # a 'Missing return statement' error. Thus to avoid the error, + # we used ignore here. 
+ def migrate_rule_inputs_and_answers( # type: ignore[return] + new_type: str, + value: Union[List[List[str]], List[str], str], + choices: List[state_domain.SubtitledHtmlDict] + ) -> Union[List[List[str]], List[str], str]: """Migrates SetOfHtmlString to SetOfTranslatableHtmlContentIds, ListOfSetsOfHtmlStrings to ListOfSetsOfTranslatableHtmlContentIds, and DragAndDropHtmlString to TranslatableHtmlContentId. These @@ -1681,7 +2601,7 @@ def migrate_rule_inputs_and_answers(new_type, value, choices): *. The migrated rule input. """ - def extract_content_id_from_choices(html): + def extract_content_id_from_choices(html: str) -> str: """Given a html, find its associated content id in choices, which is a list of subtitled html dicts. @@ -1700,18 +2620,29 @@ def extract_content_id_from_choices(html): return feconf.INVALID_CONTENT_ID if new_type == 'TranslatableHtmlContentId': + # Here 'TranslatableHtmlContentId' can only be of str type, thus + # to narrow down the type we used assert here. + assert isinstance(value, str) return extract_content_id_from_choices(value) elif new_type == 'SetOfTranslatableHtmlContentIds': + # Here we use cast because this 'elif' condition forces value + # to have type List[str]. + set_of_content_ids = cast(List[str], value) return [ migrate_rule_inputs_and_answers( 'TranslatableHtmlContentId', html, choices - ) for html in value + ) for html in set_of_content_ids ] elif new_type == 'ListOfSetsOfTranslatableHtmlContentIds': + # Here we use cast because this 'elif' condition forces value + # to have type List[List[str]]. 
+ list_of_set_of_content_ids = cast( + List[List[str]], value + ) return [ migrate_rule_inputs_and_answers( 'SetOfTranslatableHtmlContentIds', html_set, choices - ) for html_set in value + ) for html_set in list_of_set_of_content_ids ] for state_dict in states_dict.values(): @@ -1721,26 +2652,52 @@ def extract_content_id_from_choices(html): continue solution = state_dict['interaction']['solution'] - choices = state_dict['interaction']['customization_args'][ - 'choices']['value'] + # Here we use cast because we are narrowing down the type from + # various customization args value types to List[SubtitledHtmlDict] + # type, and this is done because here we are accessing 'choices' key + # over 'DragAndDropSortInput' and 'ItemSelectionInput' customization + # args and in these customization args 'choices' key will only have + # values of type List[SubtitledHtmlDict]. + choices = cast( + List[state_domain.SubtitledHtmlDict], + state_dict['interaction']['customization_args']['choices'][ + 'value' + ] + ) if interaction_id == 'ItemSelectionInput': # The solution type will be migrated from SetOfHtmlString to # SetOfTranslatableHtmlContentIds. if solution is not None: + # Ruling out the possibility of any other type for MyPy type + # checking because for interaction 'ItemSelectionInput', + # the correct_answer is formatted as List[str] type. + assert isinstance(solution['correct_answer'], list) + list_of_html_contents = [] + for html_content in solution['correct_answer']: + assert isinstance(html_content, str) + list_of_html_contents.append(html_content) solution['correct_answer'] = ( migrate_rule_inputs_and_answers( 'SetOfTranslatableHtmlContentIds', - solution['correct_answer'], + list_of_html_contents, choices) ) if interaction_id == 'DragAndDropSortInput': # The solution type will be migrated from ListOfSetsOfHtmlString # to ListOfSetsOfTranslatableHtmlContentIds. 
if solution is not None: + # Ruling out the possibility of any other type for MyPy type + # checking because for interaction 'DragAndDropSortInput', + # the correct_answer is formatted as List[List[str]] type. + assert isinstance(solution['correct_answer'], list) + list_of_html_content_list = [] + for html_content_list in solution['correct_answer']: + assert isinstance(html_content_list, list) + list_of_html_content_list.append(html_content_list) solution['correct_answer'] = ( migrate_rule_inputs_and_answers( 'ListOfSetsOfTranslatableHtmlContentIds', - solution['correct_answer'], + list_of_html_content_list, choices) ) @@ -1753,9 +2710,18 @@ def extract_content_id_from_choices(html): # All rule inputs for ItemSelectionInput will be # migrated from SetOfHtmlString to # SetOfTranslatableHtmlContentIds. + # Ruling out the possibility of any other type + # for MyPy type checking because for interaction + # 'ItemSelectionInput', the rule inputs are formatted + # as List[str] type. + assert isinstance(rule_inputs['x'], list) + list_of_html_contents = [] + for html_content in rule_inputs['x']: + assert isinstance(html_content, str) + list_of_html_contents.append(html_content) rule_inputs['x'] = migrate_rule_inputs_and_answers( 'SetOfTranslatableHtmlContentIds', - rule_inputs['x'], + list_of_html_contents, choices) if interaction_id == 'DragAndDropSortInput': rule_types_with_list_of_sets = [ @@ -1768,9 +2734,20 @@ def extract_content_id_from_choices(html): # the x input will be migrated from # ListOfSetsOfHtmlStrings to # ListOfSetsOfTranslatableHtmlContentIds. + # Ruling out the possibility of any other type + # for MyPy type checking because for interaction + # 'DragAndDropSortInput', the rule inputs are + # formatted as List[List[str]] type. 
+ assert isinstance(rule_inputs['x'], list) + list_of_html_content_list = [] + for html_content_list in rule_inputs['x']: + assert isinstance(html_content_list, list) + list_of_html_content_list.append( + html_content_list + ) rule_inputs['x'] = migrate_rule_inputs_and_answers( 'ListOfSetsOfTranslatableHtmlContentIds', - rule_inputs['x'], + list_of_html_content_list, choices) elif rule_type == 'HasElementXAtPositionY': # For rule type HasElementXAtPositionY, @@ -1778,6 +2755,11 @@ def extract_content_id_from_choices(html): # DragAndDropHtmlString to # TranslatableHtmlContentId, and the y input will # remain as DragAndDropPositiveInt. + # Ruling out the possibility of any other type + # for MyPy type checking because for interaction + # 'HasElementXAtPositionY', the rule inputs are + # formatted as str type. + assert isinstance(rule_inputs['x'], str) rule_inputs['x'] = migrate_rule_inputs_and_answers( 'TranslatableHtmlContentId', rule_inputs['x'], @@ -1788,16 +2770,24 @@ def extract_content_id_from_choices(html): # DragAndDropHtmlString to # TranslatableHtmlContentId. for rule_input_name in ['x', 'y']: + rule_input_value = rule_inputs[rule_input_name] + # Ruling out the possibility of any other type + # for MyPy type checking because for interaction + # 'HasElementXBeforeElementY', the rule inputs + # are formatted as str type. + assert isinstance(rule_input_value, str) rule_inputs[rule_input_name] = ( migrate_rule_inputs_and_answers( 'TranslatableHtmlContentId', - rule_inputs[rule_input_name], + rule_input_value, choices)) return states_dict @classmethod - def _convert_states_v42_dict_to_v43_dict(cls, states_dict): + def _convert_states_v42_dict_to_v43_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: """Converts from version 42 to 43. Version 43 adds a new customization arg to NumericExpressionInput, AlgebraicExpressionInput, and MathEquationInput. 
The customization arg will allow creators to choose @@ -1829,7 +2819,11 @@ def _convert_states_v42_dict_to_v43_dict(cls, states_dict): return states_dict @classmethod - def _convert_states_v43_dict_to_v44_dict(cls, states_dict, init_state_name): + def _convert_states_v43_dict_to_v44_dict( + cls, + states_dict: Dict[str, state_domain.StateDict], + init_state_name: str + ) -> Dict[str, state_domain.StateDict]: """Converts from version 43 to version 44. Version 44 adds card_is_checkpoint boolean to the state, which allows creators to mark a state as a checkpoint for the learners @@ -1849,7 +2843,9 @@ def _convert_states_v43_dict_to_v44_dict(cls, states_dict, init_state_name): return states_dict @classmethod - def _convert_states_v44_dict_to_v45_dict(cls, states_dict): + def _convert_states_v44_dict_to_v45_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: """Converts from version 44 to 45. Version 45 contains linked skill id. @@ -1867,7 +2863,9 @@ def _convert_states_v44_dict_to_v45_dict(cls, states_dict): return states_dict @classmethod - def _convert_states_v45_dict_to_v46_dict(cls, states_dict): + def _convert_states_v45_dict_to_v46_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: """Converts from version 45 to 46. Version 46 ensures that the written translations in a state containing unicode content do not contain HTML tags and the data_format is unicode. 
@@ -1890,7 +2888,8 @@ def _convert_states_v45_dict_to_v46_dict(cls, states_dict): state_domain.InteractionInstance .convert_customization_args_dict_to_customization_args( state_dict['interaction']['id'], - state_dict['interaction']['customization_args'])) + state_dict['interaction']['customization_args'], + state_schema_version=45)) for ca_name in customisation_args: list_of_subtitled_unicode_content_ids.extend( state_domain.InteractionCustomizationArg @@ -1903,7 +2902,9 @@ def _convert_states_v45_dict_to_v46_dict(cls, states_dict): ) ) translations_mapping = ( - state_dict['written_translations']['translations_mapping']) + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contain written_translations property. + state_dict['written_translations']['translations_mapping']) # type: ignore[misc] for content_id in translations_mapping: if content_id in list_of_subtitled_unicode_content_ids: for language_code in translations_mapping[content_id]: @@ -1911,13 +2912,21 @@ def _convert_states_v45_dict_to_v46_dict(cls, states_dict): translations_mapping[content_id][language_code]) written_translation['data_format'] = ( schema_utils.SCHEMA_TYPE_UNICODE) + # Here, we are narrowing down the type from + # Union[List[str], str] to str. + assert isinstance( + written_translation['translation'], + str + ) written_translation['translation'] = ( html_cleaner.strip_html_tags( written_translation['translation'])) return states_dict @classmethod - def _convert_states_v46_dict_to_v47_dict(cls, states_dict): + def _convert_states_v46_dict_to_v47_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: """Converts from version 46 to 47. Version 47 deprecates oppia-noninteractive-svgdiagram tag and converts existing occurrences of it to oppia-noninteractive-image tag. 
@@ -1938,11 +2947,13 @@ def _convert_states_v46_dict_to_v47_dict(cls, states_dict): state_domain.State.convert_html_fields_in_state( state_dict, html_validation_service - .convert_svg_diagram_tags_to_image_tags) + .convert_svg_diagram_tags_to_image_tags, 46) return states_dict @classmethod - def _convert_states_v47_dict_to_v48_dict(cls, states_dict): + def _convert_states_v47_dict_to_v48_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: """Converts from version 47 to 48. Version 48 fixes encoding issues in HTML fields. @@ -1961,15 +2972,2051 @@ def _convert_states_v47_dict_to_v48_dict(cls, states_dict): if interaction_customisation_args: state_domain.State.convert_html_fields_in_state( state_dict, - html_validation_service.fix_incorrectly_encoded_chars) + html_validation_service.fix_incorrectly_encoded_chars, + state_schema_version=48) return states_dict @classmethod - def _convert_states_v48_dict_to_v49_dict(cls, states_dict): - """Converts from version 48 to 49. Version 49 adds - requireNonnegativeInput customization arg to NumericInput - interaction which allows creators to set input should be greater - than or equal to zero. + def _convert_states_v48_dict_to_v49_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: + """Converts from version 48 to 49. Version 49 adds + requireNonnegativeInput customization arg to NumericInput + interaction which allows creators to set input should be greater + than or equal to zero. + + Args: + states_dict: dict. A dict where each key-value pair represents, + respectively, a state name and a dict used to initialize a + State domain object. + + Returns: + dict. The converted states_dict. 
+ """ + + for state_dict in states_dict.values(): + if state_dict['interaction']['id'] == 'NumericInput': + customization_args = state_dict['interaction'][ + 'customization_args'] + customization_args.update({ + 'requireNonnegativeInput': { + 'value': False + } + }) + + return states_dict + + @classmethod + def _convert_states_v49_dict_to_v50_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: + """Converts from version 49 to 50. Version 50 removes rules from + explorations that use one of the following rules: + [ContainsSomeOf, OmitsSomeOf, MatchesWithGeneralForm]. It also renames + `customOskLetters` cust arg to `allowedVariables`. + + Args: + states_dict: dict. A dict where each key-value pair represents, + respectively, a state name and a dict used to initialize a + State domain object. + + Returns: + dict. The converted states_dict. + """ + for state_dict in states_dict.values(): + if state_dict['interaction']['id'] in MATH_INTERACTION_TYPES: + filtered_answer_groups = [] + for answer_group_dict in state_dict[ + 'interaction']['answer_groups']: + filtered_rule_specs = [] + for rule_spec_dict in answer_group_dict['rule_specs']: + rule_type = rule_spec_dict['rule_type'] + if rule_type not in MATH_INTERACTION_DEPRECATED_RULES: + filtered_rule_specs.append( + copy.deepcopy(rule_spec_dict)) + answer_group_dict['rule_specs'] = filtered_rule_specs + if len(filtered_rule_specs) > 0: + filtered_answer_groups.append( + copy.deepcopy(answer_group_dict)) + state_dict[ + 'interaction']['answer_groups'] = filtered_answer_groups + + # Renaming cust arg. 
+ if state_dict[ + 'interaction']['id'] in ALGEBRAIC_MATH_INTERACTIONS: + customization_args = state_dict[ + 'interaction']['customization_args'] + customization_args['allowedVariables'] = copy.deepcopy( + customization_args['customOskLetters']) + del customization_args['customOskLetters'] + + return states_dict + + @classmethod + def _convert_states_v50_dict_to_v51_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: + """Converts from version 50 to 51. Version 51 adds a new + dest_if_really_stuck field to Outcome class to redirect learners + to a state for strengthening concepts when they get really stuck. + + Args: + states_dict: dict. A dict where each key-value pair represents, + respectively, a state name and a dict used to initialize a + State domain object. + + Returns: + dict. The converted states_dict. + """ + for state_dict in states_dict.values(): + answer_groups = state_dict['interaction']['answer_groups'] + for answer_group in answer_groups: + answer_group['outcome']['dest_if_really_stuck'] = None + + if state_dict['interaction']['default_outcome'] is not None: + state_dict['interaction'][ + 'default_outcome']['dest_if_really_stuck'] = None + + return states_dict + + @classmethod + def _remove_unwanted_content_ids_from_translations_and_voiceovers_from_state_v51_or_v52( # pylint: disable=line-too-long + cls, state_dict: state_domain.StateDict, state_schema: int + ) -> None: + """Helper function to remove the content IDs from the translations + and voiceovers which are deleted from the state. + + Args: + state_dict: state_domain.StateDict. The state dictionary. + state_schema: int. The state schema from which we are using + this functionality. 
+ """ + interaction = state_dict['interaction'] + content_id_list = [state_dict['content']['content_id']] + + for answer_group in interaction['answer_groups']: + content_id_list.append( + answer_group['outcome']['feedback']['content_id'] + ) + + for rule_spec in answer_group['rule_specs']: + for param_name, value in rule_spec['inputs'].items(): + interaction_id = interaction['id'] + param_type = ( + interaction_registry.Registry.get_interaction_by_id( + interaction_id + ).get_rule_param_type( + rule_spec['rule_type'], param_name + ) + ) + + if issubclass( + param_type, objects.BaseTranslatableObject + ): + # We can assume that the value will be a dict, + # as the param_type is BaseTranslatableObject. + assert isinstance(value, dict) + content_id = value['contentId'] + # We can assume the contentId will be str, + # as the param_type is BaseTranslatableObject. + assert isinstance(content_id, str) + content_id_list.append(content_id) + + default_outcome = interaction['default_outcome'] + if default_outcome: + content_id_list.append( + default_outcome['feedback']['content_id']) + + for hint in interaction['hints']: + content_id_list.append(hint['hint_content']['content_id']) + + interaction_solution = interaction['solution'] + if interaction_solution: + content_id_list.append( + interaction_solution['explanation']['content_id']) + + if interaction['id'] is not None: + customisation_args = ( + state_domain.InteractionInstance + .convert_customization_args_dict_to_customization_args( + interaction['id'], + interaction['customization_args'], + state_schema_version=state_schema + ) + ) + for ca_name in customisation_args: + content_id_list.extend( + customisation_args[ca_name].get_content_ids() + ) + + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. 
+ translations_mapping = ( + state_dict['written_translations']['translations_mapping']) # type: ignore[misc] + new_translations_mapping = { + content_id: translation_item for + content_id, translation_item in translations_mapping.items() + if content_id in content_id_list + } + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + state_dict['written_translations']['translations_mapping'] = ( # type: ignore[misc] + new_translations_mapping) + + voiceovers_mapping = ( + state_dict['recorded_voiceovers']['voiceovers_mapping']) + new_voiceovers_mapping = {} + for content_id, voiceover_item in voiceovers_mapping.items(): + if content_id in content_id_list: + new_voiceovers_mapping[content_id] = voiceover_item + state_dict['recorded_voiceovers']['voiceovers_mapping'] = ( + new_voiceovers_mapping) + + @classmethod + def _convert_states_v51_dict_to_v52_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: + """Converts from version 51 to 52. Version 52 correctly updates + the content IDs for translations and for voiceovers. In the 49 to 50 + conversion we removed some interaction rules and thus also some parts of + the exploration that had its content IDs, but then the content IDs in + translations and voiceovers were not updated. + + Args: + states_dict: dict. A dict where each key-value pair represents, + respectively, a state name and a dict used to initialize a + State domain object. + + Returns: + dict. The converted states_dict. 
+ """ + for state_dict in states_dict.values(): + cls._remove_unwanted_content_ids_from_translations_and_voiceovers_from_state_v51_or_v52( # pylint: disable=line-too-long + state_dict, state_schema=51) + + return states_dict + + @classmethod + def _convert_states_v52_dict_to_v53_dict( + cls, + states_dict: Dict[str, state_domain.StateDict], + language_code: str + ) -> Dict[str, state_domain.StateDict]: + """Converts from version 52 to 53. Version 53 fixes all the backend + validation checks for explorations errored data which are + categorized as: + - Exploration states + - Exploration interaction + - Exploration RTE + + Args: + states_dict: dict. A dict where each key-value pair represents, + respectively, a state name and a dict used to initialize a + State domain object. + language_code: str. The language code of the exploration. + + Returns: + dict. The converted states_dict. + """ + states_dict = cls._fix_labelled_as_correct_value_in_state_dict( + states_dict) + + # Update state interaction validations. + states_dict = cls._update_state_interaction( + states_dict, language_code) + + # Update state RTE validations. + states_dict = cls._update_state_rte(states_dict) + + return states_dict + + @classmethod + def _convert_states_v53_dict_to_v54_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: + """Converts from version 53 to 54. Version 54 adds + catchMisspellings customization arg to TextInput + interaction which allows creators to detect misspellings. + + Args: + states_dict: dict. A dict where each key-value pair represents, + respectively, a state name and a dict used to initialize a + State domain object. + + Returns: + dict. The converted states_dict. 
+ """ + + for state_dict in states_dict.values(): + if state_dict['interaction']['id'] == 'TextInput': + customization_args = state_dict['interaction'][ + 'customization_args'] + customization_args.update({ + 'catchMisspellings': { + 'value': False + } + }) + + return states_dict + + @classmethod + def _fix_labelled_as_correct_value_in_state_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: + """If destination is `try again` and the value of labelled_as_correct + is True, replaces it with False + + Args: + states_dict: dict. A dict where each key-value pair represents, + respectively, a state name and a dict used to initialize a + State domain object. + + Returns: + dict. The converted states_dict. + """ + for state_name, state_dict in states_dict.items(): + answer_groups = state_dict['interaction']['answer_groups'] + for answer_group in answer_groups: + # labelled_as_correct should not be True if dest is try again. + if answer_group['outcome']['dest'] == state_name: + answer_group['outcome']['labelled_as_correct'] = False + + state_dict['interaction']['answer_groups'] = answer_groups + + return states_dict + + # ########################################################. + # Fix validation errors for exploration state interaction. + # ########################################################. 
+ @classmethod + def _choices_should_be_unique_and_non_empty( + cls, + choices: List[state_domain.SubtitledHtmlDict], + answer_groups: List[state_domain.AnswerGroupDict], + state_dict: state_domain.StateDict, + *, + is_item_selection_interaction: bool = False + ) -> None: + """Handles choices present in the ItemSelectionInput or + in MultipleChoiceInput interactions, implements the following: + - If only one choice is empty then simply removes it + - If multiple choices are empty replace them with `Choice 1` , + `Choice 2` etc + - If choices are duplicate, removes the later choice + - Remove the rules whose choices has been deleted + + Args: + choices: List[state_domain.SubtitledHtmlDict]. A list of choices. + answer_groups: List[state_domain.AnswerGroupDict]. The list of + answer groups. + state_dict: state_domain.StateDict. The exploration state. + is_item_selection_interaction: bool. If the answer group belongs + to ItemSelectionInput interaction or not. + """ + empty_choices: List[state_domain.SubtitledHtmlDict] = [] + seen_choices: List[str] = [] + choices_to_remove: List[state_domain.SubtitledHtmlDict] = [] + invalid_choices_index = [] + invalid_choices_content_ids = [] + content_ids_of_choices_to_update = [] + choices_content = [] + for choice in choices: + choices_content.append(choice['html']) + if html_cleaner.is_html_empty(choice['html']): + empty_choices.append(choice) + + if len(empty_choices) == 1: + invalid_choices_index.append(choices.index(empty_choices[0])) + invalid_choices_content_ids.append(empty_choices[0]['content_id']) + choices_to_remove.append(empty_choices[0]) + else: + for idx, empty_choice in enumerate(empty_choices): + valid_choice = ( + '

    ' + 'Choice ' + str(idx + 1) + '

    ' + ) + if valid_choice in choices_content: + choices_to_remove.append(empty_choice) + else: + empty_choice['html'] = valid_choice + content_ids_of_choices_to_update.append( + empty_choice['content_id']) + + # Duplicate choices. + for choice in choices: + if choice['html'] not in seen_choices: + seen_choices.append(choice['html']) + else: + choices_to_remove.append(choice) + invalid_choices_index.append(choices.index(choice)) + invalid_choices_content_ids.append(choice['content_id']) + + # Remove rules whose choice has been deleted. + empty_ans_groups = [] + for answer_group in answer_groups: + invalid_rules = [] + for rule_spec in answer_group['rule_specs']: + if rule_spec['rule_type'] == 'Equals': + if rule_spec['inputs']['x'] in invalid_choices_index: + invalid_rules.append(rule_spec) + if is_item_selection_interaction: + rule_inputs = rule_spec['inputs'] + assert isinstance(rule_inputs, dict) + rule_values = rule_inputs['x'] + assert isinstance(rule_values, list) + if any( + item in rule_values for item in + invalid_choices_content_ids + ): + invalid_rules.append(rule_spec) + + for invalid_rule in invalid_rules: + answer_group['rule_specs'].remove(invalid_rule) + if ( + len(answer_group['rule_specs']) == 0 and + answer_group not in empty_ans_groups + ): + empty_ans_groups.append(answer_group) + + for empty_ans_group in empty_ans_groups: + answer_groups.remove(empty_ans_group) + + # Remove solution if invalid choice is present. + if state_dict['interaction']['solution'] is not None: + solution = state_dict['interaction']['solution']['correct_answer'] + if isinstance(solution, list) and any( + invalid_choice['content_id'] in solution for invalid_choice in + choices_to_remove + ): + state_dict['interaction']['solution'] = None + + for choice_to_remove in choices_to_remove: + choices.remove(choice_to_remove) + + # Marking the content ids that needs update. 
+ for content_id in content_ids_of_choices_to_update: + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + choice_translations = state_dict['written_translations'][ # type: ignore[misc] + 'translations_mapping'][content_id] + for translation in choice_translations.values(): + translation['needs_update'] = True + + choice_voiceovers = state_dict['recorded_voiceovers'][ + 'voiceovers_mapping'][content_id] + for choice_voiceover in choice_voiceovers.values(): + choice_voiceover['needs_update'] = True + + # Fix RTE content present inside the choices. + for choice in choices: + choice_html = choice['html'] + choice['html'] = cls.fix_content(choice_html) + + @classmethod + def _set_lower_and_upper_bounds( + cls, + range_var: RangeVariableDict, + lower_bound: Optional[float], + upper_bound: Optional[float], + *, + lb_inclusive: bool, + ub_inclusive: bool + ) -> None: + """Sets the lower and upper bounds for the range_var. + + Args: + range_var: dict[str, Any]. Variable used to keep track of each + range. + lower_bound: Optional[float]. The lower bound. + upper_bound: Optional[float]. The upper bound. + lb_inclusive: bool. If lower bound is inclusive. + ub_inclusive: bool. If upper bound is inclusive. + """ + range_var['lower_bound'] = lower_bound + range_var['upper_bound'] = upper_bound + range_var['lb_inclusive'] = lb_inclusive + range_var['ub_inclusive'] = ub_inclusive + + @classmethod + def _is_enclosed_by( + cls, + test_range: RangeVariableDict, + base_range: RangeVariableDict + ) -> bool: + """Checks whether the ranges of rules enclosed or not + + Args: + test_range: RangeVariableDict. It represents the variable for + which we have to check the range. + base_range: RangeVariableDict. It is the variable to which + the range is compared. + + Returns: + bool. Returns True if both rule's ranges are enclosed. 
+ """ + if ( + base_range['lower_bound'] is None or + test_range['lower_bound'] is None or + base_range['upper_bound'] is None or + test_range['upper_bound'] is None + ): + return False + + lb_satisfied = ( + base_range['lower_bound'] < test_range['lower_bound'] or + ( + base_range['lower_bound'] == test_range['lower_bound'] and + (not test_range['lb_inclusive'] or base_range['lb_inclusive']) + ) + ) + ub_satisfied = ( + base_range['upper_bound'] > test_range['upper_bound'] or + ( + base_range['upper_bound'] == test_range['upper_bound'] and + (not test_range['ub_inclusive'] or base_range['ub_inclusive']) + ) + ) + return lb_satisfied and ub_satisfied + + @classmethod + def _should_check_range_criteria( + cls, + earlier_rule: state_domain.RuleSpecDict, + later_rule: state_domain.RuleSpecDict + ) -> bool: + """Checks the range criteria between two rules by comparing their + rule type + + Args: + earlier_rule: state_domain.RuleSpecDict. Previous rule. + later_rule: state_domain.RuleSpecDict. Current rule. + + Returns: + bool. Returns True if the rules passes the range criteria check. + """ + if earlier_rule['rule_type'] in ( + 'HasDenominatorEqualTo', 'IsEquivalentTo', 'IsLessThan', + 'IsEquivalentToAndInSimplestForm', 'IsGreaterThan' + ): + return True + return later_rule['rule_type'] in ( + 'HasDenominatorEqualTo', 'IsLessThan', 'IsGreaterThan' + ) + + @classmethod + def _get_rule_value_of_fraction_interaction( + cls, rule_spec: state_domain.RuleSpecDict + ) -> float: + """Returns rule value of the rule_spec of FractionInput interaction so + that we can keep track of rule's range + + Args: + rule_spec: state_domain.RuleSpecDict. Rule spec of an answer group. + + Returns: + value: float. The value of the rule spec. 
+ """ + rule_input = rule_spec['inputs'] + assert isinstance(rule_input, dict) + rule_value_f = rule_input['f'] + assert isinstance(rule_value_f, dict) + value: float = ( + rule_value_f['wholeNumber'] + + float(rule_value_f['numerator']) / rule_value_f['denominator'] + ) + return value + + @classmethod + def _remove_duplicate_rules_inside_answer_groups( + cls, + answer_groups: List[state_domain.AnswerGroupDict], + state_name: str + ) -> None: + """Removes the duplicate rules present inside the answer groups. This + will simply removes the rule which do not point to another state + to avoid state disconnection. If both of them do not point to different + state we will simply remove the later one + + Args: + answer_groups: List[state_domain.AnswerGroupDict]. The answer groups + present inside the state. + state_name: str. The state name. + """ + rules_to_remove_with_diff_dest_node = [] + rules_to_remove_with_try_again_dest_node = [] + seen_rules_with_try_again_dest_node = [] + seen_rules_with_diff_dest_node = [] + for answer_group in answer_groups: + for rule_spec in answer_group['rule_specs']: + if rule_spec in seen_rules_with_try_again_dest_node: + if ( + answer_group['outcome']['dest'] != state_name and + rule_spec not in seen_rules_with_diff_dest_node + ): + seen_rules_with_diff_dest_node.append(rule_spec) + rules_to_remove_with_try_again_dest_node.append( + rule_spec) + elif ( + answer_group['outcome']['dest'] != state_name and + rule_spec in seen_rules_with_diff_dest_node + ): + rules_to_remove_with_diff_dest_node.append(rule_spec) + else: + rules_to_remove_with_try_again_dest_node.append( + rule_spec) + + elif rule_spec in seen_rules_with_diff_dest_node: + if answer_group['outcome']['dest'] != state_name: + rules_to_remove_with_diff_dest_node.append(rule_spec) + else: + rules_to_remove_with_try_again_dest_node.append( + rule_spec) + + else: + if ( + rule_spec not in seen_rules_with_try_again_dest_node and + answer_group['outcome']['dest'] == state_name + ): + 
seen_rules_with_try_again_dest_node.append(rule_spec) + if ( + rule_spec not in seen_rules_with_diff_dest_node and + answer_group['outcome']['dest'] != state_name + ): + seen_rules_with_diff_dest_node.append(rule_spec) + + empty_ans_groups = [] + for rule_to_remove in rules_to_remove_with_try_again_dest_node: + removed_try_again_rule = False + for answer_group in reversed(answer_groups): + for rule_spec in reversed(answer_group['rule_specs']): + if ( + rule_spec == rule_to_remove and + answer_group['outcome']['dest'] == state_name + ): + removed_try_again_rule = True + answer_group['rule_specs'].remove(rule_to_remove) + break + + if ( + len(answer_group['rule_specs']) == 0 and + answer_group not in empty_ans_groups + ): + empty_ans_groups.append(answer_group) + + if removed_try_again_rule: + break + + for rule_to_remove in rules_to_remove_with_diff_dest_node: + removed_dest_rule = False + for answer_group in reversed(answer_groups): + for rule_spec in reversed(answer_group['rule_specs']): + if ( + rule_spec == rule_to_remove and + answer_group['outcome']['dest'] != state_name + ): + removed_dest_rule = True + answer_group['rule_specs'].remove(rule_to_remove) + break + + if ( + len(answer_group['rule_specs']) == 0 and + answer_group not in empty_ans_groups + ): + empty_ans_groups.append(answer_group) + + if removed_dest_rule: + break + + for empty_ans_group in empty_ans_groups: + answer_groups.remove(empty_ans_group) + + @classmethod + def _fix_continue_interaction( + cls, state_dict: state_domain.StateDict, language_code: str + ) -> None: + """Fixes Continue interaction where the length of the text value + is more than 20. We simply replace them with the word `Continue` + according to the language code + + Args: + state_dict: state_domain.StateDict. The state dictionary. + language_code: str. The language code of the exploration. 
+ """ + # Here we use cast because we are narrowing down the type from various + # customization args value types to SubtitledUnicodeDict type, and this + # is done because here we are accessing 'buttontext' key from continue + # customization arg whose value is always of SubtitledUnicodeDict type. + button_text_subtitled_unicode_dict = cast( + state_domain.SubtitledUnicodeDict, + state_dict['interaction']['customization_args']['buttonText'][ + 'value' + ] + ) + text_value = button_text_subtitled_unicode_dict['unicode_str'] + content_id = button_text_subtitled_unicode_dict['content_id'] + lang_code_to_unicode_str_dict = { + 'en': 'Continue', + 'es': 'Continuar', + 'nl': 'Doorgaan', + 'ru': 'Продолжить', + 'fr': 'Continuer', + 'ca': 'Continua', + 'hu': 'Folytatás', + 'zh': '继续', + 'it': 'Continua', + 'fi': 'Jatka', + 'pt': 'Continuar', + 'de': 'Fortfahren', + 'ar': 'استمرار', + 'tr': 'İlerle' + } + if len(text_value) > 20: + if language_code in lang_code_to_unicode_str_dict: + button_text_subtitled_unicode_dict['unicode_str'] = ( + lang_code_to_unicode_str_dict[language_code] + ) + else: + button_text_subtitled_unicode_dict['unicode_str'] = 'Continue' + + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + continue_button_translations = state_dict['written_translations'][ # type: ignore[misc] + 'translations_mapping'][content_id] + for translation in continue_button_translations.values(): + translation['needs_update'] = True + + choice_voiceovers = state_dict['recorded_voiceovers'][ + 'voiceovers_mapping'][content_id] + for choice_voiceover in choice_voiceovers.values(): + choice_voiceover['needs_update'] = True + + @classmethod + def _fix_end_interaction(cls, state_dict: state_domain.StateDict) -> None: + """Fixes the End exploration interaction where the recommended + explorations are more than 3. We simply slice them till the + length 3 + + Args: + state_dict: state_domain.StateDict. 
The state dictionary. + """ + # Here we use cast because we are narrowing down the type from various + # customization args value types to List[str] type, and this is done + # because here we are accessing 'recommendedExplorationIds' key from + # EndExploration customization arg whose value is always of List[str] + # type. + recc_exp_ids = cast( + List[str], + state_dict['interaction']['customization_args'][ + 'recommendedExplorationIds' + ]['value'] + ) + # Should be at most 3 recommended explorations. + state_dict['interaction']['customization_args'][ + 'recommendedExplorationIds']['value'] = recc_exp_ids[:3] + + @classmethod + def _fix_numeric_input_interaction( + cls, state_dict: state_domain.StateDict, state_name: str + ) -> None: + """Fixes NumericInput interaction for the following cases: + - The rules should not be duplicate else the one with not pointing to + different state will be deleted + - The rule should not match previous rules solution means it should + not be in the range of previous rules solution otherwise the later + answer group will be redundant and will never be matched. Simply the + invalid rule will be removed and if only one rule is present then the + complete answer group is removed + - As this interaction is only for the numeric values, all string values + will be considered as invalid and will be removed + - `tol` value in `IsWithinTolerance` rule must be positive else will be + converted to positive value + - `a` should not be greater than `b` in `IsInclusivelyBetween` rule else + we will simply swap them + + Args: + state_dict: state_domain.StateDict. The state dictionary that needs + to be fixed. + state_name: str. The name of the state. 
+ """ + answer_groups = state_dict['interaction']['answer_groups'] + lower_infinity = float('-inf') + upper_infinity = float('inf') + invalid_rules = [] + ranges: List[RangeVariableDict] = [] + cls._remove_duplicate_rules_inside_answer_groups( + answer_groups, state_name) + # All rules should have solutions that do not match + # previous rules' solutions. + for ans_group_index, answer_group in enumerate(answer_groups): + for rule_spec_index, rule_spec in enumerate( + answer_group['rule_specs'] + ): + range_var: RangeVariableDict = { + 'ans_group_index': int(ans_group_index), + 'rule_spec_index': int(rule_spec_index), + 'lower_bound': None, + 'upper_bound': None, + 'lb_inclusive': False, + 'ub_inclusive': False + } + rule_inputs = rule_spec['inputs'] + assert isinstance(rule_inputs, dict) + if rule_spec['rule_type'] == 'IsLessThanOrEqualTo': + try: + assert isinstance(rule_inputs['x'], float) + rule_value = float(rule_inputs['x']) + cls._set_lower_and_upper_bounds( + range_var, + lower_infinity, + rule_value, + lb_inclusive=False, + ub_inclusive=True + ) + except Exception: + invalid_rules.append(rule_spec) + + if rule_spec['rule_type'] == 'IsGreaterThanOrEqualTo': + try: + assert isinstance(rule_inputs['x'], float) + rule_value = float(rule_inputs['x']) + cls._set_lower_and_upper_bounds( + range_var, + rule_value, + upper_infinity, + lb_inclusive=True, + ub_inclusive=False + ) + except Exception: + invalid_rules.append(rule_spec) + + if rule_spec['rule_type'] == 'Equals': + try: + assert isinstance(rule_inputs['x'], float) + rule_value = float(rule_inputs['x']) + cls._set_lower_and_upper_bounds( + range_var, + rule_value, + rule_value, + lb_inclusive=True, + ub_inclusive=True + ) + except Exception: + invalid_rules.append(rule_spec) + + if rule_spec['rule_type'] == 'IsLessThan': + try: + assert isinstance(rule_inputs['x'], float) + rule_value = float(rule_inputs['x']) + cls._set_lower_and_upper_bounds( + range_var, + lower_infinity, + rule_value, + 
lb_inclusive=False, + ub_inclusive=False + ) + except Exception: + invalid_rules.append(rule_spec) + + if rule_spec['rule_type'] == 'IsWithinTolerance': + try: + rule_value_x = rule_inputs['x'] + assert isinstance(rule_value_x, float) + rule_value_tol = rule_inputs['tol'] + assert isinstance(rule_value_tol, float) + # The `tolerance` value needs to be a positive value. + if rule_value_tol <= 0: + rule_spec['inputs']['tol'] = abs(rule_value_tol) + rule_value_x = float(rule_value_x) + rule_value_tol = float(rule_value_tol) + cls._set_lower_and_upper_bounds( + range_var, + rule_value_x - rule_value_tol, + rule_value_x + rule_value_tol, + lb_inclusive=True, + ub_inclusive=True + ) + except Exception: + invalid_rules.append(rule_spec) + + if rule_spec['rule_type'] == 'IsGreaterThan': + try: + assert isinstance(rule_inputs['x'], float) + rule_value = float(rule_inputs['x']) + cls._set_lower_and_upper_bounds( + range_var, + rule_value, + upper_infinity, + lb_inclusive=False, + ub_inclusive=False + ) + except Exception: + invalid_rules.append(rule_spec) + + if rule_spec['rule_type'] == 'IsInclusivelyBetween': + try: + value_a = rule_inputs['a'] + assert isinstance(value_a, float) + value_b = rule_inputs['b'] + assert isinstance(value_b, float) + # For x in [a, b], a must not be greater than b. 
+ if value_a > value_b: + rule_spec['inputs']['a'] = value_b + rule_spec['inputs']['b'] = value_a + elif value_a == value_b: + rule_spec['rule_type'] = 'Equals' + rule_spec['inputs'] = {'x': value_a} + assert isinstance(rule_spec['inputs']['x'], float) + rule_value = float(rule_spec['inputs']['x']) + cls._set_lower_and_upper_bounds( + range_var, + rule_value, + rule_value, + lb_inclusive=True, + ub_inclusive=True + ) + continue + rule_value_a = float(value_a) + rule_value_b = float(value_b) + cls._set_lower_and_upper_bounds( + range_var, + rule_value_a, + rule_value_b, + lb_inclusive=True, + ub_inclusive=True + ) + except Exception: + invalid_rules.append(rule_spec) + + for range_ele in ranges: + if cls._is_enclosed_by(range_var, range_ele): + invalid_rules.append(rule_spec) + ranges.append(range_var) + + # Removing all the invalid rules. + empty_ans_groups = [] + for invalid_rule in invalid_rules: + for answer_group in answer_groups: + for rule_spec in answer_group['rule_specs']: + if rule_spec == invalid_rule: + answer_group['rule_specs'].remove(rule_spec) + + if ( + len(answer_group['rule_specs']) == 0 and + answer_group not in empty_ans_groups + ): + empty_ans_groups.append(answer_group) + + for empty_ans_group in empty_ans_groups: + answer_groups.remove(empty_ans_group) + + cls._remove_duplicate_rules_inside_answer_groups( + answer_groups, state_name) + + state_dict['interaction']['answer_groups'] = answer_groups + + @classmethod + def _fix_fraction_input_interaction( + cls, state_dict: state_domain.StateDict, state_name: str + ) -> None: + """Fixes FractionInput interaction for the following cases: + - The rules should not be duplicate else the one with not pointing to + different state will be deleted + - The rule should not match previous rules solution means it should + not be in the range of previous rules solution. Invalid rules will + be removed. + + Args: + state_dict: state_domain.StateDict. The state dictionary that needs + to be fixed. 
+ state_name: str. The name of the state. + """ + # All rules should have solutions that do not match + # previous rules' solutions. + answer_groups = state_dict['interaction']['answer_groups'] + lower_infinity = float('-inf') + upper_infinity = float('inf') + ranges: List[RangeVariableDict] = [] + invalid_rules = [] + matched_denominator_list: List[MatchedDenominatorDict] = [] + rules_that_can_have_improper_fractions = [ + 'IsExactlyEqualTo', + 'HasFractionalPartExactlyEqualTo' + ] + allow_imp_frac = state_dict['interaction']['customization_args'][ + 'allowImproperFraction']['value'] + + cls._remove_duplicate_rules_inside_answer_groups( + answer_groups, state_name) + for ans_group_index, answer_group in enumerate(answer_groups): + for rule_spec_index, rule_spec in enumerate( + answer_group['rule_specs'] + ): + range_var: RangeVariableDict = { + 'ans_group_index': int(ans_group_index), + 'rule_spec_index': int(rule_spec_index), + 'lower_bound': None, + 'upper_bound': None, + 'lb_inclusive': False, + 'ub_inclusive': False + } + matched_denominator: MatchedDenominatorDict = { + 'ans_group_index': int(ans_group_index), + 'rule_spec_index': int(rule_spec_index), + 'denominator': 0 + } + + if ( + rule_spec['rule_type'] in + rules_that_can_have_improper_fractions + ): + inputs = rule_spec['inputs'] + assert isinstance(inputs, dict) + value_f = inputs['f'] + assert isinstance(value_f, dict) + num = value_f['numerator'] + assert isinstance(num, int) + deno = value_f['denominator'] + assert isinstance(deno, int) + if not allow_imp_frac and deno <= num: + invalid_rules.append(rule_spec) + continue + + if rule_spec['rule_type'] in ( + 'IsEquivalentTo', 'IsExactlyEqualTo', + 'IsEquivalentToAndInSimplestForm' + ): + rule_value_equal: float = ( + cls._get_rule_value_of_fraction_interaction(rule_spec)) + cls._set_lower_and_upper_bounds( + range_var, + rule_value_equal, + rule_value_equal, + lb_inclusive=True, + ub_inclusive=True + ) + + elif rule_spec['rule_type'] == 
'IsGreaterThan': + rule_value_greater: float = ( + cls._get_rule_value_of_fraction_interaction(rule_spec)) + + cls._set_lower_and_upper_bounds( + range_var, + rule_value_greater, + upper_infinity, + lb_inclusive=False, + ub_inclusive=False + ) + + elif rule_spec['rule_type'] == 'IsLessThan': + rule_value_less_than: float = ( + cls._get_rule_value_of_fraction_interaction(rule_spec)) + + cls._set_lower_and_upper_bounds( + range_var, + lower_infinity, + rule_value_less_than, + lb_inclusive=False, + ub_inclusive=False + ) + + elif rule_spec['rule_type'] == 'HasDenominatorEqualTo': + try: + rule_inputs = rule_spec['inputs'] + assert isinstance(rule_inputs, dict) + assert isinstance(rule_inputs['x'], int) + rule_value_x = int(rule_inputs['x']) + matched_denominator['denominator'] = rule_value_x + except Exception: + invalid_rules.append(rule_spec) + + for range_ele in ranges: + earlier_rule = answer_groups[range_ele[ + 'ans_group_index']]['rule_specs'][ + range_ele['rule_spec_index']] + if ( + cls._should_check_range_criteria( + earlier_rule, rule_spec + ) and cls._is_enclosed_by(range_var, range_ele) + ): + invalid_rules.append(rule_spec) + + for den in matched_denominator_list: + if ( + rule_spec['rule_type'] == + 'HasFractionalPartExactlyEqualTo' + ): + rule_spec_f = rule_spec['inputs']['f'] + assert isinstance(rule_spec_f, dict) + if den['denominator'] == rule_spec_f['denominator']: + invalid_rules.append(rule_spec) + + ranges.append(range_var) + matched_denominator_list.append(matched_denominator) + + empty_ans_groups = [] + for invalid_rule in invalid_rules: + for answer_group in answer_groups: + for rule_spec in answer_group['rule_specs']: + if rule_spec == invalid_rule: + answer_group['rule_specs'].remove(rule_spec) + + if ( + len(answer_group['rule_specs']) == 0 and + answer_group not in empty_ans_groups + ): + empty_ans_groups.append(answer_group) + + for empty_ans_group in empty_ans_groups: + answer_groups.remove(empty_ans_group) + + 
state_dict['interaction']['answer_groups'] = answer_groups + + @classmethod + def _fix_multiple_choice_input_interaction( + cls, state_dict: state_domain.StateDict, state_name: str + ) -> None: + """Fixes MultipleChoiceInput interaction for the following cases: + - The rules should not be duplicate else the one with not pointing to + different state will be deleted + - No answer choice should appear in more than one rule else the + latter rule will be removed + - Answer choices should be non-empty and unique else will be fixed + accordingly + + Args: + state_dict: state_domain.StateDict. The state dictionary that needs + to be fixed. + state_name: str. The name of the state. + """ + answer_groups = state_dict['interaction']['answer_groups'] + # Here we use cast because we are narrowing down the type from various + # customization args value types to List[SubtitledHtmlDict] type, + # and this is done because here we are accessing 'choices' key from + # MultipleChoiceInput customization arg whose value is always of + # List[SubtitledHtmlDict] type. 
+ choices = ( + cast( + List[state_domain.SubtitledHtmlDict], + state_dict['interaction']['customization_args']['choices'][ + 'value' + ] + ) + ) + cls._choices_should_be_unique_and_non_empty( + choices, + answer_groups, + state_dict, + is_item_selection_interaction=False) + + cls._remove_duplicate_rules_inside_answer_groups( + answer_groups, state_name) + + state_dict['interaction']['customization_args']['choices'][ + 'value'] = choices + state_dict['interaction']['answer_groups'] = answer_groups + + @classmethod + def _fix_item_selection_input_interaction( + cls, state_dict: state_domain.StateDict, state_name: str + ) -> None: + """Fixes ItemSelectionInput interaction for the following cases: + - The rules should not be duplicate else the one with not pointing to + different state will be deleted + - `Equals` rule should have value between min and max number + of selections else the rule will be removed + - Minimum number of selections should be no greater than + maximum number of selections else we will simply swap the values + - There should be enough choices to have minimum number of selections + else the minimum value will be set to 1 + - All choices should be unique and non-empty else will be handled + accordingly + + Args: + state_dict: state_domain.StateDict. The state dictionary that needs + to be fixed. + state_name: str. The name of the state. + """ + # Here we use cast because we are narrowing down the type from various + # customization args value types to int type, and this is done because + # here we are accessing 'minAllowableSelectionCount' key from + # ItemSelectionInput Customization arg whose value is always of int + # type. 
+ min_value = cast( + int, + state_dict['interaction']['customization_args'][ + 'minAllowableSelectionCount' + ]['value'] + ) + # Here we use cast because we are narrowing down the type from various + # customization args value types to int type, and this is done because + # here we are accessing 'maxAllowableSelectionCount' key from + # ItemSelectionInput Customization arg whose value is always of int + # type. + max_value = cast( + int, + state_dict['interaction']['customization_args'][ + 'maxAllowableSelectionCount' + ]['value'] + ) + # Here we use cast because we are narrowing down the type from + # various customization args value types to List[SubtitledHtmlDict] + # type, and this is done because here we are accessing 'choices' key + # from ItemSelectionInput customization arg whose value is always of + # List[SubtitledHtmlDict] type. + choices = ( + cast( + List[state_domain.SubtitledHtmlDict], + state_dict['interaction']['customization_args'][ + 'choices' + ]['value'] + ) + ) + answer_groups = state_dict['interaction']['answer_groups'] + + # Rules should not be duplicate. + cls._remove_duplicate_rules_inside_answer_groups( + answer_groups, state_name) + + # Minimum number of selections should be no greater than maximum + # number of selections. + if min_value > max_value: + min_value, max_value = max_value, min_value + + # There should be enough choices to have minimum number + # of selections. + if len(choices) < min_value: + min_value = 1 + + # All choices should be unique and non-empty. + cls._choices_should_be_unique_and_non_empty( + choices, + answer_groups, + state_dict, + is_item_selection_interaction=True) + + empty_ans_groups = [] + for answer_group in answer_groups: + invalid_rules = [] + for rule_spec in answer_group['rule_specs']: + # `Equals` should have between min and max number of selections. 
+ if rule_spec['rule_type'] == 'Equals': + rule_value = rule_spec['inputs']['x'] + assert isinstance(rule_value, list) + if ( + len(rule_value) < min_value or + len(rule_value) > max_value + ): + if ( + answer_group['outcome']['dest'] == state_name or + len(rule_value) == 0 + ): + invalid_rules.append(rule_spec) + else: + min_value = min(min_value, len(rule_value)) + max_value = max(max_value, len(rule_value)) + + for invalid_rule in invalid_rules: + answer_group['rule_specs'].remove(invalid_rule) + + if ( + len(answer_group['rule_specs']) == 0 and + answer_group not in empty_ans_groups + ): + empty_ans_groups.append(answer_group) + + for empty_ans_group in empty_ans_groups: + answer_groups.remove(empty_ans_group) + + state_dict['interaction']['customization_args'][ + 'minAllowableSelectionCount' + ]['value'] = min_value + state_dict['interaction']['customization_args'][ + 'maxAllowableSelectionCount' + ]['value'] = max_value + state_dict['interaction']['customization_args']['choices'][ + 'value' + ] = choices + state_dict['interaction']['answer_groups'] = answer_groups + + @classmethod + def _update_rule_value_having_empty_choices( + cls, + empty_choices: List[state_domain.SubtitledHtmlDict], + rule_value_x: List[List[str]], + solution: Optional[List[List[str]]] + ) -> None: + """Removing empty choice from the rule values. + + Args: + empty_choices: List[state_domain.SubtitledHtmlDict]. The list of + empty choices. + rule_value_x: List[List[str]]. The rule spec value. + solution: Optional[List[List[str]]]. The solution of the state. 
+ """ + for empty_choice in empty_choices: + for rule_value in rule_value_x: + for choice in rule_value: + if choice == empty_choice['content_id']: + rule_value.remove(choice) + break + if len(rule_value) == 0: + rule_value_x.remove(rule_value) + break + + if solution is not None and isinstance(solution, list): + for choice_list in solution: + for choice in choice_list: + if choice == empty_choice['content_id']: + choice_list.remove(choice) + break + if len(choice_list) == 0: + solution.remove(choice_list) + break + + @classmethod + def _is_empty_choice_in_rule_value( + cls, + empty_choices: List[state_domain.SubtitledHtmlDict], + value: str + ) -> bool: + """Returns True if the empty choice is present inside the value. + + Args: + empty_choices: List[state_domain.SubtitledHtmlDict]. The list of + choices. + value: str. The value which needs to be checked. + + Returns: + bool. Returns True if the empty choice is equal to the given value. + """ + for empty_choice in empty_choices: + if value == empty_choice['content_id']: + return True + + return False + + @classmethod + def _fix_drag_and_drop_input_interaction( + cls, state_dict: state_domain.StateDict, state_name: str + ) -> None: + """Fixes the DragAndDropInput interaction with following checks: + - The rules should not be duplicate else the one with not pointing to + different state will be deleted + - Multiple items cannot be in the same place iff the setting is + turned off. Rule will simply be removed + - `IsEqualToOrderingWithOneItemAtIncorrectPosition` rule should + not be present when `multiple items at same place` setting + is turned off. Rule will simply be removed + - In `HasElementXBeforeElementY` rule, `X` value should not be + equal to `Y` value. 
Rule will simply be removed + - Rule `IsEqualToOrdering` having empty values is removed + - The `Equals` rule should always come before `HasElementXAtPositionY` + where the element `X` is present at position `Y` inside `Equals` + rule otherwise the rule will never going to match. We will simply remove + the `Equals` rule as it will never going to match + - The `Equals` rule should always come before + `IsEqualToOrderingWithOneItemAtIncorrectPosition` otherwise the + rule will never going to match. We will simply remove + the `Equals` rule as it will never going to match + + Args: + state_dict: state_domain.StateDict. The state dictionary that needs + to be fixed. + state_name: str. The name of the state. + """ + answer_groups = state_dict['interaction']['answer_groups'] + multi_item_value = ( + state_dict['interaction']['customization_args'] + ['allowMultipleItemsInSamePosition']['value'] + ) + invalid_rules = [] + ele_x_at_y_rules: List[Dict[str, Union[str, int]]] = [] + off_by_one_rules: List[List[List[str]]] = [] + # Here we use cast because we are narrowing down the type from + # various customization args value types to List[SubtitledHtmlDict] + # type, and this is done because here we are accessing 'choices' key + # from DragAndDropInput customization arg whose value is always of + # List[SubtitledHtmlDict] type. + choices_drag_drop = ( + cast( + List[state_domain.SubtitledHtmlDict], + state_dict['interaction']['customization_args'][ + 'choices' + ]['value'] + ) + ) + + if state_dict['interaction']['solution'] is not None: + solution = state_dict['interaction']['solution']['correct_answer'] + else: + solution = None + + # Here we use cast because we are certain with the type + # of the solution and to avoid the mypy type check failure. + state_sol = cast(Optional[List[List[str]]], solution) + + # Check for empty choices. 
+ empty_choices = [] + for choice_drag in choices_drag_drop: + if html_cleaner.is_html_empty(choice_drag['html']): + empty_choices.append(choice_drag) + + if len(empty_choices) > 0: + for empty_choice in empty_choices: + choices_drag_drop.remove(empty_choice) + + # Fix content. + for choice_drag in choices_drag_drop: + choice_html = choice_drag['html'] + choice_drag['html'] = cls.fix_content(choice_html) + + cls._remove_duplicate_rules_inside_answer_groups( + answer_groups, state_name) + for answer_group in answer_groups: + for rule_spec in answer_group['rule_specs']: + rule_inputs = rule_spec['inputs'] + assert isinstance(rule_inputs, dict) + rule_spec_x = rule_inputs['x'] + + if ( + rule_spec['rule_type'] == + 'IsEqualToOrderingWithOneItemAtIncorrectPosition' + ): + # Here we use cast because we are certain with the type + # of the rule spec and to avoid the mypy type check failure. + rule_spec_val = cast(List[List[str]], rule_spec_x) + if len(empty_choices) > 0: + cls._update_rule_value_having_empty_choices( + empty_choices, rule_spec_val, state_sol) + # `IsEqualToOrderingWithOneItemAtIncorrectPosition` + # rule should not be present when `multiple items at same + # place` setting is turned off. + if not multi_item_value: + invalid_rules.append(rule_spec) + else: + off_by_one_rules.append(rule_spec_val) + + # In `HasElementXBeforeElementY` rule, `X` value + # should not be equal to `Y` value. 
+ elif rule_spec['rule_type'] == 'HasElementXBeforeElementY': + value_x = rule_spec['inputs']['x'] + value_y = rule_spec['inputs']['y'] + assert isinstance(value_x, str) + assert isinstance(value_y, str) + if value_x == value_y: + invalid_rules.append(rule_spec) + + if len(empty_choices) > 0: + if cls._is_empty_choice_in_rule_value( + empty_choices, value_x + ): + invalid_rules.append(rule_spec) + continue + + if cls._is_empty_choice_in_rule_value( + empty_choices, value_y + ): + invalid_rules.append(rule_spec) + continue + + elif rule_spec['rule_type'] == 'HasElementXAtPositionY': + element = rule_spec['inputs']['x'] + assert isinstance(element, str) + position = rule_spec['inputs']['y'] + assert isinstance(position, int) + + if len(empty_choices) > 0: + if cls._is_empty_choice_in_rule_value( + empty_choices, element + ): + invalid_rules.append(rule_spec) + continue + + ele_x_at_y_rules.append( + {'element': element, 'position': position} + ) + + elif rule_spec['rule_type'] == 'IsEqualToOrdering': + # Here we use cast because we are certain with the type + # of the rule spec and to avoid the mypy type check failure. + rule_spec_val_x = cast(List[List[str]], rule_spec_x) + if len(empty_choices) > 0: + cls._update_rule_value_having_empty_choices( + empty_choices, rule_spec_val_x, state_sol) + + # Multiple items cannot be in the same place iff the + # setting is turned off. + for ele in rule_spec_val_x: + if not multi_item_value and len(ele) > 1: + invalid_rules.append(rule_spec) + + # `IsEqualToOrdering` rule should not have empty values. + if len(rule_spec_val_x) <= 0: + invalid_rules.append(rule_spec) + else: + # `IsEqualToOrdering` rule should always come before + # `HasElementXAtPositionY` where element `X` is present + # at position `Y` in `IsEqualToOrdering` rule. 
+ for ele_x_at_y_rule in ele_x_at_y_rules: + assert isinstance(ele_x_at_y_rule, dict) + ele_position = ele_x_at_y_rule['position'] + ele_element = ele_x_at_y_rule['element'] + assert isinstance(ele_position, int) + if ele_position > len(rule_spec_val_x): + continue + rule_choice = rule_spec_val_x[ele_position - 1] + + if len(rule_choice) == 0: + invalid_rules.append(rule_spec) + else: + for choice in rule_choice: + if choice == ele_element: + invalid_rules.append(rule_spec) + + # `IsEqualToOrdering` should always come before + # `IsEqualToOrderingWithOneItemAtIncorrectPosition` when + # they are off by one value. + item_to_layer_idx = {} + for layer_idx, layer in enumerate(rule_spec_val_x): + for item in layer: + item_to_layer_idx[item] = layer_idx + + for off_by_one_rule in off_by_one_rules: + assert isinstance(off_by_one_rule, list) + wrong_positions = 0 + for layer_idx, layer in enumerate(off_by_one_rule): + for item in layer: + if layer_idx != item_to_layer_idx[item]: + wrong_positions += 1 + if wrong_positions <= 1: + invalid_rules.append(rule_spec) + + empty_ans_groups = [] + for invalid_rule in invalid_rules: + for answer_group in answer_groups: + for rule_spec in answer_group['rule_specs']: + if rule_spec == invalid_rule: + answer_group['rule_specs'].remove(rule_spec) + + if ( + len(answer_group['rule_specs']) == 0 and + answer_group not in empty_ans_groups + ): + empty_ans_groups.append(answer_group) + + for empty_ans_group in empty_ans_groups: + answer_groups.remove(empty_ans_group) + + state_dict['interaction']['answer_groups'] = answer_groups + + @classmethod + def _fix_text_input_interaction( + cls, state_dict: state_domain.StateDict, state_name: str + ) -> None: + """Fixes the TextInput interaction with following checks: + - The rules should not be duplicate else the one with not pointing to + different state will be deleted + - Text input height shoule be >= 1 and <= 10 else we will replace with + 10 + - `Contains` should always come after another 
`Contains` rule where + the first contains rule strings is a substring of the other contains + rule strings + - `StartsWith` rule should always come after another `StartsWith` rule + where the first starts-with string is the prefix of the other + starts-with string + - `Contains` should always come after `StartsWith` rule where the + contains rule strings is a substring of the `StartsWith` rule string + - `Contains` should always come after `Equals` rule where the contains + rule strings is a substring of the `Equals` rule string + - `Contains` should always come after `Equals` rule where the contains + rule strings is a substring of the `Equals` rule string + - `Startswith` should always come after the `Equals` rule where a + `starts-with` string is a prefix of the `Equals` rule's string. + + Args: + state_dict: state_domain.StateDict. The state dictionary that needs + to be fixed. + state_name: str. The name of the state. + """ + answer_groups = state_dict['interaction']['answer_groups'] + seen_strings_contains: List[List[str]] = [] + seen_strings_startswith: List[List[str]] = [] + invalid_rules = [] + + cls._remove_duplicate_rules_inside_answer_groups( + answer_groups, state_name) + # Here we use cast because we are narrowing down the type from various + # customization args value types to int type, and this is done because + # here we are accessing 'rows' key from TextInput customization arg + # whose value is always of int type. + rows_value = cast( + int, + state_dict['interaction']['customization_args']['rows']['value'] + ) + # Text input height shoule be >= 1 and <= 10. 
+ if rows_value < 1: + state_dict['interaction']['customization_args'][ + 'rows']['value'] = 1 + if rows_value > 10: + state_dict['interaction']['customization_args'][ + 'rows']['value'] = 10 + for answer_group in answer_groups: + assert isinstance(answer_group['rule_specs'], list) + for rule_spec in answer_group['rule_specs']: + rule_spec_text = rule_spec['inputs']['x'] + assert isinstance(rule_spec_text, dict) + rule_values = rule_spec_text['normalizedStrSet'] + assert isinstance(rule_values, list) + if rule_spec['rule_type'] == 'Contains': + # `Contains` should always come after another + # `Contains` rule where the first contains rule + # strings is a substring of the other contains + # rule strings. + for contain_rule_ele in seen_strings_contains: + for contain_rule_string in contain_rule_ele: + for rule_value in rule_values: + if contain_rule_string in rule_value: + invalid_rules.append(rule_spec) + seen_strings_contains.append(rule_values) + elif rule_spec['rule_type'] == 'StartsWith': + # `StartsWith` rule should always come after another + # `StartsWith` rule where the first starts-with string + # is the prefix of the other starts-with string. + for start_with_rule_ele in seen_strings_startswith: + for start_with_rule_string in start_with_rule_ele: + for rule_value in rule_values: + if rule_value.startswith( + start_with_rule_string + ): + invalid_rules.append(rule_spec) + # `Contains` should always come after `StartsWith` rule + # where the contains rule strings is a substring + # of the `StartsWith` rule string. + for contain_rule_ele in seen_strings_contains: + for contain_rule_string in contain_rule_ele: + for rule_value in rule_values: + if contain_rule_string in rule_value: + invalid_rules.append(rule_spec) + seen_strings_startswith.append(rule_values) + elif rule_spec['rule_type'] == 'Equals': + # `Contains` should always come after `Equals` rule + # where the contains rule strings is a substring + # of the `Equals` rule string. 
+ for contain_rule_ele in seen_strings_contains: + for contain_rule_string in contain_rule_ele: + for rule_value in rule_values: + if contain_rule_string in rule_value: + invalid_rules.append(rule_spec) + # `Startswith` should always come after the `Equals` + # rule where a `starts-with` string is a prefix of the + # `Equals` rule's string. + for start_with_rule_ele in seen_strings_startswith: + for start_with_rule_string in start_with_rule_ele: + for rule_value in rule_values: + if rule_value.startswith( + start_with_rule_string + ): + invalid_rules.append(rule_spec) + + empty_ans_groups = [] + for invalid_rule in invalid_rules: + for answer_group in answer_groups: + for rule_spec in answer_group['rule_specs']: + if rule_spec == invalid_rule: + answer_group['rule_specs'].remove(rule_spec) + + if ( + len(answer_group['rule_specs']) == 0 and + answer_group not in empty_ans_groups + ): + empty_ans_groups.append(answer_group) + + for empty_ans_group in empty_ans_groups: + answer_groups.remove(empty_ans_group) + + state_dict['interaction']['answer_groups'] = answer_groups + + @classmethod + def _update_state_interaction( + cls, + states_dict: Dict[str, state_domain.StateDict], + language_code: str + ) -> Dict[str, state_domain.StateDict]: + """Handles all the invalid general state interactions + + Args: + states_dict: dict. A dict where each key-value pair represents, + respectively, a state name and a dict used to initialize a + State domain object. + language_code: str. The language code of the exploration. + + Returns: + states_dict: Dict[str, state_domain.StateDict]. The converted + state dictionary. 
+ """ + for state_name, state_dict in states_dict.items(): + interaction_id_to_fix_func: Dict[str, Callable[..., None]] = { + 'Continue': cls._fix_continue_interaction, + 'EndExploration': cls._fix_end_interaction, + 'NumericInput': cls._fix_numeric_input_interaction, + 'FractionInput': cls._fix_fraction_input_interaction, + 'MultipleChoiceInput': ( + cls._fix_multiple_choice_input_interaction), + 'ItemSelectionInput': cls._fix_item_selection_input_interaction, + 'DragAndDropSortInput': ( + cls._fix_drag_and_drop_input_interaction), + 'TextInput': cls._fix_text_input_interaction + } + interaction_id = state_dict['interaction']['id'] + if interaction_id in interaction_id_to_fix_func: + if interaction_id == 'Continue': + interaction_id_to_fix_func[interaction_id]( + state_dict, language_code) + elif interaction_id == 'EndExploration': + interaction_id_to_fix_func[interaction_id](state_dict) + else: + interaction_id_to_fix_func[interaction_id]( + state_dict, state_name) + + # Update translations and voiceovers. + cls._remove_unwanted_content_ids_from_translations_and_voiceovers_from_state_v51_or_v52( # pylint: disable=line-too-long + state_dict, state_schema=52) + + return states_dict + + # ################################################. + # Fix validation errors for exploration state RTE. + # ################################################. + + @classmethod + def _is_tag_removed_with_invalid_attributes( + cls, tag: bs4.BeautifulSoup, attr: str + ) -> bool: + """Returns True when the tag is removed due to invalid attribute. + + Args: + tag: bs4.BeautifulSoup. The RTE tag. + attr: str. The attribute that needs to be checked. + + Returns: + bool. Returns True when the tag has been deleted. 
+ """ + if not tag.has_attr(attr): + tag.decompose() + return True + + if html_cleaner.is_html_empty(tag[attr]): + tag.decompose() + return True + + return False + + @classmethod + def _fix_rte_tags( + cls, html: str, + *, + is_tags_nested_inside_tabs_or_collapsible: bool = False + ) -> str: + """Handles all the invalid RTE tags, performs the following: + - `oppia-noninteractive-image` + - If `alt-with-value` attribute not in the image tag, + introduces the attribute and assign empty value + - If `filepath-with-value` attribute not in image tag, + removes the tag + - If `filepath-with-value` attribute empty then removes + the tag + - If `caption-with-value` attribute not in the image tag, + introduces the attribute and assign empty value + - `oppia-noninteractive-skillreview` + - If `text-with-value` attribute is not present or empty or + None, removes the tag + - If `skill_id-with-value` attribute is not present or empty or + None, removes the tag + - `oppia-noninteractive-math` + - If `math_content-with-value` attribute not in math tag, + removes the tag + - If `raw_latex` is not present or empty or None, removes + the tag + - `oppia-noninteractive-video` + - If `start-with-value` or `end-with-value` is not present, + introduce them to the tag and assign 0 to them + - If `autoplay-with-value` is not present or is not boolean, + introduce it to the tag and assign `false` to them + - If `video_id-with-value` is not present or empty, removes + the tag + - If `start-with-value` > `end-with-value`, set both to '0' + - `oppia-noninteractive-link` + - If `text-with-value` or `url-with-value` is not present, + or is empty simply removes the tag + - `oppia-noninteractive-tabs` and `oppia-noninteractive-collapsible` + - If these tags are nested inside tabs and collapsible tag, we + will simply remove the tag + + Args: + html: str. The RTE tags. + is_tags_nested_inside_tabs_or_collapsible: bool. If the tag is + present inside the tabs or collapsible tag. + + Returns: + str. 
Returns the updated html value. + """ + soup = bs4.BeautifulSoup(html, 'html.parser') + + for tag in soup.find_all('oppia-noninteractive-image'): + if not tag.has_attr('alt-with-value'): + tag['alt-with-value'] = '""' + + if cls._is_tag_removed_with_invalid_attributes( + tag, 'filepath-with-value'): + continue + + if not tag.has_attr('caption-with-value'): + tag['caption-with-value'] = '""' + + for tag in soup.find_all('oppia-noninteractive-skillreview'): + if cls._is_tag_removed_with_invalid_attributes( + tag, 'text-with-value'): + continue + + if cls._is_tag_removed_with_invalid_attributes( + tag, 'skill_id-with-value'): + continue + + for tag in soup.find_all('oppia-noninteractive-video'): + if not tag.has_attr('start-with-value'): + tag['start-with-value'] = '0' + else: + if not tag['start-with-value'].isdigit(): + tag['start-with-value'] = '0' + + if not tag.has_attr('end-with-value'): + tag['end-with-value'] = '0' + else: + if not tag['end-with-value'].isdigit(): + tag['end-with-value'] = '0' + + if not tag.has_attr('autoplay-with-value'): + tag['autoplay-with-value'] = 'false' + else: + if tag['autoplay-with-value'].strip() not in ( + 'true', 'false', '\'true\'', '\'false\'', + '\"true\"', '\"false\"', True, False + ): + tag['autoplay-with-value'] = 'false' + + if cls._is_tag_removed_with_invalid_attributes( + tag, 'video_id-with-value'): + continue + + start_value = float(tag['start-with-value']) + end_value = float(tag['end-with-value']) + if ( + start_value > end_value and + start_value != 0 and + end_value != 0 + ): + tag['end-with-value'] = '0' + tag['start-with-value'] = '0' + + for tag in soup.find_all('oppia-noninteractive-link'): + if cls._is_tag_removed_with_invalid_attributes( + tag, 'url-with-value' + ): + continue + + url = tag['url-with-value'].replace( + '"', '').replace(' ', '') + if utils.get_url_scheme(url) == 'http': + url = url.replace('http', 'https') + + if ( + utils.get_url_scheme(url) not in + constants.ACCEPTABLE_SCHEMES + ): + 
tag.decompose() + continue + + tag['url-with-value'] = '"' + url + '"' + + if not tag.has_attr('text-with-value'): + tag['text-with-value'] = tag['url-with-value'] + else: + if html_cleaner.is_html_empty(tag['text-with-value']): + tag['text-with-value'] = tag['url-with-value'] + + for tag in soup.find_all('oppia-noninteractive-math'): + if cls._is_tag_removed_with_invalid_attributes( + tag, 'math_content-with-value'): + continue + + math_content_json = utils.unescape_html( + tag['math_content-with-value']) + math_content_list = json.loads(math_content_json) + if 'raw_latex' not in math_content_list: + tag.decompose() + continue + if html_cleaner.is_html_empty(math_content_list['raw_latex']): + tag.decompose() + continue + + if 'svg_filename' not in math_content_list: + tag.decompose() + continue + if html_cleaner.is_html_empty(math_content_list['svg_filename']): + tag.decompose() + continue + + if is_tags_nested_inside_tabs_or_collapsible: + tabs_tags = soup.find_all('oppia-noninteractive-tabs') + if len(tabs_tags) > 0: + for tabs_tag in tabs_tags: + tabs_tag.decompose() + continue + collapsible_tags = soup.find_all('oppia-noninteractive-collapsible') + if len(collapsible_tags) > 0: + for collapsible_tag in collapsible_tags: + collapsible_tag.decompose() + continue + + return str(soup).replace('
    ', '
    ') + + @classmethod + def _is_tag_removed_with_empty_content( + cls, + tag: bs4.BeautifulSoup, + content: Union[str, List[str]], + *, + is_collapsible: bool = False + ) -> bool: + """Returns True when the tag is removed for having empty content. + + Args: + tag: bs4.BeautifulSoup. The RTE tag. + content: Union[str, List[str]]. The content that needs to be + checked. + is_collapsible: bool. True if the tag is collapsible tag. + + Returns: + bool. Returns True when the tag has been deleted. + """ + if is_collapsible: + assert isinstance(content, str) + if html_cleaner.is_html_empty(content): + tag.decompose() + return True + else: + if len(content) == 0: + tag.decompose() + return True + + return False + + @classmethod + def _fix_tabs_and_collapsible_tags(cls, html: str) -> str: + """Fixes all tabs and collapsible tags, performs the following: + - `oppia-noninteractive-tabs` + - If no `tab_contents-with-value` attribute, tag will be removed + - If `tab_contents-with-value` is empty then the tag will be removed + - `oppia-noninteractive-collapsible` + - If no `content-with-value` attribute, tag will be removed + - If `content-with-value` is empty then the tag will be removed + - If no `heading-with-value` attribute, tag will be removed + - If `heading-with-value` is empty then the tag will be removed + + Args: + html: str. The RTE tags. + + Returns: + str. Returns the updated html value. 
+ """ + soup = bs4.BeautifulSoup(html, 'html.parser') + tabs_tags = soup.find_all('oppia-noninteractive-tabs') + for tag in tabs_tags: + if tag.has_attr('tab_contents-with-value'): + tab_content_json = utils.unescape_html( + tag['tab_contents-with-value']) + tab_content_list = json.loads(tab_content_json) + if cls._is_tag_removed_with_empty_content( + tag, tab_content_list, is_collapsible=False): + continue + + empty_tab_contents = [] + for tab_content in tab_content_list: + tab_content['content'] = cls._fix_rte_tags( + tab_content['content'], + is_tags_nested_inside_tabs_or_collapsible=True + ) + if html_cleaner.is_html_empty(tab_content['content']): + empty_tab_contents.append(tab_content) + + # Remove empty tab content from the tag. + for empty_content in empty_tab_contents: + tab_content_list.remove(empty_content) + + if cls._is_tag_removed_with_empty_content( + tag, tab_content_list, is_collapsible=False): + continue + + tab_content_json = json.dumps(tab_content_list) + tag['tab_contents-with-value'] = utils.escape_html( + tab_content_json) + else: + tag.decompose() + continue + + collapsibles_tags = soup.find_all( + 'oppia-noninteractive-collapsible') + for tag in collapsibles_tags: + if tag.has_attr('content-with-value'): + collapsible_content_json = ( + utils.unescape_html(tag['content-with-value']) + ) + collapsible_content = json.loads( + collapsible_content_json) + if cls._is_tag_removed_with_empty_content( + tag, collapsible_content, is_collapsible=True): + continue + + collapsible_content = cls._fix_rte_tags( + collapsible_content, + is_tags_nested_inside_tabs_or_collapsible=True + ) + if cls._is_tag_removed_with_empty_content( + tag, collapsible_content, is_collapsible=True): + continue + + collapsible_content_json = json.dumps(collapsible_content) + tag['content-with-value'] = utils.escape_html( + collapsible_content_json) + else: + tag.decompose() + continue + + if cls._is_tag_removed_with_invalid_attributes( + tag, 'heading-with-value'): + continue 
+ + return str(soup).replace('
    ', '
    ') + + @classmethod + def fix_content(cls, html: str) -> str: + """Helper function to fix the html. + + Args: + html: str. The html data to fix. + + Returns: + html: str. The fixed html data. + """ + html = cls._fix_rte_tags( + html, is_tags_nested_inside_tabs_or_collapsible=False) + html = cls._fix_tabs_and_collapsible_tags(html) + return html.replace('\xa0', ' ') + + @classmethod + def _update_state_rte( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Dict[str, state_domain.StateDict]: + """Update the state RTE content and translations Args: states_dict: dict. A dict where each key-value pair represents, @@ -1979,23 +5026,101 @@ def _convert_states_v48_dict_to_v49_dict(cls, states_dict): Returns: dict. The converted states_dict. """ - - for state_dict in states_dict.values(): - if state_dict['interaction']['id'] == 'NumericInput': - customization_args = state_dict['interaction'][ - 'customization_args'] - customization_args.update({ - 'requireNonnegativeInput': { - 'value': False - } - }) + for state in states_dict.values(): + # Fix tags for state content. + html = state['content']['html'] + state['content']['html'] = cls.fix_content(html) + + # Fix tags for written translations. + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + written_translations = ( + state['written_translations']['translations_mapping']) # type: ignore[misc] + for translation_item in written_translations.values(): + for translation in translation_item.values(): + if isinstance(translation['translation'], list): + translated_element_list = [] + for element in translation['translation']: + translated_element_list.append( + cls.fix_content(element)) + translation['translation'] = translated_element_list + else: + html = translation['translation'] + translation['translation'] = cls.fix_content(html) + + # Fix RTE content present inside the answer group's feedback. 
+ for answer_group in state['interaction']['answer_groups']: + feedback = answer_group['outcome']['feedback']['html'] + if not html_cleaner.is_html_empty(feedback): + answer_group['outcome']['feedback']['html'] = ( + cls.fix_content(feedback)) + + # Fix RTE content present inside the default outcome. + if state['interaction']['default_outcome'] is not None: + default_feedback = state['interaction']['default_outcome'][ + 'feedback']['html'] + if not html_cleaner.is_html_empty(default_feedback): + state['interaction']['default_outcome']['feedback'][ + 'html'] = cls.fix_content(default_feedback) + + # Fix RTE content present inside the Solution. + if state['interaction']['solution'] is not None: + solution = state['interaction']['solution']['explanation'][ + 'html'] + state['interaction']['solution']['explanation']['html'] = ( + cls.fix_content(solution)) + + # Fix RTE content present inside the Hint. + empty_hints = [] + hints = state['interaction']['hints'] + assert isinstance(hints, list) + for hint in hints: + hint_content = hint['hint_content']['html'] + hint['hint_content']['html'] = cls.fix_content(hint_content) + if html_cleaner.is_html_empty(hint['hint_content']['html']): + empty_hints.append(hint) + + for empty_hint in empty_hints: + hints.remove(empty_hint) + state['interaction']['hints'] = hints + + # Update translations and voiceovers. + cls._remove_unwanted_content_ids_from_translations_and_voiceovers_from_state_v51_or_v52( # pylint: disable=line-too-long + state, state_schema=52) return states_dict + @classmethod + def _convert_states_v54_dict_to_v55_dict( + cls, states_dict: Dict[str, state_domain.StateDict] + ) -> Tuple[Dict[str, state_domain.StateDict], int]: + """Converts from v54 to v55. Version 55 removes next_content_id_index + and WrittenTranslation from State. This version also updates the + content-ids for each translatable field in the state with its new + content-id. 
+ """ + for _, state_dict in states_dict.items(): + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + del state_dict['next_content_id_index'] # type: ignore[misc] + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + del state_dict['written_translations'] # type: ignore[misc] + states_dict, next_content_id_index = ( + state_domain.State + .update_old_content_id_to_new_content_id_in_v54_states(states_dict) + ) + + return states_dict, next_content_id_index + @classmethod def update_states_from_model( - cls, versioned_exploration_states, - current_states_schema_version, init_state_name): + cls, + versioned_exploration_states: VersionedExplorationStatesDict, + current_states_schema_version: int, + init_state_name: str, + language_code: str + ) -> Optional[int]: """Converts the states blob contained in the given versioned_exploration_states dict from current_states_schema_version to current_states_schema_version + 1. @@ -2012,6 +5137,10 @@ def update_states_from_model( current_states_schema_version: int. The current states schema version. init_state_name: str. Name of initial state. + language_code: str. The language code of the exploration. + + Returns: + None|int. The next content Id index for generating new content Id. 
""" versioned_exploration_states['states_schema_version'] = ( current_states_schema_version + 1) @@ -2021,19 +5150,31 @@ def update_states_from_model( if current_states_schema_version == 43: versioned_exploration_states['states'] = conversion_fn( versioned_exploration_states['states'], init_state_name) + elif current_states_schema_version == 52: + versioned_exploration_states['states'] = conversion_fn( + versioned_exploration_states['states'], language_code) + elif current_states_schema_version == 54: + versioned_exploration_states['states'], next_content_id_index = ( + conversion_fn(versioned_exploration_states['states'])) + assert isinstance(next_content_id_index, int) + return next_content_id_index else: versioned_exploration_states['states'] = conversion_fn( versioned_exploration_states['states']) + return None + # The current version of the exploration YAML schema. If any backward- # incompatible changes are made to the exploration schema in the YAML # definitions, this version number must be changed and a migration process # put in place. - CURRENT_EXP_SCHEMA_VERSION = 54 + CURRENT_EXP_SCHEMA_VERSION = 60 EARLIEST_SUPPORTED_EXP_SCHEMA_VERSION = 46 @classmethod - def _convert_v46_dict_to_v47_dict(cls, exploration_dict): + def _convert_v46_dict_to_v47_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: """Converts a v46 exploration dict into a v47 exploration dict. Changes rule input types for DragAndDropSortInput and ItemSelectionInput interactions to better support translations. Specifically, the rule @@ -2056,7 +5197,9 @@ def _convert_v46_dict_to_v47_dict(cls, exploration_dict): return exploration_dict @classmethod - def _convert_v47_dict_to_v48_dict(cls, exploration_dict): + def _convert_v47_dict_to_v48_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: """Converts a v47 exploration dict into a v48 exploration dict. 
Adds a new customization arg to NumericExpressionInput, AlgebraicExpressionInput, and MathEquationInput. The customization arg @@ -2080,7 +5223,9 @@ def _convert_v47_dict_to_v48_dict(cls, exploration_dict): return exploration_dict @classmethod - def _convert_v48_dict_to_v49_dict(cls, exploration_dict): + def _convert_v48_dict_to_v49_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: """Converts a v48 exploration dict into a v49 exploration dict. Adds card_is_checkpoint to mark a state as a checkpoint for the learners. @@ -2101,7 +5246,9 @@ def _convert_v48_dict_to_v49_dict(cls, exploration_dict): return exploration_dict @classmethod - def _convert_v49_dict_to_v50_dict(cls, exploration_dict): + def _convert_v49_dict_to_v50_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: """Converts a v49 exploration dict into a v50 exploration dict. Version 50 contains linked skill id to exploration state. @@ -2123,7 +5270,9 @@ def _convert_v49_dict_to_v50_dict(cls, exploration_dict): return exploration_dict @classmethod - def _convert_v50_dict_to_v51_dict(cls, exploration_dict): + def _convert_v50_dict_to_v51_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: """Converts a v50 exploration dict into a v51 exploration dict. Version 51 ensures that unicode written_translations are stripped of HTML tags and have data_format field set to unicode. @@ -2146,7 +5295,9 @@ def _convert_v50_dict_to_v51_dict(cls, exploration_dict): return exploration_dict @classmethod - def _convert_v51_dict_to_v52_dict(cls, exploration_dict): + def _convert_v51_dict_to_v52_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: """Converts a v51 exploration dict into a v52 exploration dict. Version 52 deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. 
@@ -2169,7 +5320,9 @@ def _convert_v51_dict_to_v52_dict(cls, exploration_dict): return exploration_dict @classmethod - def _convert_v52_dict_to_v53_dict(cls, exploration_dict): + def _convert_v52_dict_to_v53_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: """Converts a v52 exploration dict into a v53 exploration dict. Version 53 fixes encoding issues in HTML fields. @@ -2191,7 +5344,9 @@ def _convert_v52_dict_to_v53_dict(cls, exploration_dict): return exploration_dict @classmethod - def _convert_v53_dict_to_v54_dict(cls, exploration_dict): + def _convert_v53_dict_to_v54_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: """Converts a v53 exploration dict into a v54 exploration dict. Adds a new customization arg to NumericInput interaction which allows creators to set input greator than or equal to zero. @@ -2213,7 +5368,159 @@ def _convert_v53_dict_to_v54_dict(cls, exploration_dict): return exploration_dict @classmethod - def _migrate_to_latest_yaml_version(cls, yaml_content): + def _convert_v54_dict_to_v55_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: + """Converts a v54 exploration dict into a v55 exploration dict. + Removes rules from explorations that use one of the following rules: + [ContainsSomeOf, OmitsSomeOf, MatchesWithGeneralForm]. It also renames + `customOskLetters` cust arg to `allowedVariables`. + + Args: + exploration_dict: dict. The dict representation of an exploration + with schema version v54. + + Returns: + dict. The dict representation of the Exploration domain object, + following schema version v55. 
+ """ + exploration_dict['schema_version'] = 55 + + exploration_dict['states'] = cls._convert_states_v49_dict_to_v50_dict( + exploration_dict['states']) + exploration_dict['states_schema_version'] = 50 + + return exploration_dict + + @classmethod + def _convert_v55_dict_to_v56_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: + """Converts a v55 exploration dict into a v56 exploration dict. + Version 56 adds a new dest_if_really_stuck field to the Outcome class + to redirect the learners to a state for strengthening concepts when + they get really stuck. + + Args: + exploration_dict: dict. The dict representation of an exploration + with schema version v55. + + Returns: + dict. The dict representation of the Exploration domain object, + following schema version v56. + """ + exploration_dict['schema_version'] = 56 + + exploration_dict['states'] = cls._convert_states_v50_dict_to_v51_dict( + exploration_dict['states']) + exploration_dict['states_schema_version'] = 51 + + return exploration_dict + + @classmethod + def _convert_v56_dict_to_v57_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: + """Converts a v56 exploration dict into a v57 exploration dict. + Version 57 correctly updates the content IDs for translations and + for voiceovers. + + Args: + exploration_dict: dict. The dict representation of an exploration + with schema version v56. + + Returns: + dict. The dict representation of the Exploration domain object, + following schema version v57. + """ + exploration_dict['schema_version'] = 57 + + exploration_dict['states'] = cls._convert_states_v51_dict_to_v52_dict( + exploration_dict['states']) + exploration_dict['states_schema_version'] = 52 + + return exploration_dict + + @classmethod + def _convert_v57_dict_to_v58_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: + """Converts a v57 exploration dict into a v58 exploration dict. 
+ Version 58 corrects exploration validation errors which are categorized + as General State Validation, General Interaction Validation + and General RTE Validation. + + Args: + exploration_dict: dict. The dict representation of an exploration + with schema version v56. + + Returns: + dict. The dict representation of the Exploration domain object, + following schema version v57. + """ + exploration_dict['schema_version'] = 58 + + exploration_dict['states'] = cls._convert_states_v52_dict_to_v53_dict( + exploration_dict['states'], exploration_dict['language_code']) + exploration_dict['states_schema_version'] = 53 + + return exploration_dict + + @classmethod + def _convert_v58_dict_to_v59_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: + """Converts a v58 exploration dict into a v59 exploration dict. + Version 59 adds a new customization arg to TextInput allowing + creators to catch misspellings. + + Args: + exploration_dict: dict. The dict representation of an exploration + with schema version v58. + + Returns: + dict. The dict representation of the Exploration domain object, + following schema version v59. + """ + exploration_dict['schema_version'] = 59 + exploration_dict['states'] = cls._convert_states_v53_dict_to_v54_dict( + exploration_dict['states']) + exploration_dict['states_schema_version'] = 54 + + return exploration_dict + + @classmethod + def _convert_v59_dict_to_v60_dict( + cls, exploration_dict: VersionedExplorationDict + ) -> VersionedExplorationDict: + """Converts a v59 exploration dict into a v60 exploration dict. + Removes written_translation, next_content_id_index from state properties + and also introduces next_content_id_index variable into + exploration level. + + Args: + exploration_dict: dict. The dict representation of an exploration + with schema version v59. + + Returns: + dict. The dict representation of the Exploration domain object, + following schema version v60. 
+ """ + exploration_dict['schema_version'] = 60 + + exploration_dict['states'], next_content_id_index = ( + cls._convert_states_v54_dict_to_v55_dict( + exploration_dict['states']) + ) + exploration_dict['states_schema_version'] = 55 + exploration_dict['next_content_id_index'] = next_content_id_index + + return exploration_dict + + @classmethod + def _migrate_to_latest_yaml_version( + cls, yaml_content: str + ) -> VersionedExplorationDict: """Return the YAML content of the exploration in the latest schema format. @@ -2221,17 +5528,21 @@ def _migrate_to_latest_yaml_version(cls, yaml_content): yaml_content: str. The YAML representation of the exploration. Returns: - tuple(dict, int). The dict 'exploration_dict' is the representation - of the Exploration and the 'initial_schema_version' is the initial - schema version provided in 'yaml_content'. + exploration_dict. The dict 'exploration_dict' is the representation + of the Exploration. Raises: InvalidInputException. The 'yaml_content' or the schema version is not specified. Exception. The exploration schema version is not valid. """ + # Here we use cast because we are narrowing down the return type of + # dict_from_yaml() from Dict[str, Any] to VersionedExplorationDict. 
try: - exploration_dict = utils.dict_from_yaml(yaml_content) + exploration_dict = cast( + VersionedExplorationDict, + utils.dict_from_yaml(yaml_content) + ) except utils.InvalidInputException as e: raise utils.InvalidInputException( 'Please ensure that you are uploading a YAML text file, not ' @@ -2288,10 +5599,40 @@ def _migrate_to_latest_yaml_version(cls, yaml_content): exploration_dict) exploration_schema_version = 54 + if exploration_schema_version == 54: + exploration_dict = cls._convert_v54_dict_to_v55_dict( + exploration_dict) + exploration_schema_version = 55 + + if exploration_schema_version == 55: + exploration_dict = cls._convert_v55_dict_to_v56_dict( + exploration_dict) + exploration_schema_version = 56 + + if exploration_schema_version == 56: + exploration_dict = cls._convert_v56_dict_to_v57_dict( + exploration_dict) + exploration_schema_version = 57 + + if exploration_schema_version == 57: + exploration_dict = cls._convert_v57_dict_to_v58_dict( + exploration_dict) + exploration_schema_version = 58 + + if exploration_schema_version == 58: + exploration_dict = cls._convert_v58_dict_to_v59_dict( + exploration_dict) + exploration_schema_version = 59 + + if exploration_schema_version == 59: + exploration_dict = cls._convert_v59_dict_to_v60_dict( + exploration_dict) + exploration_schema_version = 60 + return exploration_dict @classmethod - def from_yaml(cls, exploration_id, yaml_content): + def from_yaml(cls, exploration_id: str, yaml_content: str) -> Exploration: """Creates and returns exploration from a YAML text string for YAML schema versions 10 and later. @@ -2311,29 +5652,36 @@ def from_yaml(cls, exploration_id, yaml_content): exploration_dict['id'] = exploration_id return Exploration.from_dict(exploration_dict) - def to_yaml(self): + def to_yaml(self) -> str: """Convert the exploration domain object into YAML string. Returns: str. The YAML representation of this exploration. 
""" exp_dict = self.to_dict() - exp_dict['schema_version'] = self.CURRENT_EXP_SCHEMA_VERSION + # Here we use MyPy ignore because the dictionary returned by `to_dict()` + # method is ExplorationDict and ExplorationDict does not contain + # `schema_version` key, but here we are defining a `schema_version` key + # which causes MyPy to throw error 'TypedDict has no key schema_version' + # thus to silence the error, we used ignore here. + exp_dict['schema_version'] = self.CURRENT_EXP_SCHEMA_VERSION # type: ignore[misc] # The ID is the only property which should not be stored within the # YAML representation. - del exp_dict['id'] + # Here we use MyPy ignore because MyPy doesn't allow key deletion from + # TypedDict. + del exp_dict['id'] # type: ignore[misc] - return python_utils.yaml_from_dict(exp_dict) + return utils.yaml_from_dict(exp_dict) - def to_dict(self): + def to_dict(self) -> ExplorationDict: """Returns a copy of the exploration as a dictionary. It includes all necessary information to represent the exploration. Returns: dict. A dict mapping all fields of Exploration instance. """ - return copy.deepcopy({ + exploration_dict: ExplorationDict = ({ 'id': self.id, 'title': self.title, 'category': self.category, @@ -2348,18 +5696,29 @@ def to_dict(self): 'tags': self.tags, 'auto_tts_enabled': self.auto_tts_enabled, 'correctness_feedback_enabled': self.correctness_feedback_enabled, + 'next_content_id_index': self.next_content_id_index, + 'edits_allowed': self.edits_allowed, 'states': {state_name: state.to_dict() for (state_name, state) in self.states.items()} }) + exploration_dict_deepcopy = copy.deepcopy(exploration_dict) + return exploration_dict_deepcopy - def serialize(self): + def serialize(self) -> str: """Returns the object serialized as a JSON string. Returns: str. JSON-encoded str encoding all of the information composing the object. 
""" - exploration_dict = self.to_dict() + # Here we use MyPy ignore because to_dict() method returns a general + # dictionary representation of domain object (ExplorationDict) which + # does not contain properties like created_on and last_updated but + # MyPy expects exploration_dict, a dictionary which contains all the + # properties of domain object. That's why we are explicitly changing + # the type of exploration_dict here, which causes MyPy to throw an + # error. Thus, to silence the error, we added an ignore here. + exploration_dict: SerializableExplorationDict = self.to_dict() # type: ignore[assignment] # The only reason we add the version parameter separately is that our # yaml encoding/decoding of this object does not handle the version # parameter. @@ -2381,7 +5740,7 @@ def serialize(self): return json.dumps(exploration_dict) @classmethod - def deserialize(cls, json_string): + def deserialize(cls, json_string: str) -> Exploration: """Returns an Exploration domain object decoded from a JSON string. Args: @@ -2409,7 +5768,7 @@ def deserialize(cls, json_string): return exploration - def to_player_dict(self): + def to_player_dict(self) -> ExplorationPlayerDict: """Returns a copy of the exploration suitable for inclusion in the learner view. @@ -2428,8 +5787,8 @@ def to_player_dict(self): - title: str. The exploration title. - objective: str. The exploration objective. - language_code: str. The language code of the exploration. - - correctness_feedback_enabled: str. Whether to show correctness - feedback. + - correctness_feedback_enabled: bool. Whether to show + correctness feedback. 
""" return { 'init_state_name': self.init_state_name, @@ -2443,35 +5802,45 @@ def to_player_dict(self): 'objective': self.objective, 'language_code': self.language_code, 'correctness_feedback_enabled': self.correctness_feedback_enabled, + 'next_content_id_index': self.next_content_id_index } - def get_all_html_content_strings(self): - """Gets all html content strings used in this exploration. - Returns: - list(str). The list of html content strings. - """ - html_list = [] - for state in self.states.values(): - content_html = state.content.html - interaction_html_list = ( - state.interaction.get_all_html_content_strings()) - html_list += [content_html] + interaction_html_list +class ExplorationSummaryMetadataDict(TypedDict): + """Dictionary representing the meta data for exploration summary.""" - return html_list + id: str + title: str + objective: str class ExplorationSummary: """Domain object for an Oppia exploration summary.""" def __init__( - self, exploration_id, title, category, objective, - language_code, tags, ratings, scaled_average_rating, status, - community_owned, owner_ids, editor_ids, voice_artist_ids, - viewer_ids, contributor_ids, contributors_summary, version, - exploration_model_created_on, - exploration_model_last_updated, - first_published_msec, deleted=False): + self, + exploration_id: str, + title: str, + category: str, + objective: str, + language_code: str, + tags: List[str], + ratings: Dict[str, int], + scaled_average_rating: float, + status: str, + community_owned: bool, + owner_ids: List[str], + editor_ids: List[str], + voice_artist_ids: List[str], + viewer_ids: List[str], + contributor_ids: List[str], + contributors_summary: Dict[str, int], + version: int, + exploration_model_created_on: datetime.datetime, + exploration_model_last_updated: datetime.datetime, + first_published_msec: Optional[float], + deleted: bool = False + ) -> None: """Initializes a ExplorationSummary domain object. 
Args: @@ -2507,8 +5876,9 @@ def __init__( the exploration model is created. exploration_model_last_updated: datetime.datetime. Date and time when the exploration model was last updated. - first_published_msec: int. Time in milliseconds since the Epoch, - when the exploration was first published. + first_published_msec: float|None. Time in milliseconds since the + Epoch, when the exploration was first published, or None if + Exploration is not published yet. deleted: bool. Whether the exploration is marked as deleted. """ self.id = exploration_id @@ -2533,7 +5903,7 @@ def __init__( self.first_published_msec = first_published_msec self.deleted = deleted - def validate(self): + def validate(self) -> None: """Validates various properties of the ExplorationSummary. Raises: @@ -2616,7 +5986,7 @@ def validate(self): 'Expected value to be non-negative, received %s' % ( value)) - if not isinstance(self.scaled_average_rating, float): + if not isinstance(self.scaled_average_rating, (float, int)): raise utils.ValidationError( 'Expected scaled_average_rating to be float, received %s' % ( self.scaled_average_rating)) @@ -2667,6 +6037,14 @@ def validate(self): 'Expected each id in viewer_ids to ' 'be string, received %s' % viewer_id) + all_user_ids_with_rights = ( + self.owner_ids + self.editor_ids + self.voice_artist_ids + + self.viewer_ids) + if len(all_user_ids_with_rights) != len(set(all_user_ids_with_rights)): + raise utils.ValidationError( + 'Users should not be assigned to multiple roles at once, ' + 'received users: %s' % ', '.join(all_user_ids_with_rights)) + if not isinstance(self.contributor_ids, list): raise utils.ValidationError( 'Expected contributor_ids to be list, received %s' % ( @@ -2682,7 +6060,7 @@ def validate(self): 'Expected contributors_summary to be dict, received %s' % ( self.contributors_summary)) - def to_metadata_dict(self): + def to_metadata_dict(self) -> ExplorationSummaryMetadataDict: """Given an exploration summary, this method returns a dict 
containing id, title and objective of the exploration. @@ -2699,15 +6077,15 @@ def to_metadata_dict(self): 'objective': self.objective, } - def is_private(self): + def is_private(self) -> bool: """Checks whether the exploration is private. Returns: bool. Whether the exploration is private. """ - return self.status == constants.ACTIVITY_STATUS_PRIVATE + return bool(self.status == constants.ACTIVITY_STATUS_PRIVATE) - def is_solely_owned_by_user(self, user_id): + def is_solely_owned_by_user(self, user_id: str) -> bool: """Checks whether the exploration is solely owned by the user. Args: @@ -2718,7 +6096,7 @@ def is_solely_owned_by_user(self, user_id): """ return user_id in self.owner_ids and len(self.owner_ids) == 1 - def does_user_have_any_role(self, user_id): + def does_user_have_any_role(self, user_id: str) -> bool: """Checks if a given user has any role within the exploration. Args: @@ -2734,7 +6112,7 @@ def does_user_have_any_role(self, user_id): user_id in self.viewer_ids ) - def add_contribution_by_user(self, contributor_id): + def add_contribution_by_user(self, contributor_id: str) -> None: """Add a new contributor to the contributors summary. Args: @@ -2774,10 +6152,11 @@ class ExplorationChangeMergeVerifier: # new property is added or deleted which affects or is affected # by interaction id and whose changes directly conflicts with # interaction id changes. 
- PROPERTIES_CONFLICTING_INTERACTION_ID_CHANGES = [ + PROPERTIES_CONFLICTING_INTERACTION_ID_CHANGES: List[str] = [ STATE_PROPERTY_INTERACTION_CUST_ARGS, STATE_PROPERTY_INTERACTION_SOLUTION, - STATE_PROPERTY_INTERACTION_ANSWER_GROUPS] + STATE_PROPERTY_INTERACTION_ANSWER_GROUPS + ] # PROPERTIES_CONFLICTING_CUST_ARGS_CHANGES: List of the properties # in which if there are any changes then customization args @@ -2785,10 +6164,11 @@ class ExplorationChangeMergeVerifier: # new property is added or deleted which affects or is affected # by customization args and whose changes directly conflicts with # cust args changes. - PROPERTIES_CONFLICTING_CUST_ARGS_CHANGES = [ + PROPERTIES_CONFLICTING_CUST_ARGS_CHANGES: List[str] = [ STATE_PROPERTY_INTERACTION_SOLUTION, STATE_PROPERTY_RECORDED_VOICEOVERS, - STATE_PROPERTY_INTERACTION_ANSWER_GROUPS] + STATE_PROPERTY_INTERACTION_ANSWER_GROUPS + ] # PROPERTIES_CONFLICTING_ANSWER_GROUPS_CHANGES: List of the properties # in which if there are any changes then answer groups @@ -2796,10 +6176,11 @@ class ExplorationChangeMergeVerifier: # new property is added or deleted which affects or is affected # by answer groups and whose changes directly conflicts with # answer groups changes. - PROPERTIES_CONFLICTING_ANSWER_GROUPS_CHANGES = [ + PROPERTIES_CONFLICTING_ANSWER_GROUPS_CHANGES: List[str] = [ STATE_PROPERTY_INTERACTION_SOLUTION, STATE_PROPERTY_RECORDED_VOICEOVERS, - STATE_PROPERTY_INTERACTION_CUST_ARGS] + STATE_PROPERTY_INTERACTION_CUST_ARGS + ] # PROPERTIES_CONFLICTING_SOLUTION_CHANGES: List of the properties # in which if there are any changes then solution @@ -2807,10 +6188,11 @@ class ExplorationChangeMergeVerifier: # new property is added or deleted which affects or is affected # by solution and whose changes directly conflicts with # solution changes. 
- PROPERTIES_CONFLICTING_SOLUTION_CHANGES = [ + PROPERTIES_CONFLICTING_SOLUTION_CHANGES: List[str] = [ STATE_PROPERTY_INTERACTION_ANSWER_GROUPS, STATE_PROPERTY_RECORDED_VOICEOVERS, - STATE_PROPERTY_INTERACTION_CUST_ARGS] + STATE_PROPERTY_INTERACTION_CUST_ARGS + ] # PROPERTIES_CONFLICTING_VOICEOVERS_CHANGES: List of the properties # in which if there are any changes then voiceovers @@ -2818,67 +6200,41 @@ class ExplorationChangeMergeVerifier: # new property is added or deleted which affects or is affected # by voiceovers and whose changes directly conflicts with # voiceovers changes. - PROPERTIES_CONFLICTING_VOICEOVERS_CHANGES = [ + PROPERTIES_CONFLICTING_VOICEOVERS_CHANGES: List[str] = [ STATE_PROPERTY_CONTENT, STATE_PROPERTY_INTERACTION_SOLUTION, STATE_PROPERTY_INTERACTION_HINTS, - STATE_PROPERTY_WRITTEN_TRANSLATIONS, STATE_PROPERTY_INTERACTION_ANSWER_GROUPS, STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME, - STATE_PROPERTY_INTERACTION_CUST_ARGS] + STATE_PROPERTY_INTERACTION_CUST_ARGS + ] # NON_CONFLICTING_PROPERTIES: List of the properties # in which if there are any changes then they are always mergeable. 
- NON_CONFLICTING_PROPERTIES = [ + NON_CONFLICTING_PROPERTIES: List[str] = [ STATE_PROPERTY_UNCLASSIFIED_ANSWERS, - STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, STATE_PROPERTY_LINKED_SKILL_ID, - STATE_PROPERTY_CARD_IS_CHECKPOINT] + STATE_PROPERTY_CARD_IS_CHECKPOINT + ] - def __init__(self, composite_change_list): + def __init__(self, composite_change_list: List[ExplorationChange]) -> None: - self.added_state_names = [] - self.deleted_state_names = [] - self.new_to_old_state_names = collections.defaultdict(set) - self.changed_properties = collections.defaultdict(set) - self.changed_translations = collections.defaultdict(set) + self.added_state_names: List[str] = [] + self.deleted_state_names: List[str] = [] + self.new_to_old_state_names: Dict[str, str] = ( + collections.defaultdict(str) + ) + self.changed_properties: Dict[str, Set[str]] = ( + collections.defaultdict(set) + ) + self.changed_translations: Dict[str, Set[str]] = ( + collections.defaultdict(set) + ) for change in composite_change_list: self._parse_exp_change(change) - def _get_property_name_from_content_id(self, content_id): - """Returns property name from content id. - - Args: - content_id: string. Id of the content. - - Returns: - string. Name of the property of which the - content is part of. 
- """ - property_name_to_content_id_identifier = { - STATE_PROPERTY_CONTENT: ( - lambda content_id: content_id == 'content'), - STATE_PROPERTY_INTERACTION_CUST_ARGS: ( - lambda content_id: content_id[:3] == 'ca_'), - STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME: ( - lambda content_id: content_id == 'default_outcome'), - STATE_PROPERTY_INTERACTION_SOLUTION: ( - lambda content_id: content_id == 'solution'), - STATE_PROPERTY_INTERACTION_HINTS: ( - lambda content_id: content_id[:4] == 'hint'), - STATE_PROPERTY_INTERACTION_ANSWER_GROUPS: ( - lambda content_id: ( - content_id[:8] == 'feedback' or - content_id[:10] == 'rule_input')), - } - - for prop_name, identifier_function in ( - property_name_to_content_id_identifier.items()): - if identifier_function(content_id): - return prop_name - - def _parse_exp_change(self, change): + def _parse_exp_change(self, change: ExplorationChange) -> None: """This function take the change and according to the cmd add the property name in the lists defined above. @@ -2915,25 +6271,16 @@ def _parse_exp_change(self, change): # in changed_properties dict. state_name = change.state_name if state_name in self.new_to_old_state_names: - state_name = self.new_to_old_state_names.get(change.state_name) + state_name = self.new_to_old_state_names[change.state_name] self.changed_properties[state_name].add( change.property_name) - elif change.cmd == CMD_ADD_WRITTEN_TRANSLATION: - changed_property = self._get_property_name_from_content_id( - change.content_id) - # A condition to store the name of the properties changed - # in changed_properties dict. 
- state_name = change.state_name - if state_name in self.new_to_old_state_names: - state_name = self.new_to_old_state_names.get(change.state_name) - self.changed_translations[state_name].add( - changed_property) - self.changed_properties[state_name].add( - STATE_PROPERTY_WRITTEN_TRANSLATIONS) def is_change_list_mergeable( - self, change_list, - exp_at_change_list_version, current_exploration): + self, + change_list: List[ExplorationChange], + exp_at_change_list_version: Exploration, + current_exploration: Exploration + ) -> Tuple[bool, bool]: """Checks whether the change list from the old version of an exploration can be merged on the latest version of an exploration. @@ -2975,7 +6322,7 @@ def is_change_list_mergeable( # states names in change_list where the key is the state name in # frontend version and the value is the renamed name from the # change list if there is any rename state change. - state_names_of_renamed_states = {} + state_names_of_renamed_states: Dict[str, str] = {} for change in change_list: change_is_mergeable = False if change.cmd == CMD_RENAME_STATE: @@ -3119,28 +6466,6 @@ def is_change_list_mergeable( change_is_mergeable = True if not self.changed_properties[state_name]: change_is_mergeable = True - elif change.cmd == CMD_ADD_WRITTEN_TRANSLATION: - state_name = state_names_of_renamed_states.get( - change.state_name) or change.state_name - if state_name in old_to_new_state_names: - # Here we will send the changelist, frontend_version, - # backend_version and exploration to the admin, so - # that the changes related to state renames can be - # reviewed and the proper conditions can be written - # to handle those cases. 
- return False, True - changed_property = self._get_property_name_from_content_id( - change.content_id) - if (changed_property not in - (self.changed_properties[state_name] | - self.changed_translations[state_name])): - change_is_mergeable = True - if not self.changed_properties[state_name]: - change_is_mergeable = True - elif change.cmd == CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE: - change_is_mergeable = True - elif change.cmd == CMD_MARK_WRITTEN_TRANSLATIONS_AS_NEEDING_UPDATE: - change_is_mergeable = True elif change.cmd == CMD_EDIT_EXPLORATION_PROPERTY: change_is_mergeable = ( exp_at_change_list_version.__getattribute__( @@ -3151,8 +6476,249 @@ def is_change_list_mergeable( if change_is_mergeable: changes_are_mergeable = True continue - else: - changes_are_mergeable = False - break + changes_are_mergeable = False + break return changes_are_mergeable, False + + +class ExplorationMetadataDict(TypedDict): + """Dictionary representing the ExplorationMetadata object.""" + + title: str + category: str + objective: str + language_code: str + tags: List[str] + blurb: str + author_notes: str + states_schema_version: int + init_state_name: str + param_specs: Dict[str, param_domain.ParamSpecDict] + param_changes: List[param_domain.ParamChangeDict] + auto_tts_enabled: bool + correctness_feedback_enabled: bool + edits_allowed: bool + + +class ExplorationMetadata: + """Class to represent the exploration metadata properties.""" + + def __init__( + self, + title: str, + category: str, + objective: str, + language_code: str, + tags: List[str], + blurb: str, + author_notes: str, + states_schema_version: int, + init_state_name: str, + param_specs: Dict[str, param_domain.ParamSpec], + param_changes: List[param_domain.ParamChange], + auto_tts_enabled: bool, + correctness_feedback_enabled: bool, + edits_allowed: bool + ) -> None: + """Initializes an ExplorationMetadata domain object. + + Args: + title: str. The exploration title. + category: str. 
The category of the exploration. + objective: str. The objective of the exploration. + language_code: str. The language code of the exploration. + tags: list(str). The tags given to the exploration. + blurb: str. The blurb of the exploration. + author_notes: str. The author notes. + states_schema_version: int. Tbe schema version of the exploration. + init_state_name: str. The name for the initial state of the + exploration. + param_specs: dict(str, ParamSpec). A dict where each key-value pair + represents respectively, a param spec name and a ParamSpec + domain object. + param_changes: list(ParamChange). List of ParamChange domain + objects. + auto_tts_enabled: bool. True if automatic text-to-speech is + enabled. + correctness_feedback_enabled: bool. True if correctness feedback is + enabled. + edits_allowed: bool. True when edits to the exploration is allowed. + """ + self.title = title + self.category = category + self.objective = objective + self.language_code = language_code + self.tags = tags + self.blurb = blurb + self.author_notes = author_notes + self.states_schema_version = states_schema_version + self.init_state_name = init_state_name + self.param_specs = param_specs + self.param_changes = param_changes + self.auto_tts_enabled = auto_tts_enabled + self.correctness_feedback_enabled = correctness_feedback_enabled + self.edits_allowed = edits_allowed + + def to_dict(self) -> ExplorationMetadataDict: + """Gets the dict representation of ExplorationMetadata domain object. + + Returns: + dict. The dict representation of the ExplorationMetadata + domain object. 
+ """ + return { + 'title': self.title, + 'category': self.category, + 'objective': self.objective, + 'language_code': self.language_code, + 'tags': self.tags, + 'blurb': self.blurb, + 'author_notes': self.author_notes, + 'states_schema_version': self.states_schema_version, + 'init_state_name': self.init_state_name, + 'param_specs': { + ps_name: ps_value.to_dict() + for (ps_name, ps_value) in self.param_specs.items() + }, + 'param_changes': [ + p_change.to_dict() for p_change in self.param_changes + ], + 'auto_tts_enabled': self.auto_tts_enabled, + 'correctness_feedback_enabled': self.correctness_feedback_enabled, + 'edits_allowed': self.edits_allowed + } + + +class MetadataVersionHistory: + """Class to represent an element of the version history list of the + exploration metadata. + + Attributes: + last_edited_version_number: int. The version number of the + exploration in which the metadata was last edited. + last_edited_committer_id: str. The user id of the user who committed + the latest changes to the exploration metadata. + """ + + def __init__( + self, + last_edited_version_number: Optional[int], + last_edited_committer_id: str + ): + """Initializes the MetadataVersionHistory domain object. + + Args: + last_edited_version_number: int. The version number of the + exploration in which the metadata was last edited. + last_edited_committer_id: str. The user id of the user who + committed the latest changes to the exploration metadata. + """ + self.last_edited_version_number = last_edited_version_number + self.last_edited_committer_id = last_edited_committer_id + + def to_dict(self) -> MetadataVersionHistoryDict: + """Returns a dict representation of the MetadataVersionHistory domain + object. + + Returns: + dict. The dict representation of the MetadataVersionHistory domain + object. 
+ """ + return { + 'last_edited_version_number': self.last_edited_version_number, + 'last_edited_committer_id': self.last_edited_committer_id + } + + @classmethod + def from_dict( + cls, metadata_version_history_dict: MetadataVersionHistoryDict + ) -> MetadataVersionHistory: + """Returns an MetadataVersionHistory domain object from a dict. + + Args: + metadata_version_history_dict: dict. The dict representation of + MetadataVersionHistory object. + + Returns: + MetadataVersionHistory. The corresponding MetadataVersionHistory + domain object. + """ + return cls( + metadata_version_history_dict['last_edited_version_number'], + metadata_version_history_dict['last_edited_committer_id'] + ) + + +class ExplorationVersionHistory: + """Class to represent the version history of an exploration at a + particular version. + + Attributes: + exploration_id: str. The id of the exploration. + exploration_version: int. The version number of the exploration. + state_version_history: Dict[str, StateVersionHistory]. + The mapping of state names and StateVersionHistory domain objects. + metadata_version_history: MetadataVersionHistory. The details of the + last commit on the exploration metadata. + committer_ids: List[str]. A list of user ids who made the + 'previous commit' on each state and the exploration metadata. + """ + + def __init__( + self, + exploration_id: str, + exploration_version: int, + state_version_history_dict: Dict[ + str, state_domain.StateVersionHistoryDict + ], + metadata_last_edited_version_number: Optional[int], + metadata_last_edited_committer_id: str, + committer_ids: List[str] + ) -> None: + """Initializes the ExplorationVersionHistory domain object. + + Args: + exploration_id: str. The id of the exploration. + exploration_version: int. The version number of the exploration. + state_version_history_dict: dict. The mapping of state names and + dicts of StateVersionHistory domain objects. + metadata_last_edited_version_number: int. 
The version number of the + exploration in which the metadata was last edited. + metadata_last_edited_committer_id: str. The user id of the user who + committed the latest changes to the exploration metadata. + committer_ids: List[str]. A list of user ids who made the + 'previous commit' on each state and the exploration metadata. + """ + self.exploration_id = exploration_id + self.exploration_version = exploration_version + self.state_version_history = { + state_name: state_domain.StateVersionHistory.from_dict(vh_dict) + for state_name, vh_dict in state_version_history_dict.items() + } + self.metadata_version_history = MetadataVersionHistory( + metadata_last_edited_version_number, + metadata_last_edited_committer_id + ) + self.committer_ids = committer_ids + + def to_dict(self) -> ExplorationVersionHistoryDict: + """Returns a dict representation of the ExplorationVersionHistory + domain object. + + Returns: + dict. A dict representation of the ExplorationVersionHistory + domain object. 
+ """ + return { + 'exploration_id': self.exploration_id, + 'exploration_version': self.exploration_version, + 'state_version_history': { + state_name: state_vh.to_dict() + for state_name, state_vh in self.state_version_history.items() + }, + 'metadata_version_history': ( + self.metadata_version_history.to_dict() + ), + 'committer_ids': self.committer_ids + } diff --git a/core/domain/exp_domain_test.py b/core/domain/exp_domain_test.py index 8d08ebb302ce..48cb6f9f98a9 100644 --- a/core/domain/exp_domain_test.py +++ b/core/domain/exp_domain_test.py @@ -20,7 +20,6 @@ import copy import os -import re from core import feconf from core import utils @@ -32,26 +31,33 @@ from core.domain import param_domain from core.domain import rights_manager from core.domain import state_domain +from core.domain import translation_domain from core.platform import models from core.tests import test_utils -(exp_models,) = models.Registry.import_models([models.NAMES.exploration]) +from typing import Dict, Final, List, Tuple, Union, cast + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) class ExplorationChangeTests(test_utils.GenericTestBase): - def test_exp_change_object_with_missing_cmd(self): - with self.assertRaisesRegexp( + def test_exp_change_object_with_missing_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Missing cmd key in change dict'): exp_domain.ExplorationChange({'invalid': 'data'}) - def test_exp_change_object_with_invalid_cmd(self): - with self.assertRaisesRegexp( + def test_exp_change_object_with_invalid_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Command invalid is not allowed'): exp_domain.ExplorationChange({'cmd': 'invalid'}) - def test_exp_change_object_with_deprecated_cmd(self): - with self.assertRaisesRegexp( + def test_exp_change_object_with_deprecated_cmd(self) -> None: + with self.assertRaisesRegex( 
utils.DeprecatedCommandError, 'Command clone is deprecated'): exp_domain.ExplorationChange({ 'cmd': 'clone', @@ -59,8 +65,8 @@ def test_exp_change_object_with_deprecated_cmd(self): 'old_value': 'old_value' }) - def test_exp_change_object_with_deprecated_cmd_argument(self): - with self.assertRaisesRegexp( + def test_exp_change_object_with_deprecated_cmd_argument(self) -> None: + with self.assertRaisesRegex( utils.DeprecatedCommandError, 'Value for property_name in cmd edit_state_property: ' 'fallbacks is deprecated'): @@ -71,8 +77,8 @@ def test_exp_change_object_with_deprecated_cmd_argument(self): 'new_value': 'foo', }) - def test_exp_change_object_with_missing_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_exp_change_object_with_missing_attribute_in_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following required attributes are missing: ' 'new_value')): @@ -82,8 +88,8 @@ def test_exp_change_object_with_missing_attribute_in_cmd(self): 'old_value': 'old_value' }) - def test_exp_change_object_with_extra_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_exp_change_object_with_extra_attribute_in_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following extra attributes are present: invalid')): exp_domain.ExplorationChange({ @@ -93,8 +99,8 @@ def test_exp_change_object_with_extra_attribute_in_cmd(self): 'invalid': 'invalid' }) - def test_exp_change_object_with_invalid_exploration_property(self): - with self.assertRaisesRegexp( + def test_exp_change_object_with_invalid_exploration_property(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd edit_exploration_property: ' 'invalid is not allowed')): @@ -105,8 +111,8 @@ def test_exp_change_object_with_invalid_exploration_property(self): 'new_value': 'new_value', }) - def test_exp_change_object_with_invalid_state_property(self): - with self.assertRaisesRegexp( + def 
test_exp_change_object_with_invalid_state_property(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd edit_state_property: ' 'invalid is not allowed')): @@ -118,7 +124,7 @@ def test_exp_change_object_with_invalid_state_property(self): 'new_value': 'new_value', }) - def test_exp_change_object_with_create_new(self): + def test_exp_change_object_with_create_new(self) -> None: exp_change_object = exp_domain.ExplorationChange({ 'cmd': 'create_new', 'category': 'category', @@ -129,16 +135,18 @@ def test_exp_change_object_with_create_new(self): self.assertEqual(exp_change_object.category, 'category') self.assertEqual(exp_change_object.title, 'title') - def test_exp_change_object_with_add_state(self): + def test_exp_change_object_with_add_state(self) -> None: exp_change_object = exp_domain.ExplorationChange({ 'cmd': 'add_state', 'state_name': 'state_name', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' }) self.assertEqual(exp_change_object.cmd, 'add_state') self.assertEqual(exp_change_object.state_name, 'state_name') - def test_exp_change_object_with_rename_state(self): + def test_exp_change_object_with_rename_state(self) -> None: exp_change_object = exp_domain.ExplorationChange({ 'cmd': 'rename_state', 'old_state_name': 'old_state_name', @@ -149,7 +157,7 @@ def test_exp_change_object_with_rename_state(self): self.assertEqual(exp_change_object.old_state_name, 'old_state_name') self.assertEqual(exp_change_object.new_state_name, 'new_state_name') - def test_exp_change_object_with_delete_state(self): + def test_exp_change_object_with_delete_state(self) -> None: exp_change_object = exp_domain.ExplorationChange({ 'cmd': 'delete_state', 'state_name': 'state_name', @@ -158,7 +166,7 @@ def test_exp_change_object_with_delete_state(self): self.assertEqual(exp_change_object.cmd, 'delete_state') self.assertEqual(exp_change_object.state_name, 'state_name') - def 
test_exp_change_object_with_edit_state_property(self): + def test_exp_change_object_with_edit_state_property(self) -> None: exp_change_object = exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', 'state_name': 'state_name', @@ -173,7 +181,7 @@ def test_exp_change_object_with_edit_state_property(self): self.assertEqual(exp_change_object.new_value, 'new_value') self.assertEqual(exp_change_object.old_value, 'old_value') - def test_exp_change_object_with_edit_exploration_property(self): + def test_exp_change_object_with_edit_exploration_property(self) -> None: exp_change_object = exp_domain.ExplorationChange({ 'cmd': 'edit_exploration_property', 'property_name': 'title', @@ -187,7 +195,8 @@ def test_exp_change_object_with_edit_exploration_property(self): self.assertEqual(exp_change_object.old_value, 'old_value') def test_exp_change_object_with_migrate_states_schema_to_latest_version( - self): + self + ) -> None: exp_change_object = exp_domain.ExplorationChange({ 'cmd': 'migrate_states_schema_to_latest_version', 'from_version': 'from_version', @@ -199,7 +208,7 @@ def test_exp_change_object_with_migrate_states_schema_to_latest_version( self.assertEqual(exp_change_object.from_version, 'from_version') self.assertEqual(exp_change_object.to_version, 'to_version') - def test_exp_change_object_with_revert_commit(self): + def test_exp_change_object_with_revert_commit(self) -> None: exp_change_object = exp_domain.ExplorationChange({ 'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT, 'version_number': 'version_number' @@ -210,7 +219,7 @@ def test_exp_change_object_with_revert_commit(self): exp_models.ExplorationModel.CMD_REVERT_COMMIT) self.assertEqual(exp_change_object.version_number, 'version_number') - def test_to_dict(self): + def test_to_dict(self) -> None: exp_change_dict = { 'cmd': 'create_new', 'title': 'title', @@ -223,19 +232,19 @@ def test_to_dict(self): class ExplorationVersionsDiffDomainUnitTests(test_utils.GenericTestBase): """Test the exploration 
versions difference domain object.""" - def setUp(self): - super(ExplorationVersionsDiffDomainUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.exp_id = 'exp_id1' test_exp_filepath = os.path.join( feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') yaml_content = utils.get_file_contents(test_exp_filepath) - assets_list = [] + assets_list: List[Tuple[str, bytes]] = [] exp_services.save_new_exploration_from_yaml_and_assets( feconf.SYSTEM_COMMITTER_ID, yaml_content, self.exp_id, assets_list) self.exploration = exp_fetchers.get_exploration_by_id(self.exp_id) - def test_correct_creation_of_version_diffs(self): + def test_correct_creation_of_version_diffs(self) -> None: # Rename a state. self.exploration.rename_state('Home', 'Renamed state') change_list = [exp_domain.ExplorationChange({ @@ -261,6 +270,8 @@ def test_correct_creation_of_version_diffs(self): change_list = [exp_domain.ExplorationChange({ 'cmd': 'add_state', 'state_name': 'New state', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' })] exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) @@ -293,6 +304,8 @@ def test_correct_creation_of_version_diffs(self): change_list = [exp_domain.ExplorationChange({ 'cmd': 'add_state', 'state_name': 'New state', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' }), exp_domain.ExplorationChange({ 'cmd': 'rename_state', 'old_state_name': 'New state', @@ -316,7 +329,9 @@ def test_correct_creation_of_version_diffs(self): self.exploration.delete_state('Renamed state 2') change_list = [exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'New state 2' + 'state_name': 'New state 2', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' }), exp_domain.ExplorationChange({ 'cmd': 'rename_state', 'old_state_name': 'New state 2', @@ -358,21 +373,25 @@ def 
test_correct_creation_of_version_diffs(self): self.assertEqual(exp_versions_diff.old_to_new_state_names, {}) self.exploration.version += 1 - def test_cannot_create_exploration_change_with_invalid_change_dict(self): - with self.assertRaisesRegexp( + def test_cannot_create_exploration_change_with_invalid_change_dict( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Missing cmd key in change dict'): exp_domain.ExplorationChange({ 'invalid_cmd': 'invalid' }) - def test_cannot_create_exploration_change_with_invalid_cmd(self): - with self.assertRaisesRegexp( + def test_cannot_create_exploration_change_with_invalid_cmd(self) -> None: + with self.assertRaisesRegex( Exception, 'Command invalid_cmd is not allowed'): exp_domain.ExplorationChange({ 'cmd': 'invalid_cmd' }) - def test_cannot_create_exploration_change_with_invalid_state_property(self): + def test_cannot_create_exploration_change_with_invalid_state_property( + self + ) -> None: exp_change = exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID, @@ -381,7 +400,7 @@ def test_cannot_create_exploration_change_with_invalid_state_property(self): }) self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange)) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Value for property_name in cmd edit_state_property: ' 'invalid_property is not allowed'): @@ -393,7 +412,8 @@ def test_cannot_create_exploration_change_with_invalid_state_property(self): }) def test_cannot_create_exploration_change_with_invalid_exploration_property( - self): + self + ) -> None: exp_change = exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'title', @@ -401,7 +421,7 @@ def test_cannot_create_exploration_change_with_invalid_exploration_property( }) self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange)) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( 
Exception, 'Value for property_name in cmd edit_exploration_property: ' 'invalid_property is not allowed'): @@ -411,7 +431,7 @@ def test_cannot_create_exploration_change_with_invalid_exploration_property( 'new_value': '' }) - def test_revert_exploration_commit(self): + def test_revert_exploration_commit(self) -> None: exp_change = exp_domain.ExplorationChange({ 'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT, 'version_number': 1 @@ -429,7 +449,7 @@ def test_revert_exploration_commit(self): class ExpVersionReferenceTests(test_utils.GenericTestBase): - def test_create_exp_version_reference_object(self): + def test_create_exp_version_reference_object(self) -> None: exp_version_reference = exp_domain.ExpVersionReference('exp_id', 1) self.assertEqual( @@ -438,52 +458,189 @@ def test_create_exp_version_reference_object(self): 'version': 1 }) - def test_validate_exp_version(self): - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validate_exp_version(self) -> None: + with self.assertRaisesRegex( Exception, 'Expected version to be an int, received invalid_version'): - exp_domain.ExpVersionReference('exp_id', 'invalid_version') + exp_domain.ExpVersionReference('exp_id', 'invalid_version') # type: ignore[arg-type] - def test_validate_exp_id(self): - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validate_exp_id(self) -> None: + with self.assertRaisesRegex( Exception, 'Expected exp_id to be a str, received 0'): - exp_domain.ExpVersionReference(0, 1) + exp_domain.ExpVersionReference(0, 1) # type: ignore[arg-type] + + +class TransientCheckpointUrlTests(test_utils.GenericTestBase): + """Testing TransientCheckpointUrl domain object.""" + + def setUp(self) -> None: + super().setUp() + self.transient_checkpoint_url = exp_domain.TransientCheckpointUrl( + 'exp_id', 'frcs_name', 1, 'mrrcs_name', 1) + + def test_initialization(self) -> None: + """Testing init method.""" + + self.assertEqual(self.transient_checkpoint_url.exploration_id, 'exp_id') + self.assertEqual( + self.transient_checkpoint_url + .furthest_reached_checkpoint_state_name, + 'frcs_name') + self.assertEqual( + self.transient_checkpoint_url. + furthest_reached_checkpoint_exp_version, 1) + self.assertEqual( + self.transient_checkpoint_url + .most_recently_reached_checkpoint_state_name, 'mrrcs_name') + self.assertEqual( + self.transient_checkpoint_url + .most_recently_reached_checkpoint_exp_version, 1) + + def test_to_dict(self) -> None: + logged_out_learner_progress_dict = { + 'exploration_id': 'exploration_id', + 'furthest_reached_checkpoint_exp_version': 1, + 'furthest_reached_checkpoint_state_name': ( + 'furthest_reached_checkpoint_state_name'), + 'most_recently_reached_checkpoint_exp_version': 1, + 'most_recently_reached_checkpoint_state_name': ( + 'most_recently_reached_checkpoint_state_name') + } + logged_out_learner_progress_object = exp_domain.TransientCheckpointUrl( + 'exploration_id', + 'furthest_reached_checkpoint_state_name', 1, + 'most_recently_reached_checkpoint_state_name', 1 + ) + self.assertEqual( + logged_out_learner_progress_object.to_dict(), + logged_out_learner_progress_dict) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_exploration_id_incorrect_type(self) -> None: + self.transient_checkpoint_url.exploration_id = 5 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected exploration_id to be a str' + ): + self.transient_checkpoint_url.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_furthest_reached_checkpoint_state_name_incorrect_type( + self + ) -> None: + self.transient_checkpoint_url.furthest_reached_checkpoint_state_name = 5 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected furthest_reached_checkpoint_state_name to be a str' + ): + self.transient_checkpoint_url.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_furthest_reached_checkpoint_exp_version_incorrect_type( + self + ) -> None: + self.transient_checkpoint_url.furthest_reached_checkpoint_exp_version = 'invalid_version' # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected furthest_reached_checkpoint_exp_version to be an int' + ): + self.transient_checkpoint_url.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_most_recently_reached_checkpoint_state_name_incorrect_type( + self + ) -> None: + self.transient_checkpoint_url.most_recently_reached_checkpoint_state_name = 5 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected most_recently_reached_checkpoint_state_name to be a str' + ): + self.transient_checkpoint_url.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_most_recently_reached_checkpoint_exp_version_incorrect_type( + self + ) -> None: + self.transient_checkpoint_url.most_recently_reached_checkpoint_exp_version = 'invalid_version' # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected most_recently_reached_checkpoint_exp_version to be an int' + ): + self.transient_checkpoint_url.validate() class ExplorationCheckpointsUnitTests(test_utils.GenericTestBase): """Test checkpoints validations in an exploration. 
""" - def setUp(self): - super(ExplorationCheckpointsUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.exploration = ( exp_domain.Exploration.create_default_exploration('eid')) + self.content_id_generator = translation_domain.ContentIdGenerator( + self.exploration.next_content_id_index + ) + self.new_state = state_domain.State.create_default_state( - 'Introduction', is_initial_state=True) - self.set_interaction_for_state(self.new_state, 'TextInput') + 'Introduction', + self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + is_initial_state=True) + self.set_interaction_for_state( + self.new_state, 'TextInput', self.content_id_generator) self.exploration.init_state_name = 'Introduction' self.exploration.states = { self.exploration.init_state_name: self.new_state } self.set_interaction_for_state( self.exploration.states[self.exploration.init_state_name], - 'TextInput') + 'TextInput', self.content_id_generator) self.init_state = ( self.exploration.states[self.exploration.init_state_name]) - self.end_state = state_domain.State.create_default_state('End') - self.set_interaction_for_state(self.end_state, 'EndExploration') + self.end_state = state_domain.State.create_default_state( + 'End', + self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + self.set_interaction_for_state( + self.end_state, 'EndExploration', self.content_id_generator) self.end_state.update_interaction_default_outcome(None) - def test_init_state_with_card_is_checkpoint_false_is_invalid(self): + self.exploration.next_content_id_index = ( + self.content_id_generator.next_content_id_index) + + def test_init_state_with_card_is_checkpoint_false_is_invalid(self) -> None: self.init_state.update_card_is_checkpoint(False) - with self.assertRaisesRegexp( + 
with self.assertRaisesRegex( Exception, 'Expected card_is_checkpoint of first state to ' 'be True but found it to be False'): self.exploration.validate(strict=True) self.init_state.update_card_is_checkpoint(True) - def test_end_state_with_card_is_checkpoint_true_is_invalid(self): + def test_end_state_with_card_is_checkpoint_true_is_invalid(self) -> None: default_outcome = self.init_state.interaction.default_outcome + # Ruling out the possibility of None for mypy type checking. + assert default_outcome is not None default_outcome.dest = self.exploration.init_state_name self.init_state.update_interaction_default_outcome(default_outcome) @@ -492,13 +649,15 @@ def test_end_state_with_card_is_checkpoint_true_is_invalid(self): 'End': self.end_state } self.end_state.update_card_is_checkpoint(True) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected card_is_checkpoint of terminal state ' 'to be False but found it to be True'): self.exploration.validate(strict=True) self.end_state.update_card_is_checkpoint(False) - def test_init_state_checkpoint_with_end_exp_interaction_is_valid(self): + def test_init_state_checkpoint_with_end_exp_interaction_is_valid( + self + ) -> None: self.exploration.init_state_name = 'End' self.exploration.states = { self.exploration.init_state_name: self.end_state @@ -510,7 +669,7 @@ def test_init_state_checkpoint_with_end_exp_interaction_is_valid(self): self.exploration.validate(strict=True) self.end_state.update_card_is_checkpoint(False) - def test_checkpoint_count_with_count_outside_range_is_invalid(self): + def test_checkpoint_count_with_count_outside_range_is_invalid(self) -> None: self.exploration.init_state_name = 'Introduction' self.exploration.states = { self.exploration.init_state_name: self.new_state, @@ -522,8 +681,8 @@ def test_checkpoint_count_with_count_outside_range_is_invalid(self): self.exploration.states['State%s' % i].card_is_checkpoint = True self.set_interaction_for_state( 
self.exploration.states['State%s' % i], - 'Continue') - with self.assertRaisesRegexp( + 'Continue', self.content_id_generator) + with self.assertRaisesRegex( Exception, 'Expected checkpoint count to be between 1 and 8 ' 'inclusive but found it to be 9' ): @@ -533,7 +692,9 @@ def test_checkpoint_count_with_count_outside_range_is_invalid(self): 'End': self.end_state } - def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): + def test_bypassable_state_with_card_is_checkpoint_true_is_invalid( + self + ) -> None: # Note: In the graphs below, states with the * symbol are checkpoints. # Exploration to test a checkpoint state which has no outcome. @@ -550,10 +711,24 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): # │ End │ # └───────────────┘. - second_state = state_domain.State.create_default_state('Second') - self.set_interaction_for_state(second_state, 'TextInput') - third_state = state_domain.State.create_default_state('Third') - self.set_interaction_for_state(third_state, 'TextInput') + second_state = state_domain.State.create_default_state( + 'Second', + self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + self.set_interaction_for_state( + second_state, 'TextInput', self.content_id_generator) + third_state = state_domain.State.create_default_state( + 'Third', + self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + self.set_interaction_for_state( + third_state, 'TextInput', self.content_id_generator) self.exploration.states = { self.exploration.init_state_name: self.new_state, @@ -567,8 +742,10 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): init_state_answer_groups = [ state_domain.AnswerGroup( state_domain.Outcome( - 'Second', state_domain.SubtitledHtml( - 
'feedback_0', '

    Feedback

    '), + 'Second', None, state_domain.SubtitledHtml( + self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + '

    Feedback

    '), False, [], None, None), [ state_domain.RuleSpec( @@ -576,7 +753,10 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): { 'x': { - 'contentId': 'rule_input_0', + 'contentId': ( + self.content_id_generator.generate( + translation_domain.ContentType.RULE, + extra_prefix='input')), 'normalizedStrSet': ['Test0'] } }) @@ -585,8 +765,10 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): None ), state_domain.AnswerGroup( state_domain.Outcome( - 'Third', state_domain.SubtitledHtml( - 'feedback_1', '

    Feedback

    '), + 'Third', None, state_domain.SubtitledHtml( + self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + '

    Feedback

    '), False, [], None, None), [ state_domain.RuleSpec( @@ -594,7 +776,10 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): { 'x': { - 'contentId': 'rule_input_1', + 'contentId': ( + self.content_id_generator.generate( + translation_domain.ContentType.RULE, + extra_prefix='input')), 'normalizedStrSet': ['Test1'] } }) @@ -608,8 +793,10 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): third_state_answer_groups = [ state_domain.AnswerGroup( state_domain.Outcome( - 'End', state_domain.SubtitledHtml( - 'feedback_0', '

    Feedback

    '), + 'End', None, state_domain.SubtitledHtml( + self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + '

    Feedback

    '), False, [], None, None), [ state_domain.RuleSpec( @@ -617,7 +804,10 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): { 'x': { - 'contentId': 'rule_input_0', + 'contentId': ( + self.content_id_generator.generate( + translation_domain.ContentType.RULE, + extra_prefix='input')), 'normalizedStrSet': ['Test0'] } }) @@ -631,10 +821,13 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): third_state.update_interaction_answer_groups( third_state_answer_groups) + self.exploration.next_content_id_index = ( + self.content_id_generator.next_content_id_index) + # The exploration can be completed via third_state. Hence, making # second_state a checkpoint raises a validation error. second_state.card_is_checkpoint = True - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Cannot make Second a checkpoint as it is' ' bypassable' ): @@ -660,7 +853,7 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): second_state_answer_groups = [ state_domain.AnswerGroup( state_domain.Outcome( - 'End', state_domain.SubtitledHtml( + 'End', None, state_domain.SubtitledHtml( 'feedback_0', '

    Feedback

    '), False, [], None, None), [ @@ -715,14 +908,42 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): # │ End │ # └───────────┘. - a_state = state_domain.State.create_default_state('A') - self.set_interaction_for_state(a_state, 'TextInput') - b_state = state_domain.State.create_default_state('B') - self.set_interaction_for_state(b_state, 'TextInput') - c_state = state_domain.State.create_default_state('C') - self.set_interaction_for_state(c_state, 'TextInput') - d_state = state_domain.State.create_default_state('D') - self.set_interaction_for_state(d_state, 'TextInput') + a_state = state_domain.State.create_default_state( + 'A', + self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + self.set_interaction_for_state( + a_state, 'TextInput', self.content_id_generator) + b_state = state_domain.State.create_default_state( + 'B', + self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + self.set_interaction_for_state( + b_state, 'TextInput', self.content_id_generator) + c_state = state_domain.State.create_default_state( + 'C', + self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + self.set_interaction_for_state( + c_state, 'TextInput', self.content_id_generator) + d_state = state_domain.State.create_default_state( + 'D', + self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + self.set_interaction_for_state( + d_state, 'TextInput', self.content_id_generator) self.exploration.states = { self.exploration.init_state_name: self.new_state, @@ -738,7 +959,7 @@ def 
test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): init_state_answer_groups = [ state_domain.AnswerGroup( state_domain.Outcome( - 'A', state_domain.SubtitledHtml( + 'A', None, state_domain.SubtitledHtml( 'feedback_0', '

    Feedback

    '), False, [], None, None), [ @@ -756,7 +977,7 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): None ), state_domain.AnswerGroup( state_domain.Outcome( - 'B', state_domain.SubtitledHtml( + 'B', None, state_domain.SubtitledHtml( 'feedback_1', '

    Feedback

    '), False, [], None, None), [ @@ -774,7 +995,7 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): None ), state_domain.AnswerGroup( state_domain.Outcome( - 'C', state_domain.SubtitledHtml( + 'C', None, state_domain.SubtitledHtml( 'feedback_2', '

    Feedback

    '), False, [], None, None), [ @@ -797,7 +1018,7 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): a_and_b_state_answer_groups = [ state_domain.AnswerGroup( state_domain.Outcome( - 'D', state_domain.SubtitledHtml( + 'D', None, state_domain.SubtitledHtml( 'feedback_0', '

    Feedback

    '), False, [], None, None), [ @@ -820,7 +1041,7 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): c_and_d_state_answer_groups = [ state_domain.AnswerGroup( state_domain.Outcome( - 'End', state_domain.SubtitledHtml( + 'End', None, state_domain.SubtitledHtml( 'feedback_0', '

    Feedback

    '), False, [], None, None), [ @@ -854,7 +1075,9 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): # d_state becomes bypassable. Hence, making d_state a checkpoint raises # validation error. d_state.update_card_is_checkpoint(True) - with self.assertRaisesRegexp( + self.exploration.update_next_content_id_index( + self.content_id_generator.next_content_id_index) + with self.assertRaisesRegex( Exception, 'Cannot make D a checkpoint as it is bypassable' ): self.exploration.validate(strict=True) @@ -884,7 +1107,7 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): c_state_answer_groups = [ state_domain.AnswerGroup( state_domain.Outcome( - 'D', state_domain.SubtitledHtml( + 'D', None, state_domain.SubtitledHtml( 'feedback_0', '

    Feedback

    '), False, [], None, None), [ @@ -927,8 +1150,15 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): # │ End │ │ End 2 │ # └───────────┘ └───────────┘. - new_end_state = state_domain.State.create_default_state('End 2') - self.set_interaction_for_state(new_end_state, 'EndExploration') + new_end_state = state_domain.State.create_default_state( + 'End 2', + self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + self.set_interaction_for_state( + new_end_state, 'EndExploration', self.content_id_generator) new_end_state.update_interaction_default_outcome(None) self.exploration.states = { @@ -946,7 +1176,7 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): c_state_answer_groups = [ state_domain.AnswerGroup( state_domain.Outcome( - 'D', state_domain.SubtitledHtml( + 'D', None, state_domain.SubtitledHtml( 'feedback_0', '

    Feedback

    '), False, [], None, None), [ @@ -964,7 +1194,7 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): None ), state_domain.AnswerGroup( state_domain.Outcome( - 'End 2', state_domain.SubtitledHtml( + 'End 2', None, state_domain.SubtitledHtml( 'feedback_1', '

    Feedback

    '), False, [], None, None), [ @@ -985,7 +1215,7 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): c_state.update_interaction_answer_groups( c_state_answer_groups) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Cannot make D a checkpoint as it is bypassable' ): self.exploration.validate(strict=True) @@ -995,1891 +1225,5735 @@ def test_bypassable_state_with_card_is_checkpoint_true_is_invalid(self): class ExplorationDomainUnitTests(test_utils.GenericTestBase): """Test the exploration domain object.""" - # TODO(bhenning): The validation tests below should be split into separate - # unit tests. Also, all validation errors should be covered in the tests. - def test_validation(self): - """Test validation of explorations.""" - exploration = exp_domain.Exploration.create_default_exploration('eid') - exploration.init_state_name = '' - exploration.states = {} - - exploration.title = 'Hello #' - self._assert_validation_error(exploration, 'Invalid character #') - - exploration.title = 'Title' - exploration.category = 'Category' + def setUp(self) -> None: + super().setUp() + translation_dict = { + 'content_id_3': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + True + ) + } + self.dummy_entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict) + self.new_exploration = ( + exp_domain.Exploration.create_default_exploration('test_id')) + self.content_id_generator = translation_domain.ContentIdGenerator( + self.new_exploration.next_content_id_index + ) + self.state = self.new_exploration.states['Introduction'] + self.set_interaction_for_state( + self.state, 'Continue', self.content_id_generator) - # Note: If '/' ever becomes a valid state name, ensure that the rule - # editor frontend tenplate is fixed -- it currently uses '/' as a - # sentinel for an invalid state name. 
- bad_state = state_domain.State.create_default_state('/') - exploration.states = {'/': bad_state} + def test_image_rte_tag(self) -> None: + """Validate image tag.""" + self.state.content.html = ( + '') self._assert_validation_error( - exploration, 'Invalid character / in a state name') + self.new_exploration, 'Image tag does not have \'alt-with-value\' ' + 'attribute.') + + self.state.content.html = ( + '') + self._assert_validation_error( + self.new_exploration, 'Image tag \'caption-with-value\' attribute ' + 'should not be greater than 500 characters.') - new_state = state_domain.State.create_default_state('ABC') - self.set_interaction_for_state(new_state, 'TextInput') + self.state.content.html = ( + '' + '') + self._assert_validation_error( + self.new_exploration, 'Image tag does not have \'caption-with' + '-value\' attribute.') - # The 'states' property must be a non-empty dict of states. - exploration.states = {} + self.state.content.html = ( + '') self._assert_validation_error( - exploration, 'exploration has no states') - exploration.states = {'A string #': new_state} + self.new_exploration, 'Image tag \'filepath-with-value\' attribute ' + 'should not be empty.') + + self.state.content.html = ( + '') self._assert_validation_error( - exploration, 'Invalid character # in a state name') - exploration.states = {'A string _': new_state} + self.new_exploration, 'Image tag does not have \'filepath-with' + '-value\' attribute.') + + def test_skill_review_rte_tag(self) -> None: + """Validate SkillReview tag.""" + self.state.content.html = ( + '' + ) self._assert_validation_error( - exploration, 'Invalid character _ in a state name') - - exploration.states = {'ABC': new_state} + self.new_exploration, 'SkillReview tag does not have \'text-with' + '-value\' attribute.') + self.state.content.html = ( + '' + ) self._assert_validation_error( - exploration, 'has no initial state name') - - exploration.init_state_name = 'initname' + self.new_exploration, 'SkillReview tag 
\'text-with-value\' ' + 'attribute should not be empty.') + self.state.content.html = ( + '' + ) self._assert_validation_error( - exploration, - r'There is no state in \[\'ABC\'\] corresponding to ' - 'the exploration\'s initial state name initname.') + self.new_exploration, 'SkillReview tag does not have ' + '\'skill_id-with-value\' attribute.') - # Test whether a default outcome to a non-existing state is invalid. - exploration.states = {exploration.init_state_name: new_state} + self.state.content.html = ( + '' + ) self._assert_validation_error( - exploration, 'destination ABC is not a valid') - - # Restore a valid exploration. - init_state = exploration.states[exploration.init_state_name] - default_outcome = init_state.interaction.default_outcome - default_outcome.dest = exploration.init_state_name - init_state.update_interaction_default_outcome(default_outcome) - init_state.update_card_is_checkpoint(True) - exploration.validate() - - # Ensure an invalid destination can also be detected for answer groups. - # Note: The state must keep its default_outcome, otherwise it will - # trigger a validation error for non-terminal states needing to have a - # default outcome. To validate the outcome of the answer group, this - # default outcome must point to a valid state. - init_state = exploration.states[exploration.init_state_name] - default_outcome = init_state.interaction.default_outcome - default_outcome.dest = exploration.init_state_name - old_answer_groups = copy.deepcopy(init_state.interaction.answer_groups) - old_answer_groups.append({ - 'outcome': { - 'dest': exploration.init_state_name, - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'labelled_as_correct': False, - 'param_changes': [], - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None - }, - 'rule_specs': [{ - 'inputs': { - 'x': { - 'contentId': 'rule_input_Equals', - 'normalizedStrSet': ['Test'] - } - }, - 'rule_type': 'Contains' - }], - 'training_data': [], - 'tagged_skill_misconception_id': None - }) - - new_answer_groups = [ - state_domain.AnswerGroup.from_dict(answer_group) - for answer_group in old_answer_groups - ] - init_state.update_interaction_answer_groups(new_answer_groups) + self.new_exploration, 'SkillReview tag \'skill_id-with-value\' ' + 'attribute should not be empty.') + + def test_video_rte_tag(self) -> None: + """Validate Video tag.""" + self.state.content.html = ( + '') + self._assert_validation_error( + self.new_exploration, 'Video tag does not have \'start-with' + '-value\' attribute.') + + self.state.content.html = ( + '') + self._assert_validation_error( + self.new_exploration, 'Video tag \'start-with-value\' attribute ' + 'should not be empty.') + + self.state.content.html = ( + '') + self._assert_validation_error( + self.new_exploration, 'Video tag does not have \'end-with-value\' ' + 'attribute.') + + self.state.content.html = ( + '') + self._assert_validation_error( + self.new_exploration, 'Video tag \'end-with-value\' attribute ' + 'should not be empty.') + + self.state.content.html = ( + '') + self._assert_validation_error( + self.new_exploration, 'Start value should not be greater than End ' + 'value in Video tag.') + + self.state.content.html = ( + '') + self._assert_validation_error( + self.new_exploration, 'Video tag does not have \'autoplay-with' + '-value\' attribute.') + + self.state.content.html = ( + '') + self._assert_validation_error( + self.new_exploration, 'Video tag \'autoplay-with-value\' attribute ' + 'should be a boolean value.') - exploration.validate() + self.state.content.html = ( + '' + '') + self._assert_validation_error( + self.new_exploration, 'Video 
tag does not have \'video_id-with' + '-value\' attribute.') + + self.state.content.html = ( + '') + self._assert_validation_error( + self.new_exploration, 'Video tag \'video_id-with-value\' attribute ' + 'should not be empty.') + + def test_link_rte_tag(self) -> None: + """Validate Link tag.""" + self.state.content.html = ( + '' + '' + ) + self._assert_validation_error( + self.new_exploration, 'Link tag does not have \'text-with-value\' ' + 'attribute.') - interaction = init_state.interaction - answer_groups = interaction.answer_groups - answer_group = answer_groups[0] - answer_group.outcome.dest = 'DEF' + self.state.content.html = ( + '' + '' + ) self._assert_validation_error( - exploration, 'destination DEF is not a valid') + self.new_exploration, 'Link tag does not have \'url-with-value\' ' + 'attribute.') - # Restore a valid exploration. - self.set_interaction_for_state( - init_state, 'TextInput') - new_answer_groups = [ - state_domain.AnswerGroup.from_dict(answer_groups) - for answer_groups in old_answer_groups - ] - init_state.update_interaction_answer_groups(new_answer_groups) - answer_groups = interaction.answer_groups - answer_group = answer_groups[0] - answer_group.outcome.dest = exploration.init_state_name - exploration.validate() + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'Link tag \'url-with-value\' attribute ' + 'should not be empty.') - # Validate RuleSpec. 
- rule_spec = answer_group.rule_specs[0] - rule_spec.inputs = {} + self.state.content.html = ( + '' + ) self._assert_validation_error( - exploration, 'RuleSpec \'Contains\' is missing inputs') + self.new_exploration, ( + 'Link should be prefix with acceptable schemas ' + 'which are \\[\'https\', \'\']') + ) - rule_spec.inputs = 'Inputs string' + def test_math_rte_tag(self) -> None: + """Validate Math tag.""" + self.state.content.html = ( + '' + ) self._assert_validation_error( - exploration, 'Expected inputs to be a dict') + self.new_exploration, 'Math tag does not have ' + '\'math_content-with-value\' attribute.') - rule_spec.inputs = {'x': 'Test'} - rule_spec.rule_type = 'FakeRuleType' - self._assert_validation_error(exploration, 'Unrecognized rule type') + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'Math tag \'math_content-with-value\' ' + 'attribute should not be empty.') - rule_spec.inputs = {'x': { - 'contentId': 'rule_input_Equals', - 'normalizedStrSet': 15 - }} - rule_spec.rule_type = 'Contains' - with self.assertRaisesRegexp( - AssertionError, 'Expected list, received 15' - ): - exploration.validate() + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'Math tag does not have \'raw_latex-with' + '-value\' attribute.') + + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'Math tag \'raw_latex-with-value\' attribute ' + 'should not be empty.') - self.set_interaction_for_state( - exploration.states[exploration.init_state_name], - 'PencilCodeEditor') - temp_rule = old_answer_groups[0]['rule_specs'][0] - old_answer_groups[0]['rule_specs'][0] = { - 'rule_type': 'ErrorContains', - 'inputs': {'x': '{{ExampleParam}}'} - } - new_answer_groups = [ - state_domain.AnswerGroup.from_dict(answer_group) - for answer_group in old_answer_groups - ] - init_state.update_interaction_answer_groups(new_answer_groups) - 
old_answer_groups[0]['rule_specs'][0] = temp_rule + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'Math tag does not have ' + '\'svg_filename-with-value\' attribute.') + + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'Math tag \'svg_filename-with-value\' ' + 'attribute should not be empty.') + + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'Math tag \'svg_filename-with-value\' ' + 'attribute should have svg extension.') + + def test_tabs_rte_tag(self) -> None: + """Validate Tabs tag.""" + self.state.content.html = ( + '' + '' + ) + self._assert_validation_error( + self.new_exploration, 'No tabs are present inside the tabs tag.') + self.state.content.html = ( + '' + ) self._assert_validation_error( - exploration, - 'RuleSpec \'ErrorContains\' has an input with name \'x\' which ' - 'refers to an unknown parameter within the exploration: ' - 'ExampleParam') + self.new_exploration, 'No content attribute is present inside ' + 'the tabs tag.') + + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'No title attribute is present inside ' + 'the tabs tag.') + + self.state.content.html = ( + '' + '' + ) + self._assert_validation_error( + self.new_exploration, 'title present inside tabs tag is empty.') - # Restore a valid exploration. 
- exploration.param_specs['ExampleParam'] = param_domain.ParamSpec( - 'UnicodeString') - exploration.validate() + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'No content attribute is present inside ' + 'the tabs tag.') + + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'content present inside tabs tag is empty.') + + self.state.content.html = ( + '' + '' + ) + self._assert_validation_error( + self.new_exploration, 'Tabs tag should not be present inside ' + 'another Tabs or Collapsible tag.') + + def test_collapsible_rte_tag(self) -> None: + """Validate Collapsible tag.""" + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'No collapsible content is present ' + 'inside the tag.') - # Validate Outcome. - outcome = init_state.interaction.answer_groups[0].outcome - destination = exploration.init_state_name - outcome.dest = None + self.state.content.html = ( + '' + ) self._assert_validation_error( - exploration, 'Every outcome should have a destination.') + self.new_exploration, 'No content attribute present in ' + 'collapsible tag.') - # Try setting the outcome destination to something other than a string. 
- outcome.dest = 15 + self.state.content.html = ( + '' + ) self._assert_validation_error( - exploration, 'Expected outcome dest to be a string') + self.new_exploration, 'Heading attribute inside the collapsible ' + 'tag is empty.') - outcome.dest = destination + self.state.content.html = ( + '' + ) + self._assert_validation_error( + self.new_exploration, 'No heading attribute present in ' + 'collapsible tag.') + + self.state.content.html = ( + '' + '&quot;\' heading-with-value' + '=\'&quot;heading&quot;\'>' + '' + ) + self._assert_validation_error( + self.new_exploration, 'Collapsible tag should not be present ' + 'inside another Tabs or Collapsible tag.') + self.state.content.html = 'Valid content' - outcome.feedback = state_domain.SubtitledHtml('feedback_1', '') - exploration.validate() + def test_continue_interaction(self) -> None: + """Tests Continue interaction.""" + self.set_interaction_for_state( + self.state, 'Continue', self.content_id_generator) + # Here we use cast because we are narrowing down the type from various + # customization args value types to 'SubtitledUnicode' type, and this + # is done because here we are accessing 'buttontext' key from continue + # customization arg whose value is always of SubtitledUnicode type. 
+ subtitled_unicode_continue_ca_arg = cast( + state_domain.SubtitledUnicode, + self.state.interaction.customization_args[ + 'buttonText' + ].value + ) + subtitled_unicode_continue_ca_arg.unicode_str = ( + 'Continueeeeeeeeeeeeeeeeee' + ) + self._assert_validation_error( + self.new_exploration, ( + 'The `continue` interaction text length should be atmost ' + '20 characters.') + ) - outcome.labelled_as_correct = 'hello' + def test_end_interaction(self) -> None: + """Tests End interaction.""" + self.set_interaction_for_state( + self.state, 'EndExploration', self.content_id_generator) + self.state.interaction.customization_args[ + 'recommendedExplorationIds'].value = ['id1', 'id2', 'id3', 'id4'] + self.state.update_interaction_default_outcome(None) self._assert_validation_error( - exploration, 'The "labelled_as_correct" field should be a boolean') + self.new_exploration, ( + 'The total number of recommended explorations inside End ' + 'interaction should be atmost 3.') + ) - # Test that labelled_as_correct must be False for self-loops, and that - # this causes a strict validation failure but not a normal validation - # failure. - outcome.labelled_as_correct = True - with self.assertRaisesRegexp( - Exception, 'is labelled correct but is a self-loop.' 
+ def test_numeric_interaction(self) -> None: + """Tests Numeric interaction.""" + content_id_generator = translation_domain.ContentIdGenerator() + self.set_interaction_for_state( + self.state, 'NumericInput', content_id_generator) + test_ans_group_for_numeric_interaction = [ + state_domain.AnswerGroup.from_dict({ + 'rule_specs': [ + { + 'rule_type': 'IsLessThanOrEqualTo', + 'inputs': { + 'x': 7 + } + }, + { + 'rule_type': 'IsInclusivelyBetween', + 'inputs': { + 'a': 3, + 'b': 5 + } + }, + { + 'rule_type': 'IsWithinTolerance', + 'inputs': { + 'x': 1, + 'tol': -1 + } + }, + { + 'rule_type': 'IsInclusivelyBetween', + 'inputs': { + 'a': 8, + 'b': 8 + } + }, + { + 'rule_type': 'IsLessThanOrEqualTo', + 'inputs': { + 'x': 7 + } + }, + { + 'rule_type': 'IsGreaterThanOrEqualTo', + 'inputs': { + 'x': 10 + } + }, + { + 'rule_type': 'IsGreaterThanOrEqualTo', + 'inputs': { + 'x': 15 + } + } + ], + 'outcome': { + 'dest': 'EXP_1_STATE_1', + 'feedback': { + 'content_id': 'feedback_0', + 'html': '

    good

    ' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'dest_if_really_stuck': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }) + ] + self.state.interaction.answer_groups = ( + test_ans_group_for_numeric_interaction) + with self.assertRaisesRegex( + utils.ValidationError, 'Rule \'1\' from answer group \'0\' will ' + 'never be matched because it is made redundant by the above rules' ): - exploration.validate(strict=True) - exploration.validate() - - outcome.labelled_as_correct = False - exploration.validate() + self.new_exploration.validate(strict=True) + rule_specs = self.state.interaction.answer_groups[0].rule_specs + rule_specs.remove(rule_specs[1]) - outcome.param_changes = 'Changes' - self._assert_validation_error( - exploration, 'Expected outcome param_changes to be a list') - - outcome.param_changes = [param_domain.ParamChange( - 0, 'generator_id', {})] self._assert_validation_error( - exploration, - 'Expected param_change name to be a string, received 0') - - outcome.param_changes = [] - exploration.validate() + self.new_exploration, 'The rule \'1\' of answer group \'0\' having ' + 'rule type \'IsWithinTolerance\' have \'tol\' value less than or ' + 'equal to zero in NumericInput interaction.') + rule_specs.remove(rule_specs[1]) + + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' ' + 'having rule type \'IsInclusivelyBetween\' have `a` value greater ' + 'than `b` value in NumericInput interaction.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) - outcome.refresher_exploration_id = 12345 - self._assert_validation_error( - exploration, - 'Expected outcome refresher_exploration_id to be a string') + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' of ' + 'NumericInput interaction is already present.' 
+ ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) - outcome.refresher_exploration_id = None - exploration.validate() + with self.assertRaisesRegex( + utils.ValidationError, 'Rule \'2\' from answer group \'0\' will ' + 'never be matched because it is made redundant by the above rules' + ): + self.new_exploration.validate(strict=True) - outcome.refresher_exploration_id = 'valid_string' - exploration.validate() + def test_fraction_interaction(self) -> None: + """Tests Fraction interaction.""" + state = self.new_exploration.states['Introduction'] + content_id_generator = translation_domain.ContentIdGenerator() + self.set_interaction_for_state( + state, 'FractionInput', content_id_generator) + test_ans_group_for_fraction_interaction = [ + state_domain.AnswerGroup.from_dict({ + 'rule_specs': [ + { + 'rule_type': 'HasFractionalPartExactlyEqualTo', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 2, + 'denominator': 3 + } + } + }, + { + 'rule_type': 'HasFractionalPartExactlyEqualTo', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 2, + 'denominator': 3 + } + } + }, + { + 'rule_type': 'HasFractionalPartExactlyEqualTo', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 4, + 'denominator': 6 + } + } + }, + { + 'rule_type': 'HasFractionalPartExactlyEqualTo', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 1, + 'numerator': 3, + 'denominator': 2 + } + } + }, + { + 'rule_type': 'HasFractionalPartExactlyEqualTo', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 3, + 'denominator': 2 + } + } + }, + { + 'rule_type': 'IsExactlyEqualTo', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 2, + 'numerator': 2, + 'denominator': 3 + } + } + }, + { + 'rule_type': 'IsGreaterThan', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 10, + 'denominator': 3 + } + } + }, + { + 
'rule_type': 'IsExactlyEqualTo', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 27, + 'denominator': 2 + } + } + }, + { + 'rule_type': 'HasDenominatorEqualTo', + 'inputs': { + 'x': 4 + } + }, + { + 'rule_type': 'HasFractionalPartExactlyEqualTo', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 9, + 'denominator': 4 + } + } + }, + { + 'rule_type': 'IsLessThan', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 7, + 'denominator': 2 + } + } + }, + { + 'rule_type': 'IsLessThan', + 'inputs': { + 'f': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 5, + 'denominator': 2 + } + } + } + ], + 'outcome': { + 'dest': 'EXP_1_STATE_1', + 'feedback': { + 'content_id': 'feedback_0', + 'html': '

    good

    ' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'dest_if_really_stuck': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }) + ] + state.interaction.answer_groups = ( + test_ans_group_for_fraction_interaction) + state.interaction.customization_args[ + 'allowNonzeroIntegerPart'].value = False + state.interaction.customization_args[ + 'allowImproperFraction'].value = False + state.interaction.customization_args[ + 'requireSimplestForm'].value = True + rule_specs = state.interaction.answer_groups[0].rule_specs + + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' of ' + 'FractionInput interaction is already present.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) - outcome.missing_prerequisite_skill_id = 12345 - self._assert_validation_error( - exploration, - 'Expected outcome missing_prerequisite_skill_id to be a string') + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' do ' + 'not have value in simple form ' + 'in FractionInput interaction.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) - outcome.missing_prerequisite_skill_id = None - exploration.validate() + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' do ' + 'not have value in proper fraction ' + 'in FractionInput interaction.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) - outcome.missing_prerequisite_skill_id = 'valid_string' - exploration.validate() + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' do ' + 'not have value in proper fraction ' + 'in FractionInput interaction.' 
+ ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) + + state.interaction.customization_args[ + 'allowImproperFraction'].value = True + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' has ' + 'non zero integer part in FractionInput interaction.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) - # Test that refresher_exploration_id must be None for non-self-loops. - new_state_name = 'New state' - exploration.add_states([new_state_name]) + with self.assertRaisesRegex( + utils.ValidationError, 'Rule \'2\' from answer group \'0\' of ' + 'FractionInput interaction will never be matched because it is ' + 'made redundant by the above rules' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) - outcome.dest = new_state_name - outcome.refresher_exploration_id = 'another_string' self._assert_validation_error( - exploration, - 'has a refresher exploration ID, but is not a self-loop') - - outcome.refresher_exploration_id = None - exploration.validate() - exploration.delete_state(new_state_name) + self.new_exploration, 'Rule \'3\' from answer group \'0\' of ' + 'FractionInput interaction having rule type HasFractionalPart' + 'ExactlyEqualTo will never be matched because it is ' + 'made redundant by the above rules') + rule_specs.remove(rule_specs[1]) + rule_specs.remove(rule_specs[1]) + + with self.assertRaisesRegex( + utils.ValidationError, 'Rule \'3\' from answer group \'0\' of ' + 'FractionInput interaction will never be matched because it is ' + 'made redundant by the above rules' + ): + self.new_exploration.validate(strict=True) - # Validate InteractionInstance. 
- interaction.id = 15 - self._assert_validation_error( - exploration, 'Expected interaction id to be a string') + def test_number_with_units_interaction(self) -> None: + """Tests NumberWithUnits interaction.""" + content_id_generator = translation_domain.ContentIdGenerator() + self.set_interaction_for_state( + self.state, 'NumberWithUnits', content_id_generator) + test_ans_group_for_number_with_units_interaction = [ + state_domain.AnswerGroup.from_dict({ + 'rule_specs': [ + { + 'rule_type': 'IsEquivalentTo', + 'inputs': { + 'f': { + + 'type': 'real', + 'real': 2, + 'fraction': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 0, + 'denominator': 1 + }, + 'units': [ + { + 'unit': 'km', + 'exponent': 1 + }, + { + 'unit': 'hr', + 'exponent': -1 + } + ] + } + } + }, + { + 'rule_type': 'IsEqualTo', + 'inputs': { + 'f': { + + 'type': 'real', + 'real': 2, + 'fraction': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 0, + 'denominator': 1 + }, + 'units': [ + { + 'unit': 'km', + 'exponent': 1 + }, + { + 'unit': 'hr', + 'exponent': -1 + } + ] + } + } + }, + { + 'rule_type': 'IsEquivalentTo', + 'inputs': { + 'f': { + + 'type': 'real', + 'real': 2, + 'fraction': { + 'isNegative': False, + 'wholeNumber': 0, + 'numerator': 0, + 'denominator': 1 + }, + 'units': [ + { + 'unit': 'km', + 'exponent': 1 + }, + { + 'unit': 'hr', + 'exponent': -1 + } + ] + } + } + } + ], + 'outcome': { + 'dest': 'EXP_1_STATE_1', + 'feedback': { + 'content_id': 'feedback_0', + 'html': '

    good

    ' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'dest_if_really_stuck': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }) + ] + self.state.update_interaction_answer_groups( + test_ans_group_for_number_with_units_interaction) + rule_specs = self.state.interaction.answer_groups[0].rule_specs + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' has ' + 'rule type equal is coming after rule type equivalent having ' + 'same value in FractionInput interaction.' + ): + self.new_exploration.validate(strict=True) - interaction.id = 'SomeInteractionTypeThatDoesNotExist' - self._assert_validation_error(exploration, 'Invalid interaction id') - interaction.id = 'PencilCodeEditor' + rule_specs.remove(rule_specs[1]) + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' of ' + 'NumberWithUnitsInput interaction is already present.' + ): + self.new_exploration.validate(strict=True) - self.set_interaction_for_state(init_state, 'TextInput') - new_answer_groups = [ - state_domain.AnswerGroup.from_dict(answer_group) - for answer_group in old_answer_groups + def test_multiple_choice_interaction(self) -> None: + """Tests MultipleChoice interaction.""" + content_id_generator = translation_domain.ContentIdGenerator() + self.set_interaction_for_state( + self.state, 'MultipleChoiceInput', content_id_generator) + test_ans_group_for_multiple_choice_interaction = [ + state_domain.AnswerGroup.from_dict({ + 'rule_specs': [ + { + 'rule_type': 'Equals', + 'inputs': { + 'x': 0 + } + }, + { + 'rule_type': 'Equals', + 'inputs': { + 'x': 0 + } + } + ], + 'outcome': { + 'dest': 'EXP_1_STATE_1', + 'feedback': { + 'content_id': 'feedback_0', + 'html': '

    good

    ' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'dest_if_really_stuck': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }) + ] + self.state.update_interaction_answer_groups( + test_ans_group_for_multiple_choice_interaction) + rule_specs = self.state.interaction.answer_groups[0].rule_specs + self.state.interaction.customization_args['choices'].value = [ + state_domain.SubtitledHtml('ca_choices_0', '

    1

    '), + state_domain.SubtitledHtml('ca_choices_1', '

    2

    '), + state_domain.SubtitledHtml('ca_choices_2', '

    3

    ') ] - init_state.update_interaction_answer_groups(new_answer_groups) - valid_text_input_cust_args = init_state.interaction.customization_args - rule_spec.inputs = {'x': { - 'contentId': 'rule_input_Equals', - 'normalizedStrSet': ['Test'] - }} - rule_spec.rule_type = 'Contains' - exploration.validate() - interaction.customization_args = [] - self._assert_validation_error( - exploration, 'Expected customization args to be a dict') + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' of ' + 'MultipleChoiceInput interaction is already present.' + ): + self.new_exploration.validate(strict=True) - interaction.customization_args = {15: ''} - self._assert_validation_error( - exploration, - ( - 'Expected customization arg value to be a ' - 'InteractionCustomizationArg' - ) - ) + rule_specs.remove(rule_specs[1]) + self.state.interaction.customization_args[ + 'choices'].value[2].html = '

    2

    ' - interaction.customization_args = { - 15: state_domain.InteractionCustomizationArg('', { - 'type': 'unicode' + def test_item_selection_choice_interaction(self) -> None: + """Tests ItemSelection interaction.""" + content_id_generator = translation_domain.ContentIdGenerator() + self.set_interaction_for_state( + self.state, 'ItemSelectionInput', content_id_generator) + self.state.interaction.customization_args[ + 'minAllowableSelectionCount'].value = 1 + self.state.interaction.customization_args[ + 'maxAllowableSelectionCount'].value = 3 + test_ans_group_for_item_selection_interaction = [ + state_domain.AnswerGroup.from_dict({ + 'rule_specs': [ + { + 'rule_type': 'Equals', + 'inputs': { + 'x': ['ca_choices_0', 'ca_choices_1', 'ca_choices_2'] + } + }, + { + 'rule_type': 'Equals', + 'inputs': { + 'x': ['ca_choices_0', 'ca_choices_1', 'ca_choices_2'] + } + } + ], + 'outcome': { + 'dest': 'EXP_1_STATE_1', + 'feedback': { + 'content_id': 'feedback_0', + 'html': '

    good

    ' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'dest_if_really_stuck': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None }) - } - self._assert_validation_error( - exploration, 'Invalid customization arg name') - - interaction.customization_args = valid_text_input_cust_args - self.set_interaction_for_state(init_state, 'TextInput') - exploration.validate() - - interaction.answer_groups = {} - self._assert_validation_error( - exploration, 'Expected answer groups to be a list') - - new_answer_groups = [ - state_domain.AnswerGroup.from_dict(answer_group) - for answer_group in old_answer_groups ] - init_state.update_interaction_answer_groups(new_answer_groups) - self.set_interaction_for_state(init_state, 'EndExploration') - self._assert_validation_error( - exploration, - 'Terminal interactions must not have a default outcome.') + self.state.update_interaction_answer_groups( + test_ans_group_for_item_selection_interaction) + rule_specs = self.state.interaction.answer_groups[0].rule_specs + self.state.interaction.customization_args['choices'].value = [ + state_domain.SubtitledHtml('ca_choices_0', '

    1

    '), + state_domain.SubtitledHtml('ca_choices_1', '

    2

    '), + state_domain.SubtitledHtml('ca_choices_2', '

    3

    ') + ] + self.state.interaction.customization_args[ + 'minAllowableSelectionCount'].value = 3 + self.state.interaction.customization_args[ + 'maxAllowableSelectionCount'].value = 1 - self.set_interaction_for_state(init_state, 'TextInput') - init_state.update_interaction_default_outcome(None) self._assert_validation_error( - exploration, - 'Non-terminal interactions must have a default outcome.') + self.new_exploration, 'Min value which is 3 is greater than max ' + 'value which is 1 in ItemSelectionInput interaction.' + ) - self.set_interaction_for_state(init_state, 'EndExploration') - init_state.interaction.answer_groups = answer_groups + self.state.interaction.customization_args[ + 'minAllowableSelectionCount'].value = 4 + self.state.interaction.customization_args[ + 'maxAllowableSelectionCount'].value = 4 self._assert_validation_error( - exploration, - 'Terminal interactions must not have any answer groups.') - - # A terminal interaction without a default outcome or answer group is - # valid. This resets the exploration back to a valid state. - init_state.interaction.answer_groups = [] - exploration.validate() - - # Restore a valid exploration. 
- self.set_interaction_for_state(init_state, 'TextInput') - init_state.update_interaction_answer_groups(answer_groups) - init_state.update_interaction_default_outcome(default_outcome) - exploration.validate() - solution_dict = { - 'answer_is_exclusive': True, - 'correct_answer': 'hello_world!', - 'explanation': { - 'content_id': 'solution', - 'html': 'hello_world is a string' + self.new_exploration, 'Number of choices which is 3 is lesser ' + 'than the min value selection which is 4 in ItemSelectionInput ' + 'interaction.') + + self.state.interaction.customization_args[ + 'minAllowableSelectionCount'].value = 1 + self.state.interaction.customization_args[ + 'maxAllowableSelectionCount'].value = 3 + with self.assertRaisesRegex( + utils.ValidationError, 'The rule 1 of answer group 0 of ' + 'ItemSelectionInput interaction is already present.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) + + self.state.interaction.customization_args[ + 'minAllowableSelectionCount'].value = 1 + self.state.interaction.customization_args[ + 'maxAllowableSelectionCount'].value = 2 + with self.assertRaisesRegex( + utils.ValidationError, 'Selected choices of rule \'0\' of answer ' + 'group \'0\' either less than min_selection_value or greater than ' + 'max_selection_value in ItemSelectionInput interaction.' 
+ ): + self.new_exploration.validate(strict=True) + + self.state.interaction.customization_args[ + 'minAllowableSelectionCount'].value = 1 + self.state.interaction.customization_args[ + 'maxAllowableSelectionCount'].value = 3 + + def test_drag_and_drop_interaction(self) -> None: + """Tests DragAndDrop interaction.""" + self.state.recorded_voiceovers.add_content_id_for_voiceover( + 'ca_choices_2') + content_id_generator = translation_domain.ContentIdGenerator() + self.set_interaction_for_state( + self.state, 'DragAndDropSortInput', content_id_generator) + empty_list: List[str] = [] + test_ans_group_for_drag_and_drop_interaction = [ + state_domain.AnswerGroup.from_dict({ + 'rule_specs': [ + { + 'rule_type': ( + 'IsEqualToOrderingWithOneItemAtIncorrectPosition'), + 'inputs': { + 'x': [ + [ + 'ca_choices_0' + ], + [ + 'ca_choices_1', 'ca_choices_2' + ], + [ + 'ca_choices_3' + ] + ] + } + }, + { + 'rule_type': 'IsEqualToOrdering', + 'inputs': { + 'x': [ + [ + 'ca_choices_0' + ], + [ + 'ca_choices_1', 'ca_choices_2' + ], + [ + 'ca_choices_3' + ] + ] + } + }, + { + 'rule_type': 'HasElementXAtPositionY', + 'inputs': { + 'x': 'ca_choices_0', + 'y': 4 + } + }, + { + 'rule_type': 'IsEqualToOrdering', + 'inputs': { + 'x': [ + [ + 'ca_choices_3' + ], + [ + 'ca_choices_0', 'ca_choices_1', 'ca_choices_2' + ] + ] + } + }, + { + 'rule_type': 'HasElementXBeforeElementY', + 'inputs': { + 'x': 'ca_choices_0', + 'y': 'ca_choices_0' + } + }, + { + 'rule_type': 'IsEqualToOrdering', + 'inputs': { + 'x': empty_list + } + }, + { + 'rule_type': 'HasElementXAtPositionY', + 'inputs': { + 'x': 'ca_choices_0', + 'y': 1 + } + }, + { + 'rule_type': 'IsEqualToOrdering', + 'inputs': { + 'x': [ + [ + 'ca_choices_0' + ], + [ + 'ca_choices_1', 'ca_choices_2' + ], + [ + 'ca_choices_3' + ] + ] + } + }, + { + 'rule_type': ( + 'IsEqualToOrderingWithOneItemAtIncorrectPosition'), + 'inputs': { + 'x': [ + [ + 'ca_choices_1', 'ca_choices_3' + ], + [ + 'ca_choices_0' + ], + [ + 'ca_choices_2' + ] + ] + } + }, + 
{ + 'rule_type': 'IsEqualToOrdering', + 'inputs': { + 'x': [ + [ + 'ca_choices_1' + ], + [ + 'ca_choices_0' + ], + [ + 'ca_choices_2', 'ca_choices_3' + ] + ] + } + }, + { + 'rule_type': 'IsEqualToOrdering', + 'inputs': { + 'x': [ + [ + 'ca_choices_3' + ], + [ + 'ca_choices_2' + ], + [ + 'ca_choices_1' + ], + [ + 'ca_choices_0' + ] + ] + } } - } - solution = state_domain.Solution.from_dict( - init_state.interaction.id, solution_dict) - init_state.update_interaction_solution(solution) - self._assert_validation_error( - exploration, - re.escape('Hint(s) must be specified if solution is specified')) - - init_state.update_interaction_solution(None) - interaction.hints = {} - self._assert_validation_error( - exploration, 'Expected hints to be a list') - interaction.hints = [] - - # Validate AnswerGroup. - state_answer_group = state_domain.AnswerGroup( - state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( - 'feedback_1', 'Feedback'), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Contains', - { - 'x': - { - 'contentId': 'rule_input_Contains', - 'normalizedStrSet': ['Test'] - } - }) ], - [], - 1 - ) - init_state.update_interaction_answer_groups([state_answer_group]) + 'outcome': { + 'dest': 'EXP_1_STATE_1', + 'feedback': { + 'content_id': 'feedback_0', + 'html': '

    good

    ' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'dest_if_really_stuck': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }) + ] + self.state.interaction.answer_groups = ( + test_ans_group_for_drag_and_drop_interaction) + rule_specs = self.state.interaction.answer_groups[0].rule_specs + self.state.interaction.customization_args['choices'].value = [ + state_domain.SubtitledHtml('ca_choices_0', '

    1

    ') + ] self._assert_validation_error( - exploration, - 'Expected tagged skill misconception id to be a str, received 1') - state_answer_group = state_domain.AnswerGroup( - state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( - 'feedback_1', 'Feedback'), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Contains', - { - 'x': - { - 'contentId': 'rule_input_Contains', - 'normalizedStrSet': ['Test'] - } - }) - ], - [], - 'invalid_tagged_skill_misconception_id' + self.new_exploration, ( + 'There should be atleast 2 values inside DragAndDrop ' + 'interaction.') ) - init_state.update_interaction_answer_groups([state_answer_group]) - - self._assert_validation_error( - exploration, - 'Expected the format of tagged skill misconception id ' - 'to be -, received ' - 'invalid_tagged_skill_misconception_id') - init_state.interaction.answer_groups[0].rule_specs = {} - self._assert_validation_error( - exploration, 'Expected answer group rules to be a list') + self.state.interaction.customization_args['choices'].value = [ + state_domain.SubtitledHtml('ca_choices_0', '

    1

    '), + state_domain.SubtitledHtml('ca_choices_1', '

    '), + state_domain.SubtitledHtml('ca_choices_2', '') + ] + self.state.interaction.customization_args[ + 'allowMultipleItemsInSamePosition'].value = False - first_answer_group = init_state.interaction.answer_groups[0] - first_answer_group.tagged_skill_misconception_id = None - first_answer_group.rule_specs = [] self._assert_validation_error( - exploration, - 'There must be at least one rule or training data for each' - ' answer group.') - - exploration.states = { - exploration.init_state_name: ( - state_domain.State.create_default_state( - exploration.init_state_name, is_initial_state=True)) - } - self.set_interaction_for_state( - exploration.states[exploration.init_state_name], 'TextInput') - exploration.validate() - - exploration.language_code = 'fake_code' - self._assert_validation_error(exploration, 'Invalid language_code') - exploration.language_code = 'English' - self._assert_validation_error(exploration, 'Invalid language_code') - exploration.language_code = 'en' - exploration.validate() - - exploration.param_specs = 'A string' - self._assert_validation_error(exploration, 'param_specs to be a dict') + self.new_exploration, 'Choices should be non empty.' + ) + self.state.interaction.customization_args['choices'].value = [ + state_domain.SubtitledHtml('ca_choices_0', '

    1

    '), + state_domain.SubtitledHtml('ca_choices_1', '

    2

    '), + state_domain.SubtitledHtml('ca_choices_2', '') + ] - exploration.param_specs = { - '@': param_domain.ParamSpec.from_dict({ - 'obj_type': 'UnicodeString' - }) - } self._assert_validation_error( - exploration, 'Only parameter names with characters') - - exploration.param_specs = { - 'notAParamSpec': param_domain.ParamSpec.from_dict( - {'obj_type': 'UnicodeString'}) - } - exploration.validate() - - def test_tag_validation(self): - """Test validation of exploration tags.""" - exploration = exp_domain.Exploration.create_default_exploration('eid') - exploration.objective = 'Objective' - init_state = exploration.states[exploration.init_state_name] - self.set_interaction_for_state(init_state, 'EndExploration') - init_state.update_interaction_default_outcome(None) - exploration.validate() + self.new_exploration, 'Choices should be non empty.' + ) + self.state.interaction.customization_args['choices'].value = [ + state_domain.SubtitledHtml('ca_choices_0', '

    1

    '), + state_domain.SubtitledHtml('ca_choices_1', '

    2

    '), + state_domain.SubtitledHtml('ca_choices_2', '

    2

    ') + ] - exploration.tags = 'this should be a list' self._assert_validation_error( - exploration, 'Expected \'tags\' to be a list') - - exploration.tags = [123] - self._assert_validation_error(exploration, 'to be a string') - exploration.tags = ['abc', 123] - self._assert_validation_error(exploration, 'to be a string') - - exploration.tags = [''] - self._assert_validation_error(exploration, 'Tags should be non-empty') + self.new_exploration, 'Choices should be unique.' + ) + self.state.interaction.customization_args['choices'].value = [ + state_domain.SubtitledHtml('ca_choices_0', '

    1

    '), + state_domain.SubtitledHtml('ca_choices_1', '

    2

    '), + state_domain.SubtitledHtml('ca_choices_2', '

    3

    ') + ] - exploration.tags = ['123'] - self._assert_validation_error( - exploration, 'should only contain lowercase letters and spaces') - exploration.tags = ['ABC'] - self._assert_validation_error( - exploration, 'should only contain lowercase letters and spaces') + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'0\' of answer group \'0\' ' + 'having rule type - IsEqualToOrderingWithOneItemAtIncorrectPosition' + ' should not be there when the multiple items in same position ' + 'setting is turned off in DragAndDropSortInput interaction.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[0]) + + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'0\' of answer group \'0\' ' + 'have multiple items at same place when multiple items in same ' + 'position settings is turned off in DragAndDropSortInput ' + 'interaction.' + ): + self.new_exploration.validate(strict=True) + + self.state.interaction.customization_args[ + 'allowMultipleItemsInSamePosition'].value = True + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'3\' of answer group \'0\', ' + 'the value 1 and value 2 cannot be same when rule type is ' + 'HasElementXBeforeElementY of DragAndDropSortInput interaction.' 
+ ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) + rule_specs.remove(rule_specs[1]) + rule_specs.remove(rule_specs[1]) - exploration.tags = [' a b'] - self._assert_validation_error( - exploration, 'Tags should not start or end with whitespace') - exploration.tags = ['a b '] self._assert_validation_error( - exploration, 'Tags should not start or end with whitespace') + self.new_exploration, 'The rule \'1\'of answer group \'0\', ' + 'having rule type IsEqualToOrdering should not have empty values.') + rule_specs.remove(rule_specs[1]) - exploration.tags = ['a b'] - self._assert_validation_error( - exploration, 'Adjacent whitespace in tags should be collapsed') + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'2\' of answer group \'0\' of ' + 'DragAndDropInput interaction is already present.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[0]) - exploration.tags = ['abc', 'abc'] - self._assert_validation_error( - exploration, 'Some tags duplicate each other') - - exploration.tags = ['computer science', 'analysis', 'a b c'] - exploration.validate() - - def test_title_category_and_objective_validation(self): - """Test that titles, categories and objectives are validated only in - 'strict' mode. - """ - self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') - exploration = exp_fetchers.get_exploration_by_id('exp_id') - exploration.validate() + with self.assertRaisesRegex( + utils.ValidationError, 'Rule - 1 of answer group 0 ' + 'will never be match because it is made redundant by the ' + 'HasElementXAtPositionY rule above.' 
+ ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[0]) + rule_specs.remove(rule_specs[0]) + + with self.assertRaisesRegex( + utils.ValidationError, 'Rule - 1 of answer group 0 will never ' + 'be match because it is made redundant by the ' + 'IsEqualToOrderingWithOneItemAtIncorrectPosition rule above.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[1]) + + def test_text_interaction(self) -> None: + """Tests Text interaction.""" + self.state.recorded_voiceovers.add_content_id_for_voiceover( + 'feedback_0') + self.state.recorded_voiceovers.add_content_id_for_voiceover( + 'rule_input_27') + self.state.recorded_voiceovers.add_content_id_for_voiceover( + 'ca_choices_0') + self.state.recorded_voiceovers.add_content_id_for_voiceover( + 'ca_choices_1') + self.state.recorded_voiceovers.add_content_id_for_voiceover( + 'ca_choices_2') + content_id_generator = translation_domain.ContentIdGenerator() + self.set_interaction_for_state( + self.state, 'TextInput', content_id_generator) + test_ans_group_for_text_interaction = [ + state_domain.AnswerGroup.from_dict({ + 'rule_specs': [ + { + 'rule_type': 'Contains', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'hello', + 'abc', + 'def' + ] + } + } + }, + { + 'rule_type': 'Contains', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'helloooooo' + ] + } + } + }, + { + 'rule_type': 'StartsWith', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'exci' + ] + } + } + }, + { + 'rule_type': 'StartsWith', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'excitement' + ] + } + } + }, + { + 'rule_type': 'Contains', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'he' + ] + } + } + }, + { + 'rule_type': 'StartsWith', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'hello' + ] + } + } + }, + 
{ + 'rule_type': 'Contains', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'he' + ] + } + } + }, + { + 'rule_type': 'Equals', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'hello' + ] + } + } + }, + { + 'rule_type': 'StartsWith', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'he' + ] + } + } + }, + { + 'rule_type': 'Equals', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'hello' + ] + } + } + }, + { + 'rule_type': 'Equals', + 'inputs': { + 'x': { + 'contentId': 'rule_input_27', + 'normalizedStrSet': [ + 'hello' + ] + } + } + } + ], + 'outcome': { + 'dest': 'EXP_1_STATE_1', + 'feedback': { + 'content_id': 'feedback_0', + 'html': '

    good

    ' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'dest_if_really_stuck': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }) + ] + self.state.interaction.answer_groups = ( + test_ans_group_for_text_interaction) + rule_specs = self.state.interaction.answer_groups[0].rule_specs + + self.state.interaction.customization_args['rows'].value = 15 + with self.assertRaisesRegex( + utils.ValidationError, 'Rows value in Text interaction should ' + 'be between 1 and 10.' + ): + self.new_exploration.validate() - with self.assertRaisesRegexp( - utils.ValidationError, 'title must be specified' - ): - exploration.validate(strict=True) - exploration.title = 'A title' + self.state.interaction.customization_args['rows'].value = 5 + with self.assertRaisesRegex( + utils.ValidationError, 'Rule - \'1\' of answer group - \'0\' ' + 'having rule type \'Contains\' will never be matched because it ' + 'is made redundant by the above \'contains\' rule.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[0]) + rule_specs.remove(rule_specs[0]) + + with self.assertRaisesRegex( + utils.ValidationError, 'Rule - \'1\' of answer group - \'0\' ' + 'having rule type \'StartsWith\' will never be matched because it ' + 'is made redundant by the above \'StartsWith\' rule.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[0]) + rule_specs.remove(rule_specs[0]) + + with self.assertRaisesRegex( + utils.ValidationError, 'Rule - \'1\' of answer group - \'0\' ' + 'having rule type \'StartsWith\' will never be matched because it ' + 'is made redundant by the above \'contains\' rule.' 
+ ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[0]) + rule_specs.remove(rule_specs[0]) + + with self.assertRaisesRegex( + utils.ValidationError, 'Rule - \'1\' of answer group - \'0\' ' + 'having rule type \'Equals\' will never be matched because it ' + 'is made redundant by the above \'contains\' rule.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[0]) + rule_specs.remove(rule_specs[0]) + + with self.assertRaisesRegex( + utils.ValidationError, 'Rule - \'1\' of answer group - \'0\' ' + 'having rule type \'Equals\' will never be matched because it ' + 'is made redundant by the above \'StartsWith\' rule.' + ): + self.new_exploration.validate(strict=True) + rule_specs.remove(rule_specs[0]) - with self.assertRaisesRegexp( - utils.ValidationError, 'category must be specified' - ): - exploration.validate(strict=True) - exploration.category = 'A category' + with self.assertRaisesRegex( + utils.ValidationError, 'The rule \'1\' of answer group \'0\' of ' + 'TextInput interaction is already present.' + ): + self.new_exploration.validate(strict=True) - with self.assertRaisesRegexp( - utils.ValidationError, 'objective must be specified' - ): - exploration.validate(strict=True) + # TODO(bhenning): The validation tests below should be split into separate + # unit tests. Also, all validation errors should be covered in the tests. 
+ def test_validation(self) -> None: + """Test validation of explorations.""" + exploration = exp_domain.Exploration.create_default_exploration('eid') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + exploration.init_state_name = '' + exploration.states = {} - exploration.objective = 'An objective' + exploration.title = 'Hello #' + self._assert_validation_error(exploration, 'Invalid character #') - exploration.validate(strict=True) + exploration.title = 'Title' + exploration.category = 'Category' - def test_get_trainable_states_dict(self): - """Test the get_trainable_states_dict() method.""" - exp_id = 'exp_id1' - test_exp_filepath = os.path.join( - feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') - yaml_content = utils.get_file_contents(test_exp_filepath) - assets_list = [] - exp_services.save_new_exploration_from_yaml_and_assets( - feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id, - assets_list) + # Note: If '/' ever becomes a valid state name, ensure that the rule + # editor frontend tenplate is fixed -- it currently uses '/' as a + # sentinel for an invalid state name. 
+ bad_state = state_domain.State.create_default_state( + '/', + content_id_generator.generate( + translation_domain.ContentType.CONTENT), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + exploration.states = {'/': bad_state} + self._assert_validation_error( + exploration, 'Invalid character / in a state name') - exploration_model = exp_models.ExplorationModel.get( - exp_id, strict=False) - old_states = exp_fetchers.get_exploration_from_model( - exploration_model).states - exploration = exp_fetchers.get_exploration_by_id(exp_id) + new_state = state_domain.State.create_default_state( + 'ABC', + content_id_generator.generate( + translation_domain.ContentType.CONTENT), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME)) + self.set_interaction_for_state( + new_state, 'TextInput', content_id_generator) + second_state = state_domain.State.create_default_state( + 'BCD', + content_id_generator.generate( + translation_domain.ContentType.CONTENT), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME)) + self.set_interaction_for_state( + second_state, 'TextInput', content_id_generator) - # Rename a state to add it in unchanged answer group. - exploration.rename_state('Home', 'Renamed state') - change_list = [exp_domain.ExplorationChange({ - 'cmd': 'rename_state', - 'old_state_name': 'Home', - 'new_state_name': 'Renamed state' - })] + # The 'states' property must be a non-empty dict of states. 
+ exploration.states = {} + self._assert_validation_error( + exploration, 'exploration has no states') + exploration.states = {'A string #': new_state} + self._assert_validation_error( + exploration, 'Invalid character # in a state name') + exploration.states = {'A string _': new_state} + self._assert_validation_error( + exploration, 'Invalid character _ in a state name') - expected_dict = { - 'state_names_with_changed_answer_groups': [], - 'state_names_with_unchanged_answer_groups': ['Renamed state'] + exploration.states = { + 'ABC': new_state, + 'BCD': second_state } - exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) - actual_dict = exploration.get_trainable_states_dict( - old_states, exp_versions_diff) - self.assertEqual(actual_dict, expected_dict) - # Modify answer groups to trigger change in answer groups. - state = exploration.states['Renamed state'] - exploration.states['Renamed state'].interaction.answer_groups.insert( - 3, state.interaction.answer_groups[3]) - answer_groups = [] - for answer_group in state.interaction.answer_groups: - answer_groups.append(answer_group.to_dict()) - change_list = [exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'state_name': 'Renamed state', - 'property_name': 'answer_groups', - 'new_value': answer_groups - })] + self._assert_validation_error( + exploration, 'has no initial state name') - expected_dict = { - 'state_names_with_changed_answer_groups': ['Renamed state'], - 'state_names_with_unchanged_answer_groups': [] - } - exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) - actual_dict = exploration.get_trainable_states_dict( - old_states, exp_versions_diff) - self.assertEqual(actual_dict, expected_dict) + exploration.init_state_name = 'initname' - # Add new state to trigger change in answer groups. 
- exploration.add_states(['New state']) - exploration.states['New state'] = copy.deepcopy( - exploration.states['Renamed state']) - change_list = [exp_domain.ExplorationChange({ - 'cmd': 'add_state', - 'state_name': 'New state', - })] + self._assert_validation_error( + exploration, + r'There is no state in \[\'ABC\'\, \'BCD\'\] corresponding to ' + 'the exploration\'s initial state name initname.') - expected_dict = { - 'state_names_with_changed_answer_groups': [ - 'Renamed state', 'New state'], - 'state_names_with_unchanged_answer_groups': [] + # Test whether a default outcome to a non-existing state is invalid. + exploration.states = { + exploration.init_state_name: new_state, + 'BCD': second_state } - exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) - actual_dict = exploration.get_trainable_states_dict( - old_states, exp_versions_diff) - self.assertEqual(actual_dict, expected_dict) - - # Delete state. - exploration.delete_state('New state') - change_list = [exp_domain.ExplorationChange({ - 'cmd': 'delete_state', - 'state_name': 'New state' - })] + exploration.update_next_content_id_index( + content_id_generator.next_content_id_index) + self._assert_validation_error( + exploration, 'destination ABC is not a valid') - expected_dict = { - 'state_names_with_changed_answer_groups': ['Renamed state'], - 'state_names_with_unchanged_answer_groups': [] - } - exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) - actual_dict = exploration.get_trainable_states_dict( - old_states, exp_versions_diff) - self.assertEqual(actual_dict, expected_dict) + # Restore a valid exploration. + init_state = exploration.states[exploration.init_state_name] + default_outcome = init_state.interaction.default_outcome + # Ruling out the possibility of None for mypy type checking. 
+ assert default_outcome is not None + default_outcome.dest = exploration.init_state_name + init_state.update_interaction_default_outcome(default_outcome) + init_state.update_card_is_checkpoint(True) + exploration.validate() - # Test addition and multiple renames. - exploration.add_states(['New state']) - exploration.states['New state'] = copy.deepcopy( - exploration.states['Renamed state']) - exploration.rename_state('New state', 'New state2') - exploration.rename_state('New state2', 'New state3') - change_list = [exp_domain.ExplorationChange({ - 'cmd': 'add_state', - 'state_name': 'New state', - }), exp_domain.ExplorationChange({ - 'cmd': 'rename_state', - 'old_state_name': 'New state', - 'new_state_name': 'New state2' - }), exp_domain.ExplorationChange({ - 'cmd': 'rename_state', - 'old_state_name': 'New state2', - 'new_state_name': 'New state3' - })] + # Ensure an invalid destination can also be detected for answer groups. + # Note: The state must keep its default_outcome, otherwise it will + # trigger a validation error for non-terminal states needing to have a + # default outcome. To validate the outcome of the answer group, this + # default outcome must point to a valid state. + init_state = exploration.states[exploration.init_state_name] + default_outcome = init_state.interaction.default_outcome + # Ruling out the possibility of None for mypy type checking. + assert default_outcome is not None + default_outcome.dest = exploration.init_state_name + old_answer_groups: List[state_domain.AnswerGroupDict] = [ + { + 'outcome': { + 'dest': exploration.init_state_name, + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'rule_specs': [{ + 'inputs': { + 'x': { + 'contentId': 'rule_input_Equals', + 'normalizedStrSet': ['Test'] + } + }, + 'rule_type': 'Contains' + }], + 'training_data': [], + 'tagged_skill_misconception_id': None + } + ] - expected_dict = { - 'state_names_with_changed_answer_groups': [ - 'Renamed state', 'New state3' - ], - 'state_names_with_unchanged_answer_groups': [] - } - exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) - actual_dict = exploration.get_trainable_states_dict( - old_states, exp_versions_diff) - self.assertEqual(actual_dict, expected_dict) + new_answer_groups = [ + state_domain.AnswerGroup.from_dict(answer_group) + for answer_group in old_answer_groups + ] + init_state.update_interaction_answer_groups(new_answer_groups) - def test_get_languages_with_complete_translation(self): - exploration = exp_domain.Exploration.create_default_exploration('0') - self.assertEqual( - exploration.get_languages_with_complete_translation(), []) - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Translation in Hindi.

    ', - 'needs_update': False - } - } - } - }) - exploration.states[ - feconf.DEFAULT_INIT_STATE_NAME].update_written_translations( - written_translations) + exploration.validate() - self.assertEqual( - exploration.get_languages_with_complete_translation(), ['hi']) + interaction = init_state.interaction + answer_groups = interaction.answer_groups + answer_group = answer_groups[0] - def test_get_translation_counts_with_no_needs_update(self): - exploration = exp_domain.Exploration.create_default_exploration('0') - self.assertEqual( - exploration.get_translation_counts(), {}) + default_outcome.dest_if_really_stuck = 'ABD' + self._assert_validation_error( + exploration, 'The destination for the stuck learner ' + 'ABD is not a valid state') - init_state = exploration.states[exploration.init_state_name] - init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - init_state.update_interaction_id('TextInput') - default_outcome = state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml( - 'default_outcome', '

    The default outcome.

    '), - False, [], None, None - ) + default_outcome.dest_if_really_stuck = None - init_state.update_interaction_default_outcome(default_outcome) + answer_group.outcome.dest = 'DEF' + self._assert_validation_error( + exploration, 'destination DEF is not a valid') + answer_group.outcome.dest = exploration.init_state_name - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Translation in Hindi.

    ', - 'needs_update': False - } - }, - 'default_outcome': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Translation in Hindi.

    ', - 'needs_update': False - } - } - } - }) - init_state.update_written_translations(written_translations) + answer_group.outcome.dest_if_really_stuck = 'XYZ' + self._assert_validation_error( + exploration, 'The destination for the stuck learner ' + 'XYZ is not a valid state') - exploration.add_states(['New state']) - new_state = exploration.states['New state'] - new_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - new_state.update_interaction_id('TextInput') - default_outcome = state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml( - 'default_outcome', '

    The default outcome.

    '), - False, [], None, None) - new_state.update_interaction_default_outcome(default_outcome) + answer_group.outcome.dest_if_really_stuck = None - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '

    New state translation in Hindi.

    ', - 'needs_update': False - } - }, - 'default_outcome': { - 'hi': { - 'data_format': 'html', - 'translation': '

    New State translation in Hindi.

    ', - 'needs_update': False - } - } - } - }) - new_state.update_written_translations(written_translations) + # Restore a valid exploration. + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) + new_answer_groups = [ + state_domain.AnswerGroup.from_dict(answer_groups) + for answer_groups in old_answer_groups + ] + init_state.update_interaction_answer_groups(new_answer_groups) + answer_groups = interaction.answer_groups + answer_group = answer_groups[0] + answer_group.outcome.dest = exploration.init_state_name + exploration.validate() - self.assertEqual( - exploration.get_translation_counts(), {'hi': 4}) + # Validate RuleSpec. + rule_spec = answer_group.rule_specs[0] + rule_spec.inputs = {} + self._assert_validation_error( + exploration, 'RuleSpec \'Contains\' is missing inputs') - def test_get_translation_counts_with_needs_update(self): - exploration = exp_domain.Exploration.create_default_exploration('0') - self.assertEqual( - exploration.get_translation_counts(), {}) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + rule_spec.inputs = 'Inputs string' # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected inputs to be a dict') - init_state = exploration.states[feconf.DEFAULT_INIT_STATE_NAME] - init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - init_state.update_interaction_id('TextInput') - default_outcome = state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml( - 'default_outcome', '

    The default outcome.

    '), - False, [], None, None - ) - init_state.update_interaction_default_outcome(default_outcome) + rule_spec.inputs = {'x': 'Test'} + rule_spec.rule_type = 'FakeRuleType' + self._assert_validation_error(exploration, 'Unrecognized rule type') - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Translation in Hindi.

    ', - 'needs_update': True - } - }, - 'default_outcome': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Translation in Hindi.

    ', - 'needs_update': False - } - } - } - }) - init_state.update_written_translations(written_translations) + rule_spec.inputs = {'x': { + 'contentId': 'rule_input_Equals', + 'normalizedStrSet': 15 + }} + rule_spec.rule_type = 'Contains' + with self.assertRaisesRegex( + AssertionError, 'Expected list, received 15' + ): + exploration.validate() - self.assertEqual( - exploration.get_translation_counts(), {'hi': 1}) + self.set_interaction_for_state( + exploration.states[exploration.init_state_name], + 'PencilCodeEditor', content_id_generator) + temp_rule = old_answer_groups[0]['rule_specs'][0] + old_answer_groups[0]['rule_specs'][0] = { + 'rule_type': 'ErrorContains', + 'inputs': {'x': '{{ExampleParam}}'} + } + new_answer_groups = [ + state_domain.AnswerGroup.from_dict(answer_group) + for answer_group in old_answer_groups + ] + init_state.update_interaction_answer_groups(new_answer_groups) + old_answer_groups[0]['rule_specs'][0] = temp_rule - def test_get_translation_counts_with_translation_in_multiple_lang(self): - exploration = exp_domain.Exploration.create_default_exploration('0') - self.assertEqual( - exploration.get_translation_counts(), {}) - init_state = exploration.states[feconf.DEFAULT_INIT_STATE_NAME] - init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - init_state.update_interaction_id('TextInput') - default_outcome = state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml( - 'default_outcome', '

    The default outcome.

    '), - False, [], None, None - ) + self._assert_validation_error( + exploration, + 'RuleSpec \'ErrorContains\' has an input with name \'x\' which ' + 'refers to an unknown parameter within the exploration: ' + 'ExampleParam') - init_state.update_interaction_default_outcome(default_outcome) + # Restore a valid exploration. + exploration.param_specs['ExampleParam'] = param_domain.ParamSpec( + 'UnicodeString') + exploration.validate() - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': { - 'content': { - 'hi-en': { - 'data_format': 'html', - 'translation': '

    Translation in Hindi.

    ', - 'needs_update': False - }, - 'hi': { - 'data_format': 'html', - 'translation': '

    Translation in Hindi.

    ', - 'needs_update': False - } - }, - 'default_outcome': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Translation in Hindi.

    ', - 'needs_update': False - } - } - } - }) - init_state.update_written_translations(written_translations) + # Validate Outcome. + outcome = init_state.interaction.answer_groups[0].outcome + destination = exploration.init_state_name - self.assertEqual( - exploration.get_translation_counts(), { - 'hi': 2, - 'hi-en': 1 - }) + outcome.dest = None + self._assert_validation_error( + exploration, 'Every outcome should have a destination.') - def test_get_content_count(self): - # Adds 1 to content count to exploration (content, default_outcome). - exploration = exp_domain.Exploration.create_default_exploration('0') - self.assertEqual(exploration.get_content_count(), 1) + outcome.dest = destination - # Adds 2 to content count to exploration (content default_outcome). - exploration.add_states(['New state']) - init_state = exploration.states[exploration.init_state_name] + default_outcome = init_state.interaction.default_outcome + # Ruling out the possibility of None for mypy type checking. + assert default_outcome is not None - # Adds 1 to content count to exploration (ca_placeholder_0) - self.set_interaction_for_state(init_state, 'TextInput') + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + default_outcome.dest_if_really_stuck = 20 # type: ignore[assignment] - state_answer_group = state_domain.AnswerGroup( - state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( - 'feedback_1', 'Feedback'), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Contains', - { - 'x': - { - 'contentId': 'rule_input_5', - 'normalizedStrSet': ['Test'] - } - }) - ], - [], - None - ) - # Adds 1 to content count to exploration (feedback_1). 
- init_state.update_interaction_answer_groups([state_answer_group]) + self._assert_validation_error( + exploration, 'Expected dest_if_really_stuck to be a string') - hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml('hint_1', '

    hint one

    ') - ) - ] - # Adds 1 to content count to exploration (hint_1). - init_state.update_interaction_hints(hints_list) + default_outcome.dest_if_really_stuck = None - solution_dict = { - 'answer_is_exclusive': False, - 'correct_answer': 'helloworld!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    hello_world is a string

    ' - }, - } - solution = state_domain.Solution.from_dict( - init_state.interaction.id, solution_dict) - # Adds 1 to content count to exploration (solution). - init_state.update_interaction_solution(solution) + # Try setting the outcome destination to something other than a string. + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + outcome.dest = 15 # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected outcome dest to be a string') - self.assertEqual(exploration.get_content_count(), 6) + outcome.dest = destination - def test_get_content_with_correct_state_name_returns_html(self): - exploration = exp_domain.Exploration.create_default_exploration('0') + outcome.feedback = state_domain.SubtitledHtml('feedback_1', '') + exploration.validate() - init_state = exploration.states[exploration.init_state_name] - self.set_interaction_for_state(init_state, 'TextInput') - hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml('hint_1', '

    hint one

    ') - ) - ] - init_state.update_interaction_hints(hints_list) - - self.assertEqual( - exploration.get_content_html(exploration.init_state_name, 'hint_1'), - '

    hint one

    ') - - hints_list[0].hint_content.html = '

    Changed hint one

    ' - init_state.update_interaction_hints(hints_list) - - self.assertEqual( - exploration.get_content_html(exploration.init_state_name, 'hint_1'), - '

    Changed hint one

    ') - - def test_get_content_with_incorrect_state_name_raise_error(self): - exploration = exp_domain.Exploration.create_default_exploration('0') - - init_state = exploration.states[exploration.init_state_name] - self.set_interaction_for_state(init_state, 'TextInput') - hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml('hint_1', '

    hint one

    ') - ) - ] - init_state.update_interaction_hints(hints_list) - - self.assertEqual( - exploration.get_content_html(exploration.init_state_name, 'hint_1'), - '

    hint one

    ') - - with self.assertRaisesRegexp( - ValueError, 'State Invalid state does not exist'): - exploration.get_content_html('Invalid state', 'hint_1') - - def test_is_demo_property(self): - """Test the is_demo property.""" - demo = exp_domain.Exploration.create_default_exploration('0') - self.assertEqual(demo.is_demo, True) - - notdemo1 = exp_domain.Exploration.create_default_exploration('a') - self.assertEqual(notdemo1.is_demo, False) - - notdemo2 = exp_domain.Exploration.create_default_exploration('abcd') - self.assertEqual(notdemo2.is_demo, False) - - def test_has_state_name(self): - """Test for has_state_name.""" - demo = exp_domain.Exploration.create_default_exploration('0') - state_names = list(demo.states.keys()) - self.assertEqual(state_names, ['Introduction']) - self.assertEqual(demo.has_state_name('Introduction'), True) - self.assertEqual(demo.has_state_name('Fake state name'), False) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + outcome.labelled_as_correct = 'hello' # type: ignore[assignment] + self._assert_validation_error( + exploration, 'The "labelled_as_correct" field should be a boolean') - def test_get_interaction_id_by_state_name(self): - """Test for get_interaction_id_by_state_name.""" - demo = exp_domain.Exploration.create_default_exploration('0') - self.assertEqual( - demo.get_interaction_id_by_state_name('Introduction'), None) + # Test that labelled_as_correct must be False for self-loops, and that + # this causes a strict validation failure but not a normal validation + # failure. + outcome.labelled_as_correct = True + with self.assertRaisesRegex( + Exception, 'is labelled correct but is a self-loop.' + ): + exploration.validate(strict=True) + exploration.validate() - def test_exploration_export_import(self): - """Test that to_dict and from_dict preserve all data within an - exploration. 
- """ - demo = exp_domain.Exploration.create_default_exploration('0') - demo_dict = demo.to_dict() - exp_from_dict = exp_domain.Exploration.from_dict(demo_dict) - self.assertEqual(exp_from_dict.to_dict(), demo_dict) + outcome.labelled_as_correct = False + exploration.validate() - def test_interaction_with_none_id_is_not_terminal(self): - """Test that an interaction with an id of None leads to is_terminal - being false. - """ - # Default exploration has a default interaction with an ID of None. - demo = exp_domain.Exploration.create_default_exploration('0') - init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME] - self.assertFalse(init_state.interaction.is_terminal) + # Try setting the outcome destination if stuck to something other + # than a string. + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + outcome.dest_if_really_stuck = 30 # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected dest_if_really_stuck to be a string') - def test_cannot_create_demo_exp_with_invalid_param_changes(self): - demo_exp = exp_domain.Exploration.create_default_exploration('0') - demo_dict = demo_exp.to_dict() - new_state = state_domain.State.create_default_state('new_state_name') - new_state.param_changes = [param_domain.ParamChange.from_dict({ - 'customization_args': { - 'list_of_values': ['1', '2'], 'parse_with_jinja': False - }, - 'name': 'myParam', - 'generator_id': 'RandomSelector' - })] + outcome.dest_if_really_stuck = 'BCD' + outcome.dest = 'BCD' - demo_dict['states']['new_state_name'] = new_state.to_dict() - demo_dict['param_specs'] = { - 'ParamSpec': {'obj_type': 'UnicodeString'} - } - with self.assertRaisesRegexp( - Exception, - 'Parameter myParam was used in a state but not ' - 'declared in the exploration param_specs.'): - exp_domain.Exploration.from_dict(demo_dict) + # Test that no 
destination for the stuck learner is specified when + # the outcome is labelled correct. + outcome.labelled_as_correct = True - def test_validate_exploration_category(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + with self.assertRaisesRegex( + Exception, 'The outcome for the state is labelled ' + 'correct but a destination for the stuck learner ' + 'is specified.' + ): + exploration.validate(strict=True) exploration.validate() - exploration.category = 1 - with self.assertRaisesRegexp( - Exception, 'Expected category to be a string, received 1'): - exploration.validate() - - def test_validate_exploration_objective(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + outcome.labelled_as_correct = False exploration.validate() - exploration.objective = 1 - with self.assertRaisesRegexp( - Exception, 'Expected objective to be a string, received 1'): - exploration.validate() - - def test_validate_exploration_blurb(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') - exploration.validate() + outcome.dest = destination + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ outcome.param_changes = 'Changes' # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected outcome param_changes to be a list') - exploration.blurb = 1 - with self.assertRaisesRegexp( - Exception, 'Expected blurb to be a string, received 1'): - exploration.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + outcome.param_changes = [param_domain.ParamChange( + 0, 'generator_id', {})] # type: ignore[arg-type] + self._assert_validation_error( + exploration, + 'Expected param_change name to be a string, received 0') - def test_validate_exploration_language_code(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + outcome.param_changes = [] exploration.validate() - exploration.language_code = 1 - with self.assertRaisesRegexp( - Exception, 'Expected language_code to be a string, received 1'): - exploration.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ outcome.refresher_exploration_id = 12345 # type: ignore[assignment] + self._assert_validation_error( + exploration, + 'Expected outcome refresher_exploration_id to be a string') - def test_validate_exploration_author_notes(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + outcome.refresher_exploration_id = None exploration.validate() - exploration.author_notes = 1 - with self.assertRaisesRegexp( - Exception, 'Expected author_notes to be a string, received 1'): - exploration.validate() - - def test_validate_exploration_states(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + outcome.refresher_exploration_id = 'valid_string' exploration.validate() - exploration.states = 1 - with self.assertRaisesRegexp( - Exception, 'Expected states to be a dict, received 1'): - exploration.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ outcome.missing_prerequisite_skill_id = 12345 # type: ignore[assignment] + self._assert_validation_error( + exploration, + 'Expected outcome missing_prerequisite_skill_id to be a string') - def test_validate_exploration_outcome_dest(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + outcome.missing_prerequisite_skill_id = None exploration.validate() - exploration.init_state.interaction.default_outcome.dest = None - with self.assertRaisesRegexp( - Exception, 'Every outcome should have a destination.'): - exploration.validate() - - def test_validate_exploration_outcome_dest_type(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + outcome.missing_prerequisite_skill_id = 'valid_string' exploration.validate() - exploration.init_state.interaction.default_outcome.dest = 1 - with self.assertRaisesRegexp( - Exception, 'Expected outcome dest to be a string, received 1'): - exploration.validate() - - def test_validate_exploration_states_schema_version(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') - exploration.validate() + # Test that refresher_exploration_id must be None for non-self-loops. 
+ new_state_name = 'New state' + exploration.add_states([new_state_name]) - exploration.states_schema_version = None - with self.assertRaisesRegexp( - Exception, 'This exploration has no states schema version.'): - exploration.validate() + outcome.dest = new_state_name + outcome.refresher_exploration_id = 'another_string' + self._assert_validation_error( + exploration, + 'has a refresher exploration ID, but is not a self-loop') - def test_validate_exploration_auto_tts_enabled(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + outcome.refresher_exploration_id = None exploration.validate() + exploration.delete_state(new_state_name) - exploration.auto_tts_enabled = 1 - with self.assertRaisesRegexp( - Exception, 'Expected auto_tts_enabled to be a bool, received 1'): - exploration.validate() + # Validate InteractionInstance. + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ interaction.id = 15 # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected interaction id to be a string') - def test_validate_exploration_correctness_feedback_enabled(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + interaction.id = 'SomeInteractionTypeThatDoesNotExist' + self._assert_validation_error(exploration, 'Invalid interaction id') + interaction.id = 'PencilCodeEditor' + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) + new_answer_groups = [ + state_domain.AnswerGroup.from_dict(answer_group) + for answer_group in old_answer_groups + ] + init_state.update_interaction_answer_groups(new_answer_groups) + valid_text_input_cust_args = init_state.interaction.customization_args + rule_spec.inputs = {'x': { + 'contentId': 'rule_input_Equals', + 'normalizedStrSet': ['Test'] + }} + rule_spec.rule_type = 'Contains' exploration.validate() - exploration.correctness_feedback_enabled = 1 - with self.assertRaisesRegexp( - Exception, - 'Expected correctness_feedback_enabled to be a bool, received 1'): - exploration.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ interaction.customization_args = [] # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected customization args to be a dict') - def test_validate_exploration_param_specs(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') - exploration.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + interaction.customization_args = {15: ''} # type: ignore[dict-item] + self._assert_validation_error( + exploration, + ( + 'Expected customization arg value to be a ' + 'InteractionCustomizationArg' + ) + ) - exploration.param_specs = { - 1: param_domain.ParamSpec.from_dict( - {'obj_type': 'UnicodeString'}) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ interaction.customization_args = { + 15: state_domain.InteractionCustomizationArg('', { # type: ignore[dict-item, no-untyped-call] + 'type': 'unicode' + }) } - with self.assertRaisesRegexp( - Exception, 'Expected parameter name to be a string, received 1'): - exploration.validate() + self._assert_validation_error( + exploration, 'Invalid customization arg name') - def test_validate_exploration_param_changes_type(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + interaction.customization_args = valid_text_input_cust_args + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) exploration.validate() - exploration.param_changes = 1 - with self.assertRaisesRegexp( - Exception, 'Expected param_changes to be a list, received 1'): - exploration.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ interaction.answer_groups = {} # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected answer groups to be a list') - def test_validate_exploration_param_name(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') - exploration.validate() + new_answer_groups = [ + state_domain.AnswerGroup.from_dict(answer_group) + for answer_group in old_answer_groups + ] + init_state.update_interaction_answer_groups(new_answer_groups) + self.set_interaction_for_state( + init_state, 'EndExploration', content_id_generator) + self._assert_validation_error( + exploration, + 'Terminal interactions must not have a default outcome.') - exploration.param_changes = [param_domain.ParamChange.from_dict({ - 'customization_args': { - 'list_of_values': ['1', '2'], 'parse_with_jinja': False - }, - 'name': 'invalid', - 'generator_id': 'RandomSelector' - })] - with self.assertRaisesRegexp( - Exception, - 'No parameter named \'invalid\' exists in this ' - 'exploration'): - exploration.validate() + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) + init_state.update_interaction_default_outcome(None) + self._assert_validation_error( + exploration, + 'Non-terminal interactions must have a default outcome.') - def test_validate_exploration_reserved_param_name(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') - exploration.validate() + self.set_interaction_for_state( + init_state, 'EndExploration', content_id_generator + ) + init_state.interaction.answer_groups = answer_groups + self._assert_validation_error( + exploration, + 'Terminal interactions must not have any answer groups.') - exploration.param_changes = [param_domain.ParamChange.from_dict({ - 'customization_args': { - 'list_of_values': ['1', '2'], 'parse_with_jinja': False - }, - 'name': 
'all', - 'generator_id': 'RandomSelector' - })] - with self.assertRaisesRegexp( - Exception, - 'The exploration-level parameter with name \'all\' is ' - 'reserved. Please choose a different name.'): - exploration.validate() + init_state.interaction.answer_groups = [] + self.set_interaction_for_state( + init_state, 'Continue', content_id_generator) + init_state.interaction.answer_groups = answer_groups + init_state.update_interaction_default_outcome(default_outcome) + self._assert_validation_error( + exploration, + 'Linear interactions must not have any answer groups.') + exploration.update_next_content_id_index( + content_id_generator.next_content_id_index) + # A terminal interaction without a default outcome or answer group is + # valid. This resets the exploration back to a valid state. + init_state.interaction.answer_groups = [] + exploration.validate() - def test_validate_exploration_is_non_self_loop(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') + # Restore a valid exploration. + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) + init_state.update_interaction_answer_groups(answer_groups) + init_state.update_interaction_default_outcome(default_outcome) + exploration.update_next_content_id_index( + content_id_generator.next_content_id_index) exploration.validate() - exploration.add_states(['DEF']) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + interaction.hints = {} # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected hints to be a list') + interaction.hints = [] - default_outcome = state_domain.Outcome( - 'DEF', state_domain.SubtitledHtml( - 'default_outcome', '

    Default outcome for state1

    '), - False, [], 'refresher_exploration_id', None, - ) - exploration.init_state.update_interaction_default_outcome( - default_outcome + # Validate AnswerGroup. + state_answer_group = state_domain.AnswerGroup( + state_domain.Outcome( + exploration.init_state_name, None, state_domain.SubtitledHtml( + 'feedback_1', 'Feedback'), + False, [], None, None), + [ + state_domain.RuleSpec( + 'Contains', + { + 'x': + { + 'contentId': 'rule_input_Contains', + 'normalizedStrSet': ['Test'] + } + }) + ], + [], + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. + 1 # type: ignore[arg-type] ) + init_state.update_interaction_answer_groups([state_answer_group]) - with self.assertRaisesRegexp( + self._assert_validation_error( + exploration, + 'Expected tagged skill misconception id to be None, received 1') + with self.assertRaisesRegex( Exception, - 'The default outcome for state Introduction has a refresher ' - 'exploration ID, but is not a self-loop.'): - exploration.validate() - - def test_validate_exploration_answer_group_parameter(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='', category='', - objective='', end_state_name='End') - exploration.validate() - - param_changes = [param_domain.ParamChange( - 'ParamChange', 'RandomSelector', { - 'list_of_values': ['1', '2'], 'parse_with_jinja': False - } - )] + 'Expected tagged skill misconception id to be None, received 1' + ): + exploration.init_state.validate( + exploration.param_specs, + allow_null_interaction=False, + tagged_skill_misconception_id_required=False) state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback_1', 'Feedback'), - False, param_changes, None, None), + False, [], None, 
None), [ state_domain.RuleSpec( 'Contains', { 'x': { - 'contentId': 'rule_input_Equals', + 'contentId': 'rule_input_Contains', 'normalizedStrSet': ['Test'] } }) ], [], - None + 'invalid_tagged_skill_misconception_id' ) - exploration.init_state.update_interaction_answer_groups( - [state_answer_group]) - with self.assertRaisesRegexp( + init_state.update_interaction_answer_groups([state_answer_group]) + + self._assert_validation_error( + exploration, + 'Expected tagged skill misconception id to be None, received ' + 'invalid_tagged_skill_misconception_id') + + with self.assertRaisesRegex( Exception, - 'The parameter ParamChange was used in an answer group, ' - 'but it does not exist in this exploration'): - exploration.validate() + 'Expected tagged skill misconception id to be None, received ' + 'invalid_tagged_skill_misconception_id' + ): + exploration.init_state.validate( + exploration.param_specs, + allow_null_interaction=False, + tagged_skill_misconception_id_required=False) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + init_state.interaction.answer_groups[0].rule_specs = {} # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected answer group rules to be a list') - def test_verify_all_states_reachable(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'owner_id') + first_answer_group = init_state.interaction.answer_groups[0] + first_answer_group.tagged_skill_misconception_id = None + first_answer_group.rule_specs = [] + self._assert_validation_error( + exploration, + 'There must be at least one rule for each answer group.') + with self.assertRaisesRegex( + Exception, + 'There must be at least one rule for each answer group.' 
+ ): + exploration.init_state.validate( + exploration.param_specs, + allow_null_interaction=False, + tagged_skill_misconception_id_required=False) + + exploration.states = { + exploration.init_state_name: ( + state_domain.State.create_default_state( + exploration.init_state_name, + content_id_generator.generate( + translation_domain.ContentType.CONTENT), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + is_initial_state=True)) + } + self.set_interaction_for_state( + exploration.states[exploration.init_state_name], 'TextInput', + content_id_generator) + exploration.update_next_content_id_index( + content_id_generator.next_content_id_index) exploration.validate() - exploration.add_states(['End']) - end_state = exploration.states['End'] - self.set_interaction_for_state(end_state, 'EndExploration') - end_state.update_interaction_default_outcome(None) + exploration.language_code = 'fake_code' + self._assert_validation_error(exploration, 'Invalid language_code') + exploration.language_code = 'English' + self._assert_validation_error(exploration, 'Invalid language_code') + exploration.language_code = 'en' + exploration.validate() - with self.assertRaisesRegexp( - Exception, - 'Please fix the following issues before saving this exploration: ' - '1. The following states are not reachable from the initial state: ' - 'End 2. It is impossible to complete the exploration from the ' - 'following states: Introduction'): - exploration.validate(strict=True) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ exploration.param_specs = 'A string' # type: ignore[assignment] + self._assert_validation_error(exploration, 'param_specs to be a dict') - def test_update_init_state_name_with_invalid_state(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='title', category='category', - objective='objective', end_state_name='End') + exploration.param_specs = { + '@': param_domain.ParamSpec.from_dict({ + 'obj_type': 'UnicodeString' + }) + } + self._assert_validation_error( + exploration, 'Only parameter names with characters') - exploration.update_init_state_name('End') - self.assertEqual(exploration.init_state_name, 'End') + exploration.param_specs = { + 'notAParamSpec': param_domain.ParamSpec.from_dict( + {'obj_type': 'UnicodeString'}) + } + exploration.validate() - with self.assertRaisesRegexp( - Exception, - 'Invalid new initial state name: invalid_state;'): - exploration.update_init_state_name('invalid_state') + def test_tag_validation(self) -> None: + """Test validation of exploration tags.""" + exploration = exp_domain.Exploration.create_default_exploration('eid') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + exploration.objective = 'Objective' + init_state = exploration.states[exploration.init_state_name] + self.set_interaction_for_state( + init_state, 'EndExploration', content_id_generator) + init_state.update_interaction_default_outcome(None) + exploration.validate() - def test_rename_state_with_invalid_state(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='title', category='category', - objective='objective', end_state_name='End') + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ exploration.tags = 'this should be a list' # type: ignore[assignment] + self._assert_validation_error( + exploration, 'Expected \'tags\' to be a list') - self.assertTrue(exploration.states.get('End')) - self.assertFalse(exploration.states.get('new state name')) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + exploration.tags = [123] # type: ignore[list-item] + self._assert_validation_error(exploration, 'to be a string') + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + exploration.tags = ['abc', 123] # type: ignore[list-item] + self._assert_validation_error(exploration, 'to be a string') - exploration.rename_state('End', 'new state name') - self.assertFalse(exploration.states.get('End')) - self.assertTrue(exploration.states.get('new state name')) + exploration.tags = [''] + self._assert_validation_error(exploration, 'Tags should be non-empty') - with self.assertRaisesRegexp( - Exception, 'State invalid_state does not exist'): - exploration.rename_state('invalid_state', 'new state name') + exploration.tags = ['123'] + self._assert_validation_error( + exploration, 'should only contain lowercase letters and spaces') + exploration.tags = ['ABC'] + self._assert_validation_error( + exploration, 'should only contain lowercase letters and spaces') - def test_default_outcome_is_labelled_incorrect_for_self_loop(self): - exploration = self.save_new_valid_exploration( - 'exp_id', 'user@example.com', title='title', category='category', - objective='objective', end_state_name='End') - exploration.validate(strict=True) + exploration.tags = [' a b'] + self._assert_validation_error( + exploration, 'Tags should not start or end with whitespace') + exploration.tags = ['a b '] + 
self._assert_validation_error( + exploration, 'Tags should not start or end with whitespace') - ( - exploration.init_state.interaction.default_outcome - .labelled_as_correct) = True + exploration.tags = ['a b'] + self._assert_validation_error( + exploration, 'Adjacent whitespace in tags should be collapsed') - ( - exploration.init_state.interaction.default_outcome - .dest) = exploration.init_state_name + exploration.tags = ['abc', 'abc'] + self._assert_validation_error( + exploration, 'Some tags duplicate each other') - with self.assertRaisesRegexp( - Exception, - 'The default outcome for state Introduction is labelled ' - 'correct but is a self-loop'): - exploration.validate(strict=True) + exploration.tags = ['computer science', 'analysis', 'a b c'] + exploration.validate() - def test_serialize_and_deserialize_returns_unchanged_exploration(self): - """Checks that serializing and then deserializing a default exploration - works as intended by leaving the exploration unchanged. + def test_title_category_and_objective_validation(self) -> None: + """Test that titles, categories and objectives are validated only in + 'strict' mode. 
""" - exploration = exp_domain.Exploration.create_default_exploration('eid') - self.assertEqual( - exploration.to_dict(), - exp_domain.Exploration.deserialize( - exploration.serialize()).to_dict()) + self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration = exp_fetchers.get_exploration_by_id('exp_id') + exploration.validate() + with self.assertRaisesRegex( + utils.ValidationError, 'title must be specified' + ): + exploration.validate(strict=True) + exploration.title = 'A title' -class ExplorationSummaryTests(test_utils.GenericTestBase): + with self.assertRaisesRegex( + utils.ValidationError, 'category must be specified' + ): + exploration.validate(strict=True) + exploration.category = 'A category' - def setUp(self): - super(ExplorationSummaryTests, self).setUp() - self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) - self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) - exploration = exp_domain.Exploration.create_default_exploration('eid') - exp_services.save_new_exploration(self.owner_id, exploration) - self.exp_summary = exp_fetchers.get_exploration_summary_by_id('eid') - self.exp_summary.editor_ids = ['editor_id'] - self.exp_summary.voice_artist_ids = ['voice_artist_id'] - self.exp_summary.viewer_ids = ['viewer_id'] - self.exp_summary.contributor_ids = ['contributor_id'] + with self.assertRaisesRegex( + utils.ValidationError, 'objective must be specified' + ): + exploration.validate(strict=True) - def test_validation_passes_with_valid_properties(self): - self.exp_summary.validate() + exploration.objective = 'An objective' - def test_validation_fails_with_invalid_title(self): - self.exp_summary.title = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected title to be a string, received 0'): - self.exp_summary.validate() + exploration.validate(strict=True) - def test_validation_fails_with_invalid_category(self): - self.exp_summary.category = 0 - with 
self.assertRaisesRegexp( - utils.ValidationError, - 'Expected category to be a string, received 0'): - self.exp_summary.validate() + def test_get_trainable_states_dict(self) -> None: + """Test the get_trainable_states_dict() method.""" + exp_id = 'exp_id1' + test_exp_filepath = os.path.join( + feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') + yaml_content = utils.get_file_contents(test_exp_filepath) + assets_list: List[Tuple[str, bytes]] = [] + exp_services.save_new_exploration_from_yaml_and_assets( + feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id, + assets_list) - def test_validation_fails_with_invalid_objective(self): - self.exp_summary.objective = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected objective to be a string, received 0'): - self.exp_summary.validate() + exploration_model = exp_models.ExplorationModel.get( + exp_id, strict=True) + old_states = exp_fetchers.get_exploration_from_model( + exploration_model).states + exploration = exp_fetchers.get_exploration_by_id(exp_id) - def test_validation_fails_with_invalid_language_code(self): - self.exp_summary.language_code = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected language_code to be a string, received 0'): - self.exp_summary.validate() + # Rename a state to add it in unchanged answer group. 
+ exploration.rename_state('Home', 'Renamed state') + change_list = [exp_domain.ExplorationChange({ + 'cmd': 'rename_state', + 'old_state_name': 'Home', + 'new_state_name': 'Renamed state' + })] - def test_validation_fails_with_unallowed_language_code(self): - self.exp_summary.language_code = 'invalid' - with self.assertRaisesRegexp( - utils.ValidationError, 'Invalid language_code: invalid'): - self.exp_summary.validate() + expected_dict = { + 'state_names_with_changed_answer_groups': [], + 'state_names_with_unchanged_answer_groups': ['Renamed state'] + } + exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) + actual_dict = exploration.get_trainable_states_dict( + old_states, exp_versions_diff) + self.assertEqual(actual_dict, expected_dict) - def test_validation_fails_with_invalid_tags(self): - self.exp_summary.tags = 'tags' - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected \'tags\' to be a list, received tags'): - self.exp_summary.validate() + # Modify answer groups to trigger change in answer groups. 
+ state = exploration.states['Renamed state'] + exploration.states['Renamed state'].interaction.answer_groups.insert( + 3, state.interaction.answer_groups[3]) + answer_groups = [] + for answer_group in state.interaction.answer_groups: + answer_groups.append(answer_group.to_dict()) + change_list = [exp_domain.ExplorationChange({ + 'cmd': 'edit_state_property', + 'state_name': 'Renamed state', + 'property_name': 'answer_groups', + 'new_value': answer_groups + })] - def test_validation_fails_with_invalid_tag_in_tags(self): - self.exp_summary.tags = ['tag', 2] - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected each tag in \'tags\' to be a string, received \'2\''): - self.exp_summary.validate() + expected_dict = { + 'state_names_with_changed_answer_groups': ['Renamed state'], + 'state_names_with_unchanged_answer_groups': [] + } + exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) + actual_dict = exploration.get_trainable_states_dict( + old_states, exp_versions_diff) + self.assertEqual(actual_dict, expected_dict) - def test_validation_fails_with_empty_tag_in_tags(self): - self.exp_summary.tags = ['', 'abc'] - with self.assertRaisesRegexp( - utils.ValidationError, 'Tags should be non-empty'): - self.exp_summary.validate() + # Add new state to trigger change in answer groups. 
+ exploration.add_states(['New state']) + exploration.states['New state'] = copy.deepcopy( + exploration.states['Renamed state']) + change_list = [exp_domain.ExplorationChange({ + 'cmd': 'add_state', + 'state_name': 'New state', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' + })] - def test_validation_fails_with_unallowed_characters_in_tag(self): - self.exp_summary.tags = ['123', 'abc'] - with self.assertRaisesRegexp( - utils.ValidationError, ( - 'Tags should only contain lowercase ' - 'letters and spaces, received \'123\'')): - self.exp_summary.validate() + expected_dict = { + 'state_names_with_changed_answer_groups': [ + 'Renamed state', 'New state'], + 'state_names_with_unchanged_answer_groups': [] + } + exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) + actual_dict = exploration.get_trainable_states_dict( + old_states, exp_versions_diff) + self.assertEqual(actual_dict, expected_dict) - def test_validation_fails_with_whitespace_in_tag_start(self): - self.exp_summary.tags = [' ab', 'abc'] - with self.assertRaisesRegexp( - utils.ValidationError, - 'Tags should not start or end with whitespace, received \' ab\''): - self.exp_summary.validate() + # Delete state. 
+ exploration.delete_state('New state') + change_list = [exp_domain.ExplorationChange({ + 'cmd': 'delete_state', + 'state_name': 'New state' + })] - def test_validation_fails_with_whitespace_in_tag_end(self): - self.exp_summary.tags = ['ab ', 'abc'] - with self.assertRaisesRegexp( - utils.ValidationError, - 'Tags should not start or end with whitespace, received \'ab \''): - self.exp_summary.validate() + expected_dict = { + 'state_names_with_changed_answer_groups': ['Renamed state'], + 'state_names_with_unchanged_answer_groups': [] + } + exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) + actual_dict = exploration.get_trainable_states_dict( + old_states, exp_versions_diff) + self.assertEqual(actual_dict, expected_dict) - def test_validation_fails_with_adjacent_whitespace_in_tag(self): - self.exp_summary.tags = ['a b', 'abc'] - with self.assertRaisesRegexp( - utils.ValidationError, ( - 'Adjacent whitespace in tags should ' - 'be collapsed, received \'a b\'')): - self.exp_summary.validate() + # Test addition and multiple renames. 
+ exploration.add_states(['New state']) + exploration.states['New state'] = copy.deepcopy( + exploration.states['Renamed state']) + exploration.rename_state('New state', 'New state2') + exploration.rename_state('New state2', 'New state3') + change_list = [exp_domain.ExplorationChange({ + 'cmd': 'add_state', + 'state_name': 'New state', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' + }), exp_domain.ExplorationChange({ + 'cmd': 'rename_state', + 'old_state_name': 'New state', + 'new_state_name': 'New state2' + }), exp_domain.ExplorationChange({ + 'cmd': 'rename_state', + 'old_state_name': 'New state2', + 'new_state_name': 'New state3' + })] - def test_validation_fails_with_duplicate_tags(self): - self.exp_summary.tags = ['abc', 'abc', 'ab'] - with self.assertRaisesRegexp( - utils.ValidationError, 'Some tags duplicate each other'): - self.exp_summary.validate() + expected_dict = { + 'state_names_with_changed_answer_groups': [ + 'Renamed state', 'New state3' + ], + 'state_names_with_unchanged_answer_groups': [] + } + exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) + actual_dict = exploration.get_trainable_states_dict( + old_states, exp_versions_diff) + self.assertEqual(actual_dict, expected_dict) - def test_validation_fails_with_invalid_rating_type(self): - self.exp_summary.ratings = 0 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected ratings to be a dict, received 0'): - self.exp_summary.validate() + def test_get_metadata(self) -> None: + exploration = exp_domain.Exploration.create_default_exploration('0') + actual_metadata_dict = exploration.get_metadata().to_dict() + expected_metadata_dict = { + 'title': exploration.title, + 'category': exploration.category, + 'objective': exploration.objective, + 'language_code': exploration.language_code, + 'tags': exploration.tags, + 'blurb': exploration.blurb, + 'author_notes': exploration.author_notes, + 'states_schema_version': 
exploration.states_schema_version, + 'init_state_name': exploration.init_state_name, + 'param_specs': {}, + 'param_changes': [], + 'auto_tts_enabled': exploration.auto_tts_enabled, + 'correctness_feedback_enabled': ( + exploration.correctness_feedback_enabled), + 'edits_allowed': exploration.edits_allowed + } - def test_validation_fails_with_invalid_rating_keys(self): - self.exp_summary.ratings = {'1': 0, '10': 1} - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected ratings to have keys: 1, 2, 3, 4, 5, received 1, 10'): - self.exp_summary.validate() + self.assertEqual(actual_metadata_dict, expected_metadata_dict) - def test_validation_fails_with_invalid_value_type_for_ratings(self): - self.exp_summary.ratings = {'1': 0, '2': 'one', '3': 0, '4': 0, '5': 0} - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected value to be int, received one'): - self.exp_summary.validate() + def test_get_content_with_correct_state_name_returns_html(self) -> None: + exploration = exp_domain.Exploration.create_default_exploration('0') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + init_state = exploration.states[exploration.init_state_name] + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) + hints_list = [ + state_domain.Hint( + state_domain.SubtitledHtml('hint_1', '

    hint one

    ') + ) + ] + init_state.update_interaction_hints(hints_list) - def test_validation_fails_with_invalid_value_for_ratings(self): - self.exp_summary.ratings = {'1': 0, '2': -1, '3': 0, '4': 0, '5': 0} - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected value to be non-negative, received -1'): - self.exp_summary.validate() + self.assertEqual( + exploration.get_content_html(exploration.init_state_name, 'hint_1'), + '

    hint one

    ') - def test_validation_fails_with_invalid_scaled_average_rating(self): - self.exp_summary.scaled_average_rating = 'one' - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected scaled_average_rating to be float, received one'): - self.exp_summary.validate() + hints_list[0].hint_content.html = '

    Changed hint one

    ' + init_state.update_interaction_hints(hints_list) - def test_validation_fails_with_invalid_status(self): - self.exp_summary.status = 0 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected status to be string, received 0'): - self.exp_summary.validate() + self.assertEqual( + exploration.get_content_html(exploration.init_state_name, 'hint_1'), + '

    Changed hint one

    ') - def test_validation_fails_with_invalid_community_owned(self): - self.exp_summary.community_owned = '1' - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected community_owned to be bool, received 1'): - self.exp_summary.validate() + def test_get_content_with_incorrect_state_name_raise_error(self) -> None: + exploration = exp_domain.Exploration.create_default_exploration('0') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + init_state = exploration.states[exploration.init_state_name] + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) + hints_list = [ + state_domain.Hint( + state_domain.SubtitledHtml('hint_1', '

    hint one

    ') + ) + ] + init_state.update_interaction_hints(hints_list) - def test_validation_fails_with_invalid_contributors_summary(self): - self.exp_summary.contributors_summary = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected contributors_summary to be dict, received 0'): - self.exp_summary.validate() + self.assertEqual( + exploration.get_content_html(exploration.init_state_name, 'hint_1'), + '

    hint one

    ') - def test_validation_fails_with_invalid_owner_ids_type(self): - self.exp_summary.owner_ids = 0 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected owner_ids to be list, received 0'): - self.exp_summary.validate() + with self.assertRaisesRegex( + ValueError, 'State Invalid state does not exist'): + exploration.get_content_html('Invalid state', 'hint_1') - def test_validation_fails_with_invalid_owner_id_in_owner_ids(self): - self.exp_summary.owner_ids = ['1', 2, '3'] - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected each id in owner_ids to be string, received 2'): - self.exp_summary.validate() + def test_is_demo_property(self) -> None: + """Test the is_demo property.""" + demo = exp_domain.Exploration.create_default_exploration('0') + self.assertEqual(demo.is_demo, True) - def test_validation_fails_with_invalid_editor_ids_type(self): - self.exp_summary.editor_ids = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected editor_ids to be list, received 0'): - self.exp_summary.validate() + notdemo1 = exp_domain.Exploration.create_default_exploration('a') + self.assertEqual(notdemo1.is_demo, False) - def test_validation_fails_with_invalid_editor_id_in_editor_ids(self): - self.exp_summary.editor_ids = ['1', 2, '3'] - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected each id in editor_ids to be string, received 2'): - self.exp_summary.validate() + notdemo2 = exp_domain.Exploration.create_default_exploration('abcd') + self.assertEqual(notdemo2.is_demo, False) - def test_validation_fails_with_invalid_voice_artist_ids_type(self): - self.exp_summary.voice_artist_ids = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected voice_artist_ids to be list, received 0'): - self.exp_summary.validate() + def test_has_state_name(self) -> None: + """Test for has_state_name.""" + demo = exp_domain.Exploration.create_default_exploration('0') + state_names = list(demo.states.keys()) + 
self.assertEqual(state_names, ['Introduction']) + self.assertEqual(demo.has_state_name('Introduction'), True) + self.assertEqual(demo.has_state_name('Fake state name'), False) - def test_validation_fails_with_invalid_voice_artist_id_in_voice_artists_ids( - self): - self.exp_summary.voice_artist_ids = ['1', 2, '3'] - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected each id in voice_artist_ids to be string, received 2'): - self.exp_summary.validate() + def test_get_interaction_id_by_state_name(self) -> None: + """Test for get_interaction_id_by_state_name.""" + demo = exp_domain.Exploration.create_default_exploration('0') + self.assertEqual( + demo.get_interaction_id_by_state_name('Introduction'), None) - def test_validation_fails_with_invalid_viewer_ids_type(self): - self.exp_summary.viewer_ids = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected viewer_ids to be list, received 0'): - self.exp_summary.validate() - - def test_validation_fails_with_invalid_viewer_id_in_viewer_ids(self): - self.exp_summary.viewer_ids = ['1', 2, '3'] - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected each id in viewer_ids to be string, received 2'): - self.exp_summary.validate() + def test_exploration_export_import(self) -> None: + """Test that to_dict and from_dict preserve all data within an + exploration. + """ + demo = exp_domain.Exploration.create_default_exploration('0') + demo_dict = demo.to_dict() + exp_from_dict = exp_domain.Exploration.from_dict(demo_dict) + self.assertEqual(exp_from_dict.to_dict(), demo_dict) - def test_validation_fails_with_invalid_contributor_ids_type(self): - self.exp_summary.contributor_ids = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected contributor_ids to be list, received 0'): - self.exp_summary.validate() + def test_interaction_with_none_id_is_not_terminal(self) -> None: + """Test that an interaction with an id of None leads to is_terminal + being false. 
+ """ + # Default exploration has a default interaction with an ID of None. + demo = exp_domain.Exploration.create_default_exploration('0') + init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME] + self.assertFalse(init_state.interaction.is_terminal) - def test_validation_fails_with_invalid_contributor_id_in_contributor_ids( - self): - self.exp_summary.contributor_ids = ['1', 2, '3'] - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected each id in contributor_ids to be string, received 2'): - self.exp_summary.validate() + def test_cannot_create_demo_exp_with_invalid_param_changes(self) -> None: + demo_exp = exp_domain.Exploration.create_default_exploration('0') + content_id_generator = translation_domain.ContentIdGenerator( + demo_exp.next_content_id_index + ) + demo_dict = demo_exp.to_dict() + new_state = state_domain.State.create_default_state( + 'new_state_name', + content_id_generator.generate( + translation_domain.ContentType.CONTENT + ), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME + )) + new_state.param_changes = [param_domain.ParamChange.from_dict({ + 'customization_args': { + 'list_of_values': ['1', '2'], 'parse_with_jinja': False + }, + 'name': 'myParam', + 'generator_id': 'RandomSelector' + })] - def test_is_private(self): - self.assertTrue(self.exp_summary.is_private()) - self.exp_summary.status = constants.ACTIVITY_STATUS_PUBLIC - self.assertFalse(self.exp_summary.is_private()) + demo_dict['states']['new_state_name'] = new_state.to_dict() + demo_dict['param_specs'] = { + 'ParamSpec': {'obj_type': 'UnicodeString'} + } + with self.assertRaisesRegex( + Exception, + 'Parameter myParam was used in a state but not ' + 'declared in the exploration param_specs.'): + exp_domain.Exploration.from_dict(demo_dict) - def test_is_solely_owned_by_user_one_owner(self): - self.assertTrue(self.exp_summary.is_solely_owned_by_user(self.owner_id)) - self.assertFalse(self.exp_summary.is_solely_owned_by_user('other_id')) 
- self.exp_summary.owner_ids = ['other_id'] - self.assertFalse( - self.exp_summary.is_solely_owned_by_user(self.owner_id)) - self.assertTrue(self.exp_summary.is_solely_owned_by_user('other_id')) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validate_exploration_category(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() - def test_is_solely_owned_by_user_multiple_owners(self): - self.assertTrue(self.exp_summary.is_solely_owned_by_user(self.owner_id)) - self.assertFalse(self.exp_summary.is_solely_owned_by_user('other_id')) - self.exp_summary.owner_ids = [self.owner_id, 'other_id'] - self.assertFalse( - self.exp_summary.is_solely_owned_by_user(self.owner_id)) - self.assertFalse(self.exp_summary.is_solely_owned_by_user('other_id')) + exploration.category = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'Expected category to be a string, received 1'): + exploration.validate() - def test_is_solely_owned_by_user_other_users(self): - self.assertFalse(self.exp_summary.is_solely_owned_by_user('editor_id')) - self.assertFalse( - self.exp_summary.is_solely_owned_by_user('voice_artist_id')) - self.assertFalse(self.exp_summary.is_solely_owned_by_user('viewer_id')) - self.assertFalse( - self.exp_summary.is_solely_owned_by_user('contributor_id')) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validate_exploration_objective(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() - def test_add_new_contribution_for_user_adds_user_to_contributors(self): - self.exp_summary.add_contribution_by_user('user_id') - self.assertIn('user_id', self.exp_summary.contributors_summary) - self.assertEqual(self.exp_summary.contributors_summary['user_id'], 1) - self.assertIn('user_id', self.exp_summary.contributor_ids) + exploration.objective = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'Expected objective to be a string, received 1'): + exploration.validate() - def test_add_new_contribution_for_user_increases_score_in_contributors( - self): - self.exp_summary.add_contribution_by_user('user_id') - self.exp_summary.add_contribution_by_user('user_id') - self.assertIn('user_id', self.exp_summary.contributors_summary) - self.assertEqual(self.exp_summary.contributors_summary['user_id'], 2) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validate_exploration_blurb(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() - def test_add_new_contribution_for_user_does_not_add_system_user(self): - self.exp_summary.add_contribution_by_user( - feconf.SYSTEM_COMMITTER_ID) - self.assertNotIn( - feconf.SYSTEM_COMMITTER_ID, self.exp_summary.contributors_summary) - self.assertNotIn( - feconf.SYSTEM_COMMITTER_ID, self.exp_summary.contributor_ids) + exploration.blurb = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'Expected blurb to be a string, received 1'): + exploration.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validate_exploration_language_code(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() -class YamlCreationUnitTests(test_utils.GenericTestBase): - """Test creation of explorations from YAML files.""" + exploration.language_code = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'Expected language_code to be a string, received 1'): + exploration.validate() - YAML_CONTENT_INVALID_SCHEMA_VERSION = ( - """author_notes: '' -auto_tts_enabled: true -blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) -language_code: en -objective: '' -param_changes: [] -param_specs: {} -schema_version: 10000 -states: - (untitled state): - classifier_model_id: null - content: - content_id: content - html: '' - interaction: - answer_groups: - - outcome: - dest: END - feedback: - content_id: feedback_1 - html:

    Correct!

    - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - rule_specs: - - inputs: - x: - contentId: rule_input_3 - normalizedStrSet: - - InputString - rule_type: Equals - tagged_skill_misconception_id: null - training_data: [] - confirmed_unclassified_answers: [] - customization_args: - placeholder: - value: - content_id: ca_placeholder_2 - unicode_str: '' - rows: - value: 1 - default_outcome: - dest: (untitled state) - feedback: - content_id: default_outcome - html: '' - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - hints: [] - id: TextInput - solution: null - next_content_id_index: 4 - param_changes: [] - recorded_voiceovers: - voiceovers_mapping: - ca_placeholder_2: {} - content: {} - default_outcome: {} - feedback_1: {} - rule_input_3: {} - solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_2: {} - content: {} - default_outcome: {} - feedback_1: {} - rule_input_3: {} - END: - classifier_model_id: null - content: - content_id: content - html:

    Congratulations, you have finished!

    - interaction: - answer_groups: [] - confirmed_unclassified_answers: [] - customization_args: - recommendedExplorationIds: - value: [] - default_outcome: null - hints: [] - id: EndExploration - solution: null - next_content_id_index: 0 - param_changes: [] - recorded_voiceovers: - voiceovers_mapping: - content: {} - solicit_answer_details: false - written_translations: - translations_mapping: - content: {} - New state: - classifier_model_id: null - content: - content_id: content - html: '' - interaction: - answer_groups: [] - confirmed_unclassified_answers: [] - customization_args: - placeholder: - value: - content_id: ca_placeholder_0 - unicode_str: '' - rows: - value: 1 - default_outcome: - dest: END - feedback: - content_id: default_outcome - html: '' - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - hints: [] - id: TextInput - solution: null - next_content_id_index: 1 - param_changes: [] - recorded_voiceovers: - voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} - solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} -states_schema_version: 10000 -tags: [] -title: Title -""") + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validate_exploration_author_notes(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() - EXP_ID = 'An exploration_id' + exploration.author_notes = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'Expected author_notes to be a string, received 1'): + exploration.validate() - def test_creation_with_invalid_yaml_schema_version(self): - """Test that a schema version that is too big is detected.""" - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validate_exploration_states(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() + + exploration.states = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'Expected states to be a dict, received 1'): + exploration.validate() + + def test_validate_exploration_outcome_dest(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() + + # Ruling out the possibility of None for mypy type checking. + assert exploration.init_state.interaction.default_outcome is not None + exploration.init_state.interaction.default_outcome.dest = None + with self.assertRaisesRegex( + Exception, 'Every outcome should have a destination.'): + exploration.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validate_exploration_outcome_dest_type(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() + + # Ruling out the possibility of None for mypy type checking. + assert exploration.init_state.interaction.default_outcome is not None + exploration.init_state.interaction.default_outcome.dest = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'Expected outcome dest to be a string, received 1'): + exploration.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validate_exploration_states_schema_version(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() + + exploration.states_schema_version = None # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'This exploration has no states schema version.'): + exploration.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validate_exploration_auto_tts_enabled(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() + + exploration.auto_tts_enabled = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'Expected auto_tts_enabled to be a bool, received 1'): + exploration.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validate_exploration_correctness_feedback_enabled(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() + + exploration.correctness_feedback_enabled = 1 # type: ignore[assignment] + with self.assertRaisesRegex( Exception, - 'Sorry, we can only process v46 to v[0-9]+ exploration YAML files ' - 'at present.'): - exp_domain.Exploration.from_yaml( - 'bad_exp', self.YAML_CONTENT_INVALID_SCHEMA_VERSION) + 'Expected correctness_feedback_enabled to be a bool, received 1'): + exploration.validate() - def test_yaml_import_and_export(self): - """Test the from_yaml() and to_yaml() methods.""" - exploration = exp_domain.Exploration.create_default_exploration( - self.EXP_ID, title='Title', category='Category') - exploration.add_states(['New state']) - self.assertEqual(len(exploration.states), 2) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validate_exploration_next_content_id_index(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() + + exploration.next_content_id_index = '5' # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, + 'Expected next_content_id_index to be an int, received 5'): + exploration.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validate_exploration_edits_allowed(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') exploration.validate() - yaml_content = exploration.to_yaml() - self.assertEqual(yaml_content, self.SAMPLE_YAML_CONTENT) + exploration.edits_allowed = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, + 'Expected edits_allowed to be a bool, received 1'): + exploration.validate() - exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content) - self.assertEqual(len(exploration2.states), 2) - yaml_content_2 = exploration2.to_yaml() - self.assertEqual(yaml_content_2, yaml_content) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validate_exploration_param_specs(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() - with self.assertRaisesRegexp( - Exception, 'Please ensure that you are uploading a YAML text file, ' - 'not a zip file. 
The YAML parser returned the following error: '): - exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name') + exploration.param_specs = { + 1: param_domain.ParamSpec.from_dict( # type: ignore[dict-item] + {'obj_type': 'UnicodeString'}) + } + with self.assertRaisesRegex( + Exception, 'Expected parameter name to be a string, received 1'): + exploration.validate() + + def test_validate_exploration_param_changes_type(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + exploration.param_changes = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + Exception, 'Expected param_changes to be a list, received 1'): + exploration.validate() - with self.assertRaisesRegexp( + def test_validate_exploration_param_name(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() + + exploration.param_changes = [param_domain.ParamChange.from_dict({ + 'customization_args': { + 'list_of_values': ['1', '2'], 'parse_with_jinja': False + }, + 'name': 'invalid', + 'generator_id': 'RandomSelector' + })] + with self.assertRaisesRegex( Exception, - 'Please ensure that you are uploading a YAML text file, not a zip' - ' file. 
The YAML parser returned the following error: mapping ' - 'values are not allowed here'): - exp_domain.Exploration.from_yaml( - 'exp4', 'Invalid\ninit_state_name:\nMore stuff') + 'No parameter named \'invalid\' exists in this ' + 'exploration'): + exploration.validate() + + def test_validate_exploration_reserved_param_name(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() - with self.assertRaisesRegexp( + exploration.param_changes = [param_domain.ParamChange.from_dict({ + 'customization_args': { + 'list_of_values': ['1', '2'], 'parse_with_jinja': False + }, + 'name': 'all', + 'generator_id': 'RandomSelector' + })] + with self.assertRaisesRegex( Exception, - 'Please ensure that you are uploading a YAML text file, not a zip' - ' file. The YAML parser returned the following error: while ' - 'scanning a simple key'): - exp_domain.Exploration.from_yaml( - 'exp4', 'State1:\n(\nInvalid yaml') + 'The exploration-level parameter with name \'all\' is ' + 'reserved. Please choose a different name.'): + exploration.validate() + def test_validate_exploration_is_non_self_loop(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() -class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase): - """Tests the presence of appropriate schema migration methods in the - Exploration domain object class. 
- """ + exploration.add_states(['DEF']) - def test_correct_states_schema_conversion_methods_exist(self): - """Test that the right states schema conversion methods exist.""" - current_states_schema_version = ( - feconf.CURRENT_STATE_SCHEMA_VERSION) - for version_num in range( - feconf.EARLIEST_SUPPORTED_STATE_SCHEMA_VERSION, - current_states_schema_version): - self.assertTrue(hasattr( - exp_domain.Exploration, - '_convert_states_v%s_dict_to_v%s_dict' % ( - version_num, version_num + 1))) + default_outcome = state_domain.Outcome( + 'DEF', None, state_domain.SubtitledHtml( + 'default_outcome', '

    Default outcome for state1

    '), + False, [], 'refresher_exploration_id', None, + ) + exploration.init_state.update_interaction_default_outcome( + default_outcome + ) - self.assertFalse(hasattr( - exp_domain.Exploration, - '_convert_states_v%s_dict_to_v%s_dict' % ( - current_states_schema_version, - current_states_schema_version + 1))) + with self.assertRaisesRegex( + Exception, + 'The default outcome for state Introduction has a refresher ' + 'exploration ID, but is not a self-loop.'): + exploration.validate() - def test_correct_exploration_schema_conversion_methods_exist(self): - """Test that the right exploration schema conversion methods exist.""" - current_exp_schema_version = ( - exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION) + def test_validate_exploration_answer_group_parameter(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='', category='', + objective='', end_state_name='End') + exploration.validate() - for version_num in range( - exp_domain.Exploration.EARLIEST_SUPPORTED_EXP_SCHEMA_VERSION, - current_exp_schema_version): - self.assertTrue(hasattr( - exp_domain.Exploration, - '_convert_v%s_dict_to_v%s_dict' % ( - version_num, version_num + 1))) + param_changes = [param_domain.ParamChange( + 'ParamChange', 'RandomSelector', { + 'list_of_values': ['1', '2'], 'parse_with_jinja': False + } + )] + state_answer_group = state_domain.AnswerGroup( + state_domain.Outcome( + exploration.init_state_name, None, state_domain.SubtitledHtml( + 'feedback_1', 'Feedback'), + False, param_changes, None, None), + [ + state_domain.RuleSpec( + 'Contains', + { + 'x': + { + 'contentId': 'rule_input_Equals', + 'normalizedStrSet': ['Test'] + } + }) + ], + [], + None + ) + exploration.init_state.update_interaction_answer_groups( + [state_answer_group]) + with self.assertRaisesRegex( + Exception, + 'The parameter ParamChange was used in an answer group, ' + 'but it does not exist in this exploration'): + exploration.validate() - 
self.assertFalse(hasattr( - exp_domain.Exploration, - '_convert_v%s_dict_to_v%s_dict' % ( - current_exp_schema_version, current_exp_schema_version + 1))) + def test_verify_all_states_reachable(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'owner_id') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + exploration.validate() + exploration.add_states(['End', 'Stuck State']) + end_state = exploration.states['End'] + init_state = exploration.states['Introduction'] + stuck_state = exploration.states['Stuck State'] + state_default_outcome = state_domain.Outcome( + 'Introduction', 'Stuck State', state_domain.SubtitledHtml( + 'default_outcome_1', '

    Default outcome for State1

    '), + False, [], None, None + ) + init_state.update_interaction_default_outcome(state_default_outcome) + self.set_interaction_for_state( + stuck_state, 'TextInput', content_id_generator) + self.set_interaction_for_state( + end_state, 'EndExploration', content_id_generator) + end_state.update_interaction_default_outcome(None) -class SchemaMigrationUnitTests(test_utils.GenericTestBase): - """Test migration methods for yaml content.""" + with self.assertRaisesRegex( + Exception, + 'Please fix the following issues before saving this exploration: ' + '1. The following states are not reachable from the initial state: ' + 'End 2. It is impossible to complete the exploration from the ' + 'following states: Introduction, Stuck State'): + exploration.validate(strict=True) - YAML_CONTENT_V46 = ( - """author_notes: '' -auto_tts_enabled: true -blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) -language_code: en -objective: '' -param_changes: [] -param_specs: {} -schema_version: 46 -states: - (untitled state): - classifier_model_id: null - content: - content_id: content - html: '' - interaction: - answer_groups: - - outcome: - dest: END + def test_update_init_state_name_with_invalid_state(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='title', category='category', + objective='objective', end_state_name='End') + + exploration.update_init_state_name('End') + self.assertEqual(exploration.init_state_name, 'End') + + with self.assertRaisesRegex( + Exception, + 'Invalid new initial state name: invalid_state;'): + exploration.update_init_state_name('invalid_state') + + def test_rename_state_with_invalid_state(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='title', category='category', + objective='objective', end_state_name='End') + + self.assertTrue(exploration.states.get('End')) + 
self.assertFalse(exploration.states.get('new state name')) + + exploration.rename_state('End', 'new state name') + self.assertFalse(exploration.states.get('End')) + self.assertTrue(exploration.states.get('new state name')) + + with self.assertRaisesRegex( + Exception, 'State invalid_state does not exist'): + exploration.rename_state('invalid_state', 'new state name') + + def test_default_outcome_is_labelled_incorrect_for_self_loop(self) -> None: + exploration = self.save_new_valid_exploration( + 'exp_id', 'user@example.com', title='title', category='category', + objective='objective', end_state_name='End') + exploration.validate(strict=True) + # Ruling out the possibility of None for mypy type checking. + assert ( + exploration.init_state.interaction.default_outcome is not None + ) + + ( + exploration.init_state.interaction.default_outcome + .labelled_as_correct) = True + + ( + exploration.init_state.interaction.default_outcome + .dest) = exploration.init_state_name + + with self.assertRaisesRegex( + Exception, + 'The default outcome for state Introduction is labelled ' + 'correct but is a self-loop'): + exploration.validate(strict=True) + + def test_serialize_and_deserialize_returns_unchanged_exploration( + self + ) -> None: + """Checks that serializing and then deserializing a default exploration + works as intended by leaving the exploration unchanged. + """ + exploration = exp_domain.Exploration.create_default_exploration('eid') + self.assertEqual( + exploration.to_dict(), + exp_domain.Exploration.deserialize( + exploration.serialize()).to_dict()) + + def test_get_all_translatable_content_for_exp(self) -> None: + """Get all translatable fields from exploration.""" + exploration = exp_domain.Exploration.create_default_exploration( + 'exp_id') + exploration.add_states(['State1']) + state = exploration.states['State1'] + state_content_dict: state_domain.SubtitledHtmlDict = { + 'content_id': 'content_0', + 'html': '

    state content html

    ' + } + state_answer_group = [state_domain.AnswerGroup( + state_domain.Outcome( + exploration.init_state_name, None, state_domain.SubtitledHtml( + 'feedback_1', '

    state outcome html

    '), + False, [], None, None), + [ + state_domain.RuleSpec( + 'Equals', { + 'x': { + 'contentId': 'rule_input_Equals', + 'normalizedStrSet': ['Test'] + }}) + ], + [], + None + )] + state_default_outcome = state_domain.Outcome( + 'State1', None, state_domain.SubtitledHtml( + 'default_outcome', '

    Default outcome for State1

    '), + False, [], None, None + ) + state_hint_list = [ + state_domain.Hint( + state_domain.SubtitledHtml( + 'hint_1', '

    Hello, this is html1 for state1

    ' + ) + ), + state_domain.Hint( + state_domain.SubtitledHtml( + 'hint_2', '

    Hello, this is html2 for state1

    ' + ) + ), + ] + state_solution_dict: state_domain.SolutionDict = { + 'answer_is_exclusive': True, + 'correct_answer': 'Answer1', + 'explanation': { + 'content_id': 'solution', + 'html': '

    This is solution for state1

    ' + } + } + state_interaction_cust_args: Dict[ + str, Dict[str, Union[state_domain.SubtitledUnicodeDict, int]] + ] = { + 'placeholder': { + 'value': { + 'content_id': 'ca_placeholder_0', + 'unicode_str': '' + } + }, + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} + } + state.update_content( + state_domain.SubtitledHtml.from_dict(state_content_dict)) + state.update_interaction_id('TextInput') + state.update_interaction_customization_args(state_interaction_cust_args) + state.update_interaction_answer_groups( + state_answer_group) + state.update_interaction_default_outcome(state_default_outcome) + state.update_interaction_hints(state_hint_list) + # Ruling out the possibility of None for mypy type checking. + assert state.interaction.id is not None + solution = state_domain.Solution.from_dict( + state.interaction.id, state_solution_dict) + state.update_interaction_solution(solution) + translatable_contents = [ + translatable_content.content_value + for translatable_content in + exploration.get_all_contents_which_need_translations( + self.dummy_entity_translations).values() + ] + + self.assertItemsEqual( + translatable_contents, + [ + '

    state outcome html

    ', + '

    Default outcome for State1

    ', + '

    Hello, this is html1 for state1

    ', + ['Test'], + '

    Hello, this is html2 for state1

    ', + '

    This is solution for state1

    ', + '

    state content html

    ' + ]) + + +class ExplorationSummaryTests(test_utils.GenericTestBase): + + def setUp(self) -> None: + super().setUp() + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + exploration = exp_domain.Exploration.create_default_exploration('eid') + exp_services.save_new_exploration(self.owner_id, exploration) + self.exp_summary = exp_fetchers.get_exploration_summary_by_id('eid') + self.exp_summary.editor_ids = ['editor_id'] + self.exp_summary.voice_artist_ids = ['voice_artist_id'] + self.exp_summary.viewer_ids = ['viewer_id'] + self.exp_summary.contributor_ids = ['contributor_id'] + + def test_validation_passes_with_valid_properties(self) -> None: + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_title(self) -> None: + self.exp_summary.title = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected title to be a string, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_category(self) -> None: + self.exp_summary.category = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected category to be a string, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_objective(self) -> None: + self.exp_summary.objective = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected objective to be a string, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_language_code(self) -> None: + self.exp_summary.language_code = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected language_code to be a string, received 0'): + self.exp_summary.validate() + + def test_validation_fails_with_unallowed_language_code(self) -> None: + self.exp_summary.language_code = 'invalid' + with self.assertRaisesRegex( + utils.ValidationError, 'Invalid language_code: invalid'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_tags(self) -> None: + self.exp_summary.tags = 'tags' # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected \'tags\' to be a list, received tags'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_tag_in_tags(self) -> None: + self.exp_summary.tags = ['tag', 2] # type: ignore[list-item] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected each tag in \'tags\' to be a string, received \'2\''): + self.exp_summary.validate() + + def test_validation_fails_with_empty_tag_in_tags(self) -> None: + self.exp_summary.tags = ['', 'abc'] + with self.assertRaisesRegex( + utils.ValidationError, 'Tags should be non-empty'): + self.exp_summary.validate() + + def test_validation_fails_with_unallowed_characters_in_tag(self) -> None: + self.exp_summary.tags = ['123', 'abc'] + with self.assertRaisesRegex( + utils.ValidationError, ( + 'Tags should only contain lowercase ' + 'letters and spaces, received \'123\'')): + self.exp_summary.validate() + + def test_validation_fails_with_whitespace_in_tag_start(self) -> None: + self.exp_summary.tags = [' ab', 'abc'] + with self.assertRaisesRegex( + utils.ValidationError, + 'Tags should not start or end with whitespace, received \' ab\''): + self.exp_summary.validate() + + def test_validation_fails_with_whitespace_in_tag_end(self) -> None: + self.exp_summary.tags = ['ab ', 'abc'] + with self.assertRaisesRegex( + utils.ValidationError, + 'Tags should not start or end with whitespace, received \'ab \''): + self.exp_summary.validate() + + def test_validation_fails_with_adjacent_whitespace_in_tag(self) -> None: + self.exp_summary.tags = ['a b', 'abc'] + with self.assertRaisesRegex( + utils.ValidationError, ( + 'Adjacent whitespace in tags should ' + 'be collapsed, received \'a b\'')): + self.exp_summary.validate() + + def test_validation_fails_with_duplicate_tags(self) -> None: + self.exp_summary.tags = ['abc', 'abc', 'ab'] + with self.assertRaisesRegex( + utils.ValidationError, 'Some tags duplicate each other'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # 
wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_rating_type(self) -> None: + self.exp_summary.ratings = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, 'Expected ratings to be a dict, received 0'): + self.exp_summary.validate() + + def test_validation_fails_with_invalid_rating_keys(self) -> None: + self.exp_summary.ratings = {'1': 0, '10': 1} + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected ratings to have keys: 1, 2, 3, 4, 5, received 1, 10'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_value_type_for_ratings(self) -> None: + self.exp_summary.ratings = {'1': 0, '2': 'one', '3': 0, '4': 0, '5': 0} # type: ignore[dict-item] + with self.assertRaisesRegex( + utils.ValidationError, 'Expected value to be int, received one'): + self.exp_summary.validate() + + def test_validation_fails_with_invalid_value_for_ratings(self) -> None: + self.exp_summary.ratings = {'1': 0, '2': -1, '3': 0, '4': 0, '5': 0} + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected value to be non-negative, received -1'): + self.exp_summary.validate() + + def test_validation_passes_with_int_scaled_average_rating(self) -> None: + self.exp_summary.scaled_average_rating = 1 + self.exp_summary.validate() + self.assertEqual(self.exp_summary.scaled_average_rating, 1) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_scaled_average_rating(self) -> None: + self.exp_summary.scaled_average_rating = 'one' # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected scaled_average_rating to be float, received one' + ): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_status(self) -> None: + self.exp_summary.status = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, 'Expected status to be string, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_community_owned(self) -> None: + self.exp_summary.community_owned = '1' # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected community_owned to be bool, received 1'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_contributors_summary(self) -> None: + self.exp_summary.contributors_summary = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected contributors_summary to be dict, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_owner_ids_type(self) -> None: + self.exp_summary.owner_ids = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, 'Expected owner_ids to be list, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_owner_id_in_owner_ids(self) -> None: + self.exp_summary.owner_ids = ['1', 2, '3'] # type: ignore[list-item] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected each id in owner_ids to be string, received 2'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_editor_ids_type(self) -> None: + self.exp_summary.editor_ids = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected editor_ids to be list, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_editor_id_in_editor_ids( + self + ) -> None: + self.exp_summary.editor_ids = ['1', 2, '3'] # type: ignore[list-item] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected each id in editor_ids to be string, received 2'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_voice_artist_ids_type(self) -> None: + self.exp_summary.voice_artist_ids = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected voice_artist_ids to be list, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_voice_artist_id_in_voice_artists_ids( + self + ) -> None: + self.exp_summary.voice_artist_ids = ['1', 2, '3'] # type: ignore[list-item] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected each id in voice_artist_ids to be string, received 2'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_viewer_ids_type(self) -> None: + self.exp_summary.viewer_ids = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected viewer_ids to be list, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_viewer_id_in_viewer_ids( + self + ) -> None: + self.exp_summary.viewer_ids = ['1', 2, '3'] # type: ignore[list-item] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected each id in viewer_ids to be string, received 2'): + self.exp_summary.validate() + + def test_validation_fails_with_duplicate_user_role(self) -> None: + self.exp_summary.owner_ids = ['1'] + self.exp_summary.editor_ids = ['2', '3'] + self.exp_summary.voice_artist_ids = ['4'] + self.exp_summary.viewer_ids = ['2'] + with self.assertRaisesRegex( + utils.ValidationError, ( + 'Users should not be assigned to multiple roles at once, ' + 'received users: 1, 2, 3, 4, 2') + ): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_contributor_ids_type(self) -> None: + self.exp_summary.contributor_ids = 0 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected contributor_ids to be list, received 0'): + self.exp_summary.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_contributor_id_in_contributor_ids( + self + ) -> None: + self.exp_summary.contributor_ids = ['1', 2, '3'] # type: ignore[list-item] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected each id in contributor_ids to be string, received 2'): + self.exp_summary.validate() + + def test_is_private(self) -> None: + self.assertTrue(self.exp_summary.is_private()) + self.exp_summary.status = constants.ACTIVITY_STATUS_PUBLIC + self.assertFalse(self.exp_summary.is_private()) + + def test_is_solely_owned_by_user_one_owner(self) -> None: + self.assertTrue(self.exp_summary.is_solely_owned_by_user(self.owner_id)) + self.assertFalse(self.exp_summary.is_solely_owned_by_user('other_id')) + self.exp_summary.owner_ids = ['other_id'] + self.assertFalse( + self.exp_summary.is_solely_owned_by_user(self.owner_id)) + self.assertTrue(self.exp_summary.is_solely_owned_by_user('other_id')) + + def test_is_solely_owned_by_user_multiple_owners(self) -> None: + self.assertTrue(self.exp_summary.is_solely_owned_by_user(self.owner_id)) + self.assertFalse(self.exp_summary.is_solely_owned_by_user('other_id')) + self.exp_summary.owner_ids = [self.owner_id, 'other_id'] + self.assertFalse( + self.exp_summary.is_solely_owned_by_user(self.owner_id)) + self.assertFalse(self.exp_summary.is_solely_owned_by_user('other_id')) + + def test_is_solely_owned_by_user_other_users(self) -> None: + self.assertFalse(self.exp_summary.is_solely_owned_by_user('editor_id')) + self.assertFalse( + self.exp_summary.is_solely_owned_by_user('voice_artist_id')) + self.assertFalse(self.exp_summary.is_solely_owned_by_user('viewer_id')) + self.assertFalse( + self.exp_summary.is_solely_owned_by_user('contributor_id')) + + def test_add_new_contribution_for_user_adds_user_to_contributors( + self + ) -> None: + self.exp_summary.add_contribution_by_user('user_id') + self.assertIn('user_id', self.exp_summary.contributors_summary) + 
self.assertEqual(self.exp_summary.contributors_summary['user_id'], 1) + self.assertIn('user_id', self.exp_summary.contributor_ids) + + def test_add_new_contribution_for_user_increases_score_in_contributors( + self + ) -> None: + self.exp_summary.add_contribution_by_user('user_id') + self.exp_summary.add_contribution_by_user('user_id') + self.assertIn('user_id', self.exp_summary.contributors_summary) + self.assertEqual(self.exp_summary.contributors_summary['user_id'], 2) + + def test_add_new_contribution_for_user_does_not_add_system_user( + self + ) -> None: + self.exp_summary.add_contribution_by_user( + feconf.SYSTEM_COMMITTER_ID) + self.assertNotIn( + feconf.SYSTEM_COMMITTER_ID, self.exp_summary.contributors_summary) + self.assertNotIn( + feconf.SYSTEM_COMMITTER_ID, self.exp_summary.contributor_ids) + + +class YamlCreationUnitTests(test_utils.GenericTestBase): + """Test creation of explorations from YAML files.""" + + YAML_CONTENT_INVALID_SCHEMA_VERSION: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 10000 +states: + (untitled state): + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 10000 +tags: [] +title: Title +""") + + EXP_ID: Final = 'An exploration_id' + + def test_creation_with_invalid_yaml_schema_version(self) -> None: + """Test that a schema version that is too big is detected.""" + with self.assertRaisesRegex( + Exception, + 'Sorry, we can only process v46 to v[0-9]+ exploration YAML files ' + 'at present.'): + exp_domain.Exploration.from_yaml( + 'bad_exp', self.YAML_CONTENT_INVALID_SCHEMA_VERSION) + + def test_yaml_import_and_export(self) -> None: + """Test the from_yaml() and to_yaml() methods.""" + exploration = exp_domain.Exploration.create_default_exploration( + self.EXP_ID, title='Title', category='Category') + exploration.add_states(['New state']) + self.assertEqual(len(exploration.states), 2) + + 
exploration.validate() + + yaml_content = exploration.to_yaml() + self.assertEqual(yaml_content, self.SAMPLE_YAML_CONTENT) + + exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content) + self.assertEqual(len(exploration2.states), 2) + yaml_content_2 = exploration2.to_yaml() + self.assertEqual(yaml_content_2, yaml_content) + + with self.assertRaisesRegex( + Exception, 'Please ensure that you are uploading a YAML text file, ' + 'not a zip file. The YAML parser returned the following error: '): + exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name') + + with self.assertRaisesRegex( + Exception, + 'Please ensure that you are uploading a YAML text file, not a zip' + ' file. The YAML parser returned the following error: mapping ' + 'values are not allowed here'): + exp_domain.Exploration.from_yaml( + 'exp4', 'Invalid\ninit_state_name:\nMore stuff') + + with self.assertRaisesRegex( + Exception, + 'Please ensure that you are uploading a YAML text file, not a zip' + ' file. The YAML parser returned the following error: while ' + 'scanning a simple key'): + exp_domain.Exploration.from_yaml( + 'exp4', 'State1:\n(\nInvalid yaml') + + +class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase): + """Tests the presence of appropriate schema migration methods in the + Exploration domain object class. 
+ """ + + def test_correct_states_schema_conversion_methods_exist(self) -> None: + """Test that the right states schema conversion methods exist.""" + current_states_schema_version = ( + feconf.CURRENT_STATE_SCHEMA_VERSION) + for version_num in range( + feconf.EARLIEST_SUPPORTED_STATE_SCHEMA_VERSION, + current_states_schema_version): + self.assertTrue(hasattr( + exp_domain.Exploration, + '_convert_states_v%s_dict_to_v%s_dict' % ( + version_num, version_num + 1))) + + self.assertFalse(hasattr( + exp_domain.Exploration, + '_convert_states_v%s_dict_to_v%s_dict' % ( + current_states_schema_version, + current_states_schema_version + 1))) + + def test_correct_exploration_schema_conversion_methods_exist(self) -> None: + """Test that the right exploration schema conversion methods exist.""" + current_exp_schema_version = ( + exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION) + + for version_num in range( + exp_domain.Exploration.EARLIEST_SUPPORTED_EXP_SCHEMA_VERSION, + current_exp_schema_version): + self.assertTrue(hasattr( + exp_domain.Exploration, + '_convert_v%s_dict_to_v%s_dict' % ( + version_num, version_num + 1))) + + self.assertFalse(hasattr( + exp_domain.Exploration, + '_convert_v%s_dict_to_v%s_dict' % ( + current_exp_schema_version, current_exp_schema_version + 1))) + + +class SchemaMigrationUnitTests(test_utils.GenericTestBase): + """Test migration methods for yaml content.""" + + YAML_CONTENT_V46: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 46 +states: + (untitled state): + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 41 +tags: [] +title: Title +""") + + YAML_CONTENT_V47: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 47 +states: + (untitled state): + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 42 +tags: [] +title: Title +""") + + YAML_CONTENT_V48: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 48 +states: + (untitled state): + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 43 +tags: [] +title: Title +""") + + YAML_CONTENT_V49: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 49 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 44 +tags: [] +title: Title +""") + + YAML_CONTENT_V50: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 50 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 45 +tags: [] +title: Title +""") + + YAML_CONTENT_V51: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 51 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 46 +tags: [] +title: Title +""") + + YAML_CONTENT_V52: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 52 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 47 +tags: [] +title: Title +""") + + YAML_CONTENT_V53: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 53 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 48 +tags: [] +title: Title +""") + + YAML_CONTENT_V54: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 54 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 6 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + requireNonnegativeInput: + value: False + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: NumericInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + requireNonnegativeInput: + value: False + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: NumericInput + solution: null + linked_skill_id: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 49 +tags: [] +title: Title +""") + + YAML_CONTENT_V55: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 55 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 6 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + requireNonnegativeInput: + value: False + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: NumericInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + requireNonnegativeInput: + value: False + default_outcome: + dest: END + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: NumericInput + solution: null + linked_skill_id: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 50 +tags: [] +title: Title +""") + + YAML_CONTENT_V56: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 56 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + dest_if_really_stuck: null + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 6 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + requireNonnegativeInput: + value: False + default_outcome: + dest: (untitled state) + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: NumericInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: END + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 51 +tags: [] +title: Title +""") + + YAML_CONTENT_V58: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 58 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + dest_if_really_stuck: null + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 6 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + requireNonnegativeInput: + value: False + default_outcome: + dest: (untitled state) + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: NumericInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + catchMisspellings: + value: false + default_outcome: + dest: END + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 53 +tags: [] +title: Title +""") + + YAML_CONTENT_V59: Final = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + dest_if_really_stuck: null + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 6 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + requireNonnegativeInput: + value: False + default_outcome: + dest: (untitled state) + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: NumericInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + rule_input_3: {} + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_0 + unicode_str: '' + rows: + value: 1 + catchMisspellings: + value: false + default_outcome: + dest: END + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 1 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_0: {} + content: {} + default_outcome: {} +states_schema_version: 55 +tags: [] +title: Title +""") + + _LATEST_YAML_CONTENT: Final = YAML_CONTENT_V59 + + def test_load_from_v46_with_item_selection_input_interaction(self) -> None: + """Tests the migration of ItemSelectionInput rule inputs.""" + sample_yaml_content: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 46 +states: + (untitled state): + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + 
answer_groups: + - outcome: + dest: END + dest_if_really_stuck: null + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + -

    Choice 1

    + -

    Choice 2

    + -

    Choice Invalid

    + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + choices: + value: + - content_id: ca_choices_2 + html:

    Choice 1

    + - content_id: ca_choices_3 + html:

    Choice 2

    + maxAllowableSelectionCount: + value: 2 + minAllowableSelectionCount: + value: 1 + default_outcome: + dest: (untitled state) + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: ItemSelectionInput + solution: + answer_is_exclusive: true + correct_answer: + -

    Choice 1

    + explanation: + content_id: solution + html: This is solution for state1 + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_2: {} + ca_choices_3: {} + content: {} + default_outcome: {} + feedback_1: {} + solution: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_choices_2: {} + ca_choices_3: {} + content: {} + default_outcome: {} + feedback_1: {} + solution: {} + END: + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 41 +tags: [] +title: Title +""") + + latest_sample_yaml_content: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +next_content_id_index: 7 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html: '' + interaction: + answer_groups: + - outcome: + dest: END + dest_if_really_stuck: null feedback: - content_id: feedback_1 + content_id: feedback_2 html:

    Correct!

    labelled_as_correct: false missing_prerequisite_skill_id: null @@ -2888,53 +6962,317 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): rule_specs: - inputs: x: - contentId: rule_input_3 - normalizedStrSet: - - InputString + - ca_choices_4 + - ca_choices_5 + - invalid_content_id rule_type: Equals tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + choices: value: - content_id: ca_placeholder_2 - unicode_str: '' - rows: + - content_id: ca_choices_4 + html:

    Choice 1

    + - content_id: ca_choices_5 + html:

    Choice 2

    + maxAllowableSelectionCount: + value: 3 + minAllowableSelectionCount: value: 1 default_outcome: dest: (untitled state) + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_1 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: ItemSelectionInput + solution: + answer_is_exclusive: true + correct_answer: + - ca_choices_4 + explanation: + content_id: solution_3 + html: This is solution for state1 + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_4: {} + ca_choices_5: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + solution_3: {} + solicit_answer_details: false + END: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_6 + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_6: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: Title +""") + + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content) + self.assertEqual(exploration.to_yaml(), latest_sample_yaml_content) + + def test_load_from_v46_with_drag_and_drop_sort_input_interaction( + self + ) -> None: + """Tests the migration of DragAndDropSortInput rule inputs.""" + sample_yaml_content: str = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 46 +states: + (untitled state): + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - -

    Choice 1

    + -

    Choice 2

    + rule_type: IsEqualToOrdering + - inputs: + x: + - -

    Choice 1

    + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + - inputs: + x:

    Choice 1

    + y: 1 + rule_type: HasElementXAtPositionY + - inputs: + x:

    Choice 1

    + y:

    Choice 2

    + rule_type: HasElementXBeforeElementY + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + allowMultipleItemsInSamePosition: + value: true + choices: + value: + - content_id: ca_choices_2 + html:

    Choice 1

    + - content_id: ca_choices_3 + html:

    Choice 2

    + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: DragAndDropSortInput + solution: + answer_is_exclusive: true + correct_answer: + - -

    Choice 1

    + -

    Choice 2

    + explanation: + content_id: solution + html: This is solution for state1 + linked_skill_id: null next_content_id_index: 4 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_2: {} + ca_choices_2: {} + ca_choices_3: {} content: {} default_outcome: {} feedback_1: {} - rule_input_3: {} + solution: {} solicit_answer_details: false written_translations: translations_mapping: - ca_placeholder_2: {} + ca_choices_2: {} + ca_choices_3: {} content: {} default_outcome: {} feedback_1: {} - rule_input_3: {} + solution: {} + END: + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 41 +tags: [] +title: Title +""") + + latest_sample_yaml_content: str = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +next_content_id_index: 7 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + (untitled state): + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html: '' + interaction: + answer_groups: + - outcome: + dest: END + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - - ca_choices_4 + - ca_choices_5 + rule_type: IsEqualToOrdering + - inputs: + x: + - - ca_choices_4 + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + - inputs: + x: ca_choices_4 + y: 1 + rule_type: HasElementXAtPositionY + - inputs: + x: ca_choices_4 + y: ca_choices_5 + rule_type: HasElementXBeforeElementY + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + allowMultipleItemsInSamePosition: + value: true + choices: + value: + - content_id: ca_choices_4 + html:

    Choice 1

    + - content_id: ca_choices_5 + html:

    Choice 2

    + default_outcome: + dest: (untitled state) + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: DragAndDropSortInput + solution: + answer_is_exclusive: true + correct_answer: + - - ca_choices_4 + - ca_choices_5 + explanation: + content_id: solution_3 + html: This is solution for state1 + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_4: {} + ca_choices_5: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + solution_3: {} + solicit_answer_details: false END: + card_is_checkpoint: false classifier_model_id: null content: - content_id: content + content_id: content_6 html:

    Congratulations, you have finished!

    interaction: answer_groups: [] @@ -2946,72 +7284,37 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): hints: [] id: EndExploration solution: null - next_content_id_index: 0 - param_changes: [] - recorded_voiceovers: - voiceovers_mapping: - content: {} - solicit_answer_details: false - written_translations: - translations_mapping: - content: {} - New state: - classifier_model_id: null - content: - content_id: content - html: '' - interaction: - answer_groups: [] - confirmed_unclassified_answers: [] - customization_args: - placeholder: - value: - content_id: ca_placeholder_0 - unicode_str: '' - rows: - value: 1 - default_outcome: - dest: END - feedback: - content_id: default_outcome - html: '' - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - hints: [] - id: TextInput - solution: null - next_content_id_index: 1 + linked_skill_id: null param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + content_6: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} -states_schema_version: 41 +states_schema_version: 55 tags: [] title: Title """) + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content) + self.assertEqual(exploration.to_yaml(), latest_sample_yaml_content) - YAML_CONTENT_V47 = ( - """author_notes: '' + def test_load_from_v46_with_invalid_unicode_written_translations( + self + ) -> None: + """Tests the migration of unicode written translations rule inputs.""" + sample_yaml_content: str = ( + """author_notes: '' auto_tts_enabled: true blurb: '' category: Category correctness_feedback_enabled: false +edits_allowed: true init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} -schema_version: 47 +schema_version: 46 states: (untitled state): classifier_model_id: null @@ 
-3019,35 +7322,15 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): content_id: content html: '' interaction: - answer_groups: - - outcome: - dest: END - feedback: - content_id: feedback_1 - html:

    Correct!

    - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - rule_specs: - - inputs: - x: - contentId: rule_input_3 - normalizedStrSet: - - InputString - rule_type: Equals - tagged_skill_misconception_id: null - training_data: [] + answer_groups: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + buttonText: value: - content_id: ca_placeholder_2 - unicode_str: '' - rows: - value: 1 + content_id: ca_buttonText + unicode_str: Continue default_outcome: - dest: (untitled state) + dest: END feedback: content_id: default_outcome html: '' @@ -3056,25 +7339,30 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: Continue solution: null + linked_skill_id: null next_content_id_index: 4 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_2: {} + ca_buttonText: {} content: {} default_outcome: {} feedback_1: {} - rule_input_3: {} + solution: {} solicit_answer_details: false written_translations: translations_mapping: - ca_placeholder_2: {} + ca_buttonText: + bn: + data_format: html + needs_update: false + translation:

    hello

    content: {} default_outcome: {} feedback_1: {} - rule_input_3: {} + solution: {} END: classifier_model_id: null content: @@ -3090,6 +7378,7 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): hints: [] id: EndExploration solution: null + linked_skill_id: null next_content_id_index: 0 param_changes: [] recorded_voiceovers: @@ -3099,131 +7388,181 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): written_translations: translations_mapping: content: {} - New state: +states_schema_version: 41 +tags: [] +title: Title +""") + + latest_sample_yaml_content: str = ( + """author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: (untitled state) +language_code: en +next_content_id_index: 4 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + (untitled state): + card_is_checkpoint: true classifier_model_id: null content: - content_id: content + content_id: content_0 html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + buttonText: value: - content_id: ca_placeholder_0 - unicode_str: '' - rows: - value: 1 + content_id: ca_buttonText_2 + unicode_str: Continue default_outcome: dest: END + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_1 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: Continue solution: null - next_content_id_index: 1 + linked_skill_id: null param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_buttonText_2: {} + content_0: {} + default_outcome_1: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} -states_schema_version: 42 + END: + card_is_checkpoint: 
false + classifier_model_id: null + content: + content_id: content_3 + html:

    Congratulations, you have finished!

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_3: {} + solicit_answer_details: false +states_schema_version: 55 tags: [] title: Title """) + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content) + self.assertEqual(exploration.to_yaml(), latest_sample_yaml_content) - YAML_CONTENT_V48 = ( - """author_notes: '' -auto_tts_enabled: true + def test_fixing_invalid_labeled_as_correct_exp_data_by_migrating_to_v58( + self + ) -> None: + """Tests if the answer group's destination is state itself then + `labelled_as_correct` should be false. Migrates the invalid data. + """ + + sample_yaml_content_for_lab_as_correct: str = ( + """author_notes: '' +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en objective: '' param_changes: [] param_specs: {} -schema_version: 48 +schema_version: 57 states: - (untitled state): + Introduction: + card_is_checkpoint: true classifier_model_id: null content: content_id: content - html: '' + html:

    Numeric interaction validation

    interaction: answer_groups: - outcome: - dest: END + dest: Introduction + dest_if_really_stuck: null feedback: - content_id: feedback_1 - html:

    Correct!

    - labelled_as_correct: false + content_id: feedback_2 + html:

    fdfdf

    + labelled_as_correct: true missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null rule_specs: - inputs: - x: - contentId: rule_input_3 - normalizedStrSet: - - InputString + x: 25.0 + rule_type: Equals + - inputs: + x: 25.0 rule_type: Equals tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - placeholder: - value: - content_id: ca_placeholder_2 - unicode_str: '' - rows: - value: 1 + requireNonnegativeInput: + value: false default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: content_id: default_outcome - html: '' + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: NumericInput solution: null - next_content_id_index: 4 + linked_skill_id: null + next_content_id_index: 7 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_2: {} content: {} default_outcome: {} - feedback_1: {} - rule_input_3: {} + feedback_2: {} solicit_answer_details: false written_translations: translations_mapping: - ca_placeholder_2: {} - content: {} + content: + hi: + data_format: html + translation: + -

    choicewa

    + needs_update: false default_outcome: {} - feedback_1: {} - rule_input_3: {} - END: + feedback_2: {} + end: + card_is_checkpoint: false classifier_model_id: null content: content_id: content - html:

    Congratulations, you have finished!

    + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -3234,109 +7573,207 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): hints: [] id: EndExploration solution: null - next_content_id_index: 0 - param_changes: [] - recorded_voiceovers: - voiceovers_mapping: - content: {} - solicit_answer_details: false - written_translations: - translations_mapping: - content: {} - New state: - classifier_model_id: null - content: - content_id: content - html: '' - interaction: - answer_groups: [] - confirmed_unclassified_answers: [] - customization_args: - placeholder: - value: - content_id: ca_placeholder_0 - unicode_str: '' - rows: - value: 1 - default_outcome: - dest: END - feedback: - content_id: default_outcome - html: '' - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - hints: [] - id: TextInput - solution: null - next_content_id_index: 1 + linked_skill_id: null + next_content_id_index: 0 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} content: {} - default_outcome: {} solicit_answer_details: false written_translations: translations_mapping: - ca_placeholder_0: {} content: {} - default_outcome: {} -states_schema_version: 43 +states_schema_version: 52 tags: [] -title: Title +title: '' """) - YAML_CONTENT_V49 = ( - """author_notes: '' -auto_tts_enabled: true + latest_sample_yaml_content_for_lab_as_correct: str = ( + """author_notes: '' +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en +next_content_id_index: 4 objective: '' param_changes: [] param_specs: {} -schema_version: 49 +schema_version: 60 states: - (untitled state): + Introduction: card_is_checkpoint: true classifier_model_id: null content: - content_id: content - html: '' + content_id: 
content_0 + html:

    Numeric interaction validation

    interaction: answer_groups: - outcome: - dest: END + dest: Introduction + dest_if_really_stuck: null feedback: - content_id: feedback_1 - html:

    Correct!

    + content_id: feedback_2 + html:

    fdfdf

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null rule_specs: - inputs: - x: - contentId: rule_input_3 - normalizedStrSet: - - InputString + x: 25.0 rule_type: Equals tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + requireNonnegativeInput: + value: false + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: NumericInput + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_0: {} + default_outcome_1: {} + feedback_2: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_3 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_3: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: '' +""") + + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_lab_as_correct) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_lab_as_correct) + + def test_fixing_of_rte_content_by_migrating_to_v_58( + self + ) -> None: + """Tests the fixing of RTE content data from version less than 58.""" + +# pylint: disable=single-line-pragma +# pylint: disable=line-too-long + sample_yaml_content_for_rte: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 57 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html: '

    Content of RTE

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + buttonText: value: - content_id: ca_placeholder_2 - unicode_str: '' - rows: - value: 1 + content_id: ca_buttonText_0 + unicode_str: Continueeeeeeeeeeeeeeeeeeeeeee default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: content_id: default_outcome html: '' @@ -3345,41 +7782,52 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: Continue solution: null - next_content_id_index: 4 + linked_skill_id: null + next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_2: {} + ca_buttonText_0: + hi: + filename: default_outcome-hi-en-7hl9iw3az8.mp3 + file_size_bytes: 37198 + needs_update: false + duration_secs: 2.324875 content: {} default_outcome: {} - feedback_1: {} - rule_input_3: {} solicit_answer_details: false written_translations: translations_mapping: - ca_placeholder_2: {} + ca_buttonText_0: + hi: + data_format: html + translation: '

    ' + needs_update: false content: {} default_outcome: {} - feedback_1: {} - rule_input_3: {} - END: + end: card_is_checkpoint: false classifier_model_id: null content: content_id: content - html:

    Congratulations, you have finished!

    + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: - value: [] + value: + - id1 + - id2 + - id3 + - id4 default_outcome: null hints: [] id: EndExploration solution: null + linked_skill_id: null next_content_id_index: 0 param_changes: [] recorded_voiceovers: @@ -3389,249 +7837,388 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): written_translations: translations_mapping: content: {} - New state: +states_schema_version: 52 +tags: [] +title: '' +""") + +# pylint: disable=single-line-pragma +# pylint: disable=line-too-long +# pylint: disable=anomalous-backslash-in-string + latest_sample_yaml_content_for_rte: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 4 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true classifier_model_id: null content: - content_id: content - html: '' + content_id: content_0 + html: '

    Content of RTE

    + + + + + + + + + + + + + + + ' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + buttonText: value: - content_id: ca_placeholder_0 - unicode_str: '' - rows: - value: 1 + content_id: ca_buttonText_2 + unicode_str: Continue default_outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_1 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: Continue solution: null - next_content_id_index: 1 + linked_skill_id: null param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_buttonText_2: + hi: + duration_secs: 2.324875 + file_size_bytes: 37198 + filename: default_outcome-hi-en-7hl9iw3az8.mp3 + needs_update: true + content_0: {} + default_outcome_1: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} -states_schema_version: 44 + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_3 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: + - id1 + - id2 + - id3 + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_3: {} + solicit_answer_details: false +states_schema_version: 55 tags: [] -title: Title +title: '' """) + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_rte) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_rte) + + def test_fixing_invalid_continue_and_end_exp_data_by_migrating_to_v58( + self + ) -> None: + """Tests the migration of invalid continue and end exploration data + from version less than 58. + """ - YAML_CONTENT_V50 = ( - """author_notes: '' -auto_tts_enabled: true +# pylint: disable=single-line-pragma +# pylint: disable=line-too-long + sample_yaml_content_for_cont_and_end_interac_1: str = ( + """author_notes: '' +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en objective: '' param_changes: [] param_specs: {} -schema_version: 50 +schema_version: 57 states: - (untitled state): + Introduction: card_is_checkpoint: true classifier_model_id: null content: content_id: content - html: '' + html:

    Continue and End interaction validation

    interaction: - answer_groups: - - outcome: - dest: END - feedback: - content_id: feedback_1 - html:

    Correct!

    - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - rule_specs: - - inputs: - x: - contentId: rule_input_3 - normalizedStrSet: - - InputString - rule_type: Equals - tagged_skill_misconception_id: null - training_data: [] + answer_groups: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + buttonText: value: - content_id: ca_placeholder_2 - unicode_str: '' - rows: - value: 1 + content_id: ca_buttonText_0 + unicode_str: Continueeeeeeeeeeeeeeeeeeeeeee default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: content_id: default_outcome - html: '' + html: '' labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: Continue solution: null linked_skill_id: null - next_content_id_index: 4 + next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_2: {} + ca_buttonText_0: + hi: + filename: default_outcome-hi-en-7hl9iw3az8.mp3 + file_size_bytes: 37198 + needs_update: false + duration_secs: 2.324875 content: {} default_outcome: {} - feedback_1: {} - rule_input_3: {} solicit_answer_details: false written_translations: translations_mapping: - ca_placeholder_2: {} + ca_buttonText_0: + hi: + data_format: html + translation:

    choicewa

    + needs_update: false content: {} default_outcome: {} - feedback_1: {} - rule_input_3: {} - END: + end: card_is_checkpoint: false classifier_model_id: null content: content_id: content - html:

    Congratulations, you have finished!

    + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: - value: [] + value: + - id1 + - id2 + - id3 + - id4 default_outcome: null hints: [] id: EndExploration solution: null linked_skill_id: null - next_content_id_index: 0 + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 52 +tags: [] +title: '' +""") + +# pylint: disable=single-line-pragma +# pylint: disable=line-too-long + latest_sample_yaml_content_for_cont_and_end_interac_1: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 4 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html:

    Continue and End interaction validation

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + buttonText: + value: + content_id: ca_buttonText_2 + unicode_str: Continue + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: Continue + solution: null + linked_skill_id: null param_changes: [] recorded_voiceovers: voiceovers_mapping: - content: {} + ca_buttonText_2: + hi: + duration_secs: 2.324875 + file_size_bytes: 37198 + filename: default_outcome-hi-en-7hl9iw3az8.mp3 + needs_update: true + content_0: {} + default_outcome_1: {} solicit_answer_details: false - written_translations: - translations_mapping: - content: {} - New state: + end: + card_is_checkpoint: false classifier_model_id: null content: - content_id: content - html: '' + content_id: content_3 + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + recommendedExplorationIds: value: - content_id: ca_placeholder_0 - unicode_str: '' - rows: - value: 1 - default_outcome: - dest: END - feedback: - content_id: default_outcome - html: '' - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null + - id1 + - id2 + - id3 + default_outcome: null hints: [] - id: TextInput + id: EndExploration solution: null linked_skill_id: null - next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + content_3: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} -states_schema_version: 45 +states_schema_version: 55 tags: [] -title: Title +title: '' """) - YAML_CONTENT_V51 = ( - """author_notes: '' -auto_tts_enabled: true + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_cont_and_end_interac_1) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_cont_and_end_interac_1) + + sample_yaml_content_for_cont_and_end_interac_2: str = ( + """author_notes: '' +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) -language_code: en +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: hi objective: '' param_changes: [] param_specs: {} -schema_version: 51 +schema_version: 57 states: - (untitled state): + Introduction: card_is_checkpoint: true classifier_model_id: null content: content_id: content - html: '' + html:

    Continue and End interaction validation

    interaction: - answer_groups: - - outcome: - dest: END - feedback: - content_id: feedback_1 - html:

    Correct!

    - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - rule_specs: - - inputs: - x: - contentId: rule_input_3 - normalizedStrSet: - - InputString - rule_type: Equals - tagged_skill_misconception_id: null - training_data: [] + answer_groups: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + buttonText: value: - content_id: ca_placeholder_2 - unicode_str: '' - rows: - value: 1 + content_id: ca_buttonText_0 + unicode_str: Continueeeeeeeeeeeeeeeeeeeeeee default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: content_id: default_outcome html: '' @@ -3640,38 +8227,38 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: Continue solution: null linked_skill_id: null - next_content_id_index: 4 + next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_2: {} + ca_buttonText_0: {} content: {} default_outcome: {} - feedback_1: {} - rule_input_3: {} solicit_answer_details: false written_translations: translations_mapping: - ca_placeholder_2: {} + ca_buttonText_0: {} content: {} default_outcome: {} - feedback_1: {} - rule_input_3: {} - END: + end: card_is_checkpoint: false classifier_model_id: null content: content_id: content - html:

    Congratulations, you have finished!

    + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: - value: [] + value: + - id1 + - id2 + - id3 + - id4 default_outcome: null hints: [] id: EndExploration @@ -3686,135 +8273,378 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): written_translations: translations_mapping: content: {} - New state: +states_schema_version: 52 +tags: [] +title: '' +""") + + latest_sample_yaml_content_for_cont_and_end_interac_2: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: hi +next_content_id_index: 4 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true classifier_model_id: null content: - content_id: content - html: '' + content_id: content_0 + html:

    Continue and End interaction validation

    interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + buttonText: value: - content_id: ca_placeholder_0 - unicode_str: '' - rows: - value: 1 + content_id: ca_buttonText_2 + unicode_str: Continue default_outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_1 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: Continue solution: null linked_skill_id: null - next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_buttonText_2: {} + content_0: {} + default_outcome_1: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} -states_schema_version: 46 + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_3 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: + - id1 + - id2 + - id3 + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_3: {} + solicit_answer_details: false +states_schema_version: 55 tags: [] -title: Title +title: '' """) - YAML_CONTENT_V52 = ( - """author_notes: '' -auto_tts_enabled: true + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_cont_and_end_interac_2) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_cont_and_end_interac_2) + + def test_fixing_invalid_numeric_exp_data_by_migrating_to_v58( + self + ) -> None: + """Tests the migration of invalid NumericInput interaction exploration + data from version less than 58. + """ + +# pylint: disable=single-line-pragma +# pylint: disable=line-too-long + sample_yaml_content_for_numeric_interac: str = ( + """author_notes: '' +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en objective: '' param_changes: [] param_specs: {} -schema_version: 52 +schema_version: 57 states: - (untitled state): + Introduction: card_is_checkpoint: true classifier_model_id: null content: content_id: content - html: '' + html:

    Numeric interaction validation

    interaction: answer_groups: - outcome: - dest: END + dest: Introduction + dest_if_really_stuck: null feedback: content_id: feedback_1 - html:

    Correct!

    + html:

    fdfdf

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null rule_specs: - inputs: - x: - contentId: rule_input_3 - normalizedStrSet: - - InputString + x: 25.0 + rule_type: Equals + - inputs: + x: 25.0 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + a: 18.0 + b: 18.0 + rule_type: IsInclusivelyBetween + - inputs: + x: 25.0 + rule_type: Equals + - inputs: + tol: -5.0 + x: 5.0 + rule_type: IsWithinTolerance + - inputs: + a: 30.0 + b: 39.0 + rule_type: IsInclusivelyBetween + - inputs: + a: 17.0 + b: 15.0 + rule_type: IsInclusivelyBetween + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_3 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 25.0 + rule_type: IsLessThanOrEqualTo + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_4 + html:

    cc

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 15.0 + rule_type: IsLessThanOrEqualTo + - inputs: + x: 10.0 + rule_type: Equals + - inputs: + x: 5.0 + rule_type: IsLessThan + - inputs: + a: 9.0 + b: 5.0 + rule_type: IsInclusivelyBetween + - inputs: + tol: 2.0 + x: 5.0 + rule_type: IsWithinTolerance + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_5 + html:

    cv

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 40.0 + rule_type: IsGreaterThanOrEqualTo + - inputs: + x: 50.0 + rule_type: Equals + - inputs: + x: 40.0 + rule_type: IsGreaterThan + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_6 + html:

    vb

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: string + rule_type: IsLessThanOrEqualTo + - inputs: + x: string rule_type: Equals + - inputs: + x: string + rule_type: IsLessThan + - inputs: + a: string + b: 9.0 + rule_type: IsInclusivelyBetween + - inputs: + tol: string + x: 5.0 + rule_type: IsWithinTolerance + - inputs: + x: string + rule_type: IsGreaterThanOrEqualTo + - inputs: + x: string + rule_type: IsGreaterThan + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_7 + html:

    vb

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + tol: string + x: 60.0 + rule_type: IsWithinTolerance + - inputs: + a: string + b: 10.0 + rule_type: IsInclusivelyBetween tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - placeholder: - value: - content_id: ca_placeholder_2 - unicode_str: '' - rows: - value: 1 + requireNonnegativeInput: + value: false default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: content_id: default_outcome - html: '' + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null - hints: [] - id: TextInput + hints: + - hint_content: + content_id: hint + html: '' + - hint_content: + content_id: hint_2 + html: '' + id: NumericInput solution: null linked_skill_id: null - next_content_id_index: 4 + next_content_id_index: 7 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_2: {} content: {} default_outcome: {} + hint: {} + hint_2: {} feedback_1: {} - rule_input_3: {} + feedback_2: {} + feedback_3: {} + feedback_4: {} + feedback_5: {} + feedback_6: {} + feedback_7: {} solicit_answer_details: false written_translations: translations_mapping: - ca_placeholder_2: {} content: {} default_outcome: {} + hint: {} + hint_2: {} feedback_1: {} - rule_input_3: {} - END: + feedback_2: {} + feedback_3: {} + feedback_4: {} + feedback_5: {} + feedback_6: {} + feedback_7: {} + end: card_is_checkpoint: false classifier_model_id: null content: content_id: content - html:

    Congratulations, you have finished!

    + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -3835,135 +8665,406 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): written_translations: translations_mapping: content: {} - New state: +states_schema_version: 52 +tags: [] +title: '' +""") + +# pylint: disable=single-line-pragma +# pylint: disable=line-too-long + latest_sample_yaml_content_for_numeric_interac: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 7 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true classifier_model_id: null content: - content_id: content - html: '' + content_id: content_0 + html:

    Numeric interaction validation

    interaction: - answer_groups: [] + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 18.0 + rule_type: Equals + - inputs: + x: 25.0 + rule_type: Equals + - inputs: + tol: 5.0 + x: 5.0 + rule_type: IsWithinTolerance + - inputs: + a: 30.0 + b: 39.0 + rule_type: IsInclusivelyBetween + - inputs: + a: 15.0 + b: 17.0 + rule_type: IsInclusivelyBetween + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_3 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 25.0 + rule_type: IsLessThanOrEqualTo + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_4 + html:

    cv

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 40.0 + rule_type: IsGreaterThanOrEqualTo + tagged_skill_misconception_id: null + training_data: [] confirmed_unclassified_answers: [] customization_args: - placeholder: - value: - content_id: ca_placeholder_0 - unicode_str: '' - rows: - value: 1 + requireNonnegativeInput: + value: false default_outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: - content_id: default_outcome - html: '' + content_id: default_outcome_1 + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null + hints: + - hint_content: + content_id: hint_5 + html: '' + id: NumericInput + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_0: {} + default_outcome_1: {} + feedback_2: {} + feedback_3: {} + feedback_4: {} + hint_5: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_6 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null hints: [] - id: TextInput + id: EndExploration solution: null linked_skill_id: null - next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + content_6: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} -states_schema_version: 47 +states_schema_version: 55 tags: [] -title: Title +title: '' """) - YAML_CONTENT_V53 = ( - """author_notes: '' -auto_tts_enabled: true + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_numeric_interac) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_numeric_interac) + + def test_fixing_invalid_fraction_exp_data_by_migrating_to_v58( + self + ) -> None: + """Tests the migration of invalid FractionInput interaction exploration + data from version less than 58. + """ + + sample_yaml_content_for_fraction_interac: str = ( + """author_notes: '' +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en objective: '' param_changes: [] param_specs: {} -schema_version: 53 +schema_version: 57 states: - (untitled state): + Introduction: card_is_checkpoint: true classifier_model_id: null content: content_id: content - html: '' + html:

    Numeric interaction validation

    interaction: answer_groups: - outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: - content_id: feedback_1 - html:

    Correct!

    + content_id: feedback_8 + html:

    jj

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null rule_specs: - inputs: - x: - contentId: rule_input_3 - normalizedStrSet: - - InputString - rule_type: Equals + f: + denominator: 3 + isNegative: false + numerator: 17 + wholeNumber: 0 + rule_type: IsExactlyEqualTo + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 17 + wholeNumber: 0 + rule_type: IsExactlyEqualTo + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_9 + html:

    dfd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 17 + wholeNumber: 0 + rule_type: IsExactlyEqualTo + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_10 + html:

    hj

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 11 + wholeNumber: 0 + rule_type: IsGreaterThan + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 14 + wholeNumber: 0 + rule_type: IsExactlyEqualTo + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_11 + html:

    hj

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 11 + wholeNumber: 0 + rule_type: IsLessThan + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 7 + wholeNumber: 0 + rule_type: IsExactlyEqualTo + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_12 + html:

    ll

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 3 + rule_type: HasDenominatorEqualTo + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 11 + wholeNumber: 0 + rule_type: HasFractionalPartExactlyEqualTo + - inputs: + x: string + rule_type: HasDenominatorEqualTo + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_13 + html:

    hj

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 19 + wholeNumber: 0 + rule_type: IsExactlyEqualTo tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - placeholder: + allowImproperFraction: + value: true + allowNonzeroIntegerPart: + value: true + customPlaceholder: value: - content_id: ca_placeholder_2 + content_id: ca_customPlaceholder_7 unicode_str: '' - rows: - value: 1 + requireSimplestForm: + value: false default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: content_id: default_outcome - html: '' + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: TextInput + id: FractionInput solution: null linked_skill_id: null - next_content_id_index: 4 + next_content_id_index: 14 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_2: {} + ca_customPlaceholder_7: {} content: {} default_outcome: {} - feedback_1: {} - rule_input_3: {} + feedback_10: {} + feedback_11: {} + feedback_12: {} + feedback_13: {} + feedback_8: {} + feedback_9: {} solicit_answer_details: false written_translations: translations_mapping: - ca_placeholder_2: {} + ca_customPlaceholder_7: {} content: {} default_outcome: {} - feedback_1: {} - rule_input_3: {} - END: + feedback_10: {} + feedback_11: {} + feedback_12: {} + feedback_13: {} + feedback_8: {} + feedback_9: {} + end: card_is_checkpoint: false classifier_model_id: null content: content_id: content - html:

    Congratulations, you have finished!

    + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -3984,128 +9085,153 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): written_translations: translations_mapping: content: {} - New state: - classifier_model_id: null - content: - content_id: content - html: '' - interaction: - answer_groups: [] - confirmed_unclassified_answers: [] - customization_args: - placeholder: - value: - content_id: ca_placeholder_0 - unicode_str: '' - rows: - value: 1 - default_outcome: - dest: END - feedback: - content_id: default_outcome - html: '' - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - hints: [] - id: TextInput - solution: null - linked_skill_id: null - next_content_id_index: 1 - param_changes: [] - recorded_voiceovers: - voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} - solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} -states_schema_version: 48 +states_schema_version: 52 tags: [] -title: Title +title: '' """) - YAML_CONTENT_V54 = ( - """author_notes: '' -auto_tts_enabled: true + latest_sample_yaml_content_for_fraction_interac: str = ( + """author_notes: '' +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en +next_content_id_index: 8 objective: '' param_changes: [] param_specs: {} -schema_version: 54 +schema_version: 60 states: - (untitled state): + Introduction: card_is_checkpoint: true classifier_model_id: null content: - content_id: content - html: '' + content_id: content_0 + html:

    Numeric interaction validation

    interaction: answer_groups: - outcome: - dest: END + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html:

    jj

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 17 + wholeNumber: 0 + rule_type: IsExactlyEqualTo + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null feedback: - content_id: feedback_1 - html:

    Correct!

    + content_id: feedback_3 + html:

    hj

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null rule_specs: - inputs: - x: 6 - rule_type: Equals + f: + denominator: 3 + isNegative: false + numerator: 11 + wholeNumber: 0 + rule_type: IsGreaterThan + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_4 + html:

    hj

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + f: + denominator: 3 + isNegative: false + numerator: 11 + wholeNumber: 0 + rule_type: IsLessThan + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_5 + html:

    ll

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 3 + rule_type: HasDenominatorEqualTo tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - requireNonnegativeInput: - value: False + allowImproperFraction: + value: true + allowNonzeroIntegerPart: + value: true + customPlaceholder: + value: + content_id: ca_customPlaceholder_6 + unicode_str: '' + requireSimplestForm: + value: false default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: - content_id: default_outcome - html: '' + content_id: default_outcome_1 + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: NumericInput + id: FractionInput solution: null linked_skill_id: null - next_content_id_index: 4 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_2: {} - content: {} - default_outcome: {} - feedback_1: {} - rule_input_3: {} + ca_customPlaceholder_6: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + feedback_3: {} + feedback_4: {} + feedback_5: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_2: {} - content: {} - default_outcome: {} - feedback_1: {} - rule_input_3: {} - END: + end: card_is_checkpoint: false classifier_model_id: null content: - content_id: content - html:

    Congratulations, you have finished!

    + content_id: content_7 + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -4117,153 +9243,124 @@ class SchemaMigrationUnitTests(test_utils.GenericTestBase): id: EndExploration solution: null linked_skill_id: null - next_content_id_index: 0 - param_changes: [] - recorded_voiceovers: - voiceovers_mapping: - content: {} - solicit_answer_details: false - written_translations: - translations_mapping: - content: {} - New state: - classifier_model_id: null - content: - content_id: content - html: '' - interaction: - answer_groups: [] - confirmed_unclassified_answers: [] - customization_args: - requireNonnegativeInput: - value: False - default_outcome: - dest: END - feedback: - content_id: default_outcome - html: '' - labelled_as_correct: false - missing_prerequisite_skill_id: null - param_changes: [] - refresher_exploration_id: null - hints: [] - id: NumericInput - solution: null - linked_skill_id: null - next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + content_7: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} -states_schema_version: 49 +states_schema_version: 55 tags: [] -title: Title +title: '' """) - _LATEST_YAML_CONTENT = YAML_CONTENT_V54 + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_fraction_interac) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_fraction_interac) - def test_load_from_v46_with_item_selection_input_interaction(self): - """Tests the migration of ItemSelectionInput rule inputs.""" - sample_yaml_content = ( + sample_yaml_content_for_fraction_interac_2: str = ( """author_notes: '' -auto_tts_enabled: true +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true 
+init_state_name: Introduction language_code: en objective: '' param_changes: [] param_specs: {} -schema_version: 46 +schema_version: 57 states: - (untitled state): + Introduction: + card_is_checkpoint: true classifier_model_id: null content: content_id: content - html: '' + html:

    Numeric interaction validation

    interaction: answer_groups: - outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: - content_id: feedback_1 - html:

    Correct!

    + content_id: feedback_8 + html:

    jj

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null rule_specs: - inputs: - x: - -

    Choice 1

    - -

    Choice 2

    - -

    Choice Invalid

    - rule_type: Equals + f: + denominator: 3 + isNegative: false + numerator: 17 + wholeNumber: 0 + rule_type: IsExactlyEqualTo + - inputs: + f: + denominator: 17 + isNegative: false + numerator: 3 + wholeNumber: 0 + rule_type: IsExactlyEqualTo tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - choices: + allowImproperFraction: + value: false + allowNonzeroIntegerPart: + value: true + customPlaceholder: value: - - content_id: ca_choices_2 - html:

    Choice 1

    - - content_id: ca_choices_3 - html:

    Choice 2

    - maxAllowableSelectionCount: - value: 2 - minAllowableSelectionCount: - value: 1 + content_id: ca_customPlaceholder_7 + unicode_str: '' + requireSimplestForm: + value: false default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: content_id: default_outcome - html: '' + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: ItemSelectionInput - solution: - answer_is_exclusive: true - correct_answer: - -

    Choice 1

    - explanation: - content_id: solution - html: This is solution for state1 - next_content_id_index: 4 + id: FractionInput + solution: null + linked_skill_id: null + next_content_id_index: 14 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_choices_2: {} - ca_choices_3: {} + ca_customPlaceholder_7: {} content: {} default_outcome: {} - feedback_1: {} - solution: {} + feedback_10: {} + feedback_11: {} + feedback_12: {} + feedback_8: {} solicit_answer_details: false written_translations: translations_mapping: - ca_choices_2: {} - ca_choices_3: {} + ca_customPlaceholder_7: {} content: {} default_outcome: {} - feedback_1: {} - solution: {} - END: + feedback_10: {} + feedback_11: {} + feedback_12: {} + feedback_8: {} + end: + card_is_checkpoint: false classifier_model_id: null content: content_id: content - html:

    Congratulations, you have finished!

    + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -4274,6 +9371,7 @@ def test_load_from_v46_with_item_selection_input_interaction(self): hints: [] id: EndExploration solution: null + linked_skill_id: null next_content_id_index: 0 param_changes: [] recorded_voiceovers: @@ -4283,106 +9381,94 @@ def test_load_from_v46_with_item_selection_input_interaction(self): written_translations: translations_mapping: content: {} -states_schema_version: 41 +states_schema_version: 52 tags: [] -title: Title +title: '' """) - latest_sample_yaml_content = ( + latest_sample_yaml_content_for_fraction_interac_2: str = ( """author_notes: '' -auto_tts_enabled: true +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en +next_content_id_index: 5 objective: '' param_changes: [] param_specs: {} -schema_version: 54 +schema_version: 60 states: - (untitled state): + Introduction: card_is_checkpoint: true classifier_model_id: null content: - content_id: content - html: '' + content_id: content_0 + html:

    Numeric interaction validation

    interaction: answer_groups: - outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: - content_id: feedback_1 - html:

    Correct!

    + content_id: feedback_2 + html:

    jj

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null rule_specs: - inputs: - x: - - ca_choices_2 - - ca_choices_3 - - invalid_content_id - rule_type: Equals + f: + denominator: 17 + isNegative: false + numerator: 3 + wholeNumber: 0 + rule_type: IsExactlyEqualTo tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - choices: + allowImproperFraction: + value: false + allowNonzeroIntegerPart: + value: true + customPlaceholder: value: - - content_id: ca_choices_2 - html:

    Choice 1

    - - content_id: ca_choices_3 - html:

    Choice 2

    - maxAllowableSelectionCount: - value: 2 - minAllowableSelectionCount: - value: 1 + content_id: ca_customPlaceholder_3 + unicode_str: '' + requireSimplestForm: + value: false default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: - content_id: default_outcome - html: '' + content_id: default_outcome_1 + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: ItemSelectionInput - solution: - answer_is_exclusive: true - correct_answer: - - ca_choices_2 - explanation: - content_id: solution - html: This is solution for state1 + id: FractionInput + solution: null linked_skill_id: null - next_content_id_index: 4 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_choices_2: {} - ca_choices_3: {} - content: {} - default_outcome: {} - feedback_1: {} - solution: {} + ca_customPlaceholder_3: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_choices_2: {} - ca_choices_3: {} - content: {} - default_outcome: {} - feedback_1: {} - solution: {} - END: + end: card_is_checkpoint: false classifier_model_id: null content: - content_id: content - html:

    Congratulations, you have finished!

    + content_id: content_4 + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -4394,128 +9480,177 @@ def test_load_from_v46_with_item_selection_input_interaction(self): id: EndExploration solution: null linked_skill_id: null - next_content_id_index: 0 param_changes: [] recorded_voiceovers: voiceovers_mapping: - content: {} + content_4: {} solicit_answer_details: false - written_translations: - translations_mapping: - content: {} -states_schema_version: 49 +states_schema_version: 55 tags: [] -title: Title +title: '' """) + exploration = exp_domain.Exploration.from_yaml( - 'eid', sample_yaml_content) - self.assertEqual(exploration.to_yaml(), latest_sample_yaml_content) + 'eid', sample_yaml_content_for_fraction_interac_2) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_fraction_interac_2) + + def test_fixing_invalid_multiple_choice_exp_data_by_migrating_to_v58( + self + ) -> None: + """Tests the migration of invalid MultipleChoice interaction exploration + data from version less than 58. + """ - def test_load_from_v46_with_drag_and_drop_sort_input_interaction(self): - """Tests the migration of DragAndDropSortInput rule inputs.""" - sample_yaml_content = ( + sample_yaml_content_for_multiple_choice_interac: str = ( """author_notes: '' -auto_tts_enabled: true +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en objective: '' param_changes: [] param_specs: {} -schema_version: 46 +schema_version: 57 states: - (untitled state): + Introduction: + card_is_checkpoint: true classifier_model_id: null content: content_id: content - html: '' + html:

    Numeric interaction validation

    interaction: answer_groups: - outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: - content_id: feedback_1 - html:

    Correct!

    + content_id: feedback_17 + html: '' labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null rule_specs: - inputs: - x: - - -

    Choice 1

    - -

    Choice 2

    - rule_type: IsEqualToOrdering + x: 0 + rule_type: Equals - inputs: - x: - - -

    Choice 1

    - rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + x: 0 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_18 + html:

    a

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: - inputs: - x:

    Choice 1

    - y: 1 - rule_type: HasElementXAtPositionY + x: 2 + rule_type: Equals - inputs: - x:

    Choice 1

    - y:

    Choice 2

    - rule_type: HasElementXBeforeElementY + x: 0 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_19 + html:

    aa

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: 3 + rule_type: Equals tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - allowMultipleItemsInSamePosition: - value: true choices: value: - - content_id: ca_choices_2 - html:

    Choice 1

    - - content_id: ca_choices_3 + - content_id: ca_choices_13 + html: '' + - content_id: ca_choices_14 + html: '' + - content_id: ca_choices_15 + html:

    1

    + - content_id: ca_choices_16 + html:

    1

    + - content_id: ca_choices_17 html:

    Choice 2

    + showChoicesInShuffledOrder: + value: true default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: content_id: default_outcome - html: '' + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: DragAndDropSortInput - solution: - answer_is_exclusive: true - correct_answer: - - -

    Choice 1

    - -

    Choice 2

    - explanation: - content_id: solution - html: This is solution for state1 + id: MultipleChoiceInput + solution: null linked_skill_id: null - next_content_id_index: 4 + next_content_id_index: 20 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_choices_2: {} - ca_choices_3: {} + ca_choices_13: + hi: + filename: default_outcome-hi-en-7hl9iw3az8.mp3 + file_size_bytes: 37198 + needs_update: false + duration_secs: 2.324875 + ca_choices_14: {} + ca_choices_15: {} + ca_choices_16: {} + ca_choices_17: {} content: {} default_outcome: {} - feedback_1: {} - solution: {} + feedback_17: {} + feedback_18: {} + feedback_19: {} solicit_answer_details: false written_translations: translations_mapping: - ca_choices_2: {} - ca_choices_3: {} + ca_choices_13: + hi: + data_format: html + translation:

    choicewa

    + needs_update: false + ca_choices_14: {} + ca_choices_15: {} + ca_choices_16: {} + ca_choices_17: {} content: {} default_outcome: {} - feedback_1: {} - solution: {} - END: + feedback_17: {} + feedback_18: {} + feedback_19: {} + end: + card_is_checkpoint: false classifier_model_id: null content: content_id: content - html:

    Congratulations, you have finished!

    + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -4536,116 +9671,114 @@ def test_load_from_v46_with_drag_and_drop_sort_input_interaction(self): written_translations: translations_mapping: content: {} -states_schema_version: 41 +states_schema_version: 52 tags: [] -title: Title +title: '' """) - latest_sample_yaml_content = ( + latest_sample_yaml_content_for_multiple_choice_interac: str = ( """author_notes: '' -auto_tts_enabled: true +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en +next_content_id_index: 8 objective: '' param_changes: [] param_specs: {} -schema_version: 54 +schema_version: 60 states: - (untitled state): + Introduction: card_is_checkpoint: true classifier_model_id: null content: - content_id: content - html: '' + content_id: content_0 + html:

    Numeric interaction validation

    interaction: answer_groups: - outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: - content_id: feedback_1 - html:

    Correct!

    + content_id: feedback_2 + html: '' labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null rule_specs: - inputs: - x: - - - ca_choices_2 - - ca_choices_3 - rule_type: IsEqualToOrdering - - inputs: - x: - - - ca_choices_2 - rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition - - inputs: - x: ca_choices_2 - y: 1 - rule_type: HasElementXAtPositionY + x: 0 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_3 + html:

    a

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: - inputs: - x: ca_choices_2 - y: ca_choices_3 - rule_type: HasElementXBeforeElementY + x: 2 + rule_type: Equals tagged_skill_misconception_id: null training_data: [] confirmed_unclassified_answers: [] customization_args: - allowMultipleItemsInSamePosition: - value: true choices: value: - - content_id: ca_choices_2 + - content_id: ca_choices_4 html:

    Choice 1

    - - content_id: ca_choices_3 + - content_id: ca_choices_5 + html:

    1

    + - content_id: ca_choices_6 html:

    Choice 2

    + showChoicesInShuffledOrder: + value: true default_outcome: - dest: (untitled state) + dest: end + dest_if_really_stuck: null feedback: - content_id: default_outcome - html: '' + content_id: default_outcome_1 + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: DragAndDropSortInput - solution: - answer_is_exclusive: true - correct_answer: - - - ca_choices_2 - - ca_choices_3 - explanation: - content_id: solution - html: This is solution for state1 + id: MultipleChoiceInput + solution: null linked_skill_id: null - next_content_id_index: 4 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_choices_2: {} - ca_choices_3: {} - content: {} - default_outcome: {} - feedback_1: {} - solution: {} + ca_choices_4: + hi: + duration_secs: 2.324875 + file_size_bytes: 37198 + filename: default_outcome-hi-en-7hl9iw3az8.mp3 + needs_update: true + ca_choices_5: {} + ca_choices_6: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + feedback_3: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_choices_2: {} - ca_choices_3: {} - content: {} - default_outcome: {} - feedback_1: {} - solution: {} - END: + end: card_is_checkpoint: false classifier_model_id: null content: - content_id: content - html:

    Congratulations, you have finished!

    + content_id: content_7 + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -4657,90 +9790,170 @@ def test_load_from_v46_with_drag_and_drop_sort_input_interaction(self): id: EndExploration solution: null linked_skill_id: null - next_content_id_index: 0 param_changes: [] recorded_voiceovers: voiceovers_mapping: - content: {} + content_7: {} solicit_answer_details: false - written_translations: - translations_mapping: - content: {} -states_schema_version: 49 +states_schema_version: 55 tags: [] -title: Title +title: '' """) + exploration = exp_domain.Exploration.from_yaml( - 'eid', sample_yaml_content) - self.assertEqual(exploration.to_yaml(), latest_sample_yaml_content) + 'eid', sample_yaml_content_for_multiple_choice_interac) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_multiple_choice_interac) + + def test_fixing_invalid_item_selec_exp_data_by_migrating_to_v58( + self + ) -> None: + """Tests the migration of invalid ItemSelection interaction exploration + data from version less than 58. + """ - def test_load_from_v46_with_invalid_unicode_written_translations(self): - """Tests the migration of unicode written translations rule inputs.""" - sample_yaml_content = ( +# pylint: disable=single-line-pragma +# pylint: disable=line-too-long + sample_yaml_content_for_item_selection_interac_1: str = ( """author_notes: '' -auto_tts_enabled: true +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en objective: '' param_changes: [] param_specs: {} -schema_version: 46 +schema_version: 57 states: - (untitled state): + Introduction: + card_is_checkpoint: true classifier_model_id: null content: content_id: content - html: '' + html:

    Numeric interaction validation

    interaction: - answer_groups: [] + answer_groups: + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_24 + html:

    dff

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_20 + - ca_choices_21 + rule_type: Equals + - inputs: + x: + - ca_choices_22 + rule_type: Equals + - inputs: + x: + - ca_choices_20 + rule_type: ContainsAtLeastOneOf + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_25 + html:

    gg

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_20 + rule_type: Equals + - inputs: + x: + - ca_choices_20 + - ca_choices_21 + - ca_choices_22 + - ca_choices_23 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] confirmed_unclassified_answers: [] customization_args: - buttonText: + choices: value: - content_id: ca_buttonText - unicode_str: Continue + - content_id: ca_choices_20 + html:

    1

    + - content_id: ca_choices_21 + html:

    2

    + - content_id: ca_choices_22 + html:

    3

    + - content_id: ca_choices_23 + html:

    4

    + maxAllowableSelectionCount: + value: 2 + minAllowableSelectionCount: + value: 3 default_outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: content_id: default_outcome - html: '' + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null hints: [] - id: Continue - solution: null + id: ItemSelectionInput + solution: + answer_is_exclusive: true + correct_answer: + - ca_choices_20 + explanation: + content_id: solution + html: This is solution for state1 linked_skill_id: null - next_content_id_index: 4 + next_content_id_index: 26 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_buttonText: {} + ca_choices_20: {} + ca_choices_21: {} + ca_choices_22: {} + ca_choices_23: {} content: {} default_outcome: {} - feedback_1: {} solution: {} + feedback_24: {} + feedback_25: {} solicit_answer_details: false written_translations: translations_mapping: - ca_buttonText: - bn: - data_format: html - needs_update: false - translation:

    hello

    + ca_choices_20: {} + ca_choices_21: {} + ca_choices_22: {} + ca_choices_23: {} content: {} default_outcome: {} - feedback_1: {} solution: {} - END: + feedback_24: {} + feedback_25: {} + end: + card_is_checkpoint: false classifier_model_id: null content: content_id: content - html:

    Congratulations, you have finished!

    + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -4761,78 +9974,138 @@ def test_load_from_v46_with_invalid_unicode_written_translations(self): written_translations: translations_mapping: content: {} -states_schema_version: 41 +states_schema_version: 52 tags: [] -title: Title +title: '' """) - latest_sample_yaml_content = ( +# pylint: disable=single-line-pragma +# pylint: disable=line-too-long + latest_sample_yaml_content_for_item_selection_interac_1: str = ( """author_notes: '' -auto_tts_enabled: true +auto_tts_enabled: false blurb: '' -category: Category -correctness_feedback_enabled: false -init_state_name: (untitled state) +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction language_code: en +next_content_id_index: 10 objective: '' param_changes: [] param_specs: {} -schema_version: 54 +schema_version: 60 states: - (untitled state): + Introduction: card_is_checkpoint: true classifier_model_id: null content: - content_id: content - html: '' + content_id: content_0 + html:

    Numeric interaction validation

    interaction: - answer_groups: [] + answer_groups: + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html:

    dff

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_5 + - ca_choices_6 + rule_type: Equals + - inputs: + x: + - ca_choices_5 + rule_type: ContainsAtLeastOneOf + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_3 + html:

    gg

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_5 + rule_type: Equals + - inputs: + x: + - ca_choices_5 + - ca_choices_6 + - ca_choices_7 + - ca_choices_8 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] confirmed_unclassified_answers: [] customization_args: - buttonText: + choices: value: - content_id: ca_buttonText - unicode_str: Continue + - content_id: ca_choices_5 + html:

    1

    + - content_id: ca_choices_6 + html:

    2

    + - content_id: ca_choices_7 + html:

    3

    + - content_id: ca_choices_8 + html:

    4

    + maxAllowableSelectionCount: + value: 4 + minAllowableSelectionCount: + value: 1 default_outcome: - dest: END + dest: end + dest_if_really_stuck: null feedback: - content_id: default_outcome - html: '' + content_id: default_outcome_1 + html:

    df

    labelled_as_correct: false missing_prerequisite_skill_id: null param_changes: [] refresher_exploration_id: null - hints: [] - id: Continue - solution: null + hints: [] + id: ItemSelectionInput + solution: + answer_is_exclusive: true + correct_answer: + - ca_choices_5 + explanation: + content_id: solution_4 + html: This is solution for state1 linked_skill_id: null - next_content_id_index: 4 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_buttonText: {} - content: {} - default_outcome: {} - feedback_1: {} - solution: {} + ca_choices_5: {} + ca_choices_6: {} + ca_choices_7: {} + ca_choices_8: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + feedback_3: {} + solution_4: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_buttonText: - bn: - data_format: unicode - needs_update: false - translation: hello - content: {} - default_outcome: {} - feedback_1: {} - solution: {} - END: + end: card_is_checkpoint: false classifier_model_id: null content: - content_id: content - html:

    Congratulations, you have finished!

    + content_id: content_9 + html:

    End interaction

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -4844,1514 +10117,3382 @@ def test_load_from_v46_with_invalid_unicode_written_translations(self): id: EndExploration solution: null linked_skill_id: null - next_content_id_index: 0 param_changes: [] recorded_voiceovers: voiceovers_mapping: - content: {} + content_9: {} solicit_answer_details: false - written_translations: - translations_mapping: - content: {} -states_schema_version: 49 +states_schema_version: 55 tags: [] -title: Title +title: '' """) - exploration = exp_domain.Exploration.from_yaml( - 'eid', sample_yaml_content) - self.assertEqual(exploration.to_yaml(), latest_sample_yaml_content) - - -class ConversionUnitTests(test_utils.GenericTestBase): - """Test conversion methods.""" - - def test_convert_exploration_to_player_dict(self): - exp_title = 'Title' - second_state_name = 'first state' - - exploration = exp_domain.Exploration.create_default_exploration( - 'eid', title=exp_title, category='Category') - exploration.add_states([second_state_name]) - - def _get_default_state_dict(content_str, dest_name, is_init_state): - """Gets the default state dict of the exploration.""" - return { - 'linked_skill_id': None, - 'next_content_id_index': 0, - 'classifier_model_id': None, - 'content': { - 'content_id': 'content', - 'html': content_str, - }, - 'recorded_voiceovers': { - 'voiceovers_mapping': { - 'content': {}, - 'default_outcome': {} - } - }, - 'solicit_answer_details': False, - 'card_is_checkpoint': is_init_state, - 'written_translations': { - 'translations_mapping': { - 'content': {}, - 'default_outcome': {} - } - }, - 'interaction': { - 'answer_groups': [], - 'confirmed_unclassified_answers': [], - 'customization_args': {}, - 'default_outcome': { - 'dest': dest_name, - 'feedback': { - 'content_id': feconf.DEFAULT_OUTCOME_CONTENT_ID, - 'html': '' - }, - 'labelled_as_correct': False, - 'param_changes': [], - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None - 
}, - 'hints': [], - 'id': None, - 'solution': None, - }, - 'param_changes': [], - } - - self.assertEqual(exploration.to_player_dict(), { - 'init_state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'title': exp_title, - 'objective': feconf.DEFAULT_EXPLORATION_OBJECTIVE, - 'states': { - feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict( - feconf.DEFAULT_INIT_STATE_CONTENT_STR, - feconf.DEFAULT_INIT_STATE_NAME, True), - second_state_name: _get_default_state_dict( - '', second_state_name, False), - }, - 'param_changes': [], - 'param_specs': {}, - 'language_code': 'en', - 'correctness_feedback_enabled': False, - }) - - -class StateOperationsUnitTests(test_utils.GenericTestBase): - """Test methods operating on states.""" - - def test_delete_state(self): - """Test deletion of states.""" - exploration = exp_domain.Exploration.create_default_exploration('eid') - exploration.add_states(['first state']) - - with self.assertRaisesRegexp( - ValueError, 'Cannot delete initial state' - ): - exploration.delete_state(exploration.init_state_name) - - exploration.add_states(['second state']) - exploration.delete_state('second state') - - with self.assertRaisesRegexp(ValueError, 'fake state does not exist'): - exploration.delete_state('fake state') - - -class HtmlCollectionTests(test_utils.GenericTestBase): - """Test method to obtain all html strings.""" - - def test_all_html_strings_are_collected(self): - - exploration = exp_domain.Exploration.create_default_exploration( - 'eid', title='title', category='category') - exploration.add_states(['state1', 'state2', 'state3', 'state4']) - state1 = exploration.states['state1'] - state2 = exploration.states['state2'] - state3 = exploration.states['state3'] - state4 = exploration.states['state4'] - content1_dict = { - 'content_id': 'content', - 'html': '
    Hello, this is state1
    ' - } - content2_dict = { - 'content_id': 'content', - 'html': '
    Hello, this is state2
    ' - } - content3_dict = { - 'content_id': 'content', - 'html': '

    Hello, this is state3

    ' - } - content4_dict = { - 'content_id': 'content', - 'html': '

    Hello, this is state4

    ' - } - state1.update_content( - state_domain.SubtitledHtml.from_dict(content1_dict)) - state2.update_content( - state_domain.SubtitledHtml.from_dict(content2_dict)) - state3.update_content( - state_domain.SubtitledHtml.from_dict(content3_dict)) - state4.update_content( - state_domain.SubtitledHtml.from_dict(content4_dict)) - - self.set_interaction_for_state(state1, 'TextInput') - self.set_interaction_for_state(state2, 'MultipleChoiceInput') - self.set_interaction_for_state(state3, 'ItemSelectionInput') - self.set_interaction_for_state(state4, 'DragAndDropSortInput') - - customization_args_dict1 = { - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'Enter here.' - } - }, - 'rows': {'value': 1} - } - customization_args_dict2 = { - 'choices': {'value': [ - { - 'content_id': 'ca_choices_0', - 'html': '

    This is value1 for MultipleChoice

    ' - }, - { - 'content_id': 'ca_choices_1', - 'html': '

    This is value2 for MultipleChoice

    ' - } - ]}, - 'showChoicesInShuffledOrder': {'value': True} - } - customization_args_dict3 = { - 'choices': {'value': [ - { - 'content_id': 'ca_choices_0', - 'html': '

    This is value1 for ItemSelection

    ' - }, - { - 'content_id': 'ca_choices_1', - 'html': '

    This is value2 for ItemSelection

    ' - }, - { - 'content_id': 'ca_choices_2', - 'html': '

    This is value3 for ItemSelection

    ' - } - ]}, - 'minAllowableSelectionCount': {'value': 1}, - 'maxAllowableSelectionCount': {'value': 2} - } - customization_args_dict4 = { - 'choices': {'value': [ - { - 'content_id': 'ca_choices_0', - 'html': '

    This is value1 for DragAndDropSortInput

    ' - }, - { - 'content_id': 'ca_choices_1', - 'html': '

    This is value2 for DragAndDropSortInput

    ' - } - ]}, - 'allowMultipleItemsInSamePosition': {'value': True} - } - - state1.update_interaction_customization_args(customization_args_dict1) - state2.update_interaction_customization_args(customization_args_dict2) - state3.update_interaction_customization_args(customization_args_dict3) - state4.update_interaction_customization_args(customization_args_dict4) - - default_outcome = state_domain.Outcome( - 'state2', state_domain.SubtitledHtml( - 'default_outcome', '

    Default outcome for state1

    '), - False, [], None, None - ) - state1.update_interaction_default_outcome(default_outcome) - hint_list2 = [ - state_domain.Hint( - state_domain.SubtitledHtml( - 'hint_1', '

    Hello, this is html1 for state2

    ' - ) - ), - state_domain.Hint( - state_domain.SubtitledHtml( - 'hint_2', '

    Hello, this is html2 for state2

    ' - ) - ), - ] - state2.update_interaction_hints(hint_list2) + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_item_selection_interac_1) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_item_selection_interac_1) - solution_dict = { - 'interaction_id': '', - 'answer_is_exclusive': True, - 'correct_answer': 'Answer1', - 'explanation': { - 'content_id': 'solution', - 'html': '

    This is solution for state1

    ' - } - } - solution = state_domain.Solution.from_dict( - state1.interaction.id, solution_dict) - state1.update_interaction_solution(solution) + sample_yaml_content_for_item_selection_interac_2: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 57 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_24 + html:

    dff

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_20 + rule_type: Equals + - inputs: + x: + - ca_choices_22 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_25 + html:

    gg

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_22 + rule_type: Equals + - inputs: + x: + - ca_choices_20 + rule_type: Equals + - inputs: + x: + - ca_choices_21 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_26 + html:

    gg

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_22 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_27 + html:

    gg

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_23 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + choices: + value: + - content_id: ca_choices_20 + html:

    1

    + - content_id: ca_choices_21 + html:

    2

    + - content_id: ca_choices_22 + html:

    3

    + - content_id: ca_choices_23 + html:

    + maxAllowableSelectionCount: + value: 4 + minAllowableSelectionCount: + value: 2 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: ItemSelectionInput + solution: + answer_is_exclusive: true + correct_answer: + - ca_choices_23 + explanation: + content_id: solution + html: This is solution for state1 + linked_skill_id: null + next_content_id_index: 28 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_20: {} + ca_choices_21: {} + ca_choices_22: {} + ca_choices_23: {} + content: {} + default_outcome: {} + solution: {} + feedback_24: {} + feedback_25: {} + feedback_26: {} + feedback_27: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_choices_20: {} + ca_choices_21: {} + ca_choices_22: {} + ca_choices_23: {} + content: {} + default_outcome: {} + solution: {} + feedback_24: {} + feedback_25: {} + feedback_26: {} + feedback_27: {} + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 52 +tags: [] +title: '' +""") - state_answer_group_list2 = [ - state_domain.AnswerGroup( - state_domain.Outcome( - 'state1', state_domain.SubtitledHtml( - 'feedback_1', '

    Outcome2 for state2

    '), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Equals', - { - 'x': 0 - }), - state_domain.RuleSpec( - 'Equals', - { - 'x': 1 - }) - ], - [], - None), - state_domain.AnswerGroup( - state_domain.Outcome( - 'state3', state_domain.SubtitledHtml( - 'feedback_2', '

    Outcome1 for state2

    '), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Equals', - { - 'x': 0 - }) - ], - [], - None - )] - state_answer_group_list3 = [state_domain.AnswerGroup( - state_domain.Outcome( - 'state1', state_domain.SubtitledHtml( - 'feedback_1', '

    Outcome for state3

    '), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Equals', - { - 'x': ['ca_choices_0'] - }), - state_domain.RuleSpec( - 'Equals', - { - 'x': ['ca_choices_2'] - }) - ], - [], - None - )] - state2.update_interaction_answer_groups(state_answer_group_list2) - state3.update_interaction_answer_groups(state_answer_group_list3) + latest_sample_yaml_content_for_item_selection_interac_2: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 7 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html:

    gg

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_5 + rule_type: Equals + - inputs: + x: + - ca_choices_3 + rule_type: Equals + - inputs: + x: + - ca_choices_4 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + choices: + value: + - content_id: ca_choices_3 + html:

    1

    + - content_id: ca_choices_4 + html:

    2

    + - content_id: ca_choices_5 + html:

    3

    + maxAllowableSelectionCount: + value: 4 + minAllowableSelectionCount: + value: 1 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: ItemSelectionInput + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_3: {} + ca_choices_4: {} + ca_choices_5: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_6 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_6: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: '' +""") - expected_html_list = [ - '', - '', - '
    Hello, this is state2
    ', - '

    Outcome1 for state2

    ', - '

    Outcome2 for state2

    ', - '', - '

    Hello, this is html1 for state2

    ', - '

    Hello, this is html2 for state2

    ', - '

    This is value1 for MultipleChoice

    ', - '

    This is value2 for MultipleChoice

    ', - '
    Hello, this is state1
    ', - '

    Default outcome for state1

    ', - '

    This is solution for state1

    ', - '

    Hello, this is state3

    ', - '

    Outcome for state3

    ', - '', - '

    This is value1 for ItemSelection

    ', - '

    This is value2 for ItemSelection

    ', - '

    This is value3 for ItemSelection

    ', - '

    Hello, this is state4

    ', - '', - '

    This is value1 for DragAndDropSortInput

    ', - '

    This is value2 for DragAndDropSortInput

    ' - ] + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_item_selection_interac_2) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_item_selection_interac_2) - actual_outcome_list = exploration.get_all_html_content_strings() + sample_yaml_content_for_item_selection_interac_3: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 57 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_24 + html:

    dff

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_23 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_25 + html:

    dff

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_20 + - ca_choices_21 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + choices: + value: + - content_id: ca_choices_20 + html:

    1

    + - content_id: ca_choices_21 + html:

    2

    + - content_id: ca_choices_22 + html:

    3

    + - content_id: ca_choices_23 + html:

    4

    + maxAllowableSelectionCount: + value: 4 + minAllowableSelectionCount: + value: 2 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: ItemSelectionInput + solution: null + linked_skill_id: null + next_content_id_index: 26 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_20: {} + ca_choices_21: {} + ca_choices_22: {} + ca_choices_23: {} + content: {} + default_outcome: {} + feedback_24: {} + feedback_25: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_choices_20: {} + ca_choices_21: {} + ca_choices_22: {} + ca_choices_23: {} + content: {} + default_outcome: {} + feedback_24: {} + feedback_25: {} + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 52 +tags: [] +title: '' +""") - self.assertItemsEqual(set(actual_outcome_list), set(expected_html_list)) + latest_sample_yaml_content_for_item_selection_interac_3: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 8 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html:

    dff

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_3 + - ca_choices_4 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + choices: + value: + - content_id: ca_choices_3 + html:

    1

    + - content_id: ca_choices_4 + html:

    2

    + - content_id: ca_choices_5 + html:

    3

    + - content_id: ca_choices_6 + html:

    4

    + maxAllowableSelectionCount: + value: 4 + minAllowableSelectionCount: + value: 2 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: ItemSelectionInput + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_3: {} + ca_choices_4: {} + ca_choices_5: {} + ca_choices_6: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_7 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_7: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: '' +""") + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_item_selection_interac_3) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_item_selection_interac_3) -class ExplorationChangesMergeabilityUnitTests( - exp_services_test.ExplorationServicesUnitTests, - test_utils.EmailTestBase): - """Test methods related to exploration changes mergeability.""" + sample_yaml_content_for_item_selection_interac_4: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 57 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_24 + html:

    dff

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_20 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + choices: + value: + - content_id: ca_choices_20 + html:

    1

    + - content_id: ca_choices_21 + html:

    2

    + maxAllowableSelectionCount: + value: 4 + minAllowableSelectionCount: + value: 3 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: ItemSelectionInput + solution: null + linked_skill_id: null + next_content_id_index: 26 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_20: {} + ca_choices_21: {} + content: {} + default_outcome: {} + feedback_24: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_choices_20: {} + ca_choices_21: {} + content: {} + default_outcome: {} + feedback_24: {} + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 52 +tags: [] +title: '' +""") - def test_changes_are_mergeable_when_content_changes_do_not_conflict(self): - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') + latest_sample_yaml_content_for_item_selection_interac_4: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 6 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html:

    dff

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - ca_choices_3 + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + choices: + value: + - content_id: ca_choices_3 + html:

    1

    + - content_id: ca_choices_4 + html:

    2

    + maxAllowableSelectionCount: + value: 4 + minAllowableSelectionCount: + value: 1 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: ItemSelectionInput + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_3: {} + ca_choices_4: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_5 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_5: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: '' +""") - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_item_selection_interac_4) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_item_selection_interac_4) + + def test_fixing_invalid_drag_and_drop_exp_data_by_migrating_to_v58( + self + ) -> None: + """Tests the migration of invalid DragAndDrop interaction exploration + data from version less than 58. + """ - change_list = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, - 'property_name': 'title', - 'new_value': 'First title' - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, 'Changed title.') + sample_yaml_content_for_drag_and_drop_interac_1: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 57 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_30 + html:

    as

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - - ca_choices_26 + - ca_choices_27 + - - ca_choices_28 + - - ca_choices_29 + rule_type: IsEqualToOrdering + - inputs: + x: + - - ca_choices_26 + - ca_choices_27 + - - ca_choices_28 + - - ca_choices_29 + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_31 + html:

    ff

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: ca_choices_26 + y: ca_choices_26 + rule_type: HasElementXBeforeElementY + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_32 + html:

    a

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: [] + rule_type: IsEqualToOrdering + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_33 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: ca_choices_27 + y: 2 + rule_type: HasElementXAtPositionY + - inputs: + x: + - - ca_choices_26 + - - ca_choices_27 + - - ca_choices_28 + - - ca_choices_29 + rule_type: IsEqualToOrdering + - inputs: + x: + - - ca_choices_26 + - [] + - - ca_choices_28 + - - ca_choices_29 + rule_type: IsEqualToOrdering + - inputs: + x: + - - ca_choices_29 + - - ca_choices_28 + - - ca_choices_27 + - - ca_choices_26 + rule_type: IsEqualToOrdering + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_33 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: ca_choices_27 + y: 4 + rule_type: HasElementXAtPositionY + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + allowMultipleItemsInSamePosition: + value: false + choices: + value: + - content_id: ca_choices_26 + html:

    1

    + - content_id: ca_choices_27 + html:

    2

    + - content_id: ca_choices_28 + html:

    3

    + - content_id: ca_choices_29 + html:

    4

    + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: DragAndDropSortInput + solution: null + linked_skill_id: null + next_content_id_index: 34 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_26: {} + ca_choices_27: {} + ca_choices_28: {} + ca_choices_29: {} + content: {} + default_outcome: {} + feedback_30: {} + feedback_31: {} + feedback_32: {} + feedback_33: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_choices_26: {} + ca_choices_27: {} + ca_choices_28: {} + ca_choices_29: {} + content: {} + default_outcome: {} + feedback_30: {} + feedback_31: {} + feedback_32: {} + feedback_33: {} + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 52 +tags: [] +title: '' +""") - # Making changes to properties except content. - change_list_2 = [exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'property_name': 'widget_id', - 'new_value': None, - 'old_value': 'TextInput' - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args', - 'new_value': {}, - 'old_value': { - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': '' - } - }, - 'rows': { - 'value': 1 - } - } - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'new_value': 2, - 'old_value': 1 - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'property_name': 'widget_id', - 'new_value': 'Continue', - 'old_value': None - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args', - 'new_value': { - 'buttonText': { - 'value': { - 'content_id': 'ca_buttonText_1', - 'unicode_str': 'Continue' - } - } - }, - 'old_value': {} - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, 'Changed Interaction.') + latest_sample_yaml_content_for_drag_and_drop_interac_1: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true 
+edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 9 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: ca_choices_5 + y: 2 + rule_type: HasElementXAtPositionY + - inputs: + x: + - - ca_choices_7 + - - ca_choices_6 + - - ca_choices_5 + - - ca_choices_4 + rule_type: IsEqualToOrdering + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_3 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: ca_choices_5 + y: 4 + rule_type: HasElementXAtPositionY + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + allowMultipleItemsInSamePosition: + value: false + choices: + value: + - content_id: ca_choices_4 + html:

    1

    + - content_id: ca_choices_5 + html:

    2

    + - content_id: ca_choices_6 + html:

    3

    + - content_id: ca_choices_7 + html:

    4

    + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: DragAndDropSortInput + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_4: {} + ca_choices_5: {} + ca_choices_6: {} + ca_choices_7: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + feedback_3: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_8 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_8: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: '' +""") + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_drag_and_drop_interac_1) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_drag_and_drop_interac_1) - # Changing content of second state. - change_list_3 = [exp_domain.ExplorationChange({ - 'property_name': 'content', - 'state_name': 'End', - 'cmd': 'edit_state_property', - 'old_value': { - 'html': '', - 'content_id': 'content' - }, - 'new_value': { - 'html': '

    Congratulations, you have finished!

    ', - 'content_id': 'content' - } - })] + sample_yaml_content_for_drag_and_drop_interac_2: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 57 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_33 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - - ca_choices_29 + - ca_choices_28 + - - ca_choices_27 + - - ca_choices_26 + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + - inputs: + x: + - - ca_choices_26 + - - ca_choices_27 + - - ca_choices_28 + - - ca_choices_29 + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + - inputs: + x: + - - ca_choices_29 + - - ca_choices_27 + - ca_choices_28 + - - ca_choices_26 + rule_type: IsEqualToOrdering + - inputs: + x: ca_choices_27 + y: 4 + rule_type: HasElementXAtPositionY + - inputs: + x: + - - ca_choices_29 + - ca_choices_27 + - ca_choices_28 + - ca_choices_26 + rule_type: IsEqualToOrdering + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + allowMultipleItemsInSamePosition: + value: true + choices: + value: + - content_id: ca_choices_26 + html:

    1

    + - content_id: ca_choices_27 + html:

    2

    + - content_id: ca_choices_28 + html:

    3

    + - content_id: ca_choices_29 + html:

    4

    + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: DragAndDropSortInput + solution: null + linked_skill_id: null + next_content_id_index: 34 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_26: {} + ca_choices_27: {} + ca_choices_28: {} + ca_choices_29: {} + content: {} + default_outcome: {} + feedback_33: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_choices_26: {} + ca_choices_27: {} + ca_choices_28: {} + ca_choices_29: {} + content: {} + default_outcome: {} + feedback_33: {} + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 52 +tags: [] +title: '' +""") - # Checking that the changes can be applied when - # changing to same version. - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 3, change_list_3) - self.assertEqual(changes_are_mergeable, True) + latest_sample_yaml_content_for_drag_and_drop_interac_2: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 8 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - - ca_choices_6 + - ca_choices_5 + - - ca_choices_4 + - - ca_choices_3 + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + - inputs: + x: + - - ca_choices_3 + - - ca_choices_4 + - - ca_choices_5 + - - ca_choices_6 + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + - inputs: + x: ca_choices_4 + y: 4 + rule_type: HasElementXAtPositionY + - inputs: + x: + - - ca_choices_6 + - ca_choices_4 + - ca_choices_5 + - ca_choices_3 + rule_type: IsEqualToOrdering + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + allowMultipleItemsInSamePosition: + value: true + choices: + value: + - content_id: ca_choices_3 + html:

    1

    + - content_id: ca_choices_4 + html:

    2

    + - content_id: ca_choices_5 + html:

    3

    + - content_id: ca_choices_6 + html:

    4

    + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: DragAndDropSortInput + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_3: {} + ca_choices_4: {} + ca_choices_5: {} + ca_choices_6: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_7 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_7: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: '' +""") - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list_3) - self.assertEqual(changes_are_mergeable, True) + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_drag_and_drop_interac_2) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_drag_and_drop_interac_2) - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_3, - 'Changed content of End state.') + sample_yaml_content_for_drag_and_drop_interac_3: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 57 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_33 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - - ca_choices_26 + - - ca_choices_27 + - - ca_choices_28 + - - ca_choices_29 + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + - inputs: + x: + - - ca_choices_29 + - - ca_choices_27 + - ca_choices_28 + - - ca_choices_26 + rule_type: IsEqualToOrdering + - inputs: + x: ca_choices_28 + y: ca_choices_26 + rule_type: HasElementXBeforeElementY + - inputs: + x: ca_choices_26 + y: ca_choices_28 + rule_type: HasElementXBeforeElementY + - inputs: + x: ca_choices_27 + y: 2 + rule_type: HasElementXAtPositionY + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + allowMultipleItemsInSamePosition: + value: true + choices: + value: + - content_id: ca_choices_26 + html:

    + - content_id: ca_choices_27 + html:

    + - content_id: ca_choices_28 + html:

    1

    + - content_id: ca_choices_29 + html:

    2

    + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: DragAndDropSortInput + solution: + answer_is_exclusive: true + correct_answer: + - - ca_choices_29 + - - ca_choices_27 + - ca_choices_28 + - - ca_choices_26 + explanation: + content_id: solution + html: This is solution for state1 + linked_skill_id: null + next_content_id_index: 34 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_26: {} + ca_choices_27: {} + ca_choices_28: {} + ca_choices_29: {} + content: {} + solution: {} + default_outcome: {} + feedback_33: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_choices_26: {} + ca_choices_27: {} + ca_choices_28: {} + ca_choices_29: {} + content: {} + solution: {} + default_outcome: {} + feedback_33: {} + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 52 +tags: [] +title: '' +""") - # Changing content of first state. - change_list_4 = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_RENAME_STATE, - 'old_state_name': 'Introduction', - 'new_state_name': 'Renamed state' - }), exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_RENAME_STATE, - 'old_state_name': 'Renamed state', - 'new_state_name': 'Renamed state again' - }), exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_RENAME_STATE, - 'old_state_name': 'Renamed state again', - 'new_state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'property_name': 'content', - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'old_value': { - 'html': '', - 'content_id': 'content' - }, - 'new_value': { - 'html': '

    Hello

    ', - 'content_id': 'content' - } - })] + latest_sample_yaml_content_for_drag_and_drop_interac_3: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 7 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - - ca_choices_4 + - - ca_choices_5 + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + - inputs: + x: + - - ca_choices_5 + - - ca_choices_4 + rule_type: IsEqualToOrdering + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + allowMultipleItemsInSamePosition: + value: true + choices: + value: + - content_id: ca_choices_4 + html:

    1

    + - content_id: ca_choices_5 + html:

    2

    + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: DragAndDropSortInput + solution: + answer_is_exclusive: true + correct_answer: + - - ca_choices_5 + - - ca_choices_4 + explanation: + content_id: solution_3 + html: This is solution for state1 + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_4: {} + ca_choices_5: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + solution_3: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_6 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_6: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: '' +""") - # Checking for the mergability of the fourth change list. - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list_4) - self.assertEqual(changes_are_mergeable, True) + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_drag_and_drop_interac_3) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_drag_and_drop_interac_3) + + def test_fixing_invalid_text_exp_data_by_migrating_to_v58( + self + ) -> None: + """Tests the migration of invalid TextInput interaction exploration + data from version less than 58. + """ + sample_yaml_content_for_text_interac_1: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 53 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_35 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_36 + normalizedStrSet: + - and + - drop + rule_type: Contains + - inputs: + x: + contentId: rule_input_37 + normalizedStrSet: + - Draganddrop + rule_type: Contains + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_38 + html:

    sd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_39 + normalizedStrSet: + - ze + rule_type: StartsWith + - inputs: + x: + contentId: rule_input_40 + normalizedStrSet: + - zebra + rule_type: StartsWith + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_41 + html:

    sd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_42 + normalizedStrSet: + - he + rule_type: Contains + - inputs: + x: + contentId: rule_input_43 + normalizedStrSet: + - hello + rule_type: StartsWith + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_44 + html:

    ssd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_45 + normalizedStrSet: + - abc + rule_type: Contains + - inputs: + x: + contentId: rule_input_46 + normalizedStrSet: + - abcd + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_47 + html:

    sd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_48 + normalizedStrSet: + - dog + rule_type: StartsWith + - inputs: + x: + contentId: rule_input_49 + normalizedStrSet: + - dogs + rule_type: Equals + - inputs: + x: + contentId: rule_input_50 + normalizedStrSet: + - beautiful + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_48 + html:

    sd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_51 + normalizedStrSet: + - doggies + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_34 + unicode_str: '' + rows: + value: 15 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 50 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_34: {} + content: {} + default_outcome: {} + feedback_35: {} + feedback_38: {} + feedback_41: {} + feedback_44: {} + feedback_47: {} + feedback_48: {} + rule_input_36: {} + rule_input_37: {} + rule_input_39: {} + rule_input_40: {} + rule_input_42: {} + rule_input_43: {} + rule_input_45: {} + rule_input_46: {} + rule_input_48: {} + rule_input_49: {} + rule_input_50: {} + rule_input_51: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_34: {} + content: {} + default_outcome: {} + feedback_35: {} + feedback_38: {} + feedback_41: {} + feedback_44: {} + feedback_47: {} + feedback_48: {} + rule_input_36: {} + rule_input_37: {} + rule_input_39: {} + rule_input_40: {} + rule_input_42: {} + rule_input_43: {} + rule_input_45: {} + rule_input_46: {} + rule_input_48: {} + rule_input_49: {} + rule_input_50: {} + rule_input_51: {} + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 52 +tags: [] +title: '' +""") - # Checking for the mergability when working on latest version. - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 3, change_list_4) - self.assertEqual(changes_are_mergeable, True) + latest_sample_yaml_content_for_text_interac_1: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 15 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - and + - drop + rule_type: Contains + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_4 + html:

    sd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_5 + normalizedStrSet: + - ze + rule_type: StartsWith + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_6 + html:

    sd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_7 + normalizedStrSet: + - he + rule_type: Contains + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_8 + html:

    ssd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_9 + normalizedStrSet: + - abc + rule_type: Contains + tagged_skill_misconception_id: null + training_data: [] + - outcome: + dest: Introduction + dest_if_really_stuck: null + feedback: + content_id: feedback_10 + html:

    sd

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_11 + normalizedStrSet: + - dog + rule_type: StartsWith + - inputs: + x: + contentId: rule_input_12 + normalizedStrSet: + - beautiful + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + catchMisspellings: + value: false + placeholder: + value: + content_id: ca_placeholder_13 + unicode_str: '' + rows: + value: 10 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_13: {} + content_0: {} + default_outcome_1: {} + feedback_10: {} + feedback_2: {} + feedback_4: {} + feedback_6: {} + feedback_8: {} + rule_input_11: {} + rule_input_12: {} + rule_input_3: {} + rule_input_5: {} + rule_input_7: {} + rule_input_9: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_14 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_14: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: '' +""") - def test_changes_are_not_mergeable_when_content_changes_conflict(self): - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_text_interac_1) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_text_interac_1) - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + sample_yaml_content_for_text_interac_2: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 53 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_35 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_36 + normalizedStrSet: + - and + - drop + rule_type: Contains + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_34 + unicode_str: '' + rows: + value: 0 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 50 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_34: {} + content: {} + default_outcome: {} + feedback_35: {} + rule_input_36: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_placeholder_34: {} + content: {} + default_outcome: {} + feedback_35: {} + rule_input_36: {} + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + solicit_answer_details: false + written_translations: + translations_mapping: + content: {} +states_schema_version: 52 +tags: [] +title: '' +""") - # Making changes to content of the first state. - change_list = [exp_domain.ExplorationChange({ - 'property_name': 'content', - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'old_value': { - 'html': '', - 'content_id': 'content' - }, - 'new_value': { - 'html': '

    Content 1.

    ', - 'content_id': 'content' - } - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, 'Changed Content.') + latest_sample_yaml_content_for_text_interac_2: str = ( + """author_notes: '' +auto_tts_enabled: false +blurb: '' +category: '' +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: Introduction +language_code: en +next_content_id_index: 6 +objective: '' +param_changes: [] +param_specs: {} +schema_version: 60 +states: + Introduction: + card_is_checkpoint: true + classifier_model_id: null + content: + content_id: content_0 + html:

    Numeric interaction validation

    + interaction: + answer_groups: + - outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: feedback_2 + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - and + - drop + rule_type: Contains + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + catchMisspellings: + value: false + placeholder: + value: + content_id: ca_placeholder_4 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: end + dest_if_really_stuck: null + feedback: + content_id: default_outcome_1 + html:

    df

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_4: {} + content_0: {} + default_outcome_1: {} + feedback_2: {} + rule_input_3: {} + solicit_answer_details: false + end: + card_is_checkpoint: false + classifier_model_id: null + content: + content_id: content_5 + html:

    End interaction

    + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + recommendedExplorationIds: + value: [] + default_outcome: null + hints: [] + id: EndExploration + solution: null + linked_skill_id: null + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content_5: {} + solicit_answer_details: false +states_schema_version: 55 +tags: [] +title: '' +""") - # Changing content of the same state to check that - # changes are not mergeable. - change_list_2 = [exp_domain.ExplorationChange({ - 'property_name': 'content', - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'old_value': { - 'html': '', - 'content_id': 'content' - }, - 'new_value': { - 'html': '

    Content 2.

    ', - 'content_id': 'content' - } - })] + exploration = exp_domain.Exploration.from_yaml( + 'eid', sample_yaml_content_for_text_interac_2) + self.assertEqual( + exploration.to_yaml(), + latest_sample_yaml_content_for_text_interac_2) - # Checking for the mergability of the second change list. - changes_are_not_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 1, change_list_2) - self.assertEqual(changes_are_not_mergeable, False) - def test_changes_are_mergeable_when_interaction_id_changes_do_not_conflict(self): # pylint: disable=line-too-long - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') +class ConversionUnitTests(test_utils.GenericTestBase): + """Test conversion methods.""" - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + def test_convert_exploration_to_player_dict(self) -> None: + exp_title = 'Title' + second_state_name = 'first state' - # Making changes in the properties which are - # not related to the interaction id. - change_list_2 = [exp_domain.ExplorationChange({ - 'new_value': { - 'content_id': 'content', - 'html': '

    This is the first state.

    ' - }, - 'state_name': 'Introduction', - 'old_value': { - 'content_id': 'content', - 'html': '' - }, - 'cmd': 'edit_state_property', - 'property_name': 'content' - }), exp_domain.ExplorationChange({ - 'new_value': [{ - 'hint_content': { - 'content_id': 'hint_1', - 'html': '

    This is a first hint.

    ' - } - }], - 'state_name': 'Introduction', - 'old_value': [], - 'cmd': 'edit_state_property', - 'property_name': 'hints' - }), exp_domain.ExplorationChange({ - 'new_value': 2, - 'state_name': 'Introduction', - 'old_value': 1, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index' - }), exp_domain.ExplorationChange({ - 'new_value': [{ - 'hint_content': { - 'content_id': 'hint_1', - 'html': '

    This is a first hint.

    ' - } - }, { - 'hint_content': { - 'content_id': 'hint_2', - 'html': '

    This is the second hint.

    ' - } - }], - 'state_name': 'Introduction', - 'old_value': [{ - 'hint_content': { - 'content_id': 'hint_1', - 'html': '

    This is a first hint.

    ' - } - }], - 'cmd': 'edit_state_property', - 'property_name': 'hints' - }), exp_domain.ExplorationChange({ - 'new_value': { - 'content_id': 'content', - 'html': '

    Congratulations, you have finished!

    ' - }, - 'state_name': 'End', - 'old_value': { - 'content_id': 'content', - 'html': '' - }, - 'cmd': 'edit_state_property', - 'property_name': 'content' - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Changed Contents and Hint') + exploration = exp_domain.Exploration.create_default_exploration( + 'eid', title=exp_title, category='Category') + exploration.add_states([second_state_name]) - # Changes to the properties affected by or affecting - # interaction id and in interaction_id itself. - change_list_3 = [exp_domain.ExplorationChange({ - 'new_value': None, - 'state_name': 'Introduction', - 'old_value': 'TextInput', - 'cmd': 'edit_state_property', - 'property_name': 'widget_id' - }), exp_domain.ExplorationChange({ - 'new_value': {}, - 'state_name': 'Introduction', - 'old_value': { - 'rows': { - 'value': 1 + def _get_default_state_dict( + content_str: str, + dest_name: str, + is_init_state: bool, + content_id_generator: translation_domain.ContentIdGenerator + ) -> state_domain.StateDict: + """Gets the default state dict of the exploration.""" + content_id_for_content = content_id_generator.generate( + translation_domain.ContentType.CONTENT) + content_id_for_default_outcome = content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + return { + 'linked_skill_id': None, + 'classifier_model_id': None, + 'content': { + 'content_id': content_id_for_content, + 'html': content_str, }, - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': '' - } - } - }, - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args' - }), exp_domain.ExplorationChange({ - 'new_value': 2, - 'state_name': 'Introduction', - 'old_value': 1, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index' - }), exp_domain.ExplorationChange({ - 'new_value': 'Continue', - 'state_name': 'Introduction', - 'old_value': None, - 'cmd': 'edit_state_property', - 'property_name': 
'widget_id' - }), exp_domain.ExplorationChange({ - 'new_value': { - 'buttonText': { - 'value': { - 'content_id': 'ca_buttonText_1', - 'unicode_str': 'Continue' + 'recorded_voiceovers': { + 'voiceovers_mapping': { + content_id_for_content: {}, + content_id_for_default_outcome: {} } - } + }, + 'solicit_answer_details': False, + 'card_is_checkpoint': is_init_state, + 'interaction': { + 'answer_groups': [], + 'confirmed_unclassified_answers': [], + 'customization_args': {}, + 'default_outcome': { + 'dest': dest_name, + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': content_id_for_default_outcome, + 'html': '' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'hints': [], + 'id': None, + 'solution': None, + }, + 'param_changes': [], + } + + content_id_generator = translation_domain.ContentIdGenerator() + self.assertEqual(exploration.to_player_dict(), { + 'init_state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'title': exp_title, + 'objective': feconf.DEFAULT_EXPLORATION_OBJECTIVE, + 'states': { + feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict( + feconf.DEFAULT_INIT_STATE_CONTENT_STR, + feconf.DEFAULT_INIT_STATE_NAME, True, content_id_generator), + second_state_name: _get_default_state_dict( + '', second_state_name, False, content_id_generator), }, - 'state_name': 'Introduction', - 'old_value': {}, - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args' - })] + 'param_changes': [], + 'param_specs': {}, + 'language_code': 'en', + 'correctness_feedback_enabled': True, + 'next_content_id_index': content_id_generator.next_content_id_index + }) - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 1, change_list_3) - self.assertEqual(changes_are_mergeable, True) - # Creating second exploration to test the scenario - # when changes to same properties are made in two - # different states. 
- self.save_new_valid_exploration( - self.EXP_1_ID, self.owner_id, end_state_name='End') +class StateOperationsUnitTests(test_utils.GenericTestBase): + """Test methods operating on states.""" - rights_manager.publish_exploration(self.owner, self.EXP_1_ID) + def test_delete_state(self) -> None: + """Test deletion of states.""" + exploration = exp_domain.Exploration.create_default_exploration('eid') + exploration.add_states(['first state']) - # Using the old change_list_3 here because they already covers - # the changes related to interaction in first state. - exp_services.update_exploration( - self.owner_id, self.EXP_1_ID, change_list_3, 'Changed Interaction') + with self.assertRaisesRegex( + ValueError, 'Cannot delete initial state' + ): + exploration.delete_state(exploration.init_state_name) - # Changes related to interaction in the second state - # to check for mergeability. - change_list_4 = [exp_domain.ExplorationChange({ - 'state_name': 'End', - 'cmd': 'edit_state_property', - 'new_value': None, - 'old_value': 'EndExploration', - 'property_name': 'widget_id' - }), exp_domain.ExplorationChange({ - 'state_name': 'End', - 'cmd': 'edit_state_property', - 'new_value': {}, - 'old_value': { - 'recommendedExplorationIds': { - 'value': [] - } - }, - 'property_name': 'widget_customization_args' - }), exp_domain.ExplorationChange({ - 'state_name': 'End', - 'cmd': 'edit_state_property', - 'new_value': 'NumericInput', - 'old_value': None, - 'property_name': 'widget_id' - }), exp_domain.ExplorationChange({ - 'state_name': 'End', - 'cmd': 'edit_state_property', - 'new_value': { - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'labelled_as_correct': False, - 'param_changes': [], - 'feedback': { - 'html': '', - 'content_id': 'default_outcome' - } - }, - 'old_value': None, - 'property_name': 'default_outcome' - }), exp_domain.ExplorationChange({ - 'state_name': 'End', - 'cmd': 'edit_state_property', - 'new_value': 1, - 'old_value': 
0, - 'property_name': 'next_content_id_index' - }), exp_domain.ExplorationChange({ - 'state_name': 'End', - 'cmd': 'edit_state_property', - 'new_value': [{ - 'outcome': { - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'labelled_as_correct': False, - 'param_changes': [], - 'feedback': { - 'html': '

    Feedback

    ', - 'content_id': 'feedback_0' - } - }, - 'rule_specs': [{ - 'inputs': { - 'x': 60 - }, - 'rule_type': 'IsLessThanOrEqualTo' - }], - 'tagged_skill_misconception_id': None, - 'training_data': [] - }], - 'old_value': [], - 'property_name': 'answer_groups' - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'state_name': 'End', - 'property_name': 'solicit_answer_details', - 'new_value': True - })] - changes_are_mergeable_1 = exp_services.are_changes_mergeable( - self.EXP_1_ID, 1, change_list_4) - self.assertEqual(changes_are_mergeable_1, True) + exploration.add_states(['second state']) - def test_changes_are_not_mergeable_when_interaction_id_changes_conflict(self): # pylint: disable=line-too-long - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') + interaction = exploration.states['first state'].interaction - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + default_outcome_for_first_state = interaction.default_outcome + assert default_outcome_for_first_state is not None + default_outcome_for_first_state.dest_if_really_stuck = 'second state' - # Changes to the properties affected by or affecting - # interaction id and in interaction_id itself. 
- change_list_2 = [exp_domain.ExplorationChange({ - 'new_value': None, - 'state_name': 'Introduction', - 'old_value': 'TextInput', - 'cmd': 'edit_state_property', - 'property_name': 'widget_id' - }), exp_domain.ExplorationChange({ - 'new_value': {}, - 'state_name': 'Introduction', - 'old_value': { - 'rows': { - 'value': 1 - }, - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': '' - } - } - }, - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args' - }), exp_domain.ExplorationChange({ - 'new_value': 2, - 'state_name': 'Introduction', - 'old_value': 1, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index' - }), exp_domain.ExplorationChange({ - 'new_value': 'Continue', - 'state_name': 'Introduction', - 'old_value': None, - 'cmd': 'edit_state_property', - 'property_name': 'widget_id' - }), exp_domain.ExplorationChange({ - 'new_value': { - 'buttonText': { - 'value': { - 'content_id': 'ca_buttonText_1', - 'unicode_str': 'Continue' - } - } - }, - 'state_name': 'Introduction', - 'old_value': {}, - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args' - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Changed Contents and Hint') + exploration.delete_state('second state') + self.assertEqual( + default_outcome_for_first_state.dest_if_really_stuck, 'first state') - # Changes to the properties affected by or affecting - # interaction id and in interaction_id itself again - # to check that changes are not mergeable. 
- change_list_3 = [exp_domain.ExplorationChange({ - 'new_value': None, - 'state_name': 'Introduction', - 'old_value': 'TextInput', - 'cmd': 'edit_state_property', - 'property_name': 'widget_id' - }), exp_domain.ExplorationChange({ - 'new_value': {}, - 'state_name': 'Introduction', - 'old_value': { - 'rows': { - 'value': 1 - }, - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': '' - } - } - }, - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args' - }), exp_domain.ExplorationChange({ - 'new_value': 2, - 'state_name': 'Introduction', - 'old_value': 1, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index' - }), exp_domain.ExplorationChange({ - 'new_value': 'Continue', - 'state_name': 'Introduction', - 'old_value': None, - 'cmd': 'edit_state_property', - 'property_name': 'widget_id' - }), exp_domain.ExplorationChange({ - 'new_value': { - 'buttonText': { - 'value': { - 'content_id': 'ca_buttonText_1', - 'unicode_str': 'Continue' - } - } - }, - 'state_name': 'Introduction', - 'old_value': {}, - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args' - })] + with self.assertRaisesRegex(ValueError, 'fake state does not exist'): + exploration.delete_state('fake state') - changes_are_not_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 1, change_list_3) - self.assertEqual(changes_are_not_mergeable, False) - def test_changes_are_mergeable_when_customization_args_changes_do_not_conflict(self): # pylint: disable=line-too-long - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') +class HtmlCollectionTests(test_utils.GenericTestBase): + """Test method to obtain all html strings.""" + + def test_all_html_strings_are_collected(self) -> None: + + exploration = exp_domain.Exploration.create_default_exploration( + 'eid', title='title', category='category') + content_id_generator = translation_domain.ContentIdGenerator( + 
exploration.next_content_id_index + ) + exploration.add_states(['state1', 'state2', 'state3', 'state4']) + state1 = exploration.states['state1'] + state2 = exploration.states['state2'] + state3 = exploration.states['state3'] + state4 = exploration.states['state4'] + content1_dict: state_domain.SubtitledHtmlDict = { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '
    Hello, this is state1
    ' + } + content2_dict: state_domain.SubtitledHtmlDict = { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '
    Hello, this is state2
    ' + } + content3_dict: state_domain.SubtitledHtmlDict = { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '

    Hello, this is state3

    ' + } + content4_dict: state_domain.SubtitledHtmlDict = { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '

    Hello, this is state4

    ' + } + state1.update_content( + state_domain.SubtitledHtml.from_dict(content1_dict)) + state2.update_content( + state_domain.SubtitledHtml.from_dict(content2_dict)) + state3.update_content( + state_domain.SubtitledHtml.from_dict(content3_dict)) + state4.update_content( + state_domain.SubtitledHtml.from_dict(content4_dict)) - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + self.set_interaction_for_state( + state1, 'TextInput', content_id_generator) + self.set_interaction_for_state( + state2, 'MultipleChoiceInput', content_id_generator) + self.set_interaction_for_state( + state3, 'ItemSelectionInput', content_id_generator) + self.set_interaction_for_state( + state4, 'DragAndDropSortInput', content_id_generator) - # Changes in the properties which aren't affected by - # customization args or doesn't affects customization_args. - change_list = [exp_domain.ExplorationChange({ - 'new_value': { - 'content_id': 'content', - 'html': '

    This is the first state.

    ' - }, - 'state_name': 'Introduction', - 'old_value': { - 'content_id': 'content', - 'html': '' - }, - 'cmd': 'edit_state_property', - 'property_name': 'content' - }), exp_domain.ExplorationChange({ - 'new_value': [{ - 'hint_content': { - 'content_id': 'hint_1', - 'html': '

    This is a first hint.

    ' - } - }], - 'state_name': 'Introduction', - 'old_value': [], - 'cmd': 'edit_state_property', - 'property_name': 'hints' - }), exp_domain.ExplorationChange({ - 'new_value': 2, - 'state_name': 'Introduction', - 'old_value': 1, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index' - }), exp_domain.ExplorationChange({ - 'new_value': [{ - 'hint_content': { - 'content_id': 'hint_1', - 'html': '

    This is a first hint.

    ' - } - }, { - 'hint_content': { - 'content_id': 'hint_2', - 'html': '

    This is the second hint.

    ' - } - }], - 'state_name': 'Introduction', - 'old_value': [{ - 'hint_content': { - 'content_id': 'hint_1', - 'html': '

    This is a first hint.

    ' - } - }], - 'cmd': 'edit_state_property', - 'property_name': 'hints' - }), exp_domain.ExplorationChange({ - 'new_value': 3, - 'state_name': 'Introduction', - 'old_value': 2, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index' - }), exp_domain.ExplorationChange({ - 'new_value': { - 'content_id': 'content', - 'html': '

    Congratulations, you have finished!

    ' - }, - 'state_name': 'End', - 'old_value': { - 'content_id': 'content', - 'html': '' + ca_placeholder_value_dict: state_domain.SubtitledUnicodeDict = { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='placeholder'), + 'unicode_str': 'Enter here.' + } + customization_args_dict1: Dict[ + str, Dict[str, Union[state_domain.SubtitledUnicodeDict, int]] + ] = { + 'placeholder': { + 'value': ca_placeholder_value_dict }, - 'cmd': 'edit_state_property', - 'property_name': 'content' - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Changed Contents and Hints') + 'rows': {'value': 1}, + 'catchMisspellings': { + 'value': False + } + } - # Changes to the properties affecting customization_args - # or are affected by customization_args in the same state. - # This includes changes related to renaming a state in - # order to check that changes are applied even if states - # are renamed. - change_list_2 = [exp_domain.ExplorationChange({ - 'cmd': 'rename_state', - 'new_state_name': 'Intro-rename', - 'old_state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': 'Introduction', - 'property_name': 'init_state_name', - 'new_value': 'Intro-rename', - 'cmd': 'edit_exploration_property' - }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': { - 'placeholder': - { - 'value': - { - 'content_id': 'ca_placeholder_0', - 'unicode_str': '' - } - }, - 'rows': { - 'value': 1 - } - }, - 'property_name': 'widget_customization_args', - 'new_value': + choices_subtitled_html_dicts: List[state_domain.SubtitledHtmlDict] = [ { - 'placeholder': - { - 'value': - { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'Placeholder text' - } - }, - 'rows': - { - 'value': 2 - } + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': '

    This is value1 for MultipleChoice

    ' }, - 'cmd': 'edit_state_property' - }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': 'TextInput', - 'property_name': 'widget_id', - 'new_value': None, - 'cmd': 'edit_state_property' - }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': { - 'placeholder': - { - 'value': - { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'Placeholder text' - } - }, - 'rows': - { - 'value': 2 - } - }, - 'property_name': 'widget_customization_args', - 'new_value': {}, - 'cmd': 'edit_state_property' - }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': 1, - 'property_name': 'next_content_id_index', - 'new_value': 3, - 'cmd': 'edit_state_property' - }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': None, - 'property_name': 'widget_id', - 'new_value': 'NumericInput', - 'cmd': 'edit_state_property' - }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': '

    This is value2 for MultipleChoice

    ' + } + ] + customization_args_dict2: Dict[ + str, Dict[str, Union[List[state_domain.SubtitledHtmlDict], bool]] + ] = { + 'choices': {'value': choices_subtitled_html_dicts}, + 'showChoicesInShuffledOrder': {'value': True} + } + + choices_subtitled_html_dicts = [ { - 'requireNonnegativeInput': - { - 'value': True - } + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': '

    This is value1 for ItemSelection

    ' }, - 'property_name': 'widget_customization_args', - 'new_value': { - 'requireNonnegativeInput': - { - 'value': False - } + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': '

    This is value2 for ItemSelection

    ' }, - 'cmd': 'edit_state_property' - }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': 3, - 'property_name': 'next_content_id_index', - 'new_value': 4, - 'cmd': 'edit_state_property' - }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': [], - 'property_name': 'answer_groups', - 'new_value': + { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': '

    This is value3 for ItemSelection

    ' + } + ] + customization_args_dict3: Dict[ + str, Dict[str, Union[List[state_domain.SubtitledHtmlDict], int]] + ] = { + 'choices': {'value': choices_subtitled_html_dicts}, + 'minAllowableSelectionCount': {'value': 1}, + 'maxAllowableSelectionCount': {'value': 2} + } + + choices_subtitled_html_dicts = [ + { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': '

    This is value1 for DragAndDropSortInput

    ' + }, + { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': '

    This is value2 for DragAndDropSortInput

    ' + } + ] + customization_args_dict4: Dict[ + str, Dict[str, Union[List[state_domain.SubtitledHtmlDict], bool]] + ] = { + 'choices': {'value': choices_subtitled_html_dicts}, + 'allowMultipleItemsInSamePosition': {'value': True} + } + + state1.update_interaction_customization_args(customization_args_dict1) + state2.update_interaction_customization_args(customization_args_dict2) + state3.update_interaction_customization_args(customization_args_dict3) + state4.update_interaction_customization_args(customization_args_dict4) + + default_outcome = state_domain.Outcome( + 'state2', None, state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + '

    Default outcome for state1

    '), + False, [], None, None + ) + state1.update_interaction_default_outcome(default_outcome) + + hint_list2 = [ + state_domain.Hint( + state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.HINT), + '

    Hello, this is html1 for state2

    ' + ) + ), + state_domain.Hint( + state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.HINT), + '

    Hello, this is html2 for state2

    ' + ) + ), + ] + state2.update_interaction_hints(hint_list2) + + solution_dict: state_domain.SolutionDict = { + 'answer_is_exclusive': True, + 'correct_answer': 'Answer1', + 'explanation': { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.SOLUTION), + 'html': '

    This is solution for state1

    ' + } + } + # Ruling out the possibility of None for mypy type checking. + assert state1.interaction.id is not None + solution = state_domain.Solution.from_dict( + state1.interaction.id, solution_dict) + state1.update_interaction_solution(solution) + + state_answer_group_list2 = [ + state_domain.AnswerGroup( + state_domain.Outcome( + 'state1', None, state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + '

    Outcome2 for state2

    ' + ), False, [], None, None), + [ + state_domain.RuleSpec( + 'Equals', + { + 'x': 0 + }), + state_domain.RuleSpec( + 'Equals', + { + 'x': 1 + }) + ], + [], + None), + state_domain.AnswerGroup( + state_domain.Outcome( + 'state3', None, state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + '

    Outcome1 for state2

    '), + False, [], None, None), + [ + state_domain.RuleSpec( + 'Equals', + { + 'x': 0 + }) + ], + [], + None + )] + state_answer_group_list3 = [state_domain.AnswerGroup( + state_domain.Outcome( + 'state1', None, state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + '

    Outcome for state3

    '), + False, [], None, None), [ - { - 'rule_specs': - [ - { - 'inputs': - { - 'x': 50 - }, - 'rule_type': 'IsLessThanOrEqualTo' - } - ], - 'training_data': [], - 'tagged_skill_misconception_id': None, - 'outcome': + state_domain.RuleSpec( + 'Equals', { - 'feedback': - { - 'content_id': 'feedback_3', - 'html': '

    Next

    ' - }, - 'param_changes': [], - 'refresher_exploration_id': None, - 'dest': 'End', - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False - } - } + 'x': ['ca_choices_0'] + }), + state_domain.RuleSpec( + 'Equals', + { + 'x': ['ca_choices_2'] + }) ], - 'cmd': 'edit_state_property' - })] - - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 1, change_list_2) - self.assertEqual(changes_are_mergeable, True) - - # Creating second exploration to test the scenario - # when changes to same properties are made in two - # different states. - self.save_new_valid_exploration( - self.EXP_1_ID, self.owner_id, end_state_name='End') + [], + None + )] + state2.update_interaction_answer_groups(state_answer_group_list2) + state3.update_interaction_answer_groups(state_answer_group_list3) - rights_manager.publish_exploration(self.owner, self.EXP_1_ID) + expected_html_list = [ + '', + '', + '
    Hello, this is state2
    ', + '

    Outcome1 for state2

    ', + '

    Outcome2 for state2

    ', + '', + '

    Hello, this is html1 for state2

    ', + '

    Hello, this is html2 for state2

    ', + '

    This is value1 for MultipleChoice

    ', + '

    This is value2 for MultipleChoice

    ', + '
    Hello, this is state1
    ', + '

    Default outcome for state1

    ', + '

    This is solution for state1

    ', + '

    Hello, this is state3

    ', + '

    Outcome for state3

    ', + '', + '

    This is value1 for ItemSelection

    ', + '

    This is value2 for ItemSelection

    ', + '

    This is value3 for ItemSelection

    ', + '

    Hello, this is state4

    ', + '', + '

    This is value1 for DragAndDropSortInput

    ', + '

    This is value2 for DragAndDropSortInput

    ' + ] - # Using the old change_list_2 here because they already covers - # the changes related to customization args in first state. - exp_services.update_exploration( - self.owner_id, self.EXP_1_ID, change_list_2, - 'Changed Interactions and Customization_args in One State') + actual_outcome_list = exploration.get_all_html_content_strings() + self.assertItemsEqual(set(actual_outcome_list), set(expected_html_list)) - # Changes to the properties related to the customization args - # in the second state to check for mergeability. - change_list_3 = [exp_domain.ExplorationChange({ - 'old_value': 'EndExploration', - 'state_name': 'End', - 'property_name': 'widget_id', - 'cmd': 'edit_state_property', - 'new_value': None - }), exp_domain.ExplorationChange({ - 'old_value': { - 'recommendedExplorationIds': { - 'value': [] - } - }, - 'state_name': 'End', - 'property_name': 'widget_customization_args', - 'cmd': 'edit_state_property', - 'new_value': {} - }), exp_domain.ExplorationChange({ - 'old_value': 0, - 'state_name': 'End', - 'property_name': 'next_content_id_index', - 'cmd': 'edit_state_property', - 'new_value': 4 - }), exp_domain.ExplorationChange({ - 'old_value': None, - 'state_name': 'End', - 'property_name': 'widget_id', - 'cmd': 'edit_state_property', - 'new_value': 'ItemSelectionInput' - }), exp_domain.ExplorationChange({ - 'old_value': {}, - 'state_name': 'End', - 'property_name': 'widget_customization_args', - 'cmd': 'edit_state_property', - 'new_value': { - 'minAllowableSelectionCount': { - 'value': 1 - }, - 'choices': { - 'value': [{ - 'html': '

    A

    ', - 'content_id': 'ca_choices_0' - }, { - 'html': '

    B

    ', - 'content_id': 'ca_choices_1' - }, { - 'html': '

    C

    ', - 'content_id': 'ca_choices_2' - }, { - 'html': '

    D

    ', - 'content_id': 'ca_choices_3' - }] - }, - 'maxAllowableSelectionCount': { - 'value': 1 - } - } - }), exp_domain.ExplorationChange({ - 'old_value': None, - 'state_name': 'End', - 'property_name': 'default_outcome', - 'cmd': 'edit_state_property', - 'new_value': { - 'refresher_exploration_id': None, - 'dest': 'End', - 'missing_prerequisite_skill_id': None, - 'feedback': { - 'html': '', - 'content_id': 'default_outcome' - }, - 'param_changes': [], - 'labelled_as_correct': False - } - }), exp_domain.ExplorationChange({ - 'old_value': 4, - 'state_name': 'End', - 'property_name': 'next_content_id_index', - 'cmd': 'edit_state_property', - 'new_value': 5 - }), exp_domain.ExplorationChange({ - 'old_value': [], - 'state_name': 'End', - 'property_name': 'answer_groups', - 'cmd': 'edit_state_property', - 'new_value': - [ - { - 'training_data': [], - 'tagged_skill_misconception_id': None, - 'outcome': - { - 'refresher_exploration_id': None, - 'dest': 'End', - 'missing_prerequisite_skill_id': None, - 'feedback': - { - 'html': '

    Good

    ', - 'content_id': 'feedback_4' - }, - 'param_changes': [], - 'labelled_as_correct': False - }, - 'rule_specs': - [ - { - 'rule_type': 'Equals', - 'inputs': - { - 'x': - [ - 'ca_choices_1' - ] - } - } - ] - } - ] - })] - changes_are_mergeable_1 = exp_services.are_changes_mergeable( - self.EXP_1_ID, 1, change_list_3) - self.assertEqual(changes_are_mergeable_1, True) +class ExplorationChangesMergeabilityUnitTests( + exp_services_test.ExplorationServicesUnitTests, + test_utils.EmailTestBase): + """Test methods related to exploration changes mergeability.""" - def test_changes_are_not_mergeable_when_customization_args_changes_conflict(self): # pylint: disable=line-too-long - self.save_new_valid_exploration( + def setUp(self) -> None: + super().setUp() + exploration = self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, end_state_name='End') - + self.content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) rights_manager.publish_exploration(self.owner, self.EXP_0_ID) - # Changes in the properties which affected by or affecting - # customization_args. 
+ def append_next_content_id_index_change( + self, change_list: List[exp_domain.ExplorationChange] + ) -> List[exp_domain.ExplorationChange]: + """Appends the next_content_id_index change in the change list.""" + change_list.append(exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'next_content_id_index', + 'new_value': self.content_id_generator.next_content_id_index, + 'old_value': 0 + })) + return change_list + + def test_changes_are_mergeable_when_content_changes_do_not_conflict( + self + ) -> None: change_list = [exp_domain.ExplorationChange({ - 'cmd': 'rename_state', - 'new_state_name': 'Intro-rename', - 'old_state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': 'Introduction', - 'property_name': 'init_state_name', - 'new_value': 'Intro-rename', - 'cmd': 'edit_exploration_property' + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'First title' + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Changed title.') + + test_dict: Dict[str, str] = {} + # Making changes to properties except content. 
+ change_list_2 = [exp_domain.ExplorationChange({ + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', + 'property_name': 'widget_id', + 'new_value': None, + 'old_value': 'TextInput' }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', + 'property_name': 'widget_customization_args', + 'new_value': test_dict, 'old_value': { - 'placeholder': - { - 'value': - { - 'content_id': 'ca_placeholder_0', + 'placeholder': { + 'value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG), 'unicode_str': '' } }, - 'rows': { - 'value': 1 - } - }, - 'property_name': 'widget_customization_args', - 'new_value': - { - 'placeholder': - { - 'value': - { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'Placeholder text' - } + 'rows': { + 'value': 1 }, - 'rows': - { - 'value': 2 + 'catchMisspellings': { + 'value': False } - }, - 'cmd': 'edit_state_property' + } }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': 'TextInput', + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', 'property_name': 'widget_id', - 'new_value': None, - 'cmd': 'edit_state_property' + 'new_value': 'Continue', + 'old_value': None }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': - { - 'placeholder': - { - 'value': - { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'Placeholder text' + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', + 'property_name': 'widget_customization_args', + 'new_value': { + 'buttonText': { + 'value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG), + 'unicode_str': 'Continue' } - }, - 'rows': - { - 'value': 2 } }, - 'property_name': 'widget_customization_args', - 'new_value': {}, - 'cmd': 'edit_state_property' + 'old_value': test_dict + })] + exp_services.update_exploration( + self.owner_id, 
self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Changed Interaction.') + + # Changing content of second state. + change_list_3 = [exp_domain.ExplorationChange({ + 'property_name': 'content', + 'state_name': 'End', + 'cmd': 'edit_state_property', + 'old_value': { + 'html': '', + 'content_id': 'content_0' + }, + 'new_value': { + 'html': '

    Congratulations, you have finished!

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT) + } + })] + + # Checking that the changes can be applied when + # changing to same version. + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 3, change_list_3) + self.assertEqual(changes_are_mergeable, True) + + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 2, change_list_3) + self.assertEqual(changes_are_mergeable, True) + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_3), + 'Changed content of End state.') + + # Changing content of first state. + change_list_4 = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Introduction', + 'new_state_name': 'Renamed state' }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': 1, - 'property_name': 'next_content_id_index', - 'new_value': 3, - 'cmd': 'edit_state_property' + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Renamed state', + 'new_state_name': 'Renamed state again' }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': None, - 'property_name': 'widget_id', - 'new_value': 'NumericInput', - 'cmd': 'edit_state_property' + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Renamed state again', + 'new_state_name': 'Introduction' }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': - { - 'requireNonnegativeInput': - { - 'value': True - } + 'property_name': 'content', + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', + 'old_value': { + 'html': '', + 'content_id': 'content_0' }, - 'property_name': 'widget_customization_args', - 'new_value': - { - 'requireNonnegativeInput': - { - 'value': False - } + 'new_value': { + 'html': '

    Hello

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT) + } + })] + + # Checking for the mergability of the fourth change list. + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 2, change_list_4) + self.assertEqual(changes_are_mergeable, True) + + # Checking for the mergability when working on latest version. + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 3, change_list_4) + self.assertEqual(changes_are_mergeable, True) + + def test_changes_are_not_mergeable_when_content_changes_conflict( + self + ) -> None: + # Making changes to content of the first state. + change_list = [exp_domain.ExplorationChange({ + 'property_name': 'content', + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', + 'old_value': { + 'html': '', + 'content_id': 'content_0' }, - 'cmd': 'edit_state_property' + 'new_value': { + 'html': '

    Content 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT) + } + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Changed Content.') + + # Changing content of the same state to check that + # changes are not mergeable. + change_list_2 = [exp_domain.ExplorationChange({ + 'property_name': 'content', + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', + 'old_value': { + 'html': '', + 'content_id': 'content_0' + }, + 'new_value': { + 'html': '

    Content 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT) + } + })] + + # Checking for the mergability of the second change list. + changes_are_not_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 1, change_list_2) + self.assertEqual(changes_are_not_mergeable, False) + + def test_changes_are_mergeable_when_interaction_id_changes_do_not_conflict( + self + ) -> None: + # Making changes in the properties which are + # not related to the interaction id. + change_list_2 = [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '

    This is the first state.

    ' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': 3, - 'property_name': 'next_content_id_index', - 'new_value': 4, - 'cmd': 'edit_state_property' + 'new_value': [{ + 'hint_content': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    This is a first hint.

    ' + } + }], + 'state_name': 'Introduction', + 'old_value': ['old_value'], + 'cmd': 'edit_state_property', + 'property_name': 'hints' }), exp_domain.ExplorationChange({ - 'state_name': 'Intro-rename', - 'old_value': [], - 'property_name': 'answer_groups', - 'new_value': - [ - { - 'rule_specs': - [ - { - 'inputs': - { - 'x': 50 - }, - 'rule_type': 'IsLessThanOrEqualTo' - } - ], - 'training_data': [], - 'tagged_skill_misconception_id': None, - 'outcome': - { - 'feedback': - { - 'content_id': 'feedback_3', - 'html': '

    Next

    ' - }, - 'param_changes': [], - 'refresher_exploration_id': None, - 'dest': 'End', - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False - } + 'new_value': [{ + 'hint_content': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    This is a first hint.

    ' } - ], - 'cmd': 'edit_state_property' + }, { + 'hint_content': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    This is the second hint.

    ' + } + }], + 'state_name': 'Introduction', + 'old_value': [{ + 'hint_content': { + 'content_id': 'hint_1', + 'html': '

    This is a first hint.

    ' + } + }], + 'cmd': 'edit_state_property', + 'property_name': 'hints' + }), exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '

    Congratulations, you have finished!

    ' + }, + 'state_name': 'End', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Changed Customization Args and related properties again') + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Changed Contents and Hint') - # Changes to the customization_args in same - # state again to check that changes are not mergeable. - change_list_2 = [exp_domain.ExplorationChange({ + test_dict: Dict[str, str] = {} + # Changes to the properties affected by or affecting + # interaction id and in interaction_id itself. + change_list_3 = [exp_domain.ExplorationChange({ + 'new_value': None, + 'state_name': 'Introduction', + 'old_value': 'TextInput', + 'cmd': 'edit_state_property', + 'property_name': 'widget_id' + }), exp_domain.ExplorationChange({ + 'new_value': test_dict, 'state_name': 'Introduction', 'old_value': { - 'placeholder': - { - 'value': - { + 'rows': { + 'value': 1 + }, + 'placeholder': { + 'value': { 'content_id': 'ca_placeholder_0', 'unicode_str': '' } }, - 'rows': { - 'value': 1 + 'catchMisspellings': { + 'value': False } }, - 'property_name': 'widget_customization_args', - 'new_value': - { - 'placeholder': - { - 'value': - { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'Placeholder text 2.' 
+ 'cmd': 'edit_state_property', + 'property_name': 'widget_customization_args' + }), exp_domain.ExplorationChange({ + 'new_value': 'Continue', + 'state_name': 'Introduction', + 'old_value': None, + 'cmd': 'edit_state_property', + 'property_name': 'widget_id' + }), exp_domain.ExplorationChange({ + 'new_value': { + 'buttonText': { + 'value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG), + 'unicode_str': 'Continue' } - }, - 'rows': - { - 'value': 2 } }, - 'cmd': 'edit_state_property' + 'state_name': 'Introduction', + 'old_value': test_dict, + 'cmd': 'edit_state_property', + 'property_name': 'widget_customization_args' })] - changes_are_not_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 1, change_list_2) - self.assertEqual(changes_are_not_mergeable, False) + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 1, change_list_3) + self.assertEqual(changes_are_mergeable, True) - def test_changes_are_mergeable_when_answer_groups_changes_do_not_conflict(self): # pylint: disable=line-too-long + # Creating second exploration to test the scenario + # when changes to same properties are made in two + # different states. self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') + self.EXP_1_ID, self.owner_id, end_state_name='End') - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + rights_manager.publish_exploration(self.owner, self.EXP_1_ID) - # Adding answer_groups and solutions to the existing state. - change_list = [exp_domain.ExplorationChange({ + # Using the old change_list_3 here because they already covers + # the changes related to interaction in first state. + exp_services.update_exploration( + self.owner_id, self.EXP_1_ID, + self.append_next_content_id_index_change(change_list_3), + 'Changed Interaction') + + # Changes related to interaction in the second state + # to check for mergeability. 
+ change_list_4 = [exp_domain.ExplorationChange({ + 'state_name': 'End', 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 1, - 'state_name': 'Introduction', - 'new_value': 3 + 'new_value': None, + 'old_value': 'EndExploration', + 'property_name': 'widget_id' }), exp_domain.ExplorationChange({ + 'state_name': 'End', + 'cmd': 'edit_state_property', + 'new_value': test_dict, + 'old_value': { + 'recommendedExplorationIds': { + 'value': [] + } + }, + 'property_name': 'widget_customization_args' + }), exp_domain.ExplorationChange({ + 'state_name': 'End', + 'cmd': 'edit_state_property', + 'new_value': 'NumericInput', + 'old_value': None, + 'property_name': 'widget_id' + }), exp_domain.ExplorationChange({ + 'state_name': 'End', + 'cmd': 'edit_state_property', + 'new_value': { + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'labelled_as_correct': False, + 'param_changes': [], + 'feedback': { + 'html': '', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + } + }, + 'old_value': None, + 'property_name': 'default_outcome' + }), exp_domain.ExplorationChange({ + 'state_name': 'End', 'cmd': 'edit_state_property', - 'property_name': 'answer_groups', - 'old_value': [], - 'state_name': 'Introduction', 'new_value': [{ - 'rule_specs': [{ - 'rule_type': 'StartsWith', - 'inputs': { - 'x': { - 'contentId': 'rule_input_2', - 'normalizedStrSet': ['Hello', 'Hola'] - } - } - }], - 'tagged_skill_misconception_id': None, 'outcome': { - 'labelled_as_correct': False, - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, + 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, + 'labelled_as_correct': False, 'param_changes': [], - 'refresher_exploration_id': None + 'feedback': { + 'html': '

    Feedback

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + } }, + 'rule_specs': [{ + 'inputs': { + 'x': 60 + }, + 'rule_type': 'IsLessThanOrEqualTo' + }], + 'tagged_skill_misconception_id': None, 'training_data': [] - }] + }], + 'old_value': ['old_value'], + 'property_name': 'answer_groups' }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', - 'property_name': 'hints', - 'old_value': [], + 'state_name': 'End', + 'property_name': 'solicit_answer_details', + 'new_value': True + })] + changes_are_mergeable_1 = exp_services.are_changes_mergeable( + self.EXP_1_ID, 1, change_list_4) + self.assertEqual(changes_are_mergeable_1, True) + + def test_changes_are_not_mergeable_when_interaction_id_changes_conflict( + self + ) -> None: + test_dict: Dict[str, str] = {} + # Changes to the properties affected by or affecting + # interaction id and in interaction_id itself. + change_list_2 = [exp_domain.ExplorationChange({ + 'new_value': None, 'state_name': 'Introduction', - 'new_value': [{ - 'hint_content': { - 'content_id': 'hint_3', - 'html': '

    Hint 1.

    ' + 'old_value': 'TextInput', + 'cmd': 'edit_state_property', + 'property_name': 'widget_id' + }), exp_domain.ExplorationChange({ + 'new_value': test_dict, + 'state_name': 'Introduction', + 'old_value': { + 'rows': { + 'value': 1 + }, + 'placeholder': { + 'value': { + 'content_id': 'ca_placeholder_0', + 'unicode_str': '' + } + }, + 'catchMisspellings': { + 'value': False } - }] + }, + 'cmd': 'edit_state_property', + 'property_name': 'widget_customization_args' }), exp_domain.ExplorationChange({ + 'new_value': 'Continue', + 'state_name': 'Introduction', + 'old_value': None, 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 3, + 'property_name': 'widget_id' + }), exp_domain.ExplorationChange({ + 'new_value': { + 'buttonText': { + 'value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG), + 'unicode_str': 'Continue' + } + } + }, + 'state_name': 'Introduction', + 'old_value': test_dict, + 'cmd': 'edit_state_property', + 'property_name': 'widget_customization_args' + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Changed Contents and Hint') + + # Changes to the properties affected by or affecting + # interaction id and in interaction_id itself again + # to check that changes are not mergeable. 
+ change_list_3 = [exp_domain.ExplorationChange({ + 'new_value': None, 'state_name': 'Introduction', - 'new_value': 4 + 'old_value': 'TextInput', + 'cmd': 'edit_state_property', + 'property_name': 'widget_id' }), exp_domain.ExplorationChange({ + 'new_value': test_dict, + 'state_name': 'Introduction', + 'old_value': { + 'rows': { + 'value': 1 + }, + 'placeholder': { + 'value': { + 'content_id': 'ca_placeholder_0', + 'unicode_str': '' + } + }, + 'catchMisspellings': { + 'value': False + } + }, 'cmd': 'edit_state_property', - 'property_name': 'solution', - 'old_value': None, + 'property_name': 'widget_customization_args' + }), exp_domain.ExplorationChange({ + 'new_value': 'Continue', 'state_name': 'Introduction', + 'old_value': None, + 'cmd': 'edit_state_property', + 'property_name': 'widget_id' + }), exp_domain.ExplorationChange({ 'new_value': { - 'correct_answer': 'Hello Aryaman!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    Explanation.

    ' - }, - 'answer_is_exclusive': False - } + 'buttonText': { + 'value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG), + 'unicode_str': 'Continue' + } + } + }, + 'state_name': 'Introduction', + 'old_value': test_dict, + 'cmd': 'edit_state_property', + 'property_name': 'widget_customization_args' })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Added answer groups and solution') + changes_are_not_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 1, change_list_3) + self.assertEqual(changes_are_not_mergeable, False) - # Changes to the properties that are not related to - # the answer_groups. These changes are done to check - # when the changes are made in unrelated properties, - # they can be merged easily. - change_list_2 = [exp_domain.ExplorationChange({ + def test_changes_are_mergeable_when_customization_args_changes_do_not_conflict( # pylint: disable=line-too-long + self + ) -> None: + test_dict: Dict[str, str] = {} + # Changes in the properties which aren't affected by + # customization args or doesn't affects customization_args. + change_list = [exp_domain.ExplorationChange({ 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    This is the first state.

    ' }, 'state_name': 'Introduction', 'old_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': '' }, 'cmd': 'edit_state_property', @@ -6359,269 +13500,204 @@ def test_changes_are_mergeable_when_answer_groups_changes_do_not_conflict(self): }), exp_domain.ExplorationChange({ 'new_value': [{ 'hint_content': { - 'content_id': 'hint_3', - 'html': '

    Hint 1.

    ' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    This is a first hint.

    ' + } + }], + 'state_name': 'Introduction', + 'old_value': ['old_value'], + 'cmd': 'edit_state_property', + 'property_name': 'hints' + }), exp_domain.ExplorationChange({ + 'new_value': [{ + 'hint_content': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    This is a first hint.

    ' } }, { 'hint_content': { - 'content_id': 'hint_4', - 'html': '

    This is a first hint.

    ' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    This is the second hint.

    ' } }], 'state_name': 'Introduction', 'old_value': [{ 'hint_content': { - 'content_id': 'hint_3', - 'html': '

    Hint 1.

    ' + 'content_id': 'hint_1', + 'html': '

    This is a first hint.

    ' } }], 'cmd': 'edit_state_property', 'property_name': 'hints' - }), exp_domain.ExplorationChange({ - 'new_value': 5, - 'state_name': 'Introduction', - 'old_value': 4, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index' }), exp_domain.ExplorationChange({ 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), 'html': '

    Congratulations, you have finished!

    ' }, 'state_name': 'End', 'old_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': '' }, 'cmd': 'edit_state_property', 'property_name': 'content' })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Changed Contents and Hint') + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Changed Contents and Hints') - change_list_3 = [exp_domain.ExplorationChange({ - 'property_name': 'default_outcome', + # Changes to the properties affecting customization_args + # or are affected by customization_args in the same state. + # This includes changes related to renaming a state in + # order to check that changes are applied even if states + # are renamed. + change_list_2 = [exp_domain.ExplorationChange({ + 'cmd': 'rename_state', + 'new_state_name': 'Intro-rename', + 'old_state_name': 'Introduction' + }), exp_domain.ExplorationChange({ + 'old_value': 'Introduction', + 'property_name': 'init_state_name', + 'new_value': 'Intro-rename', + 'cmd': 'edit_exploration_property' + }), exp_domain.ExplorationChange({ + 'state_name': 'Intro-rename', 'old_value': { - 'labelled_as_correct': False, - 'missing_prerequisite_skill_id': None, - 'refresher_exploration_id': None, - 'feedback': { - 'content_id': 'default_outcome', - 'html': '' + 'placeholder': + { + 'value': + { + 'content_id': 'ca_placeholder_0', + 'unicode_str': '' + } }, - 'param_changes': [ - - ], - 'dest': 'End' - }, - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'new_value': { - 'labelled_as_correct': False, - 'missing_prerequisite_skill_id': None, - 'refresher_exploration_id': None, - 'feedback': { - 'content_id': 'default_outcome', - 'html': '

    Feedback 1.

    ' + 'rows': { + 'value': 1 }, - 'param_changes': [ - - ], - 'dest': 'End' - } - })] - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list_3) - self.assertEqual(changes_are_mergeable, True) - - # Changes to the answer_groups and the properties that - # affects or are affected by answer_groups. - change_list_4 = [exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': [{ - 'outcome': { - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'param_changes': [], - 'labelled_as_correct': False, - 'refresher_exploration_id': None + 'catchMisspellings': { + 'value': False + } + }, + 'property_name': 'widget_customization_args', + 'new_value': + { + 'placeholder': + { + 'value': + { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG), + 'unicode_str': 'Placeholder text' + } }, - 'training_data': [], - 'rule_specs': [{ - 'inputs': { - 'x': { - 'normalizedStrSet': [ - 'Hello', - 'Hola', - 'Hi'], - 'contentId': 'rule_input_2' - } - }, - 'rule_type': 'StartsWith' - }], - 'tagged_skill_misconception_id': None - }], - 'cmd': 'edit_state_property', - 'property_name': 'answer_groups', - 'old_value': [{ - 'outcome': { - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'param_changes': [], - 'labelled_as_correct': False, - 'refresher_exploration_id': None + 'rows': + { + 'value': 2 }, - 'training_data': [], - 'rule_specs': [{ - 'inputs': { - 'x': { - 'normalizedStrSet': ['Hello', 'Hola'], - 'contentId': 'rule_input_2' - } - }, - 'rule_type': 'StartsWith' - }], - 'tagged_skill_misconception_id': None - }] - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': { - 'answer_is_exclusive': False, - 'correct_answer': 'Hi Aryaman!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    Explanation.

    ' + 'catchMisspellings': { + 'value': False } }, - 'cmd': 'edit_state_property', - 'property_name': 'solution', - 'old_value': { - 'answer_is_exclusive': False, - 'correct_answer': 'Hello Aryaman!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    Explanation.

    ' - } - } + 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': 6, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 4 + 'state_name': 'Intro-rename', + 'old_value': 'TextInput', + 'property_name': 'widget_id', + 'new_value': None, + 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': [{ - 'outcome': { - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'param_changes': [], - 'labelled_as_correct': False, - 'refresher_exploration_id': None - }, - 'training_data': [], - 'rule_specs': [{ - 'inputs': { - 'x': { - 'normalizedStrSet': ['Hello', 'Hola', 'Hi'], - 'contentId': 'rule_input_2' - } - }, - 'rule_type': 'StartsWith' - }], - 'tagged_skill_misconception_id': None - }, { - 'outcome': { - 'feedback': { - 'content_id': 'feedback_4', - 'html': '' - }, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'param_changes': [], - 'labelled_as_correct': False, - 'refresher_exploration_id': None - }, - 'training_data': [], - 'rule_specs': [{ - 'inputs': { - 'x': { - 'normalizedStrSet': ['Oppia', 'GSoC'], - 'contentId': 'rule_input_5' - } - }, - 'rule_type': 'Contains' - }], - 'tagged_skill_misconception_id': None - }], - 'cmd': 'edit_state_property', - 'property_name': 'answer_groups', - 'old_value': [{ - 'outcome': { - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'param_changes': [], - 'labelled_as_correct': False, - 'refresher_exploration_id': None - }, - 'training_data': [], - 'rule_specs': [{ - 'inputs': { - 'x': { - 'normalizedStrSet': ['Hello', 'Hola', 'Hi'], - 'contentId': 'rule_input_2' - } - }, - 'rule_type': 'StartsWith' - }], - 'tagged_skill_misconception_id': None - }] + 'state_name': 'Intro-rename', + 'old_value': + { + 'placeholder': + { + 'value': + { + 'content_id': 'ca_placeholder_0', + 'unicode_str': 'Placeholder text' + } + }, + 'rows': + { + 'value': 2 + }, + 'catchMisspellings': { + 'value': False + } + }, + 'property_name': 'widget_customization_args', + 'new_value': test_dict, + 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': { - 'answer_is_exclusive': False, - 'correct_answer': 'Oppia is selected for GSoC.', - 'explanation': { - 'content_id': 'solution', - 'html': '

    Explanation.

    ' + 'state_name': 'Intro-rename', + 'old_value': None, + 'property_name': 'widget_id', + 'new_value': 'NumericInput', + 'cmd': 'edit_state_property' + }), exp_domain.ExplorationChange({ + 'state_name': 'Intro-rename', + 'old_value': + { + 'requireNonnegativeInput': + { + 'value': True } }, - 'cmd': 'edit_state_property', - 'property_name': 'solution', - 'old_value': { - 'answer_is_exclusive': False, - 'correct_answer': 'Hi Aryaman!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    Explanation.

    ' + 'property_name': 'widget_customization_args', + 'new_value': + { + 'requireNonnegativeInput': + { + 'value': False } - } + }, + 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'state_name': 'Introduction', - 'property_name': 'solicit_answer_details', - 'new_value': True + 'state_name': 'Intro-rename', + 'old_value': ['old_value'], + 'property_name': 'answer_groups', + 'new_value': + [ + { + 'rule_specs': + [ + { + 'inputs': + { + 'x': 50 + }, + 'rule_type': 'IsLessThanOrEqualTo' + } + ], + 'training_data': [], + 'tagged_skill_misconception_id': None, + 'outcome': + { + 'feedback': + { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'html': '

    Next

    ' + }, + 'param_changes': [], + 'refresher_exploration_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False + } + } + ], + 'cmd': 'edit_state_property' })] changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list_4) + self.EXP_0_ID, 1, change_list_2) self.assertEqual(changes_are_mergeable, True) # Creating second exploration to test the scenario @@ -6632,20 +13708,16 @@ def test_changes_are_mergeable_when_answer_groups_changes_do_not_conflict(self): rights_manager.publish_exploration(self.owner, self.EXP_1_ID) - # Using the old change_list_2 and change_list_3 here - # because they already covers the changes related to - # the answer_groups in the first state. - exp_services.update_exploration( - self.owner_id, self.EXP_1_ID, change_list_2, - 'Added Answer Group and Solution in One state') - + # Using the old change_list_2 here because they already covers + # the changes related to customization args in first state. exp_services.update_exploration( - self.owner_id, self.EXP_1_ID, change_list_3, - 'Changed Answer Groups and Solutions in One State') + self.owner_id, self.EXP_1_ID, + self.append_next_content_id_index_change(change_list_2), + 'Changed Interactions and Customization_args in One State') - # Changes to the properties related to the answer_groups + # Changes to the properties related to the customization args # in the second state to check for mergeability. 
- change_list_5 = [exp_domain.ExplorationChange({ + change_list_3 = [exp_domain.ExplorationChange({ 'old_value': 'EndExploration', 'state_name': 'End', 'property_name': 'widget_id', @@ -6657,128 +13729,325 @@ def test_changes_are_mergeable_when_answer_groups_changes_do_not_conflict(self): 'value': [] } }, - 'state_name': 'End', + 'state_name': 'End', + 'property_name': 'widget_customization_args', + 'cmd': 'edit_state_property', + 'new_value': test_dict + }), exp_domain.ExplorationChange({ + 'old_value': None, + 'state_name': 'End', + 'property_name': 'widget_id', + 'cmd': 'edit_state_property', + 'new_value': 'ItemSelectionInput' + }), exp_domain.ExplorationChange({ + 'old_value': test_dict, + 'state_name': 'End', + 'property_name': 'widget_customization_args', + 'cmd': 'edit_state_property', + 'new_value': { + 'minAllowableSelectionCount': { + 'value': 1 + }, + 'choices': { + 'value': [{ + 'html': '

    A

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.RULE) + }, { + 'html': '

    B

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.RULE) + }, { + 'html': '

    C

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.RULE) + }, { + 'html': '

    D

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.RULE) + }] + }, + 'maxAllowableSelectionCount': { + 'value': 1 + } + } + }), exp_domain.ExplorationChange({ + 'old_value': None, + 'state_name': 'End', + 'property_name': 'default_outcome', + 'cmd': 'edit_state_property', + 'new_value': { + 'refresher_exploration_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'missing_prerequisite_skill_id': None, + 'feedback': { + 'html': '', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + }, + 'param_changes': [], + 'labelled_as_correct': False + } + }), exp_domain.ExplorationChange({ + 'old_value': ['old_value'], + 'state_name': 'End', + 'property_name': 'answer_groups', + 'cmd': 'edit_state_property', + 'new_value': + [ + { + 'training_data': [], + 'tagged_skill_misconception_id': None, + 'outcome': + { + 'refresher_exploration_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'missing_prerequisite_skill_id': None, + 'feedback': + { + 'html': '

    Good

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + }, + 'param_changes': [], + 'labelled_as_correct': False + }, + 'rule_specs': + [ + { + 'rule_type': 'Equals', + 'inputs': + { + 'x': + [ + 'ca_choices_1' + ] + } + } + ] + } + ] + })] + + changes_are_mergeable_1 = exp_services.are_changes_mergeable( + self.EXP_1_ID, 1, change_list_3) + self.assertEqual(changes_are_mergeable_1, True) + + def test_changes_are_not_mergeable_when_customization_args_changes_conflict( + self + ) -> None: + test_dict: Dict[str, str] = {} + # Changes in the properties which affected by or affecting + # customization_args. + change_list = [exp_domain.ExplorationChange({ + 'cmd': 'rename_state', + 'new_state_name': 'Intro-rename', + 'old_state_name': 'Introduction' + }), exp_domain.ExplorationChange({ + 'old_value': 'Introduction', + 'property_name': 'init_state_name', + 'new_value': 'Intro-rename', + 'cmd': 'edit_exploration_property' + }), exp_domain.ExplorationChange({ + 'state_name': 'Intro-rename', + 'old_value': { + 'placeholder': + { + 'value': + { + 'content_id': 'ca_placeholder_0', + 'unicode_str': '' + } + }, + 'rows': { + 'value': 1 + }, + 'catchMisspellings': { + 'value': False + } + }, + 'property_name': 'widget_customization_args', + 'new_value': + { + 'placeholder': + { + 'value': + { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'unicode_str': 'Placeholder text' + } + }, + 'rows': + { + 'value': 2 + }, + 'catchMisspellings': { + 'value': False + } + }, + 'cmd': 'edit_state_property' + }), exp_domain.ExplorationChange({ + 'state_name': 'Intro-rename', + 'old_value': 'TextInput', + 'property_name': 'widget_id', + 'new_value': None, + 'cmd': 'edit_state_property' + }), exp_domain.ExplorationChange({ + 'state_name': 'Intro-rename', + 'old_value': + { + 'placeholder': + { + 'value': + { + 'content_id': 'ca_placeholder_0', + 'unicode_str': 'Placeholder text' + } + }, + 'rows': 
+ { + 'value': 2 + }, + 'catchMisspellings': { + 'value': False + } + }, 'property_name': 'widget_customization_args', - 'cmd': 'edit_state_property', - 'new_value': {} - }), exp_domain.ExplorationChange({ - 'old_value': 0, - 'state_name': 'End', - 'property_name': 'next_content_id_index', - 'cmd': 'edit_state_property', - 'new_value': 4 + 'new_value': test_dict, + 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ + 'state_name': 'Intro-rename', 'old_value': None, - 'state_name': 'End', 'property_name': 'widget_id', - 'cmd': 'edit_state_property', - 'new_value': 'ItemSelectionInput' + 'new_value': 'NumericInput', + 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ - 'old_value': {}, - 'state_name': 'End', + 'state_name': 'Intro-rename', + 'old_value': + { + 'requireNonnegativeInput': + { + 'value': True + } + }, 'property_name': 'widget_customization_args', - 'cmd': 'edit_state_property', - 'new_value': { - 'minAllowableSelectionCount': { - 'value': 1 - }, - 'choices': { - 'value': [{ - 'html': '

    A

    ', - 'content_id': 'ca_choices_0' - }, { - 'html': '

    B

    ', - 'content_id': 'ca_choices_1' - }, { - 'html': '

    C

    ', - 'content_id': 'ca_choices_2' - }, { - 'html': '

    D

    ', - 'content_id': 'ca_choices_3' - }] - }, - 'maxAllowableSelectionCount': { - 'value': 1 + 'new_value': + { + 'requireNonnegativeInput': + { + 'value': False } - } - }), exp_domain.ExplorationChange({ - 'old_value': None, - 'state_name': 'End', - 'property_name': 'default_outcome', - 'cmd': 'edit_state_property', - 'new_value': { - 'refresher_exploration_id': None, - 'dest': 'End', - 'missing_prerequisite_skill_id': None, - 'feedback': { - 'html': '', - 'content_id': 'default_outcome' - }, - 'param_changes': [], - 'labelled_as_correct': False - } - }), exp_domain.ExplorationChange({ - 'old_value': 4, - 'state_name': 'End', - 'property_name': 'next_content_id_index', - 'cmd': 'edit_state_property', - 'new_value': 5 + }, + 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ - 'old_value': [], - 'state_name': 'End', + 'state_name': 'Intro-rename', + 'old_value': ['old_value'], 'property_name': 'answer_groups', - 'cmd': 'edit_state_property', - 'new_value': [{ - 'training_data': [], - 'tagged_skill_misconception_id': None, - 'outcome': { - 'refresher_exploration_id': None, - 'dest': 'End', - 'missing_prerequisite_skill_id': None, - 'feedback': { - 'html': '

    Good

    ', - 'content_id': 'feedback_4' - }, - 'param_changes': [], - 'labelled_as_correct': False - }, - 'rule_specs': [{ - 'rule_type': 'Equals', - 'inputs': { - 'x': ['ca_choices_1'] + 'new_value': + [ + { + 'rule_specs': + [ + { + 'inputs': + { + 'x': 50 + }, + 'rule_type': 'IsLessThanOrEqualTo' + } + ], + 'training_data': [], + 'tagged_skill_misconception_id': None, + 'outcome': + { + 'feedback': + { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'html': '

    Next

    ' + }, + 'param_changes': [], + 'refresher_exploration_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False } - }] - }] + } + ], + 'cmd': 'edit_state_property' })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Changed Customization Args and related properties again') - changes_are_mergeable_1 = exp_services.are_changes_mergeable( - self.EXP_1_ID, 2, change_list_5) - self.assertEqual(changes_are_mergeable_1, True) - - def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # pylint: disable=line-too-long - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') + # Changes to the customization_args in same + # state again to check that changes are not mergeable. + change_list_2 = [exp_domain.ExplorationChange({ + 'state_name': 'Introduction', + 'old_value': { + 'placeholder': + { + 'value': + { + 'content_id': 'ca_placeholder_0', + 'unicode_str': '' + } + }, + 'rows': { + 'value': 1 + }, + 'catchMisspellings': { + 'value': False + } + }, + 'property_name': 'widget_customization_args', + 'new_value': + { + 'placeholder': + { + 'value': + { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG), + 'unicode_str': 'Placeholder text 2.' + } + }, + 'rows': + { + 'value': 2 + }, + 'catchMisspellings': { + 'value': False + } + }, + 'cmd': 'edit_state_property' + })] - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + changes_are_not_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 1, change_list_2) + self.assertEqual(changes_are_not_mergeable, False) + def test_changes_are_mergeable_when_answer_groups_changes_do_not_conflict( + self + ) -> None: # Adding answer_groups and solutions to the existing state. 
change_list = [exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 1, - 'state_name': 'Introduction', - 'new_value': 3 - }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', 'property_name': 'answer_groups', - 'old_value': [], + 'old_value': ['old_value'], 'state_name': 'Introduction', 'new_value': [{ 'rule_specs': [{ 'rule_type': 'StartsWith', 'inputs': { 'x': { - 'contentId': 'rule_input_2', + 'contentId': self.content_id_generator.generate( + translation_domain.ContentType.RULE), 'normalizedStrSet': ['Hello', 'Hola'] } } @@ -6787,11 +14056,13 @@ def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # 'outcome': { 'labelled_as_correct': False, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Feedback

    ' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'refresher_exploration_id': None }, @@ -6800,51 +14071,147 @@ def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', 'property_name': 'hints', - 'old_value': [], + 'old_value': ['old_value'], 'state_name': 'Introduction', 'new_value': [{ 'hint_content': { - 'content_id': 'hint_3', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Hint 1.

    ' } }] }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 3, + 'property_name': 'solution', + 'old_value': None, + 'state_name': 'Introduction', + 'new_value': { + 'correct_answer': 'Hello Aryaman!', + 'explanation': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'html': '

    Explanation.

    ' + }, + 'answer_is_exclusive': False + } + })] + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Added answer groups and solution') + + # Changes to the properties that are not related to + # the answer_groups. These changes are done to check + # when the changes are made in unrelated properties, + # they can be merged easily. + change_list_2 = [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '

    This is the first state.

    ' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + }), exp_domain.ExplorationChange({ + 'new_value': [{ + 'hint_content': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    Hint 1.

    ' + } + }, { + 'hint_content': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    This is a first hint.

    ' + } + }], 'state_name': 'Introduction', - 'new_value': 4 + 'old_value': [{ + 'hint_content': { + 'content_id': 'hint_3', + 'html': '

    Hint 1.

    ' + } + }], + 'cmd': 'edit_state_property', + 'property_name': 'hints' }), exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '

    Congratulations, you have finished!

    ' + }, + 'state_name': 'End', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, 'cmd': 'edit_state_property', - 'property_name': 'solution', - 'old_value': None, + 'property_name': 'content' + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Changed Contents and Hint') + + change_list_3 = [exp_domain.ExplorationChange({ + 'property_name': 'default_outcome', + 'old_value': { + 'labelled_as_correct': False, + 'missing_prerequisite_skill_id': None, + 'refresher_exploration_id': None, + 'feedback': { + 'content_id': 'default_outcome', + 'html': '' + }, + 'param_changes': [ + + ], + 'dest_if_really_stuck': None, + 'dest': 'End' + }, 'state_name': 'Introduction', + 'cmd': 'edit_state_property', 'new_value': { - 'correct_answer': 'Hello Aryaman!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    Explanation.

    ' + 'labelled_as_correct': False, + 'missing_prerequisite_skill_id': None, + 'refresher_exploration_id': None, + 'feedback': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'html': '

    Feedback 1.

    ' }, - 'answer_is_exclusive': False + 'param_changes': [ + + ], + 'dest_if_really_stuck': None, + 'dest': 'End' } })] - - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Added answer groups and solution') + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 2, change_list_3) + self.assertEqual(changes_are_mergeable, True) # Changes to the answer_groups and the properties that # affects or are affected by answer_groups. - change_list_2 = [exp_domain.ExplorationChange({ + change_list_4 = [exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'new_value': [{ 'outcome': { 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Feedback

    ' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -6874,6 +14241,7 @@ def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -6896,7 +14264,8 @@ def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # 'answer_is_exclusive': False, 'correct_answer': 'Hi Aryaman!', 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' } }, @@ -6910,22 +14279,18 @@ def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # 'html': '

    Explanation.

    ' } } - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': 6, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 4 }), exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'new_value': [{ 'outcome': { 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Feedback

    ' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -6944,11 +14309,13 @@ def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # }, { 'outcome': { 'feedback': { - 'content_id': 'feedback_4', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -6975,6 +14342,7 @@ def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -6997,7 +14365,8 @@ def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # 'answer_is_exclusive': False, 'correct_answer': 'Oppia is selected for GSoC.', 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' } }, @@ -7007,94 +14376,157 @@ def test_changes_are_not_mergeable_when_answer_groups_changes_conflict(self): # 'answer_is_exclusive': False, 'correct_answer': 'Hi Aryaman!', 'explanation': { - 'content_id': 'solution', + 'content_id': 'solution_5', 'html': '

    Explanation.

    ' } } + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_state_property', + 'state_name': 'Introduction', + 'property_name': 'solicit_answer_details', + 'new_value': True })] + + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 2, change_list_4) + self.assertEqual(changes_are_mergeable, True) + + # Creating second exploration to test the scenario + # when changes to same properties are made in two + # different states. + self.save_new_valid_exploration( + self.EXP_1_ID, self.owner_id, end_state_name='End') + + rights_manager.publish_exploration(self.owner, self.EXP_1_ID) + + # Using the old change_list_2 and change_list_3 here + # because they already covers the changes related to + # the answer_groups in the first state. exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Changed Answer Groups and related properties') + self.owner_id, self.EXP_1_ID, change_list_2, + 'Added Answer Group and Solution in One state') - # Changes to the answer group in same state again - # to check that changes are not mergeable. - change_list_3 = [exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': [{ - 'outcome': { - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'param_changes': [], - 'labelled_as_correct': False, - 'refresher_exploration_id': None + exp_services.update_exploration( + self.owner_id, self.EXP_1_ID, change_list_3, + 'Changed Answer Groups and Solutions in One State') + + test_dict: Dict[str, str] = {} + # Changes to the properties related to the answer_groups + # in the second state to check for mergeability. + change_list_5 = [exp_domain.ExplorationChange({ + 'old_value': 'EndExploration', + 'state_name': 'End', + 'property_name': 'widget_id', + 'cmd': 'edit_state_property', + 'new_value': None + }), exp_domain.ExplorationChange({ + 'old_value': { + 'recommendedExplorationIds': { + 'value': [] + } + }, + 'state_name': 'End', + 'property_name': 'widget_customization_args', + 'cmd': 'edit_state_property', + 'new_value': test_dict + }), exp_domain.ExplorationChange({ + 'old_value': None, + 'state_name': 'End', + 'property_name': 'widget_id', + 'cmd': 'edit_state_property', + 'new_value': 'ItemSelectionInput' + }), exp_domain.ExplorationChange({ + 'old_value': test_dict, + 'state_name': 'End', + 'property_name': 'widget_customization_args', + 'cmd': 'edit_state_property', + 'new_value': { + 'minAllowableSelectionCount': { + 'value': 1 }, - 'training_data': [], - 'rule_specs': [{ - 'inputs': { - 'x': { - 'normalizedStrSet': [ - 'Hello', - 'Hola', - 'Hey'], - 'contentId': 'rule_input_2' - } - }, - 'rule_type': 'StartsWith' - }], - 'tagged_skill_misconception_id': None - }], + 'choices': { + 'value': [{ + 'html': '

    A

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG) + }, { + 'html': '

    B

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG) + }, { + 'html': '

    C

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG) + }, { + 'html': '

    D

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG) + }] + }, + 'maxAllowableSelectionCount': { + 'value': 1 + } + } + }), exp_domain.ExplorationChange({ + 'old_value': None, + 'state_name': 'End', + 'property_name': 'default_outcome', 'cmd': 'edit_state_property', + 'new_value': { + 'refresher_exploration_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'missing_prerequisite_skill_id': None, + 'feedback': { + 'html': '', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + }, + 'param_changes': [], + 'labelled_as_correct': False + } + }), exp_domain.ExplorationChange({ + 'old_value': ['old_value'], + 'state_name': 'End', 'property_name': 'answer_groups', - 'old_value': [{ + 'cmd': 'edit_state_property', + 'new_value': [{ + 'training_data': [], + 'tagged_skill_misconception_id': None, 'outcome': { + 'refresher_exploration_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' + 'html': '

    Good

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', 'param_changes': [], - 'labelled_as_correct': False, - 'refresher_exploration_id': None + 'labelled_as_correct': False }, - 'training_data': [], 'rule_specs': [{ + 'rule_type': 'Equals', 'inputs': { - 'x': { - 'normalizedStrSet': ['Hello', 'Hola'], - 'contentId': 'rule_input_2' - } - }, - 'rule_type': 'StartsWith' - }], - 'tagged_skill_misconception_id': None + 'x': ['ca_choices_1'] + } + }] }] })] - changes_are_not_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list_3) - self.assertEqual(changes_are_not_mergeable, False) - - def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + changes_are_mergeable_1 = exp_services.are_changes_mergeable( + self.EXP_1_ID, 2, change_list_5) + self.assertEqual(changes_are_mergeable_1, True) - # Adding new answer_groups and solutions. + def test_changes_are_not_mergeable_when_answer_groups_changes_conflict( + self + ) -> None: + # Adding answer_groups and solutions to the existing state. 
change_list = [exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 1, - 'state_name': 'Introduction', - 'new_value': 3 - }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', 'property_name': 'answer_groups', - 'old_value': [], + 'old_value': ['old_value'], 'state_name': 'Introduction', 'new_value': [{ 'rule_specs': [{ @@ -7102,10 +14534,7 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): 'inputs': { 'x': { 'contentId': 'rule_input_2', - 'normalizedStrSet': [ - 'Hello', - 'Hola' - ] + 'normalizedStrSet': ['Hello', 'Hola'] } } }], @@ -7113,131 +14542,65 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): 'outcome': { 'labelled_as_correct': False, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Feedback

    ' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'refresher_exploration_id': None }, 'training_data': [] }] }), exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'property_name': 'hints', - 'old_value': [], - 'state_name': 'Introduction', - 'new_value': [{ - 'hint_content': { - 'content_id': 'hint_3', - 'html': '

    Hint 1.

    ' - } - }] - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 3, - 'state_name': 'Introduction', - 'new_value': 4 - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'property_name': 'solution', - 'old_value': None, - 'state_name': 'Introduction', - 'new_value': { - 'correct_answer': 'Hello Aryaman!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    Explanation.

    ' - }, - 'answer_is_exclusive': False - } - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'state_name': 'Introduction', - 'property_name': 'solicit_answer_details', - 'new_value': True - })] - - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Added answer groups and solution') - - # Changes to the properties unrelated to the solutions. - change_list_2 = [exp_domain.ExplorationChange({ - 'new_value': { - 'content_id': 'content', - 'html': '

    This is the first state.

    ' - }, - 'state_name': 'Introduction', - 'old_value': { - 'content_id': 'content', - 'html': '' - }, - 'cmd': 'edit_state_property', - 'property_name': 'content' - }), exp_domain.ExplorationChange({ - 'new_value': [{ - 'hint_content': { - 'content_id': 'hint_3', - 'html': '

    Hint 1.

    ' - } - }, { - 'hint_content': { - 'content_id': 'hint_4', - 'html': '

    This is a first hint.

    ' - } - }], + 'cmd': 'edit_state_property', + 'property_name': 'hints', + 'old_value': ['old_value'], 'state_name': 'Introduction', - 'old_value': [{ + 'new_value': [{ 'hint_content': { - 'content_id': 'hint_3', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), 'html': '

    Hint 1.

    ' } - }], - 'cmd': 'edit_state_property', - 'property_name': 'hints' - }), exp_domain.ExplorationChange({ - 'new_value': 5, - 'state_name': 'Introduction', - 'old_value': 4, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index' - }), exp_domain.ExplorationChange({ - 'new_value': { - 'content_id': 'content', - 'html': '

    Congratulations, you have finished!

    ' - }, - 'state_name': 'End', - 'old_value': { - 'content_id': 'content', - 'html': '' - }, - 'cmd': 'edit_state_property', - 'property_name': 'content' + }] }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', + 'property_name': 'solution', + 'old_value': None, 'state_name': 'Introduction', - 'property_name': 'solicit_answer_details', - 'new_value': True + 'new_value': { + 'correct_answer': 'Hello Aryaman!', + 'explanation': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), + 'html': '

    Explanation.

    ' + }, + 'answer_is_exclusive': False + } })] + exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Changed Contents and Hint') + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Added answer groups and solution') - # Changes to the solutions and the properties that affects - # solutions to check for mergeability. - change_list_3 = [exp_domain.ExplorationChange({ + # Changes to the answer_groups and the properties that + # affects or are affected by answer_groups. + change_list_2 = [exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'new_value': [{ 'outcome': { 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Feedback

    ' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7246,7 +14609,10 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): 'rule_specs': [{ 'inputs': { 'x': { - 'normalizedStrSet': ['Hello', 'Hola', 'Hi'], + 'normalizedStrSet': [ + 'Hello', + 'Hola', + 'Hi'], 'contentId': 'rule_input_2' } }, @@ -7264,6 +14630,7 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7286,7 +14653,8 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): 'answer_is_exclusive': False, 'correct_answer': 'Hi Aryaman!', 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' } }, @@ -7300,22 +14668,18 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): 'html': '

    Explanation.

    ' } } - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': 6, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 4 }), exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'new_value': [{ 'outcome': { 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Feedback

    ' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7334,11 +14698,13 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): }, { 'outcome': { 'feedback': { - 'content_id': 'feedback_4', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7365,6 +14731,7 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7387,7 +14754,8 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): 'answer_is_exclusive': False, 'correct_answer': 'Oppia is selected for GSoC.', 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' } }, @@ -7401,181 +14769,85 @@ def test_changes_are_mergeable_when_solutions_changes_do_not_conflict(self): 'html': '

    Explanation.

    ' } } - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'state_name': 'Introduction', - 'property_name': 'solicit_answer_details', - 'new_value': False })] - - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list_3) - self.assertEqual(changes_are_mergeable, True) - - # Creating second exploration to test the scenario - # when changes to same properties are made in two - # different states. - self.save_new_valid_exploration( - self.EXP_1_ID, self.owner_id, end_state_name='End') - - rights_manager.publish_exploration(self.owner, self.EXP_1_ID) - - # Using the old change_list_2 and change_list_3 here - # because they already covers the changes related to - # the solutions in the first state. - exp_services.update_exploration( - self.owner_id, self.EXP_1_ID, change_list_2, - 'Added Answer Group and Solution in One state') - exp_services.update_exploration( - self.owner_id, self.EXP_1_ID, change_list_3, - 'Changed Answer Groups and Solutions in One State') + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Changed Answer Groups and related properties') - # Changes to the properties related to the solutions - # in the second state to check for mergeability. 
- change_list_4 = [exp_domain.ExplorationChange({ - 'old_value': 'EndExploration', - 'new_value': None, - 'cmd': 'edit_state_property', - 'property_name': 'widget_id', - 'state_name': 'End' - }), exp_domain.ExplorationChange({ - 'old_value': { - 'recommendedExplorationIds': { - 'value': [] - } - }, - 'new_value': {}, - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args', - 'state_name': 'End' - }), exp_domain.ExplorationChange({ - 'old_value': None, - 'new_value': 'NumericInput', - 'cmd': 'edit_state_property', - 'property_name': 'widget_id', - 'state_name': 'End' - }), exp_domain.ExplorationChange({ - 'old_value': None, - 'new_value': { - 'dest': 'End', - 'missing_prerequisite_skill_id': None, - 'param_changes': [], - 'labelled_as_correct': False, - 'refresher_exploration_id': None, - 'feedback': { - 'html': '', - 'content_id': 'default_outcome' - } - }, - 'cmd': 'edit_state_property', - 'property_name': 'default_outcome', - 'state_name': 'End' - }), exp_domain.ExplorationChange({ - 'old_value': 0, - 'new_value': 1, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'state_name': 'End' - }), exp_domain.ExplorationChange({ - 'old_value': [], + # Changes to the answer group in same state again + # to check that changes are not mergeable. + change_list_3 = [exp_domain.ExplorationChange({ + 'state_name': 'Introduction', 'new_value': [{ 'outcome': { - 'dest': 'End', + 'feedback': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'html': '

    Feedback

    ' + }, 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, - 'refresher_exploration_id': None, - 'feedback': { - 'html': '

    Good

    ', - 'content_id': 'feedback_0' - } + 'refresher_exploration_id': None }, 'training_data': [], - 'tagged_skill_misconception_id': None, 'rule_specs': [{ - 'rule_type': 'IsGreaterThanOrEqualTo', 'inputs': { - 'x': 20 - } - }] + 'x': { + 'normalizedStrSet': [ + 'Hello', + 'Hola', + 'Hey'], + 'contentId': 'rule_input_2' + } + }, + 'rule_type': 'StartsWith' + }], + 'tagged_skill_misconception_id': None }], 'cmd': 'edit_state_property', 'property_name': 'answer_groups', - 'state_name': 'End' - }), exp_domain.ExplorationChange({ - 'old_value': [], - 'new_value': [{ - 'hint_content': { - 'html': '

    Hint 1. State 2.

    ', - 'content_id': 'hint_1' - } - }], - 'cmd': 'edit_state_property', - 'property_name': 'hints', - 'state_name': 'End' - }), exp_domain.ExplorationChange({ - 'old_value': 1, - 'new_value': 2, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'state_name': 'End' - }), exp_domain.ExplorationChange({ - 'old_value': None, - 'new_value': { - 'correct_answer': 30, - 'explanation': { - 'html': '

    Explanation.

    ', - 'content_id': 'solution' - }, - 'answer_is_exclusive': False - }, - 'cmd': 'edit_state_property', - 'property_name': 'solution', - 'state_name': 'End' - }), exp_domain.ExplorationChange({ - 'old_value': { - 'correct_answer': 30, - 'explanation': { - 'html': '

    Explanation.

    ', - 'content_id': 'solution' - }, - 'answer_is_exclusive': False - }, - 'new_value': { - 'correct_answer': 10, - 'explanation': { - 'html': '

    Explanation.

    ', - 'content_id': 'solution' + 'old_value': [{ + 'outcome': { + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Feedback

    ' + }, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [], + 'labelled_as_correct': False, + 'refresher_exploration_id': None }, - 'answer_is_exclusive': False - }, - 'cmd': 'edit_state_property', - 'property_name': 'solution', - 'state_name': 'End' + 'training_data': [], + 'rule_specs': [{ + 'inputs': { + 'x': { + 'normalizedStrSet': ['Hello', 'Hola'], + 'contentId': 'rule_input_2' + } + }, + 'rule_type': 'StartsWith' + }], + 'tagged_skill_misconception_id': None + }] })] + changes_are_not_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 2, change_list_3) + self.assertEqual(changes_are_not_mergeable, False) - changes_are_mergeable_1 = exp_services.are_changes_mergeable( - self.EXP_1_ID, 2, change_list_4) - self.assertEqual(changes_are_mergeable_1, True) - - def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') - - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) - + def test_changes_are_mergeable_when_solutions_changes_do_not_conflict( + self + ) -> None: # Adding new answer_groups and solutions. change_list = [exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 1, - 'state_name': 'Introduction', - 'new_value': 3 - }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', 'property_name': 'answer_groups', - 'old_value': [], + 'old_value': ['old_value'], 'state_name': 'Introduction', 'new_value': [{ 'rule_specs': [{ @@ -7594,11 +14866,13 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): 'outcome': { 'labelled_as_correct': False, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Feedback

    ' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'refresher_exploration_id': None }, @@ -7607,20 +14881,15 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', 'property_name': 'hints', - 'old_value': [], + 'old_value': ['old_value'], 'state_name': 'Introduction', 'new_value': [{ 'hint_content': { - 'content_id': 'hint_3', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), 'html': '

    Hint 1.

    ' } }] - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 3, - 'state_name': 'Introduction', - 'new_value': 4 }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', 'property_name': 'solution', @@ -7629,29 +14898,99 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): 'new_value': { 'correct_answer': 'Hello Aryaman!', 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' }, 'answer_is_exclusive': False } + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_state_property', + 'state_name': 'Introduction', + 'property_name': 'solicit_answer_details', + 'new_value': True })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), 'Added answer groups and solution') + # Changes to the properties unrelated to the solutions. + change_list_2 = [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '

    This is the first state.

    ' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + }), exp_domain.ExplorationChange({ + 'new_value': [{ + 'hint_content': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    Hint 1.

    ' + } + }, { + 'hint_content': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    This is a first hint.

    ' + } + }], + 'state_name': 'Introduction', + 'old_value': [{ + 'hint_content': { + 'content_id': 'hint_3', + 'html': '

    Hint 1.

    ' + } + }], + 'cmd': 'edit_state_property', + 'property_name': 'hints' + }), exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'html': '

    Congratulations, you have finished!

    ' + }, + 'state_name': 'End', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_state_property', + 'state_name': 'Introduction', + 'property_name': 'solicit_answer_details', + 'new_value': True + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Changed Contents and Hint') + # Changes to the solutions and the properties that affects # solutions to check for mergeability. - change_list_2 = [exp_domain.ExplorationChange({ + change_list_3 = [exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'new_value': [{ 'outcome': { 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Feedback

    ' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7678,6 +15017,7 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7700,7 +15040,8 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): 'answer_is_exclusive': False, 'correct_answer': 'Hi Aryaman!', 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' } }, @@ -7710,26 +15051,23 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): 'answer_is_exclusive': False, 'correct_answer': 'Hello Aryaman!', 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' } } - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': 6, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 4 }), exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'new_value': [{ 'outcome': { 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Feedback

    ' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7748,11 +15086,13 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): }, { 'outcome': { 'feedback': { - 'content_id': 'feedback_4', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7779,6 +15119,7 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): }, 'missing_prerequisite_skill_id': None, 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None @@ -7801,7 +15142,8 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): 'answer_is_exclusive': False, 'correct_answer': 'Oppia is selected for GSoC.', 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' } }, @@ -7811,577 +15153,568 @@ def test_changes_are_not_mergeable_when_solutions_changes_conflict(self): 'answer_is_exclusive': False, 'correct_answer': 'Hi Aryaman!', 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' } } - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Changed Solutions and affected properties') - - # Change to the solution of same state again - # to check that changes are not mergeable. - change_list_3 = [exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': { - 'answer_is_exclusive': False, - 'correct_answer': 'Hello Aryaman!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    Explanation.

    ' - } - }, + }), exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', - 'property_name': 'solution', - 'old_value': { - 'answer_is_exclusive': False, - 'correct_answer': 'Hello Aryaman!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    Changed Explanation.

    ' - } - } + 'state_name': 'Introduction', + 'property_name': 'solicit_answer_details', + 'new_value': False })] - changes_are_not_mergeable = exp_services.are_changes_mergeable( + changes_are_mergeable = exp_services.are_changes_mergeable( self.EXP_0_ID, 2, change_list_3) - self.assertEqual(changes_are_not_mergeable, False) + self.assertEqual(changes_are_mergeable, True) - def test_changes_are_mergeable_when_hints_changes_do_not_conflict(self): + # Creating second exploration to test the scenario + # when changes to same properties are made in two + # different states. self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') + self.EXP_1_ID, self.owner_id, end_state_name='End') - # Adding hints to the existing state. - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) - change_list = [exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': [{ - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' - } - }], - 'property_name': 'hints', - 'cmd': 'edit_state_property', - 'old_value': [] - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': 2, - 'property_name': 'next_content_id_index', - 'cmd': 'edit_state_property', - 'old_value': 1 - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': { - 'answer_is_exclusive': False, - 'explanation': { - 'html': '

    Explanation

    ', - 'content_id': 'solution' - }, - 'correct_answer': 'Hello' - }, - 'property_name': 'solution', - 'cmd': 'edit_state_property', - 'old_value': None - })] + rights_manager.publish_exploration(self.owner, self.EXP_1_ID) + # Using the old change_list_2 and change_list_3 here + # because they already covers the changes related to + # the solutions in the first state. exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Added Hint and Solution in Introduction state') + self.owner_id, self.EXP_1_ID, change_list_2, + 'Added Answer Group and Solution in One state') - # Changes to all state propeties other than the hints. - change_list_2 = [exp_domain.ExplorationChange({ - 'property_name': 'content', - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'old_value': { - 'html': '', - 'content_id': 'content' - }, - 'new_value': { - 'html': '

    Content in Introduction.

    ', - 'content_id': 'content' - } - }), exp_domain.ExplorationChange({ - 'property_name': 'solution', - 'state_name': 'Introduction', + exp_services.update_exploration( + self.owner_id, self.EXP_1_ID, + self.append_next_content_id_index_change(change_list_3), + 'Changed Answer Groups and Solutions in One State') + + test_dict: Dict[str, str] = {} + # Changes to the properties related to the solutions + # in the second state to check for mergeability. + change_list_4 = [exp_domain.ExplorationChange({ + 'old_value': 'EndExploration', + 'new_value': None, 'cmd': 'edit_state_property', - 'old_value': { - 'explanation': { - 'html': '

    Explanation

    ', - 'content_id': 'solution' - }, - 'answer_is_exclusive': False, - 'correct_answer': 'Hello' - }, - 'new_value': { - 'explanation': { - 'html': '

    Explanation

    ', - 'content_id': 'solution' - }, - 'answer_is_exclusive': False, - 'correct_answer': 'Hello Aryaman' - } - }), exp_domain.ExplorationChange({ 'property_name': 'widget_id', - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'old_value': 'TextInput', - 'new_value': None + 'state_name': 'End' }), exp_domain.ExplorationChange({ - 'property_name': 'widget_customization_args', - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', 'old_value': { - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': '' - } - }, - 'rows': { - 'value': 1 + 'recommendedExplorationIds': { + 'value': [] } }, - 'new_value': {} - }), exp_domain.ExplorationChange({ - 'property_name': 'solution', - 'state_name': 'Introduction', + 'new_value': test_dict, 'cmd': 'edit_state_property', - 'old_value': { - 'explanation': { - 'html': '

    Explanation

    ', - 'content_id': 'solution' - }, - 'answer_is_exclusive': False, - 'correct_answer': 'Hello Aryaman' - }, - 'new_value': None + 'property_name': 'widget_customization_args', + 'state_name': 'End' }), exp_domain.ExplorationChange({ - 'property_name': 'widget_id', - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', 'old_value': None, - 'new_value': 'NumericInput' + 'new_value': 'NumericInput', + 'cmd': 'edit_state_property', + 'property_name': 'widget_id', + 'state_name': 'End' }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'old_value': - { - 'requireNonnegativeInput': - { - 'value': True - } - }, - 'property_name': 'widget_customization_args', - 'new_value': - { - 'requireNonnegativeInput': - { - 'value': False + 'old_value': None, + 'new_value': { + 'dest': 'End', + 'dest_if_really_stuck': None, + 'missing_prerequisite_skill_id': None, + 'param_changes': [], + 'labelled_as_correct': False, + 'refresher_exploration_id': None, + 'feedback': { + 'html': '', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) } }, - 'cmd': 'edit_state_property' - }), exp_domain.ExplorationChange({ - 'property_name': 'next_content_id_index', - 'state_name': 'Introduction', 'cmd': 'edit_state_property', - 'old_value': 2, - 'new_value': 3 + 'property_name': 'default_outcome', + 'state_name': 'End' }), exp_domain.ExplorationChange({ - 'property_name': 'answer_groups', - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'old_value': [], + 'old_value': ['old_value'], 'new_value': [{ - 'rule_specs': [{ - 'inputs': { - 'x': 46 - }, - 'rule_type': 'IsLessThanOrEqualTo' - }], - 'training_data': [], - 'tagged_skill_misconception_id': None, 'outcome': { + 'dest': 'End', + 'dest_if_really_stuck': None, + 'missing_prerequisite_skill_id': None, + 'param_changes': [], 'labelled_as_correct': False, 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', 'feedback': { - 
'html': '', - 'content_id': 'feedback_2' - }, - 'param_changes': [] - } - }] + 'html': '

    Good

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + } + }, + 'training_data': [], + 'tagged_skill_misconception_id': None, + 'rule_specs': [{ + 'rule_type': 'IsGreaterThanOrEqualTo', + 'inputs': { + 'x': 20 + } + }] + }], + 'cmd': 'edit_state_property', + 'property_name': 'answer_groups', + 'state_name': 'End' }), exp_domain.ExplorationChange({ - 'property_name': 'solution', - 'state_name': 'Introduction', + 'old_value': ['old_value'], + 'new_value': [{ + 'hint_content': { + 'html': '

    Hint 1. State 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }], 'cmd': 'edit_state_property', + 'property_name': 'hints', + 'state_name': 'End' + }), exp_domain.ExplorationChange({ 'old_value': None, 'new_value': { + 'correct_answer': 30, 'explanation': { - 'html': '

    Explanation

    ', - 'content_id': 'solution' + 'html': '

    Explanation.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION) }, - 'answer_is_exclusive': False, - 'correct_answer': 42 - } - }), exp_domain.ExplorationChange({ - 'property_name': 'content', - 'state_name': 'End', - 'cmd': 'edit_state_property', - 'old_value': { - 'html': '', - 'content_id': 'content' + 'answer_is_exclusive': False }, - 'new_value': { - 'html': '

    Congratulations, you have finished!

    ', - 'content_id': 'content' - } - }), exp_domain.ExplorationChange({ - 'property_name': 'title', - 'cmd': 'edit_exploration_property', - 'old_value': 'A title', - 'new_value': 'First Title' - }), exp_domain.ExplorationChange({ - 'property_name': 'solution', - 'state_name': 'Introduction', 'cmd': 'edit_state_property', + 'property_name': 'solution', + 'state_name': 'End' + }), exp_domain.ExplorationChange({ 'old_value': { + 'correct_answer': 30, 'explanation': { - 'html': '

    Explanation

    ', - 'content_id': 'solution' + 'html': '

    Explanation.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION) }, - 'answer_is_exclusive': False, - 'correct_answer': 42 + 'answer_is_exclusive': False }, 'new_value': { + 'correct_answer': 10, 'explanation': { - 'html': '

    Explanation

    ', - 'content_id': 'solution' + 'html': '

    Explanation.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION) }, - 'answer_is_exclusive': False, - 'correct_answer': 40 - } + 'answer_is_exclusive': False + }, + 'cmd': 'edit_state_property', + 'property_name': 'solution', + 'state_name': 'End' })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Made changes in interaction, contents, solutions, answer_groups in both states') # pylint: disable=line-too-long + changes_are_mergeable_1 = exp_services.are_changes_mergeable( + self.EXP_1_ID, 2, change_list_4) + self.assertEqual(changes_are_mergeable_1, True) - # Changes to the old hints and also deleted and added - # new hints to take all the cases to check for mergeability. - change_list_3 = [exp_domain.ExplorationChange({ - 'old_value': [{ - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' - } - }], + def test_changes_are_not_mergeable_when_solutions_changes_conflict( + self + ) -> None: + # Adding new answer_groups and solutions. + change_list = [exp_domain.ExplorationChange({ 'cmd': 'edit_state_property', - 'property_name': 'hints', + 'property_name': 'answer_groups', + 'old_value': ['old_value'], + 'state_name': 'Introduction', 'new_value': [{ - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' - } - }, { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' - } - }], - 'state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': 2, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'new_value': 3, - 'state_name': 'Introduction' + 'rule_specs': [{ + 'rule_type': 'StartsWith', + 'inputs': { + 'x': { + 'contentId': 'rule_input_2', + 'normalizedStrSet': [ + 'Hello', + 'Hola' + ] + } + } + }], + 'tagged_skill_misconception_id': None, + 'outcome': { + 'labelled_as_correct': False, + 'feedback': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'html': '

    Feedback

    ' + }, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [], + 'refresher_exploration_id': None + }, + 'training_data': [] + }] }), exp_domain.ExplorationChange({ - 'old_value': [{ - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' - } - }, { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' - } - }], 'cmd': 'edit_state_property', 'property_name': 'hints', + 'old_value': ['old_value'], + 'state_name': 'Introduction', 'new_value': [{ 'hint_content': { - 'html': '

    Changed hint 1.

    ', - 'content_id': 'hint_1' - } - }, { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), + 'html': '

    Hint 1.

    ' } - }], - 'state_name': 'Introduction' + }] }), exp_domain.ExplorationChange({ - 'old_value': [{ - 'hint_content': { - 'html': '

    Changed hint 1.

    ', - 'content_id': 'hint_1' - } - }, { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' - } - }], 'cmd': 'edit_state_property', - 'property_name': 'hints', - 'new_value': [ - { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' - } - }, { - 'hint_content': { - 'html': '

    Changed hint 1.

    ', - 'content_id': 'hint_1' - } - } - ], - 'state_name': 'Introduction' + 'property_name': 'solution', + 'old_value': None, + 'state_name': 'Introduction', + 'new_value': { + 'correct_answer': 'Hello Aryaman!', + 'explanation': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), + 'html': '

    Explanation.

    ' + }, + 'answer_is_exclusive': False + } })] - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list_3) - self.assertEqual(changes_are_mergeable, True) - - def test_changes_are_not_mergeable_when_hints_changes_conflict(self): - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Added answer groups and solution') - # Adding hints to the existing state. - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) - change_list = [exp_domain.ExplorationChange({ + # Changes to the solutions and the properties that affects + # solutions to check for mergeability. + change_list_2 = [exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'new_value': [{ - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' - } - }], - 'property_name': 'hints', - 'cmd': 'edit_state_property', - 'old_value': [] - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': 2, - 'property_name': 'next_content_id_index', + 'outcome': { + 'feedback': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'html': '

    Feedback

    ' + }, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [], + 'labelled_as_correct': False, + 'refresher_exploration_id': None + }, + 'training_data': [], + 'rule_specs': [{ + 'inputs': { + 'x': { + 'normalizedStrSet': ['Hello', 'Hola', 'Hi'], + 'contentId': 'rule_input_2' + } + }, + 'rule_type': 'StartsWith' + }], + 'tagged_skill_misconception_id': None + }], 'cmd': 'edit_state_property', - 'old_value': 1 + 'property_name': 'answer_groups', + 'old_value': [{ + 'outcome': { + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Feedback

    ' + }, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [], + 'labelled_as_correct': False, + 'refresher_exploration_id': None + }, + 'training_data': [], + 'rule_specs': [{ + 'inputs': { + 'x': { + 'normalizedStrSet': ['Hello', 'Hola'], + 'contentId': 'rule_input_2' + } + }, + 'rule_type': 'StartsWith' + }], + 'tagged_skill_misconception_id': None + }] }), exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'new_value': { 'answer_is_exclusive': False, + 'correct_answer': 'Hi Aryaman!', 'explanation': { - 'html': '

    Explanation

    ', - 'content_id': 'solution' - }, - 'correct_answer': 'Hello' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), + 'html': '

    Explanation.

    ' + } }, - 'property_name': 'solution', 'cmd': 'edit_state_property', - 'old_value': None - })] - - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Added Hint and Solution in Introduction state') - - # Changes to the old hints and also deleted and added - # new hints to take all the cases to check for mergeability. - change_list_2 = [exp_domain.ExplorationChange({ - 'old_value': [{ - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' + 'property_name': 'solution', + 'old_value': { + 'answer_is_exclusive': False, + 'correct_answer': 'Hello Aryaman!', + 'explanation': { + 'content_id': 'solution', + 'html': '

    Explanation.

    ' } - }], - 'cmd': 'edit_state_property', - 'property_name': 'hints', + } + }), exp_domain.ExplorationChange({ + 'state_name': 'Introduction', 'new_value': [{ - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' - } + 'outcome': { + 'feedback': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'html': '

    Feedback

    ' + }, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [], + 'labelled_as_correct': False, + 'refresher_exploration_id': None + }, + 'training_data': [], + 'rule_specs': [{ + 'inputs': { + 'x': { + 'normalizedStrSet': ['Hello', 'Hola', 'Hi'], + 'contentId': 'rule_input_2' + } + }, + 'rule_type': 'StartsWith' + }], + 'tagged_skill_misconception_id': None }, { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' - } + 'outcome': { + 'feedback': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + 'html': '' + }, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [], + 'labelled_as_correct': False, + 'refresher_exploration_id': None + }, + 'training_data': [], + 'rule_specs': [{ + 'inputs': { + 'x': { + 'normalizedStrSet': ['Oppia', 'GSoC'], + 'contentId': 'rule_input_5' + } + }, + 'rule_type': 'Contains' + }], + 'tagged_skill_misconception_id': None }], - 'state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': 2, 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'new_value': 3, - 'state_name': 'Introduction' - }), exp_domain.ExplorationChange({ + 'property_name': 'answer_groups', 'old_value': [{ - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' - } - }, { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' - } - }], - 'cmd': 'edit_state_property', - 'property_name': 'hints', - 'new_value': [{ - 'hint_content': { - 'html': '

    Changed hint 1.

    ', - 'content_id': 'hint_1' - } - }, { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' - } - }], - 'state_name': 'Introduction' + 'outcome': { + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Feedback

    ' + }, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [], + 'labelled_as_correct': False, + 'refresher_exploration_id': None + }, + 'training_data': [], + 'rule_specs': [{ + 'inputs': { + 'x': { + 'normalizedStrSet': ['Hello', 'Hola', 'Hi'], + 'contentId': 'rule_input_2' + } + }, + 'rule_type': 'StartsWith' + }], + 'tagged_skill_misconception_id': None + }] }), exp_domain.ExplorationChange({ - 'old_value': [{ - 'hint_content': { - 'html': '

    Changed hint 1.

    ', - 'content_id': 'hint_1' - } - }, { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' + 'state_name': 'Introduction', + 'new_value': { + 'answer_is_exclusive': False, + 'correct_answer': 'Oppia is selected for GSoC.', + 'explanation': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), + 'html': '

    Explanation.

    ' } - }], + }, 'cmd': 'edit_state_property', - 'property_name': 'hints', - 'new_value': [ - { - 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_2' - } - }, { - 'hint_content': { - 'html': '

    Changed hint 1.

    ', - 'content_id': 'hint_1' - } + 'property_name': 'solution', + 'old_value': { + 'answer_is_exclusive': False, + 'correct_answer': 'Hi Aryaman!', + 'explanation': { + 'content_id': 'solution', + 'html': '

    Explanation.

    ' } - ], - 'state_name': 'Introduction' + } })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Changes in the hints again.') + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Changed Solutions and affected properties') + # Change to the solution of same state again + # to check that changes are not mergeable. change_list_3 = [exp_domain.ExplorationChange({ - 'old_value': [{ - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' + 'state_name': 'Introduction', + 'new_value': { + 'answer_is_exclusive': False, + 'correct_answer': 'Hello Aryaman!', + 'explanation': { + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), + 'html': '

    Explanation.

    ' } - }], + }, 'cmd': 'edit_state_property', - 'property_name': 'hints', - 'new_value': [{ - 'hint_content': { - 'html': '

    Changed Hint 1.

    ', - 'content_id': 'hint_1' + 'property_name': 'solution', + 'old_value': { + 'answer_is_exclusive': False, + 'correct_answer': 'Hello Aryaman!', + 'explanation': { + 'content_id': 'solution', + 'html': '

    Changed Explanation.

    ' } - }], - 'state_name': 'Introduction' + } })] changes_are_not_mergeable = exp_services.are_changes_mergeable( self.EXP_0_ID, 2, change_list_3) self.assertEqual(changes_are_not_mergeable, False) - def test_changes_are_mergeable_when_exploration_properties_changes_do_not_conflict(self): # pylint: disable=line-too-long - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') - - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) - - # Changes to all the properties of both states other than - # exploration properties i.e. title, category, objective etc. - # Also included rename states changes to check that - # renaming states doesn't affect anything. + def test_changes_are_mergeable_when_hints_changes_do_not_conflict( + self + ) -> None: change_list = [exp_domain.ExplorationChange({ + 'state_name': 'Introduction', + 'new_value': [{ + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }], + 'property_name': 'hints', + 'cmd': 'edit_state_property', + 'old_value': ['old_value'] + }), exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'new_value': { - 'html': '

    Content

    ', - 'content_id': 'content' + 'answer_is_exclusive': False, + 'explanation': { + 'html': '

    Explanation

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION) + }, + 'correct_answer': 'Hello' }, + 'property_name': 'solution', 'cmd': 'edit_state_property', + 'old_value': None + })] + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Added Hint and Solution in Introduction state') + + test_dict: Dict[str, str] = {} + # Changes to all state propeties other than the hints. + change_list_2 = [exp_domain.ExplorationChange({ 'property_name': 'content', + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', 'old_value': { 'html': '', - 'content_id': 'content' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT) + }, + 'new_value': { + 'html': '

    Content in Introduction.

    ', + 'content_id': 'content_0' } }), exp_domain.ExplorationChange({ + 'property_name': 'solution', 'state_name': 'Introduction', - 'new_value': [ - { - 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' - } - } - ], - 'cmd': 'edit_state_property', - 'property_name': 'hints', - 'old_value': [ - - ] - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': 2, 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 1 + 'old_value': { + 'explanation': { + 'html': '

    Explanation

    ', + 'content_id': 'solution' + }, + 'answer_is_exclusive': False, + 'correct_answer': 'Hello' + }, + 'new_value': { + 'explanation': { + 'html': '

    Explanation

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION) + }, + 'answer_is_exclusive': False, + 'correct_answer': 'Hello Aryaman' + } }), exp_domain.ExplorationChange({ + 'property_name': 'widget_id', 'state_name': 'Introduction', - 'new_value': None, 'cmd': 'edit_state_property', - 'property_name': 'widget_id', - 'old_value': 'TextInput' + 'old_value': 'TextInput', + 'new_value': None }), exp_domain.ExplorationChange({ + 'property_name': 'widget_customization_args', 'state_name': 'Introduction', - 'new_value': {}, 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args', 'old_value': { - 'rows': { - 'value': 1 - }, 'placeholder': { 'value': { - 'unicode_str': '', - 'content_id': 'ca_placeholder_0' + 'content_id': 'ca_placeholder_0', + 'unicode_str': '' } + }, + 'rows': { + 'value': 1 + }, + 'catchMisspellings': { + 'value': False } - } + }, + 'new_value': test_dict }), exp_domain.ExplorationChange({ + 'property_name': 'solution', 'state_name': 'Introduction', - 'new_value': 'NumericInput', 'cmd': 'edit_state_property', + 'old_value': { + 'explanation': { + 'html': '

    Explanation

    ', + 'content_id': 'solution' + }, + 'answer_is_exclusive': False, + 'correct_answer': 'Hello Aryaman' + }, + 'new_value': None + }), exp_domain.ExplorationChange({ 'property_name': 'widget_id', - 'old_value': None + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', + 'old_value': None, + 'new_value': 'NumericInput' }), exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'old_value': @@ -8401,695 +15734,770 @@ def test_changes_are_mergeable_when_exploration_properties_changes_do_not_confli }, 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ + 'property_name': 'answer_groups', 'state_name': 'Introduction', - 'new_value': 3, 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 2 - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': [ - { - 'outcome': { - 'refresher_exploration_id': None, - 'feedback': { - 'html': '

    Good.

    ', - 'content_id': 'feedback_2' - }, - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False, - 'dest': 'End', - 'param_changes': [] + 'old_value': ['old_value'], + 'new_value': [{ + 'rule_specs': [{ + 'inputs': { + 'x': 46 }, - 'training_data': [], - 'rule_specs': [ - { - 'inputs': { - 'x': 50 - }, - 'rule_type': 'IsLessThanOrEqualTo' - } - ], - 'tagged_skill_misconception_id': None + 'rule_type': 'IsLessThanOrEqualTo' + }], + 'training_data': [], + 'tagged_skill_misconception_id': None, + 'outcome': { + 'labelled_as_correct': False, + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'feedback': { + 'html': '', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + }, + 'param_changes': [] } - ], - 'cmd': 'edit_state_property', - 'property_name': 'answer_groups', - 'old_value': [ - - ] + }] }), exp_domain.ExplorationChange({ + 'property_name': 'solution', 'state_name': 'Introduction', + 'cmd': 'edit_state_property', + 'old_value': None, 'new_value': { - 'refresher_exploration_id': None, - 'feedback': { - 'html': '

    Try Again.

    ', - 'content_id': 'default_outcome' + 'explanation': { + 'html': '

    Explanation

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION) }, - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False, - 'dest': 'End', - 'param_changes': [] - }, + 'answer_is_exclusive': False, + 'correct_answer': 42 + } + }), exp_domain.ExplorationChange({ + 'property_name': 'content', + 'state_name': 'End', 'cmd': 'edit_state_property', - 'property_name': 'default_outcome', 'old_value': { - 'refresher_exploration_id': None, - 'feedback': { - 'html': '', - 'content_id': 'default_outcome' - }, - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False, - 'dest': 'End', - 'param_changes': [ - - ] + 'html': '', + 'content_id': 'content_0' + }, + 'new_value': { + 'html': '

    Congratulations, you have finished!

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT) } }), exp_domain.ExplorationChange({ + 'property_name': 'title', + 'cmd': 'edit_exploration_property', + 'old_value': 'A title', + 'new_value': 'First Title' + }), exp_domain.ExplorationChange({ + 'property_name': 'solution', 'state_name': 'Introduction', - 'new_value': { - 'refresher_exploration_id': None, - 'feedback': { - 'html': '

    Try Again.

    ', - 'content_id': 'default_outcome' - }, - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False, - 'dest': 'Introduction', - 'param_changes': [ - - ] - }, 'cmd': 'edit_state_property', - 'property_name': 'default_outcome', 'old_value': { - 'refresher_exploration_id': None, - 'feedback': { - 'html': '

    Try Again.

    ', - 'content_id': 'default_outcome' + 'explanation': { + 'html': '

    Explanation

    ', + 'content_id': 'solution' }, - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False, - 'dest': 'End', - 'param_changes': [ - - ] + 'answer_is_exclusive': False, + 'correct_answer': 42 + }, + 'new_value': { + 'explanation': { + 'html': '

    Explanation

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION) + }, + 'answer_is_exclusive': False, + 'correct_answer': 40 } })] + exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Made changes in interaction, contents, solutions, answer_groups in introduction state.') # pylint: disable=line-too-long + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Made changes in interaction, contents, solutions, answer_groups in both states') # pylint: disable=line-too-long - # Changes to properties of second state. - change_list_2 = [exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': { - 'answer_is_exclusive': False, - 'correct_answer': 25, - 'explanation': { - 'html': '

    Explanation.

    ', - 'content_id': 'solution' + # Changes to the old hints and also deleted and added + # new hints to take all the cases to check for mergeability. + change_list_3 = [exp_domain.ExplorationChange({ + 'old_value': [{ + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': 'hint_1' + } + }], + 'cmd': 'edit_state_property', + 'property_name': 'hints', + 'new_value': [{ + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }, { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }], + 'state_name': 'Introduction' + }), exp_domain.ExplorationChange({ + 'old_value': [{ + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': 'hint_1' + } + }, { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': 'hint_2' } - }, + }], 'cmd': 'edit_state_property', - 'property_name': 'solution', - 'old_value': None + 'property_name': 'hints', + 'new_value': [{ + 'hint_content': { + 'html': '

    Changed hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }, { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }], + 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', + 'old_value': [{ + 'hint_content': { + 'html': '

    Changed hint 1.

    ', + 'content_id': 'hint_1' + } + }, { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': 'hint_2' + } + }], + 'cmd': 'edit_state_property', + 'property_name': 'hints', 'new_value': [ { 'hint_content': { - 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' + 'html': '

    Hint 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) } - }, - { + }, { 'hint_content': { - 'html': '

    Hint 2.

    ', - 'content_id': 'hint_3' + 'html': '

    Changed hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) } } ], - 'cmd': 'edit_state_property', - 'property_name': 'hints', - 'old_value': [{ + 'state_name': 'Introduction' + })] + + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 2, change_list_3) + self.assertEqual(changes_are_mergeable, True) + + def test_changes_are_not_mergeable_when_hints_changes_conflict( + self + ) -> None: + change_list = [exp_domain.ExplorationChange({ + 'state_name': 'Introduction', + 'new_value': [{ 'hint_content': { 'html': '

    Hint 1.

    ', - 'content_id': 'hint_1' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) } - }] - }), exp_domain.ExplorationChange({ - 'state_name': 'Introduction', - 'new_value': 4, + }], + 'property_name': 'hints', 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'old_value': 3 + 'old_value': ['old_value'] }), exp_domain.ExplorationChange({ - 'state_name': 'End', + 'state_name': 'Introduction', 'new_value': { - 'html': '

    Congratulations, you have finished!

    ', - 'content_id': 'content' + 'answer_is_exclusive': False, + 'explanation': { + 'html': '

    Explanation

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION) + }, + 'correct_answer': 'Hello' }, + 'property_name': 'solution', 'cmd': 'edit_state_property', - 'property_name': 'content', - 'old_value': { - 'html': '', - 'content_id': 'content' - } - }), exp_domain.ExplorationChange({ - 'new_state_name': 'End-State', - 'cmd': 'rename_state', - 'old_state_name': 'End' + 'old_value': None })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Made changes in solutions in introduction state and content, state_name in end state.') # pylint: disable=line-too-long - - # Changes to the exploration properties to check - # for mergeability. - change_list_3 = [exp_domain.ExplorationChange({ - 'property_name': 'title', - 'cmd': 'edit_exploration_property', - 'old_value': 'A title', - 'new_value': 'A changed title.' - }), exp_domain.ExplorationChange({ - 'property_name': 'objective', - 'cmd': 'edit_exploration_property', - 'old_value': 'An objective', - 'new_value': 'A changed objective.' 
- }), exp_domain.ExplorationChange({ - 'property_name': 'category', - 'cmd': 'edit_exploration_property', - 'old_value': 'A category', - 'new_value': 'A changed category' - }), exp_domain.ExplorationChange({ - 'property_name': 'auto_tts_enabled', - 'cmd': 'edit_exploration_property', - 'old_value': True, - 'new_value': False - }), exp_domain.ExplorationChange({ - 'property_name': 'tags', - 'cmd': 'edit_exploration_property', - 'old_value': [ + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Added Hint and Solution in Introduction state') - ], - 'new_value': [ - 'new' - ] - }), exp_domain.ExplorationChange({ - 'property_name': 'tags', - 'cmd': 'edit_exploration_property', - 'old_value': [ - 'new' - ], - 'new_value': [ - 'new', - 'skill' - ] - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_exploration_property', - 'property_name': 'language_code', - 'new_value': 'bn', - 'old_value': 'en' - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_exploration_property', - 'property_name': 'author_notes', - 'new_value': 'author_notes' - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_exploration_property', - 'property_name': 'blurb', - 'new_value': 'blurb' - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_exploration_property', - 'property_name': 'init_state_name', - 'new_value': 'End', - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_exploration_property', - 'property_name': 'init_state_name', - 'new_value': 'Introduction', - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_exploration_property', - 'property_name': 'auto_tts_enabled', - 'new_value': False - }), exp_domain.ExplorationChange({ - 'cmd': 'edit_exploration_property', - 'property_name': 'correctness_feedback_enabled', - 'new_value': True - }), exp_domain.ExplorationChange({ + # Changes to the old hints and also deleted and added + # new hints to take all the cases to check for mergeability. 
+ change_list_2 = [exp_domain.ExplorationChange({ + 'old_value': [{ + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': 'hint_1' + } + }], 'cmd': 'edit_state_property', - 'property_name': 'confirmed_unclassified_answers', - 'state_name': 'Introduction', - 'new_value': ['test'] + 'property_name': 'hints', + 'new_value': [{ + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }, { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }], + 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ + 'old_value': [{ + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': 'hint_1' + } + }, { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }], 'cmd': 'edit_state_property', - 'state_name': 'Introduction', - 'property_name': 'linked_skill_id', - 'new_value': 'string_1' + 'property_name': 'hints', + 'new_value': [{ + 'hint_content': { + 'html': '

    Changed hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }, { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }], + 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ + 'old_value': [{ + 'hint_content': { + 'html': '

    Changed hint 1.

    ', + 'content_id': 'hint_1' + } + }, { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': 'hint_2' + } + }], 'cmd': 'edit_state_property', - 'state_name': 'Introduction', - 'property_name': 'card_is_checkpoint', - 'new_value': True - })] - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 1, change_list_3) - self.assertEqual(changes_are_mergeable, True) - - def test_changes_are_not_mergeable_when_exploration_properties_changes_conflict(self): # pylint: disable=line-too-long - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') - - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) - - # Changes to the exploration properties to check - # for mergeability. - change_list = [exp_domain.ExplorationChange({ - 'property_name': 'title', - 'cmd': 'edit_exploration_property', - 'old_value': 'A title', - 'new_value': 'A changed title.' - }), exp_domain.ExplorationChange({ - 'property_name': 'objective', - 'cmd': 'edit_exploration_property', - 'old_value': 'An objective', - 'new_value': 'A changed objective.' - }), exp_domain.ExplorationChange({ - 'property_name': 'category', - 'cmd': 'edit_exploration_property', - 'old_value': 'A category', - 'new_value': 'A changed category' - }), exp_domain.ExplorationChange({ - 'property_name': 'auto_tts_enabled', - 'cmd': 'edit_exploration_property', - 'old_value': True, - 'new_value': False - }), exp_domain.ExplorationChange({ - 'property_name': 'tags', - 'cmd': 'edit_exploration_property', - 'old_value': [ - - ], - 'new_value': [ - 'new' - ] - }), exp_domain.ExplorationChange({ - 'property_name': 'tags', - 'cmd': 'edit_exploration_property', - 'old_value': [ - 'new' - ], + 'property_name': 'hints', 'new_value': [ - 'new', - 'skill' - ] + { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }, { + 'hint_content': { + 'html': '

    Changed hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + } + ], + 'state_name': 'Introduction' })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Changes in the Exploration Properties.') - - change_list_2 = [exp_domain.ExplorationChange({ - 'property_name': 'title', - 'cmd': 'edit_exploration_property', - 'old_value': 'A title', - 'new_value': 'A new title.' - }), exp_domain.ExplorationChange({ - 'property_name': 'objective', - 'cmd': 'edit_exploration_property', - 'old_value': 'An objective', - 'new_value': 'A new objective.' - }), exp_domain.ExplorationChange({ - 'property_name': 'category', - 'cmd': 'edit_exploration_property', - 'old_value': 'A category', - 'new_value': 'A new category' - }), exp_domain.ExplorationChange({ - 'property_name': 'auto_tts_enabled', - 'cmd': 'edit_exploration_property', - 'old_value': True, - 'new_value': False - }), exp_domain.ExplorationChange({ - 'property_name': 'tags', - 'cmd': 'edit_exploration_property', - 'old_value': [ + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Changes in the hints again.') - ], - 'new_value': [ - 'new' - ] - }), exp_domain.ExplorationChange({ - 'property_name': 'tags', - 'cmd': 'edit_exploration_property', - 'old_value': [ - 'new' - ], - 'new_value': [ - 'new', - 'skill' - ] + change_list_3 = [exp_domain.ExplorationChange({ + 'old_value': [{ + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': 'hint_1' + } + }], + 'cmd': 'edit_state_property', + 'property_name': 'hints', + 'new_value': [{ + 'hint_content': { + 'html': '

    Changed Hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }], + 'state_name': 'Introduction' })] changes_are_not_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 1, change_list_2) + self.EXP_0_ID, 2, change_list_3) self.assertEqual(changes_are_not_mergeable, False) - def test_changes_are_mergeable_when_translations_changes_do_not_conflict(self): # pylint: disable=line-too-long - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') - - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) - - # Adding content, feedbacks, solutions so that - # translations can be added later on. + def test_changes_are_mergeable_when_exploration_properties_changes_do_not_conflict( # pylint: disable=line-too-long + self + ) -> None: + test_dict: Dict[str, str] = {} + # Changes to all the properties of both states other than + # exploration properties i.e. title, category, objective etc. + # Also included rename states changes to check that + # renaming states doesn't affect anything. change_list = [exp_domain.ExplorationChange({ + 'state_name': 'Introduction', + 'new_value': { + 'html': '

    Content

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + }, + 'cmd': 'edit_state_property', 'property_name': 'content', 'old_value': { - 'content_id': 'content', - 'html': '' - }, + 'html': '', + 'content_id': 'content_0' + } + }), exp_domain.ExplorationChange({ 'state_name': 'Introduction', + 'new_value': [ + { + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + } + ], 'cmd': 'edit_state_property', - 'new_value': { - 'content_id': 'content', - 'html': '

    First State Content.

    ' - } + 'property_name': 'hints', + 'old_value': ['old_value'] + }), exp_domain.ExplorationChange({ + 'state_name': 'Introduction', + 'new_value': None, + 'cmd': 'edit_state_property', + 'property_name': 'widget_id', + 'old_value': 'TextInput' }), exp_domain.ExplorationChange({ + 'state_name': 'Introduction', + 'new_value': test_dict, + 'cmd': 'edit_state_property', 'property_name': 'widget_customization_args', 'old_value': { + 'rows': { + 'value': 1 + }, 'placeholder': { 'value': { 'unicode_str': '', 'content_id': 'ca_placeholder_0' } }, - 'rows': { - 'value': 1 + 'catchMisspellings': { + 'value': False + } + } + }), exp_domain.ExplorationChange({ + 'state_name': 'Introduction', + 'new_value': 'NumericInput', + 'cmd': 'edit_state_property', + 'property_name': 'widget_id', + 'old_value': None + }), exp_domain.ExplorationChange({ + 'state_name': 'Introduction', + 'old_value': + { + 'requireNonnegativeInput': + { + 'value': True + } + }, + 'property_name': 'widget_customization_args', + 'new_value': + { + 'requireNonnegativeInput': + { + 'value': False } }, + 'cmd': 'edit_state_property' + }), exp_domain.ExplorationChange({ 'state_name': 'Introduction', + 'new_value': [ + { + 'outcome': { + 'refresher_exploration_id': None, + 'feedback': { + 'html': '

    Good.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + }, + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [] + }, + 'training_data': [], + 'rule_specs': [ + { + 'inputs': { + 'x': 50 + }, + 'rule_type': 'IsLessThanOrEqualTo' + } + ], + 'tagged_skill_misconception_id': None + } + ], 'cmd': 'edit_state_property', + 'property_name': 'answer_groups', + 'old_value': ['old_value'] + }), exp_domain.ExplorationChange({ + 'state_name': 'Introduction', 'new_value': { - 'placeholder': { - 'value': { - 'unicode_str': 'Placeholder', - 'content_id': 'ca_placeholder_0' - } + 'refresher_exploration_id': None, + 'feedback': { + 'html': '

    Try Again.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, - 'rows': { - 'value': 1 - } - } - }), exp_domain.ExplorationChange({ + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [] + }, + 'cmd': 'edit_state_property', 'property_name': 'default_outcome', 'old_value': { - 'labelled_as_correct': False, - 'missing_prerequisite_skill_id': None, 'refresher_exploration_id': None, 'feedback': { - 'content_id': 'default_outcome', - 'html': '' + 'html': '', + 'content_id': 'default_outcome' }, + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False, + 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [ - ], - 'dest': 'End' - }, + ] + } + }), exp_domain.ExplorationChange({ 'state_name': 'Introduction', - 'cmd': 'edit_state_property', 'new_value': { - 'labelled_as_correct': False, + 'refresher_exploration_id': None, + 'feedback': { + 'html': '

    Try Again.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + }, 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False, + 'dest': 'Introduction', + 'dest_if_really_stuck': None, + 'param_changes': [ + + ] + }, + 'cmd': 'edit_state_property', + 'property_name': 'default_outcome', + 'old_value': { 'refresher_exploration_id': None, 'feedback': { - 'content_id': 'default_outcome', - 'html': '

    Feedback 1.

    ' + 'html': '

    Try Again.

    ', + 'content_id': 'default_outcome' }, + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False, + 'dest': 'End', + 'dest_if_really_stuck': None, 'param_changes': [ - ], - 'dest': 'End' + ] } - }), exp_domain.ExplorationChange({ - 'property_name': 'hints', - 'old_value': [ + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Made changes in interaction, contents, solutions, answer_groups in introduction state.') # pylint: disable=line-too-long - ], + # Changes to properties of second state. + change_list_2 = [exp_domain.ExplorationChange({ 'state_name': 'Introduction', + 'new_value': { + 'answer_is_exclusive': False, + 'correct_answer': 25, + 'explanation': { + 'html': '

    Explanation.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION) + } + }, 'cmd': 'edit_state_property', + 'property_name': 'solution', + 'old_value': None + }), exp_domain.ExplorationChange({ + 'state_name': 'Introduction', 'new_value': [ { 'hint_content': { - 'content_id': 'hint_1', - 'html': '

    Hint 1.

    ' + 'html': '

    Hint 1.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) + } + }, + { + 'hint_content': { + 'html': '

    Hint 2.

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) } } - ] - }), exp_domain.ExplorationChange({ - 'property_name': 'next_content_id_index', - 'old_value': 1, - 'state_name': 'Introduction', + ], 'cmd': 'edit_state_property', - 'new_value': 2 + 'property_name': 'hints', + 'old_value': [{ + 'hint_content': { + 'html': '

    Hint 1.

    ', + 'content_id': 'hint_1' + } + }] }), exp_domain.ExplorationChange({ - 'property_name': 'solution', - 'old_value': None, - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', + 'state_name': 'End', 'new_value': { - 'answer_is_exclusive': False, - 'explanation': { - 'content_id': 'solution', - 'html': '

    Explanation.

    ' - }, - 'correct_answer': 'Solution' - } - }), exp_domain.ExplorationChange({ - 'property_name': 'content', - 'old_value': { - 'content_id': 'content', - 'html': '' + 'html': '

    Congratulations, you have finished!

    ', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT) }, - 'state_name': 'End', 'cmd': 'edit_state_property', - 'new_value': { - 'content_id': 'content', - 'html': '

    Second State Content.

    ' + 'property_name': 'content', + 'old_value': { + 'html': '', + 'content_id': 'content_0' } + }), exp_domain.ExplorationChange({ + 'new_state_name': 'End-State', + 'cmd': 'rename_state', + 'old_state_name': 'End' })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - 'Added various contents.') - change_list_2 = [exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'property_name': 'answer_groups', - 'old_value': [], - 'state_name': 'Introduction', - 'new_value': [{ - 'rule_specs': [{ - 'rule_type': 'StartsWith', - 'inputs': { - 'x': { - 'contentId': 'rule_input_2', - 'normalizedStrSet': [ - 'Hello', - 'Hola' - ] - } - } - }], - 'tagged_skill_misconception_id': None, - 'outcome': { - 'labelled_as_correct': False, - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'param_changes': [], - 'refresher_exploration_id': None - }, - 'training_data': [] - }] - })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Added answer group.') + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), + 'Made changes in solutions in introduction state and content, state_name in end state.') # pylint: disable=line-too-long - # Adding some translations to the first state. + # Changes to the exploration properties to check + # for mergeability. change_list_3 = [exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'content', - 'translation_html': '

    Translation Content.

    ', + 'property_name': 'title', + 'cmd': 'edit_exploration_property', + 'old_value': 'A title', + 'new_value': 'A changed title.' + }), exp_domain.ExplorationChange({ + 'property_name': 'objective', + 'cmd': 'edit_exploration_property', + 'old_value': 'An objective', + 'new_value': 'A changed objective.' + }), exp_domain.ExplorationChange({ + 'property_name': 'category', + 'cmd': 'edit_exploration_property', + 'old_value': 'A category', + 'new_value': 'A changed category' + }), exp_domain.ExplorationChange({ + 'property_name': 'auto_tts_enabled', + 'cmd': 'edit_exploration_property', + 'old_value': True, + 'new_value': False + }), exp_domain.ExplorationChange({ + 'property_name': 'tags', + 'cmd': 'edit_exploration_property', + 'old_value': ['old_value'], + 'new_value': [ + 'new' + ] + }), exp_domain.ExplorationChange({ + 'property_name': 'tags', + 'cmd': 'edit_exploration_property', + 'old_value': [ + 'new' + ], + 'new_value': [ + 'new', + 'skill' + ] + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'language_code', + 'new_value': 'bn', + 'old_value': 'en' + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'author_notes', + 'new_value': 'author_notes' + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'blurb', + 'new_value': 'blurb' + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'init_state_name', + 'new_value': 'End', + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'init_state_name', + 'new_value': 'Introduction', + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'auto_tts_enabled', + 'new_value': False + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'correctness_feedback_enabled', + 'new_value': True + }), exp_domain.ExplorationChange({ + 'cmd': 
'edit_state_property', + 'property_name': 'confirmed_unclassified_answers', 'state_name': 'Introduction', - 'content_html': 'N/A' + 'new_value': ['test'] }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'default_outcome', - 'translation_html': '

    Translation Feedback 1.

    ', + 'cmd': 'edit_state_property', 'state_name': 'Introduction', - 'content_html': 'N/A' + 'property_name': 'linked_skill_id', + 'new_value': 'string_1' }), exp_domain.ExplorationChange({ - 'cmd': 'mark_written_translations_as_needing_update', + 'cmd': 'edit_state_property', 'state_name': 'Introduction', - 'content_id': 'default_outcome' + 'property_name': 'card_is_checkpoint', + 'new_value': True })] - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list_3) + self.EXP_0_ID, 1, change_list_3) self.assertEqual(changes_are_mergeable, True) - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_3, - 'Added some translations.') - # Adding translations again to the different contents - # of same state to check that they can be merged. - change_list_4 = [exp_domain.ExplorationChange({ - 'new_state_name': 'Intro-Rename', - 'cmd': 'rename_state', - 'old_state_name': 'Introduction' + def test_changes_are_not_mergeable_when_exploration_properties_changes_conflict( # pylint: disable=line-too-long + self + ) -> None: + self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, end_state_name='End') + + rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + + # Changes to the exploration properties to check + # for mergeability. + change_list = [exp_domain.ExplorationChange({ + 'property_name': 'title', + 'cmd': 'edit_exploration_property', + 'old_value': 'A title', + 'new_value': 'A changed title.' }), exp_domain.ExplorationChange({ - 'content_html': 'N/A', - 'translation_html': 'Placeholder Translation.', - 'state_name': 'Intro-Rename', - 'language_code': 'de', - 'content_id': 'ca_placeholder_0', - 'cmd': 'add_written_translation', - 'data_format': 'unicode' + 'property_name': 'objective', + 'cmd': 'edit_exploration_property', + 'old_value': 'An objective', + 'new_value': 'A changed objective.' }), exp_domain.ExplorationChange({ - 'content_html': 'N/A', - 'translation_html': '

    Hints Translation.

    ', - 'state_name': 'Intro-Rename', - 'language_code': 'de', - 'content_id': 'hint_1', - 'cmd': 'add_written_translation', - 'data_format': 'html' - }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'rule_input_2', - 'translation_html': '

    Translation Rule Input.

    ', - 'state_name': 'Intro-Rename', - 'content_html': 'N/A' - }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'feedback_1', - 'translation_html': '

    Translation Feedback.

    ', - 'state_name': 'Intro-Rename', - 'content_html': 'N/A' - }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'solution', - 'translation_html': '

    Translation Solution.

    ', - 'state_name': 'Intro-Rename', - 'content_html': 'N/A' + 'property_name': 'category', + 'cmd': 'edit_exploration_property', + 'old_value': 'A category', + 'new_value': 'A changed category' }), exp_domain.ExplorationChange({ - 'new_state_name': 'Introduction', - 'cmd': 'rename_state', - 'old_state_name': 'Intro-Rename' - })] - - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 3, change_list_4) - self.assertEqual(changes_are_mergeable, True) - - # Adding translations to the second state to check - # that they can be merged even in the same property. - change_list_5 = [exp_domain.ExplorationChange({ - 'content_html': 'N/A', - 'translation_html': '

    State 2 Content Translation.

    ', - 'state_name': 'End', - 'language_code': 'de', - 'content_id': 'content', - 'cmd': 'add_written_translation', - 'data_format': 'html' - })] - - changes_are_mergeable_1 = exp_services.are_changes_mergeable( - self.EXP_0_ID, 3, change_list_5) - self.assertEqual(changes_are_mergeable_1, True) - - # Add changes to the different content of first state to - # check that translation changes to some properties doesn't - # affects the changes of content of other properties. - change_list_6 = [exp_domain.ExplorationChange({ - 'old_value': { - 'rows': { - 'value': 1 - }, - 'placeholder': { - 'value': { - 'unicode_str': 'Placeholder', - 'content_id': 'ca_placeholder_0' - } - } - }, - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'property_name': 'widget_customization_args', - 'new_value': { - 'rows': { - 'value': 1 - }, - 'placeholder': { - 'value': { - 'unicode_str': 'Placeholder Changed.', - 'content_id': 'ca_placeholder_0' - } - } - } + 'property_name': 'auto_tts_enabled', + 'cmd': 'edit_exploration_property', + 'old_value': True, + 'new_value': False + }), exp_domain.ExplorationChange({ + 'property_name': 'tags', + 'cmd': 'edit_exploration_property', + 'old_value': ['old_value'], + 'new_value': [ + 'new' + ] }), exp_domain.ExplorationChange({ - 'property_name': 'default_outcome', - 'old_value': { - 'labelled_as_correct': False, - 'missing_prerequisite_skill_id': None, - 'refresher_exploration_id': None, - 'feedback': { - 'content_id': 'default_outcome', - 'html': 'Feedback 1.' - }, - 'param_changes': [ + 'property_name': 'tags', + 'cmd': 'edit_exploration_property', + 'old_value': [ + 'new' + ], + 'new_value': [ + 'new', + 'skill' + ] + })] - ], - 'dest': 'End' - }, - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'new_value': { - 'labelled_as_correct': False, - 'missing_prerequisite_skill_id': None, - 'refresher_exploration_id': None, - 'feedback': { - 'content_id': 'default_outcome', - 'html': '

    Feedback 2.

    ' - }, - 'param_changes': [ + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), + 'Changes in the Exploration Properties.') - ], - 'dest': 'End' - } + change_list_2 = [exp_domain.ExplorationChange({ + 'property_name': 'title', + 'cmd': 'edit_exploration_property', + 'old_value': 'A title', + 'new_value': 'A new title.' + }), exp_domain.ExplorationChange({ + 'property_name': 'objective', + 'cmd': 'edit_exploration_property', + 'old_value': 'An objective', + 'new_value': 'A new objective.' + }), exp_domain.ExplorationChange({ + 'property_name': 'category', + 'cmd': 'edit_exploration_property', + 'old_value': 'A category', + 'new_value': 'A new category' + }), exp_domain.ExplorationChange({ + 'property_name': 'auto_tts_enabled', + 'cmd': 'edit_exploration_property', + 'old_value': True, + 'new_value': False + }), exp_domain.ExplorationChange({ + 'property_name': 'tags', + 'cmd': 'edit_exploration_property', + 'old_value': ['old_value'], + 'new_value': [ + 'new' + ] + }), exp_domain.ExplorationChange({ + 'property_name': 'tags', + 'cmd': 'edit_exploration_property', + 'old_value': [ + 'new' + ], + 'new_value': [ + 'new', + 'skill' + ] })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_6, - 'Changing Customization Args Placeholder in First State.') - changes_are_mergeable_3 = exp_services.are_changes_mergeable( - self.EXP_0_ID, 4, change_list_5) - self.assertEqual(changes_are_mergeable_3, True) + changes_are_not_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 1, change_list_2) + self.assertEqual(changes_are_not_mergeable, False) - def test_changes_are_not_mergeable_when_translations_changes_conflict(self): # pylint: disable=line-too-long + def test_changes_are_mergeable_when_translations_changes_do_not_conflict( + self + ) -> None: self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, end_state_name='End') @@ -9098,38 +16506,6 @@ def 
test_changes_are_not_mergeable_when_translations_changes_conflict(self): # p # Adding content, feedbacks, solutions so that # translations can be added later on. change_list = [exp_domain.ExplorationChange({ - 'cmd': 'edit_state_property', - 'property_name': 'answer_groups', - 'old_value': [], - 'state_name': 'Introduction', - 'new_value': [{ - 'rule_specs': [{ - 'rule_type': 'StartsWith', - 'inputs': { - 'x': { - 'contentId': 'rule_input_2', - 'normalizedStrSet': [ - 'Hello', - 'Hola' - ] - } - } - }], - 'tagged_skill_misconception_id': None, - 'outcome': { - 'labelled_as_correct': False, - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'missing_prerequisite_skill_id': None, - 'dest': 'End', - 'param_changes': [], - 'refresher_exploration_id': None - }, - 'training_data': [] - }] - }), exp_domain.ExplorationChange({ 'property_name': 'content', 'old_value': { 'content_id': 'content', @@ -9152,6 +16528,9 @@ def test_changes_are_not_mergeable_when_translations_changes_conflict(self): # p }, 'rows': { 'value': 1 + }, + 'catchMisspellings': { + 'value': False } }, 'state_name': 'Introduction', @@ -9165,6 +16544,9 @@ def test_changes_are_not_mergeable_when_translations_changes_conflict(self): # p }, 'rows': { 'value': 1 + }, + 'catchMisspellings': { + 'value': False } } }), exp_domain.ExplorationChange({ @@ -9177,9 +16559,8 @@ def test_changes_are_not_mergeable_when_translations_changes_conflict(self): # p 'content_id': 'default_outcome', 'html': '' }, - 'param_changes': [ - - ], + 'param_changes': [], + 'dest_if_really_stuck': None, 'dest': 'End' }, 'state_name': 'Introduction', @@ -9195,13 +16576,12 @@ def test_changes_are_not_mergeable_when_translations_changes_conflict(self): # p 'param_changes': [ ], + 'dest_if_really_stuck': None, 'dest': 'End' } }), exp_domain.ExplorationChange({ 'property_name': 'hints', - 'old_value': [ - - ], + 'old_value': ['old_value'], 'state_name': 'Introduction', 'cmd': 'edit_state_property', 'new_value': [ @@ -9212,12 +16592,6 @@ def test_changes_are_not_mergeable_when_translations_changes_conflict(self): # p } } ] - }), exp_domain.ExplorationChange({ - 'property_name': 'next_content_id_index', - 'old_value': 1, - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'new_value': 2 }), exp_domain.ExplorationChange({ 'property_name': 'solution', 'old_value': None, @@ -9248,168 +16622,188 @@ def test_changes_are_not_mergeable_when_translations_changes_conflict(self): # p self.owner_id, self.EXP_0_ID, change_list, 'Added various contents.') - # Adding some translations to the first state. 
change_list_2 = [exp_domain.ExplorationChange({ + 'cmd': 'edit_state_property', + 'property_name': 'answer_groups', + 'old_value': ['old_value'], 'state_name': 'Introduction', - 'old_value': { - 'content_id': 'content', - 'html': '

    First State Content.

    ' - }, - 'new_value': { - 'content_id': 'content', - 'html': '

    Changed First State Content.

    ' - }, - 'property_name': 'content', - 'cmd': 'edit_state_property' - }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', + 'new_value': [{ + 'rule_specs': [{ + 'rule_type': 'StartsWith', + 'inputs': { + 'x': { + 'contentId': 'rule_input_2', + 'normalizedStrSet': [ + 'Hello', + 'Hola' + ] + } + } + }], + 'tagged_skill_misconception_id': None, + 'outcome': { + 'labelled_as_correct': False, + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Feedback

    ' + }, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [], + 'refresher_exploration_id': None + }, + 'training_data': [] + }] + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list_2, + 'Added answer group.') + + # Adding some translations to the first state. + change_list_3 = [exp_domain.ExplorationChange({ + 'cmd': 'mark_translations_needs_update', 'content_id': 'content', - 'translation_html': '

    Translation Content.

    ', - 'state_name': 'Introduction', - 'content_html': 'N/A' - }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'default_outcome', - 'translation_html': '

    Translation Feedback 1.

    ', - 'state_name': 'Introduction', - 'content_html': 'N/A' - }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'ca_placeholder_0', - 'translation_html': '

    Translation Placeholder.

    ', - 'state_name': 'Introduction', - 'content_html': 'N/A' - }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'hint_1', - 'translation_html': '

    Translation Hint.

    ', - 'state_name': 'Introduction', - 'content_html': 'N/A' }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'solution', - 'translation_html': '

    Translation Solution.

    ', - 'state_name': 'Introduction', - 'content_html': 'N/A' - }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'rule_input_2', - 'translation_html': '

    Translation Rule Input.

    ', - 'state_name': 'Introduction', - 'content_html': 'N/A' + 'cmd': 'mark_translations_needs_update', + 'content_id': 'default_outcome' }), exp_domain.ExplorationChange({ + 'cmd': 'remove_translations', + 'content_id': 'default_outcome' + })] + + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 2, change_list_3) + self.assertEqual(changes_are_mergeable, False) + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list_3, + 'Added some translations.') + + # Adding translations again to the different contents + # of same state to check that they can be merged. + change_list_4 = [exp_domain.ExplorationChange({ 'new_state_name': 'Intro-Rename', 'cmd': 'rename_state', 'old_state_name': 'Introduction' }), exp_domain.ExplorationChange({ - 'language_code': 'de', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'feedback_1', - 'translation_html': '

    Translation Feedback.

    ', - 'state_name': 'Intro-Rename', - 'content_html': 'N/A' + 'content_id': 'ca_placeholder_0', + 'cmd': 'remove_translations' + }), exp_domain.ExplorationChange({ + 'content_id': 'hint_1', + 'cmd': 'remove_translations' }), exp_domain.ExplorationChange({ 'new_state_name': 'Introduction', 'cmd': 'rename_state', 'old_state_name': 'Intro-Rename' })] + changes_are_mergeable = exp_services.are_changes_mergeable( + self.EXP_0_ID, 3, change_list_4) + self.assertEqual(changes_are_mergeable, False) exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, - 'Added some translations.') + self.owner_id, self.EXP_0_ID, change_list_4, + 'Resname state.') - # Adding translations again to the same contents - # of same state to check that they can not be - # merged. - change_list_3 = [exp_domain.ExplorationChange({ - 'language_code': 'bn', - 'data_format': 'html', - 'cmd': 'add_written_translation', + # Adding translations to the second state to check + # that they can be merged even in the same property. + change_list_5 = [exp_domain.ExplorationChange({ 'content_id': 'content', - 'translation_html': '

    Translation Content.

    ', - 'state_name': 'Introduction', - 'content_html': 'N/A' - }), exp_domain.ExplorationChange({ - 'language_code': 'bn', - 'data_format': 'html', - 'cmd': 'add_written_translation', - 'content_id': 'default_outcome', - 'translation_html': '

    Translation Feedback 1.

    ', - 'state_name': 'Introduction', - 'content_html': 'N/A' + 'cmd': 'mark_translations_needs_update' })] - changes_are_not_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list_3) - self.assertEqual(changes_are_not_mergeable, False) + changes_are_mergeable_1 = exp_services.are_changes_mergeable( + self.EXP_0_ID, 4, change_list_5) + self.assertEqual(changes_are_mergeable_1, False) - # Changes to the content of second state to check that - # the changes to the translations can not be made in - # same state if the property which can be translated is - # changed. - change_list_3 = [exp_domain.ExplorationChange({ - 'state_name': 'End', + # Add changes to the different content of first state to + # check that translation changes to some properties doesn't + # affects the changes of content of other properties. + change_list_6 = [exp_domain.ExplorationChange({ + 'old_value': { + 'rows': { + 'value': 1 + }, + 'placeholder': { + 'value': { + 'unicode_str': 'Placeholder', + 'content_id': 'ca_placeholder_0' + } + }, + 'catchMisspellings': { + 'value': False + } + }, + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', + 'property_name': 'widget_customization_args', + 'new_value': { + 'rows': { + 'value': 1 + }, + 'placeholder': { + 'value': { + 'unicode_str': 'Placeholder Changed.', + 'content_id': 'ca_placeholder_0' + } + }, + 'catchMisspellings': { + 'value': False + } + } + }), exp_domain.ExplorationChange({ + 'property_name': 'default_outcome', 'old_value': { - 'content_id': 'content', - 'html': '

    Second State Content.

    ' + 'labelled_as_correct': False, + 'missing_prerequisite_skill_id': None, + 'refresher_exploration_id': None, + 'feedback': { + 'content_id': 'default_outcome', + 'html': 'Feedback 1.' + }, + 'param_changes': [ + + ], + 'dest_if_really_stuck': None, + 'dest': 'End' }, + 'state_name': 'Introduction', + 'cmd': 'edit_state_property', 'new_value': { - 'content_id': 'content', - 'html': '

    Changed Second State Content.

    ' - }, - 'property_name': 'content', - 'cmd': 'edit_state_property' - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_3, - 'Changing Content in Second State.') + 'labelled_as_correct': False, + 'missing_prerequisite_skill_id': None, + 'refresher_exploration_id': None, + 'feedback': { + 'content_id': 'default_outcome', + 'html': '

    Feedback 2.

    ' + }, + 'param_changes': [ - # Adding translations to the same property in - # second state to check that they can not be merged. - change_list_4 = [exp_domain.ExplorationChange({ - 'content_html': 'N/A', - 'translation_html': '

    State 2 Content Translation.

    ', - 'state_name': 'End', - 'language_code': 'de', - 'content_id': 'content', - 'cmd': 'add_written_translation', - 'data_format': 'html' + ], + 'dest_if_really_stuck': None, + 'dest': 'End' + } })] - changes_are_not_mergeable_1 = exp_services.are_changes_mergeable( - self.EXP_0_ID, 3, change_list_4) - self.assertEqual(changes_are_not_mergeable_1, False) - def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # pylint: disable=line-too-long - self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, end_state_name='End') - - rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list_6, + 'Changing Customization Args Placeholder in First State.') + changes_are_mergeable_3 = exp_services.are_changes_mergeable( + self.EXP_0_ID, 4, change_list_5) + self.assertEqual(changes_are_mergeable_3, False) + def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict( + self + ) -> None: # Adding content, feedbacks, solutions so that # voiceovers can be added later on. change_list = [exp_domain.ExplorationChange({ 'property_name': 'content', - 'old_value': { - 'content_id': 'content', - 'html': '' - }, + 'old_value': None, 'state_name': 'Introduction', 'cmd': 'edit_state_property', 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), 'html': '

    First State Content.

    ' } }), exp_domain.ExplorationChange({ @@ -9418,11 +16812,14 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # 'placeholder': { 'value': { 'unicode_str': '', - 'content_id': 'ca_placeholder_0' + 'content_id': 'cust_arg_1' } }, 'rows': { 'value': 1 + }, + 'catchMisspellings': { + 'value': False } }, 'state_name': 'Introduction', @@ -9431,11 +16828,16 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # 'placeholder': { 'value': { 'unicode_str': 'Placeholder', - 'content_id': 'ca_placeholder_0' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='placeholder') } }, 'rows': { 'value': 1 + }, + 'catchMisspellings': { + 'value': False } } }), exp_domain.ExplorationChange({ @@ -9445,12 +16847,13 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # 'missing_prerequisite_skill_id': None, 'refresher_exploration_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': 'feedback_5', 'html': '' }, 'param_changes': [ ], + 'dest_if_really_stuck': None, 'dest': 'End' }, 'state_name': 'Introduction', @@ -9460,33 +16863,30 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # 'missing_prerequisite_skill_id': None, 'refresher_exploration_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), 'html': '

    Feedback 1.

    ' }, 'param_changes': [ ], + 'dest_if_really_stuck': None, 'dest': 'End' } }), exp_domain.ExplorationChange({ 'property_name': 'hints', - 'old_value': [], + 'old_value': ['old_value'], 'state_name': 'Introduction', 'cmd': 'edit_state_property', 'new_value': [ { 'hint_content': { - 'content_id': 'hint_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), 'html': '

    Hint 1.

    ' } } ] - }), exp_domain.ExplorationChange({ - 'property_name': 'next_content_id_index', - 'old_value': 1, - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'new_value': 2 }), exp_domain.ExplorationChange({ 'property_name': 'solution', 'old_value': None, @@ -9495,7 +16895,8 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # 'new_value': { 'answer_is_exclusive': False, 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' }, 'correct_answer': 'Solution' @@ -9503,18 +16904,20 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # }), exp_domain.ExplorationChange({ 'property_name': 'content', 'old_value': { - 'content_id': 'content', + 'content_id': 'content_6', 'html': '' }, 'state_name': 'End', 'cmd': 'edit_state_property', 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), 'html': '

    Second State Content.

    ' } })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), 'Added various contents.') # Adding change to the field which is neither @@ -9526,7 +16929,8 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # 'new_value': True })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), 'Added single unrelated change.') # Adding some voiceovers to the first state. @@ -9580,10 +16984,10 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # 'state_name': 'Introduction', 'new_value': { 'voiceovers_mapping': { - 'hint_1': {}, - 'default_outcome': {}, - 'solution': {}, - 'ca_placeholder_0': { + 'hint_8': {}, + 'default_outcome_7': {}, + 'solution_9': {}, + 'ca_placeholder_6': { 'en': { 'needs_update': False, 'filename': 'ca_placeholder_0-en-mfy5l6logg.mp3', @@ -9591,7 +16995,7 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # 'duration_secs': 10.971375 } }, - 'content': { + 'content_5': { 'en': { 'needs_update': False, 'filename': 'content-en-xrss3z3nso.mp3', @@ -9608,7 +17012,8 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # self.assertEqual(changes_are_mergeable, True) exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_3, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_3), 'Added some voiceovers.') # Adding voiceovers again to the same first state @@ -9687,11 +17092,13 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # change_list_6 = [exp_domain.ExplorationChange({ 'state_name': 'Introduction', 'old_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + 
translation_domain.ContentType.FEEDBACK), 'html': '

    First State Content.

    ' }, 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Changed First State Content.

    ' }, 'property_name': 'content', @@ -9699,7 +17106,8 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_6, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_6), 'Changing Content in First State.') changes_are_mergeable_3 = exp_services.are_changes_mergeable( self.EXP_0_ID, 4, change_list_5) @@ -9712,11 +17120,13 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # change_list_6 = [exp_domain.ExplorationChange({ 'state_name': 'End', 'old_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Second State Content.

    ' }, 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Changed Second State Content.

    ' }, 'property_name': 'content', @@ -9724,14 +17134,17 @@ def test_changes_are_mergeable_when_voiceovers_changes_do_not_conflict(self): # })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_6, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_6), 'Changing Content in Second State.') changes_are_not_mergeable = exp_services.are_changes_mergeable( self.EXP_0_ID, 4, change_list_4) self.assertEqual(changes_are_not_mergeable, False) - def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): + def test_changes_are_not_mergeable_when_voiceovers_changes_conflict( + self + ) -> None: self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, end_state_name='End') @@ -9742,13 +17155,14 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): change_list = [exp_domain.ExplorationChange({ 'property_name': 'content', 'old_value': { - 'content_id': 'content', + 'content_id': 'content_5', 'html': '' }, 'state_name': 'Introduction', 'cmd': 'edit_state_property', 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), 'html': '

    First State Content.

    ' } }), exp_domain.ExplorationChange({ @@ -9757,11 +17171,14 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): 'placeholder': { 'value': { 'unicode_str': '', - 'content_id': 'ca_placeholder_0' + 'content_id': 'cust_arg_5' } }, 'rows': { 'value': 1 + }, + 'catchMisspellings': { + 'value': False } }, 'state_name': 'Introduction', @@ -9770,11 +17187,16 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): 'placeholder': { 'value': { 'unicode_str': 'Placeholder', - 'content_id': 'ca_placeholder_0' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='placeholder') } }, 'rows': { 'value': 1 + }, + 'catchMisspellings': { + 'value': False } } }), exp_domain.ExplorationChange({ @@ -9784,12 +17206,13 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): 'missing_prerequisite_skill_id': None, 'refresher_exploration_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': 'feedback_7', 'html': '' }, 'param_changes': [ ], + 'dest_if_really_stuck': None, 'dest': 'End' }, 'state_name': 'Introduction', @@ -9799,33 +17222,30 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): 'missing_prerequisite_skill_id': None, 'refresher_exploration_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), 'html': '

    Feedback 1.

    ' }, 'param_changes': [ ], + 'dest_if_really_stuck': None, 'dest': 'End' } }), exp_domain.ExplorationChange({ 'property_name': 'hints', - 'old_value': [], + 'old_value': ['old_value'], 'state_name': 'Introduction', 'cmd': 'edit_state_property', 'new_value': [ { 'hint_content': { - 'content_id': 'hint_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.HINT), 'html': '

    Hint 1.

    ' } } ] - }), exp_domain.ExplorationChange({ - 'property_name': 'next_content_id_index', - 'old_value': 1, - 'state_name': 'Introduction', - 'cmd': 'edit_state_property', - 'new_value': 2 }), exp_domain.ExplorationChange({ 'property_name': 'solution', 'old_value': None, @@ -9834,7 +17254,8 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): 'new_value': { 'answer_is_exclusive': False, 'explanation': { - 'content_id': 'solution', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.SOLUTION), 'html': '

    Explanation.

    ' }, 'correct_answer': 'Solution' @@ -9848,12 +17269,14 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): 'state_name': 'End', 'cmd': 'edit_state_property', 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.CONTENT), 'html': '

    Second State Content.

    ' } })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), 'Added various contents.') # Adding some voiceovers to the first state. @@ -9907,10 +17330,10 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): 'state_name': 'Introduction', 'new_value': { 'voiceovers_mapping': { - 'hint_1': {}, - 'default_outcome': {}, - 'solution': {}, - 'ca_placeholder_0': { + 'hint_8': {}, + 'default_outcome_7': {}, + 'solution_9': {}, + 'ca_placeholder_6': { 'en': { 'needs_update': False, 'filename': 'ca_placeholder_0-en-mfy5l6logg.mp3', @@ -9918,7 +17341,7 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): 'duration_secs': 10.971375 } }, - 'content': { + 'content_5': { 'en': { 'needs_update': False, 'filename': 'content-en-xrss3z3nso.mp3', @@ -9932,7 +17355,8 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), 'Added some voiceovers.') # Adding voiceovers again to the same first state @@ -9974,12 +17398,15 @@ def test_changes_are_not_mergeable_when_voiceovers_changes_conflict(self): self.EXP_0_ID, 2, change_list_3) self.assertEqual(changes_are_mergeable, False) - def test_changes_are_not_mergeable_when_state_added_or_deleted(self): + def test_changes_are_not_mergeable_when_state_added_or_deleted( + self + ) -> None: self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, end_state_name='End') rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + test_dict: Dict[str, str] = {} # Changes to the various properties of the first and # second state. 
change_list = [exp_domain.ExplorationChange({ @@ -9992,17 +17419,21 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'old_value': { 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'unicode_str': '' } }, 'rows': { 'value': 1 + }, + 'catchMisspellings': { + 'value': False } }, 'cmd': 'edit_state_property', 'property_name': 'widget_customization_args', - 'new_value': {}, + 'new_value': test_dict, 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ 'old_value': None, @@ -10029,13 +17460,7 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): }, 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ - 'old_value': 1, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'new_value': 2, - 'state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': [], + 'old_value': ['old_value'], 'cmd': 'edit_state_property', 'property_name': 'answer_groups', 'new_value': [ @@ -10052,10 +17477,12 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'training_data': [], 'outcome': { 'param_changes': [], + 'dest_if_really_stuck': None, 'dest': 'End', 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10065,40 +17492,38 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): ], 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ - 'old_value': [], + 'old_value': ['old_value'], 'cmd': 'edit_state_property', 'property_name': 'hints', 'new_value': [ { 'hint_content': { - 'content_id': 'hint_2', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Hint.

    ' } } ], 'state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': 2, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'new_value': 3, - 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ 'old_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': 'Congratulations, you have finished!' }, 'cmd': 'edit_state_property', 'property_name': 'content', 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    2Congratulations, you have finished!

    ' }, 'state_name': 'End' })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), 'Changed various properties in both states.') # Change to the unrelated property to check that @@ -10106,11 +17531,13 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): change_list_2 = [exp_domain.ExplorationChange({ 'old_value': { 'html': '', - 'content_id': 'content' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, 'new_value': { 'html': '

    Hello Aryaman!

    ', - 'content_id': 'content' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, 'state_name': 'Introduction', 'property_name': 'content', @@ -10133,13 +17560,17 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'state_name': 'End-State' }), exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'End' + 'state_name': 'End', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' }), exp_domain.ExplorationChange({ 'cmd': 'delete_state', 'state_name': 'End' }), exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'End' + 'state_name': 'End', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' }), exp_domain.ExplorationChange({ 'new_state_name': 'End-State', 'cmd': 'rename_state', @@ -10161,9 +17592,11 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'outcome': { 'param_changes': [], 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10184,9 +17617,11 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'outcome': { 'param_changes': [], 'dest': 'End', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10198,9 +17633,11 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'old_value': { 'param_changes': [], 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': 
self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10211,9 +17648,11 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'new_value': { 'param_changes': [], 'dest': 'End', + 'dest_if_really_stuck': 'End', 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10222,13 +17661,15 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ 'old_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'cmd': 'edit_state_property', 'property_name': 'content', 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': 'Congratulations, you have finished!' 
}, 'state_name': 'End' @@ -10239,7 +17680,7 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'new_value': 'EndExploration', 'state_name': 'End' }), exp_domain.ExplorationChange({ - 'old_value': {}, + 'old_value': test_dict, 'cmd': 'edit_state_property', 'property_name': 'widget_customization_args', 'new_value': { @@ -10252,9 +17693,11 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): 'old_value': { 'param_changes': [], 'dest': 'End', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10267,7 +17710,8 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_3, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_3), 'Added and deleted states.') # Checking that old changes that could be @@ -10277,12 +17721,15 @@ def test_changes_are_not_mergeable_when_state_added_or_deleted(self): self.EXP_0_ID, 1, change_list_2) self.assertEqual(changes_are_not_mergeable, False) - def test_changes_are_not_mergeable_when_frontend_version_exceeds_backend_version(self): # pylint: disable=line-too-long + def test_changes_are_not_mergeable_when_frontend_version_exceeds_backend_version( # pylint: disable=line-too-long + self + ) -> None: self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, end_state_name='End') rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + test_dict: Dict[str, str] = {} # Changes to the various properties of the first and # second state. 
change_list = [exp_domain.ExplorationChange({ @@ -10295,17 +17742,21 @@ def test_changes_are_not_mergeable_when_frontend_version_exceeds_backend_version 'old_value': { 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'unicode_str': '' } }, 'rows': { 'value': 1 + }, + 'catchMisspellings': { + 'value': False } }, 'cmd': 'edit_state_property', 'property_name': 'widget_customization_args', - 'new_value': {}, + 'new_value': test_dict, 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ 'old_value': None, @@ -10314,13 +17765,7 @@ def test_changes_are_not_mergeable_when_frontend_version_exceeds_backend_version 'new_value': 'NumericInput', 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ - 'old_value': 1, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'new_value': 2, - 'state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': [], + 'old_value': ['old_value'], 'cmd': 'edit_state_property', 'property_name': 'answer_groups', 'new_value': [ @@ -10338,9 +17783,11 @@ def test_changes_are_not_mergeable_when_frontend_version_exceeds_backend_version 'outcome': { 'param_changes': [], 'dest': 'End', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10350,33 +17797,30 @@ def test_changes_are_not_mergeable_when_frontend_version_exceeds_backend_version ], 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ - 'old_value': [], + 'old_value': ['old_value'], 'cmd': 'edit_state_property', 'property_name': 'hints', 'new_value': [ { 'hint_content': { - 'content_id': 'hint_2', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Hint.

    ' } } ], 'state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': 2, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'new_value': 3, - 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ 'old_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': 'Congratulations, you have finished!' }, 'cmd': 'edit_state_property', 'property_name': 'content', 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    2Congratulations, you have finished!

    ' }, 'state_name': 'End' @@ -10394,7 +17838,8 @@ def test_changes_are_not_mergeable_when_frontend_version_exceeds_backend_version self.assertEqual(changes_are_not_mergeable, False) def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( - self): + self + ) -> None: self.login(self.OWNER_EMAIL) with self.swap(feconf, 'CAN_SEND_EMAILS', True): messages = self._get_sent_email_messages( @@ -10405,6 +17850,7 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + test_dict: Dict[str, str] = {} # Changes to the various properties of the first and # second state. change_list = [exp_domain.ExplorationChange({ @@ -10417,17 +17863,21 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'old_value': { 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'unicode_str': '' } }, 'rows': { 'value': 1 + }, + 'catchMisspellings': { + 'value': False } }, 'cmd': 'edit_state_property', 'property_name': 'widget_customization_args', - 'new_value': {}, + 'new_value': test_dict, 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ 'old_value': None, @@ -10454,13 +17904,7 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( }, 'cmd': 'edit_state_property' }), exp_domain.ExplorationChange({ - 'old_value': 1, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'new_value': 2, - 'state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': [], + 'old_value': ['old_value'], 'cmd': 'edit_state_property', 'property_name': 'answer_groups', 'new_value': [ @@ -10478,9 +17922,14 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'outcome': { 'param_changes': [], 'dest': 'End', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 
'content_id': 'feedback_1', + 'content_id': ( + self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK + ) + ), 'html': '' }, 'labelled_as_correct': False, @@ -10490,40 +17939,38 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( ], 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ - 'old_value': [], + 'old_value': ['old_value'], 'cmd': 'edit_state_property', 'property_name': 'hints', 'new_value': [ { 'hint_content': { - 'content_id': 'hint_2', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    Hint.

    ' } } ], 'state_name': 'Introduction' - }), exp_domain.ExplorationChange({ - 'old_value': 2, - 'cmd': 'edit_state_property', - 'property_name': 'next_content_id_index', - 'new_value': 3, - 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ 'old_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': 'Congratulations, you have finished!' }, 'cmd': 'edit_state_property', 'property_name': 'content', 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '

    2Congratulations, you have finished!

    ' }, 'state_name': 'End' })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), 'Changed various properties in both states.') change_list_2 = [exp_domain.ExplorationChange({ @@ -10535,13 +17982,17 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'state_name': 'End-State' }), exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'End' + 'state_name': 'End', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' }), exp_domain.ExplorationChange({ 'cmd': 'delete_state', 'state_name': 'End' }), exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'End' + 'state_name': 'End', + 'content_id_for_state_content': 'content_0', + 'content_id_for_default_outcome': 'default_outcome_1' }), exp_domain.ExplorationChange({ 'new_state_name': 'End-State', 'cmd': 'rename_state', @@ -10563,9 +18014,11 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'outcome': { 'param_changes': [], 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10586,9 +18039,11 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'outcome': { 'param_changes': [], 'dest': 'End', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10600,9 +18055,11 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'old_value': { 'param_changes': [], 'dest': 'Introduction', + 'dest_if_really_stuck': None, 
'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10613,9 +18070,11 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'new_value': { 'param_changes': [], 'dest': 'End', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10624,13 +18083,15 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'state_name': 'Introduction' }), exp_domain.ExplorationChange({ 'old_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'cmd': 'edit_state_property', 'property_name': 'content', 'new_value': { - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': 'Congratulations, you have finished!' 
}, 'state_name': 'End' @@ -10641,7 +18102,7 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'new_value': 'EndExploration', 'state_name': 'End' }), exp_domain.ExplorationChange({ - 'old_value': {}, + 'old_value': test_dict, 'cmd': 'edit_state_property', 'property_name': 'widget_customization_args', 'new_value': { @@ -10654,9 +18115,11 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( 'old_value': { 'param_changes': [], 'dest': 'End', + 'dest_if_really_stuck': None, 'missing_prerequisite_skill_id': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'html': '' }, 'labelled_as_correct': False, @@ -10669,16 +18132,19 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), 'Added and deleted states.') change_list_3 = [exp_domain.ExplorationChange({ 'old_value': { 'html': '', - 'content_id': 'content' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, 'new_value': { 'html': '

    Hello Aryaman!

    ', - 'content_id': 'content' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, 'state_name': 'Introduction', 'property_name': 'content', @@ -10688,19 +18154,7 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( self.EXP_0_ID, 1, change_list_3) self.assertEqual(changes_are_not_mergeable, False) - change_list_3_dict = [{ - 'cmd': 'edit_state_property', - 'property_name': 'content', - 'state_name': 'Introduction', - 'new_value': { - 'html': '

    Hello Aryaman!

    ', - 'content_id': 'content' - }, - 'old_value': { - 'html': '', - 'content_id': 'content' - }, - }] + change_list_3_dict = [change.to_dict() for change in change_list_3] expected_email_html_body = ( '(Sent from dev-project-id)

    ' @@ -10719,7 +18173,8 @@ def test_email_is_sent_to_admin_in_case_of_adding_deleting_state_changes( self.assertEqual(messages[0].html, expected_email_html_body) def test_email_is_sent_to_admin_in_case_of_state_renames_changes_conflict( - self): + self + ) -> None: self.login(self.OWNER_EMAIL) with self.swap(feconf, 'CAN_SEND_EMAILS', True): messages = self._get_sent_email_messages( @@ -10732,18 +18187,21 @@ def test_email_is_sent_to_admin_in_case_of_state_renames_changes_conflict( change_list = [exp_domain.ExplorationChange({ 'old_value': { 'html': '', - 'content_id': 'content' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, 'new_value': { 'html': '

    End State

    ', - 'content_id': 'content' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, 'state_name': 'End', 'property_name': 'content', 'cmd': 'edit_state_property' })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list), 'Changed various properties in both states.') # State name changed. @@ -10754,17 +18212,20 @@ def test_email_is_sent_to_admin_in_case_of_state_renames_changes_conflict( })] exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, + self.owner_id, self.EXP_0_ID, + self.append_next_content_id_index_change(change_list_2), 'Changed various properties in both states.') change_list_3 = [exp_domain.ExplorationChange({ 'old_value': { 'html': 'End State', - 'content_id': 'content' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, 'new_value': { 'html': '

    End State Changed

    ', - 'content_id': 'content' + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) }, 'state_name': 'End', 'property_name': 'content', @@ -10774,19 +18235,7 @@ def test_email_is_sent_to_admin_in_case_of_state_renames_changes_conflict( self.EXP_0_ID, 2, change_list_3) self.assertEqual(changes_are_not_mergeable, False) - change_list_3_dict = [{ - 'cmd': 'edit_state_property', - 'property_name': 'content', - 'state_name': 'End', - 'new_value': { - 'html': '

    End State Changed

    ', - 'content_id': 'content' - }, - 'old_value': { - 'html': 'End State', - 'content_id': 'content' - }, - }] + change_list_3_dict = [change.to_dict() for change in change_list_3] expected_email_html_body = ( '(Sent from dev-project-id)

    ' 'Hi Admin,

    ' @@ -10809,7 +18258,8 @@ def test_email_is_sent_to_admin_in_case_of_state_renames_changes_conflict( 'translation_html': '

    State 2 Content Translation.

    ', 'state_name': 'End', 'language_code': 'de', - 'content_id': 'content', + 'content_id': self.content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), 'cmd': 'add_written_translation', 'data_format': 'html' })] @@ -10817,27 +18267,206 @@ def test_email_is_sent_to_admin_in_case_of_state_renames_changes_conflict( self.EXP_0_ID, 2, change_list_4) self.assertEqual(changes_are_not_mergeable_2, False) - change_list_4_dict = [{ - 'cmd': 'add_written_translation', - 'state_name': 'End', - 'content_id': 'content', - 'language_code': 'de', - 'content_html': 'N/A', - 'translation_html': '

    State 2 Content Translation.

    ', - 'data_format': 'html' - }] - expected_email_html_body_2 = ( - '(Sent from dev-project-id)

    ' - 'Hi Admin,

    ' - 'Some draft changes were rejected in exploration %s because ' - 'the changes were conflicting and could not be saved. Please ' - 'see the rejected change list below:
    ' - 'Discarded change list: %s

    ' - 'Frontend Version: %s
    ' - 'Backend Version: %s

    ' - 'Thanks!' % (self.EXP_0_ID, change_list_4_dict, 2, 3) + +class ExplorationMetadataDomainUnitTests(test_utils.GenericTestBase): + + def _require_metadata_properties_to_be_synced(self) -> None: + """Raises error if there is a new metadata property in the Exploration + object and it is not added in the ExplorationMetadata domain object. + + Raises: + Exception. All the metadata properties are not synced. + """ + exploration = exp_domain.Exploration.create_default_exploration('0') + exploration_dict = exploration.to_dict() + for key in exploration_dict: + if ( + key not in constants.NON_METADATA_PROPERTIES and + key not in constants.METADATA_PROPERTIES + ): + raise Exception( + 'Looks like a new property %s was added to the Exploration' + ' domain object. Please include this property in ' + 'constants.METADATA_PROPERTIES if you want to use this ' + 'as a metadata property. Otherwise, add this in the ' + 'constants.NON_METADATA_PROPERTIES if you don\'t want ' + 'to use this as a metadata property.' % (key) + ) + + exploration_metadata = exploration.get_metadata() + exploration_metadata_dict = exploration_metadata.to_dict() + for metadata_property in constants.METADATA_PROPERTIES: + if metadata_property not in exploration_metadata_dict: + raise Exception( + 'A new metadata property %s was added to the Exploration ' + 'domain object but not included in the ' + 'ExplorationMetadata domain object. Please include this ' + 'new property in the ExplorationMetadata domain object ' + 'also.' 
% (metadata_property) + ) + + def test_exploration_metadata_gets_created(self) -> None: + exploration = exp_domain.Exploration.create_default_exploration('0') + exploration.update_param_specs({ + 'ExampleParamOne': ( + param_domain.ParamSpec('UnicodeString').to_dict()) + }) + exploration.update_param_changes([ + param_domain.ParamChange( + 'ParamChange', 'RandomSelector', { + 'list_of_values': ['3', '4'], + 'parse_with_jinja': True + } + ), + param_domain.ParamChange( + 'ParamChange', 'RandomSelector', { + 'list_of_values': ['5', '6'], + 'parse_with_jinja': True + } ) - messages = self._get_sent_email_messages( - feconf.ADMIN_EMAIL_ADDRESS) - self.assertEqual(len(messages), 2) - self.assertEqual(expected_email_html_body_2, messages[1].html) + ]) + actual_metadata_dict = exp_domain.ExplorationMetadata( + exploration.title, exploration. category, exploration.objective, + exploration.language_code, exploration.tags, exploration.blurb, + exploration.author_notes, exploration.states_schema_version, + exploration.init_state_name, exploration.param_specs, + exploration.param_changes, exploration.auto_tts_enabled, + exploration.correctness_feedback_enabled, exploration.edits_allowed + ).to_dict() + expected_metadata_dict = { + 'title': exploration.title, + 'category': exploration.category, + 'objective': exploration.objective, + 'language_code': exploration.language_code, + 'tags': exploration.tags, + 'blurb': exploration.blurb, + 'author_notes': exploration.author_notes, + 'states_schema_version': exploration.states_schema_version, + 'init_state_name': exploration.init_state_name, + 'param_specs': { + 'ExampleParamOne': ( + param_domain.ParamSpec('UnicodeString').to_dict()) + }, + 'param_changes': [ + param_domain.ParamChange( + 'ParamChange', 'RandomSelector', { + 'list_of_values': ['3', '4'], + 'parse_with_jinja': True + } + ).to_dict(), + param_domain.ParamChange( + 'ParamChange', 'RandomSelector', { + 'list_of_values': ['5', '6'], + 'parse_with_jinja': True + } + 
).to_dict() + ], + 'auto_tts_enabled': exploration.auto_tts_enabled, + 'correctness_feedback_enabled': ( + exploration.correctness_feedback_enabled), + 'edits_allowed': exploration.edits_allowed + } + + self.assertEqual(actual_metadata_dict, expected_metadata_dict) + + def test_metadata_properties_are_synced(self) -> None: + self._require_metadata_properties_to_be_synced() + + swapped_metadata_properties = self.swap( + constants, 'METADATA_PROPERTIES', [ + 'title', 'category', 'objective', 'language_code', + 'blurb', 'author_notes', 'states_schema_version', + 'init_state_name', 'param_specs', 'param_changes', + 'auto_tts_enabled', 'correctness_feedback_enabled', + 'edits_allowed' + ] + ) + error_message = ( + 'Looks like a new property tags was added to the Exploration' + ' domain object. Please include this property in ' + 'constants.METADATA_PROPERTIES if you want to use this ' + 'as a metadata property. Otherwise, add this in the ' + 'constants.NON_METADATA_PROPERTIES if you don\'t want ' + 'to use this as a metadata property.' + ) + with swapped_metadata_properties, self.assertRaisesRegex( + Exception, error_message + ): + self._require_metadata_properties_to_be_synced() + + swapped_metadata_properties = self.swap( + constants, 'METADATA_PROPERTIES', [ + 'title', 'category', 'objective', 'language_code', 'tags', + 'blurb', 'author_notes', 'states_schema_version', + 'init_state_name', 'param_specs', 'param_changes', + 'auto_tts_enabled', 'correctness_feedback_enabled', + 'edits_allowed', 'new_property' + ] + ) + error_message = ( + 'A new metadata property %s was added to the Exploration ' + 'domain object but not included in the ' + 'ExplorationMetadata domain object. Please include this ' + 'new property in the ExplorationMetadata domain object ' + 'also.' 
% ('new_property') + ) + with swapped_metadata_properties, self.assertRaisesRegex( + Exception, error_message + ): + self._require_metadata_properties_to_be_synced() + + +class MetadataVersionHistoryDomainUnitTests(test_utils.GenericTestBase): + + def test_metadata_version_history_gets_created(self) -> None: + expected_dict = { + 'last_edited_version_number': 1, + 'last_edited_committer_id': 'user_1' + } + actual_dict = exp_domain.MetadataVersionHistory(1, 'user_1').to_dict() + + self.assertEqual(expected_dict, actual_dict) + + def test_metadata_version_history_gets_created_from_dict(self) -> None: + metadata_version_history_dict: exp_domain.MetadataVersionHistoryDict = { + 'last_edited_version_number': 1, + 'last_edited_committer_id': 'user_1' + } + metadata_version_history = ( + exp_domain.MetadataVersionHistory.from_dict( + metadata_version_history_dict)) + + self.assertEqual( + metadata_version_history.last_edited_version_number, + metadata_version_history_dict['last_edited_version_number']) + self.assertEqual( + metadata_version_history.last_edited_committer_id, + metadata_version_history_dict['last_edited_committer_id']) + + +class ExplorationVersionHistoryUnitTests(test_utils.GenericTestBase): + + def test_exploration_version_history_gets_created(self) -> None: + state_version_history_dict = { + 'state 1': state_domain.StateVersionHistory( + 1, 'state 1', 'user1' + ).to_dict() + } + metadata_version_history = exp_domain.MetadataVersionHistory( + None, 'user1' + ) + expected_dict = { + 'exploration_id': 'exp_1', + 'exploration_version': 2, + 'state_version_history': state_version_history_dict, + 'metadata_version_history': metadata_version_history.to_dict(), + 'committer_ids': ['user1'] + } + actual_dict = exp_domain.ExplorationVersionHistory( + 'exp_1', 2, state_version_history_dict, + metadata_version_history.last_edited_version_number, + metadata_version_history.last_edited_committer_id, + ['user1'] + ).to_dict() + + self.assertEqual(actual_dict, 
expected_dict) diff --git a/core/domain/exp_fetchers.py b/core/domain/exp_fetchers.py index 486d37a435e8..61accda33f0b 100644 --- a/core/domain/exp_fetchers.py +++ b/core/domain/exp_fetchers.py @@ -31,13 +31,27 @@ from core.domain import caching_services from core.domain import exp_domain from core.domain import subscription_services +from core.domain import user_domain from core.platform import models -(exp_models,) = models.Registry.import_models([models.NAMES.exploration]) +from typing import Dict, List, Literal, Optional, Sequence, overload + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import exp_models + from mypy_imports import user_models + +(exp_models, user_models) = models.Registry.import_models([ + models.Names.EXPLORATION, models.Names.USER +]) datastore_services = models.Registry.import_datastore_services() -def _migrate_states_schema(versioned_exploration_states, init_state_name): +def _migrate_states_schema( + versioned_exploration_states: exp_domain.VersionedExplorationStatesDict, + init_state_name: str, language_code: str +) -> Optional[int]: """Holds the responsibility of performing a step-by-step, sequential update of an exploration states structure based on the schema version of the input exploration dictionary. This is very similar to the YAML conversion process @@ -55,6 +69,10 @@ def _migrate_states_schema(versioned_exploration_states, init_state_name): - states: the dict of states comprising the exploration. The keys in this dict are state names. init_state_name: str. Name of initial state. + language_code: str. The language code of the exploration. + + Returns: + None|int. The next content Id index for generating new content Id. Raises: Exception. The given states_schema_version is invalid. 
@@ -71,15 +89,29 @@ def _migrate_states_schema(versioned_exploration_states, init_state_name): feconf.EARLIEST_SUPPORTED_STATE_SCHEMA_VERSION, feconf.CURRENT_STATE_SCHEMA_VERSION)) + next_content_id_index = None while (states_schema_version < feconf.CURRENT_STATE_SCHEMA_VERSION): - exp_domain.Exploration.update_states_from_model( - versioned_exploration_states, - states_schema_version, init_state_name) + if states_schema_version == 54: + # State conversion function from 54 to 55 removes + # next_content_id_index from the state level, hence this if case + # populates the next_content_id_index from the old state, which will + # be used for introducing next_content_id_index into + # exploration level. + next_content_id_index = ( + exp_domain.Exploration.update_states_from_model( + versioned_exploration_states, + states_schema_version, init_state_name, language_code) + ) + else: + exp_domain.Exploration.update_states_from_model( + versioned_exploration_states, + states_schema_version, init_state_name, language_code) states_schema_version += 1 + return next_content_id_index -def get_new_exploration_id(): +def get_new_exploration_id() -> str: """Returns a new exploration id. Returns: @@ -88,8 +120,19 @@ def get_new_exploration_id(): return exp_models.ExplorationModel.get_new_id('') +def get_new_unique_progress_url_id() -> str: + """Returns a new unique progress url id. + + Returns: + str. A new unique progress url id. + """ + return exp_models.TransientCheckpointUrlModel.get_new_progress_id() + + def get_multiple_versioned_exp_interaction_ids_mapping_by_version( - exp_id, version_numbers): + exp_id: str, + version_numbers: List[int] +) -> List[exp_domain.VersionedExplorationInteractionIdsMapping]: """Returns a list of VersionedExplorationInteractionIdsMapping domain objects corresponding to the specified versions. 
@@ -131,7 +174,10 @@ def get_multiple_versioned_exp_interaction_ids_mapping_by_version( return versioned_exp_interaction_ids_mapping -def get_exploration_from_model(exploration_model, run_conversion=True): +def get_exploration_from_model( + exploration_model: exp_models.ExplorationModel, + run_conversion: bool = True +) -> exp_domain.Exploration: """Returns an Exploration domain object given an exploration model loaded from the datastore. @@ -155,17 +201,22 @@ def get_exploration_from_model(exploration_model, run_conversion=True): """ # Ensure the original exploration model does not get altered. - versioned_exploration_states = { + versioned_exploration_states: exp_domain.VersionedExplorationStatesDict = { 'states_schema_version': exploration_model.states_schema_version, 'states': copy.deepcopy(exploration_model.states) } init_state_name = exploration_model.init_state_name + next_content_id_index = None + language_code = exploration_model.language_code # If the exploration uses the latest states schema version, no conversion # is necessary. 
if (run_conversion and exploration_model.states_schema_version != feconf.CURRENT_STATE_SCHEMA_VERSION): - _migrate_states_schema(versioned_exploration_states, init_state_name) + next_content_id_index = _migrate_states_schema( + versioned_exploration_states, init_state_name, language_code) + if next_content_id_index is not None: + exploration_model.next_content_id_index = next_content_id_index return exp_domain.Exploration( exploration_model.id, exploration_model.title, @@ -178,23 +229,48 @@ def get_exploration_from_model(exploration_model, run_conversion=True): exploration_model.param_specs, exploration_model.param_changes, exploration_model.version, exploration_model.auto_tts_enabled, exploration_model.correctness_feedback_enabled, + exploration_model.next_content_id_index, + exploration_model.edits_allowed, created_on=exploration_model.created_on, last_updated=exploration_model.last_updated) -def get_exploration_summary_by_id(exploration_id): +@overload +def get_exploration_summary_by_id( + exploration_id: str, +) -> exp_domain.ExplorationSummary: ... + + +@overload +def get_exploration_summary_by_id( + exploration_id: str, *, strict: Literal[True] +) -> exp_domain.ExplorationSummary: ... + + +@overload +def get_exploration_summary_by_id( + exploration_id: str, *, strict: Literal[False] +) -> Optional[exp_domain.ExplorationSummary]: ... + + +def get_exploration_summary_by_id( + exploration_id: str, + strict: bool = True +) -> Optional[exp_domain.ExplorationSummary]: """Returns a domain object representing an exploration summary. Args: exploration_id: str. The id of the ExplorationSummary to be returned. + strict: bool. Whether to fail noisily if no exploration with a given id + exists. Returns: - ExplorationSummary. The summary domain object corresponding to the - given exploration. + ExplorationSummary|None. The summary domain object corresponding to the + given exploration, and none if no ExpSummaryModel exists for given id. 
""" # TODO(msl): Maybe use memcache similarly to get_exploration_by_id. exp_summary_model = exp_models.ExpSummaryModel.get( - exploration_id, strict=False) + exploration_id, strict=strict) if exp_summary_model: exp_summary = get_exploration_summary_from_model(exp_summary_model) return exp_summary @@ -202,7 +278,9 @@ def get_exploration_summary_by_id(exploration_id): return None -def get_exploration_summaries_from_models(exp_summary_models): +def get_exploration_summaries_from_models( + exp_summary_models: Sequence[exp_models.ExpSummaryModel] +) -> Dict[str, exp_domain.ExplorationSummary]: """Returns a dict with ExplorationSummary domain objects as values, keyed by their exploration id. @@ -223,7 +301,9 @@ def get_exploration_summaries_from_models(exp_summary_models): return result -def get_exploration_summary_from_model(exp_summary_model): +def get_exploration_summary_from_model( + exp_summary_model: exp_models.ExpSummaryModel +) -> exp_domain.ExplorationSummary: """Returns an ExplorationSummary domain object. Args: @@ -252,7 +332,9 @@ def get_exploration_summary_from_model(exp_summary_model): ) -def get_exploration_summaries_matching_ids(exp_ids): +def get_exploration_summaries_matching_ids( + exp_ids: List[str] +) -> List[Optional[exp_domain.ExplorationSummary]]: """Returns a list of ExplorationSummary domain objects (or None if the corresponding summary does not exist) corresponding to the given list of exploration ids. @@ -269,7 +351,9 @@ def get_exploration_summaries_matching_ids(exp_ids): for model in exp_models.ExpSummaryModel.get_multi(exp_ids)] -def get_exploration_summaries_subscribed_to(user_id): +def get_exploration_summaries_subscribed_to( + user_id: str +) -> List[exp_domain.ExplorationSummary]: """Returns a list of ExplorationSummary domain objects that the user subscribes to. 
@@ -288,7 +372,43 @@ def get_exploration_summaries_subscribed_to(user_id): ] -def get_exploration_by_id(exploration_id, strict=True, version=None): +@overload +def get_exploration_by_id( + exploration_id: str, +) -> exp_domain.Exploration: ... + + +@overload +def get_exploration_by_id( + exploration_id: str, + *, + version: Optional[int] = None +) -> exp_domain.Exploration: ... + + +@overload +def get_exploration_by_id( + exploration_id: str, + *, + strict: Literal[True], + version: Optional[int] = None +) -> exp_domain.Exploration: ... + + +@overload +def get_exploration_by_id( + exploration_id: str, + *, + strict: Literal[False], + version: Optional[int] = None +) -> Optional[exp_domain.Exploration]: ... + + +def get_exploration_by_id( + exploration_id: str, + strict: bool = True, + version: Optional[int] = None +) -> Optional[exp_domain.Exploration]: """Returns an Exploration domain object. Args: @@ -299,7 +419,8 @@ def get_exploration_by_id(exploration_id, strict=True, version=None): If None, the latest version of the exploration is returned. Returns: - Exploration. The domain object corresponding to the given exploration. + Exploration|None. The domain object corresponding to the given + exploration. """ sub_namespace = str(version) if version else None cached_exploration = caching_services.get_multi( @@ -326,7 +447,9 @@ def get_exploration_by_id(exploration_id, strict=True, version=None): return None -def get_multiple_explorations_by_id(exp_ids, strict=True): +def get_multiple_explorations_by_id( + exp_ids: List[str], strict: bool = True +) -> Dict[str, exp_domain.Exploration]: """Returns a dict of domain objects representing explorations with the given ids as keys. If an exp_id is not present, it is not included in the return dict. 
@@ -388,7 +511,9 @@ def get_multiple_explorations_by_id(exp_ids, strict=True): return result -def get_exploration_summaries_where_user_has_role(user_id): +def get_exploration_summaries_where_user_has_role( + user_id: str +) -> List[exp_domain.ExplorationSummary]: """Returns a list of ExplorationSummary domain objects where the user has some role. @@ -399,16 +524,152 @@ def get_exploration_summaries_where_user_has_role(user_id): list(ExplorationSummary). List of ExplorationSummary domain objects where the user has some role. """ - exp_summary_models = exp_models.ExpSummaryModel.query( - datastore_services.any_of( - exp_models.ExpSummaryModel.owner_ids == user_id, - exp_models.ExpSummaryModel.editor_ids == user_id, - exp_models.ExpSummaryModel.voice_artist_ids == user_id, - exp_models.ExpSummaryModel.viewer_ids == user_id, - exp_models.ExpSummaryModel.contributor_ids == user_id - ) - ).fetch() + exp_summary_models: Sequence[exp_models.ExpSummaryModel] = ( + exp_models.ExpSummaryModel.query( + datastore_services.any_of( + exp_models.ExpSummaryModel.owner_ids == user_id, + exp_models.ExpSummaryModel.editor_ids == user_id, + exp_models.ExpSummaryModel.voice_artist_ids == user_id, + exp_models.ExpSummaryModel.viewer_ids == user_id, + exp_models.ExpSummaryModel.contributor_ids == user_id + ) + ).fetch() + ) return [ get_exploration_summary_from_model(exp_summary_model) for exp_summary_model in exp_summary_models ] + + +def get_exploration_user_data( + user_id: str, exp_id: str +) -> Optional[user_domain.ExplorationUserData]: + """Returns an ExplorationUserData domain object. + + Args: + user_id: str. The Id of the user. + exp_id: str. The Id of the exploration. + + Returns: + ExplorationUserData or None. The domain object corresponding to the + given user and exploration. If the model corresponsing to given user + and exploration is not found, return None. 
+ """ + exp_user_data_model = user_models.ExplorationUserDataModel.get( + user_id, exp_id) + + if exp_user_data_model is None: + return None + + return user_domain.ExplorationUserData( + exp_user_data_model.user_id, + exp_user_data_model.exploration_id, + exp_user_data_model.rating, + exp_user_data_model.rated_on, + exp_user_data_model.draft_change_list, + exp_user_data_model.draft_change_list_last_updated, + exp_user_data_model.draft_change_list_exp_version, + exp_user_data_model.draft_change_list_id, + exp_user_data_model.mute_suggestion_notifications, + exp_user_data_model.mute_feedback_notifications, + exp_user_data_model.furthest_reached_checkpoint_exp_version, + exp_user_data_model.furthest_reached_checkpoint_state_name, + exp_user_data_model.most_recently_reached_checkpoint_exp_version, + exp_user_data_model.most_recently_reached_checkpoint_state_name + ) + + +@overload +def get_logged_out_user_progress( + unique_progress_url_id: str, *, strict: Literal[True] +) -> exp_domain.TransientCheckpointUrl: ... + + +@overload +def get_logged_out_user_progress( + unique_progress_url_id: str +) -> Optional[exp_domain.TransientCheckpointUrl]: ... + + +@overload +def get_logged_out_user_progress( + unique_progress_url_id: str, *, strict: Literal[False] +) -> Optional[exp_domain.TransientCheckpointUrl]: ... + + +@overload +def get_logged_out_user_progress( + unique_progress_url_id: str, *, strict: bool +) -> Optional[exp_domain.TransientCheckpointUrl]: ... + + +def get_logged_out_user_progress( + unique_progress_url_id: str, strict: bool = False +) -> Optional[exp_domain.TransientCheckpointUrl]: + """Returns an TransientCheckpointUrl domain object. + + Args: + unique_progress_url_id: str. The 6 digit long unique id + assigned to the progress made by a logged-out user. + strict: bool. Whether to fail noisily if no TransientCheckpointUrlModel + with the given unique_progress_url_id exists in the datastore. + + Returns: + TransientCheckpointUrl or None. 
The domain object corresponding to the + given unique_progress_url_id. If the model corresponding to given + unique_progress_url_id is not found, return None. + """ + logged_out_user_progress_model = ( + exp_models.TransientCheckpointUrlModel.get( + unique_progress_url_id, strict=strict)) + + if logged_out_user_progress_model is None: + return None + + return exp_domain.TransientCheckpointUrl( + logged_out_user_progress_model.exploration_id, + logged_out_user_progress_model.furthest_reached_checkpoint_state_name, + logged_out_user_progress_model.furthest_reached_checkpoint_exp_version, + logged_out_user_progress_model. + most_recently_reached_checkpoint_state_name, + logged_out_user_progress_model. + most_recently_reached_checkpoint_exp_version + ) + + +def get_exploration_version_history( + exp_id: str, exp_version: int +) -> Optional[exp_domain.ExplorationVersionHistory]: + """Returns an ExplorationVersionHistory domain object by fetching the + ExplorationVersionHistoryModel for the given exploration id and version. + + Args: + exp_id: str. The id of the exploration. + exp_version: int. The version number of the exploration. + + Returns: + ExplorationVersionHistory. The exploration version history domain + object for the ExplorationVersionHistoryModel corresponding to the + given exploration id and version. 
+ """ + version_history_model_id = ( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + exp_id, exp_version + ) + ) + version_history_model = ( + exp_models.ExplorationVersionHistoryModel.get( + version_history_model_id, strict=False + ) + ) + + if version_history_model is None: + return None + + return exp_domain.ExplorationVersionHistory( + exp_id, exp_version, + version_history_model.state_version_history, + version_history_model.metadata_last_edited_version_number, + version_history_model.metadata_last_edited_committer_id, + version_history_model.committer_ids + ) diff --git a/core/domain/exp_fetchers_test.py b/core/domain/exp_fetchers_test.py index 66efcfef82fd..f5e16d1f3853 100644 --- a/core/domain/exp_fetchers_test.py +++ b/core/domain/exp_fetchers_test.py @@ -23,48 +23,124 @@ from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import exp_services +from core.domain import rights_manager +from core.domain import state_domain +from core.domain import stats_services +from core.domain import translation_domain +from core.domain import user_services from core.platform import models from core.tests import test_utils -(exp_models,) = models.Registry.import_models([models.NAMES.exploration]) +from typing import Final + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) class ExplorationRetrievalTests(test_utils.GenericTestBase): """Test the exploration retrieval methods.""" - EXP_1_ID = 'exploration_1_id' - EXP_2_ID = 'exploration_2_id' - EXP_3_ID = 'exploration_3_id' + EXP_1_ID: Final = 'exploration_1_id' + EXP_2_ID: Final = 'exploration_2_id' + EXP_3_ID: Final = 'exploration_3_id' - def setUp(self): - super(ExplorationRetrievalTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) 
self.exploration_1 = self.save_new_default_exploration( self.EXP_1_ID, self.owner_id, title='Aa') + self.content_id_generator_1 = translation_domain.ContentIdGenerator( + self.exploration_1.next_content_id_index + ) self.exploration_2 = self.save_new_default_exploration( self.EXP_2_ID, self.owner_id, title='Bb') self.exploration_3 = self.save_new_default_exploration( self.EXP_3_ID, self.owner_id, title='Cc') - def test_get_exploration_summaries_matching_ids(self): + def test_get_exploration_summaries_matching_ids(self) -> None: summaries = exp_fetchers.get_exploration_summaries_matching_ids([ self.EXP_1_ID, self.EXP_2_ID, self.EXP_3_ID, 'nonexistent']) + # Here, we are Ruling out the possibility of None for individual + # elements of the list. Because `summaries` is of List[Optional[...]] + # type. + assert summaries[0] is not None self.assertEqual(summaries[0].title, self.exploration_1.title) + assert summaries[1] is not None self.assertEqual(summaries[1].title, self.exploration_2.title) + assert summaries[2] is not None self.assertEqual(summaries[2].title, self.exploration_3.title) self.assertIsNone(summaries[3]) - def test_get_exploration_summaries_subscribed_to(self): + def test_get_exploration_summaries_subscribed_to(self) -> None: summaries = exp_fetchers.get_exploration_summaries_subscribed_to( self.owner_id) self.assertEqual(summaries[0].title, self.exploration_1.title) self.assertEqual(summaries[1].title, self.exploration_2.title) self.assertEqual(summaries[2].title, self.exploration_3.title) - def test_retrieval_of_explorations(self): + def test_get_new_exploration_id(self) -> None: + self.assertIsNotNone( + exp_fetchers.get_new_exploration_id() + ) + + def test_get_new_unique_progress_url_id(self) -> None: + self.assertIsNotNone( + exp_fetchers.get_new_unique_progress_url_id() + ) + + def test_get_exploration_summary_by_id(self) -> None: + fake_eid = 'fake_eid' + fake_exp = exp_fetchers.get_exploration_summary_by_id( + fake_eid, strict=False + ) + 
self.assertIsNone(fake_exp) + exp_summary = exp_fetchers.get_exploration_summary_by_id( + self.EXP_1_ID + ) + self.assertIsNotNone(exp_summary) + self.assertEqual(exp_summary.id, self.EXP_1_ID) + + def test_get_exploration_summaries_from_models(self) -> None: + exp_ids = [self.EXP_1_ID, self.EXP_2_ID, self.EXP_3_ID] + exp_summary_models = [] + exp_summary_models_with_none = exp_models.ExpSummaryModel.get_multi( + exp_ids + ) + for model in exp_summary_models_with_none: + # Ruling out the possibility of None for mypy type checking. + assert model is not None + exp_summary_models.append(model) + + exp_summary_dict = ( + exp_fetchers.get_exploration_summaries_from_models( + exp_summary_models + ) + ) + for key in exp_summary_dict: + self.assertIn(key, exp_ids) + + def test_retrieval_of_fake_exploration(self) -> None: + self.assertIsNone( + exp_fetchers.get_exploration_by_id('fake_eid', strict=False) + ) + + def test_get_exploration_summaries_where_user_has_role(self) -> None: + exp_ids = [self.EXP_1_ID, self.EXP_2_ID, self.EXP_3_ID] + exp_summaries = ( + exp_fetchers.get_exploration_summaries_where_user_has_role( + self.owner_id + )) + self.assertEqual(len(exp_summaries), 3) + for exp_summary in exp_summaries: + self.assertIn(exp_summary.id, exp_ids) + + def test_retrieval_of_explorations(self) -> None: """Test the get_exploration_by_id() method.""" - with self.assertRaisesRegexp(Exception, 'Entity .* not found'): + with self.assertRaisesRegex(Exception, 'Entity .* not found'): exp_fetchers.get_exploration_by_id('fake_eid') retrieved_exploration = ( @@ -72,26 +148,61 @@ def test_retrieval_of_explorations(self): self.assertEqual(self.exploration_1.id, retrieved_exploration.id) self.assertEqual(self.exploration_1.title, retrieved_exploration.title) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class ExplorationModel with id fake_exploration' ' not found'): exp_fetchers.get_exploration_by_id('fake_exploration') - def 
test_retrieval_of_multiple_exploration_versions_for_fake_exp_id(self): - with self.assertRaisesRegexp( + def test_retrieval_of_multiple_exploration_versions_for_fake_exp_id( + self + ) -> None: + with self.assertRaisesRegex( ValueError, 'The given entity_id fake_exp_id is invalid'): ( exp_fetchers .get_multiple_versioned_exp_interaction_ids_mapping_by_version( 'fake_exp_id', [1, 2, 3])) - def test_retrieval_of_multiple_exploration_versions(self): + def test_retrieval_of_exp_versions_for_invalid_state_schema_version( + self + ) -> None: + error_regex = ( + 'Exploration\\(id=%s, version=%s, states_schema_version=%s\\) ' + 'does not match the latest schema version %s' % ( + self.EXP_1_ID, + '1', + feconf.CURRENT_STATE_SCHEMA_VERSION, + '61' + ) + ) + with self.swap(feconf, 'CURRENT_STATE_SCHEMA_VERSION', 61): + with self.assertRaisesRegex(Exception, error_regex): + ( + exp_fetchers + .get_multiple_versioned_exp_interaction_ids_mapping_by_version( + self.EXP_1_ID, [1]) + ) + + def test_retrieval_of_multiple_exploration_versions(self) -> None: # Update exploration to version 2. 
- change_list = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'New state', - })] + change_list = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'New state', + 'content_id_for_state_content': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.DEFAULT_OUTCOME)) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': self.content_id_generator_1.next_content_id_index + })] exp_services.update_exploration( feconf.SYSTEM_COMMITTER_ID, self.EXP_1_ID, change_list, '') @@ -99,7 +210,19 @@ def test_retrieval_of_multiple_exploration_versions(self): change_list = [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'New state 2', - })] + 'content_id_for_state_content': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.DEFAULT_OUTCOME)) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': self.content_id_generator_1.next_content_id_index + })] exp_services.update_exploration( feconf.SYSTEM_COMMITTER_ID, self.EXP_1_ID, change_list, '') @@ -117,12 +240,26 @@ def test_retrieval_of_multiple_exploration_versions(self): self.assertEqual(explorations[1].version, 2) self.assertEqual(explorations[2].version, 3) - def test_version_number_errors_for_get_multiple_exploration_versions(self): + def test_version_number_errors_for_get_multiple_exploration_versions( + self + ) -> None: # Update exploration to version 2. 
change_list = [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'New state', - })] + 'content_id_for_state_content': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.DEFAULT_OUTCOME)) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': self.content_id_generator_1.next_content_id_index + })] exp_services.update_exploration( feconf.SYSTEM_COMMITTER_ID, self.EXP_1_ID, change_list, '') @@ -130,11 +267,23 @@ def test_version_number_errors_for_get_multiple_exploration_versions(self): change_list = [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'New state 2', - })] + 'content_id_for_state_content': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.DEFAULT_OUTCOME)) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': self.content_id_generator_1.next_content_id_index + })] exp_services.update_exploration( feconf.SYSTEM_COMMITTER_ID, self.EXP_1_ID, change_list, '') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( ValueError, 'Requested version number 4 cannot be higher than the current ' 'version number 3.'): @@ -143,15 +292,29 @@ def test_version_number_errors_for_get_multiple_exploration_versions(self): .get_multiple_versioned_exp_interaction_ids_mapping_by_version( self.EXP_1_ID, [1, 2, 3, 4])) - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex( ValueError, 'At least one version number is invalid'): ( exp_fetchers .get_multiple_versioned_exp_interaction_ids_mapping_by_version( - self.EXP_1_ID, [1, 2, 2.5, 3])) + self.EXP_1_ID, [1, 2, 2.5, 3])) # type: ignore[list-item] - def test_retrieval_of_multiple_explorations(self): + def test_retrieval_of_multiple_uncached_explorations(self) -> None: + exp_ids = [self.EXP_1_ID, self.EXP_2_ID, self.EXP_3_ID] + caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, exp_ids) + uncached_explorations = exp_fetchers.get_multiple_explorations_by_id( + exp_ids, False + ) + self.assertEqual(len(uncached_explorations), 3) + for key in uncached_explorations: + self.assertIn(key, uncached_explorations) + + def test_retrieval_of_multiple_explorations(self) -> None: exps = {} chars = 'abcde' exp_ids = ['%s%s' % (self.EXP_1_ID, c) for c in chars] @@ -162,40 +325,176 @@ def test_retrieval_of_multiple_explorations(self): result = exp_fetchers.get_multiple_explorations_by_id( exp_ids) for _id in exp_ids: - self.assertEqual(result.get(_id).title, exps.get(_id).title) + self.assertEqual(result[_id].title, exps[_id].title) # Test retrieval of non-existent ids. 
result = exp_fetchers.get_multiple_explorations_by_id( exp_ids + ['doesnt_exist'], strict=False ) for _id in exp_ids: - self.assertEqual(result.get(_id).title, exps.get(_id).title) + self.assertEqual(result[_id].title, exps[_id].title) self.assertNotIn('doesnt_exist', result) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Couldn\'t find explorations with the following ids:\n' 'doesnt_exist'): exp_fetchers.get_multiple_explorations_by_id( exp_ids + ['doesnt_exist']) + def test_exploration_user_data_is_none_before_starting_exploration( + self + ) -> None: + auth_id = 'test_id' + user_email = 'test@email.com' + user_id = user_services.create_new_user(auth_id, user_email).user_id + self.assertIsNone(exp_fetchers.get_exploration_user_data( + user_id, self.EXP_1_ID)) + + def test_get_exploration_user_data(self) -> None: + auth_id = 'test_id' + username = 'testname' + user_email = 'test@email.com' + user_id = user_services.create_new_user(auth_id, user_email).user_id + user_services.set_username(user_id, username) + + user_services.update_learner_checkpoint_progress( + user_id, self.EXP_1_ID, 'Introduction', 1) + expected_user_data_dict = { + 'rating': None, + 'rated_on': None, + 'draft_change_list': None, + 'draft_change_list_last_updated': None, + 'draft_change_list_exp_version': None, + 'draft_change_list_id': 0, + 'mute_suggestion_notifications': ( + feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), + 'mute_feedback_notifications': ( + feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE), + 'furthest_reached_checkpoint_exp_version': 1, + 'furthest_reached_checkpoint_state_name': 'Introduction', + 'most_recently_reached_checkpoint_exp_version': 1, + 'most_recently_reached_checkpoint_state_name': 'Introduction' + } + exp_user_data = exp_fetchers.get_exploration_user_data( + user_id, self.EXP_1_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_user_data is not None + self.assertEqual(expected_user_data_dict, exp_user_data.to_dict()) + + def test_get_exploration_version_history(self) -> None: + version_history = exp_fetchers.get_exploration_version_history( + self.EXP_1_ID, 2 + ) + + self.assertIsNone(version_history) + + exp_services.update_exploration( + self.owner_id, self.EXP_1_ID, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'New state', + 'content_id_for_state_content': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + self.content_id_generator_1.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'next_content_id_index', + 'new_value': ( + self.content_id_generator_1.next_content_id_index), + 'old_value': 0 + }) + ], 'A commit message.' + ) + version_history = exp_fetchers.get_exploration_version_history( + self.EXP_1_ID, 2 + ) + + self.assertIsNotNone(version_history) + if version_history is not None: + self.assertEqual(version_history.committer_ids, [self.owner_id]) + self.assertEqual( + version_history.state_version_history['New state'].to_dict(), + state_domain.StateVersionHistory( + None, None, self.owner_id + ).to_dict() + ) + + +class LoggedOutUserProgressTests(test_utils.GenericTestBase): + """Tests the fetching of the logged-out user progress.""" + + UNIQUE_PROGRESS_URL_ID = 'pid123' + EXP_1_ID = 'exploration_1_id' + + def setUp(self) -> None: + super().setUp() + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + self.exploration_1 = self.save_new_default_exploration( + self.EXP_1_ID, self.owner_id, title='Aa') + + def test_get_logged_out_user_progress(self) -> None: + + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + 
self.assertIsNone(logged_out_user_data) + + exp_services.update_logged_out_user_progress( + self.EXP_1_ID, self.UNIQUE_PROGRESS_URL_ID, 'Introduction', 1) + + expected_progress_dict = { + 'exploration_id': self.EXP_1_ID, + 'furthest_reached_checkpoint_state_name': 'Introduction', + 'furthest_reached_checkpoint_exp_version': 1, + 'most_recently_reached_checkpoint_state_name': 'Introduction', + 'most_recently_reached_checkpoint_exp_version': 1, + 'last_updated': None + } + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + # Ruling out the possibility of None for mypy type checking. + assert logged_out_user_data is not None + self.assertEqual( + expected_progress_dict['exploration_id'], + logged_out_user_data.exploration_id) + self.assertEqual( + expected_progress_dict['furthest_reached_checkpoint_state_name'], + logged_out_user_data.furthest_reached_checkpoint_state_name) + self.assertEqual( + expected_progress_dict['furthest_reached_checkpoint_exp_version'], + logged_out_user_data.furthest_reached_checkpoint_exp_version) + self.assertEqual( + expected_progress_dict['most_recently_reached_checkpoint_state_name'], # pylint: disable=line-too-long + logged_out_user_data.most_recently_reached_checkpoint_state_name) + self.assertEqual( + expected_progress_dict['most_recently_reached_checkpoint_exp_version'], # pylint: disable=line-too-long + logged_out_user_data.most_recently_reached_checkpoint_exp_version) + class ExplorationConversionPipelineTests(test_utils.GenericTestBase): """Tests the exploration model -> exploration conversion pipeline.""" - OLD_EXP_ID = 'exp_id0' - NEW_EXP_ID = 'exp_id1' + OLD_EXP_ID: Final = 'exp_id0' + NEW_EXP_ID: Final = 'exp_id1' - UPGRADED_EXP_YAML = ( + UPGRADED_EXP_YAML: Final = ( """author_notes: '' -auto_tts_enabled: true +auto_tts_enabled: false blurb: '' -category: A category -correctness_feedback_enabled: false +category: Art +correctness_feedback_enabled: true +edits_allowed: true 
init_state_name: Introduction language_code: en -objective: An objective +next_content_id_index: 6 +objective: Exp objective... param_changes: [] param_specs: {} schema_version: %d @@ -204,8 +503,8 @@ class ExplorationConversionPipelineTests(test_utils.GenericTestBase): card_is_checkpoint: false classifier_model_id: null content: - content_id: content - html: '' + content_id: content_0 + html:

    Congratulations, you have finished!

    interaction: answer_groups: [] confirmed_unclassified_answers: [] @@ -217,35 +516,53 @@ class ExplorationConversionPipelineTests(test_utils.GenericTestBase): id: EndExploration solution: null linked_skill_id: null - next_content_id_index: 0 param_changes: [] recorded_voiceovers: voiceovers_mapping: - content: {} + content_0: {} solicit_answer_details: false - written_translations: - translations_mapping: - content: {} %s: card_is_checkpoint: true classifier_model_id: null content: - content_id: content + content_id: content_1 html: '' interaction: - answer_groups: [] + answer_groups: + - outcome: + dest: End + dest_if_really_stuck: null + feedback: + content_id: feedback_3 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_4 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] confirmed_unclassified_answers: [] customization_args: + catchMisspellings: + value: false placeholder: value: - content_id: ca_placeholder_0 + content_id: ca_placeholder_5 unicode_str: '' rows: value: 1 default_outcome: - dest: End + dest: Introduction + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_2 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null @@ -255,19 +572,15 @@ class ExplorationConversionPipelineTests(test_utils.GenericTestBase): id: TextInput solution: null linked_skill_id: null - next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_placeholder_5: {} + content_1: {} + default_outcome_2: {} + feedback_3: {} + rule_input_4: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} states_schema_version: %d tags: [] title: Old Title @@ -276,11 +589,126 @@ class ExplorationConversionPipelineTests(test_utils.GenericTestBase): feconf.DEFAULT_INIT_STATE_NAME, feconf.CURRENT_STATE_SCHEMA_VERSION) - ALBERT_EMAIL = 'albert@example.com' - ALBERT_NAME = 'albert' + STATES_AT_V41 = { + 'Introduction': { + 'classifier_model_id': None, + 'content': {'content_id': 'content', 'html': ''}, + 'interaction': { + 'answer_groups': [ + { + 'outcome': { + 'dest': 'End', + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Correct!

    ', + }, + 'labelled_as_correct': False, + 'missing_prerequisite_skill_id': None, + 'param_changes': [], + 'refresher_exploration_id': None, + }, + 'rule_specs': [ + { + 'inputs': { + 'x': { + 'contentId': 'rule_input_3', + 'normalizedStrSet': ['InputString'], + } + }, + 'rule_type': 'Equals', + } + ], + 'tagged_skill_misconception_id': None, + 'training_data': [], + } + ], + 'confirmed_unclassified_answers': [], + 'customization_args': { + 'placeholder': { + 'value': { + 'content_id': 'ca_placeholder_2', + 'unicode_str': '' + } + }, + 'rows': {'value': 1}, + }, + 'default_outcome': { + 'dest': 'Introduction', + 'feedback': { + 'content_id': 'default_outcome', + 'html': '' + }, + 'labelled_as_correct': False, + 'missing_prerequisite_skill_id': None, + 'param_changes': [], + 'refresher_exploration_id': None, + }, + 'hints': [], + 'id': 'TextInput', + 'solution': None, + }, + 'next_content_id_index': 4, + 'param_changes': [], + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'ca_placeholder_2': {}, + 'content': {}, + 'default_outcome': {}, + 'feedback_1': {}, + 'rule_input_3': {}, + } + }, + 'solicit_answer_details': False, + 'written_translations': { + 'translations_mapping': { + 'ca_placeholder_2': {}, + 'content': {}, + 'default_outcome': {}, + 'feedback_1': {}, + 'rule_input_3': {}, + } + }, + }, + 'End': { + 'classifier_model_id': None, + 'content': { + 'content_id': 'content', + 'html': '

    Congratulations, you have finished!

    ', + }, + 'interaction': { + 'answer_groups': [], + 'confirmed_unclassified_answers': [], + 'customization_args': { + 'recommendedExplorationIds': { + 'value': [] + } + }, + 'default_outcome': None, + 'hints': [], + 'id': 'EndExploration', + 'solution': None, + }, + 'next_content_id_index': 0, + 'param_changes': [], + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'content': {} + } + }, + 'solicit_answer_details': False, + 'written_translations': { + 'translations_mapping': { + 'content': {} + } + }, + }, + } + + ALBERT_EMAIL: Final = 'albert@example.com' + ALBERT_NAME: Final = 'albert' - def setUp(self): - super(ExplorationConversionPipelineTests, self).setUp() + def setUp(self) -> None: + super().setUp() # Setup user who will own the test explorations. self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) @@ -293,9 +721,21 @@ def setUp(self): swap_exp_schema_46 = self.swap( exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46) with swap_states_schema_41, swap_exp_schema_46: - self.save_new_valid_exploration( - self.OLD_EXP_ID, self.albert_id, title='Old Title', - end_state_name='End') + exploration = exp_domain.Exploration.create_default_exploration( + self.OLD_EXP_ID, title='Old Title', category='Art', + objective='Exp objective...') + exploration_model = exp_models.ExplorationModel(id=self.OLD_EXP_ID) + exp_services.populate_exp_model_fields( + exploration_model, exploration) + + exploration_model.states = self.STATES_AT_V41 + rights_manager.create_new_exploration_rights( + exploration_model.id, self.albert_id) + exploration_model.commit(self.albert_id, 'Created new exploration.', []) + exp_services.regenerate_exploration_summary_with_new_contributor( + self.OLD_EXP_ID, self.albert_id) + stats_services.create_exp_issues_for_new_exploration( + exploration_model.id, exploration_model.version) # Create standard exploration that should not be converted. 
new_exp = self.save_new_valid_exploration( @@ -308,21 +748,42 @@ def setUp(self): caching_services.CACHE_NAMESPACE_EXPLORATION, None, [self.OLD_EXP_ID, self.NEW_EXP_ID]) - def test_converts_exp_model_with_default_states_schema_version(self): + def test_converts_exp_model_with_default_states_schema_version( + self + ) -> None: exploration = exp_fetchers.get_exploration_by_id(self.OLD_EXP_ID) self.assertEqual( exploration.states_schema_version, feconf.CURRENT_STATE_SCHEMA_VERSION) self.assertEqual(exploration.to_yaml(), self.UPGRADED_EXP_YAML) - def test_does_not_convert_up_to_date_exploration(self): + def test_does_not_convert_up_to_date_exploration(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual( exploration.states_schema_version, feconf.CURRENT_STATE_SCHEMA_VERSION) self.assertEqual(exploration.to_yaml(), self._up_to_date_yaml) - def test_migration_then_reversion_maintains_valid_exploration(self): + def test_migration_with_invalid_state_schema(self) -> None: + self.save_new_valid_exploration('fake_eid', self.albert_id) + swap_earlier_state_to_60 = ( + self.swap(feconf, 'EARLIEST_SUPPORTED_STATE_SCHEMA_VERSION', 60) + ) + swap_current_state_61 = self.swap( + feconf, 'CURRENT_STATE_SCHEMA_VERSION', 61) + with swap_earlier_state_to_60, swap_current_state_61: + exploration_model = exp_models.ExplorationModel.get( + 'fake_eid', strict=True, version=None) + error_regex = ( + 'Sorry, we can only process v%d\\-v%d exploration state schemas at ' + 'present.' % ( + feconf.EARLIEST_SUPPORTED_STATE_SCHEMA_VERSION, + feconf.CURRENT_STATE_SCHEMA_VERSION) + ) + with self.assertRaisesRegex(Exception, error_regex): + exp_fetchers.get_exploration_from_model(exploration_model) + + def test_migration_then_reversion_maintains_valid_exploration(self) -> None: """This integration test simulates the behavior of the domain layer prior to the introduction of a states schema. 
In particular, it deals with an exploration that was created before any states schema @@ -340,8 +801,8 @@ def test_migration_then_reversion_maintains_valid_exploration(self): as exp_fetchers.get_exploration_by_id as it skips many steps which include the conversion pipeline (which is crucial to this test). """ - exp_id = 'exp_id2' - end_state_name = 'End' + exp_id: str = 'exp_id2' + end_state_name: str = 'End' # Create an exploration with an old states schema version. swap_states_schema_41 = self.swap( @@ -349,9 +810,22 @@ def test_migration_then_reversion_maintains_valid_exploration(self): swap_exp_schema_46 = self.swap( exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46) with swap_states_schema_41, swap_exp_schema_46: - self.save_new_valid_exploration( - exp_id, self.albert_id, title='Old Title', - end_state_name=end_state_name) + exploration = exp_domain.Exploration.create_default_exploration( + exp_id, title='Old Title', category='Art', + objective='Exp objective...') + exploration_model = exp_models.ExplorationModel(id=exp_id) + exp_services.populate_exp_model_fields( + exploration_model, exploration) + + exploration_model.states = self.STATES_AT_V41 + rights_manager.create_new_exploration_rights( + exploration_model.id, self.albert_id) + exploration_model.commit(self.albert_id, 'Created new exploration.', []) + exp_services.regenerate_exploration_summary_with_new_contributor( + exp_id, self.albert_id) + stats_services.create_exp_issues_for_new_exploration( + exploration_model.id, exploration_model.version) + caching_services.delete_multi( caching_services.CACHE_NAMESPACE_EXPLORATION, None, [exp_id]) @@ -365,7 +839,8 @@ def test_migration_then_reversion_maintains_valid_exploration(self): # In version 1, the title was 'Old title'. # In version 2, the title becomes 'New title'. 
exploration_model.title = 'New title' - exploration_model.commit(self.albert_id, 'Changed title.', []) + exploration_model.commit( + self.albert_id, 'Changed title and states.', []) # Version 2 of exploration. exploration_model = exp_models.ExplorationModel.get( diff --git a/core/domain/exp_services.py b/core/domain/exp_services.py index d5ec1d8fe960..09795f6be7ba 100644 --- a/core/domain/exp_services.py +++ b/core/domain/exp_services.py @@ -35,11 +35,11 @@ from core import android_validation_constants from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.domain import activity_services from core.domain import caching_services +from core.domain import change_domain from core.domain import classifier_services from core.domain import draft_upgrade_services from core.domain import email_manager @@ -47,7 +47,7 @@ from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import feedback_services -from core.domain import fs_domain +from core.domain import fs_services from core.domain import html_cleaner from core.domain import html_validation_service from core.domain import opportunity_services @@ -57,29 +57,100 @@ from core.domain import rights_manager from core.domain import search_services from core.domain import state_domain +from core.domain import stats_domain from core.domain import stats_services from core.domain import taskqueue_services +from core.domain import translation_services +from core.domain import user_domain from core.domain import user_services from core.platform import models +from extensions import domain + +import deepdiff +from typing import ( + Dict, Final, List, Literal, Optional, Sequence, Tuple, Type, TypedDict, + Union, cast, overload +) + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + from mypy_imports import exp_models + from mypy_imports import stats_models 
+ from mypy_imports import user_models + +(base_models, exp_models, stats_models, user_models) = ( + models.Registry.import_models([ + models.Names.BASE_MODEL, + models.Names.EXPLORATION, + models.Names.STATISTICS, + models.Names.USER + ]) +) datastore_services = models.Registry.import_datastore_services() -(exp_models, feedback_models, user_models) = models.Registry.import_models([ - models.NAMES.exploration, models.NAMES.feedback, models.NAMES.user -]) + +AcceptableActivityModelTypes = Union[ + user_models.CompletedActivitiesModel, + user_models.IncompleteActivitiesModel +] + + +class UserExplorationDataDict(TypedDict): + """Dictionary representing the user's specific exploration data.""" + + exploration_id: str + title: str + category: str + objective: str + language_code: str + tags: List[str] + init_state_name: str + states: Dict[str, state_domain.StateDict] + param_specs: Dict[str, param_domain.ParamSpecDict] + param_changes: List[param_domain.ParamChangeDict] + version: int + auto_tts_enabled: bool + correctness_feedback_enabled: bool + edits_allowed: bool + draft_change_list_id: int + rights: rights_domain.ActivityRightsDict + show_state_editor_tutorial_on_load: bool + show_state_translation_tutorial_on_load: bool + is_version_of_draft_valid: Optional[bool] + draft_changes: Dict[str, str] + email_preferences: user_domain.UserExplorationPrefsDict + next_content_id_index: int + exploration_metadata: exp_domain.ExplorationMetadataDict + + +class SnapshotsMetadataDict(TypedDict): + """Dictionary representing the snapshot metadata for exploration model.""" + + committer_id: str + commit_message: str + commit_cmds: List[Dict[str, change_domain.AcceptableChangeDictTypes]] + commit_type: str + version_number: int + created_on_ms: float + # Name for the exploration search index. -SEARCH_INDEX_EXPLORATIONS = 'explorations' +SEARCH_INDEX_EXPLORATIONS: Final = 'explorations' # The maximum number of iterations allowed for populating the results of a # search query. 
-MAX_ITERATIONS = 10 +MAX_ITERATIONS: Final = 10 # NOTE TO DEVELOPERS: The get_story_ids_linked_to_explorations function was # removed in #13021 as part of the migration to Apache Beam. Please refer to # that PR if you need to reinstate it. -def is_exp_summary_editable(exp_summary, user_id=None): +def is_exp_summary_editable( + exp_summary: exp_domain.ExplorationSummary, user_id: str +) -> bool: """Checks if a given user has permissions to edit the exploration. Args: @@ -96,7 +167,9 @@ def is_exp_summary_editable(exp_summary, user_id=None): # Query methods. -def get_exploration_titles_and_categories(exp_ids): +def get_exploration_titles_and_categories( + exp_ids: List[str] +) -> Dict[str, Dict[str, str]]: """Returns exploration titles and categories for the given ids. The result is a dict with exploration ids as keys. The corresponding values @@ -116,7 +189,8 @@ def get_exploration_titles_and_categories(exp_ids): """ explorations = [ (exp_fetchers.get_exploration_from_model(e) if e else None) - for e in exp_models.ExplorationModel.get_multi(exp_ids)] + for e in exp_models.ExplorationModel.get_multi( + exp_ids, include_deleted=True)] result = {} for exploration in explorations: @@ -132,7 +206,11 @@ def get_exploration_titles_and_categories(exp_ids): def get_exploration_ids_matching_query( - query_string, categories, language_codes, offset=None): + query_string: str, + categories: List[str], + language_codes: List[str], + offset: Optional[int] = None +) -> Tuple[List[str], Optional[int]]: """Returns a list with all exploration ids matching the given search query string, as well as a search offset for future fetches. @@ -166,7 +244,7 @@ def get_exploration_ids_matching_query( not occur, an error will be logged.) search_offset: int. Search offset for future fetches. 
""" - returned_exploration_ids = [] + returned_exploration_ids: List[str] = [] search_offset = offset for _ in range(MAX_ITERATIONS): @@ -202,7 +280,8 @@ def get_exploration_ids_matching_query( return (returned_exploration_ids, search_offset) -def get_non_private_exploration_summaries(): +def get_non_private_exploration_summaries( +) -> Dict[str, exp_domain.ExplorationSummary]: """Returns a dict with all non-private exploration summary domain objects, keyed by their id. @@ -214,7 +293,9 @@ def get_non_private_exploration_summaries(): exp_models.ExpSummaryModel.get_non_private()) -def get_top_rated_exploration_summaries(limit): +def get_top_rated_exploration_summaries( + limit: int +) -> Dict[str, exp_domain.ExplorationSummary]: """Returns a dict with top rated exploration summary model instances, keyed by their id. At most 'limit' entries are returned. @@ -231,7 +312,9 @@ def get_top_rated_exploration_summaries(limit): exp_models.ExpSummaryModel.get_top_rated(limit)) -def get_recently_published_exp_summaries(limit): +def get_recently_published_exp_summaries( + limit: int +) -> Dict[str, exp_domain.ExplorationSummary]: """Returns a dict with recently published ExplorationSummary model instances, keyed by their exploration id. At most 'limit' entries are returned. @@ -249,7 +332,7 @@ def get_recently_published_exp_summaries(limit): exp_models.ExpSummaryModel.get_recently_published(limit)) -def get_story_id_linked_to_exploration(exp_id): +def get_story_id_linked_to_exploration(exp_id: str) -> Optional[str]: """Returns the ID of the story that the exploration is a part of, or None if the exploration is not part of a story. 
@@ -262,12 +345,17 @@ def get_story_id_linked_to_exploration(exp_id): """ exploration_context_model = exp_models.ExplorationContextModel.get( exp_id, strict=False) - if exploration_context_model is not None: - return exploration_context_model.story_id - return None + if exploration_context_model is None: + return None + + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + story_id: str = exploration_context_model.story_id + return story_id -def get_all_exploration_summaries(): +def get_all_exploration_summaries() -> Dict[str, exp_domain.ExplorationSummary]: """Returns a dict with all exploration summary domain objects, keyed by their id. @@ -276,11 +364,13 @@ def get_all_exploration_summaries(): exploration id. """ return exp_fetchers.get_exploration_summaries_from_models( - exp_models.ExpSummaryModel.get_all()) + exp_models.ExpSummaryModel.get_all().fetch()) # Methods for exporting states and explorations to other formats. -def export_to_zip_file(exploration_id, version=None): +def export_to_zip_file( + exploration_id: str, version: Optional[int] = None +) -> io.BytesIO: """Returns a ZIP archive of the exploration. 
Args: @@ -306,9 +396,8 @@ def export_to_zip_file(exploration_id, version=None): else: zfile.writestr('%s.yaml' % exploration.title, yaml_repr) - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, exploration_id)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, exploration_id) html_string_list = exploration.get_all_html_content_strings() image_filenames = ( html_cleaner.get_image_filenames_from_html_strings( @@ -324,7 +413,9 @@ def export_to_zip_file(exploration_id, version=None): return temp_file -def export_states_to_yaml(exploration_id, version=None, width=80): +def export_states_to_yaml( + exploration_id: str, version: Optional[int] = None, width: int = 80 +) -> Dict[str, str]: """Returns a dictionary of the exploration, whose keys are state names and values are yaml strings representing the state contents with lines wrapped at 'width' characters. @@ -345,13 +436,17 @@ def export_states_to_yaml(exploration_id, version=None, width=80): exploration_id, version=version) exploration_dict = {} for state in exploration.states: - exploration_dict[state] = python_utils.yaml_from_dict( - exploration.states[state].to_dict(), width=width) + exploration_dict[state] = utils.yaml_from_dict( + exploration.states[state].to_dict(), + width=width + ) return exploration_dict # Repository SAVE and DELETE methods. -def apply_change_list(exploration_id, change_list): +def apply_change_list( + exploration_id: str, change_list: Sequence[exp_domain.ExplorationChange] +) -> exp_domain.Exploration: """Applies a changelist to a pristine exploration and returns the result. Each entry in change_list is a dict that represents an ExplorationChange @@ -368,84 +463,185 @@ def apply_change_list(exploration_id, change_list): Raises: Exception. Any entries in the changelist are invalid. + Exception. Solution cannot exist with None interaction id. 
""" exploration = exp_fetchers.get_exploration_by_id(exploration_id) try: to_param_domain = param_domain.ParamChange.from_dict for change in change_list: if change.cmd == exp_domain.CMD_ADD_STATE: - exploration.add_states([change.state_name]) + # Here we use cast because we are narrowing down the type from + # ExplorationChange to a specific change command. + add_state_cmd = cast( + exp_domain.AddExplorationStateCmd, + change + ) + exploration.add_state( + add_state_cmd.state_name, + add_state_cmd.content_id_for_state_content, + add_state_cmd.content_id_for_default_outcome + ) elif change.cmd == exp_domain.CMD_RENAME_STATE: + # Here we use cast because we are narrowing down the type from + # ExplorationChange to a specific change command. + rename_state_cmd = cast( + exp_domain.RenameExplorationStateCmd, + change + ) exploration.rename_state( - change.old_state_name, change.new_state_name) + rename_state_cmd.old_state_name, + rename_state_cmd.new_state_name + ) elif change.cmd == exp_domain.CMD_DELETE_STATE: - exploration.delete_state(change.state_name) + # Here we use cast because we are narrowing down the type from + # ExplorationChange to a specific change command. + delete_state_cmd = cast( + exp_domain.DeleteExplorationStateCmd, + change + ) + exploration.delete_state(delete_state_cmd.state_name) elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY: - state = exploration.states[change.state_name] + state: state_domain.State = exploration.states[ + change.state_name] if (change.property_name == exp_domain.STATE_PROPERTY_PARAM_CHANGES): + # Here we use cast because this 'if' condition forces + # change to have type EditExpStatePropertyParamChangesCmd. 
+ edit_param_changes_cmd = cast( + exp_domain.EditExpStatePropertyParamChangesCmd, + change + ) state.update_param_changes(list(map( - to_param_domain, change.new_value))) + to_param_domain, edit_param_changes_cmd.new_value + ))) elif change.property_name == exp_domain.STATE_PROPERTY_CONTENT: + # Here we use cast because this 'elif' condition forces + # change to have type EditExpStatePropertyContentCmd. + edit_content_cmd = cast( + exp_domain.EditExpStatePropertyContentCmd, + change + ) content = ( - state_domain.SubtitledHtml.from_dict(change.new_value)) + state_domain.SubtitledHtml.from_dict( + edit_content_cmd.new_value + ) + ) content.validate() state.update_content(content) elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_ID): state.update_interaction_id(change.new_value) - elif (change.property_name == - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX): - next_content_id_index = max( - change.new_value, state.next_content_id_index) - state.update_next_content_id_index(next_content_id_index) elif (change.property_name == exp_domain.STATE_PROPERTY_LINKED_SKILL_ID): - state.update_linked_skill_id(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertyLinkedSkillIdCmd. + edit_linked_skill_id_cmd = cast( + exp_domain.EditExpStatePropertyLinkedSkillIdCmd, + change + ) + state.update_linked_skill_id( + edit_linked_skill_id_cmd.new_value + ) elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS): + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertyInteractionCustArgsCmd. 
+ edit_interaction_cust_arg_cmd = cast( + exp_domain.EditExpStatePropertyInteractionCustArgsCmd, + change + ) state.update_interaction_customization_args( - change.new_value) + edit_interaction_cust_arg_cmd.new_value) elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_HANDLERS): raise utils.InvalidInputException( 'Editing interaction handlers is no longer supported') elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS): + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertyInteractionAnswerGroupsCmd. + edit_interaction_answer_group_cmd = cast( + exp_domain.EditExpStatePropertyInteractionAnswerGroupsCmd, # pylint: disable=line-too-long + change + ) + answer_groups = ( + edit_interaction_answer_group_cmd.new_value + ) new_answer_groups = [ - state_domain.AnswerGroup.from_dict(answer_groups) - for answer_groups in change.new_value + state_domain.AnswerGroup.from_dict(answer_group) + for answer_group in answer_groups ] state.update_interaction_answer_groups(new_answer_groups) elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME): new_outcome = None if change.new_value: + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertyInteractionDefaultOutcomeCmd. + edit_interaction_default_outcome_cmd = cast( + exp_domain.EditExpStatePropertyInteractionDefaultOutcomeCmd, # pylint: disable=line-too-long + change + ) new_outcome = state_domain.Outcome.from_dict( - change.new_value + edit_interaction_default_outcome_cmd.new_value ) state.update_interaction_default_outcome(new_outcome) elif (change.property_name == exp_domain.STATE_PROPERTY_UNCLASSIFIED_ANSWERS): + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertyUnclassifiedAnswersCmd. 
+ edit_unclassified_answers_cmd = cast( + exp_domain.EditExpStatePropertyUnclassifiedAnswersCmd, + change + ) state.update_interaction_confirmed_unclassified_answers( - change.new_value) + edit_unclassified_answers_cmd.new_value) elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_HINTS): - if not isinstance(change.new_value, list): + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertyInteractionHintsCmd. + edit_state_interaction_hints_cmd = cast( + exp_domain.EditExpStatePropertyInteractionHintsCmd, + change + ) + hint_dicts = ( + edit_state_interaction_hints_cmd.new_value + ) + if not isinstance(hint_dicts, list): raise Exception( 'Expected hints_list to be a list,' - ' received %s' % change.new_value) + ' received %s' % hint_dicts) new_hints_list = [ state_domain.Hint.from_dict(hint_dict) - for hint_dict in change.new_value + for hint_dict in hint_dicts ] state.update_interaction_hints(new_hints_list) elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_SOLUTION): new_solution = None - if change.new_value is not None: + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertyInteractionSolutionCmd. + edit_interaction_solution_cmd = cast( + exp_domain.EditExpStatePropertyInteractionSolutionCmd, + change + ) + if edit_interaction_solution_cmd.new_value is not None: + if state.interaction.id is None: + raise Exception( + 'solution cannot exist with None ' + 'interaction id.' 
+ ) new_solution = state_domain.Solution.from_dict( - state.interaction.id, change.new_value) + state.interaction.id, + edit_interaction_solution_cmd.new_value + ) state.update_interaction_solution(new_solution) elif (change.property_name == exp_domain.STATE_PROPERTY_SOLICIT_ANSWER_DETAILS): @@ -453,14 +649,32 @@ def apply_change_list(exploration_id, change_list): raise Exception( 'Expected solicit_answer_details to be a ' + 'bool, received %s' % change.new_value) - state.update_solicit_answer_details(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertySolicitAnswerDetailsCmd. + edit_solicit_answer_details_cmd = cast( + exp_domain.EditExpStatePropertySolicitAnswerDetailsCmd, + change + ) + state.update_solicit_answer_details( + edit_solicit_answer_details_cmd.new_value + ) elif (change.property_name == exp_domain.STATE_PROPERTY_CARD_IS_CHECKPOINT): if not isinstance(change.new_value, bool): raise Exception( 'Expected card_is_checkpoint to be a ' + 'bool, received %s' % change.new_value) - state.update_card_is_checkpoint(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertyCardIsCheckpointCmd. + edit_card_is_checkpoint_cmd = cast( + exp_domain.EditExpStatePropertyCardIsCheckpointCmd, + change + ) + state.update_card_is_checkpoint( + edit_card_is_checkpoint_cmd.new_value + ) elif (change.property_name == exp_domain.STATE_PROPERTY_RECORDED_VOICEOVERS): if not isinstance(change.new_value, dict): @@ -474,8 +688,18 @@ def apply_change_list(exploration_id, change_list): # treats any number that can be float and int as # int (no explicit types). For example, # 10.000 is not 10.000 it is 10. + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExpStatePropertyRecordedVoiceoversCmd. 
+ edit_recorded_voiceovers_cmd = cast( + exp_domain.EditExpStatePropertyRecordedVoiceoversCmd, + change + ) new_voiceovers_mapping = ( - change.new_value['voiceovers_mapping']) + edit_recorded_voiceovers_cmd.new_value[ + 'voiceovers_mapping' + ] + ) language_codes_to_audio_metadata = ( new_voiceovers_mapping.values()) for language_codes in language_codes_to_audio_metadata: @@ -487,70 +711,138 @@ def apply_change_list(exploration_id, change_list): state_domain.RecordedVoiceovers.from_dict( change.new_value)) state.update_recorded_voiceovers(recorded_voiceovers) - elif (change.property_name == - exp_domain.STATE_PROPERTY_WRITTEN_TRANSLATIONS): - if not isinstance(change.new_value, dict): - raise Exception( - 'Expected written_translations to be a dict, ' - 'received %s' % change.new_value) - cleaned_written_translations_dict = ( - state_domain.WrittenTranslations - .convert_html_in_written_translations( - change.new_value, html_cleaner.clean)) - written_translations = ( - state_domain.WrittenTranslations.from_dict( - cleaned_written_translations_dict)) - state.update_written_translations(written_translations) - elif change.cmd == exp_domain.DEPRECATED_CMD_ADD_TRANSLATION: - # DEPRECATED: This command is deprecated. Please do not use. - # The command remains here to support old suggestions. 
- exploration.states[change.state_name].add_translation( - change.content_id, change.language_code, - change.translation_html) - elif change.cmd == exp_domain.CMD_ADD_WRITTEN_TRANSLATION: - exploration.states[change.state_name].add_written_translation( - change.content_id, change.language_code, - change.translation_html, change.data_format) - elif (change.cmd == - exp_domain.CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE): - exploration.states[ - change.state_name - ].mark_written_translation_as_needing_update( - change.content_id, - change.language_code - ) - elif (change.cmd == - exp_domain.CMD_MARK_WRITTEN_TRANSLATIONS_AS_NEEDING_UPDATE): - exploration.states[ - change.state_name - ].mark_written_translations_as_needing_update(change.content_id) elif change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY: if change.property_name == 'title': - exploration.update_title(change.new_value) + # Here we use cast because this 'if' condition forces + # change to have type EditExplorationPropertyTitleCmd. + edit_title_cmd = cast( + exp_domain.EditExplorationPropertyTitleCmd, + change + ) + exploration.update_title(edit_title_cmd.new_value) elif change.property_name == 'category': - exploration.update_category(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type EditExplorationPropertyCategoryCmd. + edit_category_cmd = cast( + exp_domain.EditExplorationPropertyCategoryCmd, + change + ) + exploration.update_category(edit_category_cmd.new_value) elif change.property_name == 'objective': - exploration.update_objective(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type EditExplorationPropertyObjectiveCmd. 
+ edit_objective_cmd = cast( + exp_domain.EditExplorationPropertyObjectiveCmd, + change + ) + exploration.update_objective(edit_objective_cmd.new_value) elif change.property_name == 'language_code': - exploration.update_language_code(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExplorationPropertyLanguageCodeCmd. + edit_language_code_cmd = cast( + exp_domain.EditExplorationPropertyLanguageCodeCmd, + change + ) + exploration.update_language_code( + edit_language_code_cmd.new_value + ) elif change.property_name == 'tags': - exploration.update_tags(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type EditExplorationPropertyTagsCmd. + edit_tags_cmd = cast( + exp_domain.EditExplorationPropertyTagsCmd, + change + ) + exploration.update_tags(edit_tags_cmd.new_value) elif change.property_name == 'blurb': - exploration.update_blurb(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type EditExplorationPropertyBlurbCmd. + edit_blurb_cmd = cast( + exp_domain.EditExplorationPropertyBlurbCmd, + change + ) + exploration.update_blurb(edit_blurb_cmd.new_value) elif change.property_name == 'author_notes': - exploration.update_author_notes(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type EditExplorationPropertyAuthorNotesCmd. + edit_author_notes_cmd = cast( + exp_domain.EditExplorationPropertyAuthorNotesCmd, + change + ) + exploration.update_author_notes( + edit_author_notes_cmd.new_value + ) elif change.property_name == 'param_specs': - exploration.update_param_specs(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type EditExplorationPropertyParamSpecsCmd. 
+ edit_param_specs_cmd = cast( + exp_domain.EditExplorationPropertyParamSpecsCmd, + change + ) + exploration.update_param_specs( + edit_param_specs_cmd.new_value + ) elif change.property_name == 'param_changes': - exploration.update_param_changes(list( - map(to_param_domain, change.new_value))) + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExplorationPropertyParamChangesCmd. + edit_exp_param_changes_cmd = cast( + exp_domain.EditExplorationPropertyParamChangesCmd, + change + ) + exploration.update_param_changes( + list( + map( + to_param_domain, + edit_exp_param_changes_cmd.new_value + ) + ) + ) elif change.property_name == 'init_state_name': - exploration.update_init_state_name(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExplorationPropertyInitStateNameCmd. + edit_init_state_name_cmd = cast( + exp_domain.EditExplorationPropertyInitStateNameCmd, + change + ) + exploration.update_init_state_name( + edit_init_state_name_cmd.new_value + ) elif change.property_name == 'auto_tts_enabled': - exploration.update_auto_tts_enabled(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExplorationPropertyAutoTtsEnabledCmd. + edit_auto_tts_enabled_cmd = cast( + exp_domain.EditExplorationPropertyAutoTtsEnabledCmd, + change + ) + exploration.update_auto_tts_enabled( + edit_auto_tts_enabled_cmd.new_value + ) elif change.property_name == 'correctness_feedback_enabled': + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExplorationPropertyCorrectnessFeedbackEnabledCmd. 
+ edit_correctness_feedback_enabled_cmd = cast( + exp_domain.EditExplorationPropertyCorrectnessFeedbackEnabledCmd, # pylint: disable=line-too-long + change + ) exploration.update_correctness_feedback_enabled( - change.new_value) + edit_correctness_feedback_enabled_cmd.new_value) + elif change.property_name == 'next_content_id_index': + # Here we use cast because this 'elif' + # condition forces change to have type + # EditExplorationPropertyNextContentIdIndexCmd. + cmd = cast( + exp_domain.EditExplorationPropertyNextContentIdIndexCmd, + change + ) + next_content_id_index = max( + cmd.new_value, exploration.next_content_id_index) + exploration.update_next_content_id_index( + next_content_id_index) elif (change.cmd == exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION): # Loading the exploration model from the datastore into an @@ -560,28 +852,403 @@ def apply_change_list(exploration_id, change_list): # schema update. Thus, no action is needed here other than # to make sure that the version that the user is trying to # migrate to is the latest version. + # Here we use cast because we are narrowing down the type from + # ExplorationChange to a specific change command. 
+ migrate_states_schema_cmd = cast( + exp_domain.MigrateStatesSchemaToLatestVersionCmd, + change + ) target_version_is_current_state_schema_version = ( - change.to_version == - str(feconf.CURRENT_STATE_SCHEMA_VERSION)) + migrate_states_schema_cmd.to_version == + str(feconf.CURRENT_STATE_SCHEMA_VERSION) + ) if not target_version_is_current_state_schema_version: raise Exception( 'Expected to migrate to the latest state schema ' 'version %s, received %s' % ( feconf.CURRENT_STATE_SCHEMA_VERSION, - change.to_version)) + migrate_states_schema_cmd.to_version)) return exploration except Exception as e: logging.error( '%s %s %s %s' % ( e.__class__.__name__, e, exploration_id, - pprint.pprint(change_list)) + pprint.pformat(change_list)) ) raise e -def _save_exploration(committer_id, exploration, commit_message, change_list): - """Validates an exploration and commits it to persistent storage. +def populate_exp_model_fields( + exp_model: exp_models.ExplorationModel, exploration: exp_domain.Exploration +) -> exp_models.ExplorationModel: + """Populate exploration model with the data from Exploration object. + + Args: + exp_model: ExplorationModel. The model to populate. + exploration: Exploration. The exploration domain object which should be + used to populate the model. + + Returns: + ExplorationModel. Populated model. 
+ """ + exp_model.title = exploration.title + exp_model.category = exploration.category + exp_model.objective = exploration.objective + exp_model.language_code = exploration.language_code + exp_model.tags = exploration.tags + exp_model.blurb = exploration.blurb + exp_model.author_notes = exploration.author_notes + exp_model.states_schema_version = exploration.states_schema_version + exp_model.init_state_name = exploration.init_state_name + exp_model.states = { + state_name: state.to_dict() + for (state_name, state) in exploration.states.items()} + exp_model.param_specs = exploration.param_specs_dict + exp_model.param_changes = exploration.param_change_dicts + exp_model.auto_tts_enabled = exploration.auto_tts_enabled + exp_model.correctness_feedback_enabled = ( + exploration.correctness_feedback_enabled) + exp_model.edits_allowed = exploration.edits_allowed + exp_model.next_content_id_index = exploration.next_content_id_index + + return exp_model + + +def populate_exp_summary_model_fields( + exp_summary_model: Optional[exp_models.ExpSummaryModel], + exp_summary: exp_domain.ExplorationSummary +) -> exp_models.ExpSummaryModel: + """Populate exploration summary model with the data from + ExplorationSummary object. + + Args: + exp_summary_model: ExpSummaryModel|None. The model to populate. + If None, we create a new model instead. + exp_summary: ExplorationSummary. The exploration domain object which + should be used to populate the model. + + Returns: + ExpSummaryModel. Populated model. 
+ """ + exp_summary_dict = { + 'title': exp_summary.title, + 'category': exp_summary.category, + 'objective': exp_summary.objective, + 'language_code': exp_summary.language_code, + 'tags': exp_summary.tags, + 'ratings': exp_summary.ratings, + 'scaled_average_rating': exp_summary.scaled_average_rating, + 'exploration_model_last_updated': ( + exp_summary.exploration_model_last_updated), + 'exploration_model_created_on': ( + exp_summary.exploration_model_created_on), + 'first_published_msec': exp_summary.first_published_msec, + 'status': exp_summary.status, + 'community_owned': exp_summary.community_owned, + 'owner_ids': exp_summary.owner_ids, + 'editor_ids': exp_summary.editor_ids, + 'voice_artist_ids': exp_summary.voice_artist_ids, + 'viewer_ids': exp_summary.viewer_ids, + 'contributor_ids': list(exp_summary.contributors_summary.keys()), + 'contributors_summary': exp_summary.contributors_summary, + 'version': exp_summary.version + } + if exp_summary_model is not None: + exp_summary_model.populate(**exp_summary_dict) + else: + exp_summary_dict['id'] = exp_summary.id + exp_summary_model = exp_models.ExpSummaryModel(**exp_summary_dict) + + return exp_summary_model + + +def update_states_version_history( + states_version_history: Dict[str, state_domain.StateVersionHistory], + change_list: Sequence[exp_domain.ExplorationChange], + old_states_dict: Dict[str, state_domain.StateDict], + new_states_dict: Dict[str, state_domain.StateDict], + current_version: int, + committer_id: str +) -> Dict[str, state_domain.StateVersionHistory]: + """Updates the version history of each state at a particular version + of an exploration. + + Args: + states_version_history: dict(str, StateVersionHistory). The version + history data of each state in the previous version of the + exploration. + change_list: list(ExplorationChange). A list of changes introduced in + this commit. + old_states_dict: dict(str, dict). The states in the previous version of + the exploration. 
+ new_states_dict: dict(str, dict). The states in the current version of + the exploration. + current_version: int. The latest version of the exploration. + committer_id: str. The id of the user who made the commit. + + Returns: + states_version_history: dict(str, StateVersionHistory). The updated + version history data of each state. + """ + exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) + prev_version = current_version - 1 + + # Firstly, delete the states from the state version history which were + # deleted during this commit. + for state_name in exp_versions_diff.deleted_state_names: + del states_version_history[state_name] + + # Now, handle the updation of version history of states which were renamed. + # Firstly, we need to clean up the exp_versions_diff.old_to_new_state_names + # dict from the state names which are not effectively changed. For example, + # if a state was renamed from state_1 to state_2 and then from state_2 to + # state_1 in the same commit, then there is no effective change in state + # name and we need to clear them from this dict. + effective_old_to_new_state_names = {} + for old_state_name, new_state_name in ( + exp_versions_diff.old_to_new_state_names.items() + ): + if old_state_name != new_state_name: + effective_old_to_new_state_names[old_state_name] = new_state_name + for old_state_name in effective_old_to_new_state_names: + del states_version_history[old_state_name] + for old_state_name, new_state_name in ( + effective_old_to_new_state_names.items() + ): + states_version_history[new_state_name] = ( + state_domain.StateVersionHistory( + prev_version, old_state_name, committer_id)) + + # The following list includes states which exist in both the old states + # and new states and were not renamed. 
+ states_which_were_not_renamed = [] + for state_name in old_states_dict: + if ( + state_name not in exp_versions_diff.deleted_state_names and + state_name not in effective_old_to_new_state_names + ): + states_which_were_not_renamed.append(state_name) + + # We have dealt with state additions, deletions and renames. + # Now we deal with states which were present in both versions and + # underwent changes only through the command EDIT_STATE_PROPERTY. + # The following dict stores whether the properties of states present + # in states_which_were_not_renamed were changed using EDIT_STATE_PROPERTY. + state_property_changed_data = { + state_name: False + for state_name in states_which_were_not_renamed + } + # The following ignore list contains those state properties which are + # related to voiceovers. Hence, they are ignored in order to avoid + # updating the version history in case of voiceover-only commits. + state_property_ignore_list = [ + exp_domain.STATE_PROPERTY_RECORDED_VOICEOVERS + ] + for change in change_list: + if ( + change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY and + change.property_name not in state_property_ignore_list + ): + state_name = change.state_name + if state_name in state_property_changed_data: + state_property_changed_data[state_name] = True + + for state_name, state_property_changed in ( + state_property_changed_data.items()): + if state_property_changed: + # The purpose of checking the diff_dict between the two state + # dicts ensure that we do not change the version history of that + # particular state if the overall changes (by EDIT_STATE_PROPERTY) + # get cancelled by each other and there is no 'net change'. + diff_dict = deepdiff.DeepDiff( + old_states_dict[state_name], new_states_dict[state_name]) + if diff_dict: + states_version_history[state_name] = ( + state_domain.StateVersionHistory( + prev_version, state_name, committer_id + )) + + # Finally, add the states which were newly added during this commit. 
The + # version history of these states are initialized as None because they + # were newly added and have no 'previously edited version'. + for state_name in exp_versions_diff.added_state_names: + states_version_history[state_name] = ( + state_domain.StateVersionHistory(None, None, committer_id)) + + return states_version_history + + +def update_metadata_version_history( + metadata_version_history: exp_domain.MetadataVersionHistory, + change_list: Sequence[exp_domain.ExplorationChange], + old_metadata_dict: exp_domain.ExplorationMetadataDict, + new_metadata_dict: exp_domain.ExplorationMetadataDict, + current_version: int, + committer_id: str +) -> exp_domain.MetadataVersionHistory: + """Updates the version history of the exploration at a particular version + of an exploration. + + Args: + metadata_version_history: MetadataVersionHistory. The metadata version + history at the previous version of the exploration. + change_list: list(ExplorationChange). A list of changes introduced in + this commit. + old_metadata_dict: dict. The exploration metadata at the + previous version of the exploration. + new_metadata_dict: dict. The exploration metadata at the + current version of the exploration. + current_version: int. The latest version of the exploration. + committer_id: str. The id of the user who made the commit. + + Returns: + MetadataVersionHistory. The updated metadata version history. + """ + prev_version = current_version - 1 + + metadata_was_changed = any( + change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY + for change in change_list + ) + + if metadata_was_changed: + # The purpose of checking the diff_dict between the two metadata + # dicts ensure that we do not change the version history if the + # overall changes (by EDIT_EXPLORATION_PROPERTY) get cancelled by + # each other and there is no 'net change'. 
+ diff_dict = deepdiff.DeepDiff(old_metadata_dict, new_metadata_dict) + if diff_dict: + metadata_version_history.last_edited_version_number = prev_version + metadata_version_history.last_edited_committer_id = committer_id + + return metadata_version_history + + +def get_updated_committer_ids( + states_version_history: Dict[str, state_domain.StateVersionHistory], + metadata_last_edited_committer_id: str +) -> List[str]: + """Extracts a list of user ids who made the 'previous commit' on each state + and the exploration metadata from the exploration states and metadata + version history data. + + Args: + states_version_history: dict(str, StateVersionHistory). The version + history data of each state at a particular version of an + exploration. + metadata_last_edited_committer_id: str. User id of the user who + committed the last change in the exploration metadata. + + Returns: + list[str]. A list of user ids who made the 'previous commit' on each + state and the exploration metadata. + """ + committer_ids = { + version_history.committer_id + for version_history in states_version_history.values() + } + committer_ids.add(metadata_last_edited_committer_id) + return list(committer_ids) + + +def get_updated_version_history_model( + exploration: exp_domain.Exploration, + change_list: Sequence[exp_domain.ExplorationChange], + committer_id: str, + old_states: Dict[str, state_domain.State], + old_metadata: exp_domain.ExplorationMetadata +) -> Optional[exp_models.ExplorationVersionHistoryModel]: + """Returns an updated ExplorationVersionHistoryModel for the new version + of the exploration (after the commit). + + Args: + exploration: Exploration. The explortion after the latest commit. + change_list: list(ExplorationChange). A list of changes introduced in + the latest commit. + committer_id: str. The id of the user who made the latest commit. + old_states: dict(str, State). The states in the previous version of + the exploration (before the latest commit). 
+ old_metadata: ExplorationMetadata. The exploration metadata at the + previous version of the exploration (before the latest commit). + + Returns: + ExplorationVersionHistoryModel. The updated version history model. + """ + version_history_model_id = ( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + exploration.id, exploration.version - 1)) + version_history_model = exp_models.ExplorationVersionHistoryModel.get( + version_history_model_id, strict=False) + + # TODO(#16433): Remove this check once version history models are generated + # for all exploration versions. + if version_history_model is not None: + old_states_dict = { + state_name: state.to_dict() + for state_name, state in old_states.items() + } + new_states_dict = { + state_name: state.to_dict() + for state_name, state in exploration.states.items() + } + old_metadata_dict = old_metadata.to_dict() + new_metadata_dict = exploration.get_metadata().to_dict() + states_version_history = { + state_name: state_domain.StateVersionHistory.from_dict( + state_version_history_dict) + for state_name, state_version_history_dict in ( + version_history_model.state_version_history.items()) + } + metadata_version_history = exp_domain.MetadataVersionHistory( + version_history_model.metadata_last_edited_version_number, + version_history_model.metadata_last_edited_committer_id) + + updated_states_version_history = update_states_version_history( + states_version_history, change_list, old_states_dict, + new_states_dict, exploration.version, committer_id + ) + updated_metadata_version_history = update_metadata_version_history( + metadata_version_history, change_list, old_metadata_dict, + new_metadata_dict, exploration.version, committer_id) + updated_committer_ids = get_updated_committer_ids( + updated_states_version_history, + updated_metadata_version_history.last_edited_committer_id) + + updated_version_history_model_id = ( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + exploration.id, 
exploration.version)) + updated_version_history_model = ( + exp_models.ExplorationVersionHistoryModel( + id=updated_version_history_model_id, + exploration_id=exploration.id, + exploration_version=exploration.version, + state_version_history={ + state_name: version_history.to_dict() + for state_name, version_history in ( + updated_states_version_history.items()) + }, + metadata_last_edited_version_number=( + updated_metadata_version_history.last_edited_version_number + ), + metadata_last_edited_committer_id=( + updated_metadata_version_history.last_edited_committer_id + ), + committer_ids=updated_committer_ids + )) + return updated_version_history_model + return None + + +def _compute_models_for_updating_exploration( + committer_id: str, + exploration: exp_domain.Exploration, + commit_message: Optional[str], + change_list: Sequence[exp_domain.ExplorationChange] +) -> List[base_models.BaseModel]: + """Returns a list of updated models related to the exploration model to be + put to the datastore. The caller should ensure that the Exploration is + strictly valid before calling this function. If successful, increments the version number of the incoming exploration domain object by 1. @@ -589,20 +1256,20 @@ def _save_exploration(committer_id, exploration, commit_message, change_list): Args: committer_id: str. The id of the user who made the commit. exploration: Exploration. The exploration to be saved. - commit_message: str. The commit message. + commit_message: str or None. A description of changes made to the state. + For published explorations, this must be present; for unpublished + explorations, it should be equal to None. change_list: list(ExplorationChange). A list of changes introduced in this commit. Raises: Exception. The versions of the given exploration and the currently stored exploration model do not match. 
- """ - exploration_rights = rights_manager.get_exploration_rights(exploration.id) - if exploration_rights.status != rights_domain.ACTIVITY_STATUS_PRIVATE: - exploration.validate(strict=True) - else: - exploration.validate() + Returns: + list(BaseModel). A list of models to be put to the datastore. + """ + models_to_put: List[base_models.BaseModel] = [] exploration_model = exp_models.ExplorationModel.get(exploration.id) if exploration.version > exploration_model.version: @@ -610,7 +1277,8 @@ def _save_exploration(committer_id, exploration, commit_message, change_list): 'Unexpected error: trying to update version %s of exploration ' 'from version %s. Please reload the page and try again.' % (exploration_model.version, exploration.version)) - elif exploration.version < exploration_model.version: + + if exploration.version < exploration_model.version: raise Exception( 'Trying to update version %s of exploration from version %s, ' 'which is too old. Please reload the page and try again.' @@ -618,42 +1286,70 @@ def _save_exploration(committer_id, exploration, commit_message, change_list): old_states = exp_fetchers.get_exploration_from_model( exploration_model).states - exploration_model.category = exploration.category - exploration_model.title = exploration.title - exploration_model.objective = exploration.objective - exploration_model.language_code = exploration.language_code - exploration_model.tags = exploration.tags - exploration_model.blurb = exploration.blurb - exploration_model.author_notes = exploration.author_notes - - exploration_model.states_schema_version = exploration.states_schema_version - exploration_model.init_state_name = exploration.init_state_name - exploration_model.states = { - state_name: state.to_dict() - for (state_name, state) in exploration.states.items()} - exploration_model.param_specs = exploration.param_specs_dict - exploration_model.param_changes = exploration.param_change_dicts - exploration_model.auto_tts_enabled = 
exploration.auto_tts_enabled - exploration_model.correctness_feedback_enabled = ( - exploration.correctness_feedback_enabled) + old_metadata = exp_fetchers.get_exploration_from_model( + exploration_model).get_metadata() + + exploration_model = populate_exp_model_fields( + exploration_model, exploration) change_list_dict = [change.to_dict() for change in change_list] - exploration_model.commit(committer_id, commit_message, change_list_dict) + models_to_put.extend( + exploration_model.get_models_to_put_values( + committer_id, + commit_message, + change_list_dict, + ) + ) caching_services.delete_multi( caching_services.CACHE_NAMESPACE_EXPLORATION, None, - [exploration.id]) - + [exploration.id] + ) exploration.version += 1 exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) + # Update the version history data for each state and the exploration + # metadata in the new version of the exploration. + version_history_model = get_updated_version_history_model( + exploration, + change_list, + committer_id, + old_states, + old_metadata + ) + if version_history_model is not None: + models_to_put.append(version_history_model) + # Trigger statistics model update. 
new_exp_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - exp_versions_diff, None) + exploration.id, exploration.version, list(exploration.states.keys()), + exp_versions_diff, revert_to_version=None) - stats_services.create_stats_model(new_exp_stats) + new_state_stats_mapping = stats_services.get_state_stats_mapping( + new_exp_stats + ) + + new_exp_stats_instance_id = ( + stats_models.ExplorationStatsModel.get_entity_id( + new_exp_stats.exp_id, + new_exp_stats.exp_version + ) + ) + models_to_put.append( + stats_models.ExplorationStatsModel( + id=new_exp_stats_instance_id, + exp_id=new_exp_stats.exp_id, + exp_version=new_exp_stats.exp_version, + num_starts_v1=new_exp_stats.num_starts_v1, + num_starts_v2=new_exp_stats.num_starts_v2, + num_actual_starts_v1=new_exp_stats.num_actual_starts_v1, + num_actual_starts_v2=new_exp_stats.num_actual_starts_v2, + num_completions_v1=new_exp_stats.num_completions_v1, + num_completions_v2=new_exp_stats.num_completions_v2, + state_stats_mapping=new_state_stats_mapping + ) + ) if feconf.ENABLE_ML_CLASSIFIERS: trainable_states_dict = exploration.get_trainable_states_dict( @@ -664,27 +1360,48 @@ def _save_exploration(committer_id, exploration, commit_message, change_list): 'state_names_with_unchanged_answer_groups'] state_names_to_train_classifier = state_names_with_changed_answer_groups if state_names_with_unchanged_answer_groups: - state_names_without_classifier = ( - classifier_services.handle_non_retrainable_states( + ( + state_names_without_classifier, + state_training_jobs_mapping_models_to_put + ) = ( + classifier_services + .get_new_job_models_for_non_trainable_states( exploration, state_names_with_unchanged_answer_groups, - exp_versions_diff)) + exp_versions_diff + ) + ) state_names_to_train_classifier.extend( state_names_without_classifier) + models_to_put.extend(state_training_jobs_mapping_models_to_put) if state_names_to_train_classifier: - 
classifier_services.handle_trainable_states( - exploration, state_names_to_train_classifier) + models_to_put.extend( + classifier_services.get_new_job_models_for_trainable_states( + exploration, state_names_to_train_classifier + ) + ) # Trigger exploration issues model updation. - stats_services.update_exp_issues_for_new_exp_version( - exploration, exp_versions_diff, None) + models_to_put.extend( + stats_services.get_updated_exp_issues_models_for_new_exp_version( + exploration, + exp_versions_diff, + None + ) + ) + return models_to_put def _create_exploration( - committer_id, exploration, commit_message, commit_cmds): + committer_id: str, + exploration: exp_domain.Exploration, + commit_message: str, + commit_cmds: List[exp_domain.ExplorationChange] +) -> None: """Ensures that rights for a new exploration are saved first. - This is because _save_exploration() depends on the rights object being - present to tell it whether to do strict validation or not. + This is because _compute_models_for_updating_exploration() + depends on the rights object being present to tell it whether to do strict + validation or not. Args: committer_id: str. The id of the user who made the commit. 
@@ -716,15 +1433,34 @@ def _create_exploration( param_specs=exploration.param_specs_dict, param_changes=exploration.param_change_dicts, auto_tts_enabled=exploration.auto_tts_enabled, - correctness_feedback_enabled=exploration.correctness_feedback_enabled + correctness_feedback_enabled=exploration.correctness_feedback_enabled, + next_content_id_index=exploration.next_content_id_index ) commit_cmds_dict = [commit_cmd.to_dict() for commit_cmd in commit_cmds] model.commit(committer_id, commit_message, commit_cmds_dict) exploration.version += 1 + version_history_model = exp_models.ExplorationVersionHistoryModel( + id=exp_models.ExplorationVersionHistoryModel.get_instance_id( + exploration.id, exploration.version), + exploration_id=exploration.id, + exploration_version=exploration.version, + state_version_history={ + state_name: state_domain.StateVersionHistory( + None, None, committer_id + ).to_dict() + for state_name in exploration.states + }, + metadata_last_edited_version_number=None, + metadata_last_edited_committer_id=committer_id, + committer_ids=[committer_id] + ) + version_history_model.update_timestamps() + version_history_model.put() + # Trigger statistics model creation. exploration_stats = stats_services.get_stats_for_new_exploration( - exploration.id, exploration.version, exploration.states) + exploration.id, exploration.version, list(exploration.states.keys())) stats_services.create_stats_model(exploration_stats) if feconf.ENABLE_ML_CLASSIFIERS: @@ -736,8 +1472,12 @@ def _create_exploration( state_names_to_train.append(state_name) if state_names_to_train: - classifier_services.handle_trainable_states( - exploration, state_names_to_train) + datastore_services.put_multi( + classifier_services.get_new_job_models_for_trainable_states( + exploration, + state_names_to_train + ) + ) # Trigger exploration issues model creation. 
stats_services.create_exp_issues_for_new_exploration( @@ -747,7 +1487,9 @@ def _create_exploration( exploration.id, committer_id) -def save_new_exploration(committer_id, exploration): +def save_new_exploration( + committer_id: str, exploration: exp_domain.Exploration +) -> None: """Saves a newly created exploration. Args: @@ -759,17 +1501,25 @@ def save_new_exploration(committer_id, exploration): if exploration.title else 'New exploration created.') _create_exploration( committer_id, exploration, commit_message, [ - exp_domain.ExplorationChange({ + exp_domain.CreateNewExplorationCmd({ 'cmd': exp_domain.CMD_CREATE_NEW, 'title': exploration.title, 'category': exploration.category, })]) - user_services.add_created_exploration_id(committer_id, exploration.id) - user_services.add_edited_exploration_id(committer_id, exploration.id) + user_contributions = user_services.get_or_create_new_user_contributions( + committer_id + ) + user_contributions.add_created_exploration_id(exploration.id) + user_contributions.add_edited_exploration_id(exploration.id) + user_services.save_user_contributions(user_contributions) user_services.record_user_created_an_exploration(committer_id) -def delete_exploration(committer_id, exploration_id, force_deletion=False): +def delete_exploration( + committer_id: str, + exploration_id: str, + force_deletion: bool = False +) -> None: """Deletes the exploration with the given exploration_id. IMPORTANT: Callers of this function should ensure that committer_id has @@ -791,7 +1541,11 @@ def delete_exploration(committer_id, exploration_id, force_deletion=False): committer_id, [exploration_id], force_deletion=force_deletion) -def delete_explorations(committer_id, exploration_ids, force_deletion=False): +def delete_explorations( + committer_id: str, + exploration_ids: List[str], + force_deletion: bool = False +) -> None: """Delete the explorations with the given exploration_ids. 
IMPORTANT: Callers of this function should ensure that committer_id has @@ -850,7 +1604,7 @@ def delete_explorations(committer_id, exploration_ids, force_deletion=False): taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS, exploration_ids) -def delete_explorations_from_user_models(exploration_ids): +def delete_explorations_from_user_models(exploration_ids: List[str]) -> None: """Remove explorations from all subscribers' exploration_ids. Args: @@ -859,26 +1613,34 @@ def delete_explorations_from_user_models(exploration_ids): if not exploration_ids: return - subscription_models = user_models.UserSubscriptionsModel.query( + subscription_models: Sequence[ + user_models.UserSubscriptionsModel + ] = user_models.UserSubscriptionsModel.query( user_models.UserSubscriptionsModel.exploration_ids.IN(exploration_ids) ).fetch() for model in subscription_models: model.exploration_ids = [ id_ for id_ in model.exploration_ids if id_ not in exploration_ids] user_models.UserSubscriptionsModel.update_timestamps_multi( - subscription_models) - user_models.UserSubscriptionsModel.put_multi(subscription_models) + list(subscription_models)) + user_models.UserSubscriptionsModel.put_multi(list(subscription_models)) - exp_user_data_models = ( + exp_user_data_models: Sequence[ + user_models.ExplorationUserDataModel + ] = ( user_models.ExplorationUserDataModel.get_all().filter( user_models.ExplorationUserDataModel.exploration_id.IN( exploration_ids ) ).fetch() ) - user_models.ExplorationUserDataModel.delete_multi(exp_user_data_models) + user_models.ExplorationUserDataModel.delete_multi( + list(exp_user_data_models) + ) - user_contributions_models = ( + user_contributions_models: Sequence[ + user_models.UserContributionsModel + ] = ( user_models.UserContributionsModel.get_all().filter( datastore_services.any_of( user_models.UserContributionsModel.created_exploration_ids.IN( @@ -890,21 +1652,23 @@ def delete_explorations_from_user_models(exploration_ids): ) ).fetch() ) - for model in 
user_contributions_models: - model.created_exploration_ids = [ - exp_id for exp_id in model.created_exploration_ids + for contribution_model in user_contributions_models: + contribution_model.created_exploration_ids = [ + exp_id for exp_id in contribution_model.created_exploration_ids if exp_id not in exploration_ids ] - model.edited_exploration_ids = [ - exp_id for exp_id in model.edited_exploration_ids + contribution_model.edited_exploration_ids = [ + exp_id for exp_id in contribution_model.edited_exploration_ids if exp_id not in exploration_ids ] user_models.UserContributionsModel.update_timestamps_multi( - user_contributions_models) - user_models.UserContributionsModel.put_multi(user_contributions_models) + list(user_contributions_models)) + user_models.UserContributionsModel.put_multi( + list(user_contributions_models) + ) -def delete_explorations_from_activities(exploration_ids): +def delete_explorations_from_activities(exploration_ids: List[str]) -> None: """Remove explorations from exploration_ids field in completed and incomplete activities models. @@ -914,13 +1678,20 @@ def delete_explorations_from_activities(exploration_ids): if not exploration_ids: return - model_classes = ( + model_classes: List[ + Union[ + Type[user_models.CompletedActivitiesModel], + Type[user_models.IncompleteActivitiesModel] + ] + ] = [ user_models.CompletedActivitiesModel, user_models.IncompleteActivitiesModel, - ) - all_entities = [] + ] + all_entities: List[AcceptableActivityModelTypes] = [] for model_class in model_classes: - entities = model_class.query( + entities: Sequence[ + AcceptableActivityModelTypes + ] = model_class.query( model_class.exploration_ids.IN(exploration_ids) ).fetch() for model in entities: @@ -934,7 +1705,9 @@ def delete_explorations_from_activities(exploration_ids): # Operations on exploration snapshots. 
-def get_exploration_snapshots_metadata(exploration_id, allow_deleted=False): +def get_exploration_snapshots_metadata( + exploration_id: str, allow_deleted: bool = False +) -> List[SnapshotsMetadataDict]: """Returns the snapshots for this exploration, as dicts, up to and including the latest version of the exploration. @@ -958,7 +1731,7 @@ def get_exploration_snapshots_metadata(exploration_id, allow_deleted=False): exploration_id, version_nums, allow_deleted=allow_deleted) -def get_last_updated_by_human_ms(exp_id): +def get_last_updated_by_human_ms(exp_id: str) -> float: """Return the last time, in milliseconds, when the given exploration was updated by a human. @@ -971,7 +1744,7 @@ def get_last_updated_by_human_ms(exp_id): """ # Iterate backwards through the exploration history metadata until we find # the most recent snapshot that was committed by a human. - last_human_update_ms = 0 + last_human_update_ms: float = 0 snapshots_metadata = get_exploration_snapshots_metadata(exp_id) for snapshot_metadata in reversed(snapshots_metadata): if snapshot_metadata['committer_id'] != feconf.MIGRATION_BOT_USER_ID: @@ -981,7 +1754,9 @@ def get_last_updated_by_human_ms(exp_id): return last_human_update_ms -def publish_exploration_and_update_user_profiles(committer, exp_id): +def publish_exploration_and_update_user_profiles( + committer: user_domain.UserActionsInfo, exp_id: str +) -> None: """Publishes the exploration with publish_exploration() function in rights_manager.py, as well as updates first_contribution_msec. Sends an email to the subscribers of the committer informing them that an exploration @@ -994,7 +1769,16 @@ def publish_exploration_and_update_user_profiles(committer, exp_id): committer: UserActionsInfo. UserActionsInfo object for the user who made the commit. exp_id: str. The id of the exploration to be published. + + Raises: + Exception. To publish explorations and update users\' profiles, + user must be logged in and have admin access. 
""" + if committer.user_id is None: + raise Exception( + 'To publish explorations and update users\' profiles, ' + 'user must be logged in and have admin access.' + ) rights_manager.publish_exploration(committer, exp_id) exp_title = exp_fetchers.get_exploration_by_id(exp_id).title email_subscription_services.inform_subscribers( @@ -1003,11 +1787,20 @@ def publish_exploration_and_update_user_profiles(committer, exp_id): contributor_ids = exp_fetchers.get_exploration_summary_by_id( exp_id).contributor_ids for contributor in contributor_ids: - user_services.update_first_contribution_msec_if_not_set( - contributor, contribution_time_msec) + contributor_user_settings = user_services.get_user_settings( + contributor, + strict=False + ) + if contributor_user_settings is not None: + contributor_user_settings.update_first_contribution_msec( + contribution_time_msec + ) + user_services.save_user_settings(contributor_user_settings) -def validate_exploration_for_story(exp, strict): +def validate_exploration_for_story( + exp: exp_domain.Exploration, strict: bool +) -> List[str]: """Validates an exploration with story validations. Args: @@ -1024,10 +1817,18 @@ def validate_exploration_for_story(exp, strict): Raises: ValidationError. Invalid language found for exploration. + ValidationError. Non default category found for exploration. ValidationError. Expected no exploration to have parameter values in it. ValidationError. Invalid interaction in exploration. ValidationError. RTE content in state of exploration with ID is not supported on mobile. + ValidationError. Expected no exploration to have classifier models. + ValidationError. Expected no exploration to contain training data in + any answer group. + ValidationError. Expected no exploration to have parameter values in + the default outcome of any state interaction. + ValidationError. Expected no exploration to have video tags. + ValidationError. Expected no exploration to have link tags. 
""" validation_error_messages = [] if ( @@ -1035,14 +1836,15 @@ def validate_exploration_for_story(exp, strict): android_validation_constants.SUPPORTED_LANGUAGES): error_string = ( 'Invalid language %s found for exploration ' - 'with ID %s.' % (exp.language_code, exp.id)) + 'with ID %s. This language is not supported for explorations ' + 'in a story on the mobile app.' % (exp.language_code, exp.id)) if strict: raise utils.ValidationError(error_string) validation_error_messages.append(error_string) if exp.param_specs or exp.param_changes: error_string = ( - 'Expected no exploration to have parameter ' + 'Expected no exploration in a story to have parameter ' 'values in it. Invalid exploration: %s' % exp.id) if strict: raise utils.ValidationError(error_string) @@ -1050,18 +1852,30 @@ def validate_exploration_for_story(exp, strict): if not exp.correctness_feedback_enabled: error_string = ( - 'Expected all explorations to have correctness feedback ' + 'Expected all explorations in a story to ' + 'have correctness feedback ' 'enabled. Invalid exploration: %s' % exp.id) if strict: raise utils.ValidationError(error_string) validation_error_messages.append(error_string) + if exp.category not in constants.ALL_CATEGORIES: + error_string = ( + 'Expected all explorations in a story to ' + 'be of a default category. ' + 'Invalid exploration: %s' % exp.id) + if strict: + raise utils.ValidationError(error_string) + validation_error_messages.append(error_string) + for state_name in exp.states: state = exp.states[state_name] if not state.interaction.is_supported_on_android_app(): error_string = ( 'Invalid interaction %s in exploration ' - 'with ID: %s.' % (state.interaction.id, exp.id)) + 'with ID: %s. This interaction is not supported for ' + 'explorations in a story on the ' + 'mobile app.' 
% (state.interaction.id, exp.id)) if strict: raise utils.ValidationError(error_string) validation_error_messages.append(error_string) @@ -1069,30 +1883,102 @@ def validate_exploration_for_story(exp, strict): if not state.is_rte_content_supported_on_android(): error_string = ( 'RTE content in state %s of exploration ' - 'with ID %s is not supported on mobile.' - % (state_name, exp.id)) + 'with ID %s is not supported on mobile for explorations ' + 'in a story.' % (state_name, exp.id)) if strict: raise utils.ValidationError(error_string) validation_error_messages.append(error_string) if state.interaction.id == 'EndExploration': - recommended_exploration_ids = ( + # Here we use cast because we are narrowing down the type + # from various customization args value types to List[str] + # type, and this is done because here we are accessing + # 'recommendedExplorationIds' key from EndExploration + # customization arg whose value is always of List[str] type. + recommended_exploration_ids = cast( + List[str], state.interaction.customization_args[ - 'recommendedExplorationIds'].value) + 'recommendedExplorationIds' + ].value + ) if len(recommended_exploration_ids) != 0: error_string = ( - 'Exploration with ID: %s contains exploration ' - 'recommendations in its EndExploration interaction.' - % (exp.id)) + 'Explorations in a story are not expected to contain ' + 'exploration recommendations. Exploration with ID: ' + '%s contains exploration recommendations in its ' + 'EndExploration interaction.' % (exp.id)) + if strict: + raise utils.ValidationError(error_string) + validation_error_messages.append(error_string) + + if state.interaction.id == 'MultipleChoiceInput': + # Here we use cast because we are narrowing down the type from + # various customization args value types to List[SubtitledHtml] + # type, and this is done because here we are accessing 'choices' + # key from MultipleChoiceInput customization arg whose value is + # always of List[SubtitledHtml] type. 
+ choices = cast( + List[state_domain.SubtitledHtml], + state.interaction.customization_args['choices'].value + ) + if len(choices) < 4: + error_string = ( + 'Exploration in a story having MultipleChoiceInput ' + 'interaction should have at least 4 choices present. ' + 'Exploration with ID %s and state name %s have fewer than ' + '4 choices.' % (exp.id, state_name) + ) + if strict: + raise utils.ValidationError(error_string) + validation_error_messages.append(error_string) + + if state.classifier_model_id is not None: + error_string = ( + 'Explorations in a story are not expected to contain ' + 'classifier models. State %s of exploration with ID %s ' + 'contains classifier models.' % (state_name, exp.id)) + if strict: + raise utils.ValidationError(error_string) + validation_error_messages.append(error_string) + + for answer_group in state.interaction.answer_groups: + if len(answer_group.training_data) > 0: + error_string = ( + 'Explorations in a story are not expected to contain ' + 'training data for any answer group. State %s of ' + 'exploration with ID %s contains training data in one of ' + 'its answer groups.' % (state_name, exp.id) + ) if strict: raise utils.ValidationError(error_string) validation_error_messages.append(error_string) + break + + if ( + state.interaction.default_outcome is not None and + len(state.interaction.default_outcome.param_changes) > 0 + ): + error_string = ( + 'Explorations in a story are not expected to contain ' + 'parameter values. State %s of exploration with ID %s ' + 'contains parameter values in its default outcome.' 
% ( + state_name, exp.id + ) + ) + if strict: + raise utils.ValidationError(error_string) + validation_error_messages.append(error_string) + return validation_error_messages def update_exploration( - committer_id, exploration_id, change_list, commit_message, - is_suggestion=False, is_by_voice_artist=False): + committer_id: str, + exploration_id: str, + change_list: Optional[Sequence[exp_domain.ExplorationChange]], + commit_message: Optional[str], + is_by_voice_artist: bool = False +) -> None: """Update an exploration. Commits changes. Args: @@ -1107,8 +1993,6 @@ def update_exploration( explorations, it should be equal to None. For suggestions that are being accepted, and only for such commits, it should start with feconf.COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX. - is_suggestion: bool. Whether the update is due to a suggestion being - accepted. is_by_voice_artist: bool. Whether the changes are made by a voice artist. @@ -1120,6 +2004,65 @@ def update_exploration( message starts with the same prefix as the commit message for accepted suggestions. """ + models_to_put = compute_models_to_put_when_saving_new_exp_version( + committer_id=committer_id, + exploration_id=exploration_id, + change_list=change_list, + commit_message=commit_message, + is_by_voice_artist=is_by_voice_artist + ) + + datastore_services.update_timestamps_multi(models_to_put) + datastore_services.put_multi(models_to_put) + index_explorations_given_ids([exploration_id]) + # Explicitly clear the cache for explorations after putting the new + # version. 
+ caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, + [exploration_id] + ) + + +def compute_models_to_put_when_saving_new_exp_version( + committer_id: str, + exploration_id: str, + change_list: Optional[Sequence[exp_domain.ExplorationChange]], + commit_message: Optional[str], + is_by_voice_artist: bool = False, +) -> List[base_models.BaseModel]: + """Computes the exploration and other related models for putting to + the datastore. This method does not perform the put operation. The caller + of this method needs to perform the put operation. + + Args: + committer_id: str. The id of the user who is performing the update + action. + exploration_id: str. The id of the exploration to be updated. + change_list: list(ExplorationChange) or None. A change list to be + applied to the given exploration. If None, it corresponds to an + empty list. + commit_message: str or None. A description of changes made to the state. + For published explorations, this must be present; for unpublished + explorations, it should be equal to None. For suggestions that are + being accepted, and only for such commits, it should start with + feconf.COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX. + is_by_voice_artist: bool. Whether the changes are made by a + voice artist. + + Raises: + ValueError. No commit message is supplied and the exploration is public. + ValueError. The update is due to a suggestion and the commit message is + invalid. + ValueError. The update is not due to a suggestion, and the commit + message starts with the same prefix as the commit message for + accepted suggestions. + + Returns: + list(BaseModel). A list of the models that were updated. 
+ """ + models_to_put: List[ + base_models.BaseModel + ] = [] if change_list is None: change_list = [] @@ -1134,46 +2077,117 @@ def update_exploration( 'Exploration is public so expected a commit message but ' 'received none.') - if (is_suggestion and ( - not commit_message or - not commit_message.startswith( - feconf.COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX))): - raise ValueError('Invalid commit message for suggestion.') - if (not is_suggestion and commit_message and commit_message.startswith( - feconf.COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX)): - raise ValueError( - 'Commit messages for non-suggestions may not start with \'%s\'' % - feconf.COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX) + caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, + None, + [exploration_id] + ) + old_exploration = exp_fetchers.get_exploration_by_id(exploration_id) + old_content_id_set = set(old_exploration.get_translatable_content_ids()) updated_exploration = apply_change_list(exploration_id, change_list) if get_story_id_linked_to_exploration(exploration_id) is not None: validate_exploration_for_story(updated_exploration, True) - _save_exploration( - committer_id, updated_exploration, commit_message, change_list) - - discard_draft(exploration_id, committer_id) + updated_exploration.validate(strict=is_public) + models_to_put.extend( + _compute_models_for_updating_exploration( + committer_id, + updated_exploration, + commit_message, + change_list + ) + ) - # Update summary of changed exploration in a deferred task. 
- taskqueue_services.defer( - taskqueue_services.FUNCTION_ID_REGENERATE_EXPLORATION_SUMMARY, - taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS, exploration_id, - committer_id) + new_content_id_set = set(updated_exploration.get_translatable_content_ids()) + content_ids_corresponding_translations_to_remove = ( + old_content_id_set - new_content_id_set + ) + content_ids_corresponding_translations_to_mark_needs_update = set() + for change in change_list: + if change.cmd == exp_domain.CMD_MARK_TRANSLATIONS_NEEDS_UPDATE: + content_ids_corresponding_translations_to_mark_needs_update.add( + change.content_id) + continue + + if change.cmd == exp_domain.CMD_REMOVE_TRANSLATIONS: + content_ids_corresponding_translations_to_remove.add( + change.content_id) + new_translation_models, translation_counts = ( + translation_services.compute_translation_related_change( + updated_exploration, + list(content_ids_corresponding_translations_to_remove), + list(content_ids_corresponding_translations_to_mark_needs_update), + ) + ) + models_to_put.extend(new_translation_models) + exp_user_data_model_to_put = get_exp_user_data_model_with_draft_discarded( + exploration_id, committer_id + ) + if exp_user_data_model_to_put: + models_to_put.append(exp_user_data_model_to_put) if committer_id != feconf.MIGRATION_BOT_USER_ID: - user_services.add_edited_exploration_id(committer_id, exploration_id) - user_services.record_user_edited_an_exploration(committer_id) - if not rights_manager.is_exploration_private(exploration_id): - user_services.update_first_contribution_msec_if_not_set( - committer_id, utils.get_current_time_in_millisecs()) + user_contributions = user_services.get_or_create_new_user_contributions( + committer_id + ) + user_contributions.add_edited_exploration_id( + exploration_id + ) + models_to_put.append( + user_services.get_validated_user_contributions_model( + user_contributions + ) + ) + user_settings = user_services.get_user_settings( + committer_id, + strict=False + ) + if user_settings 
is not None: + user_settings.record_user_edited_an_exploration() + if not rights_manager.is_exploration_private(exploration_id): + user_settings.update_first_contribution_msec( + utils.get_current_time_in_millisecs() + ) + + models_to_put.append( + user_services.convert_to_user_settings_model( + user_settings + ) + ) if opportunity_services.is_exploration_available_for_contribution( - exploration_id): - opportunity_services.update_opportunity_with_updated_exploration( - exploration_id) + exploration_id + ): + models_to_put.extend( + opportunity_services + .compute_opportunity_models_with_updated_exploration( + exploration_id, + updated_exploration.get_content_count(), + translation_counts + ) + ) + exp_rights = rights_manager.get_exploration_rights(exploration_id) + exp_summary_model = exp_models.ExpSummaryModel.get(exploration_id) + exp_summary = update_exploration_summary( + updated_exploration, + exp_rights, + exp_fetchers.get_exploration_summary_from_model(exp_summary_model), + skip_exploration_model_last_updated=True + ) + exp_summary.add_contribution_by_user(committer_id) + exp_summary.version += 1 + updated_exp_summary_model: exp_models.ExpSummaryModel = ( + populate_exp_summary_model_fields( + exp_summary_model, exp_summary + ) + ) + models_to_put.append(updated_exp_summary_model) + return models_to_put def regenerate_exploration_summary_with_new_contributor( - exploration_id, contributor_id): + exploration_id: str, contributor_id: str +) -> None: """Regenerate a summary of the given exploration and add a new contributor to the contributors summary. If the summary does not exist, this function generates a new one. @@ -1183,13 +2197,28 @@ def regenerate_exploration_summary_with_new_contributor( contributor_id: str. ID of the contributor to be added to the exploration summary. 
""" - exploration = exp_fetchers.get_exploration_by_id(exploration_id) - exp_summary = _compute_summary_of_exploration(exploration) - exp_summary.add_contribution_by_user(contributor_id) - save_exploration_summary(exp_summary) + exploration = exp_fetchers.get_exploration_by_id( + exploration_id, strict=False) + exp_summary = exp_fetchers.get_exploration_summary_by_id( + exploration_id, strict=False) + if exploration is not None: + exp_rights = rights_manager.get_exploration_rights( + exploration_id, strict=True) + if exp_summary is None: + updated_exp_summary = generate_new_exploration_summary( + exploration, exp_rights) + else: + updated_exp_summary = update_exploration_summary( + exploration, exp_rights, exp_summary) + updated_exp_summary.add_contribution_by_user(contributor_id) + save_exploration_summary(updated_exp_summary) + else: + logging.error('Could not find exploration with ID %s', exploration_id) -def regenerate_exploration_and_contributors_summaries(exploration_id): +def regenerate_exploration_and_contributors_summaries( + exploration_id: str +) -> None: """Regenerate a summary of the given exploration and also regenerate the contributors summary from the snapshots. If the summary does not exist, this function generates a new one. @@ -1198,57 +2227,120 @@ def regenerate_exploration_and_contributors_summaries(exploration_id): exploration_id: str. ID of the exploration. """ exploration = exp_fetchers.get_exploration_by_id(exploration_id) - exp_summary = _compute_summary_of_exploration(exploration) - exp_summary.contributors_summary = ( - compute_exploration_contributors_summary(exp_summary.id)) - save_exploration_summary(exp_summary) - - -def _compute_summary_of_exploration(exploration): - """Create an ExplorationSummary domain object for a given Exploration - domain object and return it. 
+ exp_rights = rights_manager.get_exploration_rights( + exploration_id, strict=True) + exp_summary = exp_fetchers.get_exploration_summary_by_id( + exploration_id, strict=True) + updated_exp_summary = update_exploration_summary( + exploration, exp_rights, exp_summary) + updated_exp_summary.contributors_summary = ( + compute_exploration_contributors_summary(updated_exp_summary.id)) + save_exploration_summary(updated_exp_summary) + + +def update_exploration_summary( + exploration: exp_domain.Exploration, + exp_rights: rights_domain.ActivityRights, + exp_summary: exp_domain.ExplorationSummary, + skip_exploration_model_last_updated: bool = False +) -> exp_domain.ExplorationSummary: + """Updates an exploration summary domain object from a given exploration + and its rights. Args: exploration: Exploration. The exploration whose summary is to be computed. + exp_rights: ActivityRights. The exploration rights model, used + to compute summary. + exp_summary: ExplorationSummary. The exploration summary + model whose summary needs to be recomputed. + skip_exploration_model_last_updated: bool. Whether the update of + exploration_model_last_updated should be skipped. + The exploration_model_last_updated is computed from the last human + update of the exploration. The update for this value should + be skipped when we know that the current workflow isn't + due to a human-initiated update. Returns: ExplorationSummary. The resulting exploration summary domain object. + + Raises: + Exception. No data available for when the exploration was created_on. 
""" - exp_rights = exp_models.ExplorationRightsModel.get_by_id(exploration.id) - exp_summary_model = exp_models.ExpSummaryModel.get_by_id(exploration.id) - if exp_summary_model: - old_exp_summary = exp_fetchers.get_exploration_summary_from_model( - exp_summary_model) - ratings = old_exp_summary.ratings or feconf.get_empty_ratings() - scaled_average_rating = get_scaled_average_rating(ratings) + scaled_average_rating = get_scaled_average_rating(exp_summary.ratings) + + if skip_exploration_model_last_updated: + exploration_model_last_updated = ( + exp_summary.exploration_model_last_updated) else: - ratings = feconf.get_empty_ratings() - scaled_average_rating = feconf.EMPTY_SCALED_AVERAGE_RATING + # TODO(#15895): Revisit this after we have validations for the model to + # see whether exploration_model_last_updated and + # ExplorationModel.last_updated are in sync or not. + exploration_model_last_updated = datetime.datetime.fromtimestamp( + get_last_updated_by_human_ms(exploration.id) / 1000.0) - contributors_summary = ( - exp_summary_model.contributors_summary if exp_summary_model else {}) - contributor_ids = list(contributors_summary.keys()) + contributor_ids = list(exp_summary.contributors_summary.keys()) - exploration_model_last_updated = datetime.datetime.fromtimestamp( - python_utils.divide( - get_last_updated_by_human_ms(exploration.id), 1000.0)) - exploration_model_created_on = exploration.created_on - first_published_msec = exp_rights.first_published_msec - exp_summary = exp_domain.ExplorationSummary( + if exploration.created_on is None: + raise Exception( + 'No data available for when the exploration was created_on.' 
+ ) + + return exp_domain.ExplorationSummary( exploration.id, exploration.title, exploration.category, - exploration.objective, exploration.language_code, - exploration.tags, ratings, scaled_average_rating, exp_rights.status, - exp_rights.community_owned, exp_rights.owner_ids, - exp_rights.editor_ids, exp_rights.voice_artist_ids, - exp_rights.viewer_ids, contributor_ids, contributors_summary, - exploration.version, exploration_model_created_on, - exploration_model_last_updated, first_published_msec) + exploration.objective, exploration.language_code, exploration.tags, + exp_summary.ratings, scaled_average_rating, exp_rights.status, + exp_rights.community_owned, exp_rights.owner_ids, exp_rights.editor_ids, + exp_rights.voice_artist_ids, exp_rights.viewer_ids, contributor_ids, + exp_summary.contributors_summary, exploration.version, + exploration.created_on, exploration_model_last_updated, + exp_rights.first_published_msec + ) - return exp_summary +def generate_new_exploration_summary( + exploration: exp_domain.Exploration, + exp_rights: rights_domain.ActivityRights +) -> exp_domain.ExplorationSummary: + """Generates a new exploration summary domain object from a given + exploration and its rights. -def compute_exploration_contributors_summary(exploration_id): + Args: + exploration: Exploration. The exploration whose summary is to be + computed. + exp_rights: ActivityRights. The exploration rights model, used + to compute summary. + + Returns: + ExplorationSummary. The resulting exploration summary domain object. + + Raises: + Exception. No data available for when the exploration was created_on. + """ + ratings = feconf.get_empty_ratings() + scaled_average_rating = get_scaled_average_rating(ratings) + exploration_model_last_updated = datetime.datetime.fromtimestamp( + get_last_updated_by_human_ms(exploration.id) / 1000.0) + + if exploration.created_on is None: + raise Exception( + 'No data available for when the exploration was created_on.' 
+ ) + + return exp_domain.ExplorationSummary( + exploration.id, exploration.title, exploration.category, + exploration.objective, exploration.language_code, exploration.tags, + ratings, scaled_average_rating, exp_rights.status, + exp_rights.community_owned, exp_rights.owner_ids, exp_rights.editor_ids, + exp_rights.voice_artist_ids, exp_rights.viewer_ids, [], {}, + exploration.version, exploration.created_on, + exploration_model_last_updated, exp_rights.first_published_msec + ) + + +def compute_exploration_contributors_summary( + exploration_id: str +) -> Dict[str, int]: """Returns a dict whose keys are user_ids and whose values are the number of (non-revert) commits made to the given exploration by that user_id. This does not count commits which have since been reverted. @@ -1264,7 +2356,7 @@ def compute_exploration_contributors_summary(exploration_id): """ snapshots_metadata = get_exploration_snapshots_metadata(exploration_id) current_version = len(snapshots_metadata) - contributors_summary = collections.defaultdict(int) + contributors_summary: Dict[str, int] = collections.defaultdict(int) while True: snapshot_metadata = snapshots_metadata[current_version - 1] committer_id = snapshot_metadata['committer_id'] @@ -1275,65 +2367,41 @@ def compute_exploration_contributors_summary(exploration_id): break if is_revert: - current_version = snapshot_metadata['commit_cmds'][0][ + version_number = snapshot_metadata['commit_cmds'][0][ 'version_number'] + # Ruling out the possibility of any other type for mypy + # type checking. + assert isinstance(version_number, int) + current_version = version_number else: current_version -= 1 contributor_ids = list(contributors_summary) # Remove IDs that are deleted or do not exist. 
users_settings = user_services.get_users_settings(contributor_ids) - for contributor_id, user_settings in python_utils.ZIP( - contributor_ids, users_settings): + for contributor_id, user_settings in zip(contributor_ids, users_settings): if user_settings is None: del contributors_summary[contributor_id] return contributors_summary -def save_exploration_summary(exp_summary): +def save_exploration_summary( + exp_summary: exp_domain.ExplorationSummary +) -> None: """Save an exploration summary domain object as an ExpSummaryModel entity in the datastore. Args: exp_summary: ExplorationSummary. The exploration summary to save. """ - exp_summary_dict = { - 'title': exp_summary.title, - 'category': exp_summary.category, - 'objective': exp_summary.objective, - 'language_code': exp_summary.language_code, - 'tags': exp_summary.tags, - 'ratings': exp_summary.ratings, - 'scaled_average_rating': exp_summary.scaled_average_rating, - 'status': exp_summary.status, - 'community_owned': exp_summary.community_owned, - 'owner_ids': exp_summary.owner_ids, - 'editor_ids': exp_summary.editor_ids, - 'voice_artist_ids': exp_summary.voice_artist_ids, - 'viewer_ids': exp_summary.viewer_ids, - 'contributor_ids': list(exp_summary.contributors_summary.keys()), - 'contributors_summary': exp_summary.contributors_summary, - 'version': exp_summary.version, - 'exploration_model_last_updated': ( - exp_summary.exploration_model_last_updated), - 'exploration_model_created_on': ( - exp_summary.exploration_model_created_on), - 'first_published_msec': ( - exp_summary.first_published_msec) - } - - exp_summary_model = (exp_models.ExpSummaryModel.get_by_id(exp_summary.id)) - if exp_summary_model is not None: - exp_summary_model.populate(**exp_summary_dict) - exp_summary_model.update_timestamps() - exp_summary_model.put() - else: - exp_summary_dict['id'] = exp_summary.id - model = exp_models.ExpSummaryModel(**exp_summary_dict) - model.update_timestamps() - model.put() + existing_exp_summary_model = ( + 
exp_models.ExpSummaryModel.get(exp_summary.id, strict=False)) + exp_summary_model = populate_exp_summary_model_fields( + existing_exp_summary_model, exp_summary) + exp_summary_model.update_timestamps() + exp_summary_model.put() # The index should be updated after saving the exploration # summary instead of after saving the exploration since the # index contains documents computed on basis of exploration @@ -1341,7 +2409,7 @@ def save_exploration_summary(exp_summary): index_explorations_given_ids([exp_summary.id]) -def delete_exploration_summaries(exploration_ids): +def delete_exploration_summaries(exploration_ids: List[str]) -> None: """Delete multiple exploration summary models. Args: @@ -1356,8 +2424,77 @@ def delete_exploration_summaries(exploration_ids): exp_models.ExpSummaryModel.delete_multi(existing_summary_models) +def revert_version_history( + exploration_id: str, current_version: int, revert_to_version: int +) -> None: + """Reverts the version history to the given version number. Puts the + reverted version history model into the datastore. + + Args: + exploration_id: str. The id of the exploration for which the version + history is to be reverted to the current version. + current_version: int. The current version of the exploration. + revert_to_version: int. The version to which the version history + is to be reverted. 
+ """ + version_history_model_id = ( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + exploration_id, revert_to_version)) + version_history_model = exp_models.ExplorationVersionHistoryModel.get( + version_history_model_id, strict=False) + + if version_history_model is not None: + new_version_history_model = exp_models.ExplorationVersionHistoryModel( + id=exp_models.ExplorationVersionHistoryModel.get_instance_id( + exploration_id, current_version + 1), + exploration_id=exploration_id, + exploration_version=current_version + 1, + state_version_history=version_history_model.state_version_history, + metadata_last_edited_version_number=( + version_history_model.metadata_last_edited_version_number), + metadata_last_edited_committer_id=( + version_history_model.metadata_last_edited_committer_id), + committer_ids=version_history_model.committer_ids + ) + new_version_history_model.update_timestamps() + new_version_history_model.put() + + +def get_exploration_validation_error( + exploration_id: str, revert_to_version: int +) -> Optional[str]: + """Tests whether an exploration can be reverted to the given version + number. Does not commit any changes. + + Args: + exploration_id: str. The id of the exploration to be reverted to the + current version. + revert_to_version: int. The version to which the given exploration + is to be reverted. + + Returns: + Optional[str]. None if the revert_to_version passes all backend + validation checks, or the error string otherwise. + """ + # Validate the previous version of the exploration. 
+ exploration = exp_fetchers.get_exploration_by_id( + exploration_id, version=revert_to_version) + exploration_rights = rights_manager.get_exploration_rights(exploration.id) + try: + exploration.validate( + exploration_rights.status == rights_domain.ACTIVITY_STATUS_PUBLIC) + except Exception as ex: + return str(ex) + + return None + + def revert_exploration( - committer_id, exploration_id, current_version, revert_to_version): + committer_id: str, + exploration_id: str, + current_version: int, + revert_to_version: int +) -> None: """Reverts an exploration to the given version number. Commits changes. Args: @@ -1373,14 +2510,15 @@ def revert_exploration( currently-stored exploration model. """ exploration_model = exp_models.ExplorationModel.get( - exploration_id, strict=False) + exploration_id, strict=True) if current_version > exploration_model.version: raise Exception( 'Unexpected error: trying to update version %s of exploration ' 'from version %s. Please reload the page and try again.' % (exploration_model.version, current_version)) - elif current_version < exploration_model.version: + + if current_version < exploration_model.version: raise Exception( 'Trying to update version %s of exploration from version %s, ' 'which is too old. Please reload the page and try again.' 
@@ -1391,10 +2529,10 @@ def revert_exploration( exploration = exp_fetchers.get_exploration_by_id( exploration_id, version=revert_to_version) exploration_rights = rights_manager.get_exploration_rights(exploration.id) - if exploration_rights.status != rights_domain.ACTIVITY_STATUS_PRIVATE: - exploration.validate(strict=True) - else: - exploration.validate() + exploration_is_public = ( + exploration_rights.status != rights_domain.ACTIVITY_STATUS_PRIVATE + ) + exploration.validate(strict=exploration_is_public) exp_models.ExplorationModel.revert( exploration_model, committer_id, @@ -1404,17 +2542,25 @@ def revert_exploration( caching_services.CACHE_NAMESPACE_EXPLORATION, None, [exploration.id]) + revert_version_history(exploration_id, current_version, revert_to_version) + regenerate_exploration_and_contributors_summaries(exploration_id) exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, current_version + 1, exploration.states, + exploration.id, current_version + 1, list(exploration.states.keys()), None, revert_to_version) stats_services.create_stats_model(exploration_stats) current_exploration = exp_fetchers.get_exploration_by_id( exploration_id, version=current_version) - stats_services.update_exp_issues_for_new_exp_version( - current_exploration, None, revert_to_version) + exp_issues_models_to_put = ( + stats_services.get_updated_exp_issues_models_for_new_exp_version( + current_exploration, + None, + revert_to_version + ) + ) + datastore_services.put_multi(exp_issues_models_to_put) if feconf.ENABLE_ML_CLASSIFIERS: exploration_to_revert_to = exp_fetchers.get_exploration_by_id( @@ -1424,7 +2570,9 @@ def revert_exploration( # Creation and deletion methods. -def get_demo_exploration_components(demo_path): +def get_demo_exploration_components( + demo_path: str +) -> Tuple[str, List[Tuple[str, bytes]]]: """Gets the content of `demo_path` in the sample explorations folder. 
Args: @@ -1452,8 +2600,12 @@ def get_demo_exploration_components(demo_path): def save_new_exploration_from_yaml_and_assets( - committer_id, yaml_content, exploration_id, assets_list, - strip_voiceovers=False): + committer_id: str, + yaml_content: str, + exploration_id: str, + assets_list: List[Tuple[str, bytes]], + strip_voiceovers: bool = False +) -> None: """Saves a new exploration given its representation in YAML form and the list of assets associated with it. @@ -1461,17 +2613,14 @@ def save_new_exploration_from_yaml_and_assets( committer_id: str. The id of the user who made the commit. yaml_content: str. The YAML representation of the exploration. exploration_id: str. The id of the exploration. - assets_list: list(list(str)). A list of lists of assets, which contains - asset's filename and content. + assets_list: list(tuple(str, bytes)). A list of lists of assets, which + contains asset's filename and content. strip_voiceovers: bool. Whether to strip away all audio voiceovers from the imported exploration. Raises: Exception. The yaml file is invalid due to a missing schema version. """ - if assets_list is None: - assets_list = [] - yaml_dict = utils.dict_from_yaml(yaml_content) if 'schema_version' not in yaml_dict: raise Exception('Invalid YAML file: missing schema version') @@ -1481,9 +2630,8 @@ def save_new_exploration_from_yaml_and_assets( # images. So we need to have images in the datastore before we could # perform the migration. 
for (asset_filename, asset_content) in assets_list: - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, exploration_id)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, exploration_id) fs.commit(asset_filename, asset_content) exploration = exp_domain.Exploration.from_yaml(exploration_id, yaml_content) @@ -1499,14 +2647,14 @@ def save_new_exploration_from_yaml_and_assets( _create_exploration( committer_id, exploration, create_commit_message, [ - exp_domain.ExplorationChange({ + exp_domain.CreateNewExplorationCmd({ 'cmd': exp_domain.CMD_CREATE_NEW, 'title': exploration.title, 'category': exploration.category, })]) -def delete_demo(exploration_id): +def delete_demo(exploration_id: str) -> None: """Deletes a single demo exploration. Args: @@ -1529,7 +2677,7 @@ def delete_demo(exploration_id): feconf.SYSTEM_COMMITTER_ID, exploration_id, force_deletion=True) -def load_demo(exploration_id): +def load_demo(exploration_id: str) -> None: """Loads a demo exploration. The resulting exploration will have two commits in its history (one for @@ -1561,8 +2709,10 @@ def load_demo(exploration_id): def get_next_page_of_all_non_private_commits( - page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None, - max_age=None): + page_size: int = feconf.COMMIT_LIST_PAGE_SIZE, + urlsafe_start_cursor: Optional[str] = None, + max_age: Optional[datetime.timedelta] = None +) -> Tuple[List[exp_domain.ExplorationCommitLogEntry], Optional[str], bool]: """Returns a page of non-private commits in reverse time order. If max_age is given, it should be a datetime.timedelta instance. @@ -1607,7 +2757,9 @@ def get_next_page_of_all_non_private_commits( ) for entry in results], new_urlsafe_start_cursor, more) -def get_image_filenames_from_exploration(exploration): +def get_image_filenames_from_exploration( + exploration: exp_domain.Exploration +) -> List[str]: """Get the image filenames from the exploration. 
Args: @@ -1619,8 +2771,16 @@ def get_image_filenames_from_exploration(exploration): filenames = [] for state in exploration.states.values(): if state.interaction.id == 'ImageClickInput': - filenames.append(state.interaction.customization_args[ - 'imageAndRegions'].value['imagePath']) + # Here we use cast because we are narrowing down the type from + # various customization args value types to ImageAndRegionDict + # type, and this is done because here we are accessing + # 'imageAndRegions' key from ImageClickInput customization arg + # whose values is always of ImageAndRegionDict type. + image_paths = cast( + domain.ImageAndRegionDict, + state.interaction.customization_args['imageAndRegions'].value + ) + filenames.append(image_paths['imagePath']) html_list = exploration.get_all_html_content_strings() filenames.extend( @@ -1628,7 +2788,7 @@ def get_image_filenames_from_exploration(exploration): return filenames -def get_number_of_ratings(ratings): +def get_number_of_ratings(ratings: Dict[str, int]) -> int: """Gets the total number of ratings represented by the given ratings object. @@ -1642,7 +2802,7 @@ def get_number_of_ratings(ratings): return sum(ratings.values()) if ratings else 0 -def get_average_rating(ratings): +def get_average_rating(ratings: Dict[str, int]) -> float: """Returns the average rating of the ratings as a float. If there are no ratings, it will return 0. @@ -1663,10 +2823,11 @@ def get_average_rating(ratings): for rating_value, rating_count in ratings.items(): rating_sum += rating_weightings[rating_value] * rating_count - return python_utils.divide(rating_sum, (number_of_ratings * 1.0)) + return rating_sum / number_of_ratings + return 0 -def get_scaled_average_rating(ratings): +def get_scaled_average_rating(ratings: Dict[str, int]) -> float: """Returns the lower bound wilson score of the ratings. If there are no ratings, it will return 0. The confidence of this result is 95%. 
@@ -1683,19 +2844,16 @@ def get_scaled_average_rating(ratings): return 0 average_rating = get_average_rating(ratings) z = 1.9599639715843482 - x = python_utils.divide((average_rating - 1), 4) + x = (average_rating - 1) / 4 # The following calculates the lower bound Wilson Score as documented # http://www.goproblems.com/test/wilson/wilson.php?v1=0&v2=0&v3=0&v4=&v5=1 - a = x + python_utils.divide((z**2), (2 * n)) - b = z * math.sqrt( - python_utils.divide((x * (1 - x)), n) + python_utils.divide( - (z**2), (4 * n**2))) - wilson_score_lower_bound = python_utils.divide( - (a - b), (1 + python_utils.divide(z**2, n))) + a = x + ((z**2) / (2 * n)) + b = z * math.sqrt(((x * (1 - x)) / n) + ((z**2) / (4 * n**2))) + wilson_score_lower_bound = (a - b) / (1 + ((z**2) / n)) return 1 + 4 * wilson_score_lower_bound -def index_explorations_given_ids(exp_ids): +def index_explorations_given_ids(exp_ids: List[str]) -> None: """Indexes the explorations corresponding to the given exploration ids. Args: @@ -1708,7 +2866,9 @@ def index_explorations_given_ids(exp_ids): if exploration_summary is not None]) -def is_voiceover_change_list(change_list): +def is_voiceover_change_list( + change_list: Sequence[exp_domain.ExplorationChange] +) -> bool: """Checks whether the change list contains only the changes which are allowed for voice artist to do. @@ -1727,7 +2887,9 @@ def is_voiceover_change_list(change_list): return True -def get_composite_change_list(exp_id, from_version, to_version): +def get_composite_change_list( + exp_id: str, from_version: int, to_version: int +) -> List[exp_domain.ExplorationChange]: """Returns a list of ExplorationChange domain objects consisting of changes from from_version to to_version in an exploration. @@ -1741,6 +2903,9 @@ def get_composite_change_list(exp_id, from_version, to_version): Returns: list(ExplorationChange). List of ExplorationChange domain objects consisting of changes from from_version to to_version. + + Raises: + Exception. 
From version is higher than to version. """ if from_version > to_version: raise Exception( @@ -1763,7 +2928,11 @@ def get_composite_change_list(exp_id, from_version, to_version): return composite_change_list -def are_changes_mergeable(exp_id, change_list_version, change_list): +def are_changes_mergeable( + exp_id: str, + change_list_version: int, + change_list: List[exp_domain.ExplorationChange] +) -> bool: """Checks whether the change list can be merged when the intended exploration version of changes_list is not same as the current exploration version. @@ -1784,7 +2953,6 @@ def are_changes_mergeable(exp_id, change_list_version, change_list): return True if current_exploration.version < change_list_version: return False - # A complete list of changes from one version to another # is composite_change_list. composite_change_list = get_composite_change_list( @@ -1808,7 +2976,7 @@ def are_changes_mergeable(exp_id, change_list_version, change_list): return changes_are_mergeable -def is_version_of_draft_valid(exp_id, version): +def is_version_of_draft_valid(exp_id: str, version: int) -> bool: """Checks if the draft version is the same as the latest version of the exploration. 
@@ -1825,7 +2993,11 @@ def is_version_of_draft_valid(exp_id, version): def get_user_exploration_data( - user_id, exploration_id, apply_draft=False, version=None): + user_id: str, + exploration_id: str, + apply_draft: bool = False, + version: Optional[int] = None +) -> UserExplorationDataDict: """Returns a description of the given exploration.""" exp_user_data = user_models.ExplorationUserDataModel.get( user_id, exploration_id) @@ -1860,7 +3032,7 @@ def get_user_exploration_data( user_services.get_email_preferences_for_exploration( user_id, exploration_id)) - editor_dict = { + editor_dict: UserExplorationDataDict = { 'auto_tts_enabled': exploration.auto_tts_enabled, 'category': exploration.category, 'correctness_feedback_enabled': ( @@ -1874,8 +3046,8 @@ def get_user_exploration_data( 'param_specs': exploration.param_specs_dict, 'rights': rights_manager.get_exploration_rights( exploration_id).to_dict(), - 'show_state_editor_tutorial_on_load': None, - 'show_state_translation_tutorial_on_load': None, + 'show_state_editor_tutorial_on_load': False, + 'show_state_translation_tutorial_on_load': False, 'states': states, 'tags': exploration.tags, 'title': exploration.title, @@ -1883,14 +3055,22 @@ def get_user_exploration_data( 'is_version_of_draft_valid': is_valid_draft_version, 'draft_changes': draft_changes, 'email_preferences': exploration_email_preferences.to_dict(), + 'next_content_id_index': exploration.next_content_id_index, + 'edits_allowed': exploration.edits_allowed, + 'exploration_metadata': exploration.get_metadata().to_dict() } return editor_dict def create_or_update_draft( - exp_id, user_id, change_list, exp_version, current_datetime, - is_by_voice_artist=False): + exp_id: str, + user_id: str, + change_list: Sequence[exp_domain.ExplorationChange], + exp_version: int, + current_datetime: datetime.datetime, + is_by_voice_artist: bool = False +) -> None: """Create a draft with the given change list, or update the change list of the draft if it already exists. 
A draft is updated only if the change list timestamp of the new change list is greater than the change list @@ -1936,7 +3116,9 @@ def create_or_update_draft( exp_user_data.put() -def get_exp_with_draft_applied(exp_id, user_id): +def get_exp_with_draft_applied( + exp_id: str, user_id: str +) -> Optional[exp_domain.Exploration]: """If a draft exists for the given user and exploration, apply it to the exploration. @@ -1948,8 +3130,11 @@ def get_exp_with_draft_applied(exp_id, user_id): Exploration or None. Returns the exploration domain object with draft applied, or None if draft can not be applied. """ + # TODO(#15075): Refactor this function. + exp_user_data = user_models.ExplorationUserDataModel.get(user_id, exp_id) exploration = exp_fetchers.get_exploration_by_id(exp_id) + draft_change_list = [] if exp_user_data: if exp_user_data.draft_change_list: draft_change_list_exp_version = ( @@ -1995,7 +3180,7 @@ def get_exp_with_draft_applied(exp_id, user_id): return updated_exploration -def discard_draft(exp_id, user_id): +def discard_draft(exp_id: str, user_id: str) -> None: """Discard the draft for the given user and exploration. Args: @@ -2013,15 +3198,41 @@ def discard_draft(exp_id, user_id): exp_user_data.put() -def get_interaction_id_for_state(exp_id, state_name): +def get_exp_user_data_model_with_draft_discarded( + exp_id: str, + user_id: str +) -> Optional[user_models.ExplorationUserDataModel]: + """Clears change list related fields in the ExplorationUserDataModel and + returns it. + + Args: + exp_id: str. The id of the exploration. + user_id: str. The id of the user whose draft is to be discarded. + + Returns: + ExplorationUserDataModel|None. The ExplorationUserDataModel with + draft discarded if it exists, otherwise None. 
+ """ + + exp_user_data = user_models.ExplorationUserDataModel.get( + user_id, exp_id) + if exp_user_data: + exp_user_data.draft_change_list = None + exp_user_data.draft_change_list_last_updated = None + exp_user_data.draft_change_list_exp_version = None + return exp_user_data + return None + + +def get_interaction_id_for_state(exp_id: str, state_name: str) -> Optional[str]: """Returns the interaction id for the given state name. Args: - exp_id: str. The ID of the exp. + exp_id: str. The ID of the exploration. state_name: str. The name of the state. Returns: - str. The ID of the interaction. + str|None. The ID of the interaction. Raises: Exception. If the state with the given state name does not exist in @@ -2032,3 +3243,705 @@ def get_interaction_id_for_state(exp_id, state_name): return exploration.get_interaction_id_by_state_name(state_name) raise Exception( 'There exist no state in the exploration with the given state name.') + + +def regenerate_missing_stats_for_exploration( + exp_id: str +) -> Tuple[List[str], List[str], int, int]: + """Regenerates missing ExplorationStats models and entries for all + corresponding states in an exploration. + + Args: + exp_id: str. The ID of the exp. + + Returns: + 4-tuple(missing_exp_stats, missing_state_stats, num_valid_exp_stats, + num_valid_state_stats). where: + missing_exp_stats: list(str). List of missing exploration stats. + missing_state_stats: list(str). List of missing state stats. + num_valid_exp_stats: int. Number of valid exploration stats. + num_valid_state_stats: int. Number of valid state stats. + + Raises: + Exception. Fetching exploration versions failed. + Exception. No ExplorationStatsModels found. + Exception. Exploration snapshots contain invalid commit_cmds. + Exception. Exploration does not have a given state. 
+ """ + exploration = exp_fetchers.get_exploration_by_id(exp_id) + + num_valid_state_stats = 0 + num_valid_exp_stats = 0 + + exp_versions = list(range(1, exploration.version + 1)) + missing_exp_stats_indices = [] + + exp_stats_list = stats_services.get_multiple_exploration_stats_by_version( + exp_id, exp_versions) + + exp_list = ( + exp_fetchers + .get_multiple_versioned_exp_interaction_ids_mapping_by_version( + exp_id, exp_versions)) + + if all(exp_stats is None for exp_stats in exp_stats_list): + for index, version in enumerate(exp_versions): + exp_stats_for_version = ( + stats_services.get_stats_for_new_exploration( + exp_id, version, + list(exp_list[index].state_interaction_ids_dict.keys()))) + stats_services.create_stats_model(exp_stats_for_version) + raise Exception('No ExplorationStatsModels found') + + snapshots = exp_models.ExplorationModel.get_snapshots_metadata( + exp_id, exp_versions) + change_lists = [] + for snapshot in snapshots: + change_list_for_snapshot = [] + for commit_cmd in snapshot['commit_cmds']: + try: + change_list_for_snapshot.append( + exp_domain.ExplorationChange(commit_cmd) + ) + except utils.ValidationError: + logging.error( + 'Exploration(id=%r) snapshots contains invalid ' + 'commit_cmd: %r' + % (exp_id, commit_cmd) + ) + continue + change_lists.append(change_list_for_snapshot) + + missing_exp_stats = [] + missing_state_stats = [] + + zipped_items = list( + zip(exp_stats_list, exp_list, change_lists)) + revert_commit_cmd = exp_models.ExplorationModel.CMD_REVERT_COMMIT + for i, (exp_stats, exp, change_list) in enumerate(zipped_items): + revert_to_version = next( + ( + int(change.version_number) for change in change_list + if change.cmd == revert_commit_cmd + ), None) + new_exp_version = None + + if revert_to_version is not None: + exp_versions_diff = None + # We subtract 2 from revert_to_version to get the index of the + # previous exploration version because exp_stats_list and + # prev_exp start with version 1 in the 0th index. 
+ prev_exp_version_index = revert_to_version - 2 + prev_exp_stats = exp_stats_list[prev_exp_version_index] + prev_exp = exp_list[prev_exp_version_index] + new_exp_version = revert_to_version + else: + exp_versions_diff = exp_domain.ExplorationVersionsDiff( + change_list) + # We subtract 2 from exp.version to get the index of the + # previous exploration version because exp_stats_list and + # prev_exp start with version 1 in the 0th index. + prev_exp_version_index = exp.version - 2 + prev_exp_stats = exp_stats_list[prev_exp_version_index] + prev_exp = exp_list[prev_exp_version_index] + new_exp_version = exp.version + + # Fill missing Exploration-level stats. + if exp_stats: + num_valid_exp_stats += 1 + elif exp.version == 1: + new_exploration_stats = ( + stats_services.get_stats_for_new_exploration( + exp_id, exp.version, + list(exp.state_interaction_ids_dict.keys()))) + stats_services.create_stats_model(new_exploration_stats) + missing_exp_stats_indices.append(i) + missing_exp_stats.append( + 'ExplorationStats(exp_id=%r, exp_version=%r)' + % (exp_id, exp.version)) + num_valid_state_stats += len( + new_exploration_stats.state_stats_mapping) + continue + else: + exp_stats = prev_exp_stats and prev_exp_stats.clone() + + if exp_stats is None: + new_exploration_stats = ( + stats_services.get_stats_for_new_exploration( + exp_id, exp.version, + list(exp.state_interaction_ids_dict.keys()))) + stats_services.create_stats_model(new_exploration_stats) + missing_exp_stats_indices.append(i) + missing_exp_stats.append( + 'ExplorationStats(exp_id=%r, exp_version=%r)' + % (exp_id, exp.version)) + num_valid_state_stats += len( + new_exploration_stats.state_stats_mapping) + continue + + if exp_versions_diff: + exp_stats = stats_services.advance_version_of_exp_stats( + new_exp_version, exp_versions_diff, exp_stats, None, + None) + else: + exp_stats.exp_version = exp.version + stats_services.create_stats_model(exp_stats) + missing_exp_stats_indices.append(i) + 
missing_exp_stats.append(
+                'ExplorationStats(exp_id=%r, exp_version=%r)'
+                % (exp_id, exp.version))
+
+        # Fill missing State-level stats.
+        state_stats_mapping = exp_stats.state_stats_mapping
+        for state_name in exp.state_interaction_ids_dict.keys():
+            if state_name in state_stats_mapping:
+                num_valid_state_stats += 1
+                continue
+
+            if exp_versions_diff:
+                prev_state_name = (
+                    exp_versions_diff.new_to_old_state_names.get(
+                        state_name, state_name))
+            else:
+                prev_state_name = state_name
+
+            try:
+                prev_interaction_id = (
+                    prev_exp.state_interaction_ids_dict[prev_state_name]
+                    if prev_state_name in prev_exp.state_interaction_ids_dict
+                    else None)
+                current_interaction_id = (
+                    exp.state_interaction_ids_dict[state_name])
+                exp_stats_list_item = exp_stats_list[i]
+                assert exp_stats_list_item is not None
+                # In early schema versions of ExplorationModel, the END
+                # card was a persistent, implicit state present in every
+                # exploration. The snapshots of these old explorations have
+                # since been migrated but they do not have corresponding state
+                # stats models for the END state. So for such versions, a
+                # default state stats model should be created.
+ if ( + current_interaction_id != prev_interaction_id or + ( + current_interaction_id == 'EndExploration' and + prev_state_name == 'END' + ) + ): + exp_stats_list_item.state_stats_mapping[state_name] = ( + stats_domain.StateStats.create_default() + ) + else: + assert prev_exp_stats is not None + exp_stats_list_item.state_stats_mapping[state_name] = ( + prev_exp_stats.state_stats_mapping[ + prev_state_name].clone() + ) + missing_state_stats.append( + 'StateStats(exp_id=%r, exp_version=%r, ' + 'state_name=%r)' % (exp_id, exp.version, state_name)) + except Exception as e: + assert exp_versions_diff is not None + raise Exception( + 'Exploration(id=%r, exp_version=%r) has no ' + 'State(name=%r): %r' % ( + exp_id, exp_stats.exp_version, prev_state_name, { + 'added_state_names': ( + exp_versions_diff.added_state_names), + 'deleted_state_names': ( + exp_versions_diff.deleted_state_names), + 'new_to_old_state_names': ( + exp_versions_diff.new_to_old_state_names), + 'old_to_new_state_names': ( + exp_versions_diff.old_to_new_state_names), + 'prev_exp.states': ( + prev_exp.state_interaction_ids_dict.keys()), + 'prev_exp_stats': prev_exp_stats + })) from e + + for index, exp_stats in enumerate(exp_stats_list): + if index not in missing_exp_stats_indices: + assert exp_stats is not None + stats_services.save_stats_model(exp_stats) + + return ( + missing_exp_stats, missing_state_stats, + num_valid_exp_stats, num_valid_state_stats + ) + + +def update_logged_out_user_progress( + exploration_id: str, + unique_progress_url_id: str, + state_name: str, + exp_version: int, +) -> None: + """Updates the logged-out user's progress in the + associated TransientCheckpointUrlModel. + + Args: + exploration_id: str. The ID of the exploration. + unique_progress_url_id: str. Unique 6-digit url to track a + logged-out user's progress. + state_name: str. State name of the most recently + reached checkpoint in the exploration. + exp_version: int. 
Exploration version in which a + checkpoint was most recently reached. + """ + # Fetch the model associated with the unique_progress_url_id. + checkpoint_url_model = exp_models.TransientCheckpointUrlModel.get( + unique_progress_url_id, strict=False) + + # Create a model if it doesn't already exist. + if checkpoint_url_model is None: + checkpoint_url_model = exp_models.TransientCheckpointUrlModel.create( + exploration_id, unique_progress_url_id) + + current_exploration = exp_fetchers.get_exploration_by_id( + exploration_id, strict=True, version=exp_version) + + # If the exploration is being visited the first time. + if checkpoint_url_model.furthest_reached_checkpoint_state_name is None: + checkpoint_url_model.furthest_reached_checkpoint_exp_version = ( + exp_version) + checkpoint_url_model.furthest_reached_checkpoint_state_name = ( + state_name) + elif checkpoint_url_model.furthest_reached_checkpoint_exp_version <= exp_version: # pylint: disable=line-too-long + furthest_reached_checkpoint_exp = ( + exp_fetchers.get_exploration_by_id( + exploration_id, + strict=True, + version=checkpoint_url_model.furthest_reached_checkpoint_exp_version # pylint: disable=line-too-long + ) + ) + checkpoints_in_current_exp = user_services.get_checkpoints_in_order( + current_exploration.init_state_name, current_exploration.states) + checkpoints_in_older_exp = user_services.get_checkpoints_in_order( + furthest_reached_checkpoint_exp.init_state_name, + furthest_reached_checkpoint_exp.states) + + # Get the furthest reached checkpoint in current exploration. + furthest_reached_checkpoint_in_current_exp = ( + user_services. + get_most_distant_reached_checkpoint_in_current_exploration( + checkpoints_in_current_exp, + checkpoints_in_older_exp, + checkpoint_url_model.furthest_reached_checkpoint_state_name + ) + ) + + # If the furthest reached checkpoint doesn't exist in current + # exploration. 
+ if furthest_reached_checkpoint_in_current_exp is None: + checkpoint_url_model.furthest_reached_checkpoint_exp_version = ( + exp_version) + checkpoint_url_model.furthest_reached_checkpoint_state_name = ( + state_name) + else: + # Index of the furthest reached checkpoint. + frc_index = checkpoints_in_current_exp.index( + furthest_reached_checkpoint_in_current_exp) + # If furthest reached checkpoint is behind most recently + # reached checkpoint. + if frc_index <= checkpoints_in_current_exp.index(state_name): + checkpoint_url_model.furthest_reached_checkpoint_exp_version = ( # pylint: disable=line-too-long + exp_version) + checkpoint_url_model.furthest_reached_checkpoint_state_name = ( + state_name) + + checkpoint_url_model.most_recently_reached_checkpoint_exp_version = ( + exp_version) + checkpoint_url_model.most_recently_reached_checkpoint_state_name = ( + state_name) + checkpoint_url_model.last_updated = datetime.datetime.utcnow() + checkpoint_url_model.update_timestamps() + checkpoint_url_model.put() + + +@overload +def sync_logged_out_learner_checkpoint_progress_with_current_exp_version( + exploration_id: str, unique_progress_url_id: str, *, strict: Literal[True] +) -> exp_domain.TransientCheckpointUrl: ... + + +@overload +def sync_logged_out_learner_checkpoint_progress_with_current_exp_version( + exploration_id: str, unique_progress_url_id: str, +) -> Optional[exp_domain.TransientCheckpointUrl]: ... + + +@overload +def sync_logged_out_learner_checkpoint_progress_with_current_exp_version( + exploration_id: str, unique_progress_url_id: str, *, strict: Literal[False] +) -> Optional[exp_domain.TransientCheckpointUrl]: ... + + +def sync_logged_out_learner_checkpoint_progress_with_current_exp_version( + exploration_id: str, unique_progress_url_id: str, strict: bool = False +) -> Optional[exp_domain.TransientCheckpointUrl]: + """Synchronizes the most recently reached checkpoint and the furthest + reached checkpoint with the latest exploration. 
+
+    Args:
+        exploration_id: str. The ID of the exploration.
+        unique_progress_url_id: str. Unique 6-digit url to track a
+            logged-out user's progress.
+        strict: bool. Whether to fail noisily if no TransientCheckpointUrlModel
+            with the given unique_progress_url_id exists in the datastore.
+
+    Returns:
+        TransientCheckpointUrl|None. The domain object corresponding to the
+        TransientCheckpointUrlModel, or None if no such model exists.
+    """
+    # Fetch the model associated with the unique_progress_url_id.
+    checkpoint_url_model = exp_models.TransientCheckpointUrlModel.get(
+        unique_progress_url_id, strict=strict)
+
+    if checkpoint_url_model is None:
+        return None
+
+    latest_exploration = exp_fetchers.get_exploration_by_id(exploration_id)
+    most_recently_interacted_exploration = (
+        exp_fetchers.get_exploration_by_id(
+            exploration_id,
+            strict=True,
+            version=(
+                checkpoint_url_model.most_recently_reached_checkpoint_exp_version
+            )
+        ))
+    furthest_reached_exploration = (
+        exp_fetchers.get_exploration_by_id(
+            exploration_id,
+            strict=True,
+            version=checkpoint_url_model.furthest_reached_checkpoint_exp_version
+        ))
+
+    most_recently_reached_checkpoint_in_current_exploration = (
+        user_services.
+        get_most_distant_reached_checkpoint_in_current_exploration(
+            user_services.get_checkpoints_in_order(
+                latest_exploration.init_state_name,
+                latest_exploration.states),
+            user_services.get_checkpoints_in_order(
+                most_recently_interacted_exploration.init_state_name,
+                most_recently_interacted_exploration.states),
+            checkpoint_url_model.most_recently_reached_checkpoint_state_name
+        )
+    )
+
+    furthest_reached_checkpoint_in_current_exploration = (
+        user_services.
+ get_most_distant_reached_checkpoint_in_current_exploration( + user_services.get_checkpoints_in_order( + latest_exploration.init_state_name, + latest_exploration.states), + user_services.get_checkpoints_in_order( + furthest_reached_exploration.init_state_name, + furthest_reached_exploration.states), + checkpoint_url_model.furthest_reached_checkpoint_state_name + ) + ) + + # If the most recently reached checkpoint doesn't exist in current + # exploration. + if ( + most_recently_reached_checkpoint_in_current_exploration != + checkpoint_url_model.most_recently_reached_checkpoint_state_name + ): + checkpoint_url_model.most_recently_reached_checkpoint_state_name = ( + most_recently_reached_checkpoint_in_current_exploration) + checkpoint_url_model.most_recently_reached_checkpoint_exp_version = ( + latest_exploration.version) + checkpoint_url_model.update_timestamps() + checkpoint_url_model.put() + + # If the furthest reached checkpoint doesn't exist in current + # exploration. + if ( + furthest_reached_checkpoint_in_current_exploration != + checkpoint_url_model.furthest_reached_checkpoint_state_name + ): + checkpoint_url_model.furthest_reached_checkpoint_state_name = ( + furthest_reached_checkpoint_in_current_exploration) + checkpoint_url_model.furthest_reached_checkpoint_exp_version = ( + latest_exploration.version) + checkpoint_url_model.update_timestamps() + checkpoint_url_model.put() + + return exp_fetchers.get_logged_out_user_progress(unique_progress_url_id) + + +def sync_logged_out_learner_progress_with_logged_in_progress( + user_id: str, exploration_id: str, unique_progress_url_id: str +) -> None: + + """Syncs logged out and logged in learner's checkpoints progress.""" + + logged_out_user_data = ( + exp_fetchers.get_logged_out_user_progress(unique_progress_url_id)) + + # If logged out progress has been cleared by the cron job. 
+ if logged_out_user_data is None: + return + + latest_exploration = exp_fetchers.get_exploration_by_id(exploration_id) + exp_user_data = exp_fetchers.get_exploration_user_data( + user_id, + exploration_id + ) + + logged_in_user_model = user_models.ExplorationUserDataModel.get( + user_id, exploration_id) + + if logged_in_user_model is None or exp_user_data is None: + logged_in_user_model = user_models.ExplorationUserDataModel.create( + user_id, exploration_id) + + logged_in_user_model.most_recently_reached_checkpoint_exp_version = ( + logged_out_user_data.most_recently_reached_checkpoint_exp_version + ) + logged_in_user_model.most_recently_reached_checkpoint_state_name = ( + logged_out_user_data.most_recently_reached_checkpoint_state_name + ) + logged_in_user_model.furthest_reached_checkpoint_exp_version = ( + logged_out_user_data.furthest_reached_checkpoint_exp_version + ) + logged_in_user_model.furthest_reached_checkpoint_state_name = ( + logged_out_user_data.furthest_reached_checkpoint_state_name + ) + logged_in_user_model.update_timestamps() + logged_in_user_model.put() + + elif logged_in_user_model.most_recently_reached_checkpoint_exp_version == logged_out_user_data.most_recently_reached_checkpoint_exp_version: # pylint: disable=line-too-long + current_exploration = exp_fetchers.get_exploration_by_id( + exploration_id, + strict=True, + version=( + logged_out_user_data.most_recently_reached_checkpoint_exp_version + ) + ) + recent_checkpoint_state_name = ( + exp_user_data.most_recently_reached_checkpoint_state_name + ) + # Ruling out the possibility of None for mypy type checking. 
+ assert recent_checkpoint_state_name is not None + most_recently_reached_checkpoint_index_in_logged_in_progress = ( + user_services.get_checkpoints_in_order( + current_exploration.init_state_name, + current_exploration.states + ).index( + recent_checkpoint_state_name + ) + ) + + most_recently_reached_checkpoint_index_in_logged_out_progress = ( + user_services.get_checkpoints_in_order( + current_exploration.init_state_name, + current_exploration.states + ).index( + logged_out_user_data.most_recently_reached_checkpoint_state_name + ) + ) + + if most_recently_reached_checkpoint_index_in_logged_in_progress < most_recently_reached_checkpoint_index_in_logged_out_progress: # pylint: disable=line-too-long + logged_in_user_model.most_recently_reached_checkpoint_exp_version = ( # pylint: disable=line-too-long + logged_out_user_data.most_recently_reached_checkpoint_exp_version # pylint: disable=line-too-long + ) + logged_in_user_model.most_recently_reached_checkpoint_state_name = ( + logged_out_user_data.most_recently_reached_checkpoint_state_name + ) + logged_in_user_model.furthest_reached_checkpoint_exp_version = ( + logged_out_user_data.furthest_reached_checkpoint_exp_version + ) + logged_in_user_model.furthest_reached_checkpoint_state_name = ( + logged_out_user_data.furthest_reached_checkpoint_state_name + ) + logged_in_user_model.update_timestamps() + logged_in_user_model.put() + + elif ( + logged_in_user_model.most_recently_reached_checkpoint_exp_version < + logged_out_user_data.most_recently_reached_checkpoint_exp_version + ): + most_recently_interacted_exploration = ( + exp_fetchers.get_exploration_by_id( + exploration_id, + strict=True, + version=exp_user_data.most_recently_reached_checkpoint_exp_version # pylint: disable=line-too-long + ) + ) + furthest_reached_exploration = ( + exp_fetchers.get_exploration_by_id( + exploration_id, + strict=True, + version=exp_user_data.furthest_reached_checkpoint_exp_version + ) + ) + + recent_checkpoint_state_name = ( + 
exp_user_data.most_recently_reached_checkpoint_state_name + ) + # Ruling out the possibility of None for mypy type checking. + assert recent_checkpoint_state_name is not None + most_recently_reached_checkpoint_in_current_exploration = ( + user_services.get_most_distant_reached_checkpoint_in_current_exploration( # pylint: disable=line-too-long + user_services.get_checkpoints_in_order( + latest_exploration.init_state_name, + latest_exploration.states), + user_services.get_checkpoints_in_order( + most_recently_interacted_exploration.init_state_name, + most_recently_interacted_exploration.states), + recent_checkpoint_state_name + ) + ) + + furthest_checkpoint_state_name = ( + exp_user_data.furthest_reached_checkpoint_state_name + ) + # Ruling out the possibility of None for mypy type checking. + assert furthest_checkpoint_state_name is not None + furthest_reached_checkpoint_in_current_exploration = ( + user_services.get_most_distant_reached_checkpoint_in_current_exploration( # pylint: disable=line-too-long + user_services.get_checkpoints_in_order( + latest_exploration.init_state_name, + latest_exploration.states), + user_services.get_checkpoints_in_order( + furthest_reached_exploration.init_state_name, + furthest_reached_exploration.states), + furthest_checkpoint_state_name + ) + ) + + # If the most recently reached checkpoint doesn't exist in current + # exploration. + if ( + most_recently_reached_checkpoint_in_current_exploration != + exp_user_data.most_recently_reached_checkpoint_state_name + ): + exp_user_data.most_recently_reached_checkpoint_state_name = ( + most_recently_reached_checkpoint_in_current_exploration) + exp_user_data.most_recently_reached_checkpoint_exp_version = ( + latest_exploration.version) + + # If the furthest reached checkpoint doesn't exist in current + # exploration. 
+ if ( + furthest_reached_checkpoint_in_current_exploration != + exp_user_data.furthest_reached_checkpoint_state_name + ): + exp_user_data.furthest_reached_checkpoint_state_name = ( + furthest_reached_checkpoint_in_current_exploration) + exp_user_data.furthest_reached_checkpoint_exp_version = ( + latest_exploration.version) + + recent_checkpoint_state_name = ( + exp_user_data.most_recently_reached_checkpoint_state_name + ) + # Ruling out the possibility of None for mypy type checking. + assert recent_checkpoint_state_name is not None + most_recently_reached_checkpoint_index_in_logged_in_progress = ( + user_services.get_checkpoints_in_order( + latest_exploration.init_state_name, + latest_exploration.states + ).index( + recent_checkpoint_state_name + ) + ) + + most_recently_reached_checkpoint_index_in_logged_out_progress = ( + user_services.get_checkpoints_in_order( + latest_exploration.init_state_name, + latest_exploration.states + ).index( + logged_out_user_data.most_recently_reached_checkpoint_state_name + )) + + if most_recently_reached_checkpoint_index_in_logged_in_progress < most_recently_reached_checkpoint_index_in_logged_out_progress: # pylint: disable=line-too-long + logged_in_user_model.most_recently_reached_checkpoint_exp_version = ( # pylint: disable=line-too-long + logged_out_user_data.most_recently_reached_checkpoint_exp_version # pylint: disable=line-too-long + ) + logged_in_user_model.most_recently_reached_checkpoint_state_name = ( + logged_out_user_data.most_recently_reached_checkpoint_state_name + ) + logged_in_user_model.furthest_reached_checkpoint_exp_version = ( + logged_out_user_data.furthest_reached_checkpoint_exp_version + ) + logged_in_user_model.furthest_reached_checkpoint_state_name = ( + logged_out_user_data.furthest_reached_checkpoint_state_name + ) + logged_in_user_model.update_timestamps() + logged_in_user_model.put() + + +def set_exploration_edits_allowed(exp_id: str, edits_are_allowed: bool) -> None: + """Toggled edits allowed field 
in the exploration. + + Args: + exp_id: str. The ID of the exp. + edits_are_allowed: boolean. Whether exploration edits are allowed. + """ + exploration_model = exp_models.ExplorationModel.get(exp_id) + exploration_model.edits_allowed = edits_are_allowed + # Updating the edits_allowed field in an exploration should not result in a + # version update. So put_multi is used instead of a commit. + base_models.BaseModel.update_timestamps_multi([exploration_model]) + base_models.BaseModel.put_multi([exploration_model]) + caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, [exp_id]) + + +def rollback_exploration_to_safe_state(exp_id: str) -> int: + """Rolls back exploration to the latest state where related metadata + models are valid. + + Args: + exp_id: str. The ID of the exp. + + Returns: + int. The version of the exploration. + """ + exploration_model = exp_models.ExplorationModel.get(exp_id) + current_version_in_exp_model = exploration_model.version + last_known_safe_version: int = exploration_model.version + snapshot_content_model = None + snapshot_metadata_model = None + models_to_delete: List[Union[ + exp_models.ExplorationSnapshotContentModel, + exp_models.ExplorationSnapshotMetadataModel + ]] = [] + for version in range(current_version_in_exp_model, 1, -1): + snapshot_content_model = ( + exp_models.ExplorationSnapshotContentModel.get( + '%s-%s' % (exp_id, version), strict=False)) + snapshot_metadata_model = ( + exp_models.ExplorationSnapshotMetadataModel.get( + '%s-%s' % (exp_id, version), strict=False)) + if snapshot_content_model is None and snapshot_metadata_model is None: + last_known_safe_version = version - 1 + elif ( + snapshot_content_model is None and + snapshot_metadata_model is not None + ): + models_to_delete.append(snapshot_metadata_model) + last_known_safe_version = version - 1 + elif ( + snapshot_content_model is not None and + snapshot_metadata_model is None + ): + models_to_delete.append(snapshot_content_model) + 
last_known_safe_version = version - 1 + else: + break + + if last_known_safe_version != current_version_in_exp_model: + exp_summary_model = exp_models.ExpSummaryModel.get(exp_id) + exp_summary_model.version = last_known_safe_version + safe_exp_model = exp_models.ExplorationModel.get( + exp_id, strict=True, version=last_known_safe_version) + safe_exp_model.version = last_known_safe_version + base_models.BaseModel.update_timestamps_multi( + [safe_exp_model, exp_summary_model]) + base_models.BaseModel.put_multi([safe_exp_model, exp_summary_model]) + base_models.BaseModel.delete_multi(models_to_delete) + caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, [exp_id]) + return last_known_safe_version diff --git a/core/domain/exp_services_test.py b/core/domain/exp_services_test.py index 3d748c1fe6c8..fbe6d3acf331 100644 --- a/core/domain/exp_services_test.py +++ b/core/domain/exp_services_test.py @@ -25,41 +25,82 @@ import zipfile from core import feconf -from core import python_utils from core import utils +from core.constants import constants +from core.domain import change_domain from core.domain import classifier_services -from core.domain import draft_upgrade_services from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import exp_services from core.domain import feedback_services -from core.domain import fs_domain +from core.domain import fs_services +from core.domain import opportunity_services from core.domain import param_domain from core.domain import rating_services from core.domain import rights_domain from core.domain import rights_manager from core.domain import search_services from core.domain import state_domain +from core.domain import stats_services +from core.domain import story_domain +from core.domain import story_services from core.domain import subscription_services +from core.domain import topic_fetchers +from core.domain import topic_services +from core.domain import 
translation_domain +from core.domain import translation_fetchers +from core.domain import translation_services from core.domain import user_services from core.platform import models from core.tests import test_utils +from extensions import domain + +from typing import ( + Dict, Final, List, Optional, Sequence, Tuple, Type, Union, cast +) + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + from mypy_imports import feedback_models + from mypy_imports import opportunity_models + from mypy_imports import recommendations_models + from mypy_imports import stats_models + from mypy_imports import user_models ( - feedback_models, exp_models, opportunity_models, - recommendations_models, stats_models, user_models + feedback_models, + exp_models, + opportunity_models, + recommendations_models, + translation_models, + stats_models, + user_models ) = models.Registry.import_models([ - models.NAMES.feedback, models.NAMES.exploration, models.NAMES.opportunity, - models.NAMES.recommendations, models.NAMES.statistics, models.NAMES.user + models.Names.FEEDBACK, + models.Names.EXPLORATION, + models.Names.OPPORTUNITY, + models.Names.RECOMMENDATIONS, + models.Names.TRANSLATION, + models.Names.STATISTICS, + models.Names.USER ]) + search_services = models.Registry.import_search_services() -transaction_services = models.Registry.import_transaction_services() # TODO(msl): Test ExpSummaryModel changes if explorations are updated, # reverted, deleted, created, rights changed. -def count_at_least_editable_exploration_summaries(user_id): +TestCustArgDictType = Dict[ + str, + Dict[str, Union[bool, Dict[str, Union[str, List[Dict[str, Union[str, Dict[ + str, Union[str, List[List[float]]]] + ]]]]]]] +] + + +def count_at_least_editable_exploration_summaries(user_id: str) -> int: """Counts exp summaries that are at least editable by the given user. 
Args: @@ -77,13 +118,13 @@ def count_at_least_editable_exploration_summaries(user_id): class ExplorationServicesUnitTests(test_utils.GenericTestBase): """Test the exploration services module.""" - EXP_0_ID = 'An_exploration_0_id' - EXP_1_ID = 'An_exploration_1_id' - EXP_2_ID = 'An_exploration_2_id' + EXP_0_ID: Final = 'An_exploration_0_id' + EXP_1_ID: Final = 'An_exploration_1_id' + EXP_2_ID: Final = 'An_exploration_2_id' - def setUp(self): + def setUp(self) -> None: """Before each individual test, create a dummy exploration.""" - super(ExplorationServicesUnitTests, self).setUp() + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) @@ -110,7 +151,63 @@ class ExplorationRevertClassifierTests(ExplorationServicesUnitTests): is reverted. """ - def test_reverting_an_exploration_maintains_classifier_models(self): + def test_raises_key_error_for_invalid_id(self) -> None: + exploration = exp_domain.Exploration.create_default_exploration( + 'tes_exp_id', title='some title', category='Algebra', + language_code=constants.DEFAULT_LANGUAGE_CODE + ) + exploration.objective = 'An objective' + exploration.correctness_feedback_enabled = False + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + self.set_interaction_for_state( + exploration.states[exploration.init_state_name], 'NumericInput', + content_id_generator + ) + exp_services.save_new_exploration(self.owner_id, exploration) + + interaction_answer_groups = [{ + 'rule_specs': [{ + 'inputs': { + 'x': 60 + }, + 'rule_type': 'IsLessThanOrEqualTo' + }], + 'outcome': { + 'dest': feconf.DEFAULT_INIT_STATE_NAME, + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Try again

    ' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': ['answer1', 'answer2', 'answer3'], + 'tagged_skill_misconception_id': None + }] + + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'property_name': ( + exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS), + 'new_value': interaction_answer_groups + })] + with self.assertRaisesRegex( + Exception, + 'No classifier algorithm found for NumericInput interaction' + ): + with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True): + with self.swap(feconf, 'MIN_TOTAL_TRAINING_EXAMPLES', 2): + with self.swap(feconf, 'MIN_ASSIGNED_LABELS', 1): + exp_services.update_exploration( + self.owner_id, 'tes_exp_id', change_list, '') + + def test_reverting_an_exploration_maintains_classifier_models(self) -> None: """Test that when exploration is reverted to previous version it maintains appropriate classifier models mapping. """ @@ -119,18 +216,19 @@ def test_reverting_an_exploration_maintains_classifier_models(self): self.EXP_0_ID, self.owner_id, title='Bridges in England', category='Architecture', language_code='en') - interaction_answer_groups = [{ + interaction_answer_groups: List[state_domain.AnswerGroupDict] = [{ 'rule_specs': [{ 'rule_type': 'Equals', 'inputs': { 'x': { - 'contentId': 'rule_input_4', + 'contentId': 'rule_input_3', 'normalizedStrSet': ['abc'] } }, }], 'outcome': { 'dest': feconf.DEFAULT_INIT_STATE_NAME, + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Try again

    ' @@ -150,12 +248,6 @@ def test_reverting_an_exploration_maintains_classifier_models(self): 'property_name': ( exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS), 'new_value': interaction_answer_groups - }), exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'property_name': ( - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX), - 'new_value': 4 })] with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True): @@ -167,6 +259,8 @@ def test_reverting_an_exploration_maintains_classifier_models(self): exp = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) interaction_id = exp.states[ feconf.DEFAULT_INIT_STATE_NAME].interaction.id + # Ruling out the possibility of None for mypy type checking. + assert interaction_id is not None algorithm_id = feconf.INTERACTION_CLASSIFIER_MAPPING[ interaction_id]['algorithm_id'] job = classifier_services.get_classifier_training_job( @@ -195,14 +289,29 @@ def test_reverting_an_exploration_maintains_classifier_models(self): new_job = classifier_services.get_classifier_training_job( self.EXP_0_ID, exp.version, feconf.DEFAULT_INIT_STATE_NAME, algorithm_id) - self.assertIsNotNone(new_job) + # Ruling out the possibility of None for mypy type checking. + assert new_job is not None + assert job is not None self.assertEqual(job.job_id, new_job.job_id) class ExplorationQueriesUnitTests(ExplorationServicesUnitTests): """Tests query methods.""" - def test_get_exploration_titles_and_categories(self): + def test_raises_error_if_guest_user_try_to_publish_the_exploration( + self + ) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'To publish explorations and update users\' profiles, ' + 'user must be logged in and have admin access.' 
+ ): + exp_services.publish_exploration_and_update_user_profiles( + guest_user, 'exp_id' + ) + + def test_get_exploration_titles_and_categories(self) -> None: self.assertEqual( exp_services.get_exploration_titles_and_categories([]), {}) @@ -242,7 +351,7 @@ def test_get_exploration_titles_and_categories(self): } }) - def test_get_interaction_id_for_state(self): + def test_get_interaction_id_for_state(self) -> None: self.save_new_default_exploration(self.EXP_0_ID, self.owner_id) exp = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) self.assertEqual(exp.has_state_name('Introduction'), True) @@ -267,17 +376,13 @@ def test_get_interaction_id_for_state(self): }] }, 'showChoicesInShuffledOrder': {'value': False} - }) + - _get_change_list( - 'Introduction', - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 2 + } ), '' ) self.assertEqual(exp_services.get_interaction_id_for_state( self.EXP_0_ID, 'Introduction'), 'MultipleChoiceInput') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'There exist no state in the exploration'): exp_services.get_interaction_id_for_state( self.EXP_0_ID, 'Fake state name') @@ -288,17 +393,17 @@ class ExplorationSummaryQueriesUnitTests(ExplorationServicesUnitTests): objects. 
""" - EXP_ID_0 = '0_en_arch_bridges_in_england' - EXP_ID_1 = '1_fi_arch_sillat_suomi' - EXP_ID_2 = '2_en_welcome_introduce_oppia' - EXP_ID_3 = '3_en_welcome_introduce_oppia_interactions' - EXP_ID_4 = '4_en_welcome' - EXP_ID_5 = '5_fi_welcome_vempain' - EXP_ID_6 = '6_en_languages_learning_basic_verbs_in_spanish' - EXP_ID_7 = '7_en_languages_private_exploration_in_spanish' + EXP_ID_0: Final = '0_en_arch_bridges_in_england' + EXP_ID_1: Final = '1_fi_arch_sillat_suomi' + EXP_ID_2: Final = '2_en_welcome_introduce_oppia' + EXP_ID_3: Final = '3_en_welcome_introduce_oppia_interactions' + EXP_ID_4: Final = '4_en_welcome' + EXP_ID_5: Final = '5_fi_welcome_vempain' + EXP_ID_6: Final = '6_en_languages_learning_basic_verbs_in_spanish' + EXP_ID_7: Final = '7_en_languages_private_exploration_in_spanish' - def setUp(self): - super(ExplorationSummaryQueriesUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() # Setup the explorations to fit into 2 different categoriers and 2 # different language groups. Also, ensure 2 of them have similar @@ -346,7 +451,7 @@ def setUp(self): self.EXP_ID_0, self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_4, self.EXP_ID_5, self.EXP_ID_6]) - def test_get_exploration_summaries_with_no_query(self): + def test_get_exploration_summaries_with_no_query(self) -> None: # An empty query should return all explorations. (exp_ids, search_offset) = ( exp_services.get_exploration_ids_matching_query('', [], [])) @@ -356,7 +461,7 @@ def test_get_exploration_summaries_with_no_query(self): ]) self.assertIsNone(search_offset) - def test_get_exploration_summaries_with_deleted_explorations(self): + def test_get_exploration_summaries_with_deleted_explorations(self) -> None: # Ensure a deleted exploration does not show up in search results. 
exp_services.delete_exploration(self.owner_id, self.EXP_ID_0) exp_services.delete_exploration(self.owner_id, self.EXP_ID_1) @@ -377,7 +482,9 @@ def test_get_exploration_summaries_with_deleted_explorations(self): exp_services.get_exploration_ids_matching_query('', [], []), ([], None)) - def test_get_exploration_summaries_with_deleted_explorations_multi(self): + def test_get_exploration_summaries_with_deleted_explorations_multi( + self + ) -> None: # Ensure a deleted exploration does not show up in search results. exp_services.delete_explorations( self.owner_id, @@ -397,7 +504,9 @@ def test_get_exploration_summaries_with_deleted_explorations_multi(self): exp_services.get_exploration_ids_matching_query('', [], []), ([], None)) - def test_get_subscribed_users_activity_ids_with_deleted_explorations(self): + def test_get_subscribed_users_activity_ids_with_deleted_explorations( + self + ) -> None: # Ensure a deleted exploration does not show up in subscribed users # activity ids. subscription_services.subscribe_to_exploration( @@ -413,7 +522,7 @@ def test_get_subscribed_users_activity_ids_with_deleted_explorations(self): subscription_services.get_exploration_ids_subscribed_to( self.owner_id)) - def test_search_exploration_summaries(self): + def test_search_exploration_summaries(self) -> None: # Search within the 'Architecture' category. exp_ids, _ = exp_services.get_exploration_ids_matching_query( '', ['Architecture'], []) @@ -457,7 +566,9 @@ def test_search_exploration_summaries(self): 'in', ['Architecture', 'Welcome'], []) self.assertEqual(sorted(exp_ids), [self.EXP_ID_0, self.EXP_ID_3]) - def test_exploration_summaries_pagination_in_filled_search_results(self): + def test_exploration_summaries_pagination_in_filled_search_results( + self + ) -> None: # Ensure the maximum number of explorations that can fit on the search # results page is maintained by the summaries function. 
with self.swap(feconf, 'SEARCH_RESULTS_PAGE_SIZE', 3): @@ -497,10 +608,11 @@ def test_exploration_summaries_pagination_in_filled_search_results(self): self.EXP_ID_4, self.EXP_ID_5, self.EXP_ID_6]) def test_get_exploration_ids_matching_query_with_stale_exploration_ids( - self): + self + ) -> None: observed_log_messages = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.error().""" observed_log_messages.append(msg % args) @@ -509,7 +621,9 @@ def _mock_logging_function(msg, *args): feconf, 'SEARCH_RESULTS_PAGE_SIZE', 6) max_iterations_swap = self.swap(exp_services, 'MAX_ITERATIONS', 1) - def _mock_delete_documents_from_index(unused_doc_ids, unused_index): + def _mock_delete_documents_from_index( + unused_doc_ids: List[str], unused_index: str + ) -> None: """Mocks delete_documents_from_index() so that the exploration is not deleted from the document on deleting the exploration. This is required to fetch stale exploration ids. @@ -541,7 +655,7 @@ def _mock_delete_documents_from_index(unused_doc_ids, unused_index): class ExplorationCreateAndDeleteUnitTests(ExplorationServicesUnitTests): """Test creation and deletion methods.""" - def test_soft_deletion_of_exploration(self): + def test_soft_deletion_of_exploration(self) -> None: """Test that soft deletion of exploration works correctly.""" # TODO(sll): Add tests for deletion of states and version snapshots. 
@@ -551,7 +665,7 @@ def test_soft_deletion_of_exploration(self): count_at_least_editable_exploration_summaries(self.owner_id), 1) exp_services.delete_exploration(self.owner_id, self.EXP_0_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class ExplorationModel with id An_exploration_0_id ' 'not found'): @@ -591,12 +705,12 @@ def test_soft_deletion_of_exploration(self): exp_models.ExplorationRightsSnapshotContentModel.get_by_id( exp_rights_snapshot_id)) - def test_deletion_of_multiple_explorations_empty(self): + def test_deletion_of_multiple_explorations_empty(self) -> None: """Test that delete_explorations with empty list works correctly.""" exp_services.delete_explorations(self.owner_id, []) self.process_and_flush_pending_tasks() - def test_soft_deletion_of_multiple_explorations(self): + def test_soft_deletion_of_multiple_explorations(self) -> None: """Test that soft deletion of explorations works correctly.""" # TODO(sll): Add tests for deletion of states and version snapshots. @@ -608,12 +722,12 @@ def test_soft_deletion_of_multiple_explorations(self): exp_services.delete_explorations( self.owner_id, [self.EXP_0_ID, self.EXP_1_ID]) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class ExplorationModel with id An_exploration_0_id ' 'not found'): exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class ExplorationModel with id An_exploration_1_id ' 'not found'): @@ -675,7 +789,7 @@ def test_soft_deletion_of_multiple_explorations(self): exp_models.ExplorationRightsSnapshotContentModel.get_by_id( exp_1_rights_snapshot_id)) - def test_hard_deletion_of_exploration(self): + def test_hard_deletion_of_exploration(self) -> None: """Test that hard deletion of exploration works correctly.""" self.save_new_default_exploration(self.EXP_0_ID, self.owner_id) # The exploration shows up in queries. 
@@ -684,7 +798,7 @@ def test_hard_deletion_of_exploration(self): exp_services.delete_exploration( self.owner_id, self.EXP_0_ID, force_deletion=True) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class ExplorationModel with id An_exploration_0_id ' 'not found'): @@ -698,7 +812,7 @@ def test_hard_deletion_of_exploration(self): self.assertIsNone( exp_models.ExplorationModel.get_by_id(self.EXP_0_ID)) - def test_hard_deletion_of_multiple_explorations(self): + def test_hard_deletion_of_multiple_explorations(self) -> None: """Test that hard deletion of explorations works correctly.""" self.save_new_default_exploration(self.EXP_0_ID, self.owner_id) self.save_new_default_exploration(self.EXP_1_ID, self.owner_id) @@ -708,12 +822,12 @@ def test_hard_deletion_of_multiple_explorations(self): exp_services.delete_explorations( self.owner_id, [self.EXP_0_ID, self.EXP_1_ID], force_deletion=True) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class ExplorationModel with id An_exploration_0_id ' 'not found'): exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class ExplorationModel with id An_exploration_1_id ' 'not found'): @@ -735,7 +849,7 @@ def test_hard_deletion_of_multiple_explorations(self): self.assertIsNone( exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)) - def test_summaries_of_hard_deleted_explorations(self): + def test_summaries_of_hard_deleted_explorations(self) -> None: """Test that summaries of hard deleted explorations are correctly deleted. 
""" @@ -743,7 +857,7 @@ def test_summaries_of_hard_deleted_explorations(self): exp_services.delete_exploration( self.owner_id, self.EXP_0_ID, force_deletion=True) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class ExplorationModel with id An_exploration_0_id ' 'not found'): @@ -757,7 +871,7 @@ def test_summaries_of_hard_deleted_explorations(self): self.assertIsNone( exp_models.ExpSummaryModel.get_by_id(self.EXP_0_ID)) - def test_recommendations_of_deleted_explorations_are_deleted(self): + def test_recommendations_of_deleted_explorations_are_deleted(self) -> None: """Test that recommendations for deleted explorations are correctly deleted. """ @@ -783,7 +897,7 @@ def test_recommendations_of_deleted_explorations_are_deleted(self): recommendations_models.ExplorationRecommendationsModel.get_by_id( self.EXP_1_ID)) - def test_opportunity_of_deleted_explorations_are_deleted(self): + def test_opportunity_of_deleted_explorations_are_deleted(self) -> None: """Test that opportunity summary for deleted explorations are correctly deleted. """ @@ -819,7 +933,7 @@ def test_opportunity_of_deleted_explorations_are_deleted(self): opportunity_models.ExplorationOpportunitySummaryModel.get_by_id( self.EXP_1_ID)) - def test_activities_of_deleted_explorations_are_deleted(self): + def test_activities_of_deleted_explorations_are_deleted(self) -> None: """Test that opportunity summary for deleted explorations are correctly deleted. 
""" @@ -840,18 +954,18 @@ def test_activities_of_deleted_explorations_are_deleted(self): self.assertEqual( user_models.CompletedActivitiesModel.get( - self.editor_id, strict=False + self.editor_id, strict=True ).exploration_ids, [] ) self.assertEqual( user_models.IncompleteActivitiesModel.get( - self.owner_id, strict=False + self.owner_id, strict=True ).exploration_ids, [] ) - def test_user_data_of_deleted_explorations_are_deleted(self): + def test_user_data_of_deleted_explorations_are_deleted(self) -> None: """Test that user data for deleted explorations are deleted.""" self.save_new_default_exploration(self.EXP_0_ID, self.owner_id) user_models.ExplorationUserDataModel( @@ -886,7 +1000,9 @@ def test_user_data_of_deleted_explorations_are_deleted(self): user_models.ExplorationUserDataModel.get( self.owner_id, self.EXP_1_ID)) - def test_deleted_explorations_are_removed_from_user_contributions(self): + def test_deleted_explorations_are_removed_from_user_contributions( + self + ) -> None: """Test that user data for deleted explorations are deleted.""" self.save_new_default_exploration(self.EXP_0_ID, self.owner_id) user_models.UserContributionsModel( @@ -933,7 +1049,7 @@ def test_deleted_explorations_are_removed_from_user_contributions(self): [] ) - def test_feedbacks_belonging_to_exploration_are_deleted(self): + def test_feedbacks_belonging_to_exploration_are_deleted(self) -> None: """Tests that feedbacks belonging to exploration are deleted.""" self.save_new_default_exploration(self.EXP_0_ID, self.owner_id) thread_1_id = feedback_services.create_thread( @@ -958,13 +1074,13 @@ def test_feedbacks_belonging_to_exploration_are_deleted(self): self.assertIsNone(feedback_models.GeneralFeedbackThreadModel.get_by_id( thread_2_id)) - def test_exploration_is_removed_from_index_when_deleted(self): + def test_exploration_is_removed_from_index_when_deleted(self) -> None: """Tests that exploration is removed from the search index when deleted. 
""" self.save_new_default_exploration(self.EXP_0_ID, self.owner_id) - def mock_delete_docs(doc_ids, index): + def mock_delete_docs(doc_ids: List[Dict[str, str]], index: str) -> None: self.assertEqual(index, exp_services.SEARCH_INDEX_EXPLORATIONS) self.assertEqual(doc_ids, [self.EXP_0_ID]) @@ -974,14 +1090,14 @@ def mock_delete_docs(doc_ids, index): with delete_docs_swap: exp_services.delete_exploration(self.owner_id, self.EXP_0_ID) - def test_explorations_are_removed_from_index_when_deleted(self): + def test_explorations_are_removed_from_index_when_deleted(self) -> None: """Tests that explorations are removed from the search index when deleted. """ self.save_new_default_exploration(self.EXP_0_ID, self.owner_id) self.save_new_default_exploration(self.EXP_1_ID, self.owner_id) - def mock_delete_docs(doc_ids, index): + def mock_delete_docs(doc_ids: List[Dict[str, str]], index: str) -> None: self.assertEqual(index, exp_services.SEARCH_INDEX_EXPLORATIONS) self.assertEqual(doc_ids, [self.EXP_0_ID, self.EXP_1_ID]) @@ -992,21 +1108,40 @@ def mock_delete_docs(doc_ids, index): exp_services.delete_explorations( self.owner_id, [self.EXP_0_ID, self.EXP_1_ID]) - def test_no_errors_are_raised_when_creating_default_exploration(self): + def test_no_errors_are_raised_when_creating_default_exploration( + self + ) -> None: exploration = exp_domain.Exploration.create_default_exploration( self.EXP_0_ID) exp_services.save_new_exploration(self.owner_id, exploration) - def test_that_default_exploration_fails_strict_validation(self): + def test_that_default_exploration_fails_strict_validation(self) -> None: exploration = exp_domain.Exploration.create_default_exploration( self.EXP_0_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'This state does not have any interaction specified.' 
): exploration.validate(strict=True) - def test_save_and_retrieve_exploration(self): + def test_save_new_exploration_with_ml_classifiers(self) -> None: + exploration_id = 'eid' + test_exp_filepath = os.path.join( + feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') + yaml_content = utils.get_file_contents(test_exp_filepath) + assets_list: List[Tuple[str, bytes]] = [] + with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True): + exp_services.save_new_exploration_from_yaml_and_assets( + feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id, + assets_list) + + exploration = exp_fetchers.get_exploration_by_id(exploration_id) + state_with_training_data = exploration.states['Home'] + self.assertIsNotNone( + state_with_training_data) + self.assertEqual(len(state_with_training_data.to_dict()), 8) + + def test_save_and_retrieve_exploration(self) -> None: self.save_new_valid_exploration(self.EXP_0_ID, self.owner_id) exp_services.update_exploration( self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ @@ -1022,13 +1157,13 @@ def test_save_and_retrieve_exploration(self): retrieved_exploration = exp_fetchers.get_exploration_by_id( self.EXP_0_ID) self.assertEqual(retrieved_exploration.title, 'A title') - self.assertEqual(retrieved_exploration.category, 'A category') + self.assertEqual(retrieved_exploration.category, 'Algebra') self.assertEqual(len(retrieved_exploration.states), 1) self.assertEqual(len(retrieved_exploration.param_specs), 1) self.assertEqual( list(retrieved_exploration.param_specs.keys())[0], 'theParameter') - def test_save_and_retrieve_exploration_summary(self): + def test_save_and_retrieve_exploration_summary(self) -> None: self.save_new_valid_exploration(self.EXP_0_ID, self.owner_id) # Change param spec. 
@@ -1062,7 +1197,487 @@ def test_save_and_retrieve_exploration_summary(self): self.assertEqual(retrieved_exp_summary.category, 'A new category') self.assertEqual(retrieved_exp_summary.contributor_ids, [self.owner_id]) - def test_update_exploration_by_migration_bot(self): + def test_apply_change_list(self) -> None: + self.save_new_linear_exp_with_state_names_and_interactions( + self.EXP_0_ID, self.owner_id, ['State 1', 'State 2'], + ['TextInput'], category='Algebra', + correctness_feedback_enabled=True) + + recorded_voiceovers_dict = { + 'voiceovers_mapping': { + 'content': { + 'en': { + 'filename': 'filename3.mp3', + 'file_size_bytes': 3000, + 'needs_update': False, + 'duration_secs': 42.43 + } + }, + 'default_outcome': {}, + 'ca_placeholder_0': {} + } + } + change_list_voiceover = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': ( + exp_domain.STATE_PROPERTY_RECORDED_VOICEOVERS), + 'state_name': 'State 1', + 'new_value': recorded_voiceovers_dict + })] + changed_exploration_voiceover = ( + exp_services.apply_change_list( + self.EXP_0_ID, change_list_voiceover)) + changed_exp_voiceover_obj = ( + changed_exploration_voiceover.states['State 1'].recorded_voiceovers + ) + self.assertDictEqual( + changed_exp_voiceover_obj.to_dict(), + recorded_voiceovers_dict) + change_list_objective = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'objective', + 'new_value': 'new objective' + })] + changed_exploration_objective = ( + exp_services.apply_change_list( + self.EXP_0_ID, + change_list_objective)) + self.assertEqual( + changed_exploration_objective.objective, + 'new objective') + + def test_publish_exploration_and_update_user_profiles(self) -> None: + self.save_new_valid_exploration(self.EXP_0_ID, self.owner_id) + exp_services.update_exploration( + self.editor_id, self.EXP_0_ID, + [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 
'property_name': 'title', + 'new_value': 'A new title' + }) + ], + 'changed title' + ) + exp_services.update_exploration( + self.voice_artist_id, self.EXP_0_ID, + [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'Another new title' + }) + ], + 'changed title again' + ) + owner_action = user_services.get_user_actions_info(self.owner_id) + exp_services.publish_exploration_and_update_user_profiles( + owner_action, self.EXP_0_ID) + updated_summary = ( + exp_fetchers.get_exploration_summary_by_id(self.EXP_0_ID)) + contributer_ids = updated_summary.contributor_ids + self.assertEqual(len(contributer_ids), 3) + self.assertFalse(updated_summary.is_private()) + self.assertIn(self.owner_id, contributer_ids) + self.assertIn(self.editor_id, contributer_ids) + self.assertIn(self.voice_artist_id, contributer_ids) + + def test_is_voiceover_change_list(self) -> None: + recorded_voiceovers_dict = { + 'voiceovers_mapping': { + 'content': { + 'en': { + 'filename': 'filename3.mp3', + 'file_size_bytes': 3000, + 'needs_update': False, + 'duration_secs': 42.43 + } + }, + 'default_outcome': {}, + 'ca_placeholder_0': {} + } + } + change_list_voiceover = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': ( + exp_domain.STATE_PROPERTY_RECORDED_VOICEOVERS), + 'state_name': 'State 1', + 'new_value': recorded_voiceovers_dict + })] + self.assertTrue( + exp_services.is_voiceover_change_list(change_list_voiceover)) + not_voiceover_change_list = [exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'title', + 'new_value': 'New title' + })] + self.assertFalse( + exp_services.is_voiceover_change_list(not_voiceover_change_list)) + + def test_validation_for_valid_exploration(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, + correctness_feedback_enabled=True, + category='Algebra' + ) + errors = 
exp_services.validate_exploration_for_story(exploration, False) + self.assertEqual(len(errors), 0) + + def test_validation_fail_for_exploration_for_invalid_language(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, end_state_name='end', + language_code='bn', correctness_feedback_enabled=True, + category='Algebra') + error_string = ( + 'Invalid language %s found for exploration ' + 'with ID %s. This language is not supported for explorations ' + 'in a story on the mobile app.' % + (exploration.language_code, exploration.id)) + errors = exp_services.validate_exploration_for_story(exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex(utils.ValidationError, error_string): + exp_services.validate_exploration_for_story(exploration, True) + + def test_validate_exploration_for_correctness_feedback_not_enabled( + self + ) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, category='Algebra') + error_string = ( + 'Expected all explorations in a story to ' + 'have correctness feedback ' + 'enabled. Invalid exploration: %s' % exploration.id) + errors = exp_services.validate_exploration_for_story(exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex(utils.ValidationError, error_string): + exp_services.validate_exploration_for_story(exploration, True) + + def test_validate_exploration_for_default_category(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, correctness_feedback_enabled=True, + category='Test') + error_string = ( + 'Expected all explorations in a story to ' + 'be of a default category. 
' + 'Invalid exploration: %s' % exploration.id) + errors = exp_services.validate_exploration_for_story(exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex(utils.ValidationError, error_string): + exp_services.validate_exploration_for_story(exploration, True) + + def test_validate_exploration_for_param_specs(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, correctness_feedback_enabled=True, + category='Algebra') + exploration.param_specs = { + 'myParam': param_domain.ParamSpec('UnicodeString')} + error_string = ( + 'Expected no exploration in a story to have parameter ' + 'values in it. Invalid exploration: %s' % exploration.id) + errors = exp_services.validate_exploration_for_story(exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex(utils.ValidationError, error_string): + exp_services.validate_exploration_for_story(exploration, True) + + def test_validate_exploration_for_invalid_interaction_id(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, correctness_feedback_enabled=True, + category='Algebra') + error_string = ( + 'Invalid interaction %s in exploration ' + 'with ID: %s. This interaction is not supported for ' + 'explorations in a story on the ' + 'mobile app.' 
% ('CodeRepl', exploration.id)) + change_list = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': exploration.init_state_name, + 'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID, + 'new_value': 'CodeRepl' + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': exploration.init_state_name, + 'property_name': ( + exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS), + 'new_value': { + 'language': { + 'value': 'python' + }, + 'placeholder': { + 'value': '# Type your code here.' + }, + 'preCode': { + 'value': '' + }, + 'postCode': { + 'value': '' + } + } + }) + ] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list, 'Changed to CodeRepl') + updated_exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + errors = exp_services.validate_exploration_for_story( + updated_exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex( + utils.ValidationError, error_string): + exp_services.validate_exploration_for_story( + updated_exploration, True) + + def test_validation_fail_for_end_exploration(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, correctness_feedback_enabled=True, + category='Algebra') + error_string = ( + 'Explorations in a story are not expected to contain ' + 'exploration recommendations. Exploration with ID: ' + '%s contains exploration recommendations in its ' + 'EndExploration interaction.' 
% (exploration.id)) + change_list = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': exploration.init_state_name, + 'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID, + 'new_value': 'EndExploration' + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': exploration.init_state_name, + 'property_name': ( + exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS), + 'new_value': { + 'recommendedExplorationIds': { + 'value': [ + 'EXP_1', + 'EXP_2' + ] + } + } + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': ( + exp_domain.STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME), + 'state_name': exploration.init_state_name, + 'new_value': None}) + ] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + change_list, 'Changed to EndExploration') + updated_exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + errors = exp_services.validate_exploration_for_story( + updated_exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex( + utils.ValidationError, error_string): + exp_services.validate_exploration_for_story( + updated_exploration, True) + + def test_validation_fail_for_multiple_choice_exploration(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, correctness_feedback_enabled=True, + category='Algebra') + error_string = ( + 'Exploration in a story having MultipleChoiceInput ' + 'interaction should have at least 4 choices present. ' + 'Exploration with ID %s and state name %s have fewer than ' + '4 choices.' 
% (exploration.id, exploration.init_state_name)) + change_list = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': exploration.init_state_name, + 'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID, + 'new_value': 'MultipleChoiceInput' + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': exploration.init_state_name, + 'property_name': ( + exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS), + 'new_value': { + 'choices': { + 'value': [ + { + 'content_id': 'ca_choices_0', + 'html': '

    1

    ' + }, + { + 'content_id': 'ca_choices_1', + 'html': '

    2

    ' + } + ] + }, + 'showChoicesInShuffledOrder': { + 'value': True + } + } + }) + ] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + change_list, 'Changed to MultipleChoiceInput') + updated_exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + errors = exp_services.validate_exploration_for_story( + updated_exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex( + utils.ValidationError, error_string): + exp_services.validate_exploration_for_story( + updated_exploration, True) + + def test_validation_fail_for_android_rte_content(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, correctness_feedback_enabled=True, + category='Algebra') + error_string = ( + 'RTE content in state %s of exploration ' + 'with ID %s is not supported on mobile for explorations ' + 'in a story.' % (exploration.init_state_name, exploration.id)) + init_state = exploration.states[exploration.init_state_name] + init_state.update_interaction_id('TextInput') + solution_dict: state_domain.SolutionDict = { + 'answer_is_exclusive': False, + 'correct_answer': 'helloworld!', + 'explanation': { + 'content_id': 'solution', + 'html': ( + '

     

    ') + }, + } + # Ruling out the possibility of None for mypy type checking. + assert init_state.interaction.id is not None + solution = state_domain.Solution.from_dict( + init_state.interaction.id, solution_dict + ) + init_state.update_interaction_solution(solution) + exploration.states[exploration.init_state_name] = init_state + errors = exp_services.validate_exploration_for_story( + exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex(utils.ValidationError, error_string): + exp_services.validate_exploration_for_story( + exploration, True) + + def test_validation_fail_for_state_classifier_model(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, correctness_feedback_enabled=True, + category='Algebra') + exploration.states[ + feconf.DEFAULT_INIT_STATE_NAME].classifier_model_id = '2' + error_string = ( + 'Explorations in a story are not expected to contain ' + 'classifier models. State %s of exploration with ID %s ' + 'contains classifier models.' % ( + feconf.DEFAULT_INIT_STATE_NAME, exploration.id + )) + errors = exp_services.validate_exploration_for_story( + exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex( + utils.ValidationError, error_string): + exp_services.validate_exploration_for_story( + exploration, True) + + def test_validation_fail_for_answer_groups(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, correctness_feedback_enabled=True, + category='Algebra') + exploration.states[ + feconf.DEFAULT_INIT_STATE_NAME + ].interaction.answer_groups = [state_domain.AnswerGroup( + state_domain.Outcome( + 'state 1', None, state_domain.SubtitledHtml( + 'feedback_1', '

    state outcome html

    '), + False, [], None, None), + [ + state_domain.RuleSpec( + 'Equals', { + 'x': { + 'contentId': 'rule_input_Equals', + 'normalizedStrSet': ['Test'] + } + } + ) + ], + [ + 'cheerful', + 'merry', + 'ecstatic', + 'glad', + 'overjoyed', + 'pleased', + 'thrilled', + 'smile' + ], + None + )] + error_string = ( + 'Explorations in a story are not expected to contain ' + 'training data for any answer group. State %s of ' + 'exploration with ID %s contains training data in one of ' + 'its answer groups.' % ( + feconf.DEFAULT_INIT_STATE_NAME, exploration.id + ) + ) + errors = exp_services.validate_exploration_for_story( + exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex( + utils.ValidationError, error_string): + exp_services.validate_exploration_for_story( + exploration, True) + + def test_validation_fail_for_default_outcome(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id, correctness_feedback_enabled=True, + category='Algebra') + exploration.states[ + feconf.DEFAULT_INIT_STATE_NAME + ].interaction.default_outcome = ( + state_domain.Outcome( + 'state 1', None, state_domain.SubtitledHtml( + 'default_outcome', '

    Default outcome for state 4

    ' + ), False, [param_domain.ParamChange( + 'ParamChange', 'RandomSelector', { + 'list_of_values': ['3', '4'], + 'parse_with_jinja': True + } + )], None, None + ) + ) + error_string = ( + 'Explorations in a story are not expected to contain ' + 'parameter values. State %s of exploration with ID %s ' + 'contains parameter values in its default outcome.' % ( + feconf.DEFAULT_INIT_STATE_NAME, exploration.id + ) + ) + errors = exp_services.validate_exploration_for_story( + exploration, False) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0], error_string) + with self.assertRaisesRegex( + utils.ValidationError, error_string): + exp_services.validate_exploration_for_story( + exploration, True) + + def test_update_exploration_by_migration_bot(self) -> None: self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, end_state_name='end') rights_manager.publish_exploration(self.owner, self.EXP_0_ID) @@ -1076,9 +1691,8 @@ def test_update_exploration_by_migration_bot(self): })], 'Did migration.') def test_update_exploration_by_migration_bot_not_updates_contribution_model( - self): - user_services.create_user_contributions( - feconf.MIGRATION_BOT_USER_ID, [], []) + self + ) -> None: self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, end_state_name='end') rights_manager.publish_exploration(self.owner, self.EXP_0_ID) @@ -1100,7 +1714,8 @@ def test_update_exploration_by_migration_bot_not_updates_contribution_model( self.assertIsNone(migration_bot_contributions_model) def test_update_exploration_by_migration_bot_not_updates_settings_model( - self): + self + ) -> None: self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, end_state_name='end') rights_manager.publish_exploration(self.owner, self.EXP_0_ID) @@ -1118,7 +1733,7 @@ def test_update_exploration_by_migration_bot_not_updates_settings_model( feconf.MIGRATION_BOT_USERNAME)) self.assertEqual(migration_bot_settings_model, None) - def 
test_get_multiple_explorations_from_model_by_id(self): + def test_get_multiple_explorations_from_model_by_id(self) -> None: self.save_new_valid_exploration( 'exp_id_1', self.owner_id, title='title 1', category='category 1', objective='objective 1') @@ -1141,7 +1756,8 @@ def test_get_multiple_explorations_from_model_by_id(self): explorations['exp_id_2'].objective, 'objective 2') def test_cannot_get_interaction_ids_mapping_by_version_with_invalid_handler( - self): + self + ) -> None: rights_manager.create_new_exploration_rights( 'exp_id_1', self.owner_id) @@ -1156,6 +1772,7 @@ def test_cannot_get_interaction_ids_mapping_by_version_with_invalid_handler( 'name': 'invalid_handler_name', 'rule_specs': [{ 'dest': 'END', + 'dest_if_really_stuck': None, 'feedback': [], 'param_changes': [], 'definition': {'rule_type': 'default'} @@ -1183,7 +1800,7 @@ def test_cannot_get_interaction_ids_mapping_by_version_with_invalid_handler( 'category': 'category 1', }]) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape( 'Exploration(id=exp_id_1, version=1, states_schema_version=3) ' @@ -1197,7 +1814,9 @@ def test_cannot_get_interaction_ids_mapping_by_version_with_invalid_handler( class LoadingAndDeletionOfExplorationDemosTests(ExplorationServicesUnitTests): - def test_loading_and_validation_and_deletion_of_demo_explorations(self): + def test_loading_and_validation_and_deletion_of_demo_explorations( + self + ) -> None: """Test loading, validation and deletion of the demo explorations.""" self.assertEqual( exp_models.ExplorationModel.get_exploration_count(), 0) @@ -1215,8 +1834,7 @@ def test_loading_and_validation_and_deletion_of_demo_explorations(self): exploration.validate(strict=True) duration = datetime.datetime.utcnow() - start_time - processing_time = duration.seconds + python_utils.divide( - duration.microseconds, 1E6) + processing_time = duration.seconds + (duration.microseconds / 1E6) self.log_line( 'Loaded and validated exploration %s (%.2f 
seconds)' % (exploration.title, processing_time)) @@ -1230,13 +1848,17 @@ def test_loading_and_validation_and_deletion_of_demo_explorations(self): self.assertEqual( exp_models.ExplorationModel.get_exploration_count(), 0) - def test_load_demo_with_invalid_demo_exploration_id_raises_error(self): - with self.assertRaisesRegexp( + def test_load_demo_with_invalid_demo_exploration_id_raises_error( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Invalid demo exploration id invalid_exploration_id'): exp_services.load_demo('invalid_exploration_id') - def test_delete_demo_with_invalid_demo_exploration_id_raises_error(self): - with self.assertRaisesRegexp( + def test_delete_demo_with_invalid_demo_exploration_id_raises_error( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Invalid demo exploration id invalid_exploration_id'): exp_services.delete_demo('invalid_exploration_id') @@ -1244,23 +1866,24 @@ def test_delete_demo_with_invalid_demo_exploration_id_raises_error(self): class ExplorationYamlImportingTests(test_utils.GenericTestBase): """Tests for loading explorations using imported YAML.""" - EXP_ID = 'exp_id0' - DEMO_EXP_ID = '0' - TEST_ASSET_PATH = 'test_asset.txt' - TEST_ASSET_CONTENT = b'Hello Oppia' + EXP_ID: Final = 'exp_id0' + DEMO_EXP_ID: Final = '0' + TEST_ASSET_PATH: Final = 'test_asset.txt' + TEST_ASSET_CONTENT: Final = b'Hello Oppia' - INTRO_AUDIO_FILE = 'introduction_state.mp3' - ANSWER_GROUP_AUDIO_FILE = 'correct_answer_feedback.mp3' - DEFAULT_OUTCOME_AUDIO_FILE = 'unknown_answer_feedback.mp3' - HINT_AUDIO_FILE = 'answer_hint.mp3' - SOLUTION_AUDIO_FILE = 'answer_solution.mp3' + INTRO_AUDIO_FILE: Final = 'introduction_state.mp3' + ANSWER_GROUP_AUDIO_FILE: Final = 'correct_answer_feedback.mp3' + DEFAULT_OUTCOME_AUDIO_FILE: Final = 'unknown_answer_feedback.mp3' + HINT_AUDIO_FILE: Final = 'answer_hint.mp3' + SOLUTION_AUDIO_FILE: Final = 'answer_solution.mp3' - YAML_WITH_AUDIO_TRANSLATIONS = ( + YAML_WITH_AUDIO_TRANSLATIONS: str = ( 
"""author_notes: '' auto_tts_enabled: true blurb: '' category: Category correctness_feedback_enabled: false +edits_allowed: true init_state_name: Introduction language_code: en objective: '' @@ -1277,6 +1900,7 @@ class ExplorationYamlImportingTests(test_utils.GenericTestBase): answer_groups: - outcome: dest: New state + dest_if_really_stuck: null feedback: content_id: feedback_1 html:

    Correct!

    @@ -1301,8 +1925,11 @@ class ExplorationYamlImportingTests(test_utils.GenericTestBase): unicode_str: '' rows: value: 1 + catchMisspellings: + value: false default_outcome: dest: Introduction + dest_if_really_stuck: null feedback: content_id: default_outcome html: '' @@ -1377,9 +2004,17 @@ class ExplorationYamlImportingTests(test_utils.GenericTestBase): interaction: answer_groups: [] confirmed_unclassified_answers: [] - customization_args: {} + customization_args: + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 default_outcome: dest: New state + dest_if_really_stuck: null feedback: content_id: default_outcome html: '' @@ -1388,7 +2023,7 @@ class ExplorationYamlImportingTests(test_utils.GenericTestBase): param_changes: [] refresher_exploration_id: null hints: [] - id: null + id: TextInput solution: null linked_skill_id: null next_content_id_index: 0 @@ -1397,12 +2032,14 @@ class ExplorationYamlImportingTests(test_utils.GenericTestBase): voiceovers_mapping: content: {} default_outcome: {} + ca_placeholder_2: {} solicit_answer_details: false card_is_checkpoint: false written_translations: translations_mapping: content: {} default_outcome: {} + ca_placeholder_2: {} states_schema_version: 42 tags: [] title: Title @@ -1410,42 +2047,48 @@ class ExplorationYamlImportingTests(test_utils.GenericTestBase): INTRO_AUDIO_FILE, DEFAULT_OUTCOME_AUDIO_FILE, ANSWER_GROUP_AUDIO_FILE, HINT_AUDIO_FILE, SOLUTION_AUDIO_FILE) - def setUp(self): - super(ExplorationYamlImportingTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) - def test_loading_recent_yaml_loads_exploration_for_user(self): + def test_loading_recent_yaml_loads_exploration_for_user(self) -> None: exp_services.save_new_exploration_from_yaml_and_assets( self.owner_id, self.SAMPLE_YAML_CONTENT, self.EXP_ID, []) exp = 
exp_fetchers.get_exploration_by_id(self.EXP_ID) self.assertEqual(exp.to_yaml(), self.SAMPLE_YAML_CONTENT) - def test_loading_recent_yaml_does_not_default_exp_title_category(self): + def test_loading_recent_yaml_does_not_default_exp_title_category( + self + ) -> None: exp_services.save_new_exploration_from_yaml_and_assets( self.owner_id, self.SAMPLE_YAML_CONTENT, self.EXP_ID, []) exp = exp_fetchers.get_exploration_by_id(self.EXP_ID) self.assertNotEqual(exp.title, feconf.DEFAULT_EXPLORATION_TITLE) self.assertNotEqual(exp.category, feconf.DEFAULT_EXPLORATION_CATEGORY) - def test_loading_yaml_with_assets_loads_assets_from_filesystem(self): + def test_loading_yaml_with_assets_loads_assets_from_filesystem( + self + ) -> None: test_asset = (self.TEST_ASSET_PATH, self.TEST_ASSET_CONTENT) exp_services.save_new_exploration_from_yaml_and_assets( self.owner_id, self.SAMPLE_YAML_CONTENT, self.EXP_ID, [test_asset]) - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID) self.assertEqual( fs.get(self.TEST_ASSET_PATH), self.TEST_ASSET_CONTENT) - def test_can_load_yaml_with_voiceovers(self): + def test_can_load_yaml_with_voiceovers(self) -> None: exp_services.save_new_exploration_from_yaml_and_assets( self.owner_id, self.YAML_WITH_AUDIO_TRANSLATIONS, self.EXP_ID, []) exp = exp_fetchers.get_exploration_by_id(self.EXP_ID) state = exp.states[exp.init_state_name] interaction = state.interaction + # Ruling out the possibility of None for mypy type checking. 
+ assert interaction.solution is not None + assert interaction.default_outcome is not None content_id = state.content.content_id voiceovers_mapping = state.recorded_voiceovers.voiceovers_mapping content_voiceovers = voiceovers_mapping[content_id] @@ -1470,7 +2113,7 @@ def test_can_load_yaml_with_voiceovers(self): self.assertEqual( solution_voiceovers['en'].filename, self.SOLUTION_AUDIO_FILE) - def test_can_load_yaml_with_stripped_voiceovers(self): + def test_can_load_yaml_with_stripped_voiceovers(self) -> None: exp_services.save_new_exploration_from_yaml_and_assets( self.owner_id, self.YAML_WITH_AUDIO_TRANSLATIONS, self.EXP_ID, [], strip_voiceovers=True) @@ -1478,6 +2121,9 @@ def test_can_load_yaml_with_stripped_voiceovers(self): state = exp.states[exp.init_state_name] interaction = state.interaction + # Ruling out the possibility of None for mypy type checking. + assert interaction.solution is not None + assert interaction.default_outcome is not None content_id = state.content.content_id voiceovers_mapping = state.recorded_voiceovers.voiceovers_mapping content_voiceovers = voiceovers_mapping[content_id] @@ -1496,7 +2142,7 @@ def test_can_load_yaml_with_stripped_voiceovers(self): self.assertEqual(hint_voiceovers, {}) self.assertEqual(solution_voiceovers, {}) - def test_cannot_load_yaml_with_no_schema_version(self): + def test_cannot_load_yaml_with_no_schema_version(self) -> None: yaml_with_no_schema_version = ( """ author_notes: '' @@ -1504,6 +2150,7 @@ def test_cannot_load_yaml_with_no_schema_version(self): blurb: '' category: Category correctness_feedback_enabled: false + edits_allowed: true init_state_name: Introduction language_code: en objective: '' @@ -1523,6 +2170,7 @@ def test_cannot_load_yaml_with_no_schema_version(self): answer_groups: - outcome: dest: New state + dest_if_really_stuck: null feedback: audio_translations: en: @@ -1542,6 +2190,7 @@ def test_cannot_load_yaml_with_no_schema_version(self): customization_args: {} default_outcome: dest: 
Introduction + dest_if_really_stuck: null feedback: audio_translations: en: @@ -1584,6 +2233,7 @@ def test_cannot_load_yaml_with_no_schema_version(self): customization_args: {} default_outcome: dest: New state + dest_if_really_stuck: null feedback: audio_translations: {} html: '' @@ -1603,36 +2253,42 @@ def test_cannot_load_yaml_with_no_schema_version(self): self.DEFAULT_OUTCOME_AUDIO_FILE, self.HINT_AUDIO_FILE, self.SOLUTION_AUDIO_FILE) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Invalid YAML file: missing schema version'): exp_services.save_new_exploration_from_yaml_and_assets( - self.owner_id, yaml_with_no_schema_version, self.EXP_ID, None) + self.owner_id, yaml_with_no_schema_version, self.EXP_ID, []) class GetImageFilenamesFromExplorationTests(ExplorationServicesUnitTests): - def test_get_image_filenames_from_exploration(self): + def test_get_image_filenames_from_exploration(self) -> None: exploration = exp_domain.Exploration.create_default_exploration( 'eid', title='title', category='category') exploration.add_states(['state1', 'state2', 'state3']) state1 = exploration.states['state1'] + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) state2 = exploration.states['state2'] state3 = exploration.states['state3'] - content1_dict = { - 'content_id': 'content', + content1_dict: state_domain.SubtitledHtmlDict = { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CONTENT), 'html': ( '
    Hello, this is state1
    ' '' - '') + '"&quot;&quot;" alt-with-value="&quot;image>' + '&quot;"') } - content2_dict = { - 'content_id': 'content', + content2_dict: state_domain.SubtitledHtmlDict = { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CONTENT), 'html': '
    Hello, this is state2
    ' } - content3_dict = { - 'content_id': 'content', + content3_dict: state_domain.SubtitledHtmlDict = { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CONTENT), 'html': '

    Hello, this is state3

    ' } state1.update_content( @@ -1642,11 +2298,16 @@ def test_get_image_filenames_from_exploration(self): state3.update_content( state_domain.SubtitledHtml.from_dict(content3_dict)) - self.set_interaction_for_state(state1, 'ImageClickInput') - self.set_interaction_for_state(state2, 'MultipleChoiceInput') - self.set_interaction_for_state(state3, 'ItemSelectionInput') + self.set_interaction_for_state( + state1, 'ImageClickInput', content_id_generator) + self.set_interaction_for_state( + state2, 'MultipleChoiceInput', content_id_generator) + self.set_interaction_for_state( + state3, 'ItemSelectionInput', content_id_generator) - customization_args_dict1 = { + customization_args_dict1: Dict[ + str, Dict[str, Union[bool, domain.ImageAndRegionDict]] + ] = { 'highlightRegionsOnHover': {'value': True}, 'imageAndRegions': { 'value': { @@ -1664,57 +2325,73 @@ def test_get_image_filenames_from_exploration(self): } } } - customization_args_dict2 = { - 'choices': {'value': [{ - 'content_id': 'ca_choices_0', + customization_args_choices: List[state_domain.SubtitledHtmlDict] = [{ + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), 'html': ( '

    This is value1 for MultipleChoice' '

    ' + '"&quot;&quot;" alt-with-value="&quot;' + 'image&quot;">

    ' ) }, { - 'content_id': 'ca_choices_1', + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), 'html': ( '

    This is value2 for MultipleChoice' '' + '"&quot;image&quot;">' '

    ') - }]}, + }] + customization_args_dict2: Dict[ + str, Dict[str, Union[bool, List[state_domain.SubtitledHtmlDict]]] + ] = { + 'choices': {'value': customization_args_choices}, 'showChoicesInShuffledOrder': {'value': True} } - customization_args_dict3 = { - 'choices': {'value': [{ - 'content_id': 'ca_choices_0', - 'html': ( - '

    This is value1 for ItemSelection' - '' - '

    ') - }, { - 'content_id': 'ca_choices_1', - 'html': ( - '

    This is value2 for ItemSelection' - '' - '

    ') - }, { - 'content_id': 'ca_choices_2', - 'html': ( - '

    This is value3 for ItemSelection' - '' - '

    ') - }]}, + customization_args_choices = [{ + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': ( + '

    This is value1 for ItemSelection' + '' + '

    ') + }, { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': ( + '

    This is value2 for ItemSelection' + '' + '

    ') + }, { + 'content_id': content_id_generator.generate( + translation_domain.ContentType.CUSTOMIZATION_ARG, + extra_prefix='choices'), + 'html': ( + '

    This is value3 for ItemSelection' + '' + '

    ') + }] + customization_args_dict3: Dict[ + str, Dict[str, Union[int, List[state_domain.SubtitledHtmlDict]]] + ] = { + 'choices': {'value': customization_args_choices}, 'minAllowableSelectionCount': {'value': 1}, 'maxAllowableSelectionCount': {'value': 5} } @@ -1723,41 +2400,47 @@ def test_get_image_filenames_from_exploration(self): state3.update_interaction_customization_args(customization_args_dict3) default_outcome1 = state_domain.Outcome( - 'state2', state_domain.SubtitledHtml( - 'default_outcome', '

    Default outcome for state1

    '), - False, [], None, None + 'state2', None, state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + '

    Default outcome for state1

    ' + ), False, [], None, None ) state1.update_interaction_default_outcome(default_outcome1) hint_list2 = [ state_domain.Hint( state_domain.SubtitledHtml( - 'hint_1', + content_id_generator.generate( + translation_domain.ContentType.HINT), ( '

    Hello, this is html1 for state2

    ' '' + '"&quot;&quot;" alt-with-value="&quot;' + 'image&quot;">' ) ) ), state_domain.Hint( state_domain.SubtitledHtml( - 'hint_2', '

    Hello, this is html2 for state2

    ') + content_id_generator.generate( + translation_domain.ContentType.HINT), + '

    Hello, this is html2 for state2

    ') ), ] state2.update_interaction_hints(hint_list2) state_answer_group_list2 = [state_domain.AnswerGroup( state_domain.Outcome( - 'state1', state_domain.SubtitledHtml( - 'feedback_1', ( + 'state1', None, state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), ( '

    Outcome1 for state2

    ' + ' alt-with-value="&quot;image&quot;">' '') ), False, [], None, None), [ state_domain.RuleSpec('Equals', {'x': 0}), @@ -1765,8 +2448,10 @@ def test_get_image_filenames_from_exploration(self): ], [], None ), state_domain.AnswerGroup( state_domain.Outcome( - 'state3', state_domain.SubtitledHtml( - 'feedback_2', '

    Outcome2 for state2

    '), + 'state3', None, state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + '

    Outcome2 for state2

    '), False, [], None, None), [ state_domain.RuleSpec('Equals', {'x': 0}) @@ -1776,8 +2461,10 @@ def test_get_image_filenames_from_exploration(self): )] state_answer_group_list3 = [state_domain.AnswerGroup( state_domain.Outcome( - 'state1', state_domain.SubtitledHtml( - 'feedback_1', '

    Outcome for state3

    '), + 'state1', None, state_domain.SubtitledHtml( + content_id_generator.generate( + translation_domain.ContentType.FEEDBACK), + '

    Outcome for state3

    '), False, [], None, None), [ state_domain.RuleSpec( @@ -1789,7 +2476,7 @@ def test_get_image_filenames_from_exploration(self): 'value=' '"&quot;s3Choice1.png&quot;"' ' caption-with-value="&quot;&quot;" ' - 'alt-with-value="&quot;&quot;">' + 'alt-with-value="&quot;image&quot;">' '') ]}), state_domain.RuleSpec( @@ -1801,7 +2488,7 @@ def test_get_image_filenames_from_exploration(self): 'value=' '"&quot;s3Choice3.png&quot;"' ' caption-with-value="&quot;&quot;" ' - 'alt-with-value="&quot;&quot;">' + 'alt-with-value="&quot;image&quot;">' '') ]}) ], @@ -1812,6 +2499,8 @@ def test_get_image_filenames_from_exploration(self): state2.update_interaction_answer_groups(state_answer_group_list2) state3.update_interaction_answer_groups(state_answer_group_list3) + exploration.update_next_content_id_index( + content_id_generator.next_content_id_index) filenames = ( exp_services.get_image_filenames_from_exploration(exploration)) expected_output = ['s1ImagePath.png', 's1Content.png', 's2Choice1.png', @@ -1826,19 +2515,21 @@ def test_get_image_filenames_from_exploration(self): class ZipFileExportUnitTests(ExplorationServicesUnitTests): """Test export methods for explorations represented as zip files.""" - DUMMY_IMAGE_TAG = ( + DUMMY_IMAGE_TAG: Final = ( '' ) - SAMPLE_YAML_CONTENT = ( + SAMPLE_YAML_CONTENT: str = ( """author_notes: '' -auto_tts_enabled: true +auto_tts_enabled: false blurb: '' -category: A category +category: Algebra correctness_feedback_enabled: false +edits_allowed: true init_state_name: %s language_code: en +next_content_id_index: 6 objective: The objective param_changes: [] param_specs: {} @@ -1848,22 +2539,25 @@ class ZipFileExportUnitTests(ExplorationServicesUnitTests): card_is_checkpoint: true classifier_model_id: null content: - content_id: content + content_id: content_0 html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: + catchMisspellings: + value: false placeholder: value: - content_id: ca_placeholder_0 + 
content_id: ca_placeholder_2 unicode_str: '' rows: value: 1 default_outcome: dest: %s + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_1 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null @@ -1873,39 +2567,36 @@ class ZipFileExportUnitTests(ExplorationServicesUnitTests): id: TextInput solution: null linked_skill_id: null - next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_placeholder_2: {} + content_0: {} + default_outcome_1: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} New state: card_is_checkpoint: false classifier_model_id: null content: - content_id: content + content_id: content_3 html: %s interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: + catchMisspellings: + value: false placeholder: value: - content_id: ca_placeholder_0 + content_id: ca_placeholder_5 unicode_str: '' rows: value: 1 default_outcome: dest: New state + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_4 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null @@ -1915,19 +2606,13 @@ class ZipFileExportUnitTests(ExplorationServicesUnitTests): id: TextInput solution: null linked_skill_id: null - next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_placeholder_5: {} + content_3: {} + default_outcome_4: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} states_schema_version: %d tags: [] title: A title @@ -1941,12 +2626,14 @@ class ZipFileExportUnitTests(ExplorationServicesUnitTests): UPDATED_YAML_CONTENT = ( """author_notes: '' -auto_tts_enabled: true 
+auto_tts_enabled: false blurb: '' -category: A category +category: Algebra correctness_feedback_enabled: false +edits_allowed: true init_state_name: %s language_code: en +next_content_id_index: 6 objective: The objective param_changes: [] param_specs: {} @@ -1956,22 +2643,25 @@ class ZipFileExportUnitTests(ExplorationServicesUnitTests): card_is_checkpoint: true classifier_model_id: null content: - content_id: content + content_id: content_0 html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: + catchMisspellings: + value: false placeholder: value: - content_id: ca_placeholder_0 + content_id: ca_placeholder_2 unicode_str: '' rows: value: 1 default_outcome: dest: %s + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_1 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null @@ -1981,39 +2671,36 @@ class ZipFileExportUnitTests(ExplorationServicesUnitTests): id: TextInput solution: null linked_skill_id: null - next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_placeholder_2: {} + content_0: {} + default_outcome_1: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} Renamed state: card_is_checkpoint: false classifier_model_id: null content: - content_id: content + content_id: content_3 html: %s interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: + catchMisspellings: + value: false placeholder: value: - content_id: ca_placeholder_0 + content_id: ca_placeholder_5 unicode_str: '' rows: value: 1 default_outcome: dest: Renamed state + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_4 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null @@ -2023,19 +2710,13 @@ class 
ZipFileExportUnitTests(ExplorationServicesUnitTests): id: TextInput solution: null linked_skill_id: null - next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_placeholder_5: {} + content_3: {} + default_outcome_4: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} states_schema_version: %d tags: [] title: A title @@ -2047,11 +2728,17 @@ class ZipFileExportUnitTests(ExplorationServicesUnitTests): DUMMY_IMAGE_TAG, feconf.CURRENT_STATE_SCHEMA_VERSION)) - def test_export_to_zip_file(self): + def test_export_to_zip_file(self) -> None: """Test the export_to_zip_file() method.""" exploration = self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, objective='The objective') + self.EXP_0_ID, self.owner_id, objective='The objective', + category='Algebra') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) init_state = exploration.states[exploration.init_state_name] + # Ruling out the possibility of None for mypy type checking. 
+ assert init_state.interaction.default_outcome is not None default_outcome_dict = init_state.interaction.default_outcome.to_dict() default_outcome_dict['dest'] = exploration.init_state_name exp_services.update_exploration( @@ -2066,6 +2753,14 @@ def test_export_to_zip_file(self): exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, @@ -2081,41 +2776,43 @@ def test_export_to_zip_file(self): 'new_value': { 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': content_id_generator.generate(( + translation_domain + .ContentType.CUSTOMIZATION_ARG), + extra_prefix='placeholder' + ), 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'property_name': - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 'state_name': 'New state', - 'new_value': 1 - }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'New state', 'old_value': state_domain.SubtitledHtml( - 'content', '').to_dict(), + 'content_3', '').to_dict(), 'new_value': state_domain.SubtitledHtml( - 'content', + 'content_3', '' '').to_dict() - })], 'Add state name') + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }), ], 'Add state name') - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None) as f: raw_image = 
f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, self.EXP_0_ID)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, self.EXP_0_ID) fs.commit('image/abc.png', raw_image) zip_file_output = exp_services.export_to_zip_file(self.EXP_0_ID) zf = zipfile.ZipFile(zip_file_output) @@ -2128,7 +2825,7 @@ def test_export_to_zip_file(self): zf.open('A title.yaml').read().decode('utf-8'), self.SAMPLE_YAML_CONTENT) - def test_export_to_zip_file_with_unpublished_exploration(self): + def test_export_to_zip_file_with_unpublished_exploration(self) -> None: """Test the export_to_zip_file() method.""" self.save_new_default_exploration( self.EXP_0_ID, self.owner_id, title='') @@ -2138,11 +2835,17 @@ def test_export_to_zip_file_with_unpublished_exploration(self): self.assertEqual(zf.namelist(), ['Unpublished_exploration.yaml']) - def test_export_to_zip_file_with_assets(self): + def test_export_to_zip_file_with_assets(self) -> None: """Test exporting an exploration with assets to a zip file.""" exploration = self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, objective='The objective') + self.EXP_0_ID, self.owner_id, objective='The objective', + category='Algebra') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) init_state = exploration.states[exploration.init_state_name] + # Ruling out the possibility of None for mypy type checking. 
+ assert init_state.interaction.default_outcome is not None default_outcome_dict = init_state.interaction.default_outcome.to_dict() default_outcome_dict['dest'] = exploration.init_state_name exp_services.update_exploration( @@ -2157,6 +2860,14 @@ def test_export_to_zip_file_with_assets(self): exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, @@ -2172,44 +2883,46 @@ def test_export_to_zip_file_with_assets(self): 'new_value': { 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': content_id_generator.generate(( + translation_domain + .ContentType.CUSTOMIZATION_ARG), + extra_prefix='placeholder' + ), 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'property_name': - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 'state_name': 'New state', - 'new_value': 1 - }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'New state', 'old_value': state_domain.SubtitledHtml( - 'content', '').to_dict(), + 'content_3', '').to_dict(), 'new_value': state_domain.SubtitledHtml( - 'content', + 'content_3', '' '').to_dict() + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })], 'Add state name') - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None ) as f: raw_image = f.read() 
- fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, self.EXP_0_ID)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, self.EXP_0_ID) fs.commit('image/abc.png', raw_image) # Audio files should not be included in asset downloads. - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'cafe.mp3'), 'rb', encoding=None ) as f: raw_audio = f.read() @@ -2227,13 +2940,19 @@ def test_export_to_zip_file_with_assets(self): self.SAMPLE_YAML_CONTENT) self.assertEqual(zf.open('assets/image/abc.png').read(), raw_image) - def test_export_by_versions(self): + def test_export_by_versions(self) -> None: """Test export_to_zip_file() for different versions.""" exploration = self.save_new_valid_exploration( - self.EXP_0_ID, self.owner_id, objective='The objective') + self.EXP_0_ID, self.owner_id, objective='The objective', + category='Algebra') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) self.assertEqual(exploration.version, 1) init_state = exploration.states[exploration.init_state_name] + # Ruling out the possibility of None for mypy type checking. 
+ assert init_state.interaction.default_outcome is not None default_outcome_dict = init_state.interaction.default_outcome.to_dict() default_outcome_dict['dest'] = exploration.init_state_name change_list = [exp_domain.ExplorationChange({ @@ -2244,7 +2963,13 @@ def test_export_by_versions(self): 'new_value': default_outcome_dict }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'New state' + 'state_name': 'New state', + 'content_id_for_state_content': content_id_generator.generate( + translation_domain.ContentType.CONTENT), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'New state', @@ -2258,39 +2983,43 @@ def test_export_by_versions(self): 'new_value': { 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': ( + content_id_generator.generate( + translation_domain.ContentType + .CUSTOMIZATION_ARG, + extra_prefix='placeholder') + ), 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } - }), exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'property_name': - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 'state_name': 'New state', - 'new_value': 1 }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'New state', 'old_value': state_domain.SubtitledHtml( - 'content', '').to_dict(), + 'content_3', '').to_dict(), 'new_value': state_domain.SubtitledHtml( - 'content', + 'content_3', '' '').to_dict() + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + })] - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 
'img.png'), 'rb', encoding=None ) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, self.EXP_0_ID)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, self.EXP_0_ID) fs.commit('image/abc.png', raw_image) exp_services.update_exploration( self.owner_id, exploration.id, change_list, '') @@ -2334,26 +3063,29 @@ class YAMLExportUnitTests(ExplorationServicesUnitTests): contents. """ - _SAMPLE_INIT_STATE_CONTENT = ( + _SAMPLE_INIT_STATE_CONTENT: str = ( """card_is_checkpoint: true classifier_model_id: null content: - content_id: content + content_id: content_0 html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: + catchMisspellings: + value: false placeholder: value: - content_id: ca_placeholder_0 + content_id: ca_placeholder_2 unicode_str: '' rows: value: 1 default_outcome: dest: %s + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_1 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null @@ -2363,43 +3095,40 @@ class YAMLExportUnitTests(ExplorationServicesUnitTests): id: TextInput solution: null linked_skill_id: null -next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_placeholder_2: {} + content_0: {} + default_outcome_1: {} solicit_answer_details: false -written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} """) % (feconf.DEFAULT_INIT_STATE_NAME) - SAMPLE_EXPORTED_DICT = { + SAMPLE_EXPORTED_DICT: Final = { feconf.DEFAULT_INIT_STATE_NAME: _SAMPLE_INIT_STATE_CONTENT, 'New state': ( """card_is_checkpoint: false classifier_model_id: null content: - content_id: content + content_id: content_3 html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: + catchMisspellings: + value: false placeholder: 
value: - content_id: ca_placeholder_0 + content_id: ca_placeholder_5 unicode_str: '' rows: value: 1 default_outcome: dest: New state + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_4 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null @@ -2409,44 +3138,41 @@ class YAMLExportUnitTests(ExplorationServicesUnitTests): id: TextInput solution: null linked_skill_id: null -next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_placeholder_5: {} + content_3: {} + default_outcome_4: {} solicit_answer_details: false -written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} """) } - UPDATED_SAMPLE_DICT = { + UPDATED_SAMPLE_DICT: Final = { feconf.DEFAULT_INIT_STATE_NAME: _SAMPLE_INIT_STATE_CONTENT, 'Renamed state': ( """card_is_checkpoint: false classifier_model_id: null content: - content_id: content + content_id: content_3 html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: + catchMisspellings: + value: false placeholder: value: - content_id: ca_placeholder_0 + content_id: ca_placeholder_5 unicode_str: '' rows: value: 1 default_outcome: dest: Renamed state + dest_if_really_stuck: null feedback: - content_id: default_outcome + content_id: default_outcome_4 html: '' labelled_as_correct: false missing_prerequisite_skill_id: null @@ -2456,27 +3182,26 @@ class YAMLExportUnitTests(ExplorationServicesUnitTests): id: TextInput solution: null linked_skill_id: null -next_content_id_index: 1 param_changes: [] recorded_voiceovers: voiceovers_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} + ca_placeholder_5: {} + content_3: {} + default_outcome_4: {} solicit_answer_details: false -written_translations: - translations_mapping: - ca_placeholder_0: {} - content: {} - default_outcome: {} """) } - def 
test_export_to_dict(self): + def test_export_to_dict(self) -> None: """Test the export_to_dict() method.""" exploration = self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, objective='The objective') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) init_state = exploration.states[exploration.init_state_name] + # Ruling out the possibility of None for mypy type checking. + assert init_state.interaction.default_outcome is not None default_outcome_dict = init_state.interaction.default_outcome.to_dict() default_outcome_dict['dest'] = exploration.init_state_name exp_services.update_exploration( @@ -2491,6 +3216,14 @@ def test_export_to_dict(self): exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, @@ -2506,19 +3239,23 @@ def test_export_to_dict(self): 'new_value': { 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': content_id_generator.generate(( + translation_domain + .ContentType.CUSTOMIZATION_ARG), + extra_prefix='placeholder' + ), 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } }), exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'property_name': - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 'state_name': 'New state', - 'new_value': 1 + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + })], 'Add state name') dict_output = exp_services.export_states_to_yaml( @@ -2526,14 +3263,19 @@ def test_export_to_dict(self): 
self.assertEqual(dict_output, self.SAMPLE_EXPORTED_DICT) - def test_export_by_versions(self): + def test_export_by_versions(self) -> None: """Test export_to_dict() for different versions.""" - self.maxDiff = None + self.maxDiff = 0 exploration = self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) self.assertEqual(exploration.version, 1) init_state = exploration.states[exploration.init_state_name] + # Ruling out the possibility of None for mypy type checking. + assert init_state.interaction.default_outcome is not None default_outcome_dict = init_state.interaction.default_outcome.to_dict() default_outcome_dict['dest'] = exploration.init_state_name change_list = [exp_domain.ExplorationChange({ @@ -2544,7 +3286,20 @@ def test_export_by_versions(self): 'new_value': default_outcome_dict }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'New state' + 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'New state', @@ -2558,27 +3313,25 @@ def test_export_by_versions(self): 'new_value': { 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': content_id_generator.generate(( + translation_domain + .ContentType.CUSTOMIZATION_ARG), + extra_prefix='placeholder' + ), 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } - }), exp_domain.ExplorationChange({ - 'cmd': 
exp_domain.CMD_EDIT_STATE_PROPERTY, - 'property_name': - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 'state_name': 'New state', - 'new_value': 1 })] exploration.objective = 'The objective' - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, self.EXP_0_ID)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, self.EXP_0_ID) fs.commit('abc.png', raw_image) exp_services.update_exploration( self.owner_id, exploration.id, change_list, '') @@ -2606,7 +3359,13 @@ def test_export_by_versions(self): self.assertEqual(dict_output, self.UPDATED_SAMPLE_DICT) -def _get_change_list(state_name, property_name, new_value): +# Here new_value argument can accept values of type str, int, bool and other +# types too, so to make the argument generalized for every type of values we +# used Any type here. +def _get_change_list( + state_name: str, + property_name: str, new_value: change_domain.AcceptableChangeDictTypes +) -> List[exp_domain.ExplorationChange]: """Generates a change list for a single state change.""" return [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, @@ -2619,8 +3378,8 @@ def _get_change_list(state_name, property_name, new_value): class UpdateStateTests(ExplorationServicesUnitTests): """Test updating a single state.""" - def setUp(self): - super(UpdateStateTests, self).setUp() + def setUp(self) -> None: + super().setUp() exploration = self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id) @@ -2634,13 +3393,16 @@ def setUp(self): 'generator_id': 'RandomSelector' }] # List of answer groups to add into an interaction. 
- self.interaction_answer_groups = [{ + self.interaction_answer_groups: List[ + state_domain.AnswerGroupDict + ] = [{ 'rule_specs': [{ 'rule_type': 'Equals', 'inputs': {'x': 0}, }], 'outcome': { 'dest': self.init_state_name, + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Try again

    ' @@ -2654,8 +3416,9 @@ def setUp(self): 'tagged_skill_misconception_id': None }] # Default outcome specification for an interaction. - self.interaction_default_outcome = { + self.interaction_default_outcome: state_domain.OutcomeDict = { 'dest': self.init_state_name, + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'default_outcome', 'html': '

    Incorrect

    ' @@ -2666,9 +3429,12 @@ def setUp(self): 'missing_prerequisite_skill_id': None } - def test_add_state_cmd(self): + def test_add_state_cmd(self) -> None: """Test adding of states.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) self.assertNotIn('new state', exploration.states) @@ -2676,21 +3442,89 @@ def test_add_state_cmd(self): self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })], 'Add state name') exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) self.assertIn('new state', exploration.states) - def test_rename_state_cmd(self): + def test_are_changes_mergeable_send_email(self) -> None: + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, self.owner_id) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, + [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'State 1', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + })], 
'Added state') + change_list_same_state_name = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'State 1', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + })] + updated_exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + self.assertFalse(exp_services.are_changes_mergeable( + self.EXP_0_ID, updated_exploration.version - 1, + change_list_same_state_name + )) + + def test_rename_state_cmd(self) -> None: """Test updating of state name.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) self.assertIn(feconf.DEFAULT_INIT_STATE_NAME, exploration.states) exp_services.update_exploration( self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })], 'Add state name') exp_services.update_exploration( @@ -2718,7 +3552,7 @@ def test_rename_state_cmd(self): exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) self.assertIn('new state changed name', exploration.states) - def test_rename_state_cmd_with_unicode(self): + def test_rename_state_cmd_with_unicode(self) -> None: """Test updating of state name to one that uses unicode characters.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) @@ -2736,12 +3570,28 @@ def 
test_rename_state_cmd_with_unicode(self): self.assertIn(u'¡Hola! αβγ', exploration.states) self.assertNotIn(feconf.DEFAULT_INIT_STATE_NAME, exploration.states) - def test_delete_state_cmd(self): + def test_delete_state_cmd(self) -> None: """Test deleting a state name.""" + exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) exp_services.update_exploration( self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })], 'Add state name') exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) @@ -2757,27 +3607,33 @@ def test_delete_state_cmd(self): exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) self.assertNotIn('new state', exploration.states) - def test_update_param_changes(self): + def test_update_param_changes(self) -> None: """Test updating of param_changes.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - exploration.param_specs = { - 'myParam': param_domain.ParamSpec('UnicodeString')} - exp_services._save_exploration(self.owner_id, exploration, '', []) # pylint: disable=protected-access + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'param_specs', + 'new_value': { + 'myParam': {'obj_type': 'UnicodeString'} + } + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list, '') exp_services.update_exploration( self.owner_id, self.EXP_0_ID, 
_get_change_list( self.init_state_name, 'param_changes', self.param_changes), '') exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - param_changes = exploration.init_state.param_changes[0] - self.assertEqual(param_changes._name, 'myParam') # pylint: disable=protected-access - self.assertEqual(param_changes._generator_id, 'RandomSelector') # pylint: disable=protected-access + param_changes = exploration.init_state.param_changes[0].to_dict() + self.assertEqual(param_changes['name'], 'myParam') + self.assertEqual(param_changes['generator_id'], 'RandomSelector') self.assertEqual( - param_changes._customization_args, # pylint: disable=protected-access + param_changes['customization_args'], {'list_of_values': ['1', '2'], 'parse_with_jinja': False}) - def test_update_invalid_param_changes(self): + def test_update_invalid_param_changes(self) -> None: """Check that updates cannot be made to non-existent parameters.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, r'The parameter with name \'myParam\' .* does not exist .*' ): @@ -2789,7 +3645,7 @@ def test_update_invalid_param_changes(self): '' ) - def test_update_reserved_param_changes(self): + def test_update_reserved_param_changes(self) -> None: param_changes = [{ 'customization_args': { 'list_of_values': ['1', '2'], 'parse_with_jinja': False @@ -2797,7 +3653,7 @@ def test_update_reserved_param_changes(self): 'name': 'all', 'generator_id': 'RandomSelector' }] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, re.escape( 'The parameter name \'all\' is reserved. 
Please choose ' @@ -2810,15 +3666,20 @@ def test_update_reserved_param_changes(self): '' ) - def test_update_invalid_generator(self): + def test_update_invalid_generator(self) -> None: """Test for check that the generator_id in param_changes exists.""" - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - exploration.param_specs = { - 'myParam': param_domain.ParamSpec('UnicodeString')} - exp_services._save_exploration(self.owner_id, exploration, '', []) # pylint: disable=protected-access + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'param_specs', + 'new_value': { + 'myParam': {'obj_type': 'UnicodeString'} + } + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list, '') self.param_changes[0]['generator_id'] = 'fake' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid generator ID' ): exp_services.update_exploration( @@ -2829,7 +3690,7 @@ def test_update_invalid_generator(self): '' ) - def test_update_interaction_id(self): + def test_update_interaction_id(self) -> None: """Test updating of interaction_id.""" exp_services.update_exploration( self.owner_id, @@ -2865,7 +3726,7 @@ def test_update_interaction_id(self): self.owner_id, self.EXP_0_ID, _get_change_list( self.init_state_name, 'content', { 'html': '

    Test content

    ', - 'content_id': 'content', + 'content_id': 'content_0', }), '') change_list = _get_change_list( @@ -2894,7 +3755,7 @@ def test_update_interaction_id(self): self.assertEqual( exploration.init_state.interaction.id, 'Continue') - def test_update_interaction_customization_args(self): + def test_update_interaction_customization_args(self) -> None: """Test updating of interaction customization_args.""" exp_services.update_exploration( self.owner_id, self.EXP_0_ID, @@ -2919,8 +3780,23 @@ def test_update_interaction_customization_args(self): '') exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - choices = exploration.init_state.interaction.customization_args[ - 'choices'].value + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance( + exploration.init_state.interaction.customization_args[ + 'choices'].value, + list + ) + # Here we use cast because we are narrowing down the type from + # various customization args value types to List[SubtitledHtml] + # type, and this is done because here we are accessing 'choices' + # key from MultipleChoiceInput customization arg whose value is + # always of List[SubtitledHtml] type. + choices = cast( + List[state_domain.SubtitledHtml], + exploration.init_state.interaction.customization_args[ + 'choices' + ].value + ) self.assertEqual(choices[0].html, '

    Option A

    ') self.assertEqual(choices[0].content_id, 'ca_choices_0') self.assertEqual(choices[1].html, '

    Option B

    ') @@ -2933,7 +3809,7 @@ def test_update_interaction_customization_args(self): self.owner_id, self.EXP_0_ID, _get_change_list( self.init_state_name, 'content', { 'html': '

    Test content

    ', - 'content_id': 'content', + 'content_id': 'content_0', }), '') change_list = _get_change_list( @@ -2958,17 +3834,41 @@ def test_update_interaction_customization_args(self): exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) customization_args = ( exploration.init_state.interaction.customization_args) + # Here we use cast because we are narrowing down the type from various + # customization args value types to 'SubtitledUnicode' type, and this + # is done because here we are accessing 'buttontext' key from continue + # customization arg whose value is always of SubtitledUnicode type. + button_text_subtitle_unicode = cast( + state_domain.SubtitledUnicode, + customization_args['buttonText'].value + ) self.assertEqual( - customization_args['buttonText'].value.unicode_str, + button_text_subtitle_unicode.unicode_str, 'Continue') - def test_update_interaction_handlers_fails(self): + def test_update_interaction_handlers_fails(self) -> None: """Test legacy interaction handler updating.""" + exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) exp_services.update_exploration( self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'State 2', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })] + _get_change_list( 'State 2', @@ -2984,12 +3884,13 @@ def test_update_interaction_handlers_fails(self): 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} }), 'Add state name') 
self.interaction_default_outcome['dest'] = 'State 2' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.InvalidInputException, 'Editing interaction handlers is no longer supported' ): @@ -3005,15 +3906,30 @@ def test_update_interaction_handlers_fails(self): self.interaction_answer_groups), '') - def test_update_interaction_answer_groups(self): + def test_update_interaction_answer_groups(self) -> None: """Test updating of interaction_answer_groups.""" # We create a second state to use as a rule destination. exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) exp_services.update_exploration( self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'State 2', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })] + _get_change_list( 'State 2', @@ -3029,7 +3945,8 @@ def test_update_interaction_answer_groups(self): 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} }), 'Add state name') @@ -3074,6 +3991,8 @@ def test_update_interaction_answer_groups(self): self.assertEqual(rule_specs[0].inputs, {'x': 0}) self.assertEqual(outcome.feedback.html, '

    Try again

    ') self.assertEqual(outcome.dest, self.init_state_name) + # Ruling out the possibility of None for mypy type checking. + assert init_interaction.default_outcome is not None self.assertEqual(init_interaction.default_outcome.dest, 'State 2') change_list = ( @@ -3095,10 +4014,6 @@ def test_update_interaction_answer_groups(self): }, 'showChoicesInShuffledOrder': {'value': False} }) + - _get_change_list( - 'State 2', - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 4) + _get_change_list( 'State 2', exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS, @@ -3109,6 +4024,7 @@ def test_update_interaction_answer_groups(self): }], 'outcome': { 'dest': 'State 2', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_3', 'html': '

    Try again

    ' @@ -3126,6 +4042,7 @@ def test_update_interaction_answer_groups(self): exp_domain.STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME, { 'dest': 'State 2', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'default_outcome', 'html': '

    Incorrect

    ' @@ -3145,6 +4062,8 @@ def test_update_interaction_answer_groups(self): exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) second_state = exploration.states['State 2'] second_state_interaction = second_state.interaction + # Ruling out the possibility of None for mypy type checking. + assert second_state_interaction.default_outcome is not None rule_specs = second_state_interaction.answer_groups[0].rule_specs outcome = second_state_interaction.answer_groups[0].outcome self.assertEqual(rule_specs[0].rule_type, 'Equals') @@ -3154,10 +4073,10 @@ def test_update_interaction_answer_groups(self): self.assertEqual( second_state_interaction.default_outcome.dest, 'State 2') - def test_update_state_invalid_state(self): + def test_update_state_invalid_state(self) -> None: """Test that rule destination states cannot be non-existent.""" self.interaction_answer_groups[0]['outcome']['dest'] = 'INVALID' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The destination INVALID is not a valid state' ): @@ -3192,11 +4111,11 @@ def test_update_state_invalid_state(self): self.interaction_default_outcome), '') - def test_update_state_variable_types(self): + def test_update_state_variable_types(self) -> None: """Test that parameters in rules must have the correct type.""" self.interaction_answer_groups[0]['rule_specs'][0][ 'inputs']['x'] = 'abc' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Value has the wrong type. It should be a NonnegativeInt. ' 'The value is abc' @@ -3217,13 +4136,13 @@ def test_update_state_variable_types(self): self.interaction_default_outcome), '') - def test_update_content(self): + def test_update_content(self) -> None: """Test updating of content.""" exp_services.update_exploration( self.owner_id, self.EXP_0_ID, _get_change_list( self.init_state_name, 'content', { 'html': '

    Test content

    ', - 'content_id': 'content', + 'content_id': 'content_0', }), '') @@ -3232,514 +4151,69 @@ def test_update_content(self): exploration.init_state.content.html, '

    Test content

    ') - def test_add_translation(self): - """Test updating of content.""" - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - self.assertEqual(exploration.get_translation_counts(), {}) - - change_list = _get_change_list( - self.init_state_name, 'content', { - 'html': '

    Test content

    ', - 'content_id': 'content', - }) - - change_list.append(exp_domain.ExplorationChange({ - 'cmd': exp_domain.DEPRECATED_CMD_ADD_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'hi', - 'content_html': '

    Test content

    ', - 'translation_html': '

    Translated text

    ' - })) - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, '') - - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - self.assertEqual(exploration.get_translation_counts(), { - 'hi': 1 - }) - - def test_add_written_translation(self): - """Test updating of content.""" + def test_update_solicit_answer_details(self) -> None: + """Test updating of solicit_answer_details.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - self.assertEqual(exploration.get_translation_counts(), {}) - - change_list = _get_change_list( - self.init_state_name, 'content', { - 'html': '

    Test content

    ', - 'content_id': 'content', - }) - - change_list.append(exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'hi', - 'content_html': '

    original text

    ', - 'translation_html': '

    Translated text

    ', - 'data_format': 'html' - })) + self.assertEqual( + exploration.init_state.solicit_answer_details, False) exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, '') - + self.owner_id, self.EXP_0_ID, _get_change_list( + self.init_state_name, + exp_domain.STATE_PROPERTY_SOLICIT_ANSWER_DETAILS, + True), + '') exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - self.assertEqual(exploration.get_translation_counts(), { - 'hi': 1 - }) + self.assertEqual( + exploration.init_state.solicit_answer_details, True) # Check that the property can be changed when working # on old version. - # Add a change to upgrade the version. + # Adding a content change just to increase the version. exp_services.update_exploration( self.owner_id, self.EXP_0_ID, _get_change_list( - self.init_state_name, - exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS, - { - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'placeholder' - } - }, - 'rows': {'value': 1} + self.init_state_name, 'content', { + 'html': '

    Test content

    ', + 'content_id': 'content_0', }), - 'Add Customization Args') + '') - change_list = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'bn', - 'content_html': '

    original text

    ', - 'translation_html': '

    Translated text 2

    ', - 'data_format': 'html' - })] + change_list = _get_change_list( + self.init_state_name, + exp_domain.STATE_PROPERTY_SOLICIT_ANSWER_DETAILS, + False) changes_are_mergeable = exp_services.are_changes_mergeable( self.EXP_0_ID, 2, change_list) self.assertTrue(changes_are_mergeable) - # Applying changed translation to the old_version. exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, - change_list, + self.owner_id, self.EXP_0_ID, change_list, '') + # Assert that exploration's final version consist of all the + # changes. exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - init_interaction = exploration.init_state.interaction - customization_args = init_interaction.customization_args - self.assertEqual( - customization_args['placeholder'].value.unicode_str, - 'placeholder') - self.assertEqual(exploration.get_translation_counts(), { - 'hi': 1, - 'bn': 1, - }) - - def test_mark_written_translation_as_needing_update(self): - """Test marking of written translation for a given language and content - id as needing update. - """ - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - self.assertEqual(exploration.get_translation_counts(), {}) - # Update the exploration with a content and add corresponding - # translations in two languages. - change_list = [ - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'state_name': self.init_state_name, - 'property_name': 'content', - 'new_value': { - 'html': '

    Test content

    ', - 'content_id': 'content', - } - }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'hi', - 'content_html': '

    Original content

    ', - 'translation_html': '

    Translated text in Hindi

    ', - 'data_format': 'html' - }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'bn', - 'content_html': '

    Original content

    ', - 'translation_html': '

    Translated text in Bangla

    ', - 'data_format': 'html' - }) - ] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, '') - - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - # Assert that there are translations in two languages. - self.assertEqual(exploration.get_translation_counts(), { - 'hi': 1, - 'bn': 1 - }) - # Assert that the written translations are not marked as needing update. - actual_written_translations = ( - exploration.states[self.init_state_name].written_translations) - hindi_written_translation = ( - actual_written_translations.translations_mapping['content']['hi']) - bangla_written_translation = ( - actual_written_translations.translations_mapping['content']['bn']) - self.assertFalse(hindi_written_translation.needs_update) - self.assertFalse(bangla_written_translation.needs_update) - - # Mark all translations for a state as needing update. - update_change_list = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'hi' - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, update_change_list, '') + self.assertEqual( + exploration.init_state.content.html, + '

    Test content

    ') + self.assertEqual( + exploration.init_state.solicit_answer_details, False) + def test_update_solicit_answer_details_with_non_bool_fails(self) -> None: + """Test updating of solicit_answer_details with non bool value.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - # Assert that there are no completed translations and check that the - # needs_update property is set for the corresponding written - # translations. - self.assertEqual(exploration.get_translation_counts(), { - 'bn': 1 - }) - actual_written_translations = ( - exploration.states[self.init_state_name].written_translations) - hindi_written_translation = ( - actual_written_translations.translations_mapping['content']['hi']) - bangla_written_translation = ( - actual_written_translations.translations_mapping['content']['bn']) - self.assertTrue(hindi_written_translation.needs_update) - self.assertFalse(bangla_written_translation.needs_update) - - # Update translations again so the translations are completed. - change_list_2 = [ - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'hi', - 'content_html': '

    Original content

    ', - 'translation_html': '

    Translated text in Hindi

    ', - 'data_format': 'html' - }) - ] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, '') - + self.assertEqual( + exploration.init_state.solicit_answer_details, False) + with self.assertRaisesRegex( + Exception, ( + 'Expected solicit_answer_details to be a bool, received ')): + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, _get_change_list( + self.init_state_name, + exp_domain.STATE_PROPERTY_SOLICIT_ANSWER_DETAILS, + 'abc'), + '') exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - # Assert that the written translations are not marked as needing update. - actual_written_translations = ( - exploration.states[self.init_state_name].written_translations) - hindi_written_translation = ( - actual_written_translations.translations_mapping['content']['hi']) - bangla_written_translation = ( - actual_written_translations.translations_mapping['content']['bn']) - self.assertFalse(hindi_written_translation.needs_update) - self.assertFalse(bangla_written_translation.needs_update) - - # Check that the property can be changed when working - # on old version. - # Add a change to upgrade the version. - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, _get_change_list( - self.init_state_name, - exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS, - { - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'placeholder' - } - }, - 'rows': {'value': 1} - }), - 'Add Customization Args') - - # Mark all translations for a state as needing update in the - # old version. 
- update_change_list_2 = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'hi' - })] - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 4, update_change_list_2) - self.assertTrue(changes_are_mergeable) - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, update_change_list_2, - '') - - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - # Assert that there are no completed translations and check that the - # needs_update property is set for the corresponding written - # translations in the final version. - self.assertEqual(exploration.get_translation_counts(), { - 'bn': 1 - }) - actual_written_translations = ( - exploration.states[self.init_state_name].written_translations) - hindi_written_translation = ( - actual_written_translations.translations_mapping['content']['hi']) - bangla_written_translation = ( - actual_written_translations.translations_mapping['content']['bn']) - self.assertTrue(hindi_written_translation.needs_update) - self.assertFalse(bangla_written_translation.needs_update) - - # Assert that final version has all the changes made above. - init_interaction = exploration.init_state.interaction - customization_args = init_interaction.customization_args - self.assertEqual( - customization_args['placeholder'].value.unicode_str, - 'placeholder') - - def test_mark_written_translations_as_needing_update(self): - """Test marking of written translations in all languages for a - particular content id as needing update. - """ - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - self.assertEqual(exploration.get_translation_counts(), {}) - # Update the exploration with a content and add corresponding - # translations in two languages. 
- change_list = [ - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'state_name': self.init_state_name, - 'property_name': 'content', - 'new_value': { - 'html': '

    Test content

    ', - 'content_id': 'content', - } - }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'hi', - 'content_html': '

    Original content

    ', - 'translation_html': '

    Translated text in Hindi

    ', - 'data_format': 'html' - }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'bn', - 'content_html': '

    Original content

    ', - 'translation_html': '

    Translated text in Bangla

    ', - 'data_format': 'html' - }) - ] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, '') - - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - # Assert that there are translations in two languages. - self.assertEqual(exploration.get_translation_counts(), { - 'hi': 1, - 'bn': 1 - }) - # Assert that the written translations are not marked as needing update. - actual_written_translations = ( - exploration.states[self.init_state_name].written_translations) - hindi_written_translation = ( - actual_written_translations.translations_mapping['content']['hi']) - bangla_written_translation = ( - actual_written_translations.translations_mapping['content']['bn']) - self.assertFalse(hindi_written_translation.needs_update) - self.assertFalse(bangla_written_translation.needs_update) - - # Mark all translations for a state as needing update. - update_change_list = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_MARK_WRITTEN_TRANSLATIONS_AS_NEEDING_UPDATE, - 'state_name': self.init_state_name, - 'content_id': 'content' - })] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, update_change_list, '') - - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - # Assert that there are no completed translations and check that the - # needs_update property is set for the corresponding written - # translations. - self.assertEqual(exploration.get_translation_counts(), {}) - actual_written_translations = ( - exploration.states[self.init_state_name].written_translations) - hindi_written_translation = ( - actual_written_translations.translations_mapping['content']['hi']) - bangla_written_translation = ( - actual_written_translations.translations_mapping['content']['bn']) - self.assertTrue(hindi_written_translation.needs_update) - self.assertTrue(bangla_written_translation.needs_update) - - # Update translations again so the translations are completed. 
- change_list_2 = [ - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'hi', - 'content_html': '

    Original content

    ', - 'translation_html': '

    Translated text in Hindi

    ', - 'data_format': 'html' - }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': self.init_state_name, - 'content_id': 'content', - 'language_code': 'bn', - 'content_html': '

    Original content

    ', - 'translation_html': '

    Translated text in Bangla

    ', - 'data_format': 'html' - }) - ] - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list_2, '') - - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - # Assert that the written translations are not marked as needing update. - actual_written_translations = ( - exploration.states[self.init_state_name].written_translations) - hindi_written_translation = ( - actual_written_translations.translations_mapping['content']['hi']) - bangla_written_translation = ( - actual_written_translations.translations_mapping['content']['bn']) - self.assertFalse(hindi_written_translation.needs_update) - self.assertFalse(bangla_written_translation.needs_update) - - # Check that the property can be changed when working - # on old version. - # Add a change to upgrade the version. - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, _get_change_list( - self.init_state_name, - exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS, - { - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'placeholder' - } - }, - 'rows': {'value': 1} - }), - 'Add Customization Args') - - # Mark all translations for a state as needing update in the - # old version. - update_change_list_2 = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_MARK_WRITTEN_TRANSLATIONS_AS_NEEDING_UPDATE, - 'state_name': self.init_state_name, - 'content_id': 'content' - })] - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 4, update_change_list_2) - self.assertTrue(changes_are_mergeable) - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, update_change_list_2, - '') - - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - - # Assert that there are no completed translations and check that the - # needs_update property is set for the corresponding written - # translations in the final version. 
- self.assertEqual(exploration.get_translation_counts(), {}) - actual_written_translations = ( - exploration.states[self.init_state_name].written_translations) - hindi_written_translation = ( - actual_written_translations.translations_mapping['content']['hi']) - bangla_written_translation = ( - actual_written_translations.translations_mapping['content']['bn']) - self.assertTrue(hindi_written_translation.needs_update) - self.assertTrue(bangla_written_translation.needs_update) - - # Assert that final version has all the changes made above. - init_interaction = exploration.init_state.interaction - customization_args = init_interaction.customization_args - self.assertEqual( - customization_args['placeholder'].value.unicode_str, - 'placeholder') - - def test_update_solicit_answer_details(self): - """Test updating of solicit_answer_details.""" - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - self.assertEqual( - exploration.init_state.solicit_answer_details, False) - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, _get_change_list( - self.init_state_name, - exp_domain.STATE_PROPERTY_SOLICIT_ANSWER_DETAILS, - True), - '') - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - self.assertEqual( - exploration.init_state.solicit_answer_details, True) - - # Check that the property can be changed when working - # on old version. - # Adding a content change just to increase the version. - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, _get_change_list( - self.init_state_name, 'content', { - 'html': '

    Test content

    ', - 'content_id': 'content', - }), - '') - - change_list = _get_change_list( - self.init_state_name, - exp_domain.STATE_PROPERTY_SOLICIT_ANSWER_DETAILS, - False) - changes_are_mergeable = exp_services.are_changes_mergeable( - self.EXP_0_ID, 2, change_list) - self.assertTrue(changes_are_mergeable) - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, change_list, - '') - - # Assert that exploration's final version consist of all the - # changes. - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - self.assertEqual( - exploration.init_state.content.html, - '

    Test content

    ') - self.assertEqual( - exploration.init_state.solicit_answer_details, False) - - def test_update_solicit_answer_details_with_non_bool_fails(self): - """Test updating of solicit_answer_details with non bool value.""" - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - self.assertEqual( - exploration.init_state.solicit_answer_details, False) - with self.assertRaisesRegexp( - Exception, ( - 'Expected solicit_answer_details to be a bool, received ')): - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, _get_change_list( - self.init_state_name, - exp_domain.STATE_PROPERTY_SOLICIT_ANSWER_DETAILS, - 'abc'), - '') - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - self.assertEqual( - exploration.init_state.solicit_answer_details, False) + self.assertEqual( + exploration.init_state.solicit_answer_details, False) # Check that the property can be changed when working # on old version. @@ -3748,7 +4222,7 @@ def test_update_solicit_answer_details_with_non_bool_fails(self): self.owner_id, self.EXP_0_ID, _get_change_list( self.init_state_name, 'content', { 'html': '

    Test content

    ', - 'content_id': 'content', + 'content_id': 'content_0', }), '') change_list = _get_change_list( @@ -3758,7 +4232,7 @@ def test_update_solicit_answer_details_with_non_bool_fails(self): changes_are_mergeable = exp_services.are_changes_mergeable( self.EXP_0_ID, 1, change_list) self.assertTrue(changes_are_mergeable) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, ( 'Expected solicit_answer_details to be a bool, received ')): exp_services.update_exploration( @@ -3773,15 +4247,30 @@ def test_update_solicit_answer_details_with_non_bool_fails(self): self.assertEqual( exploration.init_state.solicit_answer_details, False) - def test_update_linked_skill_id(self): + def test_update_linked_skill_id(self) -> None: """Test updating linked_skill_id.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) self.assertEqual( exploration.init_state.linked_skill_id, None) exp_services.update_exploration( self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'State1', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })], 'Add state name') exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) self.assertEqual( @@ -3803,7 +4292,7 @@ def test_update_linked_skill_id(self): self.owner_id, self.EXP_0_ID, _get_change_list( self.init_state_name, 'content', { 'html': '

    Test content

    ', - 'content_id': 'content', + 'content_id': 'content_0', }), '') @@ -3825,15 +4314,30 @@ def test_update_linked_skill_id(self): self.assertEqual( exploration.states['State1'].linked_skill_id, 'string_2') - def test_update_card_is_checkpoint(self): + def test_update_card_is_checkpoint(self) -> None: """Test updating of card_is_checkpoint.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) self.assertEqual( exploration.init_state.card_is_checkpoint, True) exp_services.update_exploration( self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'State1', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })], 'Add state name') exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) self.assertEqual( @@ -3856,7 +4360,7 @@ def test_update_card_is_checkpoint(self): self.owner_id, self.EXP_0_ID, _get_change_list( self.init_state_name, 'content', { 'html': '

    Test content

    ', - 'content_id': 'content', + 'content_id': 'content_0', }), '') @@ -3878,12 +4382,12 @@ def test_update_card_is_checkpoint(self): self.assertEqual( exploration.states['State1'].card_is_checkpoint, False) - def test_update_card_is_checkpoint_with_non_bool_fails(self): + def test_update_card_is_checkpoint_with_non_bool_fails(self) -> None: """Test updating of card_is_checkpoint with non bool value.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) self.assertEqual( exploration.init_state.card_is_checkpoint, True) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, ( 'Expected card_is_checkpoint to be a bool, received ')): exp_services.update_exploration( @@ -3901,7 +4405,7 @@ def test_update_card_is_checkpoint_with_non_bool_fails(self): self.owner_id, self.EXP_0_ID, _get_change_list( self.init_state_name, 'content', { 'html': '

    Test content

    ', - 'content_id': 'content', + 'content_id': 'content_0', }), '') @@ -3912,7 +4416,7 @@ def test_update_card_is_checkpoint_with_non_bool_fails(self): changes_are_mergeable = exp_services.are_changes_mergeable( self.EXP_0_ID, 1, change_list) self.assertTrue(changes_are_mergeable) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, ( 'Expected card_is_checkpoint to be a bool, received ')): exp_services.update_exploration( @@ -3930,9 +4434,9 @@ def test_update_card_is_checkpoint_with_non_bool_fails(self): self.assertEqual( exploration.init_state.card_is_checkpoint, True) - def test_update_content_missing_key(self): + def test_update_content_missing_key(self) -> None: """Test that missing keys in content yield an error.""" - with self.assertRaisesRegexp(KeyError, 'content_id'): + with self.assertRaisesRegex(KeyError, 'content_id'): exp_services.update_exploration( self.owner_id, self.EXP_0_ID, _get_change_list( self.init_state_name, 'content', { @@ -3940,74 +4444,17 @@ def test_update_content_missing_key(self): }), '') - def test_update_written_translations(self): - """Test update content translations.""" - written_translations_dict = { - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Test!

    ', - 'needs_update': True - } - }, - 'default_outcome': {}, - 'ca_placeholder_0': {} - } - } - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, _get_change_list( - self.init_state_name, 'written_translations', - written_translations_dict), 'Added text translations.') + def test_set_edits_allowed(self) -> None: + """Test update edits allowed field in an exploration.""" exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - self.assertEqual( - exploration.init_state.written_translations.to_dict(), - written_translations_dict) + self.assertEqual(exploration.edits_allowed, True) - def test_update_written_translations_cleans_html_translations(self): - written_translations_dict = { - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '
    OK tag
    ', - 'needs_update': True - } - }, - 'default_outcome': {}, - 'ca_placeholder_0': {} - } - } - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, _get_change_list( - self.init_state_name, 'written_translations', - written_translations_dict), 'Added text translations.') - exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) - self.assertEqual( - exploration.init_state.written_translations.to_dict(), { - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '
    OK tag
    ', - 'needs_update': True - } - }, - 'default_outcome': {}, - 'ca_placeholder_0': {} - } - }) + exp_services.set_exploration_edits_allowed(self.EXP_0_ID, False) - def test_update_written_translations_with_list_fails(self): - """Test update content translation with a list fails.""" - with self.assertRaisesRegexp( - Exception, 'Expected written_translations to be a dict, received '): - exp_services.update_exploration( - self.owner_id, self.EXP_0_ID, _get_change_list( - self.init_state_name, 'written_translations', - [1, 2]), 'Added fake text translations.') + exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + self.assertEqual(exploration.edits_allowed, False) - def test_migrate_exp_to_latest_version_migrates_to_version(self): + def test_migrate_exp_to_latest_version_migrates_to_version(self) -> None: """Test migrate exploration state schema to the latest version.""" latest_schema_version = str(feconf.CURRENT_STATE_SCHEMA_VERSION) migration_change_list = [ @@ -4026,7 +4473,7 @@ def test_migrate_exp_to_latest_version_migrates_to_version(self): str(exploration.states_schema_version), latest_schema_version) - def test_migrate_exp_to_earlier_version_raises_exception(self): + def test_migrate_exp_to_earlier_version_raises_exception(self) -> None: """Test migrate state schema to earlier version raises exception.""" latest_schema_version = feconf.CURRENT_STATE_SCHEMA_VERSION not_latest_schema_version = str(latest_schema_version - 1) @@ -4042,7 +4489,7 @@ def test_migrate_exp_to_earlier_version_raises_exception(self): 'version %s, received %s' % ( latest_schema_version, not_latest_schema_version) ) - with self.assertRaisesRegexp(Exception, exception_string): + with self.assertRaisesRegex(Exception, exception_string): exp_services.update_exploration( self.owner_id, self.EXP_0_ID, migration_change_list, 'Ran Exploration Migration job.') @@ -4051,13 +4498,13 @@ def test_migrate_exp_to_earlier_version_raises_exception(self): class 
CommitMessageHandlingTests(ExplorationServicesUnitTests): """Test the handling of commit messages.""" - def setUp(self): - super(CommitMessageHandlingTests, self).setUp() + def setUp(self) -> None: + super().setUp() exploration = self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id, end_state_name='End') self.init_state_name = exploration.init_state_name - def test_record_commit_message(self): + def test_record_commit_message(self) -> None: """Check published explorations record commit messages.""" rights_manager.publish_exploration(self.owner, self.EXP_0_ID) @@ -4072,11 +4519,11 @@ def test_record_commit_message(self): self.EXP_0_ID)[1]['commit_message'], 'A message') - def test_demand_commit_message(self): + def test_demand_commit_message(self) -> None: """Check published explorations demand commit messages.""" rights_manager.publish_exploration(self.owner, self.EXP_0_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( ValueError, 'Exploration is public so expected a commit message but received ' 'none.' 
@@ -4086,7 +4533,7 @@ def test_demand_commit_message(self): self.init_state_name, exp_domain.STATE_PROPERTY_INTERACTION_STICKY, False), '') - def test_unpublished_explorations_can_accept_commit_message(self): + def test_unpublished_explorations_can_accept_commit_message(self) -> None: """Test unpublished explorations can accept optional commit messages.""" exp_services.update_exploration( self.owner_id, self.EXP_0_ID, _get_change_list( @@ -4110,10 +4557,10 @@ def test_unpublished_explorations_can_accept_commit_message(self): class ExplorationSnapshotUnitTests(ExplorationServicesUnitTests): """Test methods relating to exploration snapshots.""" - SECOND_USERNAME = 'abc123' - SECOND_EMAIL = 'abc123@gmail.com' + SECOND_USERNAME: Final = 'abc123' + SECOND_EMAIL: Final = 'abc123@gmail.com' - def test_get_last_updated_by_human_ms(self): + def test_get_last_updated_by_human_ms(self) -> None: original_timestamp = utils.get_current_time_in_millisecs() self.save_new_valid_exploration( @@ -4136,7 +4583,7 @@ def test_get_last_updated_by_human_ms(self): exp_services.get_last_updated_by_human_ms(self.EXP_0_ID), timestamp_after_first_edit) - def test_get_exploration_snapshots_metadata(self): + def test_get_exploration_snapshots_metadata(self) -> None: self.signup(self.SECOND_EMAIL, self.SECOND_USERNAME) second_committer_id = self.get_user_id_from_email(self.SECOND_EMAIL) @@ -4149,7 +4596,7 @@ def test_get_exploration_snapshots_metadata(self): 'commit_cmds': [{ 'cmd': 'create_new', 'title': 'A title', - 'category': 'A category', + 'category': 'Algebra', }], 'committer_id': self.owner_id, 'commit_message': ( @@ -4170,7 +4617,7 @@ def test_get_exploration_snapshots_metadata(self): 'commit_cmds': [{ 'cmd': 'create_new', 'title': 'A title', - 'category': 'A category' + 'category': 'Algebra' }], 'committer_id': self.owner_id, 'commit_message': ( @@ -4198,7 +4645,7 @@ def test_get_exploration_snapshots_metadata(self): 'commit_cmds': [{ 'cmd': 'create_new', 'title': 'A title', - 
'category': 'A category' + 'category': 'Algebra' }], 'committer_id': self.owner_id, 'commit_message': ( @@ -4218,9 +4665,12 @@ def test_get_exploration_snapshots_metadata(self): snapshots_metadata[1]['created_on_ms']) # Using the old version of the exploration should raise an error. - with self.assertRaisesRegexp(Exception, 'version 1, which is too old'): - exp_services._save_exploration( # pylint: disable=protected-access - second_committer_id, v1_exploration, '', []) + change_list_swap = self.swap_to_always_return( + exp_services, 'apply_change_list', value=v1_exploration) + with change_list_swap, self.assertRaisesRegex( + Exception, 'version 1, which is too old'): + exp_services.update_exploration( + second_committer_id, self.EXP_0_ID, None, 'commit_message') # Another person modifies the exploration. new_change_list = [exp_domain.ExplorationChange({ @@ -4241,7 +4691,7 @@ def test_get_exploration_snapshots_metadata(self): 'commit_cmds': [{ 'cmd': 'create_new', 'title': 'A title', - 'category': 'A category' + 'category': 'Algebra' }], 'committer_id': self.owner_id, 'commit_message': ( @@ -4267,14 +4717,17 @@ def test_get_exploration_snapshots_metadata(self): snapshots_metadata[1]['created_on_ms'], snapshots_metadata[2]['created_on_ms']) - def test_versioning_with_add_and_delete_states(self): + def test_versioning_with_add_and_delete_states(self) -> None: exploration = self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id) - - exploration.title = 'First title' - exp_services._save_exploration( # pylint: disable=protected-access - self.owner_id, exploration, 'Changed title.', []) + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'First title' + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list, 'Changed title.') commit_dict_2 = { 'committer_id': self.owner_id, 'commit_message': 'Changed title.', @@ -4285,9 +4738,24 @@ def 
test_versioning_with_add_and_delete_states(self): self.assertEqual(len(snapshots_metadata), 2) exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) change_list = [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'New state' + 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'New state', @@ -4305,7 +4773,8 @@ def test_versioning_with_add_and_delete_states(self): 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } })] exp_services.update_exploration( @@ -4330,7 +4799,7 @@ def test_versioning_with_add_and_delete_states(self): # Perform an invalid action: delete a state that does not exist. This # should not create a new version. - with self.assertRaisesRegexp(ValueError, 'does not exist'): + with self.assertRaisesRegex(ValueError, 'does not exist'): exploration.delete_state('invalid_state_name') # Now delete the new state. @@ -4362,21 +4831,40 @@ def test_versioning_with_add_and_delete_states(self): exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) self.assertEqual(len(exploration.states), 1) - def test_versioning_with_reverting(self): + def test_versioning_with_reverting(self) -> None: exploration = self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id) # In version 1, the title was 'A title'. # In version 2, the title becomes 'V2 title'. 
- exploration.title = 'V2 title' - exp_services._save_exploration( # pylint: disable=protected-access - self.owner_id, exploration, 'Changed title.', []) + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'V2 title' + })] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list, 'Changed title.') # In version 3, a new state is added. exploration = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) change_list = [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'New state' + 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'New state', @@ -4394,7 +4882,8 @@ def test_versioning_with_reverting(self): 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } })] exp_services.update_exploration( @@ -4402,7 +4891,7 @@ def test_versioning_with_reverting(self): # It is not possible to revert from anything other than the most # current version. 
- with self.assertRaisesRegexp(Exception, 'too old'): + with self.assertRaisesRegex(Exception, 'too old'): exp_services.revert_exploration( 'committer_id_v4', self.EXP_0_ID, 2, 1) @@ -4433,9 +4922,12 @@ def test_versioning_with_reverting(self): snapshots_metadata[2]['created_on_ms'], snapshots_metadata[3]['created_on_ms']) - def test_get_composite_change_list(self): + def test_get_composite_change_list(self) -> None: exploration = self.save_new_valid_exploration( self.EXP_0_ID, self.owner_id) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) # Upgrade to version 2. exp_services.update_exploration( @@ -4449,7 +4941,18 @@ def test_get_composite_change_list(self): # Change list for version 3. change_list = [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'New state' + 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME)) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': 'New state', @@ -4469,7 +4972,8 @@ def test_get_composite_change_list(self): 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } })] exp_services.update_exploration( @@ -4488,7 +4992,14 @@ def test_get_composite_change_list(self): # Complete change list from version 1 to 4. 
composite_change_list_dict_expected = [{ 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'New state' + 'state_name': 'New state', + 'content_id_for_state_content': 'content_3', + 'content_id_for_default_outcome': 'default_outcome_4' + }, { + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': 5, + 'old_value': None }, { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'old_value': None, @@ -4508,14 +5019,15 @@ def test_get_composite_change_list(self): 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } }, { 'cmd': exp_domain.CMD_DELETE_STATE, 'state_name': 'New state' }] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unexpected error: Trying to find change list from version %s ' 'of exploration to version %s.' @@ -4530,49 +5042,409 @@ def test_get_composite_change_list(self): self.assertEqual( composite_change_list_dict_expected, composite_change_list_dict) + def test_reverts_exp_to_safe_state_when_content_model_is_missing( + self + ) -> None: + self.save_new_valid_exploration('0', self.owner_id) + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 1') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 2') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 
'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 3') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 4') -class ExplorationCommitLogUnitTests(ExplorationServicesUnitTests): - """Test methods relating to the exploration commit log.""" - - ALBERT_EMAIL = 'albert@example.com' - BOB_EMAIL = 'bob@example.com' - ALBERT_NAME = 'albert' - BOB_NAME = 'bob' - - EXP_ID_1 = 'eid1' - EXP_ID_2 = 'eid2' + version = exp_services.rollback_exploration_to_safe_state('0') + self.assertEqual(version, 5) - COMMIT_ALBERT_CREATE_EXP_1 = { - 'version': 1, - 'exploration_id': EXP_ID_1, - 'commit_type': 'create', - 'post_commit_community_owned': False, - 'post_commit_is_private': True, - 'commit_message': 'New exploration created with title \'A title\'.', - 'post_commit_status': 'private' - } + snapshot_content_model = ( + exp_models.ExplorationSnapshotContentModel.get( + '0-5', strict=True)) + snapshot_content_model.delete() - COMMIT_BOB_EDIT_EXP_1 = { - 'version': 2, - 'exploration_id': EXP_ID_1, - 'commit_type': 'edit', - 'post_commit_community_owned': False, - 'post_commit_is_private': True, - 'commit_message': 'Changed title.', - 'post_commit_status': 'private' - } + version = exp_services.rollback_exploration_to_safe_state('0') + self.assertEqual(version, 4) - COMMIT_ALBERT_CREATE_EXP_2 = { - 'version': 1, - 'exploration_id': 'eid2', - 'commit_type': 'create', - 'post_commit_community_owned': False, - 'post_commit_is_private': True, - 'commit_message': 'New exploration created with title \'A title\'.', - 'post_commit_status': 'private' + def 
test_reverts_exp_to_safe_state_when_several_models_are_missing( + self + ) -> None: + self.save_new_valid_exploration('0', self.owner_id) + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 1') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 2') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 3') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 4') + + version = exp_services.rollback_exploration_to_safe_state('0') + self.assertEqual(version, 5) + + snapshot_content_model = ( + exp_models.ExplorationSnapshotContentModel.get( + '0-5', strict=True)) + snapshot_content_model.delete() + snapshot_metadata_model = ( + exp_models.ExplorationSnapshotMetadataModel.get( + '0-4', strict=True)) + snapshot_metadata_model.delete() + + version = exp_services.rollback_exploration_to_safe_state('0') + self.assertEqual(version, 3) + + def 
test_reverts_exp_to_safe_state_when_metadata_model_is_missing( + self + ) -> None: + self.save_new_valid_exploration('0', self.owner_id) + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 1') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 2') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 3') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 4') + + version = exp_services.rollback_exploration_to_safe_state('0') + self.assertEqual(version, 5) + + snapshot_metadata_model = ( + exp_models.ExplorationSnapshotMetadataModel.get( + '0-5', strict=True)) + snapshot_metadata_model.delete() + + version = exp_services.rollback_exploration_to_safe_state('0') + self.assertEqual(version, 4) + + def test_reverts_exp_to_safe_state_when_both_models_are_missing( + self + ) -> None: + self.save_new_valid_exploration('0', self.owner_id) + 
exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 1') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 2') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 3') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 4') + + version = exp_services.rollback_exploration_to_safe_state('0') + self.assertEqual(version, 5) + + snapshot_content_model = ( + exp_models.ExplorationSnapshotContentModel.get( + '0-5', strict=True)) + snapshot_content_model.delete() + + snapshot_metadata_model = ( + exp_models.ExplorationSnapshotMetadataModel.get( + '0-5', strict=True)) + snapshot_metadata_model.delete() + + version = exp_services.rollback_exploration_to_safe_state('0') + self.assertEqual(version, 4) + + def test_does_not_revert_exp_when_no_models_are_missing(self) -> None: + self.save_new_valid_exploration('0', self.owner_id) + exp_services.update_exploration( + 
self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 1') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 2') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 3') + exp_services.update_exploration( + self.owner_id, '0', [exp_domain.ExplorationChange({ + 'new_value': { + 'content_id': 'content_0', + 'html': 'content 1' + }, + 'state_name': 'Introduction', + 'old_value': { + 'content_id': 'content_0', + 'html': '' + }, + 'cmd': 'edit_state_property', + 'property_name': 'content' + })], 'Update 4') + + version = exp_services.rollback_exploration_to_safe_state('0') + + self.assertEqual(version, 5) + + +class ExplorationCommitLogUnitTests(ExplorationServicesUnitTests): + """Test methods relating to the exploration commit log.""" + + ALBERT_EMAIL: Final = 'albert@example.com' + BOB_EMAIL: Final = 'bob@example.com' + ALBERT_NAME: Final = 'albert' + BOB_NAME: Final = 'bob' + + EXP_ID_1: Final = 'eid1' + EXP_ID_2: Final = 'eid2' + + COMMIT_ALBERT_CREATE_EXP_1: Final = { + 'version': 1, + 'exploration_id': EXP_ID_1, + 'commit_type': 'create', + 'post_commit_community_owned': False, + 'post_commit_is_private': True, + 'commit_message': 'New exploration created with title \'A 
title\'.', + 'post_commit_status': 'private' + } + + COMMIT_BOB_EDIT_EXP_1: Final = { + 'version': 2, + 'exploration_id': EXP_ID_1, + 'commit_type': 'edit', + 'post_commit_community_owned': False, + 'post_commit_is_private': True, + 'commit_message': 'Changed title.', + 'post_commit_status': 'private' + } + + COMMIT_ALBERT_CREATE_EXP_2: Final = { + 'version': 1, + 'exploration_id': 'eid2', + 'commit_type': 'create', + 'post_commit_community_owned': False, + 'post_commit_is_private': True, + 'commit_message': 'New exploration created with title \'A title\'.', + 'post_commit_status': 'private' } - COMMIT_ALBERT_EDIT_EXP_1 = { + COMMIT_ALBERT_EDIT_EXP_1: Final = { 'version': 3, 'exploration_id': 'eid1', 'commit_type': 'edit', @@ -4582,7 +5454,7 @@ class ExplorationCommitLogUnitTests(ExplorationServicesUnitTests): 'post_commit_status': 'private' } - COMMIT_ALBERT_EDIT_EXP_2 = { + COMMIT_ALBERT_EDIT_EXP_2: Final = { 'version': 2, 'exploration_id': 'eid2', 'commit_type': 'edit', @@ -4592,7 +5464,7 @@ class ExplorationCommitLogUnitTests(ExplorationServicesUnitTests): 'post_commit_status': 'private' } - COMMIT_BOB_REVERT_EXP_1 = { + COMMIT_BOB_REVERT_EXP_1: Final = { 'username': 'bob', 'version': 4, 'exploration_id': 'eid1', @@ -4603,7 +5475,7 @@ class ExplorationCommitLogUnitTests(ExplorationServicesUnitTests): 'post_commit_status': 'private' } - COMMIT_ALBERT_DELETE_EXP_1 = { + COMMIT_ALBERT_DELETE_EXP_1: Final = { 'version': 5, 'exploration_id': 'eid1', 'commit_type': 'delete', @@ -4613,7 +5485,7 @@ class ExplorationCommitLogUnitTests(ExplorationServicesUnitTests): 'post_commit_status': 'private' } - COMMIT_ALBERT_PUBLISH_EXP_2 = { + COMMIT_ALBERT_PUBLISH_EXP_2: Final = { 'version': None, 'exploration_id': 'eid2', 'commit_type': 'edit', @@ -4623,7 +5495,7 @@ class ExplorationCommitLogUnitTests(ExplorationServicesUnitTests): 'post_commit_status': 'public' } - def setUp(self): + def setUp(self) -> None: """Populate the database of explorations to be queried against. 
The sequence of events is: @@ -4637,7 +5509,7 @@ def setUp(self): - Bob tries to publish EXP_ID_2, and is denied access. - (8) Albert publishes EXP_ID_2. """ - super(ExplorationCommitLogUnitTests, self).setUp() + super().setUp() self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.signup(self.BOB_EMAIL, self.BOB_NAME) @@ -4646,33 +5518,46 @@ def setUp(self): self.albert = user_services.get_user_actions_info(self.albert_id) self.bob = user_services.get_user_actions_info(self.bob_id) - def populate_datastore(): + def populate_datastore() -> None: """Populates the database according to the sequence.""" - exploration_1 = self.save_new_valid_exploration( + self.save_new_valid_exploration( self.EXP_ID_1, self.albert_id) - exploration_1.title = 'Exploration 1 title' - exp_services._save_exploration( # pylint: disable=protected-access - self.bob_id, exploration_1, 'Changed title.', []) + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'Exploration 1 title' + })] + exp_services.update_exploration( + self.bob_id, self.EXP_ID_1, change_list, 'Changed title.') - exploration_2 = self.save_new_valid_exploration( + self.save_new_valid_exploration( self.EXP_ID_2, self.albert_id) - exploration_1.title = 'Exploration 1 Albert title' - exp_services._save_exploration( # pylint: disable=protected-access - self.albert_id, exploration_1, - 'Changed title to Albert1 title.', []) + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'Exploration 1 Albert title' + })] + exp_services.update_exploration( + self.albert_id, self.EXP_ID_1, + change_list, 'Changed title to Albert1 title.') - exploration_2.title = 'Exploration 2 Albert title' - exp_services._save_exploration( # pylint: disable=protected-access - self.albert_id, exploration_2, 'Changed title to Albert2.', []) + change_list = 
[exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'Exploration 2 Albert title' + })] + exp_services.update_exploration( + self.albert_id, self.EXP_ID_2, + change_list, 'Changed title to Albert2.') exp_services.revert_exploration(self.bob_id, self.EXP_ID_1, 3, 2) exp_services.delete_exploration(self.albert_id, self.EXP_ID_1) # This commit should not be recorded. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This exploration cannot be published' ): rights_manager.publish_exploration(self.bob, self.EXP_ID_2) @@ -4681,15 +5566,19 @@ def populate_datastore(): populate_datastore() + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. def test_get_next_page_of_all_non_private_commits_with_invalid_max_age( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'max_age must be a datetime.timedelta instance. or None.'): exp_services.get_next_page_of_all_non_private_commits( - max_age='invalid_max_age') + max_age='invalid_max_age') # type: ignore[arg-type] - def test_get_next_page_of_all_non_private_commits(self): + def test_get_next_page_of_all_non_private_commits(self) -> None: all_commits = ( exp_services.get_next_page_of_all_non_private_commits()[0]) self.assertEqual(len(all_commits), 1) @@ -4699,14 +5588,46 @@ def test_get_next_page_of_all_non_private_commits(self): # TODO(frederikcreemers@gmail.com): Test max_age here. + def test_raises_error_if_solution_is_provided_without_interaction_id( + self + ) -> None: + exploration = exp_domain.Exploration.create_default_exploration( + 'test_id', 'title', 'Home') + exp_services.save_new_exploration('Test_user', exploration) + + state_solution_dict: state_domain.SolutionDict = { + 'answer_is_exclusive': True, + 'correct_answer': [ + '

    state customization arg html 1

    ', + '

    state customization arg html 2

    ', + '

    state customization arg html 3

    ', + '

    state customization arg html 4

    ' + ], + 'explanation': { + 'content_id': 'solution', + 'html': '

    This is solution for state1

    ' + } + } + change_list = exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': 'Home', + 'property_name': exp_domain.STATE_PROPERTY_INTERACTION_SOLUTION, + 'new_value': state_solution_dict, + }) + with self.assertRaisesRegex( + Exception, + 'solution cannot exist with None interaction id.' + ): + exp_services.apply_change_list('test_id', [change_list]) + class ExplorationSearchTests(ExplorationServicesUnitTests): """Test exploration search.""" - USER_ID_1 = 'user_1' - USER_ID_2 = 'user_2' + USER_ID_1: Final = 'user_1' + USER_ID_2: Final = 'user_2' - def test_index_explorations_given_ids(self): + def test_index_explorations_given_ids(self) -> None: all_exp_ids = ['id0', 'id1', 'id2', 'id3', 'id4'] expected_exp_ids = all_exp_ids[:-1] all_exp_titles = [ @@ -4715,7 +5636,9 @@ def test_index_explorations_given_ids(self): all_exp_categories = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4'] expected_exp_categories = all_exp_categories[:-1] - def mock_add_documents_to_index(docs, index): + def mock_add_documents_to_index( + docs: List[Dict[str, str]], index: str + ) -> List[str]: self.assertEqual(index, exp_services.SEARCH_INDEX_EXPLORATIONS) ids = [doc['id'] for doc in docs] titles = [doc['title'] for doc in docs] @@ -4749,7 +5672,7 @@ def mock_add_documents_to_index(docs, index): self.assertEqual(add_docs_counter.times_called, 1) - def test_updated_exploration_is_added_correctly_to_index(self): + def test_updated_exploration_is_added_correctly_to_index(self) -> None: exp_id = 'id0' exp_title = 'title 0' exp_category = 'cat0' @@ -4772,7 +5695,9 @@ def test_updated_exploration_is_added_correctly_to_index(self): 'title': 'title 0' } - def mock_add_documents_to_index(docs, index): + def mock_add_documents_to_index( + docs: List[Dict[str, str]], index: str + ) -> None: self.assertEqual(index, exp_services.SEARCH_INDEX_EXPLORATIONS) actual_docs.extend(docs) @@ -4803,7 +5728,7 @@ def mock_add_documents_to_index(docs, index): 
self.assertEqual(actual_docs, [updated_exp_doc]) self.assertEqual(add_docs_counter.times_called, 3) - def test_get_number_of_ratings(self): + def test_get_number_of_ratings(self) -> None: self.save_new_valid_exploration(self.EXP_0_ID, self.owner_id) exp = exp_fetchers.get_exploration_summary_by_id(self.EXP_0_ID) @@ -4828,13 +5753,16 @@ def test_get_number_of_ratings(self): self.assertEqual( exp_services.get_number_of_ratings(exp.ratings), 3) - def test_get_average_rating(self): + def test_get_average_rating(self) -> None: self.save_new_valid_exploration(self.EXP_0_ID, self.owner_id) exp = exp_fetchers.get_exploration_summary_by_id(self.EXP_0_ID) self.assertEqual( exp_services.get_average_rating(exp.ratings), 0) + self.assertEqual( + exp_services.get_average_rating({}), 0) + rating_services.assign_rating_to_exploration( self.owner_id, self.EXP_0_ID, 5) self.assertEqual( @@ -4847,7 +5775,7 @@ def test_get_average_rating(self): self.assertEqual( exp_services.get_average_rating(exp.ratings), 3.5) - def test_get_lower_bound_wilson_rating_from_exp_summary(self): + def test_get_lower_bound_wilson_rating_from_exp_summary(self) -> None: self.save_new_valid_exploration(self.EXP_0_ID, self.owner_id) exp = exp_fetchers.get_exploration_summary_by_id(self.EXP_0_ID) @@ -4868,7 +5796,7 @@ def test_get_lower_bound_wilson_rating_from_exp_summary(self): exp_services.get_scaled_average_rating(exp.ratings), 2.056191454757, places=4) - def test_valid_demo_file_path(self): + def test_valid_demo_file_path(self) -> None: for filename in os.listdir(feconf.SAMPLE_EXPLORATIONS_DIR): full_filepath = os.path.join( feconf.SAMPLE_EXPLORATIONS_DIR, filename) @@ -4877,8 +5805,9 @@ def test_valid_demo_file_path(self): self.assertTrue(valid_exploration_path) def test_get_demo_exploration_components_with_invalid_path_raises_error( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Unrecognized file path: invalid_path'): 
exp_services.get_demo_exploration_components('invalid_path') @@ -4886,22 +5815,22 @@ def test_get_demo_exploration_components_with_invalid_path_raises_error( class ExplorationSummaryTests(ExplorationServicesUnitTests): """Test exploration summaries.""" - ALBERT_EMAIL = 'albert@example.com' - BOB_EMAIL = 'bob@example.com' - ALBERT_NAME = 'albert' - BOB_NAME = 'bob' + ALBERT_EMAIL: Final = 'albert@example.com' + BOB_EMAIL: Final = 'bob@example.com' + ALBERT_NAME: Final = 'albert' + BOB_NAME: Final = 'bob' - EXP_ID_1 = 'eid1' - EXP_ID_2 = 'eid2' + EXP_ID_1: Final = 'eid1' + EXP_ID_2: Final = 'eid2' - def setUp(self): - super(ExplorationSummaryTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.signup(self.BOB_EMAIL, self.BOB_NAME) self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL) self.bob_id = self.get_user_id_from_email(self.BOB_EMAIL) - def test_is_exp_summary_editable(self): + def test_is_exp_summary_editable(self) -> None: self.save_new_default_exploration(self.EXP_0_ID, self.owner_id) # Check that only the owner may edit. @@ -4930,7 +5859,7 @@ def test_is_exp_summary_editable(self): self.assertFalse(exp_services.is_exp_summary_editable( exp_summary, user_id=self.viewer_id)) - def test_contributors_not_updated_on_revert(self): + def test_contributors_not_updated_on_revert(self) -> None: """Test that a user who only makes a revert on an exploration is not counted in the list of that exploration's contributors. """ @@ -4953,7 +5882,9 @@ def test_contributors_not_updated_on_revert(self): self.EXP_ID_1) self.assertEqual([self.albert_id], exploration_summary.contributor_ids) - def _check_contributors_summary(self, exp_id, expected): + def _check_contributors_summary( + self, exp_id: str, expected: Dict[str, int] + ) -> None: """Check if contributors summary of the given exp is same as expected. 
Args: @@ -4968,7 +5899,7 @@ def _check_contributors_summary(self, exp_id, expected): exp_id).contributors_summary self.assertEqual(expected, contributors_summary) - def test_contributors_summary(self): + def test_contributors_summary(self) -> None: # Have Albert create a new exploration. Version 1. self.save_new_valid_exploration(self.EXP_ID_1, self.albert_id) self._check_contributors_summary(self.EXP_ID_1, {self.albert_id: 1}) @@ -5010,13 +5941,18 @@ def test_contributors_summary(self): self._check_contributors_summary( self.EXP_ID_1, {self.albert_id: 1, self.bob_id: 2}) - def test_get_exploration_summary_by_id_with_invalid_exploration_id(self): + def test_get_exploration_summary_by_id_with_invalid_exploration_id( + self + ) -> None: exploration_summary = exp_fetchers.get_exploration_summary_by_id( - 'invalid_exploration_id') + 'invalid_exploration_id', strict=False + ) self.assertIsNone(exploration_summary) - def test_create_exploration_summary_with_deleted_contributor(self): + def test_create_exploration_summary_with_deleted_contributor( + self + ) -> None: self.save_new_valid_exploration( self.EXP_ID_1, self.albert_id) exp_services.update_exploration( @@ -5043,23 +5979,76 @@ def test_create_exploration_summary_with_deleted_contributor(self): self._check_contributors_summary( self.EXP_ID_1, {self.albert_id: 1}) + def test_regenerate_summary_with_new_contributor_with_invalid_exp_id( + self + ) -> None: + observed_log_messages = [] + + def _mock_logging_function(msg: str, *args: str) -> None: + """Mocks logging.error().""" + observed_log_messages.append(msg % args) + + logging_swap = self.swap(logging, 'error', _mock_logging_function) + with logging_swap: + exp_services.regenerate_exploration_summary_with_new_contributor( + 'dummy_id', self.albert_id) + + self.assertEqual( + observed_log_messages, + ['Could not find exploration with ID dummy_id'] + ) + + def test_raises_error_while_creating_summary_if_no_created_on_data_present( + self + ) -> None: + 
self.save_new_valid_exploration('exp_id', 'owner_id') + exploration = exp_fetchers.get_exploration_by_id('exp_id') + exp_rights = rights_manager.get_exploration_rights( + 'exp_id', strict=True) + exploration.created_on = None + with self.assertRaisesRegex( + Exception, 'No data available for when the exploration was' + ): + exp_services.generate_new_exploration_summary( + exploration, exp_rights + ) + + def test_raises_error_while_updating_summary_if_no_created_on_data_present( + self + ) -> None: + self.save_new_valid_exploration('exp_id', 'owner_id') + exploration = exp_fetchers.get_exploration_by_id('exp_id') + exp_rights = rights_manager.get_exploration_rights( + 'exp_id', strict=True) + + exp_summary = exp_services.generate_new_exploration_summary( + exploration, exp_rights + ) + exploration.created_on = None + with self.assertRaisesRegex( + Exception, 'No data available for when the exploration was' + ): + exp_services.update_exploration_summary( + exploration, exp_rights, exp_summary + ) + class ExplorationSummaryGetTests(ExplorationServicesUnitTests): """Test exploration summaries get_* functions.""" - ALBERT_EMAIL = 'albert@example.com' - BOB_EMAIL = 'bob@example.com' - ALBERT_NAME = 'albert' - BOB_NAME = 'bob' + ALBERT_EMAIL: Final = 'albert@example.com' + BOB_EMAIL: Final = 'bob@example.com' + ALBERT_NAME: Final = 'albert' + BOB_NAME: Final = 'bob' - EXP_ID_1 = 'eid1' - EXP_ID_2 = 'eid2' - EXP_ID_3 = 'eid3' + EXP_ID_1: Final = 'eid1' + EXP_ID_2: Final = 'eid2' + EXP_ID_3: Final = 'eid3' - EXPECTED_VERSION_1 = 4 - EXPECTED_VERSION_2 = 2 + EXPECTED_VERSION_1: Final = 4 + EXPECTED_VERSION_2: Final = 2 - def setUp(self): + def setUp(self) -> None: """Populate the database of explorations and their summaries. The sequence of events is: @@ -5075,7 +6064,7 @@ def setUp(self): - (9) Albert publishes EXP_ID_3. - (10) Albert deletes EXP_ID_3. 
""" - super(ExplorationSummaryGetTests, self).setUp() + super().setUp() self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.signup(self.BOB_EMAIL, self.BOB_NAME) @@ -5111,7 +6100,7 @@ def setUp(self): exp_services.revert_exploration(self.bob_id, self.EXP_ID_1, 3, 2) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This exploration cannot be published' ): rights_manager.publish_exploration(self.bob, self.EXP_ID_2) @@ -5122,14 +6111,14 @@ def setUp(self): rights_manager.publish_exploration(self.albert, self.EXP_ID_3) exp_services.delete_exploration(self.albert_id, self.EXP_ID_3) - def test_get_non_private_exploration_summaries(self): + def test_get_non_private_exploration_summaries(self) -> None: actual_summaries = exp_services.get_non_private_exploration_summaries() expected_summaries = { self.EXP_ID_2: exp_domain.ExplorationSummary( self.EXP_ID_2, 'Exploration 2 Albert title', - 'A category', 'An objective', 'en', [], + 'Algebra', 'An objective', 'en', [], feconf.get_empty_ratings(), feconf.EMPTY_SCALED_AVERAGE_RATING, rights_domain.ACTIVITY_STATUS_PUBLIC, False, [self.albert_id], [], [], [], [self.albert_id], @@ -5158,13 +6147,13 @@ def test_get_non_private_exploration_summaries(self): getattr(actual_summary, prop), getattr(expected_summaries[exp_id], prop)) - def test_get_all_exploration_summaries(self): + def test_get_all_exploration_summaries(self) -> None: actual_summaries = exp_services.get_all_exploration_summaries() expected_summaries = { self.EXP_ID_1: exp_domain.ExplorationSummary( self.EXP_ID_1, 'Exploration 1 title', - 'A category', 'An objective', 'en', [], + 'Algebra', 'An objective', 'en', [], feconf.get_empty_ratings(), feconf.EMPTY_SCALED_AVERAGE_RATING, rights_domain.ACTIVITY_STATUS_PRIVATE, False, [self.albert_id], [], [], [], [self.albert_id, self.bob_id], @@ -5175,7 +6164,7 @@ def test_get_all_exploration_summaries(self): ), self.EXP_ID_2: exp_domain.ExplorationSummary( self.EXP_ID_2, 'Exploration 2 Albert title', - 
'A category', 'An objective', 'en', [], + 'Algebra', 'An objective', 'en', [], feconf.get_empty_ratings(), feconf.EMPTY_SCALED_AVERAGE_RATING, rights_domain.ACTIVITY_STATUS_PUBLIC, False, [self.albert_id], [], [], [], [self.albert_id], @@ -5189,19 +6178,133 @@ def test_get_all_exploration_summaries(self): # Check actual summaries equal expected summaries. self.assertItemsEqual(actual_summaries, expected_summaries) + def test_get_top_rated_exploration_summaries(self) -> None: + exploration_summaries = ( + exp_services.get_top_rated_exploration_summaries(3)) + top_rated_summaries = ( + exp_models.ExpSummaryModel.get_top_rated(3)) + top_rated_summaries_model = ( + exp_fetchers.get_exploration_summaries_from_models( + top_rated_summaries)) + self.assertItemsEqual(exploration_summaries, top_rated_summaries_model) + + def test_get_recently_published_exp_summaries(self) -> None: + self.save_new_valid_exploration(self.EXP_0_ID, self.owner_id) + self.save_new_valid_exploration(self.EXP_1_ID, self.owner_id) + self.save_new_valid_exploration(self.EXP_2_ID, self.owner_id) + rights_manager.publish_exploration(self.owner, self.EXP_0_ID) + rights_manager.publish_exploration(self.owner, self.EXP_1_ID) + rights_manager.publish_exploration(self.owner, self.EXP_2_ID) + exploration_summaries = ( + exp_services.get_recently_published_exp_summaries(3) + ) + recently_published_summaries = ( + exp_models.ExpSummaryModel.get_recently_published(3)) + recently_publshed_summaries_model = ( + exp_fetchers.get_exploration_summaries_from_models( + recently_published_summaries)) + self.assertEqual(len(exploration_summaries), 3) + self.assertItemsEqual( + exploration_summaries, + recently_publshed_summaries_model) + + def test_get_story_id_linked_to_exploration(self) -> None: + self.assertIsNone( + exp_services.get_story_id_linked_to_exploration(self.EXP_ID_1)) + story_id = story_services.get_new_story_id() + topic_id = topic_fetchers.get_new_topic_id() + self.save_new_topic( + topic_id, 
self.albert_id, name='Topic', + abbreviated_name='topic-one', url_fragment='topic-one', + description='A new topic', + canonical_story_ids=[], additional_story_ids=[], + uncategorized_skill_ids=['skill_4'], subtopics=[], + next_subtopic_id=0) + self.save_new_story(story_id, self.albert_id, topic_id) + topic_services.add_canonical_story(self.albert_id, topic_id, story_id) + change_list = [ + story_domain.StoryChange({ + 'cmd': story_domain.CMD_ADD_STORY_NODE, + 'node_id': story_domain.NODE_ID_PREFIX + '1', + 'title': 'Title 1' + }), + story_domain.StoryChange({ + 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, + 'property_name': ( + story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID), + 'node_id': story_domain.NODE_ID_PREFIX + '1', + 'old_value': None, + 'new_value': self.EXP_ID_1 + }) + ] + story_services.update_story( + self.albert_id, story_id, change_list, + 'Added node.') + self.assertEqual( + exp_services.get_story_id_linked_to_exploration(self.EXP_ID_1), + story_id) + + def test_get_user_exploration_data(self) -> None: + self.save_new_valid_exploration(self.EXP_0_ID, self.albert_id) + exploration_description = ( + exp_services.get_user_exploration_data( + self.albert_id, self.EXP_0_ID)) + self.assertIsNotNone(exploration_description) + + exploration = self.save_new_valid_exploration( + self.EXP_0_ID, + self.albert_id) + exploration.param_specs = { + 'myParam': param_domain.ParamSpec('UnicodeString')} + init_state_name = exploration.init_state_name + param_changes = [{ + 'customization_args': { + 'list_of_values': ['1', '2'], 'parse_with_jinja': False + }, + 'name': 'myParam', + 'generator_id': 'RandomSelector' + }] + draft_change_list = _get_change_list( + init_state_name, 'param_changes', param_changes) + draft_change_list_dict = [ + change.to_dict() for change in draft_change_list] + date_time = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d') + user_models.ExplorationUserDataModel( + id='%s.%s' % (self.albert_id, self.EXP_0_ID), + 
user_id=self.albert_id, + exploration_id=self.EXP_0_ID, + draft_change_list=draft_change_list_dict, + draft_change_list_last_updated=date_time, + draft_change_list_exp_version=1, + draft_change_list_id=2).put() + exploration_description_draft_applied = ( + exp_services.get_user_exploration_data( + self.albert_id, + self.EXP_0_ID, + True)) + self.assertTrue( + exploration_description_draft_applied['is_version_of_draft_valid']) + self.save_new_valid_exploration(self.EXP_1_ID, self.bob_id) + exploration_draft_not_applied = ( + exp_services.get_user_exploration_data( + self.bob_id, self.EXP_1_ID, True)) + self.assertFalse( + exploration_draft_not_applied['is_version_of_draft_valid']) + class ExplorationConversionPipelineTests(ExplorationServicesUnitTests): """Tests the exploration model -> exploration conversion pipeline.""" - NEW_EXP_ID = 'exp_id1' + NEW_EXP_ID: Final = 'exp_id1' - UPGRADED_EXP_YAML = ( + UPGRADED_EXP_YAML: Final = ( """author_notes: '' auto_tts_enabled: true blurb: '' category: category correctness_feedback_enabled: false -init_state_name: %s +edits_allowed: true +init_state_name: %r language_code: en objective: Old objective param_changes: [] @@ -5230,10 +6333,7 @@ class ExplorationConversionPipelineTests(ExplorationServicesUnitTests): voiceovers_mapping: content: {} solicit_answer_details: false - written_translations: - translations_mapping: - content: {} - %s: + %r: classifier_model_id: null content: content_id: content @@ -5248,6 +6348,7 @@ class ExplorationConversionPipelineTests(ExplorationServicesUnitTests): unicode_str: Continue default_outcome: dest: END + dest_if_really_stuck: null feedback: content_id: default_outcome html: '' @@ -5267,11 +6368,6 @@ class ExplorationConversionPipelineTests(ExplorationServicesUnitTests): content: {} default_outcome: {} solicit_answer_details: false - written_translations: - translations_mapping: - ca_buttonText: {} - content: {} - default_outcome: {} states_schema_version: %d tags: [] title: Old Title @@ 
-5284,8 +6380,8 @@ class ExplorationConversionPipelineTests(ExplorationServicesUnitTests): ALBERT_EMAIL = 'albert@example.com' ALBERT_NAME = 'albert' - def setUp(self): - super(ExplorationConversionPipelineTests, self).setUp() + def setUp(self) -> None: + super().setUp() # Setup user who will own the test explorations. self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) @@ -5297,7 +6393,8 @@ def setUp(self): self._up_to_date_yaml = new_exp.to_yaml() def test_get_exploration_from_model_with_invalid_schema_version_raise_error( - self): + self + ) -> None: exp_model = exp_models.ExplorationModel( id='exp_id', category='category', @@ -5316,13 +6413,82 @@ def test_get_exploration_from_model_with_invalid_schema_version_raise_error( 'category': 'category', }]) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v41-v%d exploration state schemas at ' 'present.' % feconf.CURRENT_STATE_SCHEMA_VERSION): exp_fetchers.get_exploration_from_model(exp_model) - def test_update_exploration_with_empty_change_list_does_not_update(self): + def test_update_exploration_by_voice_artist(self) -> None: + exp_id = 'exp_id' + user_id = 'user_id' + self.save_new_default_exploration(exp_id, user_id) + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'new title' + })] + with self.assertRaisesRegex( + utils.ValidationError, + 'Voice artist does not have permission to make some ' + 'changes in the change list.'): + exp_services.update_exploration( + user_id, exp_id, change_list, 'By voice artist', True) + + def test_update_exploration_linked_to_story(self) -> None: + story_id = story_services.get_new_story_id() + topic_id = topic_fetchers.get_new_topic_id() + exp_id = 'exp_id' + user_id = 'user_id' + self.save_new_default_exploration(exp_id, user_id) + exp_services.update_exploration( + user_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': 
exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'correctness_feedback_enabled', + 'new_value': True + })], 'Changed correctness_feedback_enabled.') + self.save_new_topic( + topic_id, user_id, name='Topic', + abbreviated_name='topic-one', url_fragment='topic-one', + description='A new topic', + canonical_story_ids=[], additional_story_ids=[], + uncategorized_skill_ids=['skill_4'], subtopics=[], + next_subtopic_id=0) + self.save_new_story(story_id, user_id, topic_id) + topic_services.add_canonical_story(user_id, topic_id, story_id) + change_list_story = [ + story_domain.StoryChange({ + 'cmd': story_domain.CMD_ADD_STORY_NODE, + 'node_id': story_domain.NODE_ID_PREFIX + '1', + 'title': 'Title 1' + }), + story_domain.StoryChange({ + 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, + 'property_name': ( + story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID), + 'node_id': story_domain.NODE_ID_PREFIX + '1', + 'old_value': None, + 'new_value': exp_id + }) + ] + story_services.update_story( + user_id, story_id, change_list_story, + 'Added node.') + change_list_exp = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'new title' + })] + opportunity_services.add_new_exploration_opportunities( + story_id, [exp_id]) + exp_services.update_exploration( + user_id, exp_id, change_list_exp, 'story linked') + updated_exp = exp_fetchers.get_exploration_by_id(exp_id) + self.assertEqual(updated_exp.title, 'new title') + + def test_update_exploration_with_empty_change_list_does_not_update( + self + ) -> None: exploration = self.save_new_default_exploration('exp_id', 'user_id') self.assertEqual(exploration.title, 'A title') @@ -5332,7 +6498,7 @@ def test_update_exploration_with_empty_change_list_does_not_update(self): self.assertEqual(exploration.language_code, 'en') exp_services.update_exploration( - 'user_id', 'exp_id', None, 'empty commit') + 'user_id', 'exp_id', [], 'empty commit') exploration = 
exp_fetchers.get_exploration_by_id('exp_id') @@ -5342,55 +6508,36 @@ def test_update_exploration_with_empty_change_list_does_not_update(self): exploration.objective, feconf.DEFAULT_EXPLORATION_OBJECTIVE) self.assertEqual(exploration.language_code, 'en') - def test_save_exploration_with_mismatch_of_versions_raises_error(self): - self.save_new_valid_exploration('exp_id', 'user_id') - - exploration_model = exp_models.ExplorationModel.get('exp_id') - exploration_model.version = 0 - - with self.assertRaisesRegexp( - Exception, - 'Unexpected error: trying to update version 0 of exploration ' - 'from version 1. Please reload the page and try again.'): - exp_services.update_exploration( - 'user_id', 'exp_id', [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, - 'property_name': 'title', - 'new_value': 'new title' - })], 'changed title') - - def test_update_exploration_as_suggestion_with_invalid_commit_message(self): + def test_save_exploration_with_mismatch_of_versions_raises_error( + self + ) -> None: self.save_new_valid_exploration('exp_id', 'user_id') - exploration_model = exp_models.ExplorationModel.get('exp_id') - exploration_model.version = 0 - - with self.assertRaisesRegexp( - Exception, 'Invalid commit message for suggestion.'): - exp_services.update_exploration( - 'user_id', 'exp_id', [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, - 'property_name': 'title', - 'new_value': 'new title' - })], '', is_suggestion=True) + exploration = exp_fetchers.get_exploration_from_model(exploration_model) + exploration.version = 2 - def test_update_exploration_with_invalid_commit_message(self): - self.save_new_valid_exploration('exp_id', 'user_id') + def _mock_apply_change_list( + *unused_args: str, **unused_kwargs: str + ) -> exp_domain.Exploration: + """Mocks exp_fetchers.get_exploration_by_id().""" + return exploration - exploration_model = exp_models.ExplorationModel.get('exp_id') - exploration_model.version = 
0 + fetch_swap = self.swap( + exp_services, 'apply_change_list', + _mock_apply_change_list) - with self.assertRaisesRegexp( + with fetch_swap, self.assertRaisesRegex( Exception, - 'Commit messages for non-suggestions may not start with'): + 'Unexpected error: trying to update version 1 of exploration ' + 'from version 2. Please reload the page and try again.'): exp_services.update_exploration( 'user_id', 'exp_id', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'title', 'new_value': 'new title' - })], feconf.COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX) + })], 'changed title') - def test_update_title(self): + def test_update_title(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual(exploration.language_code, 'en') exp_services.update_exploration( @@ -5428,7 +6575,7 @@ def test_update_title(self): self.assertEqual(exploration.language_code, 'bn') self.assertEqual(exploration.title, 'new changed title') - def test_update_language_code(self): + def test_update_language_code(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual(exploration.language_code, 'en') exp_services.update_exploration( @@ -5468,7 +6615,7 @@ def test_update_language_code(self): self.assertEqual(exploration.title, 'new title') self.assertEqual(exploration.language_code, 'en') - def test_update_exploration_tags(self): + def test_update_exploration_tags(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual(exploration.tags, []) exp_services.update_exploration( @@ -5508,7 +6655,7 @@ def test_update_exploration_tags(self): self.assertEqual(exploration.title, 'new title') self.assertEqual(exploration.tags, ['test', 'skill']) - def test_update_exploration_author_notes(self): + def test_update_exploration_author_notes(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) 
self.assertEqual(exploration.author_notes, '') exp_services.update_exploration( @@ -5548,7 +6695,7 @@ def test_update_exploration_author_notes(self): self.assertEqual(exploration.title, 'new title') self.assertEqual(exploration.author_notes, 'author_notes_updated_again') - def test_update_exploration_blurb(self): + def test_update_exploration_blurb(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual(exploration.blurb, '') exp_services.update_exploration( @@ -5588,15 +6735,21 @@ def test_update_exploration_blurb(self): self.assertEqual(exploration.title, 'new title') self.assertEqual(exploration.blurb, 'blurb_changed') - def test_update_exploration_param_changes(self): + def test_update_exploration_param_changes(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual(exploration.param_changes, []) - exploration.param_specs = { - 'myParam': param_domain.ParamSpec('UnicodeString')} - exp_services._save_exploration(self.albert_id, exploration, '', []) # pylint: disable=protected-access + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'param_specs', + 'new_value': { + 'myParam': {'obj_type': 'UnicodeString'} + } + })] + exp_services.update_exploration( + self.albert_id, self.NEW_EXP_ID, change_list, '') - param_changes = [{ + param_changes: List[param_domain.ParamChangeDict] = [{ 'customization_args': { 'list_of_values': ['1', '2'], 'parse_with_jinja': False }, @@ -5617,15 +6770,30 @@ def test_update_exploration_param_changes(self): self.assertEqual( exploration.param_changes[0].to_dict(), param_changes[0]) - def test_update_exploration_init_state_name(self): + def test_update_exploration_init_state_name(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) - + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) 
exp_services.update_exploration( self.albert_id, self.NEW_EXP_ID, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'State' - })], 'Added new state.') - + 'state_name': 'State', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + })], 'Added new state.') + self.assertEqual( exploration.init_state_name, feconf.DEFAULT_INIT_STATE_NAME) @@ -5691,9 +6859,9 @@ def test_update_exploration_init_state_name(self): self.assertEqual( exploration.init_state_name, feconf.DEFAULT_INIT_STATE_NAME) - def test_update_exploration_auto_tts_enabled(self): + def test_update_exploration_auto_tts_enabled(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) - self.assertEqual(exploration.auto_tts_enabled, True) + self.assertEqual(exploration.auto_tts_enabled, False) exp_services.update_exploration( self.albert_id, self.NEW_EXP_ID, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, @@ -5731,7 +6899,7 @@ def test_update_exploration_auto_tts_enabled(self): self.assertEqual(exploration.title, 'new title') self.assertEqual(exploration.auto_tts_enabled, True) - def test_update_exploration_correctness_feedback_enabled(self): + def test_update_exploration_correctness_feedback_enabled(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual(exploration.correctness_feedback_enabled, False) exp_services.update_exploration( @@ -5771,7 +6939,94 @@ def test_update_exploration_correctness_feedback_enabled(self): self.assertEqual(exploration.title, 'new title') 
self.assertEqual(exploration.correctness_feedback_enabled, False) - def test_update_unclassified_answers(self): + def test_update_exploration_with_mark_translation_needs_update_changes( + self + ) -> None: + exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.NEW_EXP_ID, + exploration.version, 'hi', 'content_0', + translation_domain.TranslatedContent( + 'Translation', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + entity_translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, self.NEW_EXP_ID, + exploration.version + ) + ) + self.assertEqual(len(entity_translations), 1) + self.assertFalse( + entity_translations[0].translations['content_0'].needs_update) + + exp_services.update_exploration( + self.albert_id, self.NEW_EXP_ID, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_MARK_TRANSLATIONS_NEEDS_UPDATE, + 'content_id': 'content_0' + })], 'Marked translation need update.') + entity_translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, self.NEW_EXP_ID, + exploration.version + 1 + ) + ) + self.assertEqual(len(entity_translations), 1) + self.assertTrue( + entity_translations[0].translations['content_0'].needs_update) + + def test_update_exploration_with_remove_translation_changes(self) -> None: + exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) + + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.NEW_EXP_ID, + exploration.version, 'hi', 'content_0', + translation_domain.TranslatedContent( + 'Translation 1', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.NEW_EXP_ID, + exploration.version, 'hi', 'default_outcome_1', + 
translation_domain.TranslatedContent( + 'Translation 2', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + entity_translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, self.NEW_EXP_ID, + exploration.version + ) + ) + self.assertEqual(len(entity_translations), 1) + self.assertTrue('content_0' in entity_translations[0].translations) + self.assertTrue( + 'default_outcome_1' in entity_translations[0].translations) + + exp_services.update_exploration( + self.albert_id, self.NEW_EXP_ID, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_REMOVE_TRANSLATIONS, + 'content_id': 'content_0' + })], 'Marked translation need update.') + + entity_translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, self.NEW_EXP_ID, + exploration.version + 1 + ) + ) + self.assertEqual(len(entity_translations), 1) + self.assertFalse('content_0' in entity_translations[0].translations) + self.assertTrue( + 'default_outcome_1' in entity_translations[0].translations) + + def test_update_unclassified_answers(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual( exploration.init_state.interaction.confirmed_unclassified_answers, @@ -5820,12 +7075,12 @@ def test_update_unclassified_answers(self): exploration.init_state.interaction.confirmed_unclassified_answers, ['test', 'skill']) - def test_update_interaction_hints(self): + def test_update_interaction_hints(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual( exploration.init_state.interaction.hints, []) - hint_list = [{ + hint_list: List[state_domain.HintDict] = [{ 'hint_content': { 'content_id': 'hint_1', 'html': ( @@ -5833,7 +7088,7 @@ def test_update_interaction_hints(self): '' + '"&quot;image&quot;">' '

    ') } }] @@ -5863,7 +7118,7 @@ def test_update_interaction_hints(self): 'new_value': 'new title' })], 'Changed title.') - hint_list_2 = [{ + hint_list_2: List[state_domain.HintDict] = [{ 'hint_content': { 'content_id': 'hint_1', 'html': ( @@ -5871,7 +7126,7 @@ def test_update_interaction_hints(self): '' + '"&quot;image&quot;">' '

    ') } }, { @@ -5882,18 +7137,12 @@ def test_update_interaction_hints(self): '' + '"&quot;image&quot;">' '

    ') } }] change_list = [exp_domain.ExplorationChange({ - 'property_name': 'next_content_id_index', - 'cmd': 'edit_state_property', - 'old_value': 1, - 'state_name': exploration.init_state_name, - 'new_value': 3 - }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_INTERACTION_HINTS, 'state_name': exploration.init_state_name, @@ -5916,7 +7165,7 @@ def test_update_interaction_hints(self): exploration.init_state.interaction.hints[1].hint_content.content_id, 'hint_2') - def test_update_interaction_hints_invalid_parameter_type(self): + def test_update_interaction_hints_invalid_parameter_type(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual( exploration.init_state.interaction.hints, []) @@ -5930,12 +7179,12 @@ def test_update_interaction_hints_invalid_parameter_type(self): '' + '"&quot;image&quot;">' '

    ') } } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected hints_list to be a list.*'): hints_update = exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, @@ -5966,7 +7215,7 @@ def test_update_interaction_hints_invalid_parameter_type(self): '' + '"&quot;image&quot;">' '

    ') } } @@ -5980,7 +7229,7 @@ def test_update_interaction_hints_invalid_parameter_type(self): changes_are_mergeable = exp_services.are_changes_mergeable( self.NEW_EXP_ID, 1, change_list) self.assertTrue(changes_are_mergeable) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected hints_list to be a list.*'): exp_services.update_exploration( self.albert_id, self.NEW_EXP_ID, change_list, @@ -6008,7 +7257,7 @@ def test_update_interaction_hints_invalid_parameter_type(self): '' + '"&quot;image&quot;">' '

    ') } } @@ -6022,7 +7271,7 @@ def test_update_interaction_hints_invalid_parameter_type(self): changes_are_mergeable = exp_services.are_changes_mergeable( self.NEW_EXP_ID, 1, change_list) self.assertTrue(changes_are_mergeable) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected hints_list to be a list.*'): exp_services.update_exploration( self.albert_id, self.NEW_EXP_ID, change_list, @@ -6032,11 +7281,11 @@ def test_update_interaction_hints_invalid_parameter_type(self): exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertEqual(exploration.title, 'new title') - def test_update_interaction_solutions(self): + def test_update_interaction_solutions(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) self.assertIsNone(exploration.init_state.interaction.solution) - solution = { + solution: Optional[state_domain.SolutionDict] = { 'answer_is_exclusive': False, 'correct_answer': 'helloworld!', 'explanation': { @@ -6045,7 +7294,7 @@ def test_update_interaction_solutions(self): }, } - hint_list = [{ + hint_list: List[state_domain.HintDict] = [{ 'hint_content': { 'content_id': u'hint_1', 'html': ( @@ -6053,7 +7302,7 @@ def test_update_interaction_solutions(self): u'' + u'"&quot;image&quot;">' u'

    ') } }] @@ -6075,6 +7324,8 @@ def test_update_interaction_solutions(self): })], 'Changed interaction_solutions.') exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) + # Ruling out the possibility of None for mypy type checking. + assert exploration.init_state.interaction.solution is not None self.assertEqual( exploration.init_state.interaction.solution.to_dict(), solution) @@ -6125,15 +7376,17 @@ def test_update_interaction_solutions(self): # Assert that final version consists all the changes. exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) + # Ruling out the possibility of None for mypy type checking. + assert exploration.init_state.interaction.solution is not None self.assertEqual(exploration.title, 'new title') self.assertEqual( exploration.init_state.interaction.solution.to_dict(), solution_2) - def test_cannot_update_recorded_voiceovers_with_invalid_type(self): + def test_cannot_update_recorded_voiceovers_with_invalid_type(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected recorded_voiceovers to be a dict'): exp_services.update_exploration( self.albert_id, self.NEW_EXP_ID, [exp_domain.ExplorationChange({ @@ -6164,19 +7417,65 @@ def test_cannot_update_recorded_voiceovers_with_invalid_type(self): changes_are_mergeable = exp_services.are_changes_mergeable( self.NEW_EXP_ID, 1, change_list) self.assertTrue(changes_are_mergeable) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected recorded_voiceovers to be a dict'): exp_services.update_exploration( self.albert_id, self.NEW_EXP_ID, change_list, 'Changed recorded_voiceovers.') - def test_revert_exploration_with_mismatch_of_versions_raises_error(self): + def test_get_exploration_validation_error(self) -> None: + # Valid exploration version. 
+ info = exp_services.get_exploration_validation_error( + self.NEW_EXP_ID, 0) + self.assertIsNone(info) + + # Invalid exploration version. + def _mock_exploration_validate_function( + *args: str, **kwargs: str + ) -> None: + """Mocks exploration.validate().""" + raise utils.ValidationError('Bad') + + validate_swap = self.swap( + exp_domain.Exploration, 'validate', + _mock_exploration_validate_function) + with validate_swap: + info = exp_services.get_exploration_validation_error( + self.NEW_EXP_ID, 0) + self.assertEqual(info, 'Bad') + + def test_revert_exploration_after_publish(self) -> None: + self.save_new_valid_exploration( + self.EXP_0_ID, self.albert_id, + end_state_name='EndState') + exploration_model = exp_fetchers.get_exploration_by_id(self.EXP_0_ID) + exp_services.update_exploration( + self.albert_id, self.EXP_0_ID, [ + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'title', + 'new_value': 'New title' + })], 'Changed title') + user_actions_info = user_services.get_user_actions_info(self.albert_id) + rights_manager.publish_exploration(user_actions_info, self.EXP_0_ID) + updated_exploration_model = exp_fetchers.get_exploration_by_id( + self.EXP_0_ID) + exp_services.revert_exploration( + self.albert_id, self.EXP_0_ID, updated_exploration_model.version, 1) + reverted_exploration = exp_fetchers.get_exploration_by_id( + self.EXP_0_ID) + self.assertEqual(exploration_model.title, reverted_exploration.title) + self.assertEqual(3, reverted_exploration.version) + + def test_revert_exploration_with_mismatch_of_versions_raises_error( + self + ) -> None: self.save_new_valid_exploration('exp_id', 'user_id') exploration_model = exp_models.ExplorationModel.get('exp_id') exploration_model.version = 0 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unexpected error: trying to update version 0 of exploration ' 'from version 1. 
Please reload the page and try again.'): @@ -6186,23 +7485,23 @@ def test_revert_exploration_with_mismatch_of_versions_raises_error(self): class EditorAutoSavingUnitTests(test_utils.GenericTestBase): """Test editor auto saving functions in exp_services.""" - EXP_ID1 = 'exp_id1' - EXP_ID2 = 'exp_id2' - EXP_ID3 = 'exp_id3' - USERNAME = 'user123' - USER_ID = 'user_id' - COMMIT_MESSAGE = 'commit message' - DATETIME = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d') - OLDER_DATETIME = datetime.datetime.strptime('2016-01-16', '%Y-%m-%d') - NEWER_DATETIME = datetime.datetime.strptime('2016-03-16', '%Y-%m-%d') - NEW_CHANGELIST = [exp_domain.ExplorationChange({ + EXP_ID1: Final = 'exp_id1' + EXP_ID2: Final = 'exp_id2' + EXP_ID3: Final = 'exp_id3' + USERNAME: Final = 'user123' + USER_ID: Final = 'user_id' + COMMIT_MESSAGE: Final = 'commit message' + DATETIME: Final = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d') + OLDER_DATETIME: Final = datetime.datetime.strptime('2016-01-16', '%Y-%m-%d') + NEWER_DATETIME: Final = datetime.datetime.strptime('2016-03-16', '%Y-%m-%d') + NEW_CHANGELIST: Final = [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'title', 'new_value': 'New title'})] - NEW_CHANGELIST_DICT = [NEW_CHANGELIST[0].to_dict()] + NEW_CHANGELIST_DICT: Final = [NEW_CHANGELIST[0].to_dict()] - def setUp(self): - super(EditorAutoSavingUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -6212,9 +7511,15 @@ def setUp(self): # Create explorations. 
exploration = self.save_new_valid_exploration( self.EXP_ID1, self.USER_ID) - exploration.param_specs = { - 'myParam': param_domain.ParamSpec('UnicodeString')} - exp_services._save_exploration(self.USER_ID, exploration, '', []) # pylint: disable=protected-access + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'param_specs', + 'new_value': { + 'myParam': {'obj_type': 'UnicodeString'} + } + })] + exp_services.update_exploration( + self.USER_ID, self.EXP_ID1, change_list, '') self.save_new_valid_exploration(self.EXP_ID2, self.USER_ID) self.save_new_valid_exploration(self.EXP_ID3, self.USER_ID) self.init_state_name = exploration.init_state_name @@ -6249,7 +7554,7 @@ def setUp(self): id='%s.%s' % (self.USER_ID, self.EXP_ID3), user_id=self.USER_ID, exploration_id=self.EXP_ID3).put() - def test_draft_cleared_after_change_list_applied(self): + def test_draft_cleared_after_change_list_applied(self) -> None: exp_services.update_exploration( self.USER_ID, self.EXP_ID1, self.draft_change_list, '') exp_user_data = user_models.ExplorationUserDataModel.get_by_id( @@ -6258,30 +7563,41 @@ def test_draft_cleared_after_change_list_applied(self): self.assertIsNone(exp_user_data.draft_change_list_last_updated) self.assertIsNone(exp_user_data.draft_change_list_exp_version) - def test_draft_version_valid_returns_true(self): + def test_draft_version_valid_returns_true(self) -> None: exp_user_data = user_models.ExplorationUserDataModel.get_by_id( '%s.%s' % (self.USER_ID, self.EXP_ID1)) self.assertTrue(exp_services.is_version_of_draft_valid( self.EXP_ID1, exp_user_data.draft_change_list_exp_version)) - def test_draft_version_valid_returns_false(self): + def test_draft_version_valid_returns_false(self) -> None: exp_user_data = user_models.ExplorationUserDataModel.get_by_id( '%s.%s' % (self.USER_ID, self.EXP_ID2)) self.assertFalse(exp_services.is_version_of_draft_valid( self.EXP_ID2, 
exp_user_data.draft_change_list_exp_version)) - def test_draft_version_valid_when_no_draft_exists(self): + def test_draft_version_valid_when_no_draft_exists(self) -> None: exp_user_data = user_models.ExplorationUserDataModel.get_by_id( '%s.%s' % (self.USER_ID, self.EXP_ID3)) self.assertFalse(exp_services.is_version_of_draft_valid( self.EXP_ID3, exp_user_data.draft_change_list_exp_version)) - def test_create_or_update_draft_when_older_draft_exists(self): + def test_create_or_update_draft_when_by_voice_artist(self) -> None: + with self.assertRaisesRegex( + utils.ValidationError, + 'Voice artist does not have permission to make some ' + 'changes in the change list.'): + exp_services.create_or_update_draft( + self.EXP_ID1, self.USER_ID, self.NEW_CHANGELIST, 5, + self.NEWER_DATETIME, True) + + def test_create_or_update_draft_when_older_draft_exists(self) -> None: exp_services.create_or_update_draft( self.EXP_ID1, self.USER_ID, self.NEW_CHANGELIST, 5, self.NEWER_DATETIME) exp_user_data = user_models.ExplorationUserDataModel.get( self.USER_ID, self.EXP_ID1) + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None self.assertEqual(exp_user_data.exploration_id, self.EXP_ID1) self.assertEqual( exp_user_data.draft_change_list, self.NEW_CHANGELIST_DICT) @@ -6290,12 +7606,14 @@ def test_create_or_update_draft_when_older_draft_exists(self): self.assertEqual(exp_user_data.draft_change_list_exp_version, 5) self.assertEqual(exp_user_data.draft_change_list_id, 3) - def test_create_or_update_draft_when_newer_draft_exists(self): + def test_create_or_update_draft_when_newer_draft_exists(self) -> None: exp_services.create_or_update_draft( self.EXP_ID1, self.USER_ID, self.NEW_CHANGELIST, 5, self.OLDER_DATETIME) exp_user_data = user_models.ExplorationUserDataModel.get( self.USER_ID, self.EXP_ID1) + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_user_data is not None self.assertEqual(exp_user_data.exploration_id, self.EXP_ID1) self.assertEqual( exp_user_data.draft_change_list, self.draft_change_list_dict) @@ -6304,12 +7622,14 @@ def test_create_or_update_draft_when_newer_draft_exists(self): self.assertEqual(exp_user_data.draft_change_list_exp_version, 2) self.assertEqual(exp_user_data.draft_change_list_id, 2) - def test_create_or_update_draft_when_draft_does_not_exist(self): + def test_create_or_update_draft_when_draft_does_not_exist(self) -> None: exp_services.create_or_update_draft( self.EXP_ID3, self.USER_ID, self.NEW_CHANGELIST, 5, self.NEWER_DATETIME) exp_user_data = user_models.ExplorationUserDataModel.get( self.USER_ID, self.EXP_ID3) + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None self.assertEqual(exp_user_data.exploration_id, self.EXP_ID3) self.assertEqual( exp_user_data.draft_change_list, self.NEW_CHANGELIST_DICT) @@ -6318,42 +7638,58 @@ def test_create_or_update_draft_when_draft_does_not_exist(self): self.assertEqual(exp_user_data.draft_change_list_exp_version, 5) self.assertEqual(exp_user_data.draft_change_list_id, 1) - def test_get_exp_with_draft_applied_when_draft_exists(self): + def test_get_exp_with_draft_applied_when_draft_exists(self) -> None: exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID1) self.assertEqual(exploration.init_state.param_changes, []) updated_exp = exp_services.get_exp_with_draft_applied( self.EXP_ID1, self.USER_ID) self.assertIsNotNone(updated_exp) - param_changes = updated_exp.init_state.param_changes[0] - self.assertEqual(param_changes._name, 'myParam') # pylint: disable=protected-access - self.assertEqual(param_changes._generator_id, 'RandomSelector') # pylint: disable=protected-access + # Ruling out the possibility of None for mypy type checking. 
+ assert updated_exp is not None + param_changes = updated_exp.init_state.param_changes[0].to_dict() + self.assertEqual(param_changes['name'], 'myParam') + self.assertEqual(param_changes['generator_id'], 'RandomSelector') self.assertEqual( - param_changes._customization_args, # pylint: disable=protected-access + param_changes['customization_args'], {'list_of_values': ['1', '2'], 'parse_with_jinja': False}) - def test_get_exp_with_draft_applied_when_draft_does_not_exist(self): + def test_get_exp_with_draft_applied_when_draft_does_not_exist( + self + ) -> None: exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID3) self.assertEqual(exploration.init_state.param_changes, []) updated_exp = exp_services.get_exp_with_draft_applied( self.EXP_ID3, self.USER_ID) self.assertIsNone(updated_exp) - def test_get_exp_with_draft_applied_when_draft_version_is_invalid(self): + def test_get_exp_with_draft_applied_when_draft_version_is_invalid( + self + ) -> None: exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID2) self.assertEqual(exploration.init_state.param_changes, []) updated_exp = exp_services.get_exp_with_draft_applied( self.EXP_ID2, self.USER_ID) self.assertIsNone(updated_exp) - def test_draft_discarded(self): - exp_services.discard_draft(self.EXP_ID1, self.USER_ID,) + def test_draft_discarded(self) -> None: + user_data_model = ( + exp_services.get_exp_user_data_model_with_draft_discarded( + self.EXP_ID1, + self.USER_ID + ) + ) + assert user_data_model is not None + user_data_model.update_timestamps() + user_data_model.put() exp_user_data = user_models.ExplorationUserDataModel.get_by_id( '%s.%s' % (self.USER_ID, self.EXP_ID1)) self.assertIsNone(exp_user_data.draft_change_list) self.assertIsNone(exp_user_data.draft_change_list_last_updated) self.assertIsNone(exp_user_data.draft_change_list_exp_version) - def test_create_or_update_draft_with_exploration_model_not_created(self): + def test_create_or_update_draft_with_exploration_model_not_created( + self + ) -> 
None: self.save_new_valid_exploration( 'exp_id', self.admin_id, title='title') @@ -6369,6 +7705,8 @@ def test_create_or_update_draft_with_exploration_model_not_created(self): self.NEWER_DATETIME) exp_user_data = user_models.ExplorationUserDataModel.get( self.editor_id, 'exp_id') + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None self.assertEqual(exp_user_data.exploration_id, 'exp_id') self.assertEqual( exp_user_data.draft_change_list, self.NEW_CHANGELIST_DICT) @@ -6377,7 +7715,9 @@ def test_create_or_update_draft_with_exploration_model_not_created(self): self.assertEqual(exp_user_data.draft_change_list_exp_version, 1) self.assertEqual(exp_user_data.draft_change_list_id, 1) - def test_get_exp_with_draft_applied_when_draft_has_invalid_math_tags(self): + def test_get_exp_with_draft_applied_when_draft_has_invalid_math_tags( + self + ) -> None: """Test the method get_exp_with_draft_applied when the draft_changes have invalid math-tags in them. """ @@ -6385,26 +7725,29 @@ def test_get_exp_with_draft_applied_when_draft_has_invalid_math_tags(self): 'exp_id') exploration.add_states(['State1']) state = exploration.states['State1'] - state_customization_args_dict = { + choices_subtitled_html_dicts: List[state_domain.SubtitledHtmlDict] = [ + { + 'content_id': 'ca_choices_0', + 'html': '

    state customization arg html 1

    ' + }, + { + 'content_id': 'ca_choices_1', + 'html': '

    state customization arg html 2

    ' + }, + { + 'content_id': 'ca_choices_2', + 'html': '

    state customization arg html 3

    ' + }, + { + 'content_id': 'ca_choices_3', + 'html': '

    state customization arg html 4

    ' + } + ] + state_customization_args_dict: Dict[ + str, Dict[str, Union[int, List[state_domain.SubtitledHtmlDict]]] + ] = { 'choices': { - 'value': [ - { - 'content_id': 'ca_choices_0', - 'html': '

    state customization arg html 1

    ' - }, - { - 'content_id': 'ca_choices_1', - 'html': '

    state customization arg html 2

    ' - }, - { - 'content_id': 'ca_choices_2', - 'html': '

    state customization arg html 3

    ' - }, - { - 'content_id': 'ca_choices_3', - 'html': '

    state customization arg html 4

    ' - } - ] + 'value': choices_subtitled_html_dicts }, 'maxAllowableSelectionCount': { 'value': 1 @@ -6414,7 +7757,6 @@ def test_get_exp_with_draft_applied_when_draft_has_invalid_math_tags(self): } } state.update_interaction_id('ItemSelectionInput') - state.update_next_content_id_index(4) state.update_interaction_customization_args( state_customization_args_dict) exp_services.save_new_exploration(self.USER_ID, exploration) @@ -6463,69 +7805,1894 @@ def test_get_exp_with_draft_applied_when_draft_has_invalid_math_tags(self): draft_change_list_last_updated=self.DATETIME, draft_change_list_exp_version=1, draft_change_list_id=2).put() - updated_exploration = exp_services.get_exp_with_draft_applied( - 'exp_id', self.USER_ID) + with self.swap(state_domain.SubtitledHtml, 'validate', lambda x: True): + updated_exploration = exp_services.get_exp_with_draft_applied( + 'exp_id', self.USER_ID) self.assertIsNone(updated_exploration) class ApplyDraftUnitTests(test_utils.GenericTestBase): """Test apply draft functions in exp_services.""" - EXP_ID1 = 'exp_id1' - USERNAME = 'user123' - USER_ID = 'user_id' - COMMIT_MESSAGE = 'commit message' - DATETIME = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d') + EXP_ID1: Final = 'exp_id1' + USER_ID: Final = 'user_id' + DATETIME: Final = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d') - def setUp(self): - super(ApplyDraftUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() # Create explorations. 
exploration = self.save_new_valid_exploration( self.EXP_ID1, self.USER_ID) - exploration.param_specs = { - 'myParam': param_domain.ParamSpec('UnicodeString')} - self.init_state_name = exploration.init_state_name - self.param_changes = [{ - 'customization_args': { - 'list_of_values': ['1', '2'], 'parse_with_jinja': False - }, - 'name': 'myParam', - 'generator_id': 'RandomSelector' - }] + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'param_specs', + 'new_value': { + 'myParam': {'obj_type': 'UnicodeString'} + } + })] + exp_services.update_exploration( + self.USER_ID, self.EXP_ID1, change_list, '') + + migration_change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION, + 'from_version': 54, + 'to_version': str(feconf.CURRENT_STATE_SCHEMA_VERSION) + })] + exp_services.update_exploration( + self.USER_ID, self.EXP_ID1, + migration_change_list, 'Migrate state schema.') + + state = exploration.states[exploration.init_state_name] self.draft_change_list = _get_change_list( - self.init_state_name, 'param_changes', self.param_changes) + exploration.init_state_name, 'content', { + 'content_id': state.content.content_id, + 'html': '

    New html value

    ' + }) self.draft_change_list_dict = [ change.to_dict() for change in self.draft_change_list] # Explorations with draft set. + exp_user_data = user_models.ExplorationUserDataModel.create( + self.USER_ID, self.EXP_ID1) + exp_user_data.draft_change_list = self.draft_change_list_dict + exp_user_data.draft_change_list_last_updated = self.DATETIME + exp_user_data.draft_change_list_exp_version = 2 + exp_user_data.draft_change_list_id = 2 + exp_user_data.update_timestamps() + exp_user_data.put() + + def test_get_exp_with_draft_applied_after_draft_upgrade(self) -> None: + exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID1) + self.assertEqual(exploration.init_state.param_changes, []) + updated_exp = exp_services.get_exp_with_draft_applied( + self.EXP_ID1, self.USER_ID) + self.assertIsNotNone(updated_exp) + # Ruling out the possibility of None for mypy type checking. + assert updated_exp is not None + new_content_dict = updated_exp.init_state.content.to_dict() + self.assertEqual(new_content_dict['html'], '

    New html value

    ') + self.assertEqual(new_content_dict['content_id'], 'content_0') + + def test_get_exp_with_draft_applied_when_draft_has_exp_property_changes( + self + ) -> None: + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title' + }).to_dict()] user_models.ExplorationUserDataModel( id='%s.%s' % (self.USER_ID, self.EXP_ID1), user_id=self.USER_ID, exploration_id=self.EXP_ID1, - draft_change_list=self.draft_change_list_dict, + draft_change_list=change_list, draft_change_list_last_updated=self.DATETIME, - draft_change_list_exp_version=1, + draft_change_list_exp_version=2, draft_change_list_id=2).put() + updated_exploration = exp_services.get_exp_with_draft_applied( + self.EXP_ID1, self.USER_ID) + self.assertFalse(updated_exploration is None) - migration_change_list = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION, - 'from_version': '0', - 'to_version': '1' + +class UpdateVersionHistoryUnitTests(ExplorationServicesUnitTests): + """Tests for ensuring creation, deletion and updation of version history + data is carried out correctly. 
+ """ + + def setUp(self) -> None: + super().setUp() + exploration = exp_domain.Exploration.create_default_exploration( + self.EXP_0_ID) + exp_services.save_new_exploration(self.owner_id, exploration) + self.exploration = exploration + self.version_history_model_class: Type[ + exp_models.ExplorationVersionHistoryModel + ] = ( + exp_models.ExplorationVersionHistoryModel) + + def test_creating_new_exploration_creates_version_history_model( + self + ) -> None: + version_history_id = ( + self.version_history_model_class.get_instance_id( + self.exploration.id, self.exploration.version)) + version_history_model = self.version_history_model_class.get( + version_history_id) + expected_state_version_history_dict = { + feconf.DEFAULT_INIT_STATE_NAME: state_domain.StateVersionHistory( + None, None, self.owner_id + ).to_dict() + } + + self.assertEqual( + version_history_model.state_version_history, + expected_state_version_history_dict) + self.assertEqual( + version_history_model.metadata_last_edited_version_number, None) + self.assertEqual( + version_history_model.metadata_last_edited_committer_id, + self.owner_id) + self.assertIn(self.owner_id, version_history_model.committer_ids) + + def test_soft_deletion_does_not_delete_version_history_models(self) -> None: + version_history_models_before_deletion: Sequence[ + exp_models.ExplorationVersionHistoryModel + ] = ( + self.version_history_model_class.query( + self.version_history_model_class.exploration_id == + self.exploration.id + ).fetch()) + exp_services.delete_exploration(self.owner_id, self.exploration.id) + version_history_models_after_deletion: Sequence[ + exp_models.ExplorationVersionHistoryModel + ] = ( + self.version_history_model_class.query( + self.version_history_model_class.exploration_id == + self.exploration.id + ).fetch()) + + self.assertEqual( + version_history_models_before_deletion, + version_history_models_after_deletion) + + def test_hard_deletion_deletes_version_history_models(self) -> None: + 
version_history_models_before_deletion: Sequence[ + exp_models.ExplorationVersionHistoryModel + ] = ( + self.version_history_model_class.query( + self.version_history_model_class.exploration_id == + self.exploration.id + ).fetch()) + exp_services.delete_exploration( + self.owner_id, self.exploration.id, force_deletion=True) + version_history_models_after_deletion: Sequence[ + exp_models.ExplorationVersionHistoryModel + ] = ( + self.version_history_model_class.query( + self.version_history_model_class.exploration_id == + self.exploration.id + ).fetch()) + + self.assertNotEqual( + version_history_models_before_deletion, + version_history_models_after_deletion) + + def test_version_history_on_add_state(self) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + + self.assertEqual( + old_model.state_version_history.get('New state'), None) + content_id_generator = translation_domain.ContentIdGenerator( + self.exploration.next_content_id_index) + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index, + 'old_value': 0 + })], 'Added state') + + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertEqual( + new_model.state_version_history.get('New state'), + state_domain.StateVersionHistory( + None, None, self.owner_id).to_dict()) + + def test_version_history_on_delete_state(self) -> 
None: + content_id_generator: translation_domain.ContentIdGenerator = ( + translation_domain.ContentIdGenerator( + self.exploration.next_content_id_index)) + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index, + 'old_value': 0 + })], 'Added state') + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertEqual( + old_model.state_version_history.get('New state'), + state_domain.StateVersionHistory( + None, None, self.owner_id).to_dict()) + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_DELETE_STATE, + 'state_name': 'New state', + })], 'Deleted state') + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 3)) + + self.assertEqual( + new_model.state_version_history.get('New state'), None) + + def test_version_history_on_rename_state(self) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + new_state_name = 'Another name' + + self.assertEqual( + old_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), + state_domain.StateVersionHistory( + None, None, self.owner_id).to_dict()) + self.assertEqual( + old_model.state_version_history.get(new_state_name), None) + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, 
[exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'new_state_name': new_state_name + })], 'Renamed state') + + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertEqual( + new_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), None) + self.assertEqual( + new_model.state_version_history.get(new_state_name), + state_domain.StateVersionHistory( + 1, feconf.DEFAULT_INIT_STATE_NAME, self.owner_id).to_dict()) + + def test_version_history_on_cancelled_rename_state(self) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + new_state_name = 'Another name' + expected_dict = state_domain.StateVersionHistory( + None, None, self.owner_id).to_dict() + + self.assertEqual( + old_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), expected_dict) + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'new_state_name': new_state_name + }), exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': new_state_name, + 'new_state_name': feconf.DEFAULT_INIT_STATE_NAME + }) + ], 'Renamed state') + + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertEqual( + new_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), expected_dict) + + def test_version_history_on_edit_state_property(self) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + + self.assertEqual( + old_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), + state_domain.StateVersionHistory( + None, None, 
self.owner_id).to_dict()) + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID, + 'state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'new_value': 'TextInput' + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': + exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS, + 'state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'new_value': { + 'placeholder': { + 'value': { + 'content_id': 'ca_placeholder_0', + 'unicode_str': '' + } + }, + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} + } + }) + ], 'Edited interaction' + ) + + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertEqual( + new_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), + state_domain.StateVersionHistory( + 1, feconf.DEFAULT_INIT_STATE_NAME, self.owner_id).to_dict()) + + def test_version_history_on_cancelled_edit_state_property(self) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + expected_dict = state_domain.StateVersionHistory( + None, None, self.owner_id).to_dict() + + self.assertEqual( + old_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), expected_dict) + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID, + 'state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'new_value': 'TextInput' + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID, + 'state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'new_value': None + }) + ], 'Edited interaction id' + ) + + new_model 
= self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertEqual( + new_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), expected_dict) + + def test_version_history_on_only_translation_commits(self) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + expected_dict = state_domain.StateVersionHistory( + None, None, self.owner_id).to_dict() + + self.assertEqual( + old_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), expected_dict) + + recorded_voiceovers_dict = { + 'voiceovers_mapping': { + 'content_0': { + 'en': { + 'filename': 'filename3.mp3', + 'file_size_bytes': 3000, + 'needs_update': False, + 'duration_secs': 42.43 + } + }, + 'default_outcome_1': {} + } + } + change_list = [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': ( + exp_domain.STATE_PROPERTY_RECORDED_VOICEOVERS), + 'state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'new_value': recorded_voiceovers_dict })] - exp_services._save_exploration( # pylint: disable=protected-access - self.USER_ID, exploration, 'Migrate state schema.', - migration_change_list) + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list, 'Translation commits') + + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) - def test_get_exp_with_draft_applied_after_draft_upgrade(self): - exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID1) - self.assertEqual(exploration.init_state.param_changes, []) - draft_upgrade_services.DraftUpgradeUtil._convert_states_v0_dict_to_v1_dict = ( # pylint: disable=line-too-long, protected-access - classmethod(lambda cls, changelist: changelist)) - updated_exp = exp_services.get_exp_with_draft_applied( - self.EXP_ID1, self.USER_ID) - self.assertIsNotNone(updated_exp) - param_changes = 
updated_exp.init_state.param_changes[0] - self.assertEqual(param_changes._name, 'myParam') # pylint: disable=protected-access - self.assertEqual(param_changes._generator_id, 'RandomSelector') # pylint: disable=protected-access self.assertEqual( - param_changes._customization_args, # pylint: disable=protected-access - {'list_of_values': ['1', '2'], 'parse_with_jinja': False}) + new_model.state_version_history.get( + feconf.DEFAULT_INIT_STATE_NAME), expected_dict) + + def test_version_history_on_edit_exploration_property(self) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + + self.assertEqual(old_model.metadata_last_edited_version_number, None) + self.assertEqual( + old_model.metadata_last_edited_committer_id, self.owner_id) + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title'})], 'Changed title') + + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertEqual(new_model.metadata_last_edited_version_number, 1) + self.assertEqual( + new_model.metadata_last_edited_committer_id, self.owner_id) + + def test_version_history_on_cancelled_edit_exploration_property( + self + ) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + + self.assertEqual(old_model.metadata_last_edited_version_number, None) + self.assertEqual( + old_model.metadata_last_edited_committer_id, self.owner_id) + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title'} + ), exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 
'property_name': 'title', + 'new_value': feconf.DEFAULT_EXPLORATION_TITLE} + ) + ], 'Changed title') + + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertEqual(new_model.metadata_last_edited_version_number, None) + self.assertEqual( + new_model.metadata_last_edited_committer_id, self.owner_id) + + def test_version_history_on_revert_exploration(self) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title'})], 'Changed title') + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': feconf.DEFAULT_INIT_STATE_NAME, + 'new_state_name': 'Another state' + }) + ], 'Renamed state') + exp_services.revert_exploration(self.owner_id, self.EXP_0_ID, 3, 1) + + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 4)) + + self.assertEqual( + old_model.state_version_history, + new_model.state_version_history) + self.assertEqual( + old_model.metadata_last_edited_version_number, + new_model.metadata_last_edited_version_number) + self.assertEqual( + old_model.metadata_last_edited_committer_id, + new_model.metadata_last_edited_committer_id) + self.assertEqual(old_model.committer_ids, new_model.committer_ids) + + def test_version_history_on_cancelled_add_state(self) -> None: + # In this case, the version history for that state should not be + # recorded because it was added and deleted in the same commit. 
+ old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + content_id_generator = translation_domain.ContentIdGenerator( + self.exploration.next_content_id_index) + change_list = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_DELETE_STATE, + 'state_name': 'New state' + }) + ] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list, + 'Added and deleted state') + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertIsNone(old_model.state_version_history.get('New state')) + self.assertIsNone(new_model.state_version_history.get('New state')) + + def test_version_history_on_state_name_interchange(self) -> None: + content_id_generator = translation_domain.ContentIdGenerator( + self.exploration.next_content_id_index) + change_list_from_v1_to_v2 = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'first', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'second', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 
exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index, + 'old_value': 0 + }) + ] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list_from_v1_to_v2, + 'Added two new states') + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 2)) + + self.assertEqual( + old_model.state_version_history['first'], + state_domain.StateVersionHistory( + None, None, self.owner_id).to_dict()) + self.assertEqual( + old_model.state_version_history['second'], + state_domain.StateVersionHistory( + None, None, self.owner_id).to_dict()) + + # Correctly interchanging the state names. + change_list_from_v2_to_v3 = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'first', + 'new_state_name': 'temporary' + }), exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'second', + 'new_state_name': 'first' + }), exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'temporary', + 'new_state_name': 'second' + }) + ] + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, change_list_from_v2_to_v3, + 'Added two new states') + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 3)) + + self.assertEqual( + new_model.state_version_history['second'], + state_domain.StateVersionHistory( + 2, 'first', self.owner_id).to_dict()) + self.assertEqual( + new_model.state_version_history['first'], + state_domain.StateVersionHistory( + 2, 'second', self.owner_id).to_dict()) + + def test_new_committer_id_is_added_to_committer_ids_list(self) -> None: + old_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 1)) + + self.assertNotIn(self.editor_id, old_model.committer_ids) + + content_id_generator = 
translation_domain.ContentIdGenerator( + self.exploration.next_content_id_index) + exp_services.update_exploration( + self.editor_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index, + 'old_value': 0 + })], 'Added a state') + exp_services.update_exploration( + self.owner_id, self.EXP_0_ID, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'Another state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index, + 'old_value': 0 + })], 'Added a state') + new_model = self.version_history_model_class.get( + self.version_history_model_class.get_instance_id(self.EXP_0_ID, 3)) + + self.assertIn(self.editor_id, new_model.committer_ids) + + +class LoggedOutUserProgressUpdateTests(test_utils.GenericTestBase): + """Tests whether logged-out user progress is updated correctly""" + + EXP_ID: Final = 'exp_id0' + UNIQUE_PROGRESS_URL_ID: Final = 'pid123' + + SAMPLE_EXPLORATION_YAML: str = ( +""" +author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' 
+param_changes: [] +param_specs: {} +schema_version: 47 +states: + Introduction: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: New state + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + catchMisspellings: + value: false + default_outcome: + dest: Introduction + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: + - hint_content: + content_id: hint_1 + html:

    hint one,

    + id: TextInput + solution: + answer_is_exclusive: false + correct_answer: helloworld! + explanation: + content_id: solution + html:

    hello_world is a string

    + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: introduction_state.mp3 + needs_update: false + default_outcome: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: unknown_answer_feedback.mp3 + needs_update: false + feedback_1: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: correct_answer_feedback.mp3 + needs_update: false + hint_1: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: answer_hint.mp3 + needs_update: false + rule_input_3: {} + solution: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: answer_solution.mp3 + needs_update: false + solicit_answer_details: false + card_is_checkpoint: true + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + hint_1: {} + rule_input_3: {} + solution: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: {} + default_outcome: + dest: New state + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: null + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + default_outcome: {} + solicit_answer_details: false + card_is_checkpoint: false + written_translations: + translations_mapping: + content: {} + default_outcome: {} +states_schema_version: 42 +tags: [] +title: Title +""") + + def setUp(self) -> None: + super().setUp() + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + + 
exp_services.save_new_exploration_from_yaml_and_assets( + self.owner_id, self.SAMPLE_EXPLORATION_YAML, self.EXP_ID, []) + self.exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID) + + def test_logged_out_user_checkpoint_progress_is_updated_correctly( + self + ) -> None: + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID + ) + self.assertIsNone(logged_out_user_data) + + # First checkpoint reached. + exp_services.update_logged_out_user_progress( + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID, 'Introduction', 1) + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + # Ruling out the possibility of None for mypy type checking. + assert logged_out_user_data is not None + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_exp_version, 1) + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_state_name, + 'Introduction') + self.assertEqual( + logged_out_user_data. + most_recently_reached_checkpoint_exp_version, 1) + self.assertEqual( + logged_out_user_data. + most_recently_reached_checkpoint_state_name, 'Introduction') + + # Make 'New state' a checkpoint. + # Now version of the exploration becomes 2. + change_list = _get_change_list( + 'New state', + exp_domain.STATE_PROPERTY_CARD_IS_CHECKPOINT, + True) + exp_services.update_exploration( + self.owner_id, self.EXP_ID, change_list, '') + + # Second checkpoint reached. + exp_services.update_logged_out_user_progress( + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID, 'New state', 2) + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert logged_out_user_data is not None + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_exp_version, 2) + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_state_name, + 'New state') + self.assertEqual( + logged_out_user_data.most_recently_reached_checkpoint_exp_version, + 2) + self.assertEqual( + logged_out_user_data.most_recently_reached_checkpoint_state_name, + 'New state') + + # Unmark 'New state' as a checkpoint. + # Now version of the exploration becomes 3. + change_list = _get_change_list( + 'New state', + exp_domain.STATE_PROPERTY_CARD_IS_CHECKPOINT, + False) + exp_services.update_exploration( + self.owner_id, self.EXP_ID, change_list, '') + + # First checkpoint reached again. + # Since the previously furthest reached checkpoint 'New state' doesn't + # exist in the current exploration, the first checkpoint behind + # 'New state' that exists in current exploration ('Introduction' + # state in this case) becomes the new furthest reached checkpoint. + exp_services.update_logged_out_user_progress( + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID, 'Introduction', 3) + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + # Ruling out the possibility of None for mypy type checking. + assert logged_out_user_data is not None + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_exp_version, 3) + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_state_name, + 'Introduction') + self.assertEqual( + logged_out_user_data.most_recently_reached_checkpoint_exp_version, + 3) + self.assertEqual( + logged_out_user_data.most_recently_reached_checkpoint_state_name, + 'Introduction') + + # Change state name of 'Introduction' state. + # Now version of exploration becomes 4. 
+ exp_services.update_exploration( + self.owner_id, self.EXP_ID, + [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Introduction', + 'new_state_name': 'Intro', + })], 'Change state name' + ) + + # First checkpoint reached again. + exp_services.update_logged_out_user_progress( + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID, 'Intro', 4) + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + # Ruling out the possibility of None for mypy type checking. + assert logged_out_user_data is not None + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_exp_version, 4) + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_state_name, + 'Intro') + self.assertEqual( + logged_out_user_data.most_recently_reached_checkpoint_exp_version, + 4) + self.assertEqual( + logged_out_user_data.most_recently_reached_checkpoint_state_name, + 'Intro') + + def test_sync_logged_out_learner_checkpoint_progress_with_current_exp_version( # pylint: disable=line-too-long + self + ) -> None: + logged_out_user_data = ( + exp_services.sync_logged_out_learner_checkpoint_progress_with_current_exp_version( # pylint: disable=line-too-long + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID)) + self.assertIsNone(logged_out_user_data) + + # First checkpoint reached. + exp_services.update_logged_out_user_progress( + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID, 'Introduction', 1) + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert logged_out_user_data is not None + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_exp_version, 1) + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_state_name, + 'Introduction') + self.assertEqual( + logged_out_user_data.most_recently_reached_checkpoint_exp_version, + 1) + self.assertEqual( + logged_out_user_data.most_recently_reached_checkpoint_state_name, + 'Introduction') + + # Change state name of 'Introduction' state. + # Now version of exploration becomes 2. + exp_services.update_exploration( + self.owner_id, self.EXP_ID, + [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Introduction', + 'new_state_name': 'Intro', + })], 'Change state name' + ) + + # This method is called when exploration data is fetched since now + # latest exploration version > most recently interacted exploration + # version. + # Working - First the furthest reached checkpoint ('Introduction' in + # this case) is searched in current exploration. It will not be found + # since its state name is changed to 'Intro'. It will then search for + # an checkpoint that had been reached in older exploration and also + # exists in current exploration. If such checkpoint is not found, + # furthest reached checkpoint is set to None. Similar workflow is + # carried out for most recently reached checkpoint. + logged_out_user_data = ( + exp_services.sync_logged_out_learner_checkpoint_progress_with_current_exp_version( # pylint: disable=line-too-long + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID)) + # Ruling out the possibility of None for mypy type checking. 
+ assert logged_out_user_data is not None + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_exp_version, 2) + self.assertIsNone( + logged_out_user_data.furthest_reached_checkpoint_state_name) + self.assertEqual( + logged_out_user_data.most_recently_reached_checkpoint_exp_version, + 2) + self.assertIsNone( + logged_out_user_data.most_recently_reached_checkpoint_state_name) + + +class SyncLoggedInAndLoggedOutProgressTests(test_utils.GenericTestBase): + """Tests whether logged-in user progress is synced correctly""" + + EXP_ID: Final = 'exp_id0' + UNIQUE_PROGRESS_URL_ID: Final = 'pid123' + + SAMPLE_EXPLORATION_YAML: str = ( +""" +author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 47 +states: + Introduction: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: New state + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + catchMisspellings: + value: false + default_outcome: + dest: Introduction + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: + - hint_content: + content_id: hint_1 + html:

    hint one,

    + id: TextInput + solution: + answer_is_exclusive: false + correct_answer: helloworld! + explanation: + content_id: solution + html:

    hello_world is a string

    + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: introduction_state.mp3 + needs_update: false + default_outcome: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: unknown_answer_feedback.mp3 + needs_update: false + feedback_1: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: correct_answer_feedback.mp3 + needs_update: false + hint_1: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: answer_hint.mp3 + needs_update: false + rule_input_3: {} + solution: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: answer_solution.mp3 + needs_update: false + solicit_answer_details: false + card_is_checkpoint: true + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + hint_1: {} + rule_input_3: {} + solution: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: {} + default_outcome: + dest: Third state + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: null + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + default_outcome: {} + solicit_answer_details: false + card_is_checkpoint: false + written_translations: + translations_mapping: + content: {} + default_outcome: {} + Third state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: {} + default_outcome: + dest: Third state + feedback: + content_id: default_outcome + 
html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: null + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + default_outcome: {} + solicit_answer_details: false + card_is_checkpoint: false + written_translations: + translations_mapping: + content: {} + default_outcome: {} +states_schema_version: 42 +tags: [] +title: Title +""") + + def setUp(self) -> None: + super().setUp() + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL) + + exp_services.save_new_exploration_from_yaml_and_assets( + self.owner_id, self.SAMPLE_EXPLORATION_YAML, self.EXP_ID, []) + self.exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID) + + def test_logged_in_user_progress_is_updated_correctly(self) -> None: + self.login(self.VIEWER_EMAIL) + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + self.assertIsNone(exp_user_data) + + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID + ) + self.assertIsNone(logged_out_user_data) + + # No sync occurs if there is no logged-out user data or if the data + # has been cleared by the cron job. + exp_services.sync_logged_out_learner_progress_with_logged_in_progress( + self.viewer_id, self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID + ) + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + self.assertIsNone(exp_user_data) + + # First checkpoint reached as logged out user. 
+ exp_services.update_logged_out_user_progress( + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID, 'Introduction', 1) + + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + # Ruling out the possibility of None for mypy type checking. + assert logged_out_user_data is not None + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_exp_version, 1) + self.assertEqual( + logged_out_user_data.furthest_reached_checkpoint_state_name, + 'Introduction') + self.assertEqual( + logged_out_user_data. + most_recently_reached_checkpoint_exp_version, 1) + self.assertEqual( + logged_out_user_data. + most_recently_reached_checkpoint_state_name, 'Introduction') + + exp_services.sync_logged_out_learner_progress_with_logged_in_progress( + self.viewer_id, self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID + ) + + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + self.assertIsNotNone(exp_user_data) + + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None + assert logged_out_user_data is not None + + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, + logged_out_user_data.most_recently_reached_checkpoint_exp_version + ) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, + logged_out_user_data.most_recently_reached_checkpoint_state_name + ) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, + logged_out_user_data.furthest_reached_checkpoint_exp_version + ) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, + logged_out_user_data.furthest_reached_checkpoint_state_name + ) + + # Mark 'New state' as a checkpoint. + # Now version of the exploration becomes 2. 
+ change_list = _get_change_list( + 'New state', + exp_domain.STATE_PROPERTY_CARD_IS_CHECKPOINT, + True) + exp_services.update_exploration( + self.owner_id, self.EXP_ID, change_list, '') + + # New second checkpoint reached as logged out user. + exp_services.update_logged_out_user_progress( + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID, 'New state', 2) + + exp_services.sync_logged_out_learner_progress_with_logged_in_progress( + self.viewer_id, self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID + ) + + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None + assert logged_out_user_data is not None + + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, + logged_out_user_data.most_recently_reached_checkpoint_exp_version + ) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, + logged_out_user_data.most_recently_reached_checkpoint_state_name + ) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, + logged_out_user_data.furthest_reached_checkpoint_exp_version + ) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, + logged_out_user_data.furthest_reached_checkpoint_state_name + ) + + # Mark 'Third state' as a checkpoint. + # Now version of the exploration becomes 3. + change_list = _get_change_list( + 'Third state', + exp_domain.STATE_PROPERTY_CARD_IS_CHECKPOINT, + True) + exp_services.update_exploration( + self.owner_id, self.EXP_ID, change_list, '') + + # Unmark 'New state' as a checkpoint. + # Now version of the exploration becomes 4. 
+ change_list = _get_change_list( + 'New state', + exp_domain.STATE_PROPERTY_CARD_IS_CHECKPOINT, + False) + exp_services.update_exploration( + self.owner_id, self.EXP_ID, change_list, '') + + # New third checkpoint reached as logged out user. + exp_services.update_logged_out_user_progress( + self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID, 'Third state', 4) + + exp_services.sync_logged_out_learner_progress_with_logged_in_progress( + self.viewer_id, self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID + ) + + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None + assert logged_out_user_data is not None + + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, + logged_out_user_data.most_recently_reached_checkpoint_exp_version + ) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, + logged_out_user_data.most_recently_reached_checkpoint_state_name + ) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, + logged_out_user_data.furthest_reached_checkpoint_exp_version + ) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, + logged_out_user_data.furthest_reached_checkpoint_state_name + ) + + # Changing logged-in most recently reached state. + user_services.update_learner_checkpoint_progress( + self.viewer_id, + self.EXP_ID, + 'Introduction', + 4 + ) + + exp_services.sync_logged_out_learner_progress_with_logged_in_progress( + self.viewer_id, self.EXP_ID, self.UNIQUE_PROGRESS_URL_ID + ) + + logged_out_user_data = exp_fetchers.get_logged_out_user_progress( + self.UNIQUE_PROGRESS_URL_ID) + + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_user_data is not None + assert logged_out_user_data is not None + + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, + logged_out_user_data.most_recently_reached_checkpoint_exp_version + ) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, + logged_out_user_data.most_recently_reached_checkpoint_state_name + ) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, + logged_out_user_data.furthest_reached_checkpoint_exp_version + ) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, + logged_out_user_data.furthest_reached_checkpoint_state_name + ) + + self.logout() + + +class RegenerateMissingExpStatsUnitTests(test_utils.GenericTestBase): + """Test apply draft functions in exp_services.""" + + def test_when_exp_and_state_stats_models_exist(self) -> None: + self.save_new_default_exploration('ID', 'owner_id') + + self.assertEqual( + exp_services.regenerate_missing_stats_for_exploration('ID'), ( + [], [], 1, 1)) + + def test_fail_to_fetch_exploration_snapshots(self) -> None: + observed_log_messages = [] + def _mock_logging_function(msg: str, *args: str) -> None: + """Mocks logging.error().""" + observed_log_messages.append(msg % args) + logging_swap = self.swap(logging, 'error', _mock_logging_function) + + self.save_new_default_exploration('ID', 'owner_id') + exp_snapshot_id = exp_models.ExplorationModel.get_snapshot_id('ID', 1) + exp_snapshot = exp_models.ExplorationSnapshotMetadataModel.get_by_id( + exp_snapshot_id) + exp_snapshot.commit_cmds[0] = {} + exp_snapshot.update_timestamps() + exp_models.ExplorationSnapshotMetadataModel.put(exp_snapshot) + + with logging_swap: + exp_services.regenerate_missing_stats_for_exploration('ID') + self.assertEqual( + observed_log_messages, + [ + 'Exploration(id=\'ID\') snapshots contains invalid ' + 'commit_cmd: {}' + ] + ) + + def test_handle_state_name_is_not_found_in_state_stats_mapping( + self + ) -> None: + 
exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_default_exploration(exp_id, 'owner_id') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 1' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 2' + })], 'Changed title.') + exp_stats_list = ( + stats_services.get_multiple_exploration_stats_by_version( + exp_id, [1, 2, 3])) + assert exp_stats_list[0] is not None + exp_stats_list[0].state_stats_mapping['new'] = ( + exp_stats_list[0].state_stats_mapping['Introduction']) + del exp_stats_list[0].state_stats_mapping['Introduction'] + stats_services.save_stats_model(exp_stats_list[0]) + exp_stats_model_to_delete = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 3) + ) + assert exp_stats_model_to_delete is not None + exp_stats_model_to_delete.delete() + error_message = ( + r'Exploration\(id=.*, exp_version=1\) has no State\(name=.*\)') + with self.assertRaisesRegex(Exception, error_message): + exp_services.regenerate_missing_stats_for_exploration(exp_id) + + def test_handle_missing_exp_stats_for_reverted_exp_version(self) -> None: + exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_default_exploration(exp_id, 'owner_id') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 1' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 2' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': 
exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 3' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 4' + })], 'Changed title.') + exp_services.revert_exploration(owner_id, exp_id, 5, 4) + exp_stats_model_to_delete = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 6) + ) + assert exp_stats_model_to_delete is not None + exp_stats_model_to_delete.delete() + + self.assertItemsEqual( + exp_services.regenerate_missing_stats_for_exploration('ID1'), + ( + [ + 'ExplorationStats(exp_id=\'ID1\', exp_version=6)', + ], [], 5, 6 + ) + ) + + def test_handle_missing_state_stats_for_reverted_exp_version(self) -> None: + exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_default_exploration(exp_id, 'owner_id') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 1' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 2' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 3' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 4' + })], 'Changed title.') + exp_services.revert_exploration(owner_id, exp_id, 5, 4) + exp_stats = stats_services.get_exploration_stats_by_id(exp_id, 6) + assert exp_stats is not None + exp_stats.state_stats_mapping 
= {} + stats_services.save_stats_model(exp_stats) + + self.assertItemsEqual( + exp_services.regenerate_missing_stats_for_exploration('ID1'), + ( + [], [ + 'StateStats(exp_id=\'ID1\', exp_version=6, ' + 'state_name=\'Introduction\')' + ], 5, 6 + ) + ) + + def test_when_few_exp_stats_models_are_missing(self) -> None: + exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_default_exploration(exp_id, 'owner_id') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 1' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 2' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 3' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 4' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 5' + })], 'Changed title.') + + exp_stats_model_for_version_2 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 2) + ) + exp_stats_model_for_version_4 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 4) + ) + assert exp_stats_model_for_version_2 is not None + assert exp_stats_model_for_version_4 is not None + exp_stats_model_for_version_2.delete() + exp_stats_model_for_version_4.delete() + + self.assertItemsEqual( + exp_services.regenerate_missing_stats_for_exploration('ID1'), + ( + [ + 
'ExplorationStats(exp_id=\'ID1\', exp_version=2)', + 'ExplorationStats(exp_id=\'ID1\', exp_version=4)' + ], [], 4, 6 + ) + ) + + def test_when_v1_version_exp_stats_model_is_missing(self) -> None: + exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_default_exploration(exp_id, 'owner_id') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 1' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 2' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 3' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 4' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 5' + })], 'Changed title.') + exp_stats_model_for_version_1 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 1) + ) + assert exp_stats_model_for_version_1 is not None + exp_stats_model_for_version_1.delete() + + exp_stats_model_for_version_2 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 2) + ) + assert exp_stats_model_for_version_2 is not None + exp_stats_model_for_version_2.delete() + + exp_stats_model_for_version_3 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 3) + ) + assert exp_stats_model_for_version_3 is not None + exp_stats_model_for_version_3.delete() + + self.assertItemsEqual( + 
exp_services.regenerate_missing_stats_for_exploration('ID1'), + ( + [ + 'ExplorationStats(exp_id=\'ID1\', exp_version=1)', + 'ExplorationStats(exp_id=\'ID1\', exp_version=2)', + 'ExplorationStats(exp_id=\'ID1\', exp_version=3)' + ], [], 3, 6 + ) + ) + + def test_generate_exp_stats_when_revert_commit_is_present(self) -> None: + exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_default_exploration(exp_id, 'owner_id') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 1' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 2' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 3' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 4' + })], 'Changed title.') + exp_services.revert_exploration(owner_id, exp_id, 5, 3) + + exp_stats_model_for_version_1 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 1) + ) + assert exp_stats_model_for_version_1 is not None + exp_stats_model_for_version_1.delete() + + exp_stats_model_for_version_2 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 2) + ) + assert exp_stats_model_for_version_2 is not None + exp_stats_model_for_version_2.delete() + + self.assertItemsEqual( + exp_services.regenerate_missing_stats_for_exploration('ID1'), + ( + [ + 'ExplorationStats(exp_id=\'ID1\', exp_version=1)', + 'ExplorationStats(exp_id=\'ID1\', exp_version=2)' + ], [], 4, 6 + ) + ) + + def 
test_when_all_exp_stats_models_are_missing(self) -> None: + exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_default_exploration(exp_id, owner_id) + exp_stats_model_for_version_1 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 1) + ) + assert exp_stats_model_for_version_1 is not None + exp_stats_model_for_version_1.delete() + + with self.assertRaisesRegex( + Exception, 'No ExplorationStatsModels found'): + exp_services.regenerate_missing_stats_for_exploration('ID1') + + def test_when_few_state_stats_models_are_missing(self) -> None: + exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_default_exploration(exp_id, 'owner_id') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 1' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 2' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 3' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 4' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 5' + })], 'Changed title.') + exp_stats = stats_services.get_exploration_stats_by_id(exp_id, 2) + assert exp_stats is not None + exp_stats.state_stats_mapping = {} + stats_services.save_stats_model(exp_stats) + + self.assertItemsEqual( + 
exp_services.regenerate_missing_stats_for_exploration('ID1'), + ( + [], + [ + 'StateStats(exp_id=\'ID1\', exp_version=2, ' + 'state_name=\'Introduction\')' + ], 6, 5 + ) + ) + + def test_when_few_state_stats_models_are_missing_for_old_exps( + self + ) -> None: + exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_valid_exploration( + exp_id, owner_id, title='title', category='Category 1', + end_state_name='END', correctness_feedback_enabled=True) + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 2' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 3' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 4' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 5' + })], 'Changed title.') + exp_stats = stats_services.get_exploration_stats_by_id(exp_id, 2) + assert exp_stats is not None + exp_stats.state_stats_mapping = {} + stats_services.save_stats_model(exp_stats) + + self.assertItemsEqual( + exp_services.regenerate_missing_stats_for_exploration('ID1'), + ( + [], + [ + 'StateStats(exp_id=\'ID1\', exp_version=2, ' + 'state_name=\'Introduction\')', + 'StateStats(exp_id=\'ID1\', exp_version=2, ' + 'state_name=\'END\')', + ], 8, 5 + ) + ) diff --git a/core/domain/expression_parser.py b/core/domain/expression_parser.py index d514ae189d9e..d980f96bd958 100644 --- a/core/domain/expression_parser.py +++ b/core/domain/expression_parser.py @@ -36,26 
+36,30 @@ from core.constants import constants -_OPENING_PARENS = ['[', '{', '('] -_CLOSING_PARENS = [')', '}', ']'] -_VALID_OPERATORS = _OPENING_PARENS + _CLOSING_PARENS + ['+', '-', '/', '*', '^'] +from typing import Final, List, Optional -_TOKEN_CATEGORY_IDENTIFIER = 'identifier' -_TOKEN_CATEGORY_FUNCTION = 'function' -_TOKEN_CATEGORY_NUMBER = 'number' -_TOKEN_CATEGORY_OPERATOR = 'operator' +_OPENING_PARENS: List[str] = ['[', '{', '('] +_CLOSING_PARENS: List[str] = [')', '}', ']'] +_VALID_OPERATORS: List[str] = ( + _OPENING_PARENS + _CLOSING_PARENS + ['+', '-', '/', '*', '^'] +) -_OPENING_CATEGORIES = ( +_TOKEN_CATEGORY_IDENTIFIER: Final = 'identifier' +_TOKEN_CATEGORY_FUNCTION: Final = 'function' +_TOKEN_CATEGORY_NUMBER: Final = 'number' +_TOKEN_CATEGORY_OPERATOR: Final = 'operator' + +_OPENING_CATEGORIES: Final = ( _TOKEN_CATEGORY_IDENTIFIER, _TOKEN_CATEGORY_FUNCTION, _TOKEN_CATEGORY_NUMBER) -_CLOSING_CATEGORIES = ( +_CLOSING_CATEGORIES: Final = ( _TOKEN_CATEGORY_IDENTIFIER, _TOKEN_CATEGORY_NUMBER) -def contains_balanced_brackets(expression): +def contains_balanced_brackets(expression: str) -> bool: """Checks if the given expression contains a balanced bracket sequence. Args: @@ -78,9 +82,9 @@ def contains_balanced_brackets(expression): return len(stack) == 0 -def is_algebraic(expression): - """Checks if the given expression is algebraic. An algebraic expression must - contain at least one valid identifier (latin letter or greek symbol name). +def contains_at_least_one_variable(expression: str) -> bool: + """Checks if the given expression contains at least one valid identifier + (latin letter or greek symbol name). Args: expression: str. A math expression. @@ -100,7 +104,7 @@ def is_algebraic(expression): token.category == _TOKEN_CATEGORY_IDENTIFIER for token in token_list) -def tokenize(expression): +def tokenize(expression: str) -> List[Token]: """Splits the given expression into separate tokens based on the grammar definitions. 
@@ -178,7 +182,7 @@ def tokenize(expression): return final_token_list -def get_variables(expression): +def get_variables(expression: str) -> List[str]: """Extracts all variables along with pi and e from a given expression. Args: @@ -204,7 +208,7 @@ def get_variables(expression): class Token: """Class for tokens of the math expression.""" - def __init__(self, text): + def __init__(self, text: str) -> None: """Initializes a Token object. Args: @@ -227,7 +231,7 @@ def __init__(self, text): else: raise Exception('Invalid token: %s.' % text) - def is_function(self, text): + def is_function(self, text: str) -> bool: """Checks if the given token represents a valid math function. Args: @@ -238,7 +242,7 @@ def is_function(self, text): """ return text in constants.MATH_FUNCTION_NAMES - def is_identifier(self, text): + def is_identifier(self, text: str) -> bool: """Checks if the given token represents a valid identifier. A valid identifier could be a single latin letter (uppercase/lowercase) or a greek letter represented by the symbol name. @@ -251,7 +255,7 @@ def is_identifier(self, text): """ return text in constants.VALID_ALGEBRAIC_IDENTIFIERS - def is_number(self, text): + def is_number(self, text: str) -> bool: """Checks if the given token represents a valid real number without a '+'/'-' sign. 'pi' and 'e' are also considered as numeric values. @@ -263,7 +267,7 @@ def is_number(self, text): """ return text.replace('.', '', 1).isdigit() or text in ('pi', 'e') - def is_operator(self, text): + def is_operator(self, text: str) -> bool: """Checks if the given token represents a valid math operator. Args: @@ -286,7 +290,7 @@ class Node: the operator nodes, the class name should represent the type of operator. """ - def __init__(self, children): + def __init__(self, children: List[Node]) -> None: """Initializes a Node object. For ex. 'a + b' will have root node as '+' and children as ['a', 'b']. 
@@ -299,66 +303,66 @@ def __init__(self, children): class AdditionOperatorNode(Node): """Class representing the addition operator node.""" - def __init__(self, left, right): + def __init__(self, left: Node, right: Node) -> None: """Initializes an AdditionOperatorNode object. Args: left: Node. Left child of the operator. right: Node. Right child of the operator. """ - super(AdditionOperatorNode, self).__init__([left, right]) + super().__init__([left, right]) class SubtractionOperatorNode(Node): """Class representing the subtraction operator node.""" - def __init__(self, left, right): + def __init__(self, left: Node, right: Node) -> None: """Initializes an SubtractionOperatorNode object. Args: left: Node. Left child of the operator. right: Node. Right child of the operator. """ - super(SubtractionOperatorNode, self).__init__([left, right]) + super().__init__([left, right]) class MultiplicationOperatorNode(Node): """Class representing the multiplication operator node.""" - def __init__(self, left, right): + def __init__(self, left: Node, right: Node) -> None: """Initializes an MultiplicationOperatorNode object. Args: left: Node. Left child of the operator. right: Node. Right child of the operator. """ - super(MultiplicationOperatorNode, self).__init__([left, right]) + super().__init__([left, right]) class DivisionOperatorNode(Node): """Class representing the division operator node.""" - def __init__(self, left, right): + def __init__(self, left: Node, right: Node) -> None: """Initializes an DivisionOperatorNode object. Args: left: Node. Left child of the operator. right: Node. Right child of the operator. """ - super(DivisionOperatorNode, self).__init__([left, right]) + super().__init__([left, right]) class PowerOperatorNode(Node): """Class representing the power operator node.""" - def __init__(self, left, right): + def __init__(self, left: Node, right: Node) -> None: """Initializes an PowerOperatorNode object. Args: left: Node. Left child of the operator. 
right: Node. Right child of the operator. """ - super(PowerOperatorNode, self).__init__([left, right]) + super().__init__([left, right]) class IdentifierNode(Node): @@ -367,27 +371,27 @@ class IdentifierNode(Node): symbol name. """ - def __init__(self, token): + def __init__(self, token: Token) -> None: """Initializes an IdentifierNode object. Args: token: Token. The token representing the identifier. """ self.token = token - super(IdentifierNode, self).__init__([]) + super().__init__([]) class NumberNode(Node): """Class representing the number node.""" - def __init__(self, token): + def __init__(self, token: Token) -> None: """Initializes a NumberNode object. Args: token: Token. The token representing a real number. """ self.token = token - super(NumberNode, self).__init__([]) + super().__init__([]) class UnaryFunctionNode(Node): @@ -395,7 +399,7 @@ class UnaryFunctionNode(Node): class must have exactly one parameter. """ - def __init__(self, token, child): + def __init__(self, token: Token, child: Node) -> None: """Initializes a UnaryFunctionNode object. Args: @@ -403,7 +407,7 @@ def __init__(self, token, child): child: Node. The parameter of the function. """ self.token = token - super(UnaryFunctionNode, self).__init__([child]) + super().__init__([child]) class Parser: @@ -414,7 +418,7 @@ class Parser: https://en.wikipedia.org/wiki/Recursive_descent_parser """ - def __init__(self): + def __init__(self) -> None: """Initializes the Parser object.""" # Stores the index of the next token to be parsed. This attribute is # global to this class, i.e., all methods operate on the same instance @@ -422,7 +426,7 @@ def __init__(self): # upon parsing the current token from the token list. self._next_token_index = 0 - def parse(self, expression): + def parse(self, expression: str) -> Node: """A wrapper around the _parse_expr method. This method creates a list of tokens present in the expression and calls the _parse_expr method. 
@@ -454,7 +458,7 @@ def parse(self, expression): return self._parse_expr(token_list) - def _parse_expr(self, token_list): + def _parse_expr(self, token_list: List[Token]) -> Node: """Function representing the following production rule of the grammar: ::= (('+' | '-') )* @@ -478,7 +482,7 @@ def _parse_expr(self, token_list): ['+', '-'], token_list) return parsed_expr - def _parse_mul_expr(self, token_list): + def _parse_mul_expr(self, token_list: List[Token]) -> Node: """Function representing the following production rule of the grammar: ::= (('*' | '/') )* @@ -503,7 +507,7 @@ def _parse_mul_expr(self, token_list): ['*', '/'], token_list) return parsed_expr - def _parse_pow_expr(self, token_list): + def _parse_pow_expr(self, token_list: List[Token]) -> Node: """Function representing the following production rule of the grammar: ::= '-' | '+' | ('^' )? @@ -528,7 +532,7 @@ def _parse_pow_expr(self, token_list): return PowerOperatorNode(parsed_expr, parsed_right) return parsed_expr - def _parse_unit(self, token_list): + def _parse_unit(self, token_list: List[Token]) -> Node: """Function representing the following production rule of the grammar: ::= | | '(' ')' | '(' ')' @@ -550,20 +554,24 @@ def _parse_unit(self, token_list): if token.category == _TOKEN_CATEGORY_FUNCTION: if self._get_next_token_if_text_in(['('], token_list): parsed_child = self._parse_expr(token_list) - token = self._get_next_token_if_text_in([')'], token_list) - return UnaryFunctionNode(token, parsed_child) + next_token = self._get_next_token_if_text_in([')'], token_list) + # Here, we are asserting that next_token is never going to be + # None, because before reaching this line of code we are already + # checking if token exists or not with method `_get_next_token`. 
+ assert next_token is not None + return UnaryFunctionNode(next_token, parsed_child) if token.category == _TOKEN_CATEGORY_NUMBER: return NumberNode(token) if token.text == '(': parsed_expr = self._parse_expr(token_list) - token = self._get_next_token_if_text_in([')'], token_list) + next_token = self._get_next_token_if_text_in([')'], token_list) return parsed_expr raise Exception('Invalid token: %s.' % token.text) - def _get_next_token(self, token_list): + def _get_next_token(self, token_list: List[Token]) -> Token: """Function to retrieve the token at the next position and then increment the _next_token_index. @@ -584,7 +592,9 @@ def _get_next_token(self, token_list): raise Exception('Invalid syntax: Unexpected end of expression.') - def _get_next_token_if_text_in(self, allowed_token_texts, token_list): + def _get_next_token_if_text_in( + self, allowed_token_texts: List[str], token_list: List[Token] + ) -> Optional[Token]: """Function to verify that there is at least one more token remaining and that the next token text is among the allowed_token_texts provided. If true, returns the token; otherwise, returns None. @@ -609,7 +619,7 @@ def _get_next_token_if_text_in(self, allowed_token_texts, token_list): return None -def is_valid_expression(expression): +def is_valid_expression(expression: str) -> bool: """Checks if the given math expression is syntactically valid. Args: diff --git a/core/domain/expression_parser_test.py b/core/domain/expression_parser_test.py index 4de66914f7be..7fb4697e4725 100644 --- a/core/domain/expression_parser_test.py +++ b/core/domain/expression_parser_test.py @@ -23,11 +23,11 @@ class HelperFunctionsUnitTests(test_utils.GenericTestBase): - """Test the 'contains_balanced_brackets' and 'is_algebraic' helper - functions. + """Test the 'contains_balanced_brackets' and + 'contains_at_least_one_variable' helper functions. 
""" - def test_contains_balanced_brackets(self): + def test_contains_balanced_brackets(self) -> None: """Tests for contains_balanced_brackets method.""" self.assertTrue(expression_parser.contains_balanced_brackets('')) self.assertTrue(expression_parser.contains_balanced_brackets('a+2')) @@ -53,34 +53,46 @@ def test_contains_balanced_brackets(self): self.assertFalse(expression_parser.contains_balanced_brackets('4/{0/]')) self.assertFalse(expression_parser.contains_balanced_brackets('(a/2]')) - def test_is_algebraic(self): - """Tests for is_algebraic method.""" - self.assertTrue(expression_parser.is_algebraic('a^2.3')) - self.assertTrue(expression_parser.is_algebraic('abs(alpha)')) - self.assertTrue(expression_parser.is_algebraic('alpha/gamma')) - self.assertTrue(expression_parser.is_algebraic('A + 2/3')) + def test_contains_at_least_one_variable(self) -> None: + """Tests for contains_at_least_one_variable method.""" + self.assertTrue( + expression_parser.contains_at_least_one_variable('a^2.3')) + self.assertTrue( + expression_parser.contains_at_least_one_variable('abs(alpha)')) + self.assertTrue( + expression_parser.contains_at_least_one_variable('alpha/gamma')) + self.assertTrue( + expression_parser.contains_at_least_one_variable('A + 2/3')) # The following tests might seem as invalid but the individual letters # will be joined via '*' during tokenization which makes them valid. 
- self.assertTrue(expression_parser.is_algebraic('Alpha')) - self.assertTrue(expression_parser.is_algebraic('invalid + 2')) - self.assertTrue(expression_parser.is_algebraic('alpha + bet/22')) - - self.assertFalse(expression_parser.is_algebraic('1 + 2')) - self.assertFalse(expression_parser.is_algebraic('1^2^3/4')) - self.assertFalse(expression_parser.is_algebraic('1')) - self.assertFalse(expression_parser.is_algebraic('sqrt(4/4)')) - self.assertFalse(expression_parser.is_algebraic('tan(30)')) - - with self.assertRaisesRegexp(Exception, 'Invalid bracket pairing.'): - expression_parser.is_algebraic('1 +2)') - with self.assertRaisesRegexp(Exception, 'Invalid character: ~.'): - expression_parser.is_algebraic('a~2') - with self.assertRaisesRegexp(Exception, 'Invalid character: !.'): - expression_parser.is_algebraic('4! 2') - with self.assertRaisesRegexp(Exception, 'Invalid token: ..'): - expression_parser.is_algebraic('alpha + bet/22.3.4') - - def test_tokenize(self): + self.assertTrue( + expression_parser.contains_at_least_one_variable('Alpha')) + self.assertTrue( + expression_parser.contains_at_least_one_variable('invalid + 2')) + self.assertTrue( + expression_parser.contains_at_least_one_variable('alpha + bet/22')) + + self.assertFalse( + expression_parser.contains_at_least_one_variable('1 + 2')) + self.assertFalse( + expression_parser.contains_at_least_one_variable('1^2^3/4')) + self.assertFalse(expression_parser.contains_at_least_one_variable('1')) + self.assertFalse( + expression_parser.contains_at_least_one_variable('sqrt(4/4)')) + self.assertFalse( + expression_parser.contains_at_least_one_variable('tan(30)')) + + with self.assertRaisesRegex(Exception, 'Invalid bracket pairing.'): + expression_parser.contains_at_least_one_variable('1 +2)') + with self.assertRaisesRegex(Exception, 'Invalid character: ~.'): + expression_parser.contains_at_least_one_variable('a~2') + with self.assertRaisesRegex(Exception, 'Invalid character: !.'): + 
expression_parser.contains_at_least_one_variable('4! 2') + with self.assertRaisesRegex(Exception, 'Invalid token: ..'): + expression_parser.contains_at_least_one_variable( + 'alpha + bet/22.3.4') + + def test_tokenize(self) -> None: """Tests for tokenize method.""" expression = 'a+b' expected_output = ['a', '+', 'b'] @@ -233,20 +245,20 @@ def test_tokenize(self): lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) - with self.assertRaisesRegexp(Exception, 'Invalid token: ..'): + with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('a.3') - with self.assertRaisesRegexp(Exception, 'Invalid token: ..'): + with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('.3 - 2.4') - with self.assertRaisesRegexp(Exception, 'Invalid token: ..'): + with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('1.2.3 + 4/2') - with self.assertRaisesRegexp(Exception, 'Invalid token: ..'): + with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('a . . 
3') - with self.assertRaisesRegexp(Exception, 'Invalid token: ..'): + with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('3..4') - with self.assertRaisesRegexp(Exception, 'Invalid token: ..'): + with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('..5') - def test_get_variables(self): + def test_get_variables(self) -> None: """Tests for get_variables method.""" self.assertItemsEqual(expression_parser.get_variables('a^2.3'), ['a']) self.assertItemsEqual( @@ -281,18 +293,18 @@ def test_get_variables(self): class TokenUnitTests(test_utils.GenericTestBase): """Test the token module.""" - def test_is_function(self): + def test_is_function(self) -> None: """Tests for is_function method.""" self.assertEqual(expression_parser.Token('sqrt').category, 'function') self.assertEqual(expression_parser.Token('abs').category, 'function') self.assertEqual(expression_parser.Token('tan').category, 'function') - with self.assertRaisesRegexp(Exception, 'Invalid token: tan().'): + with self.assertRaisesRegex(Exception, 'Invalid token: tan().'): expression_parser.Token('tan()') - with self.assertRaisesRegexp(Exception, 'Invalid token: Sqrt.'): + with self.assertRaisesRegex(Exception, 'Invalid token: Sqrt.'): expression_parser.Token('Sqrt') - def test_is_identifier(self): + def test_is_identifier(self) -> None: """Tests for is_identifier method.""" self.assertEqual(expression_parser.Token('a').category, 'identifier') self.assertEqual(expression_parser.Token('a').category, 'identifier') @@ -300,12 +312,12 @@ def test_is_identifier(self): expression_parser.Token('alpha').category, 'identifier') self.assertEqual(expression_parser.Token('A').category, 'identifier') - with self.assertRaisesRegexp(Exception, 'Invalid token: al.'): + with self.assertRaisesRegex(Exception, 'Invalid token: al.'): expression_parser.Token('al') self.assertNotEqual( expression_parser.Token('5').category, 'identifier') - def test_is_number(self): 
+ def test_is_number(self) -> None: """Tests for is_number method.""" self.assertEqual(expression_parser.Token('1').category, 'number') self.assertEqual(expression_parser.Token('123').category, 'number') @@ -314,10 +326,10 @@ def test_is_number(self): self.assertEqual(expression_parser.Token('pi').category, 'number') self.assertEqual(expression_parser.Token('e').category, 'number') - with self.assertRaisesRegexp(Exception, 'Invalid token: 8.4.3.'): + with self.assertRaisesRegex(Exception, 'Invalid token: 8.4.3.'): expression_parser.Token('8.4.3') - def test_is_operator(self): + def test_is_operator(self) -> None: """Tests for is_operator method.""" self.assertEqual(expression_parser.Token('+').category, 'operator') self.assertEqual(expression_parser.Token('-').category, 'operator') @@ -331,7 +343,7 @@ def test_is_operator(self): class ParserUnitTests(test_utils.GenericTestBase): """Test the expression parser module.""" - def test_parse(self): + def test_parse(self) -> None: """Tests to check whether the following production rule is implemented correctly: ::= (('+' | '-') )* @@ -355,21 +367,21 @@ def test_parse(self): left_child_1, expression_parser.AdditionOperatorNode) self.assertEqual(len(left_child_1.children), 2) # Right child 1 {2}. - self.assertIsInstance(right_child_1, expression_parser.NumberNode) + assert isinstance(right_child_1, expression_parser.NumberNode) self.assertEqual(right_child_1.token.text, '2') self.assertEqual(len(right_child_1.children), 0) left_child_2, right_child_2 = left_child_1.children # Left child 2 {a}. - self.assertIsInstance(left_child_2, expression_parser.IdentifierNode) + assert isinstance(left_child_2, expression_parser.IdentifierNode) self.assertEqual(left_child_2.token.text, 'a') self.assertEqual(len(left_child_2.children), 0) # Right child 2 {b}. 
- self.assertIsInstance(right_child_2, expression_parser.IdentifierNode) + assert isinstance(right_child_2, expression_parser.IdentifierNode) self.assertEqual(right_child_2.token.text, 'b') self.assertEqual(len(right_child_2.children), 0) - def test_parse_mul_expr(self): + def test_parse_mul_expr(self) -> None: """Tests to check whether the following production rule is implemented correctly: ::= (('*' | '/') )* @@ -383,31 +395,31 @@ def test_parse_mul_expr(self): """ root_node = expression_parser.Parser().parse('a / b * 2') # Root node {*}. - self.assertIsInstance( + assert isinstance( root_node, expression_parser.MultiplicationOperatorNode) self.assertEqual(len(root_node.children), 2) left_child_1, right_child_1 = root_node.children # Left child 1 {/}. - self.assertIsInstance( + assert isinstance( left_child_1, expression_parser.DivisionOperatorNode) self.assertEqual(len(left_child_1.children), 2) # Right child 1 {2}. - self.assertIsInstance(right_child_1, expression_parser.NumberNode) + assert isinstance(right_child_1, expression_parser.NumberNode) self.assertEqual(right_child_1.token.text, '2') self.assertEqual(len(right_child_1.children), 0) left_child_2, right_child_2 = left_child_1.children # Left child 2 {a}. - self.assertIsInstance(left_child_2, expression_parser.IdentifierNode) + assert isinstance(left_child_2, expression_parser.IdentifierNode) self.assertEqual(left_child_2.token.text, 'a') self.assertEqual(len(left_child_2.children), 0) # Right child 2 {b}. 
- self.assertIsInstance(right_child_2, expression_parser.IdentifierNode) + assert isinstance(right_child_2, expression_parser.IdentifierNode) self.assertEqual(right_child_2.token.text, 'b') self.assertEqual(len(right_child_2.children), 0) - def test_parse_pow_expr(self): + def test_parse_pow_expr(self) -> None: """Tests to check whether the following production rule is implemented correctly: ::= '-' | '+' | @@ -422,30 +434,30 @@ def test_parse_pow_expr(self): """ root_node = expression_parser.Parser().parse('a ^ b ^ 2') # Root node {^}. - self.assertIsInstance(root_node, expression_parser.PowerOperatorNode) + assert isinstance(root_node, expression_parser.PowerOperatorNode) self.assertEqual(len(root_node.children), 2) left_child_1, right_child_1 = root_node.children # Left child 1 {a}. - self.assertIsInstance(left_child_1, expression_parser.IdentifierNode) + assert isinstance(left_child_1, expression_parser.IdentifierNode) self.assertEqual(left_child_1.token.text, 'a') self.assertEqual(len(left_child_1.children), 0) # Right child 1 {^}. - self.assertIsInstance( + assert isinstance( right_child_1, expression_parser.PowerOperatorNode) self.assertEqual(len(right_child_1.children), 2) left_child_2, right_child_2 = right_child_1.children # Left child 2 {b}. - self.assertIsInstance(left_child_2, expression_parser.IdentifierNode) + assert isinstance(left_child_2, expression_parser.IdentifierNode) self.assertEqual(left_child_2.token.text, 'b') self.assertEqual(len(left_child_2.children), 0) # Right child 2 {2}. 
- self.assertIsInstance(right_child_2, expression_parser.NumberNode) + assert isinstance(right_child_2, expression_parser.NumberNode) self.assertEqual(right_child_2.token.text, '2') self.assertEqual(len(right_child_2.children), 0) - def test_parse_unit(self): + def test_parse_unit(self) -> None: """Tests to check whether the following production rule is implemented correctly: ::= | | '(' ')' | @@ -460,26 +472,26 @@ def test_parse_unit(self): """ root_node = expression_parser.Parser().parse('sqrt(a*2)') # Root node {sqrt}. - self.assertIsInstance(root_node, expression_parser.UnaryFunctionNode) + assert isinstance(root_node, expression_parser.UnaryFunctionNode) self.assertEqual(len(root_node.children), 1) child_1 = root_node.children[0] # Child 1 {*}. - self.assertIsInstance( + assert isinstance( child_1, expression_parser.MultiplicationOperatorNode) self.assertEqual(len(child_1.children), 2) left_child_2, right_child_2 = child_1.children # Left child 2 {a}. - self.assertIsInstance(left_child_2, expression_parser.IdentifierNode) + assert isinstance(left_child_2, expression_parser.IdentifierNode) self.assertEqual(left_child_2.token.text, 'a') self.assertEqual(len(left_child_2.children), 0) # Right child 2 {2}. 
- self.assertIsInstance(right_child_2, expression_parser.NumberNode) + assert isinstance(right_child_2, expression_parser.NumberNode) self.assertEqual(right_child_2.token.text, '2') self.assertEqual(len(right_child_2.children), 0) - def test_validates_math_expression(self): + def test_validates_math_expression(self) -> None: """Tests whether the parser can validate math expressions.""" self.assertTrue(expression_parser.is_valid_expression('a+b')) self.assertTrue(expression_parser.is_valid_expression('a+(-b)')) diff --git a/core/domain/feedback_domain.py b/core/domain/feedback_domain.py index 3af2ae9c315f..a49685f0233b 100644 --- a/core/domain/feedback_domain.py +++ b/core/domain/feedback_domain.py @@ -16,19 +16,67 @@ from __future__ import annotations +import datetime + from core import utils -from core.domain import user_services + +from typing import Dict, List, Optional, TypedDict + + +class FeedbackThreadDict(TypedDict): + """Dict for FeedbackThread object.""" + + last_updated_msecs: float + original_author_id: str + state_name: Optional[str] + status: str + subject: str + summary: str + thread_id: str + message_count: int + last_nonempty_message_text: Optional[str] + last_nonempty_message_author_id: Optional[str] + + +class FeedbackMessageDict(TypedDict): + """Dict for FeedbackMessage object.""" + + author_id: str + created_on_msecs: float + entity_type: str + entity_id: str + message_id: int + text: str + updated_status: str + updated_subject: str + + +class FeedbackThreadSummaryDict(TypedDict): + """Dict for FeedbackThreadSummary object.""" + + status: str + original_author_id: str + last_updated_msecs: float + last_message_text: str + total_message_count: int + last_message_is_read: bool + second_last_message_is_read: bool + author_last_message: str + author_second_last_message: Optional[str] + exploration_title: str + exploration_id: str + thread_id: str class FeedbackThread: """Domain object for a feedback thread. Attributes: - id: str. 
The feedback thread ID. + thread_id: str. The feedback thread ID. entity_type: str. The type of entity the feedback thread is linked to. entity_id: str. The id of the entity. - state_name: str. The name of the state associated with - the feedback thread. + state_name: str|None. The name of the state associated with + the feedback thread or None, if no state is associated. original_author_id: str. The ID of the original author. status: str. The current status of the thread. Status should be one of core.storage.feedback.gae_models.STATUS_CHOICES. @@ -50,10 +98,22 @@ class FeedbackThread: """ def __init__( - self, thread_id, entity_type, entity_id, state_name, - original_author_id, status, subject, summary, has_suggestion, - message_count, created_on, last_updated, last_nonempty_message_text, - last_nonempty_message_author_id): + self, + thread_id: str, + entity_type: str, + entity_id: str, + state_name: Optional[str], + original_author_id: str, + status: str, + subject: str, + summary: str, + has_suggestion: bool, + message_count: int, + created_on: datetime.datetime, + last_updated: datetime.datetime, + last_nonempty_message_text: Optional[str] = None, + last_nonempty_message_author_id: Optional[str] = None + ) -> None: """Initializes a FeedbackThread object.""" self.id = thread_id @@ -72,7 +132,7 @@ def __init__( self.last_nonempty_message_text = last_nonempty_message_text self.last_nonempty_message_author_id = last_nonempty_message_author_id - def to_dict(self): + def to_dict(self) -> FeedbackThreadDict: """Returns a dict representation of this FeedbackThread object. 
Returns: @@ -81,9 +141,7 @@ def to_dict(self): return { 'last_updated_msecs': ( utils.get_time_in_millisecs(self.last_updated)), - 'original_author_username': ( - user_services.get_username(self.original_author_id) - if self.original_author_id else None), + 'original_author_id': self.original_author_id, 'state_name': self.state_name, 'status': self.status, 'subject': self.subject, @@ -91,12 +149,11 @@ def to_dict(self): 'thread_id': self.id, 'message_count': self.message_count, 'last_nonempty_message_text': self.last_nonempty_message_text, - 'last_nonempty_message_author': ( - user_services.get_username(self.last_nonempty_message_author_id) - if self.last_nonempty_message_author_id else None), + 'last_nonempty_message_author_id': ( + self.last_nonempty_message_author_id), } - def _get_full_message_id(self, message_id): + def _get_full_message_id(self, message_id: int) -> str: """Returns the full id of the message. Args: @@ -108,7 +165,7 @@ def _get_full_message_id(self, message_id): """ return '.'.join([self.id, str(message_id)]) - def get_last_two_message_ids(self): + def get_last_two_message_ids(self) -> List[Optional[str]]: """Returns the full message ids of the last two messages of the thread. If the thread has only one message, the id of the second last message is None. @@ -130,7 +187,7 @@ class FeedbackMessage: full_message_id: str. The ID of the feedback message. thread_id: str. The ID of the feedback thread containing this message. - message_id: str. The ID of the feedback thread message. + message_id: int. The ID of the feedback thread message. author_id: str. The ID of the message's author. updated_status: str. The new status of the feedback thread. updated_subject: str. The new feedback thread subject. 
@@ -143,9 +200,18 @@ class FeedbackMessage: """ def __init__( - self, full_message_id, thread_id, message_id, author_id, - updated_status, updated_subject, text, created_on, - last_updated, received_via_email): + self, + full_message_id: str, + thread_id: str, + message_id: int, + author_id: str, + updated_status: str, + updated_subject: str, + text: str, + created_on: datetime.datetime, + last_updated: datetime.datetime, + received_via_email: bool + ) -> None: self.id = full_message_id self.thread_id = thread_id self.message_id = message_id @@ -158,7 +224,7 @@ def __init__( self.received_via_email = received_via_email @property - def entity_id(self): + def entity_id(self) -> str: """Returns the entity ID corresponding to this FeedbackMessage instance. Returns: @@ -167,7 +233,7 @@ def entity_id(self): return self.id.split('.')[1] @property - def entity_type(self): + def entity_type(self) -> str: """Returns the entity type corresponding to this FeedbackMessage instance. @@ -176,16 +242,14 @@ def entity_type(self): """ return self.id.split('.')[0] - def to_dict(self): + def to_dict(self) -> FeedbackMessageDict: """Returns a dict representation of this FeedbackMessage object. Returns: dict. Dict representation of the FeedbackMessage object. """ return { - 'author_username': ( - user_services.get_username(self.author_id) - if self.author_id else None), + 'author_id': self.author_id, 'created_on_msecs': utils.get_time_in_millisecs(self.created_on), 'entity_type': self.entity_type, 'entity_id': self.entity_id, @@ -202,10 +266,14 @@ class FullyQualifiedMessageIdentifier: Attributes: thread_id: str. The ID of the thread. - message_id: str. The ID of a message beloning to the thread. + message_id: int. The ID of a message belonging to the thread.
""" - def __init__(self, thread_id, message_id): + def __init__( + self, + thread_id: str, + message_id: int + ) -> None: self.thread_id = thread_id self.message_id = message_id @@ -223,7 +291,12 @@ class FeedbackAnalytics: """ def __init__( - self, entity_type, entity_id, num_open_threads, num_total_threads): + self, + entity_type: str, + entity_id: str, + num_open_threads: int, + num_total_threads: int + ) -> None: """Initializes a FeedbackAnalytics object.""" self.id = entity_id @@ -231,7 +304,7 @@ def __init__( self.num_open_threads = num_open_threads self.num_total_threads = num_total_threads - def to_dict(self): + def to_dict(self) -> Dict[str, int]: """Returns the numbers of threads in the FeedbackAnalytics object. Attributes: @@ -244,6 +317,15 @@ def to_dict(self): } +class FeedbackMessageReferenceDict(TypedDict): + """Dict for FeedbackMessageReference object.""" + + entity_type: str + entity_id: str + thread_id: str + message_id: int + + class FeedbackMessageReference: """Domain object for feedback message references. @@ -251,17 +333,23 @@ class FeedbackMessageReference: entity_type: str. The type of entity the feedback thread is linked to. entity_id: str. The id of the entity. thread_id: str. The ID of the feedback thread. - message_id: str. The ID of the feedback thread message. + message_id: int. The ID of the feedback thread message. """ - def __init__(self, entity_type, entity_id, thread_id, message_id): + def __init__( + self, + entity_type: str, + entity_id: str, + thread_id: str, + message_id: int + ) -> None: """Initializes FeedbackMessageReference object.""" self.entity_type = entity_type self.entity_id = entity_id self.thread_id = thread_id self.message_id = message_id - def to_dict(self): + def to_dict(self) -> FeedbackMessageReferenceDict: """Returns dict representation of the FeedbackMessageReference object. Returns: @@ -290,7 +378,7 @@ class FeedbackThreadSummary: read by the user, author_last_message: str. 
The name of the author of the last message. author_second_last_message: str. The name of the author of the second - last message. + last message and None if no second-to-last message exists. exploration_title: str. The title of the exploration to which exploration belongs. exploration_id: str. The id of the exploration associated to the thread. @@ -298,11 +386,20 @@ class FeedbackThreadSummary: """ def __init__( - self, status, original_author_id, last_updated, last_message_text, - total_message_count, last_message_is_read, - second_last_message_is_read, author_last_message, - author_second_last_message, exploration_title, exploration_id, - thread_id): + self, + status: str, + original_author_id: str, + last_updated: datetime.datetime, + last_message_text: str, + total_message_count: int, + last_message_is_read: bool, + second_last_message_is_read: bool, + author_last_message: str, + author_second_last_message: Optional[str], + exploration_title: str, + exploration_id: str, + thread_id: str + ) -> None: self.status = status self.original_author_id = original_author_id self.last_updated = last_updated @@ -316,7 +413,7 @@ def __init__( self.exploration_id = exploration_id self.thread_id = thread_id - def to_dict(self): + def to_dict(self) -> FeedbackThreadSummaryDict: """Returns dict representation of the FeedbackThreadSummary object. 
Returns: diff --git a/core/domain/feedback_domain_test.py b/core/domain/feedback_domain_test.py index eb6c01c36186..06d1b836fb48 100644 --- a/core/domain/feedback_domain_test.py +++ b/core/domain/feedback_domain_test.py @@ -28,25 +28,25 @@ class FeedbackThreadDomainUnitTests(test_utils.GenericTestBase): EXP_ID = 'exp0' THREAD_ID = 'exp0.thread0' - def setUp(self): - super(FeedbackThreadDomainUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL) - def test_to_dict(self): + def test_to_dict(self) -> None: fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) - expected_thread_dict = { + expected_thread_dict: feedback_domain.FeedbackThreadDict = { 'thread_id': self.THREAD_ID, 'status': u'open', 'state_name': u'a_state_name', - 'summary': None, - 'original_author_username': self.VIEWER_USERNAME, + 'summary': 'test summary', + 'original_author_id': self.viewer_id, 'message_count': 1, 'subject': u'a subject', 'last_updated_msecs': utils.get_time_in_millisecs(fake_date), 'last_nonempty_message_text': 'last message', - 'last_nonempty_message_author': self.VIEWER_USERNAME, + 'last_nonempty_message_author_id': self.viewer_id, } observed_thread = feedback_domain.FeedbackThread( self.THREAD_ID, feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID, @@ -57,25 +57,31 @@ def test_to_dict(self): self.assertDictEqual( expected_thread_dict, observed_thread.to_dict()) - def test_get_last_two_message_ids_from_thread_with_many_messages(self): + def test_get_last_two_message_ids_from_thread_with_many_messages( + self + ) -> None: fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) thread = feedback_domain.FeedbackThread( self.THREAD_ID, feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID, - u'a_state_name', self.viewer_id, u'open', u'a subject', None, False, + u'a_state_name', self.viewer_id, u'open', u'a subject', # This value of "5" decides the number of messages. 
- 5, fake_date, fake_date, 'last message', self.VIEWER_USERNAME) + 'test summary', False, 5, fake_date, fake_date, 'last message', + self.VIEWER_USERNAME) self.assertEqual( thread.get_last_two_message_ids(), ['exp0.thread0.4', 'exp0.thread0.3']) - def test_get_last_two_message_ids_from_thread_with_only_one_message(self): + def test_get_last_two_message_ids_from_thread_with_only_one_message( + self + ) -> None: fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) thread = feedback_domain.FeedbackThread( self.THREAD_ID, feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID, - u'a_state_name', self.viewer_id, u'open', u'a subject', None, False, + u'a_state_name', self.viewer_id, u'open', u'a subject', # This value of "1" decides the number of messages. - 1, fake_date, fake_date, 'last message', self.VIEWER_USERNAME) + 'test summary', False, 1, fake_date, fake_date, 'last message', + self.VIEWER_USERNAME) self.assertEqual( thread.get_last_two_message_ids(), ['exp0.thread0.0', None]) @@ -83,19 +89,19 @@ def test_get_last_two_message_ids_from_thread_with_only_one_message(self): class FeedbackMessageDomainUnitTests(test_utils.GenericTestBase): EXP_ID = 'exp0' - MESSAGE_ID = 'message0' + MESSAGE_ID = 0 THREAD_ID = 'exploration.exp0.thread0' - FULL_MESSAGE_ID = THREAD_ID + '.' + MESSAGE_ID + FULL_MESSAGE_ID = THREAD_ID + '.' 
+ str(MESSAGE_ID) - def setUp(self): - super(FeedbackMessageDomainUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) - def test_to_dict(self): + def test_to_dict(self) -> None: fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) - expected_message_dict = { - 'author_username': self.OWNER_USERNAME, + expected_message_dict: feedback_domain.FeedbackMessageDict = { + 'author_id': self.owner_id, 'created_on_msecs': utils.get_time_in_millisecs(fake_date), 'entity_type': feconf.ENTITY_TYPE_EXPLORATION, 'entity_id': self.EXP_ID, @@ -116,7 +122,7 @@ def test_to_dict(self): class FeedbackAnalyticsDomainUnitTests(test_utils.GenericTestBase): EXP_ID = 'exp0' - def test_to_dict(self): + def test_to_dict(self) -> None: expected_thread_analytics = feedback_domain.FeedbackAnalytics( feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID, 1, 2) self.assertDictEqual(expected_thread_analytics.to_dict(), { @@ -127,14 +133,16 @@ def test_to_dict(self): class FeedbackMessageReferenceDomainTests(test_utils.GenericTestBase): - def setUp(self): - super(FeedbackMessageReferenceDomainTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.exp_id = 'exp' - self.message_id = 'message' + self.message_id = 10 self.thread_id = 'exp.thread' - def test_to_dict(self): - expected_feedback_message_reference = { + def test_to_dict(self) -> None: + expected_feedback_message_reference: ( + feedback_domain.FeedbackMessageReferenceDict + ) = { 'entity_type': feconf.ENTITY_TYPE_EXPLORATION, 'entity_id': self.exp_id, 'thread_id': self.thread_id, diff --git a/core/domain/feedback_services.py b/core/domain/feedback_services.py index 1a6d5dbc8828..5d9967b323ca 100644 --- a/core/domain/feedback_services.py +++ b/core/domain/feedback_services.py @@ -22,7 +22,6 @@ import itertools from core import feconf -from core import python_utils from core.domain import email_manager from 
core.domain import feedback_domain from core.domain import rights_manager @@ -31,35 +30,53 @@ from core.domain import user_services from core.platform import models +from typing import Dict, Final, List, Optional, Tuple, Type, cast + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + from mypy_imports import exp_models + from mypy_imports import feedback_models + from mypy_imports import question_models + from mypy_imports import skill_models + from mypy_imports import suggestion_models + from mypy_imports import topic_models + from mypy_imports import transaction_services + ( - email_models, expl_models, feedback_models, - question_models, skill_models, suggestion_models, + base_models, + exp_models, + feedback_models, + question_models, + skill_models, + suggestion_models, topic_models ) = models.Registry.import_models([ - models.NAMES.email, models.NAMES.exploration, models.NAMES.feedback, - models.NAMES.question, models.NAMES.skill, models.NAMES.suggestion, - models.NAMES.topic + models.Names.BASE_MODEL, + models.Names.EXPLORATION, + models.Names.FEEDBACK, + models.Names.QUESTION, + models.Names.SKILL, + models.Names.SUGGESTION, + models.Names.TOPIC ]) datastore_services = models.Registry.import_datastore_services() transaction_services = models.Registry.import_transaction_services() -DEFAULT_SUGGESTION_THREAD_SUBJECT = 'Suggestion from a learner' -DEFAULT_SUGGESTION_THREAD_INITIAL_MESSAGE = '' - -TARGET_TYPE_TO_TARGET_MODEL = { - feconf.ENTITY_TYPE_EXPLORATION: ( - expl_models.ExplorationModel), - feconf.ENTITY_TYPE_QUESTION: ( - question_models.QuestionModel), - feconf.ENTITY_TYPE_SKILL: ( - skill_models.SkillModel), - feconf.ENTITY_TYPE_TOPIC: ( - topic_models.TopicModel) +DEFAULT_SUGGESTION_THREAD_SUBJECT: Final = 'Suggestion from a learner' +DEFAULT_SUGGESTION_THREAD_INITIAL_MESSAGE: Final = '' + +TARGET_TYPE_TO_TARGET_MODEL: Dict[str, Type[base_models.BaseModel]] = { + 
feconf.ENTITY_TYPE_EXPLORATION: exp_models.ExplorationModel, + feconf.ENTITY_TYPE_QUESTION: question_models.QuestionModel, + feconf.ENTITY_TYPE_SKILL: skill_models.SkillModel, + feconf.ENTITY_TYPE_TOPIC: topic_models.TopicModel } -def get_exp_id_from_thread_id(thread_id): +def get_exp_id_from_thread_id(thread_id: str) -> str: """Returns the exploration_id part of the thread_id. TODO(#8370): Once feedback threads are generalized, this function needs to @@ -77,14 +94,20 @@ def get_exp_id_from_thread_id(thread_id): def _create_models_for_thread_and_first_message( - entity_type, entity_id, original_author_id, subject, text, - has_suggestion): + entity_type: str, + entity_id: str, + original_author_id: Optional[str], + subject: str, + text: str, + has_suggestion: bool +) -> str: """Creates a feedback thread and its first message. Args: entity_type: str. The type of entity the feedback thread is linked to. entity_id: str. The id of the entity. - original_author_id: str. The author id who starts this thread. + original_author_id: str|None. The author id who starts this thread, or + None if the author is anonymous. subject: str. The subject of this thread. text: str. The text of the feedback message. This may be ''. has_suggestion: bool. Whether this thread has a related learner @@ -116,14 +139,20 @@ def _create_models_for_thread_and_first_message( def create_thread( - entity_type, entity_id, original_author_id, subject, text, - has_suggestion=False): + entity_type: str, + entity_id: str, + original_author_id: Optional[str], + subject: str, + text: str, + has_suggestion: bool = False +) -> str: """Creates a thread and its first message. Args: entity_type: str. The type of entity the feedback thread is linked to. entity_id: str. The id of the entity. - original_author_id: str. The author id who starts this thread. + original_author_id: str|None. The author id who starts this thread, or + None if the author is anonymous. subject: str. The subject of this thread. text: str. 
The text of the feedback message. This may be ''. has_suggestion: bool. Whether the thread has a suggestion attached to @@ -138,14 +167,21 @@ def create_thread( def create_message( - thread_id, author_id, updated_status, updated_subject, text, - received_via_email=False, should_send_email=True): + thread_id: str, + author_id: Optional[str], + updated_status: Optional[str], + updated_subject: Optional[str], + text: str, + received_via_email: bool = False, + should_send_email: bool = True +) -> feedback_domain.FeedbackMessage: """Creates a new message for the thread and subscribes the author to the thread. Args: thread_id: str. The thread id the message belongs to. - author_id: str. The author id who creates this message. + author_id: str|None. The author id who creates this message, or None + if the author is anonymous. updated_status: str|None. One of STATUS_CHOICES. New thread status. Must be supplied if this is the first message of a thread. For the rest of the thread, should exist only when the status changes. @@ -172,14 +208,21 @@ def create_message( def create_messages( - thread_ids, author_id, updated_status, updated_subject, text, - received_via_email=False, should_send_email=True): + thread_ids: List[str], + author_id: Optional[str], + updated_status: Optional[str], + updated_subject: Optional[str], + text: str, + received_via_email: bool = False, + should_send_email: bool = True +) -> List[feedback_domain.FeedbackMessage]: """Creates a new message for each of the distinct threads in thread_ids and for each message, subscribes the author to the thread. Args: thread_ids: list(str). The thread ids to append the messages to. - author_id: str. The id of the author who creates the messages. + author_id: str|None. The id of the author who creates the messages, or + None if the author is anonymous. updated_status: str|None. One of STATUS_CHOICES. Applied to each thread. Must be supplied if this is the first message of the threads. 
Otherwise, this property should only exist when the status @@ -210,12 +253,18 @@ def create_messages( # Get the threads at the outset, in order to check that there are models # corresponding to each of the thread_ids. - thread_models = feedback_models.GeneralFeedbackThreadModel.get_multi( - thread_ids) + thread_models_with_none = ( + feedback_models.GeneralFeedbackThreadModel.get_multi( + thread_ids + ) + ) + thread_models: List[feedback_models.GeneralFeedbackThreadModel] = [] thread_ids_that_do_not_have_models = [] - for index, thread_model in enumerate(thread_models): + for index, thread_model in enumerate(thread_models_with_none): if thread_model is None: thread_ids_that_do_not_have_models.append(thread_ids[index]) + else: + thread_models.append(thread_model) if len(thread_ids_that_do_not_have_models) > 0: multiple_thread_models_are_missing = ( len(thread_ids_that_do_not_have_models) > 1 @@ -240,7 +289,7 @@ def create_messages( # Create a list of FullyQualifiedMessageIdentifier objects so that each # (thread_id, message_id) pair is kept together. message_identifiers = [] - for thread_id, message_id in python_utils.ZIP(thread_ids, message_ids): + for thread_id, message_id in zip(thread_ids, message_ids): message_identifiers.append( feedback_domain.FullyQualifiedMessageIdentifier( thread_id, message_id)) @@ -332,6 +381,7 @@ def create_messages( if (feconf.CAN_SEND_EMAILS and ( feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS and + author_id is not None and user_services.is_user_registered(author_id)) and # TODO(#12079): Figure out a better way to avoid sending feedback # thread emails for contributor dashboard suggestions. @@ -356,7 +406,9 @@ def create_messages( return feedback_messages -def _get_threads_user_info_keys(thread_ids): +def _get_threads_user_info_keys( + thread_ids: List[str] +) -> List[datastore_services.Key]: """Gets the feedback thread user model keys belonging to thread. Args: @@ -367,15 +419,20 @@ def _get_threads_user_info_keys(thread_ids): model. 
""" if thread_ids: - return feedback_models.GeneralFeedbackThreadUserModel.query( + datastore_keys = feedback_models.GeneralFeedbackThreadUserModel.query( feedback_models.GeneralFeedbackThreadUserModel.thread_id.IN( thread_ids) ).fetch(keys_only=True) + # Here, we are narrowing down the type from sequence to list. + assert isinstance(datastore_keys, list) + return datastore_keys else: return [] -def delete_threads_for_multiple_entities(entity_type, entity_ids): +def delete_threads_for_multiple_entities( + entity_type: str, entity_ids: List[str] +) -> None: """Deletes a thread, its messages and thread user models. When the thread belongs to exploration deletes feedback analytics. When the thread has a suggestion deletes the suggestion. @@ -417,7 +474,9 @@ def delete_threads_for_multiple_entities(entity_type, entity_ids): datastore_services.delete_multi(model_keys) -def update_messages_read_by_the_user(user_id, thread_id, message_ids): +def update_messages_read_by_the_user( + user_id: str, thread_id: str, message_ids: List[int] +) -> None: """Replaces the list of message ids read by the message ids given to the function. @@ -437,7 +496,10 @@ def update_messages_read_by_the_user(user_id, thread_id, message_ids): feedback_thread_user_model.put() -def add_message_ids_to_read_by_list(user_id, message_identifiers): +def add_message_ids_to_read_by_list( + user_id: str, + message_identifiers: List[feedback_domain.FullyQualifiedMessageIdentifier] +) -> None: """Adds the given message IDs to the list of message IDs read by the user. Args: @@ -513,7 +575,9 @@ def add_message_ids_to_read_by_list(user_id, message_identifiers): current_feedback_thread_user_models) -def _get_message_from_model(message_model): +def _get_message_from_model( + message_model: feedback_models.GeneralFeedbackMessageModel +) -> feedback_domain.FeedbackMessage: """Converts the FeedbackMessageModel to a FeedbackMessage. 
Args: @@ -531,7 +595,7 @@ def _get_message_from_model(message_model): message_model.received_via_email) -def get_messages(thread_id): +def get_messages(thread_id: str) -> List[feedback_domain.FeedbackMessage]: """Fetches all messages of the given thread. Args: @@ -547,7 +611,9 @@ def get_messages(thread_id): ] -def get_message(thread_id, message_id): +def get_message( + thread_id: str, message_id: int +) -> feedback_domain.FeedbackMessage: """Fetches the message indexed by thread_id and message_id. Args: @@ -562,7 +628,9 @@ def get_message(thread_id, message_id): def get_next_page_of_all_feedback_messages( - page_size=feconf.FEEDBACK_TAB_PAGE_SIZE, urlsafe_start_cursor=None): + page_size: int = feconf.FEEDBACK_TAB_PAGE_SIZE, + urlsafe_start_cursor: Optional[str] = None +) -> Tuple[List[feedback_domain.FeedbackMessage], Optional[str], bool]: """Fetches a single page from the list of all feedback messages that have been posted to any exploration on the site. @@ -578,7 +646,7 @@ def get_next_page_of_all_feedback_messages( messages_on_page: list(FeedbackMessage). Contains the slice of messages that are part of the page pointed to by the given start cursor. - next_urlsafe_start_cursor: str. The cursor to the next page. + next_urlsafe_start_cursor: str|None. The cursor to the next page. more: bool. Whether there are more messages available to fetch after this batch. """ @@ -589,7 +657,9 @@ def get_next_page_of_all_feedback_messages( return (messages_on_page, next_urlsafe_start_cursor, more) -def get_thread_analytics_multi(exploration_ids): +def get_thread_analytics_multi( + exploration_ids: List[str] +) -> List[feedback_domain.FeedbackAnalytics]: """Fetches all FeedbackAnalytics, for all the given exploration ids. 
A FeedbackAnalytics contains the exploration id the analytics belongs to, @@ -611,12 +681,14 @@ def get_thread_analytics_multi(exploration_ids): feconf.ENTITY_TYPE_EXPLORATION, exp_id, model.num_open_threads if model is not None else 0, model.num_total_threads if model is not None else 0) - for exp_id, model in python_utils.ZIP( + for exp_id, model in zip( exploration_ids, feedback_thread_analytics_models) ] -def get_thread_analytics(exploration_id): +def get_thread_analytics( + exploration_id: str +) -> feedback_domain.FeedbackAnalytics: """Fetches the FeedbackAnalytics for the given exploration. Args: @@ -628,7 +700,9 @@ def get_thread_analytics(exploration_id): return get_thread_analytics_multi([exploration_id])[0] -def get_total_open_threads(feedback_analytics_list): +def get_total_open_threads( + feedback_analytics_list: List[feedback_domain.FeedbackAnalytics] +) -> int: """Gets the count of all open threads from the given list of FeedbackAnalytics domain objects. @@ -643,7 +717,9 @@ def get_total_open_threads(feedback_analytics_list): return sum(a.num_open_threads for a in feedback_analytics_list) -def get_multiple_threads(thread_ids): +def get_multiple_threads( + thread_ids: List[str] +) -> List[feedback_domain.FeedbackThread]: """Gets multiple feedback threads. Args: @@ -655,11 +731,15 @@ def get_multiple_threads(thread_ids): return [ _get_thread_from_model(model) for model in feedback_models.GeneralFeedbackThreadModel.get_multi( - thread_ids) + thread_ids + ) + if model is not None ] -def _get_thread_from_model(thread_model): +def _get_thread_from_model( + thread_model: feedback_models.GeneralFeedbackThreadModel +) -> feedback_domain.FeedbackThread: """Converts the given FeedbackThreadModel to a FeedbackThread object. 
Args: @@ -682,7 +762,9 @@ def _get_thread_from_model(thread_model): thread_model.last_nonempty_message_author_id) -def get_exp_thread_summaries(user_id, thread_ids): +def get_exp_thread_summaries( + user_id: str, thread_ids: List[str] +) -> Tuple[List[feedback_domain.FeedbackThreadSummary], int]: """Returns a list of summaries corresponding to the exploration threads from the given thread ids. Non-exploration threads are not included in the list. It also returns the number of threads that are currently not read by the @@ -714,17 +796,32 @@ def get_exp_thread_summaries(user_id, thread_ids): ] exp_model_ids = [model.entity_id for model in exp_thread_models] - exp_thread_user_models, exp_models = ( - datastore_services.fetch_multiple_entities_by_ids_and_models([ - ('GeneralFeedbackThreadUserModel', exp_thread_user_model_ids), - ('ExplorationModel', exp_model_ids), - ])) + # Here we use cast because we are narrowing down the return type + # of following method from List[Optional[Model]] to List[Optional[ + # exp_models.ExplorationModel]]. 
+ exp_thread_user_models, exploration_models = ( + cast( + Tuple[ + List[Optional[feedback_models.GeneralFeedbackThreadUserModel]], + List[Optional[exp_models.ExplorationModel]] + ], + datastore_services.fetch_multiple_entities_by_ids_and_models( + [ + ( + 'GeneralFeedbackThreadUserModel', + exp_thread_user_model_ids + ), + ('ExplorationModel', exp_model_ids) + ] + ) + ) + ) threads = [_get_thread_from_model(m) for m in exp_thread_models] flattened_last_two_message_models_of_threads = ( feedback_models.GeneralFeedbackMessageModel.get_multi( - itertools.chain.from_iterable( - t.get_last_two_message_ids() for t in threads))) + list(itertools.chain.from_iterable( + t.get_last_two_message_ids() for t in threads)))) last_two_message_models_of_threads = [ flattened_last_two_message_models_of_threads[i:i + 2] for i in range(0, len(flattened_last_two_message_models_of_threads), 2) @@ -733,9 +830,9 @@ def get_exp_thread_summaries(user_id, thread_ids): thread_summaries = [] number_of_unread_threads = 0 for thread, last_two_message_models, thread_user_model, exp_model in ( - python_utils.ZIP( + zip( threads, last_two_message_models_of_threads, - exp_thread_user_models, exp_models)): + exp_thread_user_models, exploration_models)): message_ids_read_by_user = ( () if thread_user_model is None else thread_user_model.message_ids_read_by_user) @@ -743,6 +840,8 @@ def get_exp_thread_summaries(user_id, thread_ids): last_message_model, second_last_message_model = last_two_message_models # We don't need to check if the last message is None because all threads # have at least one message. + # Ruling out the possibility of None for mypy type checking. 
+ assert last_message_model is not None last_message_is_read = ( last_message_model.message_id in message_ids_read_by_user) author_last_message = ( @@ -752,11 +851,19 @@ def get_exp_thread_summaries(user_id, thread_ids): second_last_message_is_read = ( second_last_message_model is not None and second_last_message_model.message_id in message_ids_read_by_user) - author_second_last_message = ( - second_last_message_model and - second_last_message_model.author_id and - user_services.get_username(second_last_message_model.author_id)) - + author_second_last_message = None + if second_last_message_model is not None: + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated + # with Any return type. Once we have proper return type we can + # remove this. + author_id: str = second_last_message_model.author_id + author_second_last_message = ( + author_id and + user_services.get_username(author_id) + ) + # Ruling out the possibility of None for mypy type checking. + assert exp_model is not None if not last_message_is_read: number_of_unread_threads += 1 thread_summaries.append( @@ -769,7 +876,9 @@ def get_exp_thread_summaries(user_id, thread_ids): return thread_summaries, number_of_unread_threads -def get_threads(entity_type, entity_id): +def get_threads( + entity_type: str, entity_id: str +) -> List[feedback_domain.FeedbackThread]: """Fetches all the threads for the given entity id. Args: @@ -784,7 +893,7 @@ def get_threads(entity_type, entity_id): return [_get_thread_from_model(m) for m in thread_models] -def get_thread(thread_id): +def get_thread(thread_id: str) -> feedback_domain.FeedbackThread: """Fetches the thread by thread id. 
Args: @@ -797,7 +906,9 @@ def get_thread(thread_id): feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)) -def get_closed_threads(entity_type, entity_id, has_suggestion): +def get_closed_threads( + entity_type: str, entity_id: str, has_suggestion: bool +) -> List[feedback_domain.FeedbackThread]: """Fetches all closed threads of the given entity id. Args: @@ -818,7 +929,9 @@ def get_closed_threads(entity_type, entity_id, has_suggestion): ] -def get_all_threads(entity_type, entity_id, has_suggestion): +def get_all_threads( + entity_type: str, entity_id: str, has_suggestion: bool +) -> List[feedback_domain.FeedbackThread]: """Fetches all threads (regardless of their status) that correspond to the given entity id. @@ -838,7 +951,7 @@ def get_all_threads(entity_type, entity_id, has_suggestion): ] -def enqueue_feedback_message_batch_email_task(user_id): +def enqueue_feedback_message_batch_email_task(user_id: str) -> None: """Adds a 'send feedback email' (batch) task into the task queue. Args: @@ -850,7 +963,8 @@ def enqueue_feedback_message_batch_email_task(user_id): def enqueue_feedback_message_instant_email_task_transactional( - user_id, reference): + user_id: str, reference: feedback_domain.FeedbackMessageReference +) -> None: """Adds a 'send feedback email' (instant) task into the task queue. Args: @@ -868,7 +982,11 @@ def enqueue_feedback_message_instant_email_task_transactional( @transaction_services.run_in_transaction_wrapper def _enqueue_feedback_thread_status_change_email_task_transactional( - user_id, reference, old_status, new_status): + user_id: str, + reference: feedback_domain.FeedbackMessageReference, + old_status: str, + new_status: str +) -> None: """Adds a task for sending email when a feedback thread status is changed. 
Args: @@ -888,7 +1006,9 @@ def _enqueue_feedback_thread_status_change_email_task_transactional( feconf.TASK_URL_FEEDBACK_STATUS_EMAILS, payload, 0) -def get_feedback_message_references(user_id): +def get_feedback_message_references( + user_id: str +) -> List[feedback_domain.FeedbackMessageReference]: """Fetches all FeedbackMessageReference objects written by the given user. Args: @@ -911,7 +1031,9 @@ def get_feedback_message_references(user_id): @transaction_services.run_in_transaction_wrapper -def _add_feedback_message_reference_transactional(user_id, reference): +def _add_feedback_message_reference_transactional( + user_id: str, reference: feedback_domain.FeedbackMessageReference +) -> None: """Adds a new message to the feedback message buffer that is used to generate the next notification email to the given user. @@ -936,7 +1058,7 @@ def _add_feedback_message_reference_transactional(user_id, reference): @transaction_services.run_in_transaction_wrapper -def update_feedback_email_retries_transactional(user_id): +def update_feedback_email_retries_transactional(user_id: str) -> None: """If sufficient time has passed, increment the number of retries for the corresponding user's UnsentEmailFeedbackModel. @@ -956,7 +1078,8 @@ def update_feedback_email_retries_transactional(user_id): @transaction_services.run_in_transaction_wrapper def pop_feedback_message_references_transactional( - user_id, num_references_to_pop): + user_id: str, num_references_to_pop: int +) -> None: """Pops feedback message references of the given user which have been processed already. @@ -983,7 +1106,8 @@ def pop_feedback_message_references_transactional( @transaction_services.run_in_transaction_wrapper def clear_feedback_message_references_transactional( - user_id, exploration_id, thread_id): + user_id: str, exploration_id: str, thread_id: str +) -> None: """Removes feedback message references associated with a feedback thread.
Args: @@ -1025,7 +1149,9 @@ def clear_feedback_message_references_transactional( model.put() -def _get_all_recipient_ids(exploration_id, thread_id, author_id): +def _get_all_recipient_ids( + exploration_id: str, thread_id: str, author_id: str +) -> Tuple[List[str], List[str]]: """Fetches all authors of the exploration excluding the given author and all the other recipients. @@ -1057,8 +1183,11 @@ def _get_all_recipient_ids(exploration_id, thread_id, author_id): def _send_batch_emails( - recipient_list, feedback_message_reference, exploration_id, - has_suggestion): + recipient_list: List[str], + feedback_message_reference: feedback_domain.FeedbackMessageReference, + exploration_id: str, + has_suggestion: bool +) -> None: """Adds the given FeedbackMessageReference to each of the recipient's email buffers. The collected messages will be sent out as a batch after a short delay. @@ -1074,7 +1203,7 @@ def _send_batch_emails( """ can_recipients_receive_email = email_manager.can_users_receive_thread_email( recipient_list, exploration_id, has_suggestion) - for recipient_id, can_receive_email in python_utils.ZIP( + for recipient_id, can_receive_email in zip( recipient_list, can_recipients_receive_email): if can_receive_email: _add_feedback_message_reference_transactional( @@ -1082,8 +1211,11 @@ def _send_batch_emails( def _send_instant_emails( - recipient_list, feedback_message_reference, exploration_id, - has_suggestion): + recipient_list: List[str], + feedback_message_reference: feedback_domain.FeedbackMessageReference, + exploration_id: str, + has_suggestion: bool +) -> None: """Adds the given FeedbackMessageReference to each of the recipient's email buffers. The collected messages will be sent out immediately. 
@@ -1098,7 +1230,7 @@ def _send_instant_emails( """ can_recipients_receive_email = email_manager.can_users_receive_thread_email( recipient_list, exploration_id, has_suggestion) - for recipient_id, can_receive_email in python_utils.ZIP( + for recipient_id, can_receive_email in zip( recipient_list, can_recipients_receive_email): if can_receive_email: enqueue_feedback_message_instant_email_task_transactional( @@ -1106,8 +1238,13 @@ def _send_instant_emails( def _send_feedback_thread_status_change_emails( - recipient_list, feedback_message_reference, old_status, new_status, - exploration_id, has_suggestion): + recipient_list: List[str], + feedback_message_reference: feedback_domain.FeedbackMessageReference, + old_status: str, + new_status: str, + exploration_id: str, + has_suggestion: bool +) -> None: """Notifies the given recipients about the status change. Args: @@ -1123,7 +1260,7 @@ def _send_feedback_thread_status_change_emails( """ can_recipients_receive_email = email_manager.can_users_receive_thread_email( recipient_list, exploration_id, has_suggestion) - for recipient_id, can_receive_email in python_utils.ZIP( + for recipient_id, can_receive_email in zip( recipient_list, can_recipients_receive_email): if can_receive_email: _enqueue_feedback_thread_status_change_email_task_transactional( @@ -1132,8 +1269,13 @@ def _send_feedback_thread_status_change_emails( def _add_message_to_email_buffer( - author_id, thread_id, message_id, message_length, old_status, - new_status): + author_id: str, + thread_id: str, + message_id: int, + message_length: int, + old_status: str, + new_status: str +) -> None: """Sends the given message to the recipients of the given thread. If status has changed, notify the recipients as well. 
@@ -1170,7 +1312,7 @@ def _add_message_to_email_buffer( has_suggestion) -def delete_exploration_feedback_analytics(exp_ids): +def delete_exploration_feedback_analytics(exp_ids: List[str]) -> None: """Deletes the FeedbackAnalyticsModel models corresponding to the given exp_ids. @@ -1188,7 +1330,7 @@ def delete_exploration_feedback_analytics(exp_ids): feedback_analytics_models_to_be_deleted) -def handle_new_thread_created(exp_id): +def handle_new_thread_created(exp_id: str) -> None: """Reacts to new threads added to an exploration. Args: @@ -1198,7 +1340,11 @@ def handle_new_thread_created(exp_id): _increment_open_threads_count_transactional(exp_id) -def handle_thread_status_changed(exp_id, old_status, new_status): +def handle_thread_status_changed( + exp_id: str, + old_status: str, + new_status: str +) -> None: """Reacts to changes in an exploration thread's status. Args: @@ -1217,7 +1363,7 @@ def handle_thread_status_changed(exp_id, old_status, new_status): @transaction_services.run_in_transaction_wrapper -def _increment_open_threads_count_transactional(exp_id): +def _increment_open_threads_count_transactional(exp_id: str) -> None: """Increments count of open threads by one.""" model = ( feedback_models.FeedbackAnalyticsModel.get(exp_id, strict=False) or @@ -1228,7 +1374,7 @@ def _increment_open_threads_count_transactional(exp_id): @transaction_services.run_in_transaction_wrapper -def _increment_total_threads_count_transactional(exp_id): +def _increment_total_threads_count_transactional(exp_id: str) -> None: """Increments count of total threads by one.""" model = ( feedback_models.FeedbackAnalyticsModel.get(exp_id, strict=False) or @@ -1239,7 +1385,7 @@ def _increment_total_threads_count_transactional(exp_id): @transaction_services.run_in_transaction_wrapper -def _decrement_open_threads_count_transactional(exp_id): +def _decrement_open_threads_count_transactional(exp_id: str) -> None: """Decrements count of open threads by one.""" model = ( 
feedback_models.FeedbackAnalyticsModel.get(exp_id, strict=False) or diff --git a/core/domain/feedback_services_test.py b/core/domain/feedback_services_test.py index d4ed8e0d4faf..317f3e3efe00 100644 --- a/core/domain/feedback_services_test.py +++ b/core/domain/feedback_services_test.py @@ -17,7 +17,6 @@ from __future__ import annotations from core import feconf -from core import python_utils from core.domain import event_services from core.domain import exp_domain from core.domain import feedback_domain @@ -29,30 +28,36 @@ from core.platform import models from core.tests import test_utils -( - feedback_models, email_models, suggestion_models -) = models.Registry.import_models([ - models.NAMES.feedback, models.NAMES.email, models.NAMES.suggestion +from typing import Final, List, Optional, TypedDict + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import feedback_models + from mypy_imports import suggestion_models + +(feedback_models, suggestion_models) = models.Registry.import_models([ + models.Names.FEEDBACK, + models.Names.SUGGESTION ]) class FeedbackServicesUnitTests(test_utils.EmailTestBase): """Test functions in feedback_services.""" - USER_EMAIL = 'user@example.com' - USER_USERNAME = 'user' - EXP_1_ID = 'exp_1_id' + USER_EMAIL: Final = 'user@example.com' + USER_USERNAME: Final = 'user' + EXP_1_ID: Final = 'exp_1_id' - def setUp(self): - super(FeedbackServicesUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) - def test_feedback_ids(self): + def test_feedback_ids(self) -> None: """Test various conventions for thread and message ids.""" exp_id = '0' feedback_services.create_thread( - 'exploration', exp_id, None, 'a subject', 'some text') + 'exploration', exp_id, 'test_user', 'a subject', 'some text') threadlist = feedback_services.get_all_threads( 'exploration', exp_id, False) self.assertEqual(len(threadlist), 1) @@ 
-80,40 +85,44 @@ def test_feedback_ids(self): # stop, followed by the message id. self.assertEqual(datastore_id, '%s.%s' % (thread_id, message_id)) - def test_create_message_raises_exception_for_invalid_thread_id(self): + def test_create_message_raises_exception_for_invalid_thread_id( + self + ) -> None: thread_id = 'invalid_thread_id' expected_exception_regexp = ( r'Thread belonging to the GeneralFeedbackThreadModel class ' r'with id:\[%s\] was not found.' % (thread_id) ) - with self.assertRaisesRegexp(Exception, expected_exception_regexp): + with self.assertRaisesRegex(Exception, expected_exception_regexp): feedback_services.create_message( thread_id, self.user_id, None, None, 'Hello') def test_create_messages_raises_pluralized_exception_for_bad_thread_ids( - self): + self + ) -> None: thread_ids = ['invalid_thread_id_1', 'invalid_thread_id_2'] expected_exception_regexp = ( r'Threads belonging to the GeneralFeedbackThreadModel class ' r'with ids:\[%s\] were not found.' % (' '.join(thread_ids)) ) - with self.assertRaisesRegexp(Exception, expected_exception_regexp): + with self.assertRaisesRegex(Exception, expected_exception_regexp): feedback_services.create_messages( thread_ids, self.user_id, None, None, 'Hello') def test_create_messages_raises_an_exception_if_thread_ids_are_not_unique( - self): + self + ) -> None: repeated_thread_ids = ['thread_id', 'thread_id'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Thread ids must be distinct when calling create_messsages.'): feedback_services.create_messages( repeated_thread_ids, self.user_id, None, None, 'Hello') - def test_delete_threads_for_multiple_entities(self): + def test_delete_threads_for_multiple_entities(self) -> None: self.save_new_default_exploration(self.EXP_1_ID, self.EXP_1_ID) suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, @@ -138,21 +147,24 @@ def test_delete_threads_for_multiple_entities(self): 
feedback_services.delete_threads_for_multiple_entities( feconf.ENTITY_TYPE_EXPLORATION, [self.EXP_1_ID]) + feedback_services.delete_threads_for_multiple_entities( + feconf.ENTITY_TYPE_EXPLORATION, []) + self.assertIsNone( feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)) self.assertIsNone( feedback_models.FeedbackAnalyticsModel.get_by_id(self.EXP_1_ID)) - def test_status_of_newly_created_thread_is_open(self): + def test_status_of_newly_created_thread_is_open(self) -> None: exp_id = '0' feedback_services.create_thread( - 'exploration', exp_id, None, 'a subject', 'some text') + 'exploration', exp_id, 'test_user', 'a subject', 'some text') threadlist = feedback_services.get_all_threads( 'exploration', exp_id, False) thread_status = threadlist[0].status self.assertEqual(thread_status, feedback_models.STATUS_CHOICES_OPEN) - def test_get_exp_id_from_thread_id(self): + def test_get_exp_id_from_thread_id(self) -> None: thread_id = 'exploration.exp1.1234' self.assertEqual( feedback_services.get_exp_id_from_thread_id(thread_id), 'exp1') @@ -161,13 +173,13 @@ def test_get_exp_id_from_thread_id(self): class FeedbackDeletionUnitTests(test_utils.GenericTestBase): """Test functions in feedback_services.""" - USER_EMAIL = 'user@example.com' - USER_USERNAME = 'user' - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' + USER_EMAIL: Final = 'user@example.com' + USER_USERNAME: Final = 'user' + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' - def setUp(self): - super(FeedbackDeletionUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) @@ -202,7 +214,7 @@ def setUp(self): feedback_models.FeedbackAnalyticsModel(id=self.EXP_1_ID).put() - def test_delete_feedback_threads_deletes_thread(self): + def test_delete_feedback_threads_deletes_thread(self) -> None: self.assertIsNotNone( feedback_models.GeneralFeedbackThreadModel.get_by_id( 
self.thread_1_id)) @@ -212,7 +224,7 @@ def test_delete_feedback_threads_deletes_thread(self): feedback_models.GeneralFeedbackThreadModel.get_by_id( self.thread_1_id)) - def test_delete_feedback_threads_deletes_suggestion(self): + def test_delete_feedback_threads_deletes_suggestion(self) -> None: self.assertIsNotNone( suggestion_models.GeneralSuggestionModel.get_by_id(self.thread_1_id) ) @@ -222,7 +234,7 @@ def test_delete_feedback_threads_deletes_suggestion(self): suggestion_models.GeneralSuggestionModel.get_by_id(self.thread_1_id) ) - def test_delete_feedback_threads_deletes_message(self): + def test_delete_feedback_threads_deletes_message(self) -> None: self.assertIsNotNone( feedback_models.GeneralFeedbackMessageModel.get_by_id( '%s.%s' % (self.thread_1_id, 0))) @@ -232,7 +244,7 @@ def test_delete_feedback_threads_deletes_message(self): feedback_models.GeneralFeedbackMessageModel.get_by_id( '%s.%s' % (self.thread_1_id, 0))) - def test_delete_feedback_threads_deletes_feedback_analytics(self): + def test_delete_feedback_threads_deletes_feedback_analytics(self) -> None: self.assertIsNotNone( feedback_models.FeedbackAnalyticsModel.get_by_id(self.EXP_1_ID)) feedback_services.delete_threads_for_multiple_entities( @@ -240,7 +252,14 @@ def test_delete_feedback_threads_deletes_feedback_analytics(self): self.assertIsNone( feedback_models.FeedbackAnalyticsModel.get_by_id(self.EXP_1_ID)) - def test_delete_feedback_threads_deletes_multiple_feedbacks(self): + def test_delete_exploration_feedback_analytics(self) -> None: + self.assertIsNotNone( + feedback_models.FeedbackAnalyticsModel.get_by_id(self.EXP_1_ID)) + feedback_services.delete_exploration_feedback_analytics([self.EXP_1_ID]) + self.assertIsNone( + feedback_models.FeedbackAnalyticsModel.get_by_id(self.EXP_1_ID)) + + def test_delete_feedback_threads_deletes_multiple_feedbacks(self) -> None: self.assertIsNotNone( feedback_models.GeneralFeedbackThreadModel.get_by_id( self.thread_1_id)) @@ -257,31 +276,58 @@ def 
test_delete_feedback_threads_deletes_multiple_feedbacks(self): self.thread_2_id)) +class ExpectedThreadDict(TypedDict): + """Dict representing the EXPECTED_THREAD_DICT dictionary.""" + + status: str + summary: Optional[str] + original_author_id: Optional[str] + subject: str + + +class ExpectedThreadViewerDict(TypedDict): + """Dict representing the EXPECTED_THREAD_DICT_VIEWER dictionary.""" + + status: str + summary: Optional[str] + original_author_id: Optional[str] + subject: str + + +class ReferenceDict(TypedDict): + """Dictionary representing the FeedbackMessageReference dictionary.""" + + entity_type: str + entity_id: str + thread_id: str + message_id: int + + class FeedbackThreadUnitTests(test_utils.GenericTestBase): - EXP_ID_1 = 'eid1' - EXP_ID_2 = 'eid2' - EXP_ID_3 = 'eid3' - THREAD_ID = 'thread_id' + EXP_ID_1: Final = 'eid1' + EXP_ID_2: Final = 'eid2' + EXP_ID_3: Final = 'eid3' + THREAD_ID: Final = 'thread_id' - EXPECTED_THREAD_DICT = { + EXPECTED_THREAD_DICT: ExpectedThreadDict = { 'status': u'open', 'summary': None, - 'original_author_username': None, + 'original_author_id': None, 'subject': u'a subject' } - EXPECTED_THREAD_DICT_VIEWER = { + EXPECTED_THREAD_DICT_VIEWER: ExpectedThreadViewerDict = { 'status': u'open', 'summary': None, - 'original_author_username': None, + 'original_author_id': None, 'subject': u'a subject second' } - USER_EMAIL = 'user@example.com' - USER_USERNAME = 'user' + USER_EMAIL: Final = 'user@example.com' + USER_USERNAME: Final = 'user' - def setUp(self): - super(FeedbackThreadUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) self.signup(self.USER_EMAIL, self.USER_USERNAME) @@ -300,7 +346,7 @@ def setUp(self): self.EXP_ID_3, self.owner_id, title='Leaning tower of Pisa', category='Architecture', language_code='fi') - def _get_all_messages_read(self, user_id, thread_id): + def _get_all_messages_read(self, user_id: str, thread_id: str) -> List[int]: """Returns the 
list of the ids of all the messages corresponding to the given thread id read by the user. """ @@ -308,11 +354,16 @@ def _get_all_messages_read(self, user_id, thread_id): feedback_models.GeneralFeedbackThreadUserModel.get( user_id, thread_id)) - return ( + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + message_ids: List[int] = ( feedback_thread_user_model.message_ids_read_by_user if - feedback_thread_user_model else []) + feedback_thread_user_model else [] + ) + return message_ids - def test_get_threads_single_exploration(self): + def test_get_threads_single_exploration(self) -> None: threads = feedback_services.get_threads('exploration', self.EXP_ID_1) self.assertEqual(len(threads), 0) feedback_services.create_thread( @@ -323,7 +374,7 @@ def test_get_threads_single_exploration(self): self.assertDictContainsSubset( self.EXPECTED_THREAD_DICT, threads[0].to_dict()) - def test_get_all_threads(self): + def test_get_all_threads(self) -> None: # Create an anonymous feedback thread. feedback_services.create_thread( 'exploration', self.EXP_ID_1, None, @@ -335,8 +386,8 @@ def test_get_all_threads(self): self.assertDictContainsSubset( self.EXPECTED_THREAD_DICT, threads[0].to_dict()) - self.EXPECTED_THREAD_DICT_VIEWER['original_author_username'] = ( - self.VIEWER_USERNAME) + self.EXPECTED_THREAD_DICT_VIEWER['original_author_id'] = ( + self.viewer_id) # Viewer creates feedback thread. 
feedback_services.create_thread( @@ -349,21 +400,99 @@ def test_get_all_threads(self): self.assertDictContainsSubset( self.EXPECTED_THREAD_DICT_VIEWER, threads[0].to_dict()) - def test_get_total_open_thread_for_single_exploration(self): + def test_get_total_open_thread_for_single_exploration(self) -> None: feedback_services.create_thread( - 'exploration', self.EXP_ID_1, None, + 'exploration', self.EXP_ID_1, 'test_user', self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread = feedback_services.get_thread_analytics(self.EXP_ID_1) self.assertEqual(thread.id, self.EXP_ID_1) self.assertEqual(thread.num_open_threads, 1) self.assertEqual(thread.num_total_threads, 1) - def test_get_total_open_threads_for_multiple_explorations(self): + def test_get_next_page_of_all_feedback_messages(self) -> None: + self.save_new_default_exploration(self.EXP_ID_1, self.EXP_ID_2) + suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + self.EXP_ID_1, + 1, + self.user_id, + { + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': 'state', + 'property_name': exp_domain.STATE_PROPERTY_CONTENT, + 'new_value': 'new content' + }, + 'some text') + thread_id = feedback_services.get_threads( + feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID_1 + )[0].id + feedback_services.create_message( + thread_id, self.user_id, None, None, 'some text') + feedback_services.create_message( + thread_id, self.user_id, None, None, 'Another text') + messages_on_page = feedback_services.get_messages(thread_id) + dictionary_list_from_test_method = [] + dictionary_list_from_page_message = [] + method_result = ( + feedback_services.get_next_page_of_all_feedback_messages()) + for i in (method_result)[0]: + dictionary_list_from_test_method.append(i.to_dict().items()) + for i in messages_on_page: + dictionary_list_from_page_message.append(i.to_dict().items()) + dictionary_list_from_page_message.reverse() + self.assertListEqual( + 
dictionary_list_from_test_method, + dictionary_list_from_page_message, + ) + genral_feedback_result = ( + feedback_models.GeneralFeedbackMessageModel.get_all_messages( + feconf.FEEDBACK_TAB_PAGE_SIZE, None)) + self.assertEqual(method_result[1], genral_feedback_result[1]) + self.assertEqual(method_result[2], genral_feedback_result[2]) + + def test_get_multiple_threads(self) -> None: + thread_1 = feedback_services.create_thread( + 'exploration', self.EXP_ID_1, 'test_user', + self.EXPECTED_THREAD_DICT['subject'], 'not used here') + thread_2 = feedback_services.create_thread( + 'exploration', self.EXP_ID_2, 'test_user', + self.EXPECTED_THREAD_DICT['subject'], 'not used here') + thread_id_list = [thread_1, thread_2] + thread_list = [] + thread_list_from_result = [] + for i in thread_id_list: + thread_list.append( + feedback_services.get_thread(i).to_dict().items()) + for feedback_thread in feedback_services.get_multiple_threads( + thread_id_list): + thread_list_from_result.append(feedback_thread.to_dict().items()) + self.assertListEqual(thread_list_from_result, thread_list) + + def test_handle_thread_status_changed(self) -> None: + thread_id = feedback_services.create_thread( + 'exploration', self.EXP_ID_1, 'test_user', + self.EXPECTED_THREAD_DICT['subject'], 'not used here') + feedback_services.create_message( + thread_id, self.user_id, + feedback_models.STATUS_CHOICES_FIXED, None, + 'feedback message not used here') + self.assertEqual(feedback_services.get_total_open_threads( + [feedback_services.get_thread_analytics(self.EXP_ID_1)]), 0) + feedback_services.handle_thread_status_changed( + self.EXP_ID_1, + feedback_models.STATUS_CHOICES_FIXED, + feedback_models.STATUS_CHOICES_OPEN) + self.assertEqual(feedback_services.get_total_open_threads( + [feedback_services.get_thread_analytics(self.EXP_ID_1)] + ), 1) + + def test_get_total_open_threads_for_multiple_explorations(self) -> None: feedback_services.create_thread( - 'exploration', self.EXP_ID_1, None, + 'exploration', 
self.EXP_ID_1, 'test_user', self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread( - 'exploration', self.EXP_ID_2, None, + 'exploration', self.EXP_ID_2, 'test_user', self.EXPECTED_THREAD_DICT['subject'], 'not used here') threads_exp_1 = feedback_services.get_all_threads( @@ -385,7 +514,7 @@ def test_get_total_open_threads_for_multiple_explorations(self): feedback_services.get_thread_analytics_multi( [self.EXP_ID_1, self.EXP_ID_2])), 1) - def test_get_thread_summaries(self): + def test_get_thread_summaries(self) -> None: feedback_services.create_thread( 'exploration', self.EXP_ID_1, self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') @@ -425,7 +554,7 @@ def test_get_thread_summaries(self): 'exploration.' + self.EXP_ID_3 + '.' + self.THREAD_ID)) # Check if the number of unread messages match. self.assertEqual(number_of_unread_threads, 0) - for summary, thread, exploration_title in python_utils.ZIP( + for summary, thread, exploration_title in zip( thread_summaries, threads, exploration_titles): self.assertEqual(summary.status, thread.status) self.assertEqual( @@ -450,7 +579,7 @@ def test_get_thread_summaries(self): # Check if the number of unread messages is equal to 1. 
self.assertEqual(number_of_unread_threads, 1) - def test_get_thread_summaries_returns_correct_message_count(self): + def test_get_thread_summaries_returns_correct_message_count(self) -> None: thread_id_1 = feedback_services.create_thread( 'exploration', self.EXP_ID_1, None, self.EXPECTED_THREAD_DICT['subject'], 'not used here') @@ -465,11 +594,14 @@ def test_get_thread_summaries_returns_correct_message_count(self): self.assertEqual(thread_summaries[0].total_message_count, 1) self.assertEqual(thread_summaries[1].total_message_count, 1) - def test_get_thread_summaries_only_returns_threads_for_explorations(self): + def test_get_thread_summaries_only_returns_threads_for_explorations( + self + ) -> None: exp_thread_id = feedback_services.create_thread( - 'exploration', self.EXP_ID_1, None, 'unused subject', 'unused text') + 'exploration', self.EXP_ID_1, self.user_id, + 'unused subject', 'unused text') skill_thread_id = feedback_services.create_thread( - 'skill', 'skillid1', None, 'unused subject', 'unused text') + 'skill', 'skillid1', self.user_id, 'unused subject', 'unused text') thread_summaries, _ = feedback_services.get_exp_thread_summaries( self.owner_id, [exp_thread_id, skill_thread_id]) @@ -478,7 +610,7 @@ def test_get_thread_summaries_only_returns_threads_for_explorations(self): self.assertEqual( thread_summaries[0].exploration_title, 'Bridges in England') - def test_update_messages_read_by_the_user(self): + def test_update_messages_read_by_the_user(self) -> None: feedback_services.create_thread( 'exploration', self.EXP_ID_1, self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') @@ -501,7 +633,8 @@ def test_update_messages_read_by_the_user(self): self.viewer_id, thread_id), message_ids) def test_add_message_ids_to_read_by_list_adds_msgs_to_threads_in_order( - self): + self + ) -> None: """Tests that the message_ids are being added to the correct feedback thread user model instances when some of these models exist before the method is called 
and some do not. @@ -525,7 +658,7 @@ def test_add_message_ids_to_read_by_list_adds_msgs_to_threads_in_order( # Create a list of FullyQualifiedMessageIdentifier objects for the # sample_message_ids and sample_thread_ids. message_identifiers = [] - for sample_thread_id, sample_message_id in python_utils.ZIP( + for sample_thread_id, sample_message_id in zip( sample_thread_ids, sample_message_ids): message_identifiers.append( feedback_domain.FullyQualifiedMessageIdentifier( @@ -539,13 +672,13 @@ def test_add_message_ids_to_read_by_list_adds_msgs_to_threads_in_order( # Assert that the message_ids were added to message_ids_read_by_user # property of the corresponding thread. - for sample_thread_id, sample_message_id in python_utils.ZIP( + for sample_thread_id, sample_message_id in zip( sample_thread_ids, sample_message_ids): self.assertEqual( self._get_all_messages_read(self.user_id, sample_thread_id), [sample_message_id]) - def test_only_exploration_threads_trigger_events(self): + def test_only_exploration_threads_trigger_events(self) -> None: exp_id = 'eid' self.save_new_valid_exploration(exp_id, 'owner') @@ -556,7 +689,7 @@ def test_only_exploration_threads_trigger_events(self): event_handler_call_counter_exploration): feedback_services.create_thread( feconf.ENTITY_TYPE_EXPLORATION, exp_id, - None, 'a subject', 'some text') + 'test_user', 'a subject', 'some text') self.assertEqual( event_handler_call_counter_exploration.times_called, 1) @@ -568,12 +701,12 @@ def test_only_exploration_threads_trigger_events(self): event_services.FeedbackThreadCreatedEventHandler, 'record', event_handler_call_counter_non_exploration): feedback_services.create_thread( - 'topic', 'topic_id', None, 'a subject', + 'topic', 'topic_id', 'test_user', 'a subject', 'some text') self.assertEqual( event_handler_call_counter_non_exploration.times_called, 0) - def test_create_message_increments_message_count(self): + def test_create_message_increments_message_count(self) -> None: thread_id = 
feedback_services.create_thread( 'exploration', self.EXP_ID_1, self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') @@ -586,7 +719,7 @@ def test_create_message_increments_message_count(self): thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id) self.assertEqual(thread.message_count, 2) - def test_cache_update_after_create_thread_with_user_text(self): + def test_cache_update_after_create_thread_with_user_text(self) -> None: thread_id = feedback_services.create_thread( 'exploration', self.EXP_ID_1, self.user_id, 'subject', 'initial text') @@ -595,7 +728,7 @@ def test_cache_update_after_create_thread_with_user_text(self): self.assertEqual(thread.last_nonempty_message_text, 'initial text') self.assertEqual(thread.last_nonempty_message_author_id, self.user_id) - def test_cache_update_after_create_thread_with_anon_text(self): + def test_cache_update_after_create_thread_with_anon_text(self) -> None: thread_id = feedback_services.create_thread( 'exploration', self.EXP_ID_1, None, 'subject', 'initial text') @@ -603,7 +736,7 @@ def test_cache_update_after_create_thread_with_anon_text(self): self.assertEqual(thread.last_nonempty_message_text, 'initial text') self.assertIsNone(thread.last_nonempty_message_author_id) - def test_cache_update_after_create_message_with_user_text(self): + def test_cache_update_after_create_message_with_user_text(self) -> None: thread_id = feedback_services.create_thread( 'exploration', self.EXP_ID_1, None, 'subject', 'initial text') @@ -619,7 +752,7 @@ def test_cache_update_after_create_message_with_user_text(self): self.assertEqual(thread.last_nonempty_message_text, 'anonymous text') self.assertEqual(thread.last_nonempty_message_author_id, self.user_id) - def test_cache_update_after_create_message_with_anon_text(self): + def test_cache_update_after_create_message_with_anon_text(self) -> None: thread_id = feedback_services.create_thread( 'exploration', self.EXP_ID_1, self.user_id, 'subject', 'initial text') @@ -636,23 
+769,29 @@ def test_cache_update_after_create_message_with_anon_text(self): self.assertEqual(thread.last_nonempty_message_text, 'anonymous text') self.assertIsNone(thread.last_nonempty_message_author_id) - def test_no_cache_update_after_create_thread_with_empty_user_text(self): + def test_no_cache_update_after_create_thread_with_empty_user_text( + self + ) -> None: thread_id = feedback_services.create_thread( - 'exploration', self.EXP_ID_1, self.user_id, 'subject', None) + 'exploration', self.EXP_ID_1, self.user_id, 'subject', '') thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id) self.assertIsNone(thread.last_nonempty_message_text) self.assertIsNone(thread.last_nonempty_message_author_id) - def test_no_cache_update_after_create_thread_with_empty_anon_text(self): + def test_no_cache_update_after_create_thread_with_empty_anon_text( + self + ) -> None: thread_id = feedback_services.create_thread( - 'exploration', self.EXP_ID_1, None, 'subject', None) + 'exploration', self.EXP_ID_1, None, 'subject', '') thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id) self.assertIsNone(thread.last_nonempty_message_text) self.assertIsNone(thread.last_nonempty_message_author_id) - def test_no_cache_update_after_create_message_with_empty_user_text(self): + def test_no_cache_update_after_create_message_with_empty_user_text( + self + ) -> None: thread_id = feedback_services.create_thread( 'exploration', self.EXP_ID_1, None, 'subject', 'initial text') @@ -662,13 +801,15 @@ def test_no_cache_update_after_create_message_with_empty_user_text(self): feedback_services.create_message( thread_id, self.user_id, feedback_models.STATUS_CHOICES_FIXED, None, - None) + '') thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id) self.assertEqual(thread.last_nonempty_message_text, 'initial text') self.assertIsNone(thread.last_nonempty_message_author_id) - def test_no_cache_update_after_create_message_with_empty_anon_text(self): + def 
test_no_cache_update_after_create_message_with_empty_anon_text( + self + ) -> None: thread_id = feedback_services.create_thread( 'exploration', self.EXP_ID_1, self.user_id, 'subject', 'initial text') @@ -678,7 +819,8 @@ def test_no_cache_update_after_create_message_with_empty_anon_text(self): self.assertEqual(thread.last_nonempty_message_author_id, self.user_id) feedback_services.create_message( - thread_id, None, feedback_models.STATUS_CHOICES_FIXED, None, None) + thread_id, None, feedback_models.STATUS_CHOICES_FIXED, + None, '') thread = feedback_models.GeneralFeedbackThreadModel.get(thread_id) self.assertEqual(thread.last_nonempty_message_text, 'initial text') @@ -688,7 +830,7 @@ def test_no_cache_update_after_create_message_with_empty_anon_text(self): class EmailsTaskqueueTests(test_utils.GenericTestBase): """Tests for tasks in emails taskqueue.""" - def test_create_new_batch_task(self): + def test_create_new_batch_task(self) -> None: user_id = 'user' feedback_services.enqueue_feedback_message_batch_email_task(user_id) self.assertEqual( @@ -701,13 +843,13 @@ def test_create_new_batch_task(self): self.assertEqual( tasks[0].url, feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS) - def test_create_new_instant_task(self): + def test_create_new_instant_task(self) -> None: user_id = 'user' - reference_dict = { + reference_dict: ReferenceDict = { 'entity_type': 'exploration', 'entity_id': 'eid', 'thread_id': 'tid', - 'message_id': 'mid' + 'message_id': 5 } reference = feedback_domain.FeedbackMessageReference( reference_dict['entity_type'], reference_dict['entity_id'], @@ -727,14 +869,16 @@ def test_create_new_instant_task(self): queue_name=taskqueue_services.QUEUE_NAME_EMAILS) self.assertEqual( tasks[0].url, feconf.TASK_URL_INSTANT_FEEDBACK_EMAILS) + # Ruling out the possibility of None for mypy type checking. 
+ assert tasks[0].payload is not None self.assertDictEqual(tasks[0].payload['reference_dict'], reference_dict) class FeedbackMessageEmailTests(test_utils.EmailTestBase): """Tests for feedback message emails.""" - def setUp(self): - super(FeedbackMessageEmailTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup('a@example.com', 'A') self.user_id_a = self.get_user_id_from_email('a@example.com') self.signup('b@example.com', 'B') @@ -748,7 +892,7 @@ def setUp(self): self.can_send_feedback_email_ctx = self.swap( feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True) - def test_pop_feedback_message_references(self): + def test_pop_feedback_message_references(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -764,6 +908,8 @@ def test_pop_feedback_message_references(self): self.editor_id, 0) model = feedback_models.UnsentFeedbackEmailModel.get( self.editor_id, strict=False) + # Ruling out the possibility of None for mypy type checking. + assert model is not None self.assertEqual( len(model.feedback_message_references), 1) self.assertEqual( @@ -775,7 +921,7 @@ def test_pop_feedback_message_references(self): self.editor_id, strict=False) self.assertIsNone(model) - def test_update_feedback_message_references(self): + def test_update_feedback_message_references(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: # There are no feedback message references to remove. 
self.assertIsNone( @@ -811,7 +957,7 @@ def test_update_feedback_message_references(self): model.feedback_message_references[0]['thread_id'], thread_id) - def test_update_feedback_email_retries(self): + def test_update_feedback_email_retries(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -831,7 +977,7 @@ def test_update_feedback_email_retries(self): self.editor_id) self.assertEqual(model.retries, 1) - def test_send_feedback_message_email(self): + def test_send_feedback_message_email(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -863,7 +1009,7 @@ def test_send_feedback_message_email(self): expected_feedback_message_dict) self.assertEqual(model.retries, 0) - def test_add_new_feedback_message(self): + def test_add_new_feedback_message(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -908,7 +1054,9 @@ def test_add_new_feedback_message(self): expected_feedback_message_dict2) self.assertEqual(model.retries, 0) - def test_email_is_not_sent_recipient_has_muted_emails_globally(self): + def test_email_is_not_sent_recipient_has_muted_emails_globally( + self + ) -> None: user_services.update_email_preferences( self.editor_id, True, False, False, False) @@ -921,7 +1069,9 @@ def test_email_is_not_sent_recipient_has_muted_emails_globally(self): self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_email_is_not_sent_recipient_has_muted_this_exploration(self): + def test_email_is_not_sent_recipient_has_muted_this_exploration( + self + ) -> None: user_services.set_email_preferences_for_exploration( self.editor_id, self.exploration.id, mute_feedback_notifications=True) @@ -935,17 +1085,17 @@ def test_email_is_not_sent_recipient_has_muted_this_exploration(self): 
self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_that_emails_are_not_sent_for_anonymous_user(self): + def test_that_emails_are_not_sent_for_anonymous_user(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( - 'exploration', self.exploration.id, None, + 'exploration', self.exploration.id, 'test_id', 'a subject', 'some text') messages = self._get_sent_email_messages( self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_that_emails_are_sent_for_registered_user(self): + def test_that_emails_are_sent_for_registered_user(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -968,7 +1118,7 @@ def test_that_emails_are_sent_for_registered_user(self): self.EDITOR_EMAIL) self.assertEqual(len(messages), 1) - def test_that_emails_are_not_sent_if_service_is_disabled(self): + def test_that_emails_are_not_sent_if_service_is_disabled(self) -> None: cannot_send_emails_ctx = self.swap( feconf, 'CAN_SEND_EMAILS', False) cannot_send_feedback_message_email_ctx = self.swap( @@ -982,7 +1132,7 @@ def test_that_emails_are_not_sent_if_service_is_disabled(self): self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_that_emails_are_not_sent_for_thread_status_changes(self): + def test_that_emails_are_not_sent_for_thread_status_changes(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -992,7 +1142,7 @@ def test_that_emails_are_not_sent_for_thread_status_changes(self): self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_that_email_are_not_sent_to_author_himself(self): + def test_that_email_are_not_sent_to_author_himself(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -1002,7 +1152,7 @@ 
def test_that_email_are_not_sent_to_author_himself(self): self.EDITOR_EMAIL) self.assertEqual(len(messages), 0) - def test_that_email_is_sent_for_reply_on_feedback(self): + def test_that_email_is_sent_for_reply_on_feedback(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -1026,7 +1176,7 @@ def test_that_email_is_sent_for_reply_on_feedback(self): taskqueue_services.QUEUE_NAME_EMAILS), 1) self.process_and_flush_pending_tasks() - def test_that_email_is_sent_for_changing_status_of_thread(self): + def test_that_email_is_sent_for_changing_status_of_thread(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -1054,7 +1204,7 @@ def test_that_email_is_sent_for_changing_status_of_thread(self): taskqueue_services.QUEUE_NAME_EMAILS), 1) self.process_and_flush_pending_tasks() - def test_that_email_is_sent_for_each_feedback_message(self): + def test_that_email_is_sent_for_each_feedback_message(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -1087,8 +1237,8 @@ def test_that_email_is_sent_for_each_feedback_message(self): class FeedbackMessageBatchEmailHandlerTests(test_utils.EmailTestBase): - def setUp(self): - super(FeedbackMessageBatchEmailHandlerTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) @@ -1102,7 +1252,7 @@ def setUp(self): self.can_send_feedback_email_ctx = self.swap( feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True) - def test_that_emails_are_sent(self): + def test_that_emails_are_sent(self) -> None: expected_email_html_body = ( 'Hi editor,
    ' '
    ' @@ -1157,7 +1307,7 @@ def test_that_emails_are_sent(self): self.assertEqual(messages[0].html, expected_email_html_body) self.assertEqual(messages[0].body, expected_email_text_body) - def test_that_correct_emails_are_sent_for_multiple_feedback(self): + def test_that_correct_emails_are_sent_for_multiple_feedback(self) -> None: expected_email_html_body = ( 'Hi editor,
    ' '
    ' @@ -1218,7 +1368,7 @@ def test_that_correct_emails_are_sent_for_multiple_feedback(self): self.assertEqual(messages[0].html, expected_email_html_body) self.assertEqual(messages[0].body, expected_email_text_body) - def test_that_emails_are_not_sent_if_already_seen(self): + def test_that_emails_are_not_sent_if_already_seen(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: feedback_services.create_thread( 'exploration', self.exploration.id, @@ -1243,8 +1393,8 @@ def test_that_emails_are_not_sent_if_already_seen(self): class FeedbackMessageInstantEmailHandlerTests(test_utils.EmailTestBase): - def setUp(self): - super(FeedbackMessageInstantEmailHandlerTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) @@ -1258,7 +1408,7 @@ def setUp(self): self.can_send_feedback_email_ctx = self.swap( feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True) - def test_that_emails_are_sent_for_feedback_message(self): + def test_that_emails_are_sent_for_feedback_message(self) -> None: expected_email_html_body = ( 'Hi newuser,

    ' 'New update to thread "a subject" on ' @@ -1304,7 +1454,7 @@ def test_that_emails_are_sent_for_feedback_message(self): self.assertEqual(messages[0].html, expected_email_html_body) self.assertEqual(messages[0].body, expected_email_text_body) - def test_that_emails_are_sent_for_status_change(self): + def test_that_emails_are_sent_for_status_change(self) -> None: expected_email_html_body = ( 'Hi newuser,

    ' 'New update to thread "a subject" on ' @@ -1350,7 +1500,9 @@ def test_that_emails_are_sent_for_status_change(self): self.assertEqual(messages[0].html, expected_email_html_body) self.assertEqual(messages[0].body, expected_email_text_body) - def test_that_emails_are_sent_for_both_status_change_and_message(self): + def test_that_emails_are_sent_for_both_status_change_and_message( + self + ) -> None: expected_email_html_body_message = ( 'Hi newuser,

    ' 'New update to thread "a subject" on ' @@ -1425,12 +1577,12 @@ def test_that_emails_are_sent_for_both_status_change_and_message(self): self.assertEqual(messages[1].html, expected_email_html_body_message) self.assertEqual(messages[1].body, expected_email_text_body_message) - def test_that_emails_are_not_sent_to_anonymous_user(self): + def test_that_emails_are_not_sent_to_anonymous_user(self) -> None: with self.can_send_emails_ctx, self.can_send_feedback_email_ctx: # Create thread as anonoymous user. feedback_services.create_thread( 'exploration', self.exploration.id, - None, 'a subject', 'some text') + 'test_id', 'a subject', 'some text') self.process_and_flush_pending_tasks() threadlist = feedback_services.get_all_threads( diff --git a/core/domain/fs_domain.py b/core/domain/fs_domain.py deleted file mode 100644 index a848bebc7125..000000000000 --- a/core/domain/fs_domain.py +++ /dev/null @@ -1,381 +0,0 @@ -# coding: utf-8 -# -# Copyright 2016 The Oppia Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS-IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Domain objects representing a file system and a file stream.""" - -from __future__ import annotations - -from core import feconf -from core import utils -from core.platform import models - -storage_services = models.Registry.import_storage_services() -app_identity_services = models.Registry.import_app_identity_services() - -CHANGE_LIST_SAVE = [{'cmd': 'save'}] - -ALLOWED_ENTITY_NAMES = [ - feconf.ENTITY_TYPE_EXPLORATION, feconf.ENTITY_TYPE_BLOG_POST, - feconf.ENTITY_TYPE_TOPIC, feconf.ENTITY_TYPE_SKILL, - feconf.ENTITY_TYPE_STORY, feconf.ENTITY_TYPE_QUESTION, - feconf.ENTITY_TYPE_VOICEOVER_APPLICATION] -ALLOWED_SUGGESTION_IMAGE_CONTEXTS = [ - feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS, - feconf.IMAGE_CONTEXT_EXPLORATION_SUGGESTIONS] - - -class FileStream: - """A class that wraps a file stream, but adds extra attributes to it. - - Attributes: - content: str. The content of the file snapshot. - """ - - def __init__(self, content): - """Constructs a FileStream object. - - Args: - content: str. The content of the file snapshots. - """ - self._content = content - - def read(self): - """Emulates stream.read(). Returns all bytes and emulates EOF. - - Returns: - content: str. The content of the file snapshot. - """ - content = self._content - self._content = '' - return content - - -class GeneralFileSystem: - """The parent class which is inherited by GcsFileSystem. - - Attributes: - entity_name: str. The name of the entity (eg: exploration, topic etc). - entity_id: str. The ID of the corresponding entity. - """ - - def __init__(self, entity_name, entity_id): - """Constructs a GeneralFileSystem object. - - Args: - entity_name: str. The name of the entity - (eg: exploration, topic etc). - entity_id: str. The ID of the corresponding entity. 
- """ - self._validate_entity_parameters(entity_name, entity_id) - self._assets_path = '%s/%s/assets' % (entity_name, entity_id) - - def _validate_entity_parameters(self, entity_name, entity_id): - """Checks whether the entity_id and entity_name passed in are valid. - - Args: - entity_name: str. The name of the entity - (eg: exploration, topic etc). - entity_id: str. The ID of the corresponding entity. - - Raises: - ValidationError. When parameters passed in are invalid. - """ - if entity_name not in ALLOWED_ENTITY_NAMES and ( - entity_name not in ALLOWED_SUGGESTION_IMAGE_CONTEXTS): - raise utils.ValidationError( - 'Invalid entity_name received: %s.' % entity_name) - if not isinstance(entity_id, str): - raise utils.ValidationError( - 'Invalid entity_id received: %s' % entity_id) - if entity_id == '': - raise utils.ValidationError('Entity id cannot be empty') - - @property - def assets_path(self): - """Returns the path of the parent folder of assets. - - Returns: - str. The path. - """ - return self._assets_path - - -class GcsFileSystem(GeneralFileSystem): - """Wrapper for a file system based on GCS. - - This implementation ignores versioning. - """ - - def __init__(self, entity_name, entity_id): - self._bucket_name = app_identity_services.get_gcs_resource_bucket_name() - super(GcsFileSystem, self).__init__(entity_name, entity_id) - - def _get_gcs_file_url(self, filepath): - """Returns the constructed GCS file URL. - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - - Returns: - str. The GCS file URL. - """ - # Upload to GCS bucket with filepath - # "//assets/". - gcs_file_url = '%s/%s' % (self._assets_path, filepath) - return gcs_file_url - - def isfile(self, filepath): - """Checks if the file with the given filepath exists in the GCS. - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - - Returns: - bool. Whether the file exists in GCS. 
- """ - return storage_services.isfile( - self._bucket_name, self._get_gcs_file_url(filepath)) - - def get(self, filepath): - """Gets a file as an unencoded stream of raw bytes. - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - - Returns: - FileStream or None. It returns FileStream domain object if the file - exists. Otherwise, it returns None. - """ - if self.isfile(filepath): - return FileStream(storage_services.get( - self._bucket_name, self._get_gcs_file_url(filepath))) - else: - return None - - def commit(self, filepath, raw_bytes, mimetype): - """Commit raw_bytes to the relevant file in the entity's assets folder. - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - raw_bytes: str. The content to be stored in the file. - mimetype: str. The content-type of the cloud file. - """ - storage_services.commit( - self._bucket_name, - self._get_gcs_file_url(filepath), - raw_bytes, - mimetype - ) - - def delete(self, filepath): - """Deletes a file and the metadata associated with it. - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - """ - if self.isfile(filepath): - storage_services.delete( - self._bucket_name, self._get_gcs_file_url(filepath)) - else: - raise IOError('File does not exist: %s' % filepath) - - def copy(self, source_assets_path, filepath): - """Copy images from source_path. - - Args: - source_assets_path: str. The path to the source entity's assets - folder. - filepath: str. The path to the relevant file within the entity's - assets folder. - """ - source_file_url = ( - '%s/%s' % (source_assets_path, filepath) - ) - storage_services.copy( - self._bucket_name, source_file_url, self._get_gcs_file_url(filepath) - ) - - def listdir(self, dir_name): - """Lists all files in a directory. - - Args: - dir_name: str. The directory whose files should be listed. This - should not start with '/' or end with '/'. 
- - Returns: - list(str). A lexicographically-sorted list of filenames. - """ - if dir_name.startswith('/') or dir_name.endswith('/'): - raise IOError( - 'The dir_name should not start with / or end with / : %s' % - dir_name - ) - - # The trailing slash is necessary to prevent non-identical directory - # names with the same prefix from matching, e.g. /abcd/123.png should - # not match a query for files under /abc/. - if dir_name and not dir_name.endswith('/'): - dir_name += '/' - - assets_path = '%s/' % self._assets_path - prefix = utils.vfs_construct_path(self._assets_path, dir_name) - blobs_in_dir = storage_services.listdir(self._bucket_name, prefix) - return [ - blob.name.replace(assets_path, '') for blob in blobs_in_dir] - - -class AbstractFileSystem: - """Interface for a file system.""" - - def __init__(self, impl): - """Constructs a AbstractFileSystem object.""" - self._impl = impl - - @property - def impl(self): - """Returns a AbstractFileSystem object. - - Returns: - AbstractFileSystem. The AbstractFileSystem object. - """ - return self._impl - - def _check_filepath(self, filepath): - """Raises an error if a filepath is invalid. - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - - Raises: - IOError. Invalid filepath. - """ - base_dir = utils.vfs_construct_path( - '/', self.impl.assets_path, 'assets') - absolute_path = utils.vfs_construct_path(base_dir, filepath) - normalized_path = utils.vfs_normpath(absolute_path) - - # This check prevents directory traversal. - if not normalized_path.startswith(base_dir): - raise IOError('Invalid filepath: %s' % filepath) - - def isfile(self, filepath): - """Checks if a file exists. Similar to os.path.isfile(...). - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - - Returns: - bool. Whether the file exists. 
- """ - self._check_filepath(filepath) - return self._impl.isfile(filepath) - - def open(self, filepath): - """Returns a stream with the file content. Similar to open(...). - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - - Returns: - FileStream. The file stream domain object. - """ - self._check_filepath(filepath) - return self._impl.get(filepath) - - def get(self, filepath): - """Returns a bytestring with the file content, but no metadata. - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - - Returns: - FileStream. The file stream domain object. - - Raises: - IOError. The given file stream does not exist. - """ - file_stream = self.open(filepath) - if file_stream is None: - raise IOError('File %s not found.' % (filepath)) - return file_stream.read() - - def commit(self, filepath, raw_bytes, mimetype=None): - """Replaces the contents of the file with the given by test string. - - Args: - filepath: str. The path to the relevant file within the entity's - assets folder. - raw_bytes: str. The content to be stored in the file. - mimetype: str. The content-type of the file. If mimetype is set to - 'application/octet-stream' then raw_bytes is expected to - contain binary data. In all other cases, raw_bytes is expected - to be textual data. - """ - # Note that textual data needs to be converted to bytes so that it can - # be stored in a file opened in binary mode. However, it is not - # required for binary data (i.e. when mimetype is set to - # 'application/octet-stream'). - - if isinstance(raw_bytes, str): - raw_bytes = raw_bytes.encode('utf-8') - file_content = ( - raw_bytes if mimetype != 'application/octet-stream' else raw_bytes) - self._check_filepath(filepath) - self._impl.commit(filepath, file_content, mimetype) - - def delete(self, filepath): - """Deletes a file and the metadata associated with it. - - Args: - filepath: str. 
The path to the relevant file within the entity's - assets folder. - """ - self._check_filepath(filepath) - self._impl.delete(filepath) - - def listdir(self, dir_name): - """Lists all the files in a directory. Similar to os.listdir(...). - - Args: - dir_name: str. The directory whose files should be listed. This - should not start with '/' or end with '/'. - - Returns: - list(str). A lexicographically-sorted list of filenames, - each of which is prefixed with dir_name. - """ - self._check_filepath(dir_name) - return self._impl.listdir(dir_name) - - def copy(self, source_assets_path, filepath): - """Copy images from source. - - Args: - source_assets_path: str. The path to the source entity's assets - folder. - filepath: str. The path to the relevant file within the entity's - assets folder. - """ - self._impl.copy(source_assets_path, filepath) diff --git a/core/domain/fs_domain_test.py b/core/domain/fs_domain_test.py deleted file mode 100644 index 8d19cc9321f4..000000000000 --- a/core/domain/fs_domain_test.py +++ /dev/null @@ -1,152 +0,0 @@ -# coding: utf-8 -# -# Copyright 2014 The Oppia Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, softwar -# distributed under the License is distributed on an "AS-IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Tests for filesystem-related domain objects.""" - -from __future__ import annotations - -import re - -from core import feconf -from core import utils -from core.domain import fs_domain -from core.platform import models -from core.tests import test_utils - -app_identity_services = models.Registry.import_app_identity_services() - - -class GcsFileSystemUnitTests(test_utils.GenericTestBase): - """Tests for the GCS file system.""" - - def setUp(self): - super(GcsFileSystemUnitTests, self).setUp() - self.USER_EMAIL = 'abc@example.com' - self.signup(self.USER_EMAIL, 'username') - self.user_id = self.get_user_id_from_email(self.USER_EMAIL) - self.fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, 'eid')) - - def test_get_and_save(self): - self.fs.commit('abc.png', 'file_contents') - self.assertEqual(self.fs.get('abc.png'), b'file_contents') - - def test_validate_entity_parameters(self): - with self.assertRaisesRegexp( - utils.ValidationError, 'Invalid entity_id received: 1'): - fs_domain.GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, 1) - - with self.assertRaisesRegexp( - utils.ValidationError, 'Entity id cannot be empty'): - fs_domain.GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, '') - - with self.assertRaisesRegexp( - utils.ValidationError, 'Invalid entity_name received: ' - 'invalid_name.'): - fs_domain.GcsFileSystem('invalid_name', 'exp_id') - - def test_delete(self): - self.assertFalse(self.fs.isfile('abc.png')) - self.fs.commit('abc.png', 'file_contents') - self.assertTrue(self.fs.isfile('abc.png')) - - self.fs.delete('abc.png') - self.assertFalse(self.fs.isfile('abc.png')) - with self.assertRaisesRegexp( - IOError, re.escape('File abc.png not found') - ): - self.fs.get('abc.png') - - with self.assertRaisesRegexp( - IOError, 'File does not exist: fake_file.png' - ): - self.fs.delete('fake_file.png') - - def test_listdir(self): - self.assertItemsEqual(self.fs.listdir(''), []) - - self.fs.commit('abc.png', 'file_contents') 
- self.fs.commit('abcd.png', 'file_contents_2') - self.fs.commit('abc/abcd.png', 'file_contents_3') - self.fs.commit('bcd/bcde.png', 'file_contents_4') - - file_names = ['abc.png', 'abc/abcd.png', 'abcd.png', 'bcd/bcde.png'] - - self.assertItemsEqual(self.fs.listdir(''), file_names) - - self.assertEqual( - self.fs.listdir('abc'), ['abc/abcd.png']) - - with self.assertRaisesRegexp(IOError, 'Invalid filepath'): - self.fs.listdir('/abc') - - with self.assertRaisesRegexp( - IOError, - ( - 'The dir_name should not start with /' - ' or end with / : abc/' - ) - ): - self.fs.listdir('abc/') - - self.assertEqual(self.fs.listdir('fake_dir'), []) - - new_fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, 'eid2')) - self.assertEqual(new_fs.listdir('assets'), []) - - def test_copy(self): - self.fs.commit('abc2.png', 'file_contents') - self.assertEqual(self.fs.listdir(''), ['abc2.png']) - destination_fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_QUESTION, 'question_id1')) - self.assertEqual(destination_fs.listdir(''), []) - destination_fs.copy(self.fs.impl.assets_path, 'abc2.png') - self.assertTrue(destination_fs.isfile('abc2.png')) - - -class DirectoryTraversalTests(test_utils.GenericTestBase): - """Tests to check for the possibility of directory traversal.""" - - def setUp(self): - super(DirectoryTraversalTests, self).setUp() - self.USER_EMAIL = 'abc@example.com' - self.signup(self.USER_EMAIL, 'username') - self.user_id = self.get_user_id_from_email(self.USER_EMAIL) - - def test_invalid_filepaths_are_caught(self): - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, 'eid')) - - invalid_filepaths = [ - '..', '../another_exploration', '../', '/..', '/abc'] - - for filepath in invalid_filepaths: - with self.assertRaisesRegexp(IOError, 'Invalid filepath'): - fs.isfile(filepath) - with self.assertRaisesRegexp(IOError, 'Invalid filepath'): - 
fs.open(filepath) - with self.assertRaisesRegexp(IOError, 'Invalid filepath'): - fs.get(filepath) - with self.assertRaisesRegexp(IOError, 'Invalid filepath'): - fs.commit(filepath, 'raw_file') - with self.assertRaisesRegexp(IOError, 'Invalid filepath'): - fs.delete(filepath) - with self.assertRaisesRegexp(IOError, 'Invalid filepath'): - fs.listdir(filepath) diff --git a/core/domain/fs_services.py b/core/domain/fs_services.py index cc298c0a48fd..b625d5411cad 100644 --- a/core/domain/fs_services.py +++ b/core/domain/fs_services.py @@ -20,23 +20,270 @@ from core import feconf from core import utils -from core.domain import fs_domain from core.domain import image_services from core.platform import models -(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion]) +from typing import Dict, List, Optional + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import app_identity_services + from mypy_imports import storage_services + from proto_files import text_classifier_pb2 + +storage_services = models.Registry.import_storage_services() +app_identity_services = models.Registry.import_app_identity_services() + +CHANGE_LIST_SAVE: List[Dict[str, str]] = [{'cmd': 'save'}] + +ALLOWED_ENTITY_NAMES: List[str] = [ + feconf.ENTITY_TYPE_EXPLORATION, + feconf.ENTITY_TYPE_BLOG_POST, + feconf.ENTITY_TYPE_TOPIC, + feconf.ENTITY_TYPE_SKILL, + feconf.ENTITY_TYPE_STORY, + feconf.ENTITY_TYPE_QUESTION +] +ALLOWED_SUGGESTION_IMAGE_CONTEXTS: List[str] = [ + feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS, + feconf.IMAGE_CONTEXT_EXPLORATION_SUGGESTIONS +] + + +class GeneralFileSystem: + """The parent class which is inherited by GcsFileSystem. + + Attributes: + entity_name: str. The name of the entity (eg: exploration, topic etc). + entity_id: str. The ID of the corresponding entity. + """ + + def __init__(self, entity_name: str, entity_id: str) -> None: + """Constructs a GeneralFileSystem object. + + Args: + entity_name: str. 
The name of the entity + (eg: exploration, topic etc). + entity_id: str. The ID of the corresponding entity. + """ + self._validate_entity_parameters(entity_name, entity_id) + self._assets_path = '%s/%s/assets' % (entity_name, entity_id) + + def _validate_entity_parameters( + self, entity_name: str, entity_id: str + ) -> None: + """Checks whether the entity_id and entity_name passed in are valid. + + Args: + entity_name: str. The name of the entity + (eg: exploration, topic etc). + entity_id: str. The ID of the corresponding entity. + + Raises: + ValidationError. When parameters passed in are invalid. + """ + if ( + entity_name not in ALLOWED_ENTITY_NAMES and + entity_name not in ALLOWED_SUGGESTION_IMAGE_CONTEXTS + ): + raise utils.ValidationError( + 'Invalid entity_name received: %s.' % entity_name) + if not isinstance(entity_id, str): + raise utils.ValidationError( + 'Invalid entity_id received: %s' % entity_id) + if entity_id == '': + raise utils.ValidationError('Entity id cannot be empty') + + @property + def assets_path(self) -> str: + """Returns the path of the parent folder of assets. + + Returns: + str. The path. + """ + return self._assets_path + + +class GcsFileSystem(GeneralFileSystem): + """Wrapper for a file system based on GCS. + + This implementation ignores versioning. + """ + + def __init__(self, entity_name: str, entity_id: str) -> None: + self._bucket_name = app_identity_services.get_gcs_resource_bucket_name() + super().__init__(entity_name, entity_id) + + def _get_gcs_file_url(self, filepath: str) -> str: + """Returns the constructed GCS file URL. + + Args: + filepath: str. The path to the relevant file within the entity's + assets folder. + + Returns: + str. The GCS file URL. + """ + # Upload to GCS bucket with filepath + # "//assets/". + gcs_file_url = '%s/%s' % (self._assets_path, filepath) + return gcs_file_url + + def _check_filepath(self, filepath: str) -> None: + """Raises an error if a filepath is invalid. + + Args: + filepath: str. 
The path to the relevant file within the entity's + assets folder. + + Raises: + OSError. Invalid filepath. + """ + base_dir = utils.vfs_construct_path('/', self.assets_path, 'assets') + absolute_path = utils.vfs_construct_path(base_dir, filepath) + normalized_path = utils.vfs_normpath(absolute_path) + + # This check prevents directory traversal. + if not normalized_path.startswith(base_dir): + raise IOError('Invalid filepath: %s' % filepath) + + def isfile(self, filepath: str) -> bool: + """Checks if the file with the given filepath exists in the GCS. + + Args: + filepath: str. The path to the relevant file within the entity's + assets folder. + + Returns: + bool. Whether the file exists in GCS. + """ + self._check_filepath(filepath) + return storage_services.isfile( + self._bucket_name, self._get_gcs_file_url(filepath)) + + def get(self, filepath: str) -> bytes: + """Gets a file as an unencoded stream of raw bytes. + + Args: + filepath: str. The path to the relevant file within the entity's + assets folder. + + Returns: + bytes. A stream of raw bytes if the file exists. + + Raises: + OSError. Given file does not exist. + """ + if self.isfile(filepath): + return storage_services.get( + self._bucket_name, self._get_gcs_file_url(filepath)) + else: + raise IOError('File %s not found.' % (filepath)) + + def commit( + self, + filepath: str, + raw_bytes: bytes, + mimetype: Optional[str] = None + ) -> None: + """Commit raw_bytes to the relevant file in the entity's assets folder. + + Args: + filepath: str. The path to the relevant file within the entity's + assets folder. + raw_bytes: bytes. The content to be stored in the file. + mimetype: Optional[str]. The content-type of the cloud file. + """ + # Note that textual data needs to be converted to bytes so that it can + # be stored in a file opened in binary mode. However, it is not + # required for binary data (i.e. when mimetype is set to + # 'application/octet-stream'). 
+ + self._check_filepath(filepath) + storage_services.commit( + self._bucket_name, + self._get_gcs_file_url(filepath), + raw_bytes, + mimetype + ) + + def delete(self, filepath: str) -> None: + """Deletes a file and the metadata associated with it. + + Args: + filepath: str. The path to the relevant file within the entity's + assets folder. + + Raises: + OSError. Given file does not exist. + """ + if self.isfile(filepath): + storage_services.delete( + self._bucket_name, self._get_gcs_file_url(filepath)) + else: + raise IOError('File does not exist: %s' % filepath) + + def copy(self, source_assets_path: str, filepath: str) -> None: + """Copy images from source_path. + + Args: + source_assets_path: str. The path to the source entity's assets + folder. + filepath: str. The path to the relevant file within the entity's + assets folder. + """ + source_file_url = ('%s/%s' % (source_assets_path, filepath)) + storage_services.copy( + self._bucket_name, source_file_url, self._get_gcs_file_url(filepath) + ) + + def listdir(self, dir_name: str) -> List[str]: + """Lists all files in a directory. + + Args: + dir_name: str. The directory whose files should be listed. This + should not start with '/' or end with '/'. + + Returns: + list(str). A lexicographically-sorted list of filenames. + + Raises: + OSError. The directory name starts or ends with '/'. + """ + self._check_filepath(dir_name) + if dir_name.startswith('/') or dir_name.endswith('/'): + raise IOError( + 'The dir_name should not start with / or end with / : %s' % + dir_name + ) + + # The trailing slash is necessary to prevent non-identical directory + # names with the same prefix from matching, e.g. /abcd/123.png should + # not match a query for files under /abc/. 
+ if dir_name and not dir_name.endswith('/'): + dir_name += '/' + + assets_path = '%s/' % self._assets_path + prefix = utils.vfs_construct_path(self._assets_path, dir_name) + blobs_in_dir = storage_services.listdir(self._bucket_name, prefix) + return [blob.name.replace(assets_path, '') for blob in blobs_in_dir] def save_original_and_compressed_versions_of_image( - filename, entity_type, entity_id, original_image_content, - filename_prefix, image_is_compressible): + filename: str, + entity_type: str, + entity_id: str, + original_image_content: bytes, + filename_prefix: str, + image_is_compressible: bool +) -> None: """Saves the three versions of the image file. Args: filename: str. The name of the image file. entity_type: str. The type of the entity. entity_id: str. The id of the entity. - original_image_content: str. The content of the original image. + original_image_content: bytes. The content of the original image. filename_prefix: str. The string to prefix to the filename. image_is_compressible: bool. Whether the image can be compressed or not. @@ -55,9 +302,7 @@ def save_original_and_compressed_versions_of_image( filename_wo_filetype, filetype) micro_image_filepath = '%s/%s' % (filename_prefix, micro_image_filename) - file_system_class = get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - entity_type, entity_id)) + fs = GcsFileSystem(entity_type, entity_id) if image_is_compressible: compressed_image_content = image_services.compress_image( @@ -90,7 +335,11 @@ def save_original_and_compressed_versions_of_image( micro_image_filepath, micro_image_content, mimetype=mimetype) -def save_classifier_data(exp_id, job_id, classifier_data_proto): +def save_classifier_data( + exp_id: str, + job_id: str, + classifier_data_proto: text_classifier_pb2.TextClassifierFrozenModel +) -> None: """Store classifier model data in a file. Args: @@ -100,16 +349,14 @@ def save_classifier_data(exp_id, job_id, classifier_data_proto): to be stored. 
""" filepath = '%s-classifier-data.pb.xz' % (job_id) - file_system_class = get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - feconf.ENTITY_TYPE_EXPLORATION, exp_id)) + fs = GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, exp_id) content = utils.compress_to_zlib( classifier_data_proto.SerializeToString()) fs.commit( filepath, content, mimetype='application/octet-stream') -def delete_classifier_data(exp_id, job_id): +def delete_classifier_data(exp_id: str, job_id: str) -> None: """Delete the classifier data from file. Args: @@ -117,25 +364,18 @@ def delete_classifier_data(exp_id, job_id): job_id: str. The id of the classifier training job model. """ filepath = '%s-classifier-data.pb.xz' % (job_id) - file_system_class = get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - feconf.ENTITY_TYPE_EXPLORATION, exp_id)) + fs = GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, exp_id) if fs.isfile(filepath): fs.delete(filepath) -def get_entity_file_system_class(): - """Returns GcsFileSystem class to the client. - - Returns: - class. GcsFileSystem class. - """ - return fs_domain.GcsFileSystem - - def copy_images( - source_entity_type, source_entity_id, destination_entity_type, - destination_entity_id, filenames): + source_entity_type: str, + source_entity_id: str, + destination_entity_type: str, + destination_entity_id: str, + filenames: List[str] +) -> None: """Copy images from source to destination. Args: @@ -145,11 +385,9 @@ def copy_images( destination_entity_type: str. The entity type of the destination. filenames: list(str). The list of filenames to copy. 
""" - file_system_class = get_entity_file_system_class() - source_fs = fs_domain.AbstractFileSystem(file_system_class( - source_entity_type, source_entity_id)) - destination_fs = fs_domain.AbstractFileSystem(file_system_class( - destination_entity_type, destination_entity_id)) + source_fs = GcsFileSystem(source_entity_type, source_entity_id) + destination_fs = GcsFileSystem( + destination_entity_type, destination_entity_id) for filename in filenames: filename_wo_filetype = filename[:filename.rfind('.')] filetype = filename[filename.rfind('.') + 1:] @@ -158,9 +396,9 @@ def copy_images( micro_image_filename = '%s_micro.%s' % ( filename_wo_filetype, filetype) destination_fs.copy( - source_fs.impl.assets_path, ('image/%s' % filename)) + source_fs.assets_path, ('image/%s' % filename)) destination_fs.copy( - source_fs.impl.assets_path, + source_fs.assets_path, ('image/%s' % compressed_image_filename)) destination_fs.copy( - source_fs.impl.assets_path, ('image/%s' % micro_image_filename)) + source_fs.assets_path, ('image/%s' % micro_image_filename)) diff --git a/core/domain/fs_services_test.py b/core/domain/fs_services_test.py index d4cb34c6cd7f..ce3969a9c645 100644 --- a/core/domain/fs_services_test.py +++ b/core/domain/fs_services_test.py @@ -20,10 +20,8 @@ import os from core import feconf -from core import python_utils from core import utils from core.constants import constants -from core.domain import fs_domain from core.domain import fs_services from core.domain import image_services from core.domain import user_services @@ -31,22 +29,130 @@ from proto_files import text_classifier_pb2 -class FileSystemServicesTests(test_utils.GenericTestBase): - """Tests for File System services.""" - - def test_get_exploration_file_system_with_dev_mode_enabled(self): - with self.swap(constants, 'DEV_MODE', True): - file_system = fs_services.get_entity_file_system_class() - self.assertIsInstance( - file_system(feconf.ENTITY_TYPE_EXPLORATION, 'entity_id'), - fs_domain.GcsFileSystem) 
- - def test_get_exploration_file_system_with_dev_mode_disabled(self): - with self.swap(constants, 'DEV_MODE', False): - file_system = fs_services.get_entity_file_system_class() - self.assertIsInstance( - file_system(feconf.ENTITY_TYPE_EXPLORATION, 'entity_id'), - fs_domain.GcsFileSystem) +class GcsFileSystemUnitTests(test_utils.GenericTestBase): + """Tests for the GCS file system.""" + + def setUp(self) -> None: + super().setUp() + self.USER_EMAIL = 'abc@example.com' + self.signup(self.USER_EMAIL, 'username') + self.user_id = self.get_user_id_from_email(self.USER_EMAIL) + self.fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, 'eid') + + def test_get_and_save(self) -> None: + self.fs.commit('abc.png', b'file_contents') + self.assertEqual(self.fs.get('abc.png'), b'file_contents') + + def test_validate_entity_parameters(self) -> None: + with self.assertRaisesRegex( + utils.ValidationError, 'Invalid entity_id received: 1' + ): + # Here we use MyPy ignore because the argument `entity_id` of + # GcsFileSystem() can only accept string values, but here for + # testing purpose we are providing integer value. Thus to avoid + # incompatible argument type MyPy error, we added an ignore + # statement here. + fs_services.GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, 1) # type: ignore[arg-type] + + with self.assertRaisesRegex( + utils.ValidationError, 'Entity id cannot be empty' + ): + fs_services.GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, '') + + with self.assertRaisesRegex( + utils.ValidationError, + 'Invalid entity_name received: ' + 'invalid_name.' 
+ ): + fs_services.GcsFileSystem('invalid_name', 'exp_id') + + def test_delete(self) -> None: + self.assertFalse(self.fs.isfile('abc.png')) + self.fs.commit('abc.png', b'file_contents') + self.assertTrue(self.fs.isfile('abc.png')) + + self.fs.delete('abc.png') + self.assertFalse(self.fs.isfile('abc.png')) + + with self.assertRaisesRegex( + IOError, 'File abc.png not found' + ): + self.fs.get('abc.png') + + with self.assertRaisesRegex( + IOError, 'File does not exist: fake_file.png' + ): + self.fs.delete('fake_file.png') + + def test_listdir(self) -> None: + self.assertItemsEqual(self.fs.listdir(''), []) + + self.fs.commit('abc.png', b'file_contents') + self.fs.commit('abcd.png', b'file_contents_2') + self.fs.commit('abc/abcd.png', b'file_contents_3') + self.fs.commit('bcd/bcde.png', b'file_contents_4') + + file_names = ['abc.png', 'abc/abcd.png', 'abcd.png', 'bcd/bcde.png'] + + self.assertItemsEqual(self.fs.listdir(''), file_names) + + self.assertEqual(self.fs.listdir('abc'), ['abc/abcd.png']) + + with self.assertRaisesRegex(IOError, 'Invalid filepath'): + self.fs.listdir('/abc') + + with self.assertRaisesRegex( + IOError, + ( + 'The dir_name should not start with /' + ' or end with / : abc/' + ) + ): + self.fs.listdir('abc/') + + self.assertEqual(self.fs.listdir('fake_dir'), []) + + new_fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, 'eid2') + self.assertEqual(new_fs.listdir('assets'), []) + + def test_copy(self) -> None: + self.fs.commit('abc2.png', b'file_contents') + self.assertEqual(self.fs.listdir(''), ['abc2.png']) + destination_fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_QUESTION, 'question_id1') + self.assertEqual(destination_fs.listdir(''), []) + destination_fs.copy(self.fs.assets_path, 'abc2.png') + self.assertTrue(destination_fs.isfile('abc2.png')) + + +class DirectoryTraversalTests(test_utils.GenericTestBase): + """Tests to check for the possibility of directory traversal.""" + + def setUp(self) -> None: + super().setUp() + 
self.USER_EMAIL = 'abc@example.com' + self.signup(self.USER_EMAIL, 'username') + self.user_id = self.get_user_id_from_email(self.USER_EMAIL) + + def test_invalid_filepaths_are_caught(self) -> None: + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, 'eid') + + invalid_filepaths = [ + '..', '../another_exploration', '../', '/..', '/abc'] + + for filepath in invalid_filepaths: + with self.assertRaisesRegex(IOError, 'Invalid filepath'): + fs.isfile(filepath) + with self.assertRaisesRegex(IOError, 'Invalid filepath'): + fs.get(filepath) + with self.assertRaisesRegex(IOError, 'Invalid filepath'): + fs.commit(filepath, b'raw_file') + with self.assertRaisesRegex(IOError, 'Invalid filepath'): + fs.delete(filepath) + with self.assertRaisesRegex(IOError, 'Invalid filepath'): + fs.listdir(filepath) class SaveOriginalAndCompressedVersionsOfImageTests(test_utils.GenericTestBase): @@ -58,22 +164,21 @@ class SaveOriginalAndCompressedVersionsOfImageTests(test_utils.GenericTestBase): MICRO_IMAGE_FILENAME = 'image_micro.png' USER = 'ADMIN' - def setUp(self): - super(SaveOriginalAndCompressedVersionsOfImageTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.user_id_admin = ( self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) self.admin = user_services.get_user_actions_info(self.user_id_admin) - def test_save_original_and_compressed_versions_of_image(self): - with python_utils.open_file( + def test_save_original_and_compressed_versions_of_image(self) -> None: + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None ) as f: original_image_content = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, self.EXPLORATION_ID)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, self.EXPLORATION_ID) 
self.assertFalse(fs.isfile('image/%s' % self.FILENAME)) self.assertFalse(fs.isfile('image/%s' % self.COMPRESSED_IMAGE_FILENAME)) self.assertFalse(fs.isfile('image/%s' % self.MICRO_IMAGE_FILENAME)) @@ -84,16 +189,15 @@ def test_save_original_and_compressed_versions_of_image(self): self.assertTrue(fs.isfile('image/%s' % self.COMPRESSED_IMAGE_FILENAME)) self.assertTrue(fs.isfile('image/%s' % self.MICRO_IMAGE_FILENAME)) - def test_compress_image_on_prod_mode_with_small_image_size(self): - with python_utils.open_file( + def test_compress_image_on_prod_mode_with_small_image_size(self) -> None: + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None) as f: original_image_content = f.read() with self.swap(constants, 'DEV_MODE', False): - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, self.EXPLORATION_ID)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, self.EXPLORATION_ID) self.assertFalse(fs.isfile('image/%s' % self.FILENAME)) self.assertFalse( @@ -129,16 +233,15 @@ def test_compress_image_on_prod_mode_with_small_image_size(self): micro_image_content), (22, 22)) - def test_save_original_and_compressed_versions_of_svg_image(self): - with python_utils.open_file( + def test_save_original_and_compressed_versions_of_svg_image(self) -> None: + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: image_content = f.read() with self.swap(constants, 'DEV_MODE', False): - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, self.EXPLORATION_ID)) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, self.EXPLORATION_ID) self.assertFalse(fs.isfile('image/%s' % self.FILENAME)) self.assertFalse( @@ -165,17 +268,16 @@ def test_save_original_and_compressed_versions_of_svg_image(self): self.assertEqual(compressed_image_content, image_content) self.assertEqual(micro_image_content, 
image_content) - def test_copy_images(self): - with python_utils.open_file( + def test_copy_images(self) -> None: + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None) as f: original_image_content = f.read() fs_services.save_original_and_compressed_versions_of_image( self.FILENAME, 'exploration', self.EXPLORATION_ID, original_image_content, 'image', True) - destination_fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_QUESTION, 'question_id1')) + destination_fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_QUESTION, 'question_id1') self.assertFalse(destination_fs.isfile('image/%s' % self.FILENAME)) self.assertFalse( destination_fs.isfile( @@ -195,11 +297,10 @@ def test_copy_images(self): class FileSystemClassifierDataTests(test_utils.GenericTestBase): """Unit tests for storing, reading and deleting classifier data.""" - def setUp(self): - super(FileSystemClassifierDataTests, self).setUp() - self.fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, 'exp_id')) + def setUp(self) -> None: + super().setUp() + self.fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, 'exp_id') self.classifier_data_proto = ( text_classifier_pb2.TextClassifierFrozenModel()) self.classifier_data_proto.model_json = json.dumps({ @@ -210,14 +311,13 @@ def setUp(self): } }) - def test_save_and_get_classifier_data(self): + def test_save_and_get_classifier_data(self) -> None: """Test that classifier data is stored and retrieved correctly.""" fs_services.save_classifier_data( 'exp_id', 'job_id', self.classifier_data_proto) filepath = 'job_id-classifier-data.pb.xz' - file_system_class = fs_services.get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - feconf.ENTITY_TYPE_EXPLORATION, 'exp_id')) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, 'exp_id') classifier_data = utils.decompress_from_zlib(fs.get(filepath)) 
classifier_data_proto = text_classifier_pb2.TextClassifierFrozenModel() classifier_data_proto.ParseFromString(classifier_data) @@ -225,7 +325,7 @@ def test_save_and_get_classifier_data(self): classifier_data_proto.model_json, self.classifier_data_proto.model_json) - def test_remove_classifier_data(self): + def test_remove_classifier_data(self) -> None: """Test that classifier data is removed upon deletion.""" fs_services.save_classifier_data( 'exp_id', 'job_id', self.classifier_data_proto) diff --git a/core/domain/html_cleaner.py b/core/domain/html_cleaner.py index 3f3a42f38536..009ef1802a66 100644 --- a/core/domain/html_cleaner.py +++ b/core/domain/html_cleaner.py @@ -23,13 +23,23 @@ import logging import urllib +from core import utils +from core.constants import constants from core.domain import rte_component_registry import bleach import bs4 +from typing import Dict, Final, List, TypedDict, Union, cast -def filter_a(tag, name, value): +class ComponentsDict(TypedDict): + """Dictionary that represents RTE Components.""" + + id: str + customization_args: Dict[str, Union[str, int, str, bool, Dict[str, str]]] + + +def filter_a(tag: str, name: str, value: str) -> bool: """Returns whether the described attribute of a tag should be whitelisted. @@ -40,6 +50,9 @@ def filter_a(tag, name, value): Returns: bool. Whether the given attribute should be whitelisted. + + Raises: + Exception. The 'tag' is not as expected. """ if tag != 'a': raise Exception('The filter_a method should only be used for a tags.') @@ -54,7 +67,7 @@ def filter_a(tag, name, value): return False -ATTRS_WHITELIST = { +ATTRS_WHITELIST: Final = { 'a': filter_a, 'b': [], 'blockquote': [], @@ -62,6 +75,7 @@ def filter_a(tag, name, value): 'code': [], 'div': [], 'em': [], + 'h1': [], 'hr': [], 'i': [], 'li': [], @@ -79,7 +93,7 @@ def filter_a(tag, name, value): } -def clean(user_submitted_html): +def clean(user_submitted_html: str) -> str: """Cleans a piece of user submitted HTML. 
This only allows HTML from a restricted set of tags, attrs and styles. @@ -104,7 +118,7 @@ def clean(user_submitted_html): user_submitted_html, tags=tag_names, attributes=core_tags, strip=True) -def strip_html_tags(html_string): +def strip_html_tags(html_string: str) -> str: """Strips all HTML markup from an HTML string. Args: @@ -117,7 +131,7 @@ def strip_html_tags(html_string): return bleach.clean(html_string, tags=[], attributes={}, strip=True) -def get_image_filenames_from_html_strings(html_strings): +def get_image_filenames_from_html_strings(html_strings: List[str]) -> List[str]: """Extracts the image filename from the oppia-noninteractive-image and oppia-noninteractive-math RTE component from all the html strings passed in. @@ -135,18 +149,27 @@ def get_image_filenames_from_html_strings(html_strings): for rte_comp in all_rte_components: if 'id' in rte_comp and rte_comp['id'] == 'oppia-noninteractive-image': - filenames.append( - rte_comp['customization_args']['filepath-with-value']) + # Here we use cast because the above 'if' condition forces + # 'filepath' customization arg to have type str. + filename = cast( + str, rte_comp['customization_args']['filepath-with-value'] + ) + filenames.append(filename) elif ('id' in rte_comp and rte_comp['id'] == 'oppia-noninteractive-math'): - filenames.append( - rte_comp['customization_args']['math_content-with-value'][ - 'svg_filename']) + # Here we use cast because the above 'elif' condition forces + # 'math_content' customization arg to have type Dict[str, str]. + content_to_filename_dict = cast( + Dict[str, str], + rte_comp['customization_args']['math_content-with-value'] + ) + filename = content_to_filename_dict['svg_filename'] + filenames.append(filename) return list(set(filenames)) -def get_rte_components(html_string): +def get_rte_components(html_string: str) -> List[ComponentsDict]: """Extracts the RTE components from an HTML string. Args: @@ -158,20 +181,437 @@ def get_rte_components(html_string): - id: str. 
The name of the component, i.e. 'oppia-noninteractive-link'. - customization_args: dict. Customization arg specs for the component. """ - components = [] + components: List[ComponentsDict] = [] soup = bs4.BeautifulSoup(html_string, 'html.parser') oppia_custom_tag_attrs = ( rte_component_registry.Registry.get_tag_list_with_attrs()) for tag_name, tag_attrs in oppia_custom_tag_attrs.items(): component_tags = soup.find_all(name=tag_name) for component_tag in component_tags: - component = {'id': tag_name} customization_args = {} for attr in tag_attrs: # Unescape special HTML characters such as '"'. attr_val = html.unescape(component_tag[attr]) customization_args[attr] = json.loads(attr_val) - component['customization_args'] = customization_args + component: ComponentsDict = { + 'id': tag_name, + 'customization_args': customization_args + } components.append(component) return components + + +def is_html_empty(html_str: str) -> bool: + """Checks if the html is empty or not. + + Args: + html_str: str. The html that needs to be validated. + + Returns: + bool. Returns True if the html is empty. + """ + if html_str.strip() in ['""', '\\"""\\"']: + return True + + html_val = utils.unescape_html(html_str) + html_val = ( + html_val.replace('

    ', '').replace('

    ', '').replace('
    ', ''). + replace('', '').replace('', '').replace('', ''). + replace('', '').replace('', '').replace('', ''). + replace('
      ', '').replace('
    ', '').replace('
      ', ''). + replace('
    ', '').replace('

    ', '').replace('

    ', ''). + replace('

    ', '').replace('

    ', '').replace('

    ', ''). + replace('

    ', '').replace('

    ', '').replace('

    ', ''). + replace('
    ', '').replace('
    ', '').replace('
    ', ''). + replace('
    ', '').replace('
  • ', '').replace('
  • ', ''). + replace(' ', '').replace('', '').replace('', ''). + replace('', '').replace('', '').replace('\"\"', ''). + replace('\'\'', '')) + if html_val.strip() == '': + return True + + return False + + +def _raise_validation_errors_for_escaped_html_tag( + tag: bs4.BeautifulSoup, attr: str, tag_name: str +) -> None: + """Raises validation for the errored escaped html tag. + + Args: + tag: bs4.BeautifulSoup. The tag which needs to be validated. + attr: str. The attribute name that needs to be validated inside the tag. + tag_name: str. The tag name. + + Raises: + ValidationError. Tag does not have the attribute. + ValidationError. Tag attribute is empty. + """ + if not tag.has_attr(attr): + raise utils.ValidationError( + '%s tag does not have \'%s\' attribute.' % (tag_name, attr) + ) + + if is_html_empty(tag[attr]): + raise utils.ValidationError( + '%s tag \'%s\' attribute should not be empty.' % (tag_name, attr) + ) + + +def _raise_validation_errors_for_unescaped_html_tag( + tag: bs4.BeautifulSoup, attr: str, tag_name: str +) -> None: + """Raises validation for the errored unescaped html tag. + + Args: + tag: bs4.BeautifulSoup. The tag which needs to be validated. + attr: str. The attribute name that needs to be validated inside the tag. + tag_name: str. The tag name. + + Raises: + ValidationError. Tag does not have the attribute. + ValidationError. Tag attribute is empty. + """ + if not tag.has_attr(attr): + raise utils.ValidationError( + '%s tag does not have \'%s\' attribute.' % (tag_name, attr) + ) + + attr_value = utils.unescape_html(tag[attr])[1:-1].replace('\\"', '') + if is_html_empty(attr_value): + raise utils.ValidationError( + '%s tag \'%s\' attribute should not be empty.' % (tag_name, attr) + ) + + +def validate_rte_tags( + html_data: str, is_tag_nested_inside_tabs_or_collapsible: bool = False +) -> None: + """Validate all the RTE tags. + + Args: + html_data: str. The RTE content of the state. + is_tag_nested_inside_tabs_or_collapsible: bool. 
True when we + validate tags inside `Tabs` or `Collapsible` tag. + + Raises: + ValidationError. Image does not have alt-with-value attribute. + ValidationError. Image alt-with-value attribute have less + than 5 characters. + ValidationError. Image does not have caption-with-value attribute. + ValidationError. Image caption-with-value attribute have more + than 500 characters. + ValidationError. Image does not have filepath-with-value attribute. + ValidationError. Image filepath-with-value attribute should not be + empty. + ValidationError. SkillReview does not have text-with-value attribute. + ValidationError. SkillReview text-with-value attribute should not be + empty. + ValidationError. SkillReview does not have skill_id-with-value + attribute. + ValidationError. SkillReview skill_id-with-value attribute should not be + empty. + ValidationError. Video does not have start-with-value attribute. + ValidationError. Video start-with-value attribute should not be empty. + ValidationError. Video does not have end-with-value attribute. + ValidationError. Video end-with-value attribute should not be empty. + ValidationError. Start value is greater than end value. + ValidationError. Video does not have autoplay-with-value attribute. + ValidationError. Video autoplay-with-value attribute should be boolean. + ValidationError. Video does not have video_id-with-value attribute. + ValidationError. Link does not have text-with-value attribute. + ValidationError. Link does not have url-with-value attribute. + ValidationError. Link url-with-value attribute should not be empty. + ValidationError. Math does not have math_content-with-value attribute. + ValidationError. Math math_content-with-value attribute should not be + empty. + ValidationError. Math does not have raw_latex-with-value attribute. + ValidationError. Math raw_latex-with-value attribute should not be + empty. + ValidationError. Math does not have svg_filename-with-value attribute. + ValidationError. 
Math svg_filename-with-value attribute should not be + empty. + ValidationError. Math svg_filename attribute does not have svg + extension. + ValidationError. Tabs tag present inside another tabs or collapsible. + ValidationError. Collapsible tag present inside tabs or another + collapsible. + """ + soup = bs4.BeautifulSoup(html_data, 'html.parser') + for tag in soup.find_all('oppia-noninteractive-image'): + if not tag.has_attr('alt-with-value'): + raise utils.ValidationError( + 'Image tag does not have \'alt-with-value\' attribute.' + ) + + if not tag.has_attr('caption-with-value'): + raise utils.ValidationError( + 'Image tag does not have \'caption-with-value\' attribute.' + ) + + caption_value = utils.unescape_html( + tag['caption-with-value'])[1:-1].replace('\\"', '') + if len(caption_value.strip()) > 500: + raise utils.ValidationError( + 'Image tag \'caption-with-value\' attribute should not ' + 'be greater than 500 characters.' + ) + + if not tag.has_attr('filepath-with-value'): + raise utils.ValidationError( + 'Image tag does not have \'filepath-with-value\' attribute.' + ) + + filepath_value = utils.unescape_html( + tag['filepath-with-value'])[1:-1].replace('\\"', '') + if is_html_empty(filepath_value): + raise utils.ValidationError( + 'Image tag \'filepath-with-value\' attribute should not ' + 'be empty.' 
+ ) + + for tag in soup.find_all('oppia-noninteractive-skillreview'): + _raise_validation_errors_for_unescaped_html_tag( + tag, + 'text-with-value', + 'SkillReview' + ) + + _raise_validation_errors_for_unescaped_html_tag( + tag, + 'skill_id-with-value', + 'SkillReview' + ) + + for tag in soup.find_all('oppia-noninteractive-video'): + + _raise_validation_errors_for_escaped_html_tag( + tag, + 'start-with-value', + 'Video' + ) + + _raise_validation_errors_for_escaped_html_tag( + tag, + 'end-with-value', + 'Video' + ) + + start_value = float(tag['start-with-value'].strip()) + end_value = float(tag['end-with-value'].strip()) + if start_value > end_value and start_value != 0.0 and end_value != 0.0: + raise utils.ValidationError( + 'Start value should not be greater than End value in Video tag.' + ) + + if not tag.has_attr('autoplay-with-value'): + raise utils.ValidationError( + 'Video tag does not have \'autoplay-with-value\' ' + 'attribute.' + ) + + if tag['autoplay-with-value'].strip() not in ( + 'true', 'false', '\'true\'', '\'false\'', + '\"true\"', '\"false\"', True, False + ): + raise utils.ValidationError( + 'Video tag \'autoplay-with-value\' attribute should be ' + 'a boolean value.' + ) + + _raise_validation_errors_for_unescaped_html_tag( + tag, + 'video_id-with-value', + 'Video' + ) + + for tag in soup.find_all('oppia-noninteractive-link'): + if not tag.has_attr('text-with-value'): + raise utils.ValidationError( + 'Link tag does not have \'text-with-value\' ' + 'attribute.' 
+ ) + + _raise_validation_errors_for_unescaped_html_tag( + tag, + 'url-with-value', + 'Link' + ) + + url = tag['url-with-value'].replace('"', '').replace(' ', '') + if utils.get_url_scheme(url) not in constants.ACCEPTABLE_SCHEMES: + raise utils.ValidationError( + 'Link should be prefix with acceptable schemas ' + f'which are {constants.ACCEPTABLE_SCHEMES}' + ) + + for tag in soup.find_all('oppia-noninteractive-math'): + if not tag.has_attr('math_content-with-value'): + raise utils.ValidationError( + 'Math tag does not have \'math_content-with-value\' ' + 'attribute.' + ) + + if is_html_empty(tag['math_content-with-value']): + raise utils.ValidationError( + 'Math tag \'math_content-with-value\' attribute should not ' + 'be empty.' + ) + + math_content_json = utils.unescape_html(tag['math_content-with-value']) + math_content_list = json.loads(math_content_json) + if 'raw_latex' not in math_content_list: + raise utils.ValidationError( + 'Math tag does not have \'raw_latex-with-value\' ' + 'attribute.' + ) + + if is_html_empty(math_content_list['raw_latex']): + raise utils.ValidationError( + 'Math tag \'raw_latex-with-value\' attribute should not ' + 'be empty.' + ) + + if 'svg_filename' not in math_content_list: + raise utils.ValidationError( + 'Math tag does not have \'svg_filename-with-value\' ' + 'attribute.' + ) + + if is_html_empty(math_content_list['svg_filename']): + raise utils.ValidationError( + 'Math tag \'svg_filename-with-value\' attribute should not ' + 'be empty.' + ) + + if math_content_list['svg_filename'].strip()[-4:] != '.svg': + raise utils.ValidationError( + 'Math tag \'svg_filename-with-value\' attribute should ' + 'have svg extension.' + ) + + if is_tag_nested_inside_tabs_or_collapsible: + tabs_tags = soup.find_all('oppia-noninteractive-tabs') + if len(tabs_tags) > 0: + raise utils.ValidationError( + 'Tabs tag should not be present inside another ' + 'Tabs or Collapsible tag.' 
+ ) + + collapsible_tags = soup.find_all('oppia-noninteractive-collapsible') + if len(collapsible_tags) > 0: + raise utils.ValidationError( + 'Collapsible tag should not be present inside another ' + 'Tabs or Collapsible tag.' + ) + + +def _raise_validation_errors_for_empty_tabs_content( + content_dict: Dict[str, str], name: str +) -> None: + """Raises error when the content inside the tabs tag is empty. + + Args: + content_dict: Dict[str]. The dictionary containing the content of + tags tag. + name: str. The content name that needs to be validated. + + Raises: + ValidationError. Content not present in the dictionary. + ValidationError. Content inside the dictionary is empty. + """ + if name not in content_dict: + raise utils.ValidationError( + 'No %s attribute is present inside the tabs tag.' % (name) + ) + + if is_html_empty(content_dict[name]): + raise utils.ValidationError( + '%s present inside tabs tag is empty.' % (name) + ) + + +def validate_tabs_and_collapsible_rte_tags(html_data: str) -> None: + """Validates `Tabs` and `Collapsible` RTE tags + + Args: + html_data: str. The RTE content of the state. + + Raises: + ValidationError. No tabs present inside the tab_contents attribute. + ValidationError. No title present inside the tab_contents attribute. + ValidationError. Title inside the tag is empty. + ValidationError. No content present inside the tab_contents attribute. + ValidationError. Content inside the tag is empty. + ValidationError. No content attributes present inside the tabs tag. + ValidationError. No collapsible content is present inside the tag. + ValidationError. Collapsible content-with-value attribute is not + present. + ValidationError. Collapsible heading-with-value attribute is not + present. + ValidationError. Collapsible heading-with-value attribute is empty. 
+ """ + soup = bs4.BeautifulSoup(html_data, 'html.parser') + tabs_tags = soup.find_all('oppia-noninteractive-tabs') + for tag in tabs_tags: + if not tag.has_attr('tab_contents-with-value'): + raise utils.ValidationError( + 'No content attribute is present inside the tabs tag.' + ) + + tab_content_json = utils.unescape_html( + tag['tab_contents-with-value']) + tab_content_list = json.loads(tab_content_json) + if len(tab_content_list) == 0: + raise utils.ValidationError( + 'No tabs are present inside the tabs tag.' + ) + + for tab_content in tab_content_list: + _raise_validation_errors_for_empty_tabs_content( + tab_content, 'title') + _raise_validation_errors_for_empty_tabs_content( + tab_content, 'content') + + validate_rte_tags( + tab_content['content'], + is_tag_nested_inside_tabs_or_collapsible=True + ) + + collapsibles_tags = soup.find_all('oppia-noninteractive-collapsible') + for tag in collapsibles_tags: + if not tag.has_attr('content-with-value'): + raise utils.ValidationError( + 'No content attribute present in collapsible tag.' + ) + + collapsible_content_json = ( + utils.unescape_html(tag['content-with-value']) + ) + collapsible_content = json.loads( + collapsible_content_json).replace('\\"', '') + if is_html_empty(collapsible_content): + raise utils.ValidationError( + 'No collapsible content is present inside the tag.' + ) + + validate_rte_tags( + collapsible_content, + is_tag_nested_inside_tabs_or_collapsible=True + ) + + if not tag.has_attr('heading-with-value'): + raise utils.ValidationError( + 'No heading attribute present in collapsible tag.' + ) + + collapsible_heading_json = ( + utils.unescape_html(tag['heading-with-value']) + ) + collapsible_heading = json.loads( + collapsible_heading_json).replace('\\"', '') + if is_html_empty(collapsible_heading): + raise utils.ValidationError( + 'Heading attribute inside the collapsible tag is empty.' 
+ ) diff --git a/core/domain/html_cleaner_test.py b/core/domain/html_cleaner_test.py index 8654f61f5a43..ccd187ede200 100644 --- a/core/domain/html_cleaner_test.py +++ b/core/domain/html_cleaner_test.py @@ -20,16 +20,17 @@ from core.domain import html_cleaner from core.tests import test_utils +from typing import List, Tuple class HtmlCleanerUnitTests(test_utils.GenericTestBase): """Test the HTML sanitizer.""" - def setUp(self): - super(HtmlCleanerUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.longMessage = True - def test_whitelisted_tags(self): + def test_whitelisted_tags(self) -> None: self.assertTrue( html_cleaner.filter_a('a', 'href', 'http://www.oppia.com')) @@ -40,12 +41,12 @@ def test_whitelisted_tags(self): self.assertTrue( html_cleaner.filter_a('a', 'title', 'http://www.oppia.com')) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The filter_a method should only be used for a tags.'): html_cleaner.filter_a('link', 'href', 'http://www.oppia.com') - def test_good_tags_allowed(self): - test_data = [( + def test_good_tags_allowed(self) -> None: + test_data: List[Tuple[str, str]] = [( 'Hello', 'Hello' ), ( @@ -73,8 +74,8 @@ def test_good_tags_allowed(self): html_cleaner.clean(datum[0]), datum[1], msg='\n\nOriginal text: %s' % datum[0]) - def test_bad_tags_suppressed(self): - test_data = [( + def test_bad_tags_suppressed(self) -> None: + test_data: List[Tuple[str, str]] = [( '', '' ), ( @@ -102,8 +103,8 @@ def test_bad_tags_suppressed(self): html_cleaner.clean(datum[0]), datum[1], msg='\n\nOriginal text: %s' % datum[0]) - def test_oppia_custom_tags(self): - test_data = [( + def test_oppia_custom_tags(self) -> None: + test_data: List[Tuple[str, ...]] = [( '', '' '' @@ -126,8 +127,8 @@ def test_oppia_custom_tags(self): class HtmlStripperUnitTests(test_utils.GenericTestBase): """Test the HTML stripper.""" - def test_strip_html_tags(self): - test_data = [( + def test_strip_html_tags(self) -> None: + test_data: 
List[Tuple[str, str]] = [( 'Hello', 'Hello', ), ( @@ -154,7 +155,7 @@ def test_strip_html_tags(self): class RteComponentExtractorUnitTests(test_utils.GenericTestBase): """Test the RTE component extractor.""" - def test_get_rte_components(self): + def test_get_rte_components(self) -> None: test_data = ( '

    Test text ' '

    ' ) - expected_components = [ + expected_components: List[html_cleaner.ComponentsDict] = [ { 'customization_args': { 'text-with-value': u'Link"quoted text"\'singlequotes\'', @@ -205,13 +206,15 @@ def test_get_rte_components(self): } ] - components = html_cleaner.get_rte_components(test_data) + components: List[html_cleaner.ComponentsDict] = ( + html_cleaner.get_rte_components(test_data) + ) self.assertEqual(len(components), len(expected_components)) for component in components: self.assertIn(component, expected_components) - def test_get_image_filenames_from_html_strings(self): + def test_get_image_filenames_from_html_strings(self) -> None: html_strings = [ '', '>') - ] - escaped_html_data = unescaped_html_data - for replace_tuple in replace_list_for_escaping: - escaped_html_data = escaped_html_data.replace( - replace_tuple[0], replace_tuple[1]) - - return escaped_html_data - - -def unescape_html(escaped_html_data): - """This function unescapes an escaped HTML string. - - Args: - escaped_html_data: str. Escaped HTML string to be unescaped. - - Returns: - str. Unescaped HTML string. - """ - # Replace list to unescape html strings. - replace_list_for_unescaping = [ - ('"', '"'), - (''', '\''), - ('<', '<'), - ('>', '>'), - ('&', '&') - ] - unescaped_html_data = escaped_html_data - for replace_tuple in replace_list_for_unescaping: - unescaped_html_data = unescaped_html_data.replace( - replace_tuple[0], replace_tuple[1]) - - return unescaped_html_data - - -def wrap_with_siblings(tag, p): +def wrap_with_siblings(tag: bs4.element.Tag, p: bs4.element.Tag) -> None: """This function wraps a tag and its unwrapped sibling in p tag. Args: @@ -93,7 +44,7 @@ def wrap_with_siblings(tag, p): p: bs4.element.Tag. The new p tag in soup in which the tag and its siblings are to be wrapped. 
""" - independent_parents = ['p', 'pre', 'ol', 'ul', 'blockquote'] + independent_parents = ['h1', 'p', 'pre', 'ol', 'ul', 'blockquote'] prev_sib = list(tag.previous_siblings) next_sib = list(tag.next_siblings) index_of_first_unwrapped_sibling = -1 @@ -126,16 +77,16 @@ def wrap_with_siblings(tag, p): # List of oppia noninteractive inline components. -INLINE_COMPONENT_TAG_NAMES = ( +INLINE_COMPONENT_TAG_NAMES: List[str] = ( rte_component_registry.Registry.get_inline_component_tag_names()) # List of oppia noninteractive block components. -BLOCK_COMPONENT_TAG_NAMES = ( +BLOCK_COMPONENT_TAG_NAMES: List[str] = ( rte_component_registry.Registry.get_block_component_tag_names()) # See https://perso.crans.org/besson/_static/python/lib/python2.7/encodings/cp1252.py # pylint: disable=line-too-long # Useful reading: https://www.regular-expressions.info/unicode8bit.html -CHAR_MAPPINGS = [ +CHAR_MAPPINGS: List[Tuple[str, str]] = [ (u'\u00a0', u'\xa0'), (u'\u00a1', u'\xa1'), (u'\u00a2', u'\xa2'), @@ -440,7 +391,9 @@ def wrap_with_siblings(tag, p): ] -def validate_rte_format(html_list, rte_format): +def validate_rte_format( + html_list: List[str], rte_format: str +) -> Dict[str, List[str]]: """This function checks if html strings in a given list are valid for given RTE format. @@ -454,7 +407,7 @@ def validate_rte_format(html_list, rte_format): """ # err_dict is a dictionary to store the invalid tags and the # invalid parent-child relations that we find. - err_dict = {} + err_dict: Dict[str, List[str]] = {} # All the invalid html strings will be stored in this. err_dict['strings'] = [] @@ -481,7 +434,7 @@ def validate_rte_format(html_list, rte_format): is_invalid = True else: content_html = json.loads( - unescape_html(collapsible['content-with-value'])) + utils.unescape_html(collapsible['content-with-value'])) soup_for_collapsible = bs4.BeautifulSoup( content_html.replace('
    ', '
    '), 'html.parser') is_invalid = validate_soup_for_rte( @@ -490,7 +443,8 @@ def validate_rte_format(html_list, rte_format): err_dict['strings'].append(html_data) for tabs in soup.findAll(name='oppia-noninteractive-tabs'): - tab_content_json = unescape_html(tabs['tab_contents-with-value']) + tab_content_json = utils.unescape_html( + tabs['tab_contents-with-value']) tab_content_list = json.loads(tab_content_json) for tab_content in tab_content_list: content_html = tab_content['content'] @@ -507,7 +461,9 @@ def validate_rte_format(html_list, rte_format): return err_dict -def validate_soup_for_rte(soup, rte_format, err_dict): +def validate_soup_for_rte( + soup: bs4.BeautifulSoup, rte_format: str, err_dict: Dict[str, List[str]] +) -> bool: """Validate content in given soup for given RTE format. Args: @@ -556,7 +512,7 @@ def validate_soup_for_rte(soup, rte_format, err_dict): return is_invalid -def validate_customization_args(html_list): +def validate_customization_args(html_list: List[str]) -> Dict[str, List[str]]: """Validates customization arguments of Rich Text Components in a list of html string. @@ -593,7 +549,7 @@ def validate_customization_args(html_list): return err_dict -def validate_customization_args_in_tag(tag): +def validate_customization_args_in_tag(tag: bs4.element.Tag) -> Iterator[str]: """Validates customization arguments of Rich Text Components in a soup. 
Args: @@ -611,7 +567,7 @@ def validate_customization_args_in_tag(tag): attrs = tag.attrs for attr in attrs: - value_dict[attr] = json.loads(unescape_html(attrs[attr])) + value_dict[attr] = json.loads(utils.unescape_html(attrs[attr])) try: component_types_to_component_classes[tag_name].validate(value_dict) @@ -643,7 +599,8 @@ def validate_customization_args_in_tag(tag): def validate_svg_filenames_in_math_rich_text( - entity_type, entity_id, html_string): + entity_type: str, entity_id: str, html_string: str +) -> List[str]: """Validates the SVG filenames for each math rich-text components and returns a list of all invalid math tags in the given HTML. @@ -659,23 +616,23 @@ def validate_svg_filenames_in_math_rich_text( error_list = [] for math_tag in soup.findAll(name='oppia-noninteractive-math'): math_content_dict = ( - json.loads(unescape_html( + json.loads(utils.unescape_html( math_tag['math_content-with-value']))) svg_filename = ( objects.UnicodeString.normalize(math_content_dict['svg_filename'])) if svg_filename == '': error_list.append(str(math_tag)) else: - file_system_class = fs_services.get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem( - file_system_class(entity_type, entity_id)) + fs = fs_services.GcsFileSystem(entity_type, entity_id) filepath = 'image/%s' % svg_filename if not fs.isfile(filepath): error_list.append(str(math_tag)) return error_list -def validate_math_content_attribute_in_html(html_string): +def validate_math_content_attribute_in_html( + html_string: str +) -> List[Dict[str, str]]: """Validates the format of SVG filenames for each math rich-text components and returns a list of all invalid math tags in the given HTML. 
@@ -690,7 +647,7 @@ def validate_math_content_attribute_in_html(html_string): error_list = [] for math_tag in soup.findAll(name='oppia-noninteractive-math'): math_content_dict = ( - json.loads(unescape_html( + json.loads(utils.unescape_html( math_tag['math_content-with-value']))) try: components.Math.validate({ @@ -704,12 +661,14 @@ def validate_math_content_attribute_in_html(html_string): return error_list -def does_svg_tag_contains_xmlns_attribute(svg_string): +def does_svg_tag_contains_xmlns_attribute( + svg_string: Union[str, bytes] +) -> bool: """Checks whether the svg tag in the given svg string contains the xmlns attribute. Args: - svg_string: str. The SVG string. + svg_string: str|bytes. The SVG string. Returns: bool. Whether the svg tag in the given svg string contains the xmlns @@ -727,11 +686,13 @@ def does_svg_tag_contains_xmlns_attribute(svg_string): ) -def get_invalid_svg_tags_and_attrs(svg_string): +def get_invalid_svg_tags_and_attrs( + svg_string: Union[str, bytes] +) -> Tuple[List[str], List[str]]: """Returns a set of all invalid tags and attributes for the provided SVG. Args: - svg_string: str. The SVG string. + svg_string: str|bytes. The SVG string. Returns: tuple(list(str), list(str)). A 2-tuple, the first element of which @@ -763,7 +724,7 @@ def get_invalid_svg_tags_and_attrs(svg_string): return (invalid_elements, invalid_attrs) -def check_for_svgdiagram_component_in_html(html_string): +def check_for_svgdiagram_component_in_html(html_string: str) -> bool: """Checks for existence of SvgDiagram component tags inside an HTML string. Args: @@ -777,7 +738,7 @@ def check_for_svgdiagram_component_in_html(html_string): return bool(svgdiagram_tags) -def extract_svg_filenames_in_math_rte_components(html_string): +def extract_svg_filenames_in_math_rte_components(html_string: str) -> List[str]: """Extracts the svg_filenames from all the math-rich text components in an HTML string. 
@@ -792,7 +753,7 @@ def extract_svg_filenames_in_math_rte_components(html_string): filenames = [] for math_tag in soup.findAll(name='oppia-noninteractive-math'): math_content_dict = ( - json.loads(unescape_html( + json.loads(utils.unescape_html( math_tag['math_content-with-value']))) svg_filename = math_content_dict['svg_filename'] if svg_filename != '': @@ -802,7 +763,7 @@ def extract_svg_filenames_in_math_rte_components(html_string): return filenames -def add_math_content_to_math_rte_components(html_string): +def add_math_content_to_math_rte_components(html_string: str) -> str: """Replaces the attribute raw_latex-with-value in all Math component tags with a new attribute math_content-with-value. The new attribute has an additional field for storing SVG filenames. The field for SVG filename will @@ -814,6 +775,10 @@ def add_math_content_to_math_rte_components(html_string): Returns: str. Updated HTML string with all Math component tags having the new attribute. + + Raises: + Exception. Invalid latex string found while parsing the given + HTML string. """ soup = bs4.BeautifulSoup(html_string, 'html.parser') for math_tag in soup.findAll(name='oppia-noninteractive-math'): @@ -833,7 +798,8 @@ def add_math_content_to_math_rte_components(html_string): # double quotes(&quot;) and should be a valid unicode # string. 
raw_latex = ( - json.loads(unescape_html(math_tag['raw_latex-with-value']))) + json.loads(utils.unescape_html( + math_tag['raw_latex-with-value']))) normalized_raw_latex = ( objects.UnicodeString.normalize(raw_latex)) except Exception as e: @@ -843,17 +809,28 @@ def add_math_content_to_math_rte_components(html_string): ) ) raise e - math_content_dict = { - 'raw_latex': normalized_raw_latex, - 'svg_filename': '' - } + if math_tag.has_attr('svg_filename-with-value'): + svg_filename = json.loads(utils.unescape_html( + math_tag['svg_filename-with-value'])) + normalized_svg_filename = ( + objects.UnicodeString.normalize(svg_filename)) + math_content_dict = { + 'raw_latex': normalized_raw_latex, + 'svg_filename': normalized_svg_filename + } + del math_tag['svg_filename-with-value'] + else: + math_content_dict = { + 'raw_latex': normalized_raw_latex, + 'svg_filename': '' + } # Normalize and validate the value before adding to the math # tag. normalized_math_content_dict = ( objects.MathExpressionContent.normalize(math_content_dict)) # Add the new attribute math_expression_contents-with-value. math_tag['math_content-with-value'] = ( - escape_html( + utils.escape_html( json.dumps(normalized_math_content_dict, sort_keys=True))) # Delete the attribute raw_latex-with-value. del math_tag['raw_latex-with-value'] @@ -868,7 +845,7 @@ def add_math_content_to_math_rte_components(html_string): return str(soup).replace('
    ', '
    ') -def validate_math_tags_in_html(html_string): +def validate_math_tags_in_html(html_string: str) -> List[str]: """Returns a list of all invalid math tags in the given HTML. Args: @@ -887,7 +864,8 @@ def validate_math_tags_in_html(html_string): # double quotes(&quot;) and should be a valid unicode # string. raw_latex = ( - json.loads(unescape_html(math_tag['raw_latex-with-value']))) + json.loads(utils.unescape_html( + math_tag['raw_latex-with-value']))) objects.UnicodeString.normalize(raw_latex) except Exception: error_list.append(math_tag) @@ -896,7 +874,9 @@ def validate_math_tags_in_html(html_string): return error_list -def validate_math_tags_in_html_with_attribute_math_content(html_string): +def validate_math_tags_in_html_with_attribute_math_content( + html_string: str +) -> List[str]: """Returns a list of all invalid new schema math tags in the given HTML. The old schema has the attribute raw_latex-with-value while the new schema has the attribute math-content-with-value which includes a field for storing @@ -915,7 +895,7 @@ def validate_math_tags_in_html_with_attribute_math_content(html_string): if math_tag.has_attr('math_content-with-value'): try: math_content_dict = ( - json.loads(unescape_html( + json.loads(utils.unescape_html( math_tag['math_content-with-value']))) raw_latex = math_content_dict['raw_latex'] svg_filename = math_content_dict['svg_filename'] @@ -928,7 +908,7 @@ def validate_math_tags_in_html_with_attribute_math_content(html_string): return error_list -def is_parsable_as_xml(xml_string): +def is_parsable_as_xml(xml_string: bytes) -> bool: """Checks if input string is parsable as XML. 
Args: @@ -946,7 +926,9 @@ def is_parsable_as_xml(xml_string): return False -def convert_svg_diagram_to_image_for_soup(soup_context): +def convert_svg_diagram_to_image_for_soup( + soup_context: bs4.BeautifulSoup +) -> str: """"Renames oppia-noninteractive-svgdiagram tag to oppia-noninteractive-image and changes corresponding attributes for a given soup context. @@ -962,12 +944,12 @@ def convert_svg_diagram_to_image_for_soup(soup_context): svg_filepath = svg_image['svg_filename-with-value'] del svg_image['svg_filename-with-value'] svg_image['filepath-with-value'] = svg_filepath - svg_image['caption-with-value'] = escape_html('""') + svg_image['caption-with-value'] = utils.escape_html('""') svg_image.name = 'oppia-noninteractive-image' return str(soup_context) -def convert_svg_diagram_tags_to_image_tags(html_string): +def convert_svg_diagram_tags_to_image_tags(html_string: str) -> str: """Renames all the oppia-noninteractive-svgdiagram on the server to oppia-noninteractive-image and changes corresponding attributes. @@ -985,7 +967,7 @@ def convert_svg_diagram_tags_to_image_tags(html_string): ) -def _replace_incorrectly_encoded_chars(soup_context): +def _replace_incorrectly_encoded_chars(soup_context: bs4.BeautifulSoup) -> str: """Replaces incorrectly encoded character with the correct one in a given HTML string. @@ -1006,7 +988,7 @@ def _replace_incorrectly_encoded_chars(soup_context): return html_string -def fix_incorrectly_encoded_chars(html_string): +def fix_incorrectly_encoded_chars(html_string: str) -> str: """Replaces incorrectly encoded character with the correct one in a given HTML string. @@ -1024,7 +1006,9 @@ def fix_incorrectly_encoded_chars(html_string): ) -def _process_string_with_components(html_string, conversion_fn): +def _process_string_with_components( + html_string: str, conversion_fn: Callable[[bs4.BeautifulSoup], str] +) -> str: """Executes the provided conversion function after parsing complex RTE components. 
@@ -1043,16 +1027,17 @@ def _process_string_with_components(html_string, conversion_fn): name='oppia-noninteractive-collapsible'): if 'content-with-value' in collapsible.attrs: content_html = json.loads( - unescape_html(collapsible['content-with-value'])) + utils.unescape_html(collapsible['content-with-value'])) soup_for_collapsible = bs4.BeautifulSoup( content_html.replace('
    ', '
    '), 'html.parser') - collapsible['content-with-value'] = escape_html( + collapsible['content-with-value'] = utils.escape_html( json.dumps(conversion_fn( soup_for_collapsible ).replace('
    ', '
    '))) for tabs in soup.findAll(name='oppia-noninteractive-tabs'): - tab_content_json = unescape_html(tabs['tab_contents-with-value']) + tab_content_json = utils.unescape_html( + tabs['tab_contents-with-value']) tab_content_list = json.loads(tab_content_json) for tab_content in tab_content_list: content_html = tab_content['content'] @@ -1061,7 +1046,7 @@ def _process_string_with_components(html_string, conversion_fn): tab_content['content'] = ( conversion_fn(soup_for_tabs).replace( '
    ', '
    ')) - tabs['tab_contents-with-value'] = escape_html( + tabs['tab_contents-with-value'] = utils.escape_html( json.dumps(tab_content_list)) return conversion_fn(soup) diff --git a/core/domain/html_validation_service_test.py b/core/domain/html_validation_service_test.py index b7b6d1f569ae..4fdb8ca6eff8 100644 --- a/core/domain/html_validation_service_test.py +++ b/core/domain/html_validation_service_test.py @@ -22,20 +22,29 @@ import re from core import feconf -from core import python_utils -from core.domain import fs_domain +from core import utils +from core.domain import fs_services from core.domain import html_validation_service from core.tests import test_utils import bs4 +from typing import Dict, List, TypedDict + + +class SvgDiagramTestCaseDict(TypedDict): + """Dict representing the test case SVG content Dictionary.""" + + html_content: str + expected_output: bool + class ContentMigrationTests(test_utils.GenericTestBase): """Tests the function associated with the migration of html strings to valid RTE format. """ - def test_wrap_with_siblings(self): + def test_wrap_with_siblings(self) -> None: test_cases = [{ 'html_content': ( '

    hello

    this istest case1 for ' @@ -75,7 +84,7 @@ def test_wrap_with_siblings(self): html_validation_service.wrap_with_siblings(tag, soup.new_tag('p')) self.assertEqual(str(soup), test_case['expected_output']) - def test_validate_rte_format(self): + def test_validate_rte_format(self) -> None: test_cases_for_ckeditor = [ ( '
    Hello this is  testing '
    @@ -148,7 +157,7 @@ def test_validate_rte_format(self):
             self.assertItemsEqual(
                 actual_output_for_ckeditor, expected_output_for_ckeditor)
     
    -    def test_validate_soup_for_rte(self):
    +    def test_validate_soup_for_rte(self) -> None:
             test_cases_for_textangular = [
                 (
                     '

    Hello this is


    test case ' @@ -173,7 +182,7 @@ def test_validate_soup_for_rte(self): ] expected_output_for_textangular = [False, True, False, True, True, True] - err_dict = {} + err_dict: Dict[str, List[str]] = {} for index, test_case in enumerate(test_cases_for_textangular): actual_output_for_textangular = ( @@ -213,7 +222,7 @@ def test_validate_soup_for_rte(self): actual_output_for_ckeditor, expected_output_for_ckeditor[index]) - def test_validate_customization_args(self): + def test_validate_customization_args(self) -> None: test_cases = [( '

    None: # A Valid SVG string. valid_svg_string = ( '' ) }] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape('Expecting value: line 1 column 1 (char 0)') ): html_validation_service.add_math_content_to_math_rte_components( invalid_cases[0]['html_content']) - def test_validate_math_tags_in_html(self): + def test_validate_math_tags_in_html(self) -> None: """Test that the validate_math_tags_in_html method validates an HTML string and returns all the invalid tags. """ @@ -781,7 +790,9 @@ def test_validate_math_tags_in_html(self): for index, invalid_tag in enumerate(invalid_tags): self.assertEqual(str(invalid_tag), expected_invalid_tags[index]) - def test_validate_math_tags_in_html_with_attribute_math_content(self): + def test_validate_math_tags_in_html_with_attribute_math_content( + self + ) -> None: """Test that the validate_math_tags_in_html_with_attribute_math_content method validates an HTML string and returns all the invalid tags. """ @@ -838,7 +849,7 @@ def test_validate_math_tags_in_html_with_attribute_math_content(self): for invalid_tag in invalid_tags: self.assertIn(str(invalid_tag), expected_invalid_tags) - def test_extract_svg_filenames_in_math_rte_components(self): + def test_extract_svg_filenames_in_math_rte_components(self) -> None: """Test that the extract_svg_filenames_in_math_rte_components method extracts all the filenames from math rich-text components in html. @@ -866,7 +877,7 @@ def test_extract_svg_filenames_in_math_rte_components(self): extract_svg_filenames_in_math_rte_components( html_string_with_no_filename), []) - def test_validate_svg_filenames_when_all_filenames_are_valid(self): + def test_validate_svg_filenames_when_all_filenames_are_valid(self) -> None: """Test the validate_svg_filenames_in_math_rich_text when valid filenames are present for each math rich-text components in html. 
""" @@ -880,13 +891,12 @@ def test_validate_svg_filenames_when_all_filenames_are_valid(self): '&quot;, &quot;svg_filename&quot;: &quot' ';img2.svg&quot;}">' ) - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, 'exp_id1')) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, 'exp_id1') fs.commit('image/img1.svg', raw_image, mimetype='image/svg+xml') fs.commit('image/img2.svg', raw_image, mimetype='image/svg+xml') self.assertEqual( @@ -894,7 +904,7 @@ def test_validate_svg_filenames_when_all_filenames_are_valid(self): feconf.ENTITY_TYPE_EXPLORATION, 'exp_id1', html_string_with_filename_having_filename), []) - def test_validate_svg_filenames_when_filenames_are_invalid(self): + def test_validate_svg_filenames_when_filenames_are_invalid(self) -> None: """Test the validate_svg_filenames_in_math_rich_text when filenames are present but invalid. """ @@ -908,13 +918,12 @@ def test_validate_svg_filenames_when_filenames_are_invalid(self): '&quot;, &quot;svg_filename&quot;: &quot' ';img2.svg&quot;}">' ) - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, 'exp_id1')) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, 'exp_id1') fs.commit('image/img1.svg', raw_image, mimetype='image/svg+xml') self.assertEqual( html_validation_service.validate_svg_filenames_in_math_rich_text( @@ -926,7 +935,9 @@ def test_validate_svg_filenames_when_filenames_are_invalid(self): 'ot;, &quot;svg_filename&quot;: &quot;img2.' 
'svg&quot;}">')]) - def test_validate_svg_filenames_when_filenames_are_not_present(self): + def test_validate_svg_filenames_when_filenames_are_not_present( + self + ) -> None: """Test the validate_svg_filenames_in_math_rich_text when filenames are not present. """ @@ -940,13 +951,12 @@ def test_validate_svg_filenames_when_filenames_are_not_present(self): '&quot;, &quot;svg_filename&quot;: &quot' ';&quot;}">' ) - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_EXPLORATION, 'exp_id1')) + fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_EXPLORATION, 'exp_id1') fs.commit('image/img1.svg', raw_image, mimetype='image/svg+xml') self.assertEqual( html_validation_service.validate_svg_filenames_in_math_rich_text( @@ -958,7 +968,9 @@ def test_validate_svg_filenames_when_filenames_are_not_present(self): 'ot;, &quot;svg_filename&quot;: &quot;' '&quot;}">')]) - def test_validate_svg_filenames_format_when_all_filenames_are_valid(self): + def test_validate_svg_filenames_format_when_all_filenames_are_valid( + self + ) -> None: """Test the validate_svg_filenames_in_math_rich_text when valid filenames are present for each math rich-text components in html. """ @@ -979,7 +991,9 @@ def test_validate_svg_filenames_format_when_all_filenames_are_valid(self): validate_math_content_attribute_in_html( html_string_with_filename_having_valid_format), []) - def test_validate_svg_filenames_format_when_all_filenames_are_invalid(self): + def test_validate_svg_filenames_format_when_all_filenames_are_invalid( + self + ) -> None: """Test the validate_svg_filenames_in_math_rich_text when valid filenames are present for each math rich-text components in html. 
""" @@ -1028,11 +1042,11 @@ def test_validate_svg_filenames_format_when_all_filenames_are_invalid(self): expected_output ) - def test_check_for_svgdiagram_component_in_html(self): + def test_check_for_svgdiagram_component_in_html(self) -> None: """Test that the check_for_svgdiagram_component_in_html method checks for math-tags in an HTML string and returns a boolean. """ - test_cases = [{ + test_cases: List[SvgDiagramTestCaseDict] = [{ 'html_content': ( ' None: invalid_xml = b'aDRjSzNS' self.assertEqual( html_validation_service.is_parsable_as_xml(invalid_xml), @@ -1065,7 +1079,10 @@ def test_parsable_as_xml(self): self.assertEqual( html_validation_service.is_parsable_as_xml(invalid_xml), False) - invalid_xml = False + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + invalid_xml = False # type: ignore[assignment] self.assertEqual( html_validation_service.is_parsable_as_xml(invalid_xml), False) @@ -1074,7 +1091,7 @@ def test_parsable_as_xml(self): html_validation_service.is_parsable_as_xml(valid_xml), True) - def test_convert_svg_diagram_tags_to_image_tags(self): + def test_convert_svg_diagram_tags_to_image_tags(self) -> None: test_cases = [{ 'html_content': ( ' None: """Test that the convert_svg_diagram_tags_to_image_tags does not make any changes in already existing oppia-noninteractive image tags. """ @@ -1248,3 +1265,42 @@ def test_no_convertion_of_non_interactive_image_tags(self): html_validation_service.convert_svg_diagram_tags_to_image_tags( test_case['html_content']), test_case['expected_output']) + + def test_fix_incorrectly_encoded_chars_replaces_incorrect_encodings( + self + ) -> None: + test_cases = [ + { + 'html_string': '

    This is testing  

    ', + 'expected_output': '

    This is testing

    ' + }, + { + 'html_string': '

    This is \t testing \n

    ', + 'expected_output': '

    This is testing

    ' + }, + { + 'html_string': '

    Hello this is testing \xa0

    ', + 'expected_output': '

    Hello this is testing

    ' + }, + { + 'html_string': '

    Hello this is testing \xc2

    ', + 'expected_output': '

    Hello this is testing

    ' + }, + { + 'html_string': '

    Hello this is testing \xe2\u2020\u2019' + ' \xe2\u20ac\u0153 \xe2\u02c6\u2030 \xe2\u2026\u02dc ' + '\xe2\u20ac\u2122 \xe2\u02c6\u0161 \xe2\u02c6\u02c6 ' + '\xe2\u2026\u2022 \xe2\u2026\u2122 \xe2\u20ac\u02dc ' + '\xe2\u20ac\u201d \xe2\u20ac\u2039 \xe2\xcb\u2020\xe2\u20ac\xb0' + '

    ', + 'expected_output': '

    Hello this is testing \u2192 ' + '\u201c \u2209 \u2158 \u2019 \u221a \u2208 \u2155 \u2159 ' + '\u2018 \u2014 \u200b \u2209

    ' + } + ] + for test_case in test_cases: + self.assertEqual( + html_validation_service.fix_incorrectly_encoded_chars( + test_case['html_string']), + test_case['expected_output'] + ) diff --git a/core/domain/image_services.py b/core/domain/image_services.py index 317dec56e4ab..1810ba811e7d 100644 --- a/core/domain/image_services.py +++ b/core/domain/image_services.py @@ -21,9 +21,10 @@ import io from PIL import Image +from typing import Tuple -def _get_pil_image_dimensions(pil_image): +def _get_pil_image_dimensions(pil_image: Image) -> Tuple[int, int]: """Gets the dimensions of the Pillow Image. Args: @@ -36,11 +37,11 @@ def _get_pil_image_dimensions(pil_image): return height, width -def get_image_dimensions(file_content): +def get_image_dimensions(file_content: bytes) -> Tuple[int, int]: """Gets the dimensions of the image with the given file_content. Args: - file_content: str. The content of the file. + file_content: bytes. The content of the file. Returns: tuple(int). Returns height and width of the image. @@ -49,16 +50,19 @@ def get_image_dimensions(file_content): return _get_pil_image_dimensions(image) -def compress_image(image_content, scaling_factor): +def compress_image(image_content: bytes, scaling_factor: float) -> bytes: """Compresses the image by resizing the image with the scaling factor. Args: - image_content: str. Content of the file to be compressed. + image_content: bytes. Content of the file to be compressed. scaling_factor: float. The number by which the dimensions of the image will be scaled. This is expected to be in the interval (0, 1]. Returns: - str. Returns the content of the compressed image. + bytes. Returns the content of the compressed image. + + Raises: + ValueError. Scaling factor is not in the interval (0, 1]. 
""" if scaling_factor > 1 or scaling_factor <= 0: raise ValueError( diff --git a/core/domain/image_services_test.py b/core/domain/image_services_test.py index 3fc05f4312f1..a030cd6771bf 100644 --- a/core/domain/image_services_test.py +++ b/core/domain/image_services_test.py @@ -23,7 +23,7 @@ import re from core import feconf -from core import python_utils +from core import utils from core.domain import image_services from core.tests import test_utils @@ -37,24 +37,24 @@ class ImageServicesUnitTests(test_utils.GenericTestBase): TEST_IMAGE_WIDTH = 3000 TEST_IMAGE_HEIGHT = 2092 - def setUp(self): - super(ImageServicesUnitTests, self).setUp() - with python_utils.open_file( + def setUp(self) -> None: + super().setUp() + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'dummy_large_image.jpg'), 'rb', encoding=None) as f: self.jpeg_raw_image = f.read() - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None) as f: self.png_raw_image = f.read() - def test_image_dimensions_are_output_correctly(self): + def test_image_dimensions_are_output_correctly(self) -> None: height, width = ( image_services.get_image_dimensions(self.jpeg_raw_image)) self.assertEqual(self.TEST_IMAGE_HEIGHT, height) self.assertEqual(self.TEST_IMAGE_WIDTH, width) - def test_compress_image_returns_correct_dimensions(self): + def test_compress_image_returns_correct_dimensions(self) -> None: compressed_image = ( image_services.compress_image(self.jpeg_raw_image, 0.5)) height, width = ( @@ -62,8 +62,8 @@ def test_compress_image_returns_correct_dimensions(self): self.assertEqual(self.TEST_IMAGE_HEIGHT * 0.5, height) self.assertEqual(self.TEST_IMAGE_WIDTH * 0.5, width) - def test_invalid_scaling_factor_triggers_value_error(self): - value_exception = self.assertRaisesRegexp( + def test_invalid_scaling_factor_triggers_value_error(self) -> None: + value_exception = self.assertRaisesRegex( ValueError, re.escape( 'Scaling factor should be in 
the interval (0, 1], ' @@ -71,7 +71,7 @@ def test_invalid_scaling_factor_triggers_value_error(self): with value_exception: image_services.compress_image(self.jpeg_raw_image, 1.1) - value_exception = self.assertRaisesRegexp( + value_exception = self.assertRaisesRegex( ValueError, re.escape( 'Scaling factor should be in the interval (0, 1], ' @@ -79,7 +79,7 @@ def test_invalid_scaling_factor_triggers_value_error(self): with value_exception: image_services.compress_image(self.jpeg_raw_image, 0) - value_exception = self.assertRaisesRegexp( + value_exception = self.assertRaisesRegex( ValueError, re.escape( 'Scaling factor should be in the interval (0, 1], ' @@ -87,7 +87,7 @@ def test_invalid_scaling_factor_triggers_value_error(self): with value_exception: image_services.compress_image(self.jpeg_raw_image, -1) - def test_compression_results_in_correct_format(self): + def test_compression_results_in_correct_format(self) -> None: compressed_image = ( image_services.compress_image(self.jpeg_raw_image, 0.7)) pil_image = Image.open(io.BytesIO(compressed_image)) @@ -98,8 +98,8 @@ def test_compression_results_in_correct_format(self): pil_image = Image.open(io.BytesIO(compressed_image)) self.assertEqual(pil_image.format, 'PNG') - def test_compression_results_in_identical_files(self): - with python_utils.open_file( + def test_compression_results_in_identical_files(self) -> None: + with utils.open_file( os.path.join( feconf.TESTS_DATA_DIR, 'compressed_image.jpg'), 'rb', encoding=None) as f: diff --git a/core/domain/image_validation_services.py b/core/domain/image_validation_services.py index 8e2fea4277a4..effb74a29b52 100644 --- a/core/domain/image_validation_services.py +++ b/core/domain/image_validation_services.py @@ -16,19 +16,30 @@ from __future__ import annotations +import base64 import imghdr from core import feconf from core import utils from core.domain import html_validation_service +from typing import Optional, Union -def validate_image_and_filename(raw_image, 
filename): +HUNDRED_KB_IN_BYTES = 100 * 1024 +ONE_MB_IN_BYTES = 1 * 1024 * 1024 + + +def validate_image_and_filename( + raw_image: Union[str, bytes], + filename: str, + entity_type: Optional[str] = None, +) -> str: """Validates the image data and its filename. Args: - raw_image: str. The image content. + raw_image: Union[str, bytes]. The image content. filename: str. The filename for the image. + entity_type: str. The type of the entity. Returns: str. The file format of the image. @@ -37,15 +48,23 @@ def validate_image_and_filename(raw_image, filename): ValidationError. Image or filename supplied fails one of the validation checks. """ - hundred_kb_in_bytes = 100 * 1024 + if entity_type == feconf.ENTITY_TYPE_BLOG_POST: + max_file_size = ONE_MB_IN_BYTES + else: + max_file_size = HUNDRED_KB_IN_BYTES if not raw_image: raise utils.ValidationError('No image supplied') - if len(raw_image) > hundred_kb_in_bytes: + if isinstance(raw_image, str) and utils.is_base64_encoded(raw_image): + raw_image = base64.decodebytes(raw_image.encode('utf-8')) + + if len(raw_image) > max_file_size: raise utils.ValidationError( - 'Image exceeds file size limit of 100 KB.') + 'Image exceeds file size limit of %i KB.' % (max_file_size / 1024)) allowed_formats = ', '.join( list(feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS.keys())) + # Ruling out the possibility of str for mypy type checking. 
+ assert isinstance(raw_image, bytes) if html_validation_service.is_parsable_as_xml(raw_image): file_format = 'svg' invalid_tags, invalid_attrs = ( diff --git a/core/domain/image_validation_services_test.py b/core/domain/image_validation_services_test.py index 16f84a7f15f8..73c5bb0bbcba 100644 --- a/core/domain/image_validation_services_test.py +++ b/core/domain/image_validation_services_test.py @@ -19,58 +19,113 @@ import os from core import feconf -from core import python_utils from core import utils from core.domain import image_validation_services from core.tests import test_utils +from typing import Union + class ImageValidationServiceTests(test_utils.GenericTestBase): - def setUp(self): - super(ImageValidationServiceTests, self).setUp() - with python_utils.open_file( + def setUp(self) -> None: + super().setUp() + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None) as f: self.raw_image = f.read() - def _assert_validation_error( - self, image, filename, expected_error_substring): + def _assert_image_validation_error( + self, + image: Union[str, bytes], + filename: str, + entity_type: str, + expected_error_substring: str + ) -> None: """Checks that the image passes validation.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): image_validation_services.validate_image_and_filename( - image, filename) + image, filename, entity_type) - def test_image_validation_checks(self): - self._assert_validation_error(None, 'image.png', 'No image supplied') - self._assert_validation_error( - self.raw_image, None, 'No filename supplied') + def test_image_validation_checks(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self._assert_image_validation_error( + None, # type: ignore[arg-type] + 'image.png', + feconf.ENTITY_TYPE_EXPLORATION, + 'No image supplied', + ) + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self._assert_image_validation_error( + self.raw_image, + None, # type: ignore[arg-type] + feconf.ENTITY_TYPE_EXPLORATION, + 'No filename supplied' + ) large_image = '' % ( 'M150 0 L75 200 L225 200 Z ' * 4000) - self._assert_validation_error( - large_image, 'image.svg', 'Image exceeds file size limit of 100 KB') + self._assert_image_validation_error( + large_image, + 'image.svg', + feconf.ENTITY_TYPE_EXPLORATION, + 'Image exceeds file size limit of 100 KB') + + large_image = '' % ( + 'M150 0 L75 200 L225 200 Z ' * 50000) + self._assert_image_validation_error( + large_image, + 'image.svg', + feconf.ENTITY_TYPE_BLOG_POST, + 'Image exceeds file size limit of 1024 KB') invalid_svg = b'' - self._assert_validation_error( - invalid_svg, 'image.svg', + self._assert_image_validation_error( + invalid_svg, + 'image.svg', + feconf.ENTITY_TYPE_EXPLORATION, 'Unsupported tags/attributes found in the SVG') no_xmlns_attribute_svg = invalid_svg = b'' - self._assert_validation_error( - no_xmlns_attribute_svg, 'image.svg', + self._assert_image_validation_error( + no_xmlns_attribute_svg, + 'image.svg', + feconf.ENTITY_TYPE_EXPLORATION, 'The svg tag does not contains the \'xmlns\' attribute.') - self._assert_validation_error( - b'not an image', 'image.png', 'Image not recognized') - - self._assert_validation_error( - self.raw_image, '.png', 'Invalid filename') - self._assert_validation_error( - self.raw_image, 'image/image.png', - 'Filenames should not include slashes') - self._assert_validation_error( - self.raw_image, 'image', 'Image filename with no extension') - self._assert_validation_error( - self.raw_image, 'image.pdf', 'Expected a 
filename ending in .png') + self._assert_image_validation_error( + b'not an image', + 'image.png', + feconf.ENTITY_TYPE_EXPLORATION, + 'Image not recognized' + ) + + self._assert_image_validation_error( + self.raw_image, + '.png', + feconf.ENTITY_TYPE_EXPLORATION, + 'Invalid filename' + ) + self._assert_image_validation_error( + self.raw_image, + 'image/image.png', + feconf.ENTITY_TYPE_EXPLORATION, + 'Filenames should not include slashes' + ) + self._assert_image_validation_error( + self.raw_image, + 'image', + feconf.ENTITY_TYPE_EXPLORATION, + 'Image filename with no extension' + ) + self._assert_image_validation_error( + self.raw_image, + 'image.pdf', + feconf.ENTITY_TYPE_EXPLORATION, + 'Expected a filename ending in .png' + ) diff --git a/core/domain/improvements_domain.py b/core/domain/improvements_domain.py index a0b2d2659526..74417d2d1174 100644 --- a/core/domain/improvements_domain.py +++ b/core/domain/improvements_domain.py @@ -18,12 +18,29 @@ from __future__ import annotations +import datetime + +from core import feconf from core import utils -from core.domain import user_services -from core.platform import models +from core.constants import constants + +from typing import Optional, TypedDict + -(improvements_models,) = ( - models.Registry.import_models([models.NAMES.improvements])) +class TaskEntryDict(TypedDict): + """Dict for TaskEntry object.""" + + entity_type: str + entity_id: str + entity_version: int + task_type: str + target_type: str + target_id: str + issue_description: Optional[str] + status: str + resolver_username: Optional[str] + resolver_profile_picture_data_url: Optional[str] + resolved_on_msecs: Optional[float] class TaskEntry: @@ -50,9 +67,18 @@ class TaskEntry: """ def __init__( - self, entity_type, entity_id, entity_version, task_type, - target_type, target_id, issue_description, status, resolver_id, - resolved_on): + self, + entity_type: str, + entity_id: str, + entity_version: int, + task_type: str, + target_type: str, + target_id: 
str, + issue_description: Optional[str], + status: str, + resolver_id: Optional[str] = None, + resolved_on: Optional[datetime.datetime] = None + ) -> None: """Initializes a new TaskEntry domain object from the given values. Args: @@ -75,7 +101,7 @@ def __init__( resolved_on: datetime. The datetime at which this task was resolved. Only used when status is resolved, otherwise replaced with None. """ - if status != improvements_models.TASK_STATUS_RESOLVED: + if status != constants.TASK_STATUS_RESOLVED: resolver_id = None resolved_on = None self.entity_type = entity_type @@ -90,7 +116,7 @@ def __init__( self.resolved_on = resolved_on @property - def task_id(self): + def task_id(self) -> str: """Returns the unique identifier of this task. Value has the form: "[entity_type].[entity_id].[entity_version]. @@ -99,12 +125,12 @@ def task_id(self): Returns: str. The ID of this task. """ - return improvements_models.TaskEntryModel.generate_task_id( + return feconf.TASK_ENTRY_ID_TEMPLATE % ( self.entity_type, self.entity_id, self.entity_version, self.task_type, self.target_type, self.target_id) @property - def composite_entity_id(self): + def composite_entity_id(self) -> str: """Utility field which results in a 20% speedup compared to querying by each of the invididual fields used to compose it. @@ -113,10 +139,10 @@ def composite_entity_id(self): Returns: str. The value of the utility field. """ - return improvements_models.TaskEntryModel.generate_composite_entity_id( + return feconf.COMPOSITE_ENTITY_ID_TEMPLATE % ( self.entity_type, self.entity_id, self.entity_version) - def to_dict(self): + def to_dict(self) -> TaskEntryDict: """Returns a dict-representation of the task. Returns: @@ -135,18 +161,16 @@ def to_dict(self): issue_description: str. The sentence generated by Oppia to describe why the task was created. status: str. Tracks the state/progress of the task entry. - resolver_username: str. Username of the user who resolved the - task when status is resolved. 
Otherwise None. - resolver_profile_picture_data_url: str. Profile picture URL of - the user who resolved the task when status is resolved. - Otherwise None. - resolved_on_msecs: float. Time in milliseconds since epoch at - which the task was resolved when status is resolved. - Otherwise None. + resolver_username: str|None. Username of the user who resolved + the task when status is resolved. Otherwise None. + resolver_profile_picture_data_url: str|None. Profile picture + URL of the user who resolved the task when status is + resolved. Otherwise None. + resolved_on_msecs: float|None. Time in + milliseconds since epoch at which the task was resolved + when status is resolved. Otherwise None. """ - resolver_settings = ( - self.resolver_id and - user_services.get_user_settings(self.resolver_id, strict=True)) + return { 'entity_type': self.entity_type, 'entity_id': self.entity_id, @@ -156,12 +180,9 @@ def to_dict(self): 'target_id': self.target_id, 'issue_description': self.issue_description, 'status': self.status, - 'resolver_username': ( - resolver_settings and resolver_settings.username), - 'resolver_profile_picture_data_url': ( - resolver_settings and - resolver_settings.profile_picture_data_url), + 'resolver_username': None, + 'resolver_profile_picture_data_url': None, 'resolved_on_msecs': ( - self.resolved_on and - utils.get_time_in_millisecs(self.resolved_on)), + None if not self.resolved_on + else utils.get_time_in_millisecs(self.resolved_on)), } diff --git a/core/domain/improvements_domain_test.py b/core/domain/improvements_domain_test.py index 759cd74f98c2..9508a1784337 100644 --- a/core/domain/improvements_domain_test.py +++ b/core/domain/improvements_domain_test.py @@ -22,57 +22,55 @@ from core import feconf from core import utils +from core.constants import constants from core.domain import improvements_domain -from core.domain import user_services -from core.platform import models from core.tests import test_utils -(improvements_models,) = ( - 
models.Registry.import_models([models.NAMES.improvements])) - class TaskEntryTests(test_utils.GenericTestBase): """Unit tests for the TaskEntry domain object.""" MOCK_DATE = datetime.datetime(2020, 6, 15, 9, 0, 0, 123456) - def setUp(self): - super(TaskEntryTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.exp_id = 'eid' self.save_new_valid_exploration(self.exp_id, self.owner_id) - self.maxDiff = None + self.maxDiff = 0 - def test_task_id_has_expected_value(self): + def test_task_id_has_expected_value(self) -> None: task_entry = improvements_domain.TaskEntry( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_RESOLVED, self.owner_id, + constants.TASK_STATUS_RESOLVED, self.owner_id, self.MOCK_DATE) self.assertEqual( task_entry.task_id, 'exploration.eid.1.high_bounce_rate.state.Introduction') - def test_composite_entity_id_has_expected_value(self): + def test_composite_entity_id_has_expected_value(self) -> None: task_entry = improvements_domain.TaskEntry( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_RESOLVED, self.owner_id, + constants.TASK_STATUS_RESOLVED, self.owner_id, self.MOCK_DATE) self.assertEqual(task_entry.composite_entity_id, 'exploration.eid.1') - def 
test_to_dict_has_expected_value(self): + def test_to_dict_has_expected_value(self) -> None: + # Data url for images/avatar/user_blue_72px.png. + # Generated using utils.convert_png_to_data_url. task_entry = improvements_domain.TaskEntry( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_RESOLVED, self.owner_id, + constants.TASK_STATUS_RESOLVED, self.owner_id, self.MOCK_DATE) self.assertEqual(task_entry.to_dict(), { 'entity_type': 'exploration', @@ -83,31 +81,18 @@ def test_to_dict_has_expected_value(self): 'target_id': 'Introduction', 'issue_description': 'issue description', 'status': 'resolved', - 'resolver_username': self.OWNER_USERNAME, - 'resolver_profile_picture_data_url': ( - user_services.DEFAULT_IDENTICON_DATA_URL), + 'resolver_username': None, + 'resolver_profile_picture_data_url': None, 'resolved_on_msecs': utils.get_time_in_millisecs(self.MOCK_DATE), }) - def test_to_dict_with_non_existing_resolver_id_raises_exception(self): - invalid_resolver_id = 'non_existing_user_id' - task_entry = improvements_domain.TaskEntry( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, - feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_RESOLVED, invalid_resolver_id, - self.MOCK_DATE) - with self.assertRaisesRegexp(Exception, 'User not found'): - task_entry.to_dict() - - def test_can_create_open_task_with_corresponding_values(self): + def test_can_create_open_task_with_corresponding_values(self) -> None: task_entry = improvements_domain.TaskEntry( - 
improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_OPEN, None, None) + constants.TASK_STATUS_OPEN, None, None) self.assertEqual(task_entry.entity_type, 'exploration') self.assertEqual(task_entry.entity_id, self.exp_id) @@ -120,13 +105,13 @@ def test_can_create_open_task_with_corresponding_values(self): self.assertIsNone(task_entry.resolver_id) self.assertIsNone(task_entry.resolved_on) - def test_can_create_obsolete_task_with_corresponding_values(self): + def test_can_create_obsolete_task_with_corresponding_values(self) -> None: task_entry = improvements_domain.TaskEntry( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_OBSOLETE, None, None) + constants.TASK_STATUS_OBSOLETE, None, None) self.assertEqual(task_entry.entity_type, 'exploration') self.assertEqual(task_entry.entity_id, self.exp_id) @@ -139,13 +124,13 @@ def test_can_create_obsolete_task_with_corresponding_values(self): self.assertIsNone(task_entry.resolver_id) self.assertIsNone(task_entry.resolved_on) - def test_can_create_resolved_task_with_corresponding_value(self): + def test_can_create_resolved_task_with_corresponding_value(self) -> None: task_entry = improvements_domain.TaskEntry( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + 
constants.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_RESOLVED, self.owner_id, + constants.TASK_STATUS_RESOLVED, self.owner_id, self.MOCK_DATE) self.assertEqual(task_entry.entity_type, 'exploration') @@ -159,13 +144,15 @@ def test_can_create_resolved_task_with_corresponding_value(self): self.assertEqual(task_entry.resolver_id, self.owner_id) self.assertEqual(task_entry.resolved_on, self.MOCK_DATE) - def test_constructor_ignores_resolution_args_when_task_is_open(self): + def test_constructor_ignores_resolution_args_when_task_is_open( + self + ) -> None: task_entry = improvements_domain.TaskEntry( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_OPEN, self.owner_id, self.MOCK_DATE) + constants.TASK_STATUS_OPEN, self.owner_id, self.MOCK_DATE) self.assertEqual(task_entry.entity_type, 'exploration') self.assertEqual(task_entry.entity_id, self.exp_id) @@ -178,13 +165,15 @@ def test_constructor_ignores_resolution_args_when_task_is_open(self): self.assertIsNone(task_entry.resolver_id) self.assertIsNone(task_entry.resolved_on) - def test_constructor_ignores_resolution_args_when_task_is_obsolete(self): + def test_constructor_ignores_resolution_args_when_task_is_obsolete( + self + ) -> None: task_entry = improvements_domain.TaskEntry( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_ENTITY_TYPE_EXPLORATION, self.exp_id, 1, + 
constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_OBSOLETE, self.owner_id, + constants.TASK_STATUS_OBSOLETE, self.owner_id, self.MOCK_DATE) self.assertEqual(task_entry.entity_type, 'exploration') diff --git a/core/domain/improvements_services.py b/core/domain/improvements_services.py index b89b92d96e33..49af315d2f5b 100644 --- a/core/domain/improvements_services.py +++ b/core/domain/improvements_services.py @@ -23,44 +23,60 @@ import operator from core import feconf -from core import python_utils +from core.constants import constants +from core.domain import exp_domain from core.domain import improvements_domain from core.platform import models +from typing import Dict, Iterator, List, Optional, Sequence, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import improvements_models + (improvements_models,) = ( - models.Registry.import_models([models.NAMES.improvements])) + models.Registry.import_models([models.Names.IMPROVEMENTS]) +) datastore_services = models.Registry.import_datastore_services() -def _yield_all_tasks_ordered_by_status(composite_entity_id): +def _yield_all_tasks_ordered_by_status( + composite_entity_id: str +) -> Iterator[improvements_domain.TaskEntry]: """Yields all of the tasks corresponding to the given entity in storage. Args: composite_entity_id: str. The identifier for the specific entity being queried. Must be generated from: - TaskEntryModel.generate_composite_entity_id. + ExplorationStatsTaskEntryModel.generate_composite_entity_id. Yields: improvements_domain.TaskEntry. All of the tasks corresponding to the given composite_entity_id. 
""" - query = improvements_models.TaskEntryModel.query( - improvements_models.TaskEntryModel.composite_entity_id == - composite_entity_id).order(improvements_models.TaskEntryModel.status) + model_class = improvements_models.ExplorationStatsTaskEntryModel + results: Sequence[improvements_models.ExplorationStatsTaskEntryModel] = [] + query = model_class.query( + model_class.composite_entity_id == composite_entity_id + ).order(model_class.status) cursor, more = (None, True) while more: results, cursor, more = query.fetch_page( - feconf.MAX_TASK_MODELS_PER_FETCH, start_cursor=cursor) + feconf.MAX_TASK_MODELS_PER_FETCH, start_cursor=cursor + ) for task_model in results: yield get_task_entry_from_model(task_model) -def get_task_entry_from_model(task_entry_model): +def get_task_entry_from_model( + task_entry_model: improvements_models.ExplorationStatsTaskEntryModel +) -> improvements_domain.TaskEntry: """Returns a domain object corresponding to the given task entry model. Args: - task_entry_model: improvements_models.TaskEntryModel. The task entry - model to get the corresponding domain object. + task_entry_model: improvements_models.ExplorationStatsTaskEntryModel. + The task entry model to get the corresponding domain object. Returns: improvements_domain.TaskEntry. The corresponding domain object. @@ -73,7 +89,9 @@ def get_task_entry_from_model(task_entry_model): task_entry_model.resolver_id, task_entry_model.resolved_on) -def fetch_exploration_tasks(exploration): +def fetch_exploration_tasks( + exploration: exp_domain.Exploration +) -> Tuple[List[improvements_domain.TaskEntry], Dict[str, List[str]]]: """Returns a tuple encoding the open and resolved tasks corresponding to the exploration. @@ -90,26 +108,33 @@ def fetch_exploration_tasks(exploration): tasks. 
""" composite_entity_id = ( - improvements_models.TaskEntryModel.generate_composite_entity_id( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, - exploration.id, exploration.version)) + improvements_models.ExplorationStatsTaskEntryModel + .generate_composite_entity_id( + constants.TASK_ENTITY_TYPE_EXPLORATION, + exploration.id, + exploration.version + ) + ) tasks_grouped_by_status = itertools.groupby( _yield_all_tasks_ordered_by_status(composite_entity_id), operator.attrgetter('status')) - open_tasks = [] + open_tasks: List[improvements_domain.TaskEntry] = [] resolved_task_types_by_state_name = collections.defaultdict(list) for status_group, tasks in tasks_grouped_by_status: - if status_group == improvements_models.TASK_STATUS_OPEN: + if status_group == constants.TASK_STATUS_OPEN: open_tasks.extend(tasks) - elif status_group == improvements_models.TASK_STATUS_RESOLVED: + elif status_group == constants.TASK_STATUS_RESOLVED: for t in tasks: resolved_task_types_by_state_name[t.target_id].append( t.task_type) return open_tasks, dict(resolved_task_types_by_state_name) -def fetch_exploration_task_history_page(exploration, urlsafe_start_cursor=None): +def fetch_exploration_task_history_page( + exploration: exp_domain.Exploration, + urlsafe_start_cursor: Optional[str] = None +) -> Tuple[List[improvements_domain.TaskEntry], Optional[str], bool]: """Fetches a page from the given exploration's history of resolved tasks. Args: @@ -129,28 +154,33 @@ def fetch_exploration_task_history_page(exploration, urlsafe_start_cursor=None): this batch. If False, there are no more results; if True, there are probably more results. 
""" + model_class = improvements_models.ExplorationStatsTaskEntryModel + results: Sequence[improvements_models.ExplorationStatsTaskEntryModel] = [] start_cursor = ( - urlsafe_start_cursor and - datastore_services.make_cursor(urlsafe_cursor=urlsafe_start_cursor)) - results, cursor, more = ( - improvements_models.TaskEntryModel.query( - improvements_models.TaskEntryModel.entity_type == ( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION), - improvements_models.TaskEntryModel.entity_id == exploration.id, - improvements_models.TaskEntryModel.status == ( - improvements_models.TASK_STATUS_RESOLVED)) - .order(-improvements_models.TaskEntryModel.resolved_on) - .fetch_page( - feconf.MAX_TASK_MODELS_PER_HISTORY_PAGE, start_cursor=start_cursor)) + datastore_services.make_cursor(urlsafe_cursor=urlsafe_start_cursor) + if urlsafe_start_cursor else None + ) + results, cursor, more = model_class.query( + model_class.entity_type == constants.TASK_ENTITY_TYPE_EXPLORATION, + model_class.entity_id == exploration.id, + model_class.status == constants.TASK_STATUS_RESOLVED + ).order( + -model_class.resolved_on + ).fetch_page( + feconf.MAX_TASK_MODELS_PER_HISTORY_PAGE, start_cursor=start_cursor + ) # The urlsafe returns bytes and we need to decode them to string. return ( [get_task_entry_from_model(model) for model in results], - cursor and cursor.urlsafe().decode('utf-8'), + cursor.urlsafe().decode('utf-8') if cursor else None, more ) -def put_tasks(tasks, update_last_updated_time=True): +def put_tasks( + tasks: List[improvements_domain.TaskEntry], + update_last_updated_time: bool = True +) -> None: """Puts each of the given tasks into storage if necessary, conditionally updating their last updated time. @@ -163,13 +193,13 @@ def put_tasks(tasks, update_last_updated_time=True): update_last_updated_time: bool. Whether to update the last_updated field of the task models. 
""" - task_models = improvements_models.TaskEntryModel.get_multi( + task_models = improvements_models.ExplorationStatsTaskEntryModel.get_multi( [t.task_id for t in tasks]) models_to_put = [] - for task, model in python_utils.ZIP(tasks, task_models): + for task, model in zip(tasks, task_models): if model is None: models_to_put.append( - improvements_models.TaskEntryModel( + improvements_models.ExplorationStatsTaskEntryModel( id=task.task_id, composite_entity_id=task.composite_entity_id, entity_type=task.entity_type, @@ -184,22 +214,29 @@ def put_tasks(tasks, update_last_updated_time=True): resolved_on=task.resolved_on)) elif apply_changes_to_model(task, model): models_to_put.append(model) - improvements_models.TaskEntryModel.update_timestamps_multi( + improvements_models.ExplorationStatsTaskEntryModel.update_timestamps_multi( models_to_put, update_last_updated_time=update_last_updated_time) - improvements_models.TaskEntryModel.put_multi(models_to_put) + improvements_models.ExplorationStatsTaskEntryModel.put_multi(models_to_put) -def apply_changes_to_model(task_entry, task_entry_model): +def apply_changes_to_model( + task_entry: improvements_domain.TaskEntry, + task_entry_model: improvements_models.ExplorationStatsTaskEntryModel +) -> bool: """Makes changes to the given model when differences are found. Args: task_entry: improvements_domain.TaskEntry. The TaskEntry domain object to be check if changes made to the TaskEntry model. - task_entry_model: improvements_models.TaskEntryModel. The TaskEntry - model object to be compared with TaskEntry domain object. + task_entry_model: improvements_models.ExplorationStatsTaskEntryModel. + The TaskEntry model object to be compared with TaskEntry domain + object. Returns: bool. Whether any change was made to the model. + + Raises: + Exception. Wrong model provided. 
""" if task_entry_model.id != task_entry.task_id: raise Exception('Wrong model provided') diff --git a/core/domain/improvements_services_test.py b/core/domain/improvements_services_test.py index a83fa139d570..77ff5f5cc2cf 100644 --- a/core/domain/improvements_services_test.py +++ b/core/domain/improvements_services_test.py @@ -21,33 +21,42 @@ import datetime from core import feconf +from core.constants import constants from core.domain import improvements_domain from core.domain import improvements_services from core.platform import models from core.tests import test_utils +from typing import Final + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import improvements_models + (improvements_models,) = ( - models.Registry.import_models([models.NAMES.improvements])) + models.Registry.import_models([models.Names.IMPROVEMENTS])) class ImprovementsServicesTestBase(test_utils.GenericTestBase): """Base class with helper methods for the improvements_services tests.""" - EXP_ID = 'eid' - MOCK_DATE = datetime.datetime(2020, 6, 15) + EXP_ID: Final = 'eid' + MOCK_DATE: Final = datetime.datetime(2020, 6, 15) - def setUp(self): - super(ImprovementsServicesTestBase, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.exp = self.save_new_valid_exploration(self.EXP_ID, self.owner_id) # Necessary to provide sufficient debug information when failures occur. - self.maxDiff = None + self.maxDiff = 0 def _new_obsolete_task( - self, state_name=feconf.DEFAULT_INIT_STATE_NAME, - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - exploration_version=1): + self, + state_name: str = feconf.DEFAULT_INIT_STATE_NAME, + task_type: str = constants.TASK_TYPE_HIGH_BOUNCE_RATE, + exploration_version: int = 1 + ) -> improvements_domain.TaskEntry: """Constructs a new default obsolete task with the provided values. 
Args: @@ -60,21 +69,23 @@ def _new_obsolete_task( improvements_domain.TaskEntry. A new obsolete task entry. """ return improvements_domain.TaskEntry( - entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION, entity_id=self.EXP_ID, entity_version=exploration_version, task_type=task_type, - target_type=improvements_models.TASK_TARGET_TYPE_STATE, + target_type=constants.TASK_TARGET_TYPE_STATE, target_id=state_name, issue_description='issue description', - status=improvements_models.TASK_STATUS_OBSOLETE, + status=constants.TASK_STATUS_OBSOLETE, resolver_id=None, resolved_on=None) def _new_open_task( - self, state_name=feconf.DEFAULT_INIT_STATE_NAME, - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - exploration_version=1): + self, + state_name: str = feconf.DEFAULT_INIT_STATE_NAME, + task_type: str = constants.TASK_TYPE_HIGH_BOUNCE_RATE, + exploration_version: int = 1 + ) -> improvements_domain.TaskEntry: """Constructs a new default open task with the provided values. Args: @@ -87,21 +98,23 @@ def _new_open_task( improvements_domain.TaskEntry. A new open task entry. 
""" return improvements_domain.TaskEntry( - entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION, entity_id=self.EXP_ID, entity_version=exploration_version, task_type=task_type, - target_type=improvements_models.TASK_TARGET_TYPE_STATE, + target_type=constants.TASK_TARGET_TYPE_STATE, target_id=state_name, issue_description='issue description', - status=improvements_models.TASK_STATUS_OPEN, + status=constants.TASK_STATUS_OPEN, resolver_id=None, resolved_on=None) def _new_resolved_task( - self, state_name=feconf.DEFAULT_INIT_STATE_NAME, - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - exploration_version=1): + self, + state_name: str = feconf.DEFAULT_INIT_STATE_NAME, + task_type: str = constants.TASK_TYPE_HIGH_BOUNCE_RATE, + exploration_version: int = 1 + ) -> improvements_domain.TaskEntry: """Constructs a new default resolved task with the provided values. Args: @@ -114,14 +127,14 @@ def _new_resolved_task( improvements_domain.TaskEntry. A new resolved task entry. 
""" return improvements_domain.TaskEntry( - entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION, entity_id=self.EXP_ID, entity_version=exploration_version, task_type=task_type, - target_type=improvements_models.TASK_TARGET_TYPE_STATE, + target_type=constants.TASK_TARGET_TYPE_STATE, target_id=state_name, issue_description='issue description', - status=improvements_models.TASK_STATUS_RESOLVED, + status=constants.TASK_STATUS_RESOLVED, resolver_id=self.owner_id, resolved_on=self.MOCK_DATE) @@ -129,15 +142,16 @@ def _new_resolved_task( class GetTaskEntryFromModelTests(ImprovementsServicesTestBase): """Unit tests for the get_task_entry_from_model function.""" - def test_returns_same_fields_as_model(self): - task_id = improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.EXP_ID, 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + def test_returns_same_fields_as_model(self) -> None: + task_id = improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, self.EXP_ID, 1, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, feconf.DEFAULT_INIT_STATE_NAME, 'issue description', - improvements_models.TASK_STATUS_RESOLVED, self.owner_id, + constants.TASK_STATUS_RESOLVED, self.owner_id, self.MOCK_DATE) - task_entry_model = improvements_models.TaskEntryModel.get_by_id(task_id) + task_entry_model = ( + improvements_models.ExplorationStatsTaskEntryModel.get(task_id)) task_entry = ( improvements_services.get_task_entry_from_model(task_entry_model)) @@ -161,13 +175,15 @@ def test_returns_same_fields_as_model(self): class FetchExplorationTasksTests(ImprovementsServicesTestBase): """Unit tests for the fetch_exploration_tasks function.""" - def test_fetch_when_no_models_exist(self): + def test_fetch_when_no_models_exist(self) -> None: open_tasks, 
resolved_task_types_by_state_name = ( improvements_services.fetch_exploration_tasks(self.exp)) self.assertEqual(open_tasks, []) self.assertEqual(resolved_task_types_by_state_name, {}) - def test_fetch_when_number_of_open_tasks_exceed_single_fetch_limit(self): + def test_fetch_when_number_of_open_tasks_exceed_single_fetch_limit( + self + ) -> None: tasks = [ self._new_open_task(state_name='State %d' % (i,)) for i in range(int(feconf.MAX_TASK_MODELS_PER_FETCH * 2.5)) @@ -178,39 +194,40 @@ def test_fetch_when_number_of_open_tasks_exceed_single_fetch_limit(self): self.assertEqual(resolved_task_types_by_state_name, {}) self.assertItemsEqual( - [t.to_dict() for t in tasks], [t.to_dict() for t in open_tasks]) + [t.to_dict() for t in tasks], + [t.to_dict() for t in open_tasks]) - def test_fetch_identifies_the_resolved_tasks_of_each_state(self): + def test_fetch_identifies_the_resolved_tasks_of_each_state(self) -> None: tasks = [ self._new_resolved_task( state_name='A', - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE), + task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE), self._new_resolved_task( state_name='B', - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE), + task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE), self._new_resolved_task( state_name='B', task_type=( - improvements_models.TASK_TYPE_NEEDS_GUIDING_RESPONSES)), + constants.TASK_TYPE_NEEDS_GUIDING_RESPONSES)), self._new_resolved_task( state_name='C', task_type=( - improvements_models.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP)), + constants.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP)), self._new_resolved_task( state_name='D', - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE), + task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE), self._new_resolved_task( state_name='D', task_type=( - improvements_models.TASK_TYPE_NEEDS_GUIDING_RESPONSES)), + constants.TASK_TYPE_NEEDS_GUIDING_RESPONSES)), self._new_resolved_task( state_name='D', task_type=( - improvements_models.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP)), + 
constants.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP)), self._new_resolved_task( state_name='D', task_type=( - improvements_models.TASK_TYPE_SUCCESSIVE_INCORRECT_ANSWERS)) + constants.TASK_TYPE_SUCCESSIVE_INCORRECT_ANSWERS)) ] improvements_services.put_tasks(tasks) open_tasks, resolved_task_types_by_state_name = ( @@ -242,7 +259,7 @@ def test_fetch_identifies_the_resolved_tasks_of_each_state(self): 'successive_incorrect_answers', ]) - def test_fetch_ignores_obsolete_tasks(self): + def test_fetch_ignores_obsolete_tasks(self) -> None: tasks = [ self._new_obsolete_task(state_name='State %d' % (i,)) for i in range(50) @@ -254,33 +271,35 @@ def test_fetch_ignores_obsolete_tasks(self): self.assertEqual(open_tasks, []) self.assertEqual(resolved_task_types_by_state_name, {}) - def test_fetch_only_returns_tasks_for_the_given_exploration_version(self): + def test_fetch_only_returns_tasks_for_the_given_exploration_version( + self + ) -> None: tasks = [ # Version 1 tasks. self._new_open_task( state_name='A', - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, + task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE, exploration_version=1), self._new_open_task( state_name='B', - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, + task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE, exploration_version=1), self._new_open_task( state_name='C', - task_type=improvements_models.TASK_TYPE_NEEDS_GUIDING_RESPONSES, + task_type=constants.TASK_TYPE_NEEDS_GUIDING_RESPONSES, exploration_version=1), # Version 2 tasks. 
self._new_open_task( state_name='A', - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, + task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE, exploration_version=2), self._new_resolved_task( state_name='B', - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, + task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE, exploration_version=2), self._new_resolved_task( state_name='C', - task_type=improvements_models.TASK_TYPE_NEEDS_GUIDING_RESPONSES, + task_type=constants.TASK_TYPE_NEEDS_GUIDING_RESPONSES, exploration_version=2), ] improvements_services.put_tasks(tasks) @@ -301,23 +320,20 @@ def test_fetch_only_returns_tasks_for_the_given_exploration_version(self): class FetchExplorationTaskHistoryPageTests(ImprovementsServicesTestBase): """Unit tests for the fetch_exploration_task_history_page function.""" - def setUp(self): - super(FetchExplorationTaskHistoryPageTests, self).setUp() + def setUp(self) -> None: + super().setUp() task_entries = [] for i in range(1, 26): task_entry = self._new_resolved_task( state_name='State %d' % (i,), exploration_version=i) task_entry.resolved_on = ( self.MOCK_DATE + datetime.timedelta(minutes=5 * i)) - # last_updated of tasks are descending to ensure that the tasks - # returned are ordered by resolved_on instead. 
- task_entry.last_updated = ( - self.MOCK_DATE - datetime.timedelta(minutes=5 * i)) + task_entries.append(task_entry) improvements_services.put_tasks( task_entries, update_last_updated_time=False) - def test_fetch_returns_first_page_of_history(self): + def test_fetch_returns_first_page_of_history(self) -> None: results, cursor, more = ( improvements_services.fetch_exploration_task_history_page(self.exp)) @@ -328,7 +344,9 @@ def test_fetch_returns_first_page_of_history(self): self.assertTrue(more) self.assertIsNotNone(cursor) - def test_fetch_until_no_more_pages_returns_every_resolved_task(self): + def test_fetch_until_no_more_pages_returns_every_resolved_task( + self + ) -> None: aggregated_tasks, cursor, more = [], None, True while more: results, cursor, more = ( @@ -346,7 +364,8 @@ def test_fetch_until_no_more_pages_returns_every_resolved_task(self): self.assertFalse(more) def test_fetch_first_page_after_fetching_next_page_returns_same_results( - self): + self + ) -> None: initial_results, initial_cursor, initial_more = ( improvements_services.fetch_exploration_task_history_page(self.exp)) self.assertIsNotNone(initial_cursor) @@ -368,7 +387,9 @@ def test_fetch_first_page_after_fetching_next_page_returns_same_results( class PutTasksTests(ImprovementsServicesTestBase): """Unit tests for the put_tasks function.""" - def test_put_for_task_entries_which_do_not_exist_creates_new_models(self): + def test_put_for_task_entries_which_do_not_exist_creates_new_models( + self + ) -> None: open_task = self._new_open_task(state_name='Start') obsolete_task = self._new_obsolete_task(state_name='Middle') resolved_task = self._new_resolved_task(state_name='End') @@ -377,26 +398,30 @@ def test_put_for_task_entries_which_do_not_exist_creates_new_models(self): [open_task, obsolete_task, resolved_task]) open_task_model = ( - improvements_models.TaskEntryModel.get_by_id(open_task.task_id)) + improvements_models.ExplorationStatsTaskEntryModel.get( + open_task.task_id)) 
obsolete_task_model = ( - improvements_models.TaskEntryModel.get_by_id(obsolete_task.task_id)) + improvements_models.ExplorationStatsTaskEntryModel.get( + obsolete_task.task_id)) resolved_task_model = ( - improvements_models.TaskEntryModel.get_by_id(resolved_task.task_id)) - - self.assertEqual( - open_task.to_dict(), - improvements_services.get_task_entry_from_model( - open_task_model).to_dict()) + improvements_models.ExplorationStatsTaskEntryModel.get( + resolved_task.task_id)) + + open_task_entry = improvements_services.get_task_entry_from_model( + open_task_model) + obsolete_task_entry = improvements_services.get_task_entry_from_model( + obsolete_task_model) + resolved_task_entry = improvements_services.get_task_entry_from_model( + resolved_task_model) + self.assertEqual(open_task.to_dict(), open_task_entry.to_dict()) self.assertEqual( obsolete_task.to_dict(), - improvements_services.get_task_entry_from_model( - obsolete_task_model).to_dict()) + obsolete_task_entry.to_dict()) self.assertEqual( resolved_task.to_dict(), - improvements_services.get_task_entry_from_model( - resolved_task_model).to_dict()) + resolved_task_entry.to_dict()) - def test_put_for_tasks_entries_which_exist_updates_the_models(self): + def test_put_for_tasks_entries_which_exist_updates_the_models(self) -> None: task_entry = self._new_open_task() created_on = datetime.datetime(2020, 6, 15, 5) updated_on = created_on + datetime.timedelta(minutes=5) @@ -404,7 +429,8 @@ def test_put_for_tasks_entries_which_exist_updates_the_models(self): with self.mock_datetime_utcnow(created_on): improvements_services.put_tasks([task_entry]) - model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id) + model = improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id) self.assertEqual(model.resolver_id, None) self.assertEqual(model.created_on, created_on) self.assertEqual(model.last_updated, created_on) @@ -414,12 +440,15 @@ def 
test_put_for_tasks_entries_which_exist_updates_the_models(self): with self.mock_datetime_utcnow(updated_on): improvements_services.put_tasks([task_entry]) - model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id) + model = improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id) self.assertEqual(model.resolver_id, self.owner_id) self.assertEqual(model.created_on, created_on) self.assertEqual(model.last_updated, updated_on) - def test_put_for_task_entries_that_are_not_changing_does_nothing(self): + def test_put_for_task_entries_that_are_not_changing_does_nothing( + self + ) -> None: task_entry = self._new_resolved_task() created_on = datetime.datetime(2020, 6, 15, 5) updated_on = created_on + datetime.timedelta(minutes=5) @@ -427,7 +456,8 @@ def test_put_for_task_entries_that_are_not_changing_does_nothing(self): with self.mock_datetime_utcnow(created_on): improvements_services.put_tasks([task_entry]) - model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id) + model = improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id) self.assertEqual(model.resolver_id, self.owner_id) self.assertEqual(model.created_on, created_on) self.assertEqual(model.last_updated, created_on) @@ -435,12 +465,15 @@ def test_put_for_task_entries_that_are_not_changing_does_nothing(self): with self.mock_datetime_utcnow(updated_on): improvements_services.put_tasks([task_entry]) - model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id) + model = improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id) self.assertEqual(model.resolver_id, self.owner_id) self.assertEqual(model.created_on, created_on) self.assertEqual(model.last_updated, created_on) - def test_put_for_updated_task_entries_without_changing_last_updated(self): + def test_put_for_updated_task_entries_without_changing_last_updated( + self + ) -> None: task_entry = self._new_open_task() created_on = datetime.datetime(2020, 6, 
15, 5) updated_on = created_on + datetime.timedelta(minutes=5) @@ -448,7 +481,8 @@ def test_put_for_updated_task_entries_without_changing_last_updated(self): with self.mock_datetime_utcnow(created_on): improvements_services.put_tasks([task_entry]) - model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id) + model = improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id) self.assertEqual(model.resolver_id, None) self.assertEqual(model.created_on, created_on) self.assertEqual(model.last_updated, created_on) @@ -459,7 +493,8 @@ def test_put_for_updated_task_entries_without_changing_last_updated(self): improvements_services.put_tasks( [task_entry], update_last_updated_time=False) - model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id) + model = improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id) self.assertEqual(model.resolver_id, self.owner_id) self.assertEqual(model.created_on, created_on) self.assertEqual(model.last_updated, created_on) @@ -468,32 +503,35 @@ def test_put_for_updated_task_entries_without_changing_last_updated(self): class ApplyChangesToModelTests(ImprovementsServicesTestBase): """Unit tests for the apply_changes_to_model function.""" - def test_passing_mismatching_task_entries_raises_an_exception(self): + def test_passing_mismatching_task_entries_raises_an_exception(self) -> None: task_entry = self._new_open_task() improvements_services.put_tasks([task_entry]) task_entry_model = ( - improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)) + improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id)) task_entry.target_id = 'Different State' - with self.assertRaisesRegexp(Exception, 'Wrong model provided'): + with self.assertRaisesRegex(Exception, 'Wrong model provided'): improvements_services.apply_changes_to_model( task_entry, task_entry_model) - def test_returns_false_when_task_is_equalivalent_to_model(self): + def 
test_returns_false_when_task_is_equalivalent_to_model(self) -> None: task_entry = self._new_open_task() improvements_services.put_tasks([task_entry]) task_entry_model = ( - improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)) + improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id)) self.assertFalse( improvements_services.apply_changes_to_model( task_entry, task_entry_model)) - def test_makes_changes_when_issue_description_is_different(self): + def test_makes_changes_when_issue_description_is_different(self) -> None: task_entry = self._new_open_task() improvements_services.put_tasks([task_entry]) task_entry_model = ( - improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)) + improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id)) task_entry.issue_description = 'new issue description' self.assertTrue( @@ -503,47 +541,55 @@ def test_makes_changes_when_issue_description_is_different(self): task_entry_model.issue_description, 'new issue description') def test_makes_changes_to_status_related_fields_if_status_is_different( - self): + self + ) -> None: task_entry = self._new_open_task() improvements_services.put_tasks([task_entry]) task_entry_model = ( - improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)) + improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id)) task_entry = self._new_resolved_task() self.assertTrue( improvements_services.apply_changes_to_model( task_entry, task_entry_model)) self.assertEqual( - task_entry_model.status, improvements_models.TASK_STATUS_RESOLVED) + task_entry_model.status, constants.TASK_STATUS_RESOLVED) self.assertEqual(task_entry_model.resolver_id, self.owner_id) self.assertEqual(task_entry_model.resolved_on, self.MOCK_DATE) - def test_no_changes_made_if_only_resolver_id_is_different(self): + def test_no_changes_made_if_only_resolver_id_is_different(self) -> None: task_entry = self._new_open_task() 
improvements_services.put_tasks([task_entry]) task_entry_model = ( - improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)) + improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id)) task_entry.resolver_id = self.owner_id self.assertFalse( improvements_services.apply_changes_to_model( task_entry, task_entry_model)) self.assertEqual( - task_entry_model.status, improvements_models.TASK_STATUS_OPEN) + task_entry_model.status, constants.TASK_STATUS_OPEN) self.assertIsNone(task_entry_model.resolver_id) self.assertIsNone(task_entry_model.resolved_on) - def test_no_changes_made_if_only_resolved_on_is_different(self): + def test_no_changes_made_if_only_resolved_on_is_different(self) -> None: task_entry = self._new_open_task() improvements_services.put_tasks([task_entry]) task_entry_model = ( - improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)) - task_entry.resolved_on = self.owner_id + improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry.task_id)) + # Here we use MyPy ignore because `resolved_on` can only accept + # datetime values but for testing purposes here we are providing + # string value which causes MyPy to throw an error. Thus to avoid + # the error, we used ignore here. 
+ task_entry.resolved_on = self.owner_id # type: ignore[assignment] self.assertFalse( improvements_services.apply_changes_to_model( task_entry, task_entry_model)) self.assertEqual( - task_entry_model.status, improvements_models.TASK_STATUS_OPEN) + task_entry_model.status, constants.TASK_STATUS_OPEN) self.assertIsNone(task_entry_model.resolved_on) self.assertIsNone(task_entry_model.resolved_on) diff --git a/core/domain/interaction_registry.py b/core/domain/interaction_registry.py index 702ce09f5e7f..3774132815a2 100644 --- a/core/domain/interaction_registry.py +++ b/core/domain/interaction_registry.py @@ -23,30 +23,38 @@ import json import os +from core import constants from core import feconf -from core import python_utils -from core.constants import constants + +from typing import Dict, List, Optional + +MYPY = False +if MYPY: # pragma: no cover + from extensions.interactions import base class Registry: """Registry of all interactions.""" # Dict mapping interaction ids to instances of the interactions. - _interactions = {} + _interactions: Dict[str, base.BaseInteraction] = {} # Dict mapping State schema version (XX) to interaction specs dict, # retrieved from interaction_specs_vXX.json. - _state_schema_version_to_interaction_specs = {} + _state_schema_version_to_interaction_specs: ( + Dict[int, Dict[str, base.BaseInteractionDict]] + ) = {} @classmethod - def get_all_interaction_ids(cls): + def get_all_interaction_ids(cls) -> List[str]: """Get a list of all interaction ids.""" return list(set(itertools.chain.from_iterable( interaction_category['interaction_ids'] - for interaction_category in constants.ALLOWED_INTERACTION_CATEGORIES + for interaction_category + in constants.constants.ALLOWED_INTERACTION_CATEGORIES ))) @classmethod - def _refresh(cls): + def _refresh(cls) -> None: """Refreshes and updates all the interaction ids to add new interaction instances to the registry. 
""" @@ -67,25 +75,42 @@ def _refresh(cls): cls._interactions[clazz.__name__] = clazz() @classmethod - def get_all_interactions(cls): + def get_all_interactions(cls) -> List[base.BaseInteraction]: """Get a list of instances of all interactions.""" if len(cls._interactions) == 0: cls._refresh() return list(cls._interactions.values()) @classmethod - def get_interaction_by_id(cls, interaction_id): + def get_interaction_by_id( + cls, interaction_id: Optional[str] + ) -> base.BaseInteraction: """Gets an interaction by its id. Refreshes once if the interaction is not found; subsequently, throws a KeyError. + + Args: + interaction_id: Optional[str]. The interaction id. + + Returns: + BaseInteraction. An interaction for the given interaction_id. + + Raises: + Exception. No interaction exists for the None interaction_id. """ + if interaction_id is None: + raise Exception( + 'No interaction exists for the None interaction_id.' + ) if interaction_id not in cls._interactions: cls._refresh() return cls._interactions[interaction_id] @classmethod - def get_deduplicated_dependency_ids(cls, interaction_ids): + def get_deduplicated_dependency_ids( + cls, interaction_ids: List[str] + ) -> List[str]: """Return a list of dependency ids for the given interactions. Each entry of the resulting list is unique. 
The list is sorted in no @@ -98,7 +123,7 @@ def get_deduplicated_dependency_ids(cls, interaction_ids): return list(result) @classmethod - def get_all_specs(cls): + def get_all_specs(cls) -> Dict[str, base.BaseInteractionDict]: """Returns a dict containing the full specs of each interaction.""" return { interaction.id: interaction.to_dict() @@ -106,13 +131,20 @@ def get_all_specs(cls): } @classmethod - def get_all_specs_for_state_schema_version(cls, state_schema_version): + def get_all_specs_for_state_schema_version( + cls, + state_schema_version: int, + can_fetch_latest_specs: bool = False + ) -> Dict[str, base.BaseInteractionDict]: """Returns a dict containing the full specs of each interaction for the - given state schema version, if available. + given state schema version, if available else return all specs or an + error depending on can_fetch_latest_specs. Args: state_schema_version: int. The state schema version to retrieve interaction specs for. + can_fetch_latest_specs: boolean. Whether to fetch the latest specs + if the legacy specs file is not found. Returns: dict. The interaction specs for the given state schema @@ -120,26 +152,38 @@ def get_all_specs_for_state_schema_version(cls, state_schema_version): interaction specs. See interaction_specs.json for an example. Raises: - Exception. No interaction specs json file found for the given state + OSError. No interaction specs json file found for the given state schema version. 
""" if (state_schema_version not in cls._state_schema_version_to_interaction_specs): - file_name = ( - 'interaction_specs_state_v%i.json' % state_schema_version) - spec_file = os.path.join( - feconf.INTERACTIONS_LEGACY_SPECS_FILE_DIR, file_name) - + spec_file_path = os.path.join( + 'interactions', + 'legacy_interaction_specs_by_state_version', + 'interaction_specs_state_v%i.json' % state_schema_version + ) + spec_file_contents: Optional[str] try: - with python_utils.open_file(spec_file, 'r') as f: - specs_from_json = json.loads(f.read()) - except IOError: + spec_file_contents = constants.get_package_file_contents( + 'extensions', spec_file_path + ) + except FileNotFoundError: + spec_file_contents = None + + if spec_file_contents: + specs_from_json: Dict[str, base.BaseInteractionDict] = ( + json.loads(spec_file_contents) + ) + cls._state_schema_version_to_interaction_specs[ + state_schema_version] = specs_from_json + return cls._state_schema_version_to_interaction_specs[ + state_schema_version] + elif can_fetch_latest_specs: + return cls.get_all_specs() + else: raise IOError( 'No specs JSON file found for state schema v%i' % state_schema_version) - cls._state_schema_version_to_interaction_specs[ - state_schema_version] = specs_from_json - return cls._state_schema_version_to_interaction_specs[ state_schema_version] diff --git a/core/domain/interaction_registry_test.py b/core/domain/interaction_registry_test.py index 674405841cb9..28f434e217b7 100644 --- a/core/domain/interaction_registry_test.py +++ b/core/domain/interaction_registry_test.py @@ -22,27 +22,29 @@ import os from core import feconf -from core import python_utils from core import schema_utils +from core import utils from core.domain import exp_services from core.domain import interaction_registry from core.tests import test_utils from extensions.interactions import base -EXPECTED_TERMINAL_INTERACTIONS_COUNT = 1 +from typing import Any, Dict, Final + +EXPECTED_TERMINAL_INTERACTIONS_COUNT: Final = 1 class 
InteractionDependencyTests(test_utils.GenericTestBase): """Tests for the calculation of dependencies for interactions.""" - def setUp(self): - super(InteractionDependencyTests, self).setUp() + def setUp(self) -> None: + super().setUp() # Register and login as an editor. self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.login(self.EDITOR_EMAIL) - def test_deduplication_of_dependency_ids(self): + def test_deduplication_of_dependency_ids(self) -> None: self.assertItemsEqual( interaction_registry.Registry.get_deduplicated_dependency_ids( ['CodeRepl']), @@ -58,20 +60,11 @@ def test_deduplication_of_dependency_ids(self): ['CodeRepl', 'AlgebraicExpressionInput']), ['skulpt', 'codemirror', 'guppy', 'nerdamer']) - def test_dependency_loads_in_exploration_player_page(self): - exp_id = '0' - - exp_services.load_demo(exp_id) - - # Ensure that dependencies are added in the exploration reader page. - response = self.get_html_response('/explore/%s' % exp_id) - response.mustcontain('dependency_html.html') - - def test_no_dependencies_in_non_exploration_pages(self): + def test_no_dependencies_in_non_exploration_pages(self) -> None: response = self.get_html_response(feconf.LIBRARY_INDEX_URL) response.mustcontain(no=['dependency_html.html']) - def test_dependencies_loaded_in_exploration_editor(self): + def test_dependencies_loaded_in_exploration_editor(self) -> None: exp_services.load_demo('0') @@ -85,7 +78,7 @@ def test_dependencies_loaded_in_exploration_editor(self): class InteractionRegistryUnitTests(test_utils.GenericTestBase): """Test for the interaction registry.""" - def test_interaction_registry(self): + def test_interaction_registry(self) -> None: """Do some sanity checks on the interaction registry.""" self.assertEqual( { @@ -94,7 +87,16 @@ def test_interaction_registry(self): }, set(interaction_registry.Registry.get_all_interaction_ids())) - def test_get_all_specs(self): + with self.swap(interaction_registry.Registry, '_interactions', {}): + self.assertEqual( + { + 
type(i).__name__ + for i in + interaction_registry.Registry.get_all_interactions() + }, + set(interaction_registry.Registry.get_all_interaction_ids())) + + def test_get_all_specs(self) -> None: """Test the get_all_specs() method.""" specs_dict = interaction_registry.Registry.get_all_specs() @@ -112,7 +114,7 @@ def test_get_all_specs(self): self.assertEqual( terminal_interactions_count, EXPECTED_TERMINAL_INTERACTIONS_COUNT) - def test_interaction_specs_json_sync_all_specs(self): + def test_interaction_specs_json_sync_all_specs(self) -> None: """Test to ensure that the interaction_specs.json file is upto date with additions in the individual interaction files. """ @@ -120,12 +122,14 @@ def test_interaction_specs_json_sync_all_specs(self): spec_file = os.path.join( 'extensions', 'interactions', 'interaction_specs.json') - with python_utils.open_file(spec_file, 'r') as f: + with utils.open_file(spec_file, 'r') as f: specs_from_json = json.loads(f.read()) self.assertDictEqual(all_specs, specs_from_json) - def test_interaction_specs_customization_arg_specs_names_are_valid(self): + def test_interaction_specs_customization_arg_specs_names_are_valid( + self + ) -> None: """Test to ensure that all customization argument names in interaction specs only include alphabetic letters and are lowerCamelCase. This is because these properties are involved in the @@ -134,7 +138,10 @@ def test_interaction_specs_customization_arg_specs_names_are_valid(self): all_specs = interaction_registry.Registry.get_all_specs() ca_names_in_schema = [] - def traverse_schema_to_find_names(schema): + # Here we use type Any because values in schema dictionary can + # be of type str, int, List, Dict and other types too. So to make + # it generalized for every type of value we used Any here. + def traverse_schema_to_find_names(schema: Dict[str, Any]) -> None: """Recursively traverses the schema to find all name fields. Recursion is required because names can be nested within 'type: dict' inside a schema. 
@@ -161,14 +168,20 @@ def traverse_schema_to_find_names(schema): self.assertTrue(name.isalpha()) self.assertTrue(name[0].islower()) - def test_interaction_specs_customization_arg_default_values_are_valid(self): + def test_interaction_specs_customization_arg_default_values_are_valid( + self + ) -> None: """Test to ensure that all customization argument default values that contain content_ids are properly set to None. """ all_specs = interaction_registry.Registry.get_all_specs() + # Here we use type Any because values in schema dictionary can + # be of type str, int, List, Dict and other types too. So to make + # it generalized for every type of value we used Any here. def traverse_schema_to_find_and_validate_subtitled_content( - value, schema): + value: Any, schema: Dict[str, Any] + ) -> None: """Recursively traverse the schema to find SubtitledHtml or SubtitledUnicode contained or nested in value. @@ -203,11 +216,21 @@ def traverse_schema_to_find_and_validate_subtitled_content( traverse_schema_to_find_and_validate_subtitled_content( ca_spec['default_value'], ca_spec['schema']) - def test_get_all_specs_for_state_schema_version_for_unsaved_version(self): - with self.assertRaisesRegexp( + def test_get_all_specs_for_state_schema_version_for_unsaved_version( + self + ) -> None: + with self.assertRaisesRegex( IOError, 'No specs JSON file found for state schema' ): ( interaction_registry.Registry .get_all_specs_for_state_schema_version(10) ) + + def test_get_interaction_by_id_raises_error_for_none_interaction_id( + self + ) -> None: + with self.assertRaisesRegex( + Exception, 'No interaction exists for the None interaction_id.' 
+ ): + interaction_registry.Registry.get_interaction_by_id(None) diff --git a/core/domain/learner_goals_services.py b/core/domain/learner_goals_services.py index 3681b2bd649e..f7df7bdb9454 100644 --- a/core/domain/learner_goals_services.py +++ b/core/domain/learner_goals_services.py @@ -22,10 +22,18 @@ from core.domain import user_domain from core.platform import models -(user_models,) = models.Registry.import_models([models.NAMES.user]) +from typing import List +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models -def get_learner_goals_from_model(learner_goals_model): +(user_models,) = models.Registry.import_models([models.Names.USER]) + + +def get_learner_goals_from_model( + learner_goals_model: user_models.LearnerGoalsModel +) -> user_domain.LearnerGoals: """Returns the learner goals domain object given the learner goals model loaded from the datastore. @@ -43,7 +51,7 @@ def get_learner_goals_from_model(learner_goals_model): learner_goals_model.topic_ids_to_master) -def save_learner_goals(learner_goals): +def save_learner_goals(learner_goals: user_domain.LearnerGoals) -> None: """Save a learner goals domain object as an LearnerGoalsModel entity in the datastore. @@ -60,11 +68,13 @@ def save_learner_goals(learner_goals): learner_goals_model.update_timestamps() learner_goals_model.put() else: - learner_goals_dict['id'] = learner_goals.id - user_models.LearnerGoalsModel(**learner_goals_dict).put() + user_models.LearnerGoalsModel( + id=learner_goals.id, + **learner_goals_dict + ).put() -def mark_topic_to_learn(user_id, topic_id): +def mark_topic_to_learn(user_id: str, topic_id: str) -> bool: """Adds the topic id to the learner goals of the user. If the count exceeds feconf.MAX_CURRENT_GOALS_COUNT, the topic is not added. @@ -76,6 +86,9 @@ def mark_topic_to_learn(user_id, topic_id): Returns: bool. The boolean indicates whether the learner goals limit of the user has been exceeded. + + Raises: + Exception. Given topic is already present. 
""" learner_goals_model = user_models.LearnerGoalsModel.get( user_id, strict=False) @@ -99,12 +112,18 @@ def mark_topic_to_learn(user_id, topic_id): topic_id)) -def remove_topics_from_learn_goal(user_id, topic_ids_to_remove): +def remove_topics_from_learn_goal( + user_id: str, + topic_ids_to_remove: List[str] +) -> None: """Removes topics from the learner goals of the user (if present). Args: user_id: str. The id of the user. topic_ids_to_remove: list(str). The ids of the topics to be removed. + + Raises: + Exception. Given topic does not exist. """ learner_goals_model = user_models.LearnerGoalsModel.get( user_id, strict=False) @@ -122,7 +141,7 @@ def remove_topics_from_learn_goal(user_id, topic_ids_to_remove): save_learner_goals(learner_goals) -def get_all_topic_ids_to_learn(user_id): +def get_all_topic_ids_to_learn(user_id: str) -> List[str]: """Returns a list with the ids of all the topics that are in the goals of the user. diff --git a/core/domain/learner_goals_services_test.py b/core/domain/learner_goals_services_test.py index c211da04f3af..df1261eb52d3 100644 --- a/core/domain/learner_goals_services_test.py +++ b/core/domain/learner_goals_services_test.py @@ -27,26 +27,28 @@ from core.platform import models from core.tests import test_utils -(user_models,) = models.Registry.import_models([models.NAMES.user]) +from typing import Final, List -MAX_CURRENT_GOALS_COUNT = ( - feconf.MAX_CURRENT_GOALS_COUNT) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) + +MAX_CURRENT_GOALS_COUNT: Final = feconf.MAX_CURRENT_GOALS_COUNT class LearnerGoalsTests(test_utils.GenericTestBase): """Test the services related to learner goals services.""" - OWNER_EMAIL = 'owner@example.com' - OWNER_USERNAME = 'owner' - - TOPIC_ID_1 = 'Topic_id_1' - TOPIC_NAME_1 = 'Topic name 1' - TOPIC_ID_2 = 'Topic_id_2' - TOPIC_NAME_2 = 'Topic name 2' - TOPIC_ID_3 = 'Topic_id_3' - TOPIC_NAME_3 = 'Topic 
name 3' - TOPIC_ID_4 = 'Topic_id_4' - TOPIC_NAME_4 = 'Topic name 4' + TOPIC_ID_1: Final = 'Topic_id_1' + TOPIC_NAME_1: Final = 'Topic name 1' + TOPIC_ID_2: Final = 'Topic_id_2' + TOPIC_NAME_2: Final = 'Topic name 2' + TOPIC_ID_3: Final = 'Topic_id_3' + TOPIC_NAME_3: Final = 'Topic name 3' + TOPIC_ID_4: Final = 'Topic_id_4' + TOPIC_NAME_4: Final = 'Topic name 4' subtopic_1 = topic_domain.Subtopic( 0, 'Title 1', ['skill_id_1'], 'image.svg', @@ -69,8 +71,8 @@ class LearnerGoalsTests(test_utils.GenericTestBase): 'dummy-subtopic-zero' ) - def setUp(self): - super(LearnerGoalsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -111,18 +113,22 @@ def setUp(self): subtopics=[self.subtopic_4], next_subtopic_id=1) topic_services.publish_topic(self.TOPIC_ID_4, self.curriculum_admin_id) - def _get_all_topic_ids_to_learn(self, user_id): + def _get_all_topic_ids_to_learn(self, user_id: str) -> List[str]: """Returns the list of all the topic ids to learn corresponding to the given user id. """ learner_goals_model = user_models.LearnerGoalsModel.get( user_id, strict=False) - - return ( - learner_goals_model.topic_ids_to_learn if - learner_goals_model else []) - - def test_single_topic_is_added_correctly_to_learn(self): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if learner_goals_model: + topic_ids: List[str] = learner_goals_model.topic_ids_to_learn + return topic_ids + else: + return [] + + def test_single_topic_is_added_correctly_to_learn(self) -> None: # Test adding a single topic_id to learn. 
self.assertEqual( self._get_all_topic_ids_to_learn(self.viewer_id), []) @@ -132,7 +138,7 @@ def test_single_topic_is_added_correctly_to_learn(self): self._get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1]) - def test_multiple_topics_are_added_correctly_to_learn(self): + def test_multiple_topics_are_added_correctly_to_learn(self) -> None: # Test adding two topics to the learn. self.assertEqual( self._get_all_topic_ids_to_learn( @@ -150,7 +156,7 @@ def test_multiple_topics_are_added_correctly_to_learn(self): self._get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2]) - def test_adding_exisiting_topic_is_not_added_again(self): + def test_adding_exisiting_topic_is_not_added_again(self) -> None: # Test adding the topic_id if it is already in # learner_goals.topic_id. learner_progress_services.validate_and_add_topic_to_learn_goal( @@ -161,13 +167,13 @@ def test_adding_exisiting_topic_is_not_added_again(self): self._get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2]) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The topic id Topic_id_1 is already present in the learner goals'): learner_progress_services.validate_and_add_topic_to_learn_goal( self.viewer_id, self.TOPIC_ID_1) - def test_completed_topic_is_not_added_to_learner_goals(self): + def test_completed_topic_is_not_added_to_learner_goals(self) -> None: learner_progress_services.validate_and_add_topic_to_learn_goal( self.viewer_id, self.TOPIC_ID_1) self.assertEqual( @@ -183,7 +189,7 @@ def test_completed_topic_is_not_added_to_learner_goals(self): self._get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1]) - def test_number_of_topics_cannot_exceed_max(self): + def test_number_of_topics_cannot_exceed_max(self) -> None: # Add MAX_CURRENT_GOALS_COUNT topics. 
topic_ids = ['SAMPLE_TOPIC_ID_%s' % index for index in ( range(0, MAX_CURRENT_GOALS_COUNT))] @@ -201,7 +207,7 @@ def test_number_of_topics_cannot_exceed_max(self): self.assertEqual( self._get_all_topic_ids_to_learn(self.viewer_id), topic_ids) - def test_remove_topic_from_learner_goals(self): + def test_remove_topic_from_learner_goals(self) -> None: self.assertEqual(self._get_all_topic_ids_to_learn( self.viewer_id), []) @@ -220,7 +226,7 @@ def test_remove_topic_from_learner_goals(self): self.viewer_id), [self.TOPIC_ID_2]) # Removing the same topic raises error. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The topic id Topic_id_1 is not present in LearnerGoalsModel'): learner_goals_services.remove_topics_from_learn_goal( @@ -232,21 +238,21 @@ def test_remove_topic_from_learner_goals(self): self.assertEqual(self._get_all_topic_ids_to_learn( self.viewer_id), []) - def test_get_all_topic_ids_in_learn(self): + def test_get_all_topic_ids_in_learn(self) -> None: self.assertEqual( - self._get_all_topic_ids_to_learn( + learner_goals_services.get_all_topic_ids_to_learn( self.viewer_id), []) # Add an topic to the learner goals. learner_goals_services.mark_topic_to_learn( self.viewer_id, self.TOPIC_ID_1) self.assertEqual( - self._get_all_topic_ids_to_learn( + learner_goals_services.get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1]) # Add another topic. learner_goals_services.mark_topic_to_learn( self.viewer_id, self.TOPIC_ID_2) self.assertEqual( - self._get_all_topic_ids_to_learn( + learner_goals_services.get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2]) diff --git a/core/domain/learner_group_domain.py b/core/domain/learner_group_domain.py new file mode 100644 index 000000000000..840e472c92b6 --- /dev/null +++ b/core/domain/learner_group_domain.py @@ -0,0 +1,138 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Domain objects for Learner Groups.""" + +from __future__ import annotations + +from core import utils + +from core.domain import story_domain +from core.domain import subtopic_page_domain + +from typing import List, TypedDict + + +class LearnerGroupDict(TypedDict): + """Dictionary for LearnerGroup domain object.""" + + group_id: str + title: str + description: str + facilitator_user_ids: List[str] + learner_user_ids: List[str] + invited_learner_user_ids: List[str] + subtopic_page_ids: List[str] + story_ids: List[str] + + +class LearnerGroup: + """Domain object for learner group.""" + + def __init__( + self, + group_id: str, + title: str, + description: str, + facilitator_user_ids: List[str], + learner_user_ids: List[str], + invited_learner_user_ids: List[str], + subtopic_page_ids: List[str], + story_ids: List[str] + ) -> None: + """Constructs a LearnerGroup domain object. + + Attributes: + group_id: str. The unique ID of the learner group. + title: str. The title of the learner group. + description: str. The description of the learner group. + facilitator_user_ids: List[str]. The list of user ids of + facilitators of the learner group. + learner_user_ids: List[str]. The list of user ids of learners + of the learner group. + invited_learner_user_ids: List[str]. The list of user ids of the + users invited to join the learner group as a learner. + subtopic_page_ids: List[str]. 
The list of subtopic page ids that + are part of the learner group syllabus. A subtopic page id is + depicted as topicId:subtopicId string. + story_ids: List[str]. The list of story ids of the learner group. + """ + self.group_id = group_id + self.title = title + self.description = description + self.facilitator_user_ids = facilitator_user_ids + self.learner_user_ids = learner_user_ids + self.invited_learner_user_ids = invited_learner_user_ids + self.subtopic_page_ids = subtopic_page_ids + self.story_ids = story_ids + + def to_dict(self) -> LearnerGroupDict: + """Convert the LearnerGroup domain instance into a dictionary + form with its keys as the attributes of this class. + + Returns: + dict. A dictionary containing the LearnerGroup class + information in a dictionary form. + """ + + return { + 'group_id': self.group_id, + 'title': self.title, + 'description': self.description, + 'facilitator_user_ids': self.facilitator_user_ids, + 'learner_user_ids': self.learner_user_ids, + 'invited_learner_user_ids': self.invited_learner_user_ids, + 'subtopic_page_ids': self.subtopic_page_ids, + 'story_ids': self.story_ids + } + + def validate(self) -> None: + """Validates the LearnerGroup domain object. + + Raises: + ValidationError. One or more attributes of the LearnerGroup + are invalid. 
+ """ + + if len(self.facilitator_user_ids) < 1: + raise utils.ValidationError( + 'Expected learner group to have at least one facilitator.') + + invited_learner_set = set(self.invited_learner_user_ids) + learner_set = set(self.learner_user_ids) + + if len(invited_learner_set.intersection(learner_set)) > 0: + raise utils.ValidationError( + 'Learner group learner cannot be invited to join the group.') + + facilitator_set = set(self.facilitator_user_ids) + + if len(facilitator_set.intersection(learner_set)) > 0: + raise utils.ValidationError( + 'Learner group facilitator cannot be a learner of the group.') + + if len(facilitator_set.intersection(invited_learner_set)) > 0: + raise utils.ValidationError( + 'Learner group facilitator cannot be invited to ' + 'join the group.') + + +class LearnerGroupSyllabusDict(TypedDict): + """Dictionary reperesentation of learner group syllabus.""" + + story_summary_dicts: List[ + story_domain.LearnerGroupSyllabusStorySummaryDict] + subtopic_summary_dicts: List[subtopic_page_domain.SubtopicPageSummaryDict] diff --git a/core/domain/learner_group_domain_test.py b/core/domain/learner_group_domain_test.py new file mode 100644 index 000000000000..553e06dffce5 --- /dev/null +++ b/core/domain/learner_group_domain_test.py @@ -0,0 +1,126 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for learner group domain objects.""" + +from __future__ import annotations + +from core.domain import learner_group_domain +from core.tests import test_utils + + +class LearnerGroupTest(test_utils.GenericTestBase): + """Tests for LearnerGroup domain object.""" + + VALID_LEARNER_GROUP = learner_group_domain.LearnerGroup( + '3232', 'title', 'description', + ['user_1'], + ['user_2', 'user_3', 'user_4'], + ['user_5', 'user_6'], + ['subtopic_1', 'subtopic_2'], + ['story_1', 'story_2']) + + def test_initialization(self) -> None: + learner_group = self.VALID_LEARNER_GROUP + expected_learner_group_dict = { + 'group_id': '3232', + 'title': 'title', + 'description': 'description', + 'facilitator_user_ids': ['user_1'], + 'learner_user_ids': ['user_2', 'user_3', 'user_4'], + 'invited_learner_user_ids': ['user_5', 'user_6'], + 'subtopic_page_ids': ['subtopic_1', 'subtopic_2'], + 'story_ids': ['story_1', 'story_2'] + } + + self.assertEqual(learner_group.group_id, '3232') + self.assertEqual(learner_group.title, 'title') + self.assertEqual(learner_group.description, 'description') + self.assertEqual(learner_group.facilitator_user_ids, ['user_1']) + self.assertEqual( + learner_group.learner_user_ids, ['user_2', 'user_3', 'user_4']) + self.assertEqual( + learner_group.invited_learner_user_ids, ['user_5', 'user_6']) + self.assertEqual( + learner_group.subtopic_page_ids, + ['subtopic_1', 'subtopic_2']) + self.assertEqual(learner_group.story_ids, ['story_1', 'story_2']) + + self.assertEqual( + learner_group.to_dict(), + expected_learner_group_dict) + + def test_to_dict(self) -> None: + learner_group = self.VALID_LEARNER_GROUP + expected_learner_group_dict = { + 'group_id': '3232', + 'title': 'title', + 'description': 'description', + 'facilitator_user_ids': ['user_1'], + 'learner_user_ids': ['user_2', 'user_3', 'user_4'], + 'invited_learner_user_ids': ['user_5', 'user_6'], + 'subtopic_page_ids': ['subtopic_1', 'subtopic_2'], + 'story_ids': ['story_1', 'story_2'] + } + + 
self.assertEqual( + learner_group.to_dict(), + expected_learner_group_dict) + + def test_validation(self) -> None: + self._assert_validation_error( + learner_group_domain.LearnerGroup( + '3232', 'title', 'description', + [], + ['user_2', 'user_3', 'user_4'], + ['user_5', 'user_6'], + ['subtopic_1', 'subtopic_2'], + ['story_1', 'story_2']), + 'Expected learner group to have at least one facilitator.') + + self._assert_validation_error( + learner_group_domain.LearnerGroup( + '3232', 'title', 'description', + ['user_1'], + ['user_2', 'user_3', 'user_5'], + ['user_5', 'user_6'], + ['subtopic_1', 'subtopic_2'], + ['story_1', 'story_2']), + 'Learner group learner cannot be invited to join the group.') + + self._assert_validation_error( + learner_group_domain.LearnerGroup( + '3232', 'title', 'description', + ['user_1'], + ['user_1', 'user_3', 'user_4'], + ['user_5', 'user_6'], + ['subtopic_1', 'subtopic_2'], + ['story_1', 'story_2']), + 'Learner group facilitator cannot be a learner of the group.') + + self._assert_validation_error( + learner_group_domain.LearnerGroup( + '3232', 'title', 'description', + ['user_1'], + ['user_2', 'user_3', 'user_4'], + ['user_1', 'user_6'], + ['subtopic_1', 'subtopic_2'], + ['story_1', 'story_2']), + 'Learner group facilitator cannot be invited to join the group.') + + # Valid object should not raise exception during validation. + learner_group = self.VALID_LEARNER_GROUP + learner_group.validate() diff --git a/core/domain/learner_group_fetchers.py b/core/domain/learner_group_fetchers.py new file mode 100644 index 000000000000..7af115cfbca9 --- /dev/null +++ b/core/domain/learner_group_fetchers.py @@ -0,0 +1,255 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Getter commands for learner group models.""" + +from __future__ import annotations + +from core.domain import learner_group_domain +from core.domain import learner_group_services +from core.platform import models + +from typing import List, Literal, Optional, Sequence, overload + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import learner_group_models + from mypy_imports import user_models + +(learner_group_models, user_models) = models.Registry.import_models( + [models.Names.LEARNER_GROUP, models.Names.USER]) + + +def get_new_learner_group_id() -> str: + """Returns a new learner group id. + + Returns: + str. A new learner group id. + """ + return learner_group_models.LearnerGroupModel.get_new_id() + + +@overload +def get_learner_group_by_id( + group_id: str, *, strict: Literal[True] +) -> learner_group_domain.LearnerGroup: ... + + +@overload +def get_learner_group_by_id( + group_id: str +) -> Optional[learner_group_domain.LearnerGroup]: ... + + +@overload +def get_learner_group_by_id( + group_id: str, *, strict: Literal[False] +) -> Optional[learner_group_domain.LearnerGroup]: ... + + +@overload +def get_learner_group_by_id( + group_id: str, strict: bool +) -> Optional[learner_group_domain.LearnerGroup]: ... + + +def get_learner_group_by_id( + group_id: str, strict: bool = False +) -> Optional[learner_group_domain.LearnerGroup]: + """Returns the learner group domain object given the learner group id. + + Args: + group_id: str. The id of the learner group. + strict: bool. 
Whether to fail noisily if no LearnerGroupModel with the + given group_id exists in the datastore. + + Returns: + LearnerGroup or None. The learner group domain object corresponding to + the given id or None if no learner group exists for the given group id. + + Raises: + Exception. No LearnerGroupModel found for the given group_id. + """ + learner_group_model = learner_group_models.LearnerGroupModel.get( + group_id, strict=False) + + if not learner_group_model: + if strict: + raise Exception( + 'No LearnerGroupModel found for the given group_id: %s' % + group_id + ) + return None + + return learner_group_services.get_learner_group_from_model( + learner_group_model) + + +def get_learner_groups_of_facilitator( + user_id: str +) -> List[learner_group_domain.LearnerGroup]: + """Returns a list of learner groups of the given facilitator. + + Args: + user_id: str. The id of the facilitator. + + Returns: + list(LearnerGroup). A list of learner groups of the given facilitator. + """ + learner_grp_models = ( + learner_group_models.LearnerGroupModel.get_by_facilitator_id(user_id)) + + if not learner_grp_models: + return [] + + return [ + learner_group_services.get_learner_group_from_model(model) + for model in learner_grp_models + ] + + +@overload +def get_learner_group_models_by_ids( + user_ids: List[str], *, strict: Literal[True] +) -> List[user_models.LearnerGroupsUserModel]: ... + + +@overload +def get_learner_group_models_by_ids( + user_ids: List[str] +) -> List[Optional[user_models.LearnerGroupsUserModel]]: ... + + +@overload +def get_learner_group_models_by_ids( + user_ids: List[str], *, strict: Literal[False] +) -> List[Optional[user_models.LearnerGroupsUserModel]]: ... + + +def get_learner_group_models_by_ids( + user_ids: List[str], strict: bool = False +) -> Sequence[Optional[user_models.LearnerGroupsUserModel]]: + """Returns a list of learner_groups_user models matching the IDs provided. + + Args: + user_ids: list(str). The user ids of the learners of the group. 
+ strict: bool. Whether to fail noisily if no LearnerGroupsUserModel + exists with a given ID exists in the datastore. + + Returns: + list(LearnerGroupsUserModel|None). The list of learner_groups_user + models corresponding to given ids. If a LearnerGroupsUserModel does + not exist, the corresponding returned list element is None. + + Raises: + Exception. No LearnerGroupsUserModel exists for the given user_id. + """ + + learner_group_user_models = user_models.LearnerGroupsUserModel.get_multi( + user_ids + ) + + if strict: + for index, learner_group_user_model in enumerate( + learner_group_user_models + ): + if learner_group_user_model is None: + raise Exception( + 'No LearnerGroupsUserModel exists for the user_id: %s' + % user_ids[index] + ) + + return learner_group_user_models + + +def can_multi_learners_share_progress( + user_ids: List[str], group_id: str +) -> List[bool]: + """Returns the progress sharing permissions of the given users in the given + group. + + Args: + user_ids: list(str). The user ids of the learners of the group. + group_id: str. The id of the learner group. + + Returns: + list(bool). True if a user has progress sharing permission of the + given group as True, False otherwise. + """ + learner_group_user_models = get_learner_group_models_by_ids( + user_ids, strict=True + ) + + progress_sharing_permissions: List[bool] = [] + for model in learner_group_user_models: + for group_details in model.learner_groups_user_details: + if group_details['group_id'] == group_id: + progress_sharing_permissions.append( + bool(group_details['progress_sharing_is_turned_on']) + ) + break + + return progress_sharing_permissions + + +def get_invited_learner_groups_of_learner( + user_id: str +) -> List[learner_group_domain.LearnerGroup]: + """Returns a list of learner groups that the given learner has been + invited to join. + + Args: + user_id: str. The id of the learner. + + Returns: + list(LearnerGroup). 
A list of learner groups that the given learner + has been invited to join. + """ + learner_grp_models = ( + learner_group_models.LearnerGroupModel.get_by_invited_learner_user_id( + user_id)) + + if not learner_grp_models: + return [] + + return [ + learner_group_services.get_learner_group_from_model(model) + for model in learner_grp_models + ] + + +def get_learner_groups_joined_by_learner( + user_id: str +) -> List[learner_group_domain.LearnerGroup]: + """Returns a list of learner groups that the given learner has joined. + + Args: + user_id: str. The id of the learner. + + Returns: + list(LearnerGroup). A list of learner groups that the given learner + is part of. + """ + learner_grp_models = ( + learner_group_models.LearnerGroupModel.get_by_learner_user_id(user_id)) + + if not learner_grp_models: + return [] + + return [ + learner_group_services.get_learner_group_from_model(model) + for model in learner_grp_models + ] diff --git a/core/domain/learner_group_fetchers_test.py b/core/domain/learner_group_fetchers_test.py new file mode 100644 index 000000000000..3154eb8c0729 --- /dev/null +++ b/core/domain/learner_group_fetchers_test.py @@ -0,0 +1,145 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for methods defined in learner group fetchers.""" + +from __future__ import annotations + +from core.domain import learner_group_fetchers +from core.domain import learner_group_services + +from core.tests import test_utils + + +class LearnerGroupFetchersUnitTests(test_utils.GenericTestBase): + """Tests for skill fetchers.""" + + FACILITATOR_ID = 'facilitator_user_1' + LEARNER_ID_1 = 'learner_user_1' + LEARNER_ID_2 = 'learner_user_2' + + def setUp(self) -> None: + super().setUp() + + self.LEARNER_GROUP_ID = ( + learner_group_fetchers.get_new_learner_group_id() + ) + + self.learner_group = learner_group_services.create_learner_group( + self.LEARNER_GROUP_ID, 'Learner Group Name', 'Description', + [self.FACILITATOR_ID], [self.LEARNER_ID_1, self.LEARNER_ID_2], + ['subtopic_id_1'], ['story_id_1']) + + def test_get_new_learner_group_id(self) -> None: + self.assertIsNotNone(learner_group_fetchers.get_new_learner_group_id()) + + def test_get_learner_group_by_id(self) -> None: + fake_learner_group_id = 'fake_learner_group_id' + fake_learner_group = learner_group_fetchers.get_learner_group_by_id( + fake_learner_group_id) + self.assertIsNone(fake_learner_group) + + learner_group = learner_group_fetchers.get_learner_group_by_id( + self.LEARNER_GROUP_ID + ) + # Ruling out the possibility of None for mypy type checking. 
+ assert learner_group is not None + self.assertIsNotNone(learner_group) + self.assertEqual(learner_group.group_id, self.LEARNER_GROUP_ID) + + with self.assertRaisesRegex( + Exception, + 'No LearnerGroupModel found for the given group_id: ' + 'fake_learner_group_id' + ): + learner_group_fetchers.get_learner_group_by_id( + fake_learner_group_id, strict=True + ) + + def test_raises_error_if_learner_group_model_is_fetched_with_strict_and_invalid_id( # pylint: disable=line-too-long + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'No LearnerGroupsUserModel exists for the user_id: invalid_id' + ): + learner_group_fetchers.get_learner_group_models_by_ids( + ['invalid_id'], strict=True + ) + + def test_get_learner_groups_of_facilitator(self) -> None: + fake_facilitator_id = 'fake_facilitator_id' + fake_learner_groups = ( + learner_group_fetchers.get_learner_groups_of_facilitator( + fake_facilitator_id + ) + ) + self.assertEqual(len(fake_learner_groups), 0) + + learner_groups = ( + learner_group_fetchers.get_learner_groups_of_facilitator( + self.FACILITATOR_ID + ) + ) + self.assertEqual(len(learner_groups), 1) + self.assertEqual(learner_groups[0].group_id, self.LEARNER_GROUP_ID) + + def test_can_multi_learners_share_progress(self) -> None: + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, self.LEARNER_ID_1, True) + + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, self.LEARNER_ID_2, False) + + self.assertEqual( + learner_group_fetchers.can_multi_learners_share_progress( + [self.LEARNER_ID_1, self.LEARNER_ID_2], self.LEARNER_GROUP_ID + ), [True, False]) + + def test_get_invited_learner_groups_of_learner(self) -> None: + fake_learner_id = 'fake_learner_id' + learner_groups = ( + learner_group_fetchers.get_invited_learner_groups_of_learner( + fake_learner_id + ) + ) + self.assertEqual(len(learner_groups), 0) + + learner_groups = ( + learner_group_fetchers.get_invited_learner_groups_of_learner( + 
self.LEARNER_ID_1 + ) + ) + self.assertEqual(len(learner_groups), 1) + self.assertEqual(learner_groups[0].group_id, self.LEARNER_GROUP_ID) + + def test_get_learner_groups_joined_by_learner(self) -> None: + learner_groups = ( + learner_group_fetchers.get_learner_groups_joined_by_learner( + self.LEARNER_ID_1 + ) + ) + self.assertEqual(len(learner_groups), 0) + + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, self.LEARNER_ID_1, True) + learner_groups = ( + learner_group_fetchers.get_learner_groups_joined_by_learner( + self.LEARNER_ID_1 + ) + ) + self.assertEqual(len(learner_groups), 1) + self.assertEqual(learner_groups[0].group_id, self.LEARNER_GROUP_ID) diff --git a/core/domain/learner_group_services.py b/core/domain/learner_group_services.py new file mode 100644 index 000000000000..c0edf6dff0cb --- /dev/null +++ b/core/domain/learner_group_services.py @@ -0,0 +1,794 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Services for the learner groups.""" + +from __future__ import annotations + +from core.constants import constants +from core.domain import config_domain +from core.domain import learner_group_domain +from core.domain import learner_group_fetchers +from core.domain import story_domain +from core.domain import story_fetchers +from core.domain import subtopic_page_domain +from core.domain import topic_domain +from core.domain import topic_fetchers +from core.platform import models + +from typing import List, Optional, Sequence, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import learner_group_models + from mypy_imports import user_models + +(learner_group_models, user_models) = models.Registry.import_models( + [models.Names.LEARNER_GROUP, models.Names.USER]) + +datastore_services = models.Registry.import_datastore_services() + + +def is_learner_group_feature_enabled() -> bool: + """Checks if the learner group feature is enabled. + + Returns: + bool. Whether the learner group feature is enabled. + """ + return bool(config_domain.LEARNER_GROUPS_ARE_ENABLED.value) + + +def create_learner_group( + group_id: str, + title: str, + description: str, + facilitator_user_ids: List[str], + invited_learner_ids: List[str], + subtopic_page_ids: List[str], + story_ids: List[str] +) -> learner_group_domain.LearnerGroup: + """Creates a new learner group. + + Args: + group_id: str. The id of the learner group to be created. + title: str. The title of the learner group. + description: str. The description of the learner group. + facilitator_user_ids: str. List of user ids of the facilitators of the + learner group. + invited_learner_ids: list(str). List of user ids of the learners who + have been invited to join the learner group. + subtopic_page_ids: list(str). The ids of the subtopics pages that are + part of the learner group syllabus. Each subtopic page id is + represented as a topicId:subtopicId string. 
+ story_ids: list(str). The ids of the stories that are part of the + learner group syllabus. + + Returns: + LearnerGroup. The domain object of the newly created learner group. + """ + learner_group = learner_group_domain.LearnerGroup( + group_id, + title, + description, + facilitator_user_ids, + [], + invited_learner_ids, + subtopic_page_ids, + story_ids + ) + learner_group.validate() + + learner_group_model = learner_group_models.LearnerGroupModel( + id=group_id, + title=title, + description=description, + facilitator_user_ids=facilitator_user_ids, + learner_user_ids=[], + invited_learner_user_ids=invited_learner_ids, + subtopic_page_ids=subtopic_page_ids, + story_ids=story_ids + ) + + learner_group_model.update_timestamps() + learner_group_model.put() + + if len(learner_group_model.invited_learner_user_ids) > 0: + invite_learners_to_learner_group( + group_id, learner_group_model.invited_learner_user_ids) + + return learner_group + + +def update_learner_group( + group_id: str, + title: str, + description: str, + facilitator_user_ids: List[str], + learner_ids: List[str], + invited_learner_ids: List[str], + subtopic_page_ids: List[str], + story_ids: List[str] +) -> learner_group_domain.LearnerGroup: + """Updates a learner group if it is present. + + Args: + group_id: str. The id of the learner group to be updated. + title: str. The title of the learner group. + description: str. The description of the learner group. + facilitator_user_ids: str. List of user ids of the facilitators of the + learner group. + learner_ids: list(str). List of user ids of the learners of the + learner group. + invited_learner_ids: list(str). List of user ids of the learners who + have been invited to join the learner group. + subtopic_page_ids: list(str). The ids of the subtopics pages that are + part of the learner group syllabus. Each subtopic page id is + represented as a topicId:subtopicId string. + story_ids: list(str). 
The ids of the stories that are part of the + learner group syllabus. + + Returns: + learner_group: learner_group_domain.LearnerGroup. The domain object + of the updated learner group. + """ + + learner_group_model = learner_group_models.LearnerGroupModel.get( + group_id, strict=True + ) + + old_invited_learner_ids = set(learner_group_model.invited_learner_user_ids) + new_invited_learner_ids = set(invited_learner_ids) + if new_invited_learner_ids != old_invited_learner_ids: + newly_added_invites = list( + new_invited_learner_ids - old_invited_learner_ids + ) + newly_removed_invites = list( + old_invited_learner_ids - new_invited_learner_ids + ) + invite_learners_to_learner_group( + group_id, newly_added_invites) + remove_invited_learners_from_learner_group( + group_id, newly_removed_invites, False) + + old_learner_ids = set(learner_group_model.learner_user_ids) + new_learner_ids = set(learner_ids) + if old_learner_ids != new_learner_ids: + newly_removed_learners = list( + old_learner_ids - new_learner_ids + ) + remove_learners_from_learner_group( + group_id, newly_removed_learners, False) + + learner_group_model.title = title + learner_group_model.description = description + learner_group_model.facilitator_user_ids = facilitator_user_ids + learner_group_model.learner_user_ids = learner_ids + learner_group_model.invited_learner_user_ids = invited_learner_ids + learner_group_model.subtopic_page_ids = subtopic_page_ids + learner_group_model.story_ids = story_ids + + learner_group = get_learner_group_from_model(learner_group_model) + learner_group.validate() + + learner_group_model.update_timestamps() + learner_group_model.put() + + return get_learner_group_from_model(learner_group_model) + + +def is_user_facilitator(user_id: str, group_id: str) -> bool: + """Checks if the user is a facilitator of the leaner group. + + Args: + user_id: str. The id of the user. + group_id: str. The id of the learner group. + + Returns: + bool. 
Whether the user is a facilitator of the learner group. + """ + learner_group_model = learner_group_models.LearnerGroupModel.get( + group_id, strict=True + ) + + return user_id in learner_group_model.facilitator_user_ids + + +def is_user_learner(user_id: str, group_id: str) -> bool: + """Checks if the user is a learner of the learner group. + + Args: + user_id: str. The id of the user. + group_id: str. The id of the learner group. + + Returns: + bool. Whether the user is a learner of the learner group. + """ + learner_group_model = learner_group_models.LearnerGroupModel.get( + group_id, strict=True + ) + + return user_id in learner_group_model.learner_user_ids + + +def remove_learner_group(group_id: str) -> None: + """Removes the learner group with of given learner group ID. + + Args: + group_id: str. The id of the learner group to be removed. + """ + learner_group_model = learner_group_models.LearnerGroupModel.get( + group_id, strict=True + ) + + # Note: We are not deleting the references of the learner group from the + # related learner group user models. These references are deleted when the + # user tries to access a deleted learner group so that they get a + # notification saying the group was deleted instead of the group just being + # silently removed. + learner_group_model.delete() + + +def get_matching_learner_group_syllabus_to_add( + learner_group_id: str, + keyword: str, + search_type: str, + category: str, + language_code: str +) -> learner_group_domain.LearnerGroupSyllabusDict: + """Returns the syllabus of items matching the given filter arguments + that can be added to the learner group. + + Args: + learner_group_id: str. The id of the learner group. + keyword: str. The keyword to search the syllabus. It is compared with + the title of the topics, stories and subtopics. + search_type: str. The type of the syllabus item to search. It can be + either 'Story' or 'Skill'. + category: str. The category of the syllabus items. 
It is the + classroom in which the stories and subtopics are to be searched. + language_code: str. The language of the topics in which the stories + and subtopics are to be searched. + + Returns: + dict. The matching syllabus items to add to the learner group. + """ + # Default case when syllabus is being added to a new group. + group_subtopic_page_ids: List[str] = [] + group_story_ids: List[str] = [] + + # Case when syllabus is being added to an existing group. + if learner_group_id: + learner_group_model = learner_group_models.LearnerGroupModel.get( + learner_group_id, strict=True + ) + group_subtopic_page_ids = learner_group_model.subtopic_page_ids + group_story_ids = learner_group_model.story_ids + + matching_topic_ids: List[str] = [] + all_classrooms_dict = config_domain.CLASSROOM_PAGES_DATA.value + + matching_subtopics_dicts: List[ + subtopic_page_domain.SubtopicPageSummaryDict] = [] + matching_story_syllabus_item_dicts: List[ + story_domain.LearnerGroupSyllabusStorySummaryDict] = [] + + if category != constants.DEFAULT_ADD_SYLLABUS_FILTER: + for classroom in all_classrooms_dict: + if category and classroom['name'] == category: + matching_topic_ids.extend(classroom['topic_ids']) + matching_topics: List[topic_domain.Topic] = ( + topic_fetchers.get_topics_by_ids(matching_topic_ids, strict=True) + ) + else: + matching_topics = topic_fetchers.get_all_topics() + + keyword = keyword.lower() + for topic in matching_topics: + if language_code not in ( + constants.DEFAULT_ADD_SYLLABUS_FILTER, topic.language_code + ): + continue + + if keyword in topic.canonical_name: + # If search type is set to default or search type is set to + # 'Story', add all story ids of this topic to the filtered + # story ids. 
+ if ( + search_type in ( + constants.LEARNER_GROUP_ADD_STORY_FILTER, + constants.DEFAULT_ADD_SYLLABUS_FILTER + ) + ): + matching_story_syllabus_item_dicts.extend( + get_matching_story_syllabus_item_dicts( + topic, group_story_ids + ) + ) + + # If search type is set to default or search type is set to + # 'Skill', add all subtopics of this topic to the filtered + # subtopics. + if ( + search_type in ( + constants.LEARNER_GROUP_ADD_SKILL_FILTER, + constants.DEFAULT_ADD_SYLLABUS_FILTER + ) + ): + matching_subtopics_dicts.extend( + get_matching_subtopic_syllabus_item_dicts( + topic, group_subtopic_page_ids + ) + ) + else: + # If search type is set to default or search type is set to + # 'Skill', add the subtopics which have the keyword in their + # title to the filtered subtopics. + if ( + search_type in ( + constants.LEARNER_GROUP_ADD_SKILL_FILTER, + constants.DEFAULT_ADD_SYLLABUS_FILTER + ) + ): + matching_subtopics_dicts.extend( + get_matching_subtopic_syllabus_item_dicts( + topic, group_subtopic_page_ids, keyword + ) + ) + + # If search type is set to default or search type is set to + # 'Story', add all story ids of this topic to the possible + # story ids. + if ( + search_type in ( + constants.LEARNER_GROUP_ADD_STORY_FILTER, + constants.DEFAULT_ADD_SYLLABUS_FILTER + ) + ): + matching_story_syllabus_item_dicts.extend( + get_matching_story_syllabus_item_dicts( + topic, group_story_ids, keyword + ) + ) + + return { + 'story_summary_dicts': matching_story_syllabus_item_dicts, + 'subtopic_summary_dicts': matching_subtopics_dicts + } + + +def get_matching_subtopic_syllabus_item_dicts( + topic: topic_domain.Topic, + group_subtopic_page_ids: List[str], + keyword: Optional[str] = None +) -> List[subtopic_page_domain.SubtopicPageSummaryDict]: + """Returns the matching subtopics syllabus item dicts of the given topic + that can be added to the learner group syllabus. + + Args: + topic: Topic. The topic whose subtopic subtopic items are to be + searched. 
+ group_subtopic_page_ids: list(str). The ids of the subtopic pages of + the learner group. + keyword: Optional[str]. The keyword to search the subtopic syllabus + items. It is compared with the title of the subtopics if passed + in arguments. + + Returns: + list(dict). The matching subtopic syllabus items of the given topic. + """ + matching_subtopic_syllabus_item_dicts: List[ + subtopic_page_domain.SubtopicPageSummaryDict] = [] + for subtopic in topic.subtopics: + subtopic_page_id = '{}:{}'.format(topic.id, subtopic.id) + if subtopic_page_id not in group_subtopic_page_ids: + if keyword is None or keyword in subtopic.title.lower(): + matching_subtopic_syllabus_item_dicts.append({ + 'subtopic_id': subtopic.id, + 'subtopic_title': subtopic.title, + 'parent_topic_id': topic.id, + 'parent_topic_name': topic.name, + 'thumbnail_filename': subtopic.thumbnail_filename, + 'thumbnail_bg_color': subtopic.thumbnail_bg_color, + 'subtopic_mastery': None, + 'parent_topic_url_fragment': topic.url_fragment, + 'classroom_url_fragment': None + }) + + return matching_subtopic_syllabus_item_dicts + + +def get_matching_story_syllabus_item_dicts( + topic: topic_domain.Topic, + group_story_ids: List[str], + keyword: Optional[str] = None +) -> List[story_domain.LearnerGroupSyllabusStorySummaryDict]: + """Returns the matching story syllabus item dicts of the given topic + that can be added to the learner group syllabus. + + Args: + topic: Topic. The topic whose stories are to be searched. + group_story_ids: list(str). The story ids of the learner group. + keyword: Optional[str]. The keyword to search the stories. It is + compared with the title of the story if passed in arguments. + + Returns: + list(dict). The matching story syllabus item dicts of the given topic. 
+ """ + story_ids = [ + story.story_id for story in + topic.canonical_story_references + if ( + story.story_id not in group_story_ids and + story.story_is_published is True + ) + ] + matching_stories = story_fetchers.get_story_summaries_by_ids(story_ids) + stories = story_fetchers.get_stories_by_ids(story_ids, strict=True) + + matching_story_syllabus_item_dicts: List[ + story_domain.LearnerGroupSyllabusStorySummaryDict] = [] + + for ind, story_summary in enumerate(matching_stories): + if keyword is None or keyword in story_summary.title.lower(): + story = stories[ind] + summary_dict = story_summary.to_dict() + matching_story_syllabus_item_dicts.append({ + 'id': summary_dict['id'], + 'title': summary_dict['title'], + 'description': summary_dict['description'], + 'language_code': summary_dict['language_code'], + 'version': summary_dict['version'], + 'node_titles': summary_dict['node_titles'], + 'thumbnail_filename': summary_dict['thumbnail_filename'], + 'thumbnail_bg_color': summary_dict['thumbnail_bg_color'], + 'url_fragment': summary_dict['url_fragment'], + 'story_model_created_on': + summary_dict['story_model_created_on'], + 'story_model_last_updated': + summary_dict['story_model_last_updated'], + 'story_is_published': True, + 'completed_node_titles': [], + 'all_node_dicts': [ + node.to_dict() for node in + story.story_contents.nodes + ], + 'topic_name': topic.name, + 'topic_url_fragment': topic.url_fragment, + 'classroom_url_fragment': None + }) + + return matching_story_syllabus_item_dicts + + +def add_learner_to_learner_group( + group_id: str, + user_id: str, + progress_sharing_permission: bool +) -> None: + """Adds the given learner to the given learner group. + + Args: + group_id: str. The id of the learner group. + user_id: str. The id of the learner. + progress_sharing_permission: bool. The progress sharing permission of + the learner group. True if progress sharing is allowed, False + otherwise. + + Raises: + Exception. 
Learner was not invited to join the learner group. + """ + learner_group_model = learner_group_models.LearnerGroupModel.get( + group_id, strict=True + ) + + if user_id not in learner_group_model.invited_learner_user_ids: + raise Exception('Learner was not invited to join the learner group.') + + learner_group_model.invited_learner_user_ids.remove(user_id) + learner_group_model.learner_user_ids.append(user_id) + + details_of_learner_group = { + 'group_id': group_id, + 'progress_sharing_is_turned_on': progress_sharing_permission + } + + learner_grps_user_model = user_models.LearnerGroupsUserModel.get( + user_id, strict=True + ) + + learner_grps_user_model.invited_to_learner_groups_ids.remove(group_id) + learner_grps_user_model.learner_groups_user_details.append( + details_of_learner_group) + + learner_grps_user_model.update_timestamps() + learner_grps_user_model.put() + + learner_group_model.update_timestamps() + learner_group_model.put() + + +def remove_learners_from_learner_group( + group_id: str, + user_ids: List[str], + update_group: bool +) -> None: + """Removes the given learner from the given learner group. + + Args: + group_id: str. The id of the learner group. + user_ids: List[str]. The id of the learners to be removed. + update_group: bool. Flag indicating whether to update the + learner group or not. 
+ """ + if update_group: + learner_group_model = learner_group_models.LearnerGroupModel.get( + group_id, strict=True + ) + + learner_group_model.learner_user_ids = [ + user_id for user_id in learner_group_model.learner_user_ids + if user_id not in user_ids + ] + + learner_group_model.update_timestamps() + learner_group_model.put() + + learner_grps_users_models = ( + learner_group_fetchers.get_learner_group_models_by_ids( + user_ids, strict=True + ) + ) + + models_to_put = [] + for learner_grps_user_model in learner_grps_users_models: + learner_grps_user_model.learner_groups_user_details = [ + details for details in + learner_grps_user_model.learner_groups_user_details + if details['group_id'] != group_id + ] + models_to_put.append(learner_grps_user_model) + + user_models.LearnerGroupsUserModel.update_timestamps_multi(models_to_put) + user_models.LearnerGroupsUserModel.put_multi(models_to_put) + + +def invite_learners_to_learner_group( + group_id: str, + invited_learner_ids: List[str] +) -> None: + """Invites the given learners to the given learner group. + + Args: + group_id: str. The id of the learner group. + invited_learner_ids: list(str). The ids of the learners to invite. 
+ """ + learner_groups_user_models = ( + user_models.LearnerGroupsUserModel.get_multi(invited_learner_ids)) + + models_to_put = [] + for index, learner_id in enumerate(invited_learner_ids): + learner_groups_user_model = learner_groups_user_models[index] + if learner_groups_user_model: + learner_groups_user_model.invited_to_learner_groups_ids.append( + group_id) + else: + learner_groups_user_model = user_models.LearnerGroupsUserModel( + id=learner_id, + invited_to_learner_groups_ids=[group_id], + learner_groups_user_details=[] + ) + + models_to_put.append(learner_groups_user_model) + + user_models.LearnerGroupsUserModel.update_timestamps_multi(models_to_put) + user_models.LearnerGroupsUserModel.put_multi(models_to_put) + + +def remove_invited_learners_from_learner_group( + group_id: str, + learner_ids: List[str], + update_group: bool +) -> None: + """Removes the given invited learners from the given learner group. + + Args: + group_id: str. The id of the learner group. + learner_ids: list(str). The ids of the learners to remove. + update_group: bool. Flag indicating whether to update the + learner group or not. 
+ """ + if update_group: + learner_group_model = learner_group_models.LearnerGroupModel.get( + group_id, strict=True + ) + learner_group_model.invited_learner_user_ids = [ + learner_id for learner_id in + learner_group_model.invited_learner_user_ids if + learner_id not in learner_ids + ] + learner_group_model.update_timestamps() + learner_group_model.put() + + found_models = ( + learner_group_fetchers.get_learner_group_models_by_ids( + learner_ids, strict=True + ) + ) + + models_to_put = [] + for model in found_models: + if group_id in model.invited_to_learner_groups_ids: + model.invited_to_learner_groups_ids.remove(group_id) + models_to_put.append(model) + + user_models.LearnerGroupsUserModel.update_timestamps_multi(models_to_put) + user_models.LearnerGroupsUserModel.put_multi(models_to_put) + + +def get_learner_group_from_model( + learner_group_model: learner_group_models.LearnerGroupModel +) -> learner_group_domain.LearnerGroup: + """Returns the learner group domain object given the learner group + model loaded from the datastore. + + Args: + learner_group_model: LearnerGroupModel. The learner group model + from the datastore. + + Returns: + LearnerGroup. The learner group domain object corresponding to the + given model. + """ + return learner_group_domain.LearnerGroup( + learner_group_model.id, + learner_group_model.title, + learner_group_model.description, + learner_group_model.facilitator_user_ids, + learner_group_model.learner_user_ids, + learner_group_model.invited_learner_user_ids, + learner_group_model.subtopic_page_ids, + learner_group_model.story_ids + ) + + +def can_user_be_invited( + user_id: str, username: str, group_id: str +) -> Tuple[bool, str]: + """Checks if the user can be invited to the learner group. + + Args: + user_id: str. The id of the user. + username: str. The username of the user. + group_id: str. The id of the learner group. + + Returns: + bool. True if the user can be invited to the learner group. False + otherwise. + str. 
Error message if the user cannot be invited to the learner group. + """ + # Case of inviting to new learner group. + if not group_id: + return (True, '') + + learner_group_model = learner_group_models.LearnerGroupModel.get( + group_id, strict=True + ) + + if user_id in learner_group_model.learner_user_ids: + return ( + False, 'User with username %s is already a learner.' % username) + elif user_id in learner_group_model.invited_learner_user_ids: + return ( + False, 'User with username %s has been already invited to ' + 'join the group' % username) + elif user_id in learner_group_model.facilitator_user_ids: + return ( + False, 'User with username %s is already a facilitator.' % username + ) + + return (True, '') + + +def remove_story_reference_from_learner_groups(story_id: str) -> None: + """Removes a given story id from all learner groups that have it's + reference. + + Args: + story_id: str. Story id to remove. + """ + found_models: Sequence[learner_group_models.LearnerGroupModel] = ( + learner_group_models.LearnerGroupModel.get_all().filter( + datastore_services.any_of( + learner_group_models.LearnerGroupModel.story_ids == story_id + ) + ).fetch() + ) + + models_to_put = [] + for model in found_models: + model.story_ids.remove(story_id) + models_to_put.append(model) + + learner_group_models.LearnerGroupModel.update_timestamps_multi( + models_to_put) + learner_group_models.LearnerGroupModel.put_multi(models_to_put) + + +def remove_subtopic_page_reference_from_learner_groups( + topic_id: str, + subtopic_id: int +) -> None: + """Removes a given subtopic page from all learner groups that have it's + reference. + + Args: + topic_id: str. Id of the topic of the subtopic page. + subtopic_id: int. Id of the subtopic of the subtopic page. 
+ """ + subtopic_page_id = '{}:{}'.format(topic_id, subtopic_id) + + learner_group_model_cls = learner_group_models.LearnerGroupModel + found_models: Sequence[learner_group_models.LearnerGroupModel] = ( + learner_group_model_cls.get_all().filter( + datastore_services.any_of( + learner_group_model_cls.subtopic_page_ids == subtopic_page_id + ) + ).fetch() + ) + + models_to_put = [] + for model in found_models: + model.subtopic_page_ids.remove(subtopic_page_id) + models_to_put.append(model) + + learner_group_models.LearnerGroupModel.update_timestamps_multi( + models_to_put) + learner_group_models.LearnerGroupModel.put_multi(models_to_put) + + +def update_progress_sharing_permission( + user_id: str, + group_id: str, + new_progress_sharing_permission: bool +) -> None: + """Updates the progress sharing permission of the learner group. + + Args: + user_id: str. The id of the user. + group_id: str. The id of the learner group. + new_progress_sharing_permission: bool. The new progress sharing + permission of the learner group. + """ + learner_grps_user_model = user_models.LearnerGroupsUserModel.get( + user_id, strict=True + ) + + old_user_details = learner_grps_user_model.learner_groups_user_details + learner_grps_user_model.learner_groups_user_details = [] + for group_details in old_user_details: + if group_details['group_id'] == group_id: + learner_grps_user_model.learner_groups_user_details.append({ + 'group_id': group_id, + 'progress_sharing_is_turned_on': + new_progress_sharing_permission + }) + else: + learner_grps_user_model.learner_groups_user_details.append( + group_details) + + learner_grps_user_model.update_timestamps() + learner_grps_user_model.put() diff --git a/core/domain/learner_group_services_test.py b/core/domain/learner_group_services_test.py new file mode 100644 index 000000000000..558392bc0194 --- /dev/null +++ b/core/domain/learner_group_services_test.py @@ -0,0 +1,538 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the learner group services.""" + +from __future__ import annotations + +from core.constants import constants +from core.domain import config_services +from core.domain import learner_group_fetchers +from core.domain import learner_group_services +from core.domain import topic_domain +from core.domain import topic_services +from core.platform import models +from core.tests import test_utils + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) + + +class LearnerGroupServicesUnitTests(test_utils.GenericTestBase): + """Tests for skill fetchers.""" + + FACILITATOR_ID = 'facilitator_user_1' + LEARNER_ID = 'learner_user_1' + TOPIC_ID_0 = 'topic_id_0' + TOPIC_ID_1 = 'topic_id_1' + STORY_ID_0 = 'story_id_0' + STORY_ID_1 = 'story_id_1' + STORY_ID_2 = 'story_id_2' + + def setUp(self) -> None: + super().setUp() + self.signup( + self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.admin_id = self.get_user_id_from_email( + self.CURRICULUM_ADMIN_EMAIL) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.LEARNER_GROUP_ID = ( + learner_group_fetchers.get_new_learner_group_id() + ) + self.LEARNER_GROUP_ID_2 = ( + learner_group_fetchers.get_new_learner_group_id() + ) + + self.learner_group = learner_group_services.create_learner_group( + self.LEARNER_GROUP_ID, 'Learner Group Name', 
'Description', + [self.FACILITATOR_ID], [self.LEARNER_ID], ['subtopic_id_1'], + ['story_id_1']) + self.learner_group_2 = learner_group_services.create_learner_group( + self.LEARNER_GROUP_ID_2, 'Learner Group 2', 'Description 2', + [self.FACILITATOR_ID], [self.LEARNER_ID], ['subtopic_id_1'], + ['story_id_1']) + + # Set up topics, subtopics and stories for learner group syllabus. + topic = topic_domain.Topic.create_default_topic( + self.TOPIC_ID_0, 'Place Values', 'abbrev', 'description', 'fragm') + topic.thumbnail_filename = 'thumbnail.svg' + topic.thumbnail_bg_color = '#C6DCDA' + topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Naming Numbers', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-url')] + topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] + topic_services.save_new_topic(self.admin_id, topic) + self.save_new_story( + self.STORY_ID_0, self.admin_id, self.TOPIC_ID_0, + 'Story test 0') + topic_services.add_canonical_story( + self.admin_id, self.TOPIC_ID_0, self.STORY_ID_0) + + # Publish the topic and its stories. + topic_services.publish_topic(self.TOPIC_ID_0, self.admin_id) + topic_services.publish_story( + self.TOPIC_ID_0, self.STORY_ID_0, self.admin_id) + + # Create another topic. 
+ topic = topic_domain.Topic.create_default_topic( + self.TOPIC_ID_1, 'Negative Numbers', 'abbrev-one', + 'description 1', 'fragm') + topic.thumbnail_filename = 'thumbnail.svg' + topic.thumbnail_bg_color = '#C6DCDA' + topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Intro to negative numbers', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-url-one')] + topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] + + topic_services.save_new_topic(self.admin_id, topic) + self.save_new_story( + self.STORY_ID_1, self.admin_id, self.TOPIC_ID_1, + 'Story test 1') + topic_services.add_canonical_story( + self.admin_id, self.TOPIC_ID_1, self.STORY_ID_1) + + # Publish the topic and its stories. + topic_services.publish_topic(self.TOPIC_ID_1, self.admin_id) + topic_services.publish_story( + self.TOPIC_ID_1, self.STORY_ID_1, self.admin_id) + + def test_create_learner_group(self) -> None: + self.assertIsNotNone(self.learner_group) + self.assertEqual(self.learner_group.group_id, self.LEARNER_GROUP_ID) + self.assertEqual(self.learner_group.title, 'Learner Group Name') + self.assertEqual(self.learner_group.description, 'Description') + self.assertEqual( + self.learner_group.facilitator_user_ids, [self.FACILITATOR_ID]) + self.assertEqual( + self.learner_group.invited_learner_user_ids, [self.LEARNER_ID]) + self.assertEqual( + self.learner_group.subtopic_page_ids, ['subtopic_id_1']) + self.assertEqual(self.learner_group.story_ids, ['story_id_1']) + + def test_is_learner_group_feature_enabled(self) -> None: + config_services.set_property( + self.admin_id, 'learner_groups_are_enabled', True) + self.assertTrue( + learner_group_services.is_learner_group_feature_enabled()) + + config_services.set_property( + self.admin_id, 'learner_groups_are_enabled', False) + self.assertFalse( + learner_group_services.is_learner_group_feature_enabled()) + + def test_update_learner_group(self) -> None: + updated_group = 
learner_group_services.update_learner_group( + self.LEARNER_GROUP_ID, 'Updated Group Name', 'Updated Description', + [self.FACILITATOR_ID], [], ['new_learner_id'], + ['subtopic_id_1', 'subtopic_id_2'], ['story_id_1', 'story_id_2']) + + self.assertIsNotNone(updated_group) + self.assertEqual(updated_group.group_id, self.LEARNER_GROUP_ID) + self.assertEqual(updated_group.title, 'Updated Group Name') + self.assertEqual(updated_group.description, 'Updated Description') + self.assertEqual( + updated_group.facilitator_user_ids, [self.FACILITATOR_ID] + ) + self.assertEqual( + updated_group.invited_learner_user_ids, ['new_learner_id'] + ) + self.assertEqual( + updated_group.subtopic_page_ids, + ['subtopic_id_1', 'subtopic_id_2'] + ) + self.assertEqual(updated_group.story_ids, ['story_id_1', 'story_id_2']) + + def test_is_user_facilitator(self) -> None: + self.assertTrue( + learner_group_services.is_user_facilitator( + self.FACILITATOR_ID, self.LEARNER_GROUP_ID)) + + self.assertFalse( + learner_group_services.is_user_facilitator( + self.LEARNER_ID, self.LEARNER_GROUP_ID)) + + def test_is_user_learner(self) -> None: + self.assertFalse( + learner_group_services.is_user_learner( + self.FACILITATOR_ID, self.LEARNER_GROUP_ID)) + self.assertFalse( + learner_group_services.is_user_learner( + self.LEARNER_ID, self.LEARNER_GROUP_ID)) + + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, self.LEARNER_ID, True) + + self.assertTrue( + learner_group_services.is_user_learner( + self.LEARNER_ID, self.LEARNER_GROUP_ID)) + + def test_get_matching_syllabus_to_add_with_default_filters(self) -> None: + # Test 1: Default filters with topic name matching. 
+ matching_syllabus = ( + learner_group_services.get_matching_learner_group_syllabus_to_add( + self.LEARNER_GROUP_ID, 'Place', 'All', + 'All', constants.DEFAULT_LANGUAGE_CODE + ) + ) + story_summary_dicts = matching_syllabus['story_summary_dicts'] + self.assertEqual(len(story_summary_dicts), 1) + self.assertEqual(story_summary_dicts[0]['id'], self.STORY_ID_0) + self.assertEqual(story_summary_dicts[0]['title'], 'Story test 0') + + subtopic_summary_dicts = matching_syllabus['subtopic_summary_dicts'] + self.assertEqual(len(subtopic_summary_dicts), 1) + self.assertEqual(subtopic_summary_dicts[0]['subtopic_id'], 1) + self.assertEqual( + subtopic_summary_dicts[0]['subtopic_title'], 'Naming Numbers') + + def test_get_syllabus_to_add_with_matching_subtopic_name(self) -> None: + # Test 2: Skill type filter with subtopic name matching. + matching_syllabus = ( + learner_group_services.get_matching_learner_group_syllabus_to_add( + self.LEARNER_GROUP_ID, 'Naming', 'Skill', + 'All', constants.DEFAULT_LANGUAGE_CODE + ) + ) + + story_summary_dicts = matching_syllabus['story_summary_dicts'] + self.assertEqual(len(story_summary_dicts), 0) + + subtopic_summary_dicts = matching_syllabus['subtopic_summary_dicts'] + self.assertEqual(len(subtopic_summary_dicts), 1) + self.assertEqual(subtopic_summary_dicts[0]['subtopic_id'], 1) + self.assertEqual(subtopic_summary_dicts[0][ + 'subtopic_title'], 'Naming Numbers') + + def test_get_syllabus_to_add_with_matching_story_name(self) -> None: + # Test 3: Story type filter with story name matching. + matching_syllabus = ( + learner_group_services.get_matching_learner_group_syllabus_to_add( + self.LEARNER_GROUP_ID, 'Story test', 'Story', + 'All', constants.DEFAULT_LANGUAGE_CODE + ) + ) + # Story test 1 is already part of the group syllabus + # so it should not be returned in the filtered syllabus. 
+ story_summary_dicts = matching_syllabus['story_summary_dicts'] + self.assertEqual(len(story_summary_dicts), 1) + self.assertEqual(story_summary_dicts[0]['id'], self.STORY_ID_0) + self.assertEqual(story_summary_dicts[0]['title'], 'Story test 0') + + subtopic_summary_dicts = ( + matching_syllabus['subtopic_summary_dicts'] + ) + self.assertEqual(len(subtopic_summary_dicts), 0) + + def test_get_matching_syllabus_to_add_with_classroom_filter(self) -> None: + # Test 4: Classroom name filter. + matching_syllabus = ( + learner_group_services.get_matching_learner_group_syllabus_to_add( + self.LEARNER_GROUP_ID, 'Place', 'All', + 'math', constants.DEFAULT_LANGUAGE_CODE + ) + ) + # No stories or subtopics are returned as the topics were not added + # to the classroom. + story_summary_dicts = matching_syllabus['story_summary_dicts'] + self.assertEqual(len(story_summary_dicts), 0) + + subtopic_summary_dicts = matching_syllabus['subtopic_summary_dicts'] + self.assertEqual(len(subtopic_summary_dicts), 0) + + def test_get_matching_syllabus_to_add_with_language_filter(self) -> None: + # Test 5: Language filter. + matching_syllabus = ( + learner_group_services.get_matching_learner_group_syllabus_to_add( + self.LEARNER_GROUP_ID, 'Place', 'All', 'All', 'pt-br' + ) + ) + # No stories or subtopics are returned as the topics are all + # of default language. + story_summary_dicts = matching_syllabus['story_summary_dicts'] + self.assertEqual(len(story_summary_dicts), 0) + + subtopic_summary_dicts = matching_syllabus['subtopic_summary_dicts'] + self.assertEqual(len(subtopic_summary_dicts), 0) + + def test_add_learner_to_learner_group(self) -> None: + # Test for invited learner. + learner_grp = learner_group_fetchers.get_learner_group_by_id( + self.LEARNER_GROUP_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert learner_grp is not None + + learner_grps_user_model = user_models.LearnerGroupsUserModel.get( + self.LEARNER_ID, strict=True) + self.assertEqual( + learner_grp.invited_learner_user_ids, [self.LEARNER_ID]) + self.assertEqual( + learner_grp.learner_user_ids, []) + self.assertEqual( + learner_grps_user_model.learner_groups_user_details, []) + + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, self.LEARNER_ID, True) + + learner_grp = learner_group_fetchers.get_learner_group_by_id( + self.LEARNER_GROUP_ID) + # Ruling out the possibility of None for mypy type checking. + assert learner_grp is not None + + learner_grps_user_model = user_models.LearnerGroupsUserModel.get( + self.LEARNER_ID, strict=True) + + self.assertEqual( + learner_grp.invited_learner_user_ids, []) + self.assertEqual( + learner_grp.learner_user_ids, [self.LEARNER_ID]) + self.assertEqual( + learner_grps_user_model.learner_groups_user_details, + [ + { + 'group_id': self.LEARNER_GROUP_ID, + 'progress_sharing_is_turned_on': True + } + ] + ) + + # Test for uninvited learner. + with self.assertRaisesRegex( + Exception, + 'Learner was not invited to join the learner group.' + ): + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, 'uninvited_learner_id', False) + + def test_remove_learner_group(self) -> None: + # Ruling out the possibility of None for mypy type checking. + assert self.LEARNER_GROUP_ID is not None + + self.assertIsNotNone( + learner_group_fetchers.get_learner_group_by_id( + self.LEARNER_GROUP_ID)) + + learner_group_services.remove_learner_group(self.LEARNER_GROUP_ID) + + self.assertIsNone( + learner_group_fetchers.get_learner_group_by_id( + self.LEARNER_GROUP_ID)) + + def test_remove_invited_learners_from_learner_group(self) -> None: + # Ruling out the possibility of None for mypy type checking. 
+ assert self.LEARNER_GROUP_ID is not None + + user_model = user_models.LearnerGroupsUserModel.get( + self.LEARNER_ID, strict=True) + self.assertEqual( + user_model.invited_to_learner_groups_ids, + [self.LEARNER_GROUP_ID, self.LEARNER_GROUP_ID_2]) + + learner_group_services.invite_learners_to_learner_group( + 'group_id_2', [self.LEARNER_ID]) + + user_model = user_models.LearnerGroupsUserModel.get( + self.LEARNER_ID, strict=True) + self.assertEqual( + user_model.invited_to_learner_groups_ids, + [self.LEARNER_GROUP_ID, self.LEARNER_GROUP_ID_2, 'group_id_2']) + + learner_group_services.remove_invited_learners_from_learner_group( + self.LEARNER_GROUP_ID, [self.LEARNER_ID], True) + + user_model = user_models.LearnerGroupsUserModel.get( + self.LEARNER_ID, strict=True) + self.assertEqual( + user_model.invited_to_learner_groups_ids, + [self.LEARNER_GROUP_ID_2, 'group_id_2']) + + def test_invite_learners_to_learner_group(self) -> None: + # Ruling out the possibility of None for mypy type checking. 
+ assert self.LEARNER_GROUP_ID is not None + + new_learner_id = 'new_learner_id' + user_model_1 = user_models.LearnerGroupsUserModel.get( + self.LEARNER_ID, strict=True) + self.assertEqual( + user_model_1.invited_to_learner_groups_ids, + [self.LEARNER_GROUP_ID, self.LEARNER_GROUP_ID_2]) + user_model_2 = user_models.LearnerGroupsUserModel.get( + new_learner_id, strict=False) + self.assertIsNone(user_model_2) + + learner_group_services.invite_learners_to_learner_group( + 'group_id_2', [self.LEARNER_ID, new_learner_id]) + + user_model_1 = user_models.LearnerGroupsUserModel.get( + self.LEARNER_ID, strict=True) + self.assertEqual( + user_model_1.invited_to_learner_groups_ids, + [self.LEARNER_GROUP_ID, self.LEARNER_GROUP_ID_2, 'group_id_2']) + + user_model_2 = user_models.LearnerGroupsUserModel.get( + new_learner_id, strict=True) + self.assertEqual( + user_model_2.invited_to_learner_groups_ids, + ['group_id_2']) + + def test_can_already_invited_user_be_invited_to_learner_group( + self + ) -> None: + (is_valid_invite, error_message) = ( + learner_group_services.can_user_be_invited( + self.LEARNER_ID, 'username1', self.LEARNER_GROUP_ID)) + self.assertFalse(is_valid_invite) + self.assertEqual( + error_message, + 'User with username username1 has been already invited to ' + 'join the group' + ) + + def test_can_user_be_invited_to_a_new_learner_group(self) -> None: + (is_valid_invite, error_message) = ( + learner_group_services.can_user_be_invited( + self.LEARNER_ID, 'username1', '')) + self.assertTrue(is_valid_invite) + self.assertEqual(error_message, '') + + def test_can_facilitator_be_invited_to_learner_group(self) -> None: + (is_valid_invite, error_message) = ( + learner_group_services.can_user_be_invited( + self.FACILITATOR_ID, 'facilitator_name', + self.LEARNER_GROUP_ID)) + self.assertFalse(is_valid_invite) + self.assertEqual( + error_message, + 'User with username facilitator_name is already a facilitator.' 
+ ) + + def test_can_a_learner_be_invited_to_learner_group(self) -> None: + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, self.LEARNER_ID, True) + (is_valid_invite, error_message) = ( + learner_group_services.can_user_be_invited( + self.LEARNER_ID, 'username1', self.LEARNER_GROUP_ID)) + self.assertFalse(is_valid_invite) + self.assertEqual( + error_message, + 'User with username username1 is already a learner.' + ) + + def test_can_uninvolved_user_be_invited_to_learner_group(self) -> None: + (is_valid_invite, error_message) = ( + learner_group_services.can_user_be_invited( + 'uninvolved_user_id', 'username2', self.LEARNER_GROUP_ID)) + self.assertTrue(is_valid_invite) + self.assertEqual(error_message, '') + + def test_remove_learners_from_learner_group(self) -> None: + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, self.LEARNER_ID, True) + + self.learner_group = learner_group_services.update_learner_group( + self.LEARNER_GROUP_ID, self.learner_group.title, + self.learner_group.description, + self.learner_group.facilitator_user_ids, [], + ['learner2', 'learner3'], self.learner_group.subtopic_page_ids, + self.learner_group.story_ids) + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, 'learner2', True) + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, 'learner3', False) + + learner_group = learner_group_fetchers.get_learner_group_by_id( + self.LEARNER_GROUP_ID) + # Ruling out the possibility of None for mypy type checking. + assert learner_group is not None + self.assertEqual( + learner_group.learner_user_ids, + ['learner2', 'learner3']) + learner_group_services.remove_learners_from_learner_group( + self.LEARNER_GROUP_ID, ['learner2', 'learner3'], True) + + learner_group = learner_group_fetchers.get_learner_group_by_id( + self.LEARNER_GROUP_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert learner_group is not None + self.assertEqual(learner_group.learner_user_ids, []) + + def test_remove_subtopic_page_reference_from_learner_groups(self) -> None: + self.learner_group = learner_group_services.update_learner_group( + self.LEARNER_GROUP_ID, self.learner_group.title, + self.learner_group.description, + self.learner_group.facilitator_user_ids, [], + [self.LEARNER_ID], ['topic1:2', 'topic1:1'], + self.learner_group.story_ids) + + ( + learner_group_services + .remove_subtopic_page_reference_from_learner_groups( + 'topic1', 2 + ) + ) + + learner_group = learner_group_fetchers.get_learner_group_by_id( + self.LEARNER_GROUP_ID) + # Ruling out the possibility of None for mypy type checking. + assert learner_group is not None + self.assertEqual(learner_group.subtopic_page_ids, ['topic1:1']) + + def test_remove_story_reference_from_learner_groups(self) -> None: + self.learner_group = learner_group_services.update_learner_group( + self.LEARNER_GROUP_ID, self.learner_group.title, + self.learner_group.description, + self.learner_group.facilitator_user_ids, [], + [self.LEARNER_ID], ['topic1:2', 'topic1:1'], + ['story_id1', 'story_id2']) + + learner_group_services.remove_story_reference_from_learner_groups( + 'story_id1') + + learner_group = learner_group_fetchers.get_learner_group_by_id( + self.LEARNER_GROUP_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert learner_group is not None + self.assertEqual(learner_group.story_ids, ['story_id2']) + + def test_update_progress_sharing_permission(self) -> None: + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID, self.LEARNER_ID, True) + learner_group_services.add_learner_to_learner_group( + self.LEARNER_GROUP_ID_2, self.LEARNER_ID, False) + + self.assertEqual( + learner_group_fetchers.can_multi_learners_share_progress( + [self.LEARNER_ID], self.LEARNER_GROUP_ID + ), [True] + ) + + learner_group_services.update_progress_sharing_permission( + self.LEARNER_ID, self.LEARNER_GROUP_ID, False) + + self.assertEqual( + learner_group_fetchers.can_multi_learners_share_progress( + [self.LEARNER_ID], self.LEARNER_GROUP_ID + ), [False] + ) diff --git a/core/domain/learner_playlist_services.py b/core/domain/learner_playlist_services.py index 3b146e4477a4..214cfb91275b 100644 --- a/core/domain/learner_playlist_services.py +++ b/core/domain/learner_playlist_services.py @@ -23,13 +23,22 @@ from core.domain import user_domain from core.platform import models -(user_models,) = models.Registry.import_models([models.NAMES.user]) +from typing import Final, List, Optional, Tuple -MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT = ( - feconf.MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models +(user_models,) = models.Registry.import_models([models.Names.USER]) -def get_learner_playlist_from_model(learner_playlist_model): +MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT: Final = ( + feconf.MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT +) + + +def get_learner_playlist_from_model( + learner_playlist_model: user_models.LearnerPlaylistModel +) -> user_domain.LearnerPlaylist: """Returns the learner playlist domain object given the learner playlist model loaded from the datastore. 
@@ -47,7 +56,9 @@ def get_learner_playlist_from_model(learner_playlist_model): learner_playlist_model.collection_ids) -def save_learner_playlist(learner_playlist): +def save_learner_playlist( + learner_playlist: user_domain.LearnerPlaylist +) -> None: """Save a learner playlist domain object as an LearnerPlaylistModel entity in the datastore. @@ -72,7 +83,10 @@ def save_learner_playlist(learner_playlist): def mark_exploration_to_be_played_later( - user_id, exploration_id, position_to_be_inserted=None): + user_id: str, + exploration_id: str, + position_to_be_inserted: Optional[int] = None +) -> Tuple[bool, bool]: """Adds the exploration id to the learner playlist of the user at the given position. If the position is not specified, the exploration gets added at the end. If the exploration is created or has been edited by the user it is @@ -136,7 +150,10 @@ def mark_exploration_to_be_played_later( def mark_collection_to_be_played_later( - user_id, collection_id, position_to_be_inserted=None): + user_id: str, + collection_id: str, + position_to_be_inserted: Optional[int] = None +) -> Tuple[bool, bool]: """Adds the collection id to the learner playlist of the user at the given position. If the position is not specified, the collection gets added at the end. If the collection is created or has been edited by the user it is @@ -199,7 +216,9 @@ def mark_collection_to_be_played_later( return playlist_limit_exceeded, collection_belongs_to_subscribed_collections -def remove_exploration_from_learner_playlist(user_id, exploration_id): +def remove_exploration_from_learner_playlist( + user_id: str, exploration_id: str +) -> None: """Removes the exploration from the learner playlist of the user (if present). 
@@ -218,7 +237,9 @@ def remove_exploration_from_learner_playlist(user_id, exploration_id): save_learner_playlist(learner_playlist) -def remove_collection_from_learner_playlist(user_id, collection_id): +def remove_collection_from_learner_playlist( + user_id: str, collection_id: str +) -> None: """Removes the collection from the learner playlist of the user (if present). @@ -237,7 +258,7 @@ def remove_collection_from_learner_playlist(user_id, collection_id): save_learner_playlist(learner_playlist) -def get_all_exp_ids_in_learner_playlist(user_id): +def get_all_exp_ids_in_learner_playlist(user_id: str) -> List[str]: """Returns a list with the ids of all the explorations that are in the playlist of the user. @@ -260,7 +281,7 @@ def get_all_exp_ids_in_learner_playlist(user_id): return [] -def get_all_collection_ids_in_learner_playlist(user_id): +def get_all_collection_ids_in_learner_playlist(user_id: str) -> List[str]: """Returns a list with the ids of all the collections that are in the playlist of the user. 
diff --git a/core/domain/learner_playlist_services_test.py b/core/domain/learner_playlist_services_test.py index c7942c912748..e42947d8cdc4 100644 --- a/core/domain/learner_playlist_services_test.py +++ b/core/domain/learner_playlist_services_test.py @@ -25,28 +25,35 @@ from core.platform import models from core.tests import test_utils -(user_models,) = models.Registry.import_models([models.NAMES.user]) +from typing import Final, List -MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT = ( - feconf.MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) + +MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT: Final = ( + feconf.MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT +) class LearnerPlaylistTests(test_utils.GenericTestBase): """Test the services related to learner playlist services.""" - EXP_ID_0 = '0_en_arch_bridges_in_england' - EXP_ID_1 = '1_fi_arch_sillat_suomi' - EXP_ID_2 = '2_en_welcome_introduce_oppia' - EXP_ID_3 = '3_welcome_oppia' - COL_ID_0 = '0_arch_bridges_in_england' - COL_ID_1 = '1_welcome_introduce_oppia' - COL_ID_2 = '2_welcome_introduce_oppia_interactions' - COL_ID_3 = '3_welcome_oppia_collection' - USER_EMAIL = 'user@example.com' - USER_USERNAME = 'user' + EXP_ID_0: Final = '0_en_arch_bridges_in_england' + EXP_ID_1: Final = '1_fi_arch_sillat_suomi' + EXP_ID_2: Final = '2_en_welcome_introduce_oppia' + EXP_ID_3: Final = '3_welcome_oppia' + COL_ID_0: Final = '0_arch_bridges_in_england' + COL_ID_1: Final = '1_welcome_introduce_oppia' + COL_ID_2: Final = '2_welcome_introduce_oppia_interactions' + COL_ID_3: Final = '3_welcome_oppia_collection' + USER_EMAIL: Final = 'user@example.com' + USER_USERNAME: Final = 'user' - def setUp(self): - super(LearnerPlaylistTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) @@ -81,29 +88,41 @@ def setUp(self): 
self.COL_ID_3, self.owner_id, title='Welcome Oppia Collection', category='Welcome') - def _get_all_learner_playlist_exp_ids(self, user_id): + def _get_all_learner_playlist_exp_ids(self, user_id: str) -> List[str]: """Returns the list of all the exploration ids in the learner's playlist corresponding to the given user id. """ learner_playlist_model = user_models.LearnerPlaylistModel.get( user_id, strict=False) - return ( - learner_playlist_model.exploration_ids if - learner_playlist_model else []) - - def _get_all_learner_playlist_collection_ids(self, user_id): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if learner_playlist_model: + exp_ids: List[str] = learner_playlist_model.exploration_ids + return exp_ids + else: + return [] + + def _get_all_learner_playlist_collection_ids( + self, user_id: str + ) -> List[str]: """Returns the list of all the collection ids in the learner's playlist corresponding to the given user id. """ learner_playlist_model = user_models.LearnerPlaylistModel.get( user_id, strict=False) - return ( - learner_playlist_model.collection_ids if - learner_playlist_model else []) + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if learner_playlist_model: + collection_ids: List[str] = learner_playlist_model.collection_ids + return collection_ids + else: + return [] - def test_subscribed_exploration_cannot_be_added_to_playlist(self): + def test_subscribed_exploration_cannot_be_added_to_playlist(self) -> None: # Subscribe to exploration. 
subscription_services.subscribe_to_exploration( self.user_id, self.EXP_ID_0) @@ -116,7 +135,7 @@ def test_subscribed_exploration_cannot_be_added_to_playlist(self): self.assertEqual( self._get_all_learner_playlist_exp_ids(self.user_id), []) - def test_single_exploration_is_added_correctly_to_playlist(self): + def test_single_exploration_is_added_correctly_to_playlist(self) -> None: # Test adding a single exploration_id to learner playlist. self.assertEqual( self._get_all_learner_playlist_exp_ids(self.user_id), []) @@ -134,7 +153,9 @@ def test_single_exploration_is_added_correctly_to_playlist(self): self._get_all_learner_playlist_exp_ids( self.user_id), [self.EXP_ID_1, self.EXP_ID_0]) - def test_multiple_explorations_are_added_correctly_to_playlist(self): + def test_multiple_explorations_are_added_correctly_to_playlist( + self + ) -> None: # Test adding two explorations to the learner playlist. self.assertEqual( self._get_all_learner_playlist_exp_ids( @@ -152,7 +173,9 @@ def test_multiple_explorations_are_added_correctly_to_playlist(self): self._get_all_learner_playlist_exp_ids( self.user_id), [self.EXP_ID_0, self.EXP_ID_1]) - def test_adding_exisiting_exploration_changes_order_of_explorations(self): + def test_adding_exisiting_exploration_changes_order_of_explorations( + self + ) -> None: # Test adding the exploration_id if it is already in # learner_playlist.exploration_ids. # Add the first exploration to the second position. 
@@ -171,7 +194,9 @@ def test_adding_exisiting_exploration_changes_order_of_explorations(self): self._get_all_learner_playlist_exp_ids( self.user_id), [self.EXP_ID_1, self.EXP_ID_0]) - def test_incomplete_exploration_is_not_added_to_learner_playlist(self): + def test_incomplete_exploration_is_not_added_to_learner_playlist( + self + ) -> None: learner_progress_services.add_exp_to_learner_playlist( self.user_id, self.EXP_ID_0) self.assertEqual( @@ -189,7 +214,7 @@ def test_incomplete_exploration_is_not_added_to_learner_playlist(self): self._get_all_learner_playlist_exp_ids( self.user_id), [self.EXP_ID_0]) - def test_nunmber_of_explorations_cannot_exceed_max(self): + def test_number_of_explorations_cannot_exceed_max(self) -> None: # Add MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT explorations. exp_ids = ['SAMPLE_EXP_ID_%s' % index for index in range( 0, MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT)] @@ -216,7 +241,7 @@ def test_nunmber_of_explorations_cannot_exceed_max(self): self.assertEqual( self._get_all_learner_playlist_exp_ids(self.user_id), exp_ids) - def test_subscribed_collection_cannot_be_added_to_playlist(self): + def test_subscribed_collection_cannot_be_added_to_playlist(self) -> None: # Subscribe to collection. subscription_services.subscribe_to_collection( self.user_id, self.COL_ID_0) @@ -230,7 +255,7 @@ def test_subscribed_collection_cannot_be_added_to_playlist(self): self.assertEqual( self._get_all_learner_playlist_collection_ids(self.user_id), []) - def test_single_collection_is_added_correctly_to_playlist(self): + def test_single_collection_is_added_correctly_to_playlist(self) -> None: # Test adding a single collection_id to learner playlist. 
self.assertEqual( self._get_all_learner_playlist_collection_ids(self.user_id), []) @@ -248,7 +273,7 @@ def test_single_collection_is_added_correctly_to_playlist(self): self._get_all_learner_playlist_collection_ids( self.user_id), [self.COL_ID_1, self.COL_ID_0]) - def test_multiple_collections_are_added_correctly_to_playlist(self): + def test_multiple_collections_are_added_correctly_to_playlist(self) -> None: # Test adding two explorations to the learner playlist. self.assertEqual( self._get_all_learner_playlist_collection_ids( @@ -266,7 +291,9 @@ def test_multiple_collections_are_added_correctly_to_playlist(self): self._get_all_learner_playlist_collection_ids( self.user_id), [self.COL_ID_0, self.COL_ID_1]) - def test_adding_existing_collection_changes_order_of_collections(self): + def test_adding_existing_collection_changes_order_of_collections( + self + ) -> None: # Test adding the collection_id if it is already in # learner_playlist.collection_ids. # Add the first collection to the second position. @@ -285,7 +312,7 @@ def test_adding_existing_collection_changes_order_of_collections(self): self._get_all_learner_playlist_collection_ids( self.user_id), [self.COL_ID_1, self.COL_ID_0]) - def test_number_of_collections_cannot_exceed_max(self): + def test_number_of_collections_cannot_exceed_max(self) -> None: # Add MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT collections. 
col_ids = ['SAMPLE_COL_ID_%s' % index for index in range( 0, MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT)] @@ -315,7 +342,7 @@ def test_number_of_collections_cannot_exceed_max(self): self._get_all_learner_playlist_collection_ids( self.user_id), col_ids) - def test_remove_exploration_from_learner_playlist(self): + def test_remove_exploration_from_learner_playlist(self) -> None: self.assertEqual(self._get_all_learner_playlist_exp_ids( self.user_id), []) @@ -345,7 +372,7 @@ def test_remove_exploration_from_learner_playlist(self): self.assertEqual(self._get_all_learner_playlist_exp_ids( self.user_id), []) - def test_remove_collection_from_learner_playlist(self): + def test_remove_collection_from_learner_playlist(self) -> None: self.assertEqual(self._get_all_learner_playlist_collection_ids( self.user_id), []) @@ -375,7 +402,7 @@ def test_remove_collection_from_learner_playlist(self): self.assertEqual(self._get_all_learner_playlist_collection_ids( self.user_id), []) - def test_get_all_exp_ids_in_learner_playlist(self): + def test_get_all_exp_ids_in_learner_playlist(self) -> None: self.assertEqual( learner_playlist_services.get_all_exp_ids_in_learner_playlist( self.user_id), []) @@ -394,7 +421,7 @@ def test_get_all_exp_ids_in_learner_playlist(self): learner_playlist_services.get_all_exp_ids_in_learner_playlist( self.user_id), [self.EXP_ID_0, self.EXP_ID_1]) - def test_get_all_learner_playlist_collection_ids(self): + def test_get_all_learner_playlist_collection_ids(self) -> None: self.assertEqual( learner_playlist_services.get_all_collection_ids_in_learner_playlist( # pylint: disable=line-too-long self.user_id), []) diff --git a/core/domain/learner_progress_domain.py b/core/domain/learner_progress_domain.py index 1015b234c9d7..993a08ca48b0 100644 --- a/core/domain/learner_progress_domain.py +++ b/core/domain/learner_progress_domain.py @@ -37,8 +37,8 @@ def __init__( topics_to_learn_summaries: List[topic_domain.TopicSummary], all_topic_summaries: List[topic_domain.TopicSummary], 
untracked_topic_summaries: List[topic_domain.TopicSummary], - completed_to_incomplete_story_titles: List[story_domain.StorySummary], - learnt_to_partially_learnt_topic_titles: List[story_domain.StorySummary] + completed_to_incomplete_story_titles: List[str], + learnt_to_partially_learnt_topic_titles: List[str] ) -> None: """Constructs a LearnerProgress domain object. @@ -56,12 +56,12 @@ def __init__( in the edit goals. untracked_topic_summaries: list(TopicSummary). The summaries of the topics not tracked for the user. - completed_to_incomplete_story_titles: list(StorySummary). - The summaries corresponding to those stories which have - been moved to the in progress section on account of new + completed_to_incomplete_story_titles: list(str). + The titles of summaries corresponding to those stories which + have been moved to the in progress section on account of new nodes being added to them. - learnt_to_partially_learnt_topic_titles: list(StorySummary). - The summaries corresponding to those topics which have + learnt_to_partially_learnt_topic_titles: list(str). + The titles of summaries corresponding to those topics which have been moved to the in progress section on account of new stories being added to them. """ @@ -88,8 +88,7 @@ def __init__( collection_domain.CollectionSummary], collection_playlist: List[ collection_domain.CollectionSummary], - completed_to_incomplete_collection_titles: List[ - collection_domain.CollectionSummary], + completed_to_incomplete_collection_titles: List[str], ) -> None: """Constructs a LearnerProgress domain object. 
diff --git a/core/domain/learner_progress_services.py b/core/domain/learner_progress_services.py index f31e54dec169..cebcde217dda 100644 --- a/core/domain/learner_progress_services.py +++ b/core/domain/learner_progress_services.py @@ -23,26 +23,100 @@ from core import utils from core.constants import constants from core.domain import classroom_services +from core.domain import collection_domain from core.domain import collection_services from core.domain import config_domain +from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import learner_goals_services from core.domain import learner_playlist_services from core.domain import learner_progress_domain from core.domain import skill_services +from core.domain import story_domain from core.domain import story_fetchers from core.domain import story_services from core.domain import subscription_services +from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services from core.domain import user_domain from core.platform import models -(user_models,) = models.Registry.import_models([models.NAMES.user]) + +from typing import Dict, List, Optional, Tuple, TypedDict + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import story_models + from mypy_imports import topic_models + from mypy_imports import user_models + +(user_models, topic_models, story_models) = models.Registry.import_models([ + models.Names.USER, models.Names.TOPIC, models.Names.STORY +]) datastore_services = models.Registry.import_datastore_services() -def _get_completed_activities_from_model(completed_activities_model): +class DisplayableStorySummaryDict(TypedDict): + """Type for the displayable story summary dictionary""" + + id: str + title: str + description: str + node_titles: List[str] + thumbnail_bg_color: Optional[str] + thumbnail_filename: Optional[str] + url_fragment: str + story_is_published: bool + 
completed_node_titles: List[str] + all_node_dicts: List[story_domain.StoryNodeDict] + topic_name: str + topic_url_fragment: str + classroom_url_fragment: str + + +class DisplayableTopicSummaryDict(TypedDict): + """Type for the displayable topic summary dictionary""" + + id: str + name: str + description: str + language_code: str + version: int + story_titles: List[str] + total_published_node_count: int + thumbnail_bg_color: Optional[str] + thumbnail_filename: Optional[str] + canonical_story_summary_dict: List[topic_fetchers.CannonicalStoryDict] + url_fragment: str + classroom: str + practice_tab_is_displayed: bool + degrees_of_mastery: Dict[str, Optional[float]] + skill_descriptions: Tuple[Dict[str, str], List[str]] + subtopics: List[topic_domain.SubtopicDict] + + +class DisplayableCollectionSummaryDict(TypedDict): + """Type for the displayable collection summary dictionary""" + + id: str + title: str + category: str + objective: str + language_code: str + last_updated_msec: float + created_on: float + status: str + node_count: int + community_owned: bool + thumbnail_icon_url: str + thumbnail_bg_color: str + + +def _get_completed_activities_from_model( + completed_activities_model: user_models.CompletedActivitiesModel +) -> user_domain.CompletedActivities: """Returns an activities completed domain object given a activities completed model loaded from the datastore. @@ -62,7 +136,9 @@ def _get_completed_activities_from_model(completed_activities_model): completed_activities_model.learnt_topic_ids) -def _get_incomplete_activities_from_model(incomplete_activities_model): +def _get_incomplete_activities_from_model( + incomplete_activities_model: user_models.IncompleteActivitiesModel +) -> user_domain.IncompleteActivities: """Returns an incomplete activities domain object given an incomplete activities model loaded from the datastore. 
@@ -82,7 +158,9 @@ def _get_incomplete_activities_from_model(incomplete_activities_model): incomplete_activities_model.partially_learnt_topic_ids) -def _get_last_playthrough_information(last_playthrough_model): +def _get_last_playthrough_information( + last_playthrough_model: user_models.ExpUserLastPlaythroughModel +) -> user_domain.ExpUserLastPlaythrough: """Returns an ExpUserLastPlaythrough domain object given an ExpUserLastPlaythroughModel loaded from the datastore. @@ -102,7 +180,9 @@ def _get_last_playthrough_information(last_playthrough_model): last_playthrough_model.last_played_state_name) -def _save_completed_activities(activities_completed): +def _save_completed_activities( + activities_completed: user_domain.CompletedActivities +) -> None: """Save an activities completed domain object as a CompletedActivitiesModel instance in the datastore. @@ -129,7 +209,9 @@ def _save_completed_activities(activities_completed): user_models.CompletedActivitiesModel(**activities_completed_dict).put() -def _save_incomplete_activities(incomplete_activities): +def _save_incomplete_activities( + incomplete_activities: user_domain.IncompleteActivities +) -> None: """Save an incomplete activities domain object as an IncompleteActivitiesModel instance in the datastore. @@ -151,7 +233,9 @@ def _save_incomplete_activities(incomplete_activities): incomplete_activities_model.put() -def _save_last_playthrough_information(last_playthrough_information): +def _save_last_playthrough_information( + last_playthrough_information: user_domain.ExpUserLastPlaythrough +) -> None: """Save an ExpUserLastPlaythrough domain object as an ExpUserLastPlaythroughModel instance in the datastore. 
@@ -172,7 +256,7 @@ def _save_last_playthrough_information(last_playthrough_information): last_playthrough_information_model.put() -def mark_exploration_as_completed(user_id, exp_id): +def mark_exploration_as_completed(user_id: str, exp_id: str) -> None: """Adds the exploration id to the completed list of the user unless the exploration has already been completed or has been created/edited by the user. It is also removed from the incomplete list and the learner playlist @@ -210,7 +294,7 @@ def mark_exploration_as_completed(user_id, exp_id): _save_completed_activities(activities_completed) -def mark_story_as_completed(user_id, story_id): +def mark_story_as_completed(user_id: str, story_id: str) -> None: """Adds the story id to the completed list of the user unless the story has already been completed by the user. It is also removed from the incomplete list(if present). @@ -235,7 +319,7 @@ def mark_story_as_completed(user_id, story_id): _save_completed_activities(activities_completed) -def mark_topic_as_learnt(user_id, topic_id): +def mark_topic_as_learnt(user_id: str, topic_id: str) -> None: """Adds the topic id to the learnt list of the user unless the topic has already been learnt by the user. It is also removed from the partially learnt list and topics to learn list(if present). @@ -265,7 +349,7 @@ def mark_topic_as_learnt(user_id, topic_id): _save_completed_activities(activities_completed) -def mark_collection_as_completed(user_id, collection_id): +def mark_collection_as_completed(user_id: str, collection_id: str) -> None: """Adds the collection id to the list of collections completed by the user unless the collection has already been completed or has been created/edited by the user. 
It is also removed from the incomplete list and the play later @@ -304,7 +388,8 @@ def mark_collection_as_completed(user_id, collection_id): def mark_exploration_as_incomplete( - user_id, exploration_id, state_name, exploration_version): + user_id: str, exploration_id: str, state_name: str, exploration_version: int +) -> None: """Adds the exploration id to the incomplete list of the user unless the exploration has been already completed or has been created/edited by the user. If the exploration is already present in the incomplete list, just the @@ -317,7 +402,7 @@ def mark_exploration_as_incomplete( exploration_id: str. The id of the partially completed exploration. state_name: str. The name of the state at which the user left the exploration. - exploration_version: str. The version of the exploration played by the + exploration_version: int. The version of the exploration played by the learner. """ incomplete_activities_model = ( @@ -362,7 +447,7 @@ def mark_exploration_as_incomplete( _save_incomplete_activities(incomplete_activities) -def record_story_started(user_id, story_id): +def record_story_started(user_id: str, story_id: str) -> None: """Adds the story id to the incomplete list of the user unless the story has been already completed by the user. @@ -389,7 +474,7 @@ def record_story_started(user_id, story_id): _save_incomplete_activities(incomplete_activities) -def record_topic_started(user_id, topic_id): +def record_topic_started(user_id: str, topic_id: str) -> None: """Adds the topic id to the partially learnt list of the user unless the topic has been already learnt by the user. 
If the topic is already present in the partially learnt list, just the details associated with it @@ -418,7 +503,7 @@ def record_topic_started(user_id, topic_id): _save_incomplete_activities(incomplete_activities) -def mark_collection_as_incomplete(user_id, collection_id): +def mark_collection_as_incomplete(user_id: str, collection_id: str) -> None: """Adds the collection id to the list of collections partially completed by the user unless the collection has already been completed or has been created/edited by the user or is already present in the incomplete list. @@ -453,7 +538,9 @@ def mark_collection_as_incomplete(user_id, collection_id): _save_incomplete_activities(incomplete_activities) -def validate_and_add_topic_to_learn_goal(user_id, topic_id): +def validate_and_add_topic_to_learn_goal( + user_id: str, topic_id: str +) -> Tuple[bool, bool]: """This function checks if the topic exists in the learnt. If it does not exist we call the function in learner goals services to add the topic to the learn list. @@ -486,7 +573,10 @@ def validate_and_add_topic_to_learn_goal(user_id, topic_id): def add_collection_to_learner_playlist( - user_id, collection_id, position_to_be_inserted=None): + user_id: str, + collection_id: str, + position_to_be_inserted: Optional[int] = None +) -> Tuple[bool, bool, bool]: """This function checks if the collection exists in the completed list or the incomplete list. If it does not exist we call the function in learner playlist services to add the collection to the play later list. @@ -532,7 +622,10 @@ def add_collection_to_learner_playlist( def add_exp_to_learner_playlist( - user_id, exploration_id, position_to_be_inserted=None): + user_id: str, + exploration_id: str, + position_to_be_inserted: Optional[int] = None +) -> Tuple[bool, bool, bool]: """This function checks if the exploration exists in the completed list or the incomplete list. 
If it does not exist we call the function in learner playlist services to add the exploration to the play later list. @@ -579,7 +672,8 @@ def add_exp_to_learner_playlist( def _remove_activity_ids_from_playlist( - user_id, exploration_ids, collection_ids): + user_id: str, exploration_ids: List[str], collection_ids: List[str] +) -> None: """Removes the explorations and collections from the playlist of the user. Args: @@ -604,7 +698,7 @@ def _remove_activity_ids_from_playlist( learner_playlist_services.save_learner_playlist(learner_playlist) -def remove_story_from_completed_list(user_id, story_id): +def remove_story_from_completed_list(user_id: str, story_id: str) -> None: """Removes the story id from the list of completed stories (if present). @@ -625,7 +719,7 @@ def remove_story_from_completed_list(user_id, story_id): _save_completed_activities(activities_completed) -def remove_topic_from_learnt_list(user_id, topic_id): +def remove_topic_from_learnt_list(user_id: str, topic_id: str) -> None: """Removes the topic id from the list of learnt topics (if present). @@ -645,7 +739,9 @@ def remove_topic_from_learnt_list(user_id, topic_id): _save_completed_activities(activities_completed) -def remove_collection_from_completed_list(user_id, collection_id): +def remove_collection_from_completed_list( + user_id: str, collection_id: str +) -> None: """Removes the collection id from the list of completed collections (if present). @@ -666,7 +762,12 @@ def remove_collection_from_completed_list(user_id, collection_id): def _remove_activity_ids_from_completed_list( - user_id, exploration_ids, collection_ids, story_ids, learnt_topic_ids): + user_id: str, + exploration_ids: List[str], + collection_ids: List[str], + story_ids: List[str], + learnt_topic_ids: List[str] +) -> None: """Removes the explorations, collections, stories and learnt topics from the completed list of the learner. 
@@ -700,7 +801,7 @@ def _remove_activity_ids_from_completed_list( _save_completed_activities(activities_completed) -def remove_exp_from_incomplete_list(user_id, exploration_id): +def remove_exp_from_incomplete_list(user_id: str, exploration_id: str) -> None: """Removes the exploration from the incomplete list of the user (if present). @@ -719,12 +820,14 @@ def remove_exp_from_incomplete_list(user_id, exploration_id): last_playthrough_information_model = ( user_models.ExpUserLastPlaythroughModel.get( user_id, exploration_id)) + # Ruling out the possibility of None for mypy type checking. + assert last_playthrough_information_model is not None last_playthrough_information_model.delete() _save_incomplete_activities(incomplete_activities) -def remove_story_from_incomplete_list(user_id, story_id): +def remove_story_from_incomplete_list(user_id: str, story_id: str) -> None: """Removes the story from the incomplete list of the user(if present). Args: @@ -743,7 +846,9 @@ def remove_story_from_incomplete_list(user_id, story_id): _save_incomplete_activities(incomplete_activities) -def remove_topic_from_partially_learnt_list(user_id, topic_id): +def remove_topic_from_partially_learnt_list( + user_id: str, topic_id: str +) -> None: """Removes the topic from the partially learnt list of the user(if present). Args: @@ -762,7 +867,9 @@ def remove_topic_from_partially_learnt_list(user_id, topic_id): _save_incomplete_activities(incomplete_activities) -def remove_collection_from_incomplete_list(user_id, collection_id): +def remove_collection_from_incomplete_list( + user_id: str, collection_id: str +) -> None: """Removes the collection id from the list of incomplete collections (if present). 
@@ -782,8 +889,11 @@ def remove_collection_from_incomplete_list(user_id, collection_id): def _remove_activity_ids_from_incomplete_list( - user_id, exploration_ids=None, collection_ids=None, - partially_learnt_topic_ids=None): + user_id: str, + exploration_ids: List[str], + collection_ids: List[str], + partially_learnt_topic_ids: List[str] +) -> None: """Removes the collections, explorations and topics from the incomplete list of the learner. @@ -814,7 +924,7 @@ def _remove_activity_ids_from_incomplete_list( _save_incomplete_activities(incomplete_activities) -def get_all_completed_exp_ids(user_id): +def get_all_completed_exp_ids(user_id: str) -> List[str]: """Returns a list with the ids of all the explorations completed by the user. @@ -838,13 +948,15 @@ def get_all_completed_exp_ids(user_id): def _get_filtered_completed_exp_summaries( - exploration_summaries, exploration_ids): + exploration_summaries: List[Optional[exp_domain.ExplorationSummary]], + exploration_ids: List[str] +) -> Tuple[List[exp_domain.ExplorationSummary], List[str]]: """Returns a list of summaries of the completed exploration ids and the ids of explorations that are no longer present. Args: - exploration_summaries: list(ExplorationSummary). The list of exploration - summary domain objects to be filtered. + exploration_summaries: list(ExplorationSummary|None). The list of + exploration summary domain objects to be filtered. exploration_ids: list(str). The ids of the explorations corresponding to the exploration summary domain objects. @@ -867,7 +979,7 @@ def _get_filtered_completed_exp_summaries( return filtered_completed_exp_summaries, nonexistent_completed_exp_ids -def get_all_completed_story_ids(user_id): +def get_all_completed_story_ids(user_id: str) -> List[str]: """Returns a list with the ids of all the stories completed by the user. 
@@ -892,7 +1004,12 @@ def get_all_completed_story_ids(user_id): def _get_filtered_completed_story_summaries( - user_id, story_summaries, story_ids): + user_id: str, + story_summaries: List[Optional[story_domain.StorySummary]], + story_ids: List[str] +) -> Tuple[ + List[story_domain.StorySummary], List[str], List[story_domain.StorySummary] +]: """Returns a list of summaries of the completed story ids, the ids of stories that are no longer present and the summaries of the stories being shifted to the incomplete section on account of new @@ -900,7 +1017,7 @@ def _get_filtered_completed_story_summaries( Args: user_id: str. The id of the learner. - story_summaries: list(StorySummary). The list of story + story_summaries: list(StorySummary|None). The list of story summary domain objects to be filtered. story_ids: list(str). The ids of the story corresponding to the story summary domain objects. @@ -924,13 +1041,20 @@ def _get_filtered_completed_story_summaries( nonexistent_completed_story_ids.append(story_ids[index]) else: story_id = story_summary.id + story = stories[index] + # Ruling out the possibility of None for mypy type checking, because + # in this method story can only be None when story_summary is None + # and above we are already handling the case of None story_summary. + # So, we are sure that the story cannot be None here and that's why + # we used assert here. 
+ assert story is not None if len(story_fetchers.get_completed_node_ids( user_id, story_id)) != len(story_summary.node_titles): remove_story_from_completed_list(user_id, story_id) record_story_started(user_id, story_id) completed_to_incomplete_story_summaries.append(story_summary) elif not story_services.is_story_published_and_present_in_topic( - stories[index]): + story): nonexistent_completed_story_ids.append(story_ids[index]) else: filtered_completed_story_summaries.append(story_summary) @@ -941,7 +1065,7 @@ def _get_filtered_completed_story_summaries( completed_to_incomplete_story_summaries) -def get_all_learnt_topic_ids(user_id): +def get_all_learnt_topic_ids(user_id: str) -> List[str]: """Returns a list with the ids of all the topics learnt by the user. @@ -966,7 +1090,12 @@ def get_all_learnt_topic_ids(user_id): def _get_filtered_learnt_topic_summaries( - user_id, topic_summaries, topic_ids): + user_id: str, + topic_summaries: List[Optional[topic_domain.TopicSummary]], + topic_ids: List[str] +) -> Tuple[ + List[topic_domain.TopicSummary], List[str], List[topic_domain.TopicSummary] +]: """Returns a list of summaries of the learnt topic ids, the ids of topics that are no longer present and the summaries of the topics being shifted to the partially learnt section on account of new @@ -974,7 +1103,7 @@ def _get_filtered_learnt_topic_summaries( Args: user_id: str. The id of the learner. - topic_summaries: list(TopicSummary). The list of topic + topic_summaries: list(TopicSummary|None). The list of topic summary domain objects to be filtered. topic_ids: list(str). The ids of the topic corresponding to the topic summary domain objects. @@ -1003,15 +1132,21 @@ def _get_filtered_learnt_topic_summaries( else: topic_id = topic_summary.id story_ids_in_topic = [] - for story in topics[index].canonical_story_references: + topic = topics[index] + # Ruling out the possibility of None for mypy type checking. 
+ assert topic is not None + for story in topic.canonical_story_references: story_ids_in_topic.append(story.story_id) + topic_right = topic_rights[index] + # Ruling out the possibility of None for mypy type checking. + assert topic_right is not None if not set(story_ids_in_topic).intersection( set(completed_story_ids)): remove_topic_from_learnt_list(user_id, topic_id) record_topic_started(user_id, topic_id) learnt_to_partially_learnt_topics.append(topic_summary) - elif not topic_rights[index].topic_is_published: + elif not topic_right.topic_is_published: nonexistent_learnt_topic_ids.append(topic_ids[index]) else: filtered_learnt_topic_summaries.append(topic_summary) @@ -1022,7 +1157,7 @@ def _get_filtered_learnt_topic_summaries( learnt_to_partially_learnt_topics) -def get_all_completed_collection_ids(user_id): +def get_all_completed_collection_ids(user_id: str) -> List[str]: """Returns a list with the ids of all the collections completed by the user. @@ -1046,7 +1181,14 @@ def get_all_completed_collection_ids(user_id): def _get_filtered_completed_collection_summaries( - user_id, collection_summaries, collection_ids): + user_id: str, + collection_summaries: List[Optional[collection_domain.CollectionSummary]], + collection_ids: List[str] +) -> Tuple[ + List[collection_domain.CollectionSummary], + List[str], + List[collection_domain.CollectionSummary] +]: """Returns a list of summaries of the completed collection ids, the ids of collections that are no longer present and the summaries of the collections being shifted to the incomplete section on account of new @@ -1054,8 +1196,8 @@ def _get_filtered_completed_collection_summaries( Args: user_id: str. The id of the learner. - collection_summaries: list(CollectionSummary). The list of collection - summary domain objects to be filtered. + collection_summaries: list(CollectionSummary|None). The list of + collection summary domain objects to be filtered. collection_ids: list(str). 
The ids of the collection corresponding to the collection summary domain objects. @@ -1104,7 +1246,7 @@ def _get_filtered_completed_collection_summaries( completed_to_incomplete_collections) -def get_all_incomplete_exp_ids(user_id): +def get_all_incomplete_exp_ids(user_id: str) -> List[str]: """Returns a list with the ids of all the explorations partially completed by the user. @@ -1128,13 +1270,15 @@ def get_all_incomplete_exp_ids(user_id): def _get_filtered_incomplete_exp_summaries( - exploration_summaries, exploration_ids): + exploration_summaries: List[Optional[exp_domain.ExplorationSummary]], + exploration_ids: List[str] +) -> Tuple[List[exp_domain.ExplorationSummary], List[str]]: """Returns a list of summaries of the incomplete exploration ids and the ids of explorations that are no longer present. Args: - exploration_summaries: list(ExplorationSummary). The list of exploration - summary domain objects to be filtered. + exploration_summaries: list(ExplorationSummary|None). The list of + exploration summary domain objects to be filtered. exploration_ids: list(str). The ids of the explorations corresponding to the exploration summary domain objects. @@ -1157,7 +1301,7 @@ def _get_filtered_incomplete_exp_summaries( return filtered_incomplete_exp_summaries, nonexistent_incomplete_exp_ids -def get_all_incomplete_story_ids(user_id): +def get_all_incomplete_story_ids(user_id: str) -> List[str]: """Returns a list with the ids of all the stories partially completed by the user. @@ -1178,7 +1322,7 @@ def get_all_incomplete_story_ids(user_id): return [] -def get_all_partially_learnt_topic_ids(user_id): +def get_all_partially_learnt_topic_ids(user_id: str) -> List[str]: """Returns a list with the ids of all the topics partially learnt by the user. 
@@ -1200,12 +1344,14 @@ def get_all_partially_learnt_topic_ids(user_id): def _get_filtered_partially_learnt_topic_summaries( - topic_summaries, topic_ids): + topic_summaries: List[Optional[topic_domain.TopicSummary]], + topic_ids: List[str] +) -> Tuple[List[topic_domain.TopicSummary], List[str]]: """Returns a list of summaries of the partially learnt topic ids and the ids of topics that are no longer present. Args: - topic_summaries: list(TopicSummary). The list of topic + topic_summaries: list(TopicSummary|None). The list of topic summary domain objects to be filtered. topic_ids: list(str). The ids of the topic corresponding to the topic summary domain objects. @@ -1218,13 +1364,16 @@ def _get_filtered_partially_learnt_topic_summaries( """ nonexistent_partially_learnt_topic_ids = [] filtered_partially_learnt_topic_summaries = [] - topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids) + all_topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids) for index, topic_summary in enumerate(topic_summaries): if topic_summary is None: nonexistent_partially_learnt_topic_ids.append(topic_ids[index]) else: topic_id = topic_summary.id - if not topic_rights[index].topic_is_published: + topic_rights = all_topic_rights[index] + # Ruling out the possibility of None for mypy type checking. + assert topic_rights is not None + if not topic_rights.topic_is_published: nonexistent_partially_learnt_topic_ids.append(topic_id) else: filtered_partially_learnt_topic_summaries.append(topic_summary) @@ -1234,7 +1383,7 @@ def _get_filtered_partially_learnt_topic_summaries( nonexistent_partially_learnt_topic_ids) -def get_all_incomplete_collection_ids(user_id): +def get_all_incomplete_collection_ids(user_id: str) -> List[str]: """Returns a list with the ids of all the collections partially completed by the user. 
@@ -1257,13 +1406,15 @@ def get_all_incomplete_collection_ids(user_id): def _get_filtered_incomplete_collection_summaries( - collection_summaries, collection_ids): + collection_summaries: List[Optional[collection_domain.CollectionSummary]], + collection_ids: List[str] +) -> Tuple[List[collection_domain.CollectionSummary], List[str]]: """Returns a list of summaries of the incomplete collection ids and the ids of collections that are no longer present. Args: - collection_summaries: list(CollectionSummary). The list of collection - summary domain objects to be filtered. + collection_summaries: list(CollectionSummary|None). The list of + collection summary domain objects to be filtered. collection_ids: list(str). The ids of the collection corresponding to the collection summary domain objects. @@ -1289,13 +1440,16 @@ def _get_filtered_incomplete_collection_summaries( def _get_filtered_topics_to_learn_summaries( - user_id, topic_summaries, topic_ids): + user_id: str, + topic_summaries: List[Optional[topic_domain.TopicSummary]], + topic_ids: List[str] +) -> Tuple[List[topic_domain.TopicSummary], List[str]]: """Returns a list of summaries of the topics selected by the user ids of topics that are no longer present. Args: user_id: str. The id of the learner. - topic_summaries: list(TopicSummary). The list of topic + topic_summaries: list(TopicSummary|None). The list of topic summary domain objects to be filtered. topic_ids: list(str). The ids of the topics corresponding to the topic summary domain objects. 
@@ -1310,7 +1464,7 @@ def _get_filtered_topics_to_learn_summaries( filtered_topics_to_learn_summaries = [] completed_story_ids = get_all_completed_story_ids(user_id) - topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids) + all_topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids) topics = topic_fetchers.get_topics_by_ids(topic_ids) for index, topic_summary in enumerate(topic_summaries): @@ -1319,15 +1473,21 @@ def _get_filtered_topics_to_learn_summaries( else: topic_id = topic_summary.id story_ids_in_topic = [] - for story in topics[index].canonical_story_references: + topic = topics[index] + # Ruling out the possibility of None for mypy type checking. + assert topic is not None + for story in topic.canonical_story_references: story_ids_in_topic.append(story.story_id) + topic_rights = all_topic_rights[index] + # Ruling out the possibility of None for mypy type checking. + assert topic_rights is not None if (set(story_ids_in_topic).issubset( set(completed_story_ids))): learner_goals_services.remove_topics_from_learn_goal( user_id, [topic_id]) mark_topic_as_learnt(user_id, topic_id) - elif not topic_rights[index].topic_is_published: + elif not topic_rights.topic_is_published: nonexistent_topic_ids_to_learn.append(topic_ids[index]) else: filtered_topics_to_learn_summaries.append(topic_summary) @@ -1336,13 +1496,15 @@ def _get_filtered_topics_to_learn_summaries( def _get_filtered_exp_playlist_summaries( - exploration_summaries, exploration_ids): + exploration_summaries: List[Optional[exp_domain.ExplorationSummary]], + exploration_ids: List[str] +) -> Tuple[List[exp_domain.ExplorationSummary], List[str]]: """Returns a list of summaries of the explorations in the learner playlist and the ids of explorations that are no longer present. Args: - exploration_summaries: list(ExplorationSummary). The list of exploration - summary domain objects to be filtered. + exploration_summaries: list(ExplorationSummary|None). 
The list of + exploration summary domain objects to be filtered. exploration_ids: list(str). The ids of the explorations corresponding to the exploration summary domain objects. @@ -1366,13 +1528,15 @@ def _get_filtered_exp_playlist_summaries( def _get_filtered_collection_playlist_summaries( - collection_summaries, collection_ids): + collection_summaries: List[Optional[collection_domain.CollectionSummary]], + collection_ids: List[str] +) -> Tuple[List[collection_domain.CollectionSummary], List[str]]: """Returns a list of summaries of the collections in the learner playlist and the ids of collections that are no longer present. Args: - collection_summaries: list(CollectionSummary). The list of collection - summary domain objects to be filtered. + collection_summaries: list(CollectionSummary|None). The list of + collection summary domain objects to be filtered. collection_ids: list(str). The ids of the collections corresponding to the collection summary domain objects. @@ -1398,7 +1562,10 @@ def _get_filtered_collection_playlist_summaries( def get_all_and_untracked_topic_ids_for_user( - partially_learnt_topic_ids, learnt_topic_ids, topic_ids_to_learn): + partially_learnt_topic_ids: List[str], + learnt_topic_ids: List[str], + topic_ids_to_learn: List[str] +) -> Tuple[List[str], List[str]]: """Returns a list of all the topic ids on the server and ids of topics not tracked for the user. @@ -1429,57 +1596,68 @@ def get_all_and_untracked_topic_ids_for_user( return all_topic_ids, untracked_topic_ids -def _get_filtered_all_topic_summaries(topic_summaries, topic_ids): +def _get_filtered_all_topic_summaries( + topic_summaries: List[Optional[topic_domain.TopicSummary]], + topic_ids: List[str] +) -> List[topic_domain.TopicSummary]: """Returns a list of summaries of the topics in the edit goals. Args: - topic_summaries: list(TopicSummary). The list of topic + topic_summaries: list(TopicSummary|None). The list of topic summary domain objects to be filtered. topic_ids: list(str). 
The ids of the topics corresponding to the topic summary domain objects. Returns: - tuple. A 2-tuple whose elements are as follows: - - list(TopicSummary). Filtered list of TopicSummary domain - objects of the topics in the edit goals. + list(TopicSummary). Filtered list of TopicSummary domain + objects of the topics in the edit goals. """ filtered_all_topic_summaries = [] - topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids) + all_topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids) for index, topic_summary in enumerate(topic_summaries): if topic_summary is not None: - if topic_rights[index].topic_is_published: + topic_rights = all_topic_rights[index] + # Ruling out the possibility of None for mypy type checking. + assert topic_rights is not None + if topic_rights.topic_is_published: filtered_all_topic_summaries.append(topic_summary) return filtered_all_topic_summaries -def _get_filtered_untracked_topic_summaries(topic_summaries, topic_ids): +def _get_filtered_untracked_topic_summaries( + topic_summaries: List[Optional[topic_domain.TopicSummary]], + topic_ids: List[str] +) -> List[topic_domain.TopicSummary]: """Returns a list of summaries of the topics not tracked for the user and the ids of topics that are no longer present. Args: - topic_summaries: list(TopicSummary). The list of topic + topic_summaries: list(TopicSummary|None). The list of topic summary domain objects to be filtered. topic_ids: list(str). The ids of the topics corresponding to the topic summary domain objects. Returns: - tuple. A 2-tuple whose elements are as follows: - - list(TopicSummary). Filtered list of TopicSummary domain - objects of the topics not tracked for the user. - - list(str). The ids of the topics that are no longer present. + list(TopicSummary). Filtered list of TopicSummary domain + objects of the topics not tracked for the user. 
""" filtered_untracked_topic_summaries = [] - topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids) + all_topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids) for index, topic_summary in enumerate(topic_summaries): if topic_summary is not None: - if topic_rights[index].topic_is_published: + topic_rights = all_topic_rights[index] + # Ruling out the possibility of None for mypy type checking. + assert topic_rights is not None + if topic_rights.topic_is_published: filtered_untracked_topic_summaries.append(topic_summary) return filtered_untracked_topic_summaries -def get_displayable_story_summary_dicts(user_id, story_summaries): +def get_displayable_story_summary_dicts( + user_id: str, story_summaries: List[story_domain.StorySummary] +) -> List[DisplayableStorySummaryDict]: """Returns a displayable summary dict of the story summaries given to it. @@ -1491,12 +1669,16 @@ def get_displayable_story_summary_dicts(user_id, story_summaries): Returns: list(dict). The summary dict corresponding to the given summary. 
""" - summary_dicts = [] + summary_dicts: List[DisplayableStorySummaryDict] = [] story_ids = [story_summary.id for story_summary in story_summaries] - stories = story_fetchers.get_stories_by_ids(story_ids) - topic_ids = [story.corresponding_topic_id for story in stories] - topics = topic_fetchers.get_topics_by_ids(topic_ids) + stories = story_fetchers.get_stories_by_ids(story_ids, strict=True) + topic_ids = [] + for story in stories: + topic_ids.append(story.corresponding_topic_id) + topics = topic_fetchers.get_topics_by_ids(topic_ids, strict=True) for index, story_summary in enumerate(story_summaries): + story = stories[index] + topic = topics[index] summary_dicts.append({ 'id': story_summary.id, 'title': story_summary.title, @@ -1507,26 +1689,27 @@ def get_displayable_story_summary_dicts(user_id, story_summaries): 'url_fragment': story_summary.url_fragment, 'story_is_published': ( story_services.is_story_published_and_present_in_topic( - stories[index])), + story)), 'completed_node_titles': [ node.title for node in ( story_fetchers.get_completed_nodes_in_story( user_id, story_summary.id))], 'all_node_dicts': [ - node.to_dict() for node in stories[index].story_contents.nodes + node.to_dict() for node in story.story_contents.nodes ], - 'topic_name': topics[index].name, - 'topic_url_fragment': topics[index].url_fragment, + 'topic_name': topic.name, + 'topic_url_fragment': topic.url_fragment, 'classroom_url_fragment': ( classroom_services.get_classroom_url_fragment_for_topic_id( - stories[index].corresponding_topic_id)) + story.corresponding_topic_id)) }) return summary_dicts def get_displayable_untracked_topic_summary_dicts( - user_id, untracked_topic_summaries): + user_id: str, untracked_topic_summaries: List[topic_domain.TopicSummary] +) -> Dict[str, List[DisplayableTopicSummaryDict]]: """Returns a displayable dict of the the topic summaries given to it. 
@@ -1540,9 +1723,11 @@ def get_displayable_untracked_topic_summary_dicts( untracked topic summaries as the value. """ - summary_dict = collections.defaultdict(list) + summary_dict: Dict[ + str, List[DisplayableTopicSummaryDict] + ] = collections.defaultdict(list) topic_ids = [topic.id for topic in untracked_topic_summaries] - topics = topic_fetchers.get_topics_by_ids(topic_ids) + topics = topic_fetchers.get_topics_by_ids(topic_ids, strict=True) for index, topic in enumerate(topics): all_skill_ids = topic.get_all_skill_ids() skill_descriptions = ( @@ -1578,7 +1763,9 @@ def get_displayable_untracked_topic_summary_dicts( return summary_dict -def get_displayable_topic_summary_dicts(user_id, topic_summaries): +def get_displayable_topic_summary_dicts( + user_id: str, topic_summaries: List[topic_domain.TopicSummary] +) -> List[DisplayableTopicSummaryDict]: """Returns a displayable summary dict of the the topic summaries given to it. @@ -1591,9 +1778,9 @@ def get_displayable_topic_summary_dicts(user_id, topic_summaries): list(dict). The summary dict corresponding to the given summaries. """ - summary_dicts = [] + summary_dicts: List[DisplayableTopicSummaryDict] = [] topic_ids = [topic.id for topic in topic_summaries] - topics = topic_fetchers.get_topics_by_ids(topic_ids) + topics = topic_fetchers.get_topics_by_ids(topic_ids, strict=True) for index, topic in enumerate(topics): all_skill_ids = topic.get_all_skill_ids() skill_descriptions = ( @@ -1627,7 +1814,9 @@ def get_displayable_topic_summary_dicts(user_id, topic_summaries): return summary_dicts -def get_collection_summary_dicts(collection_summaries): +def get_collection_summary_dicts( + collection_summaries: List[collection_domain.CollectionSummary] +) -> List[DisplayableCollectionSummaryDict]: """Returns a displayable summary dict of the the collection summaries given to it. @@ -1639,7 +1828,7 @@ def get_collection_summary_dicts(collection_summaries): list(dict). 
The summary dict objects corresponding to the given summary domain objects. """ - summary_dicts = [] + summary_dicts: List[DisplayableCollectionSummaryDict] = [] for collection_summary in collection_summaries: summary_dicts.append({ 'id': collection_summary.id, @@ -1664,7 +1853,9 @@ def get_collection_summary_dicts(collection_summaries): return summary_dicts -def get_learner_dashboard_activities(user_id): +def get_learner_dashboard_activities( + user_id: str +) -> learner_progress_domain.ActivityIdsInLearnerDashboard: """Returns the ids of each of the activities that are present in the various sections of the learner dashboard, namely the completed section, the incomplete section and the playlist section. @@ -1687,12 +1878,26 @@ def get_learner_dashboard_activities(user_id): # If completed model is present. if learner_progress_models[0][0]: + # Here assert is used to narrow down the type from Model to + # CompletedActivitiesModel. + assert isinstance( + learner_progress_models[0][0], + user_models.CompletedActivitiesModel + ) activities_completed = _get_completed_activities_from_model( learner_progress_models[0][0]) - completed_exploration_ids = activities_completed.exploration_ids - completed_collection_ids = activities_completed.collection_ids - completed_story_ids = activities_completed.story_ids - learnt_topic_ids = activities_completed.learnt_topic_ids + completed_exploration_ids: List[str] = ( + activities_completed.exploration_ids + ) + completed_collection_ids: List[str] = ( + activities_completed.collection_ids + ) + completed_story_ids: List[str] = ( + activities_completed.story_ids + ) + learnt_topic_ids: List[str] = ( + activities_completed.learnt_topic_ids + ) else: completed_collection_ids = [] completed_exploration_ids = [] @@ -1701,12 +1906,23 @@ def get_learner_dashboard_activities(user_id): # If incomplete model is present. 
if learner_progress_models[1][0]: + # Here assert is used to narrow down the type from Model to + # IncompleteActivitiesModel. + assert isinstance( + learner_progress_models[1][0], + user_models.IncompleteActivitiesModel + ) incomplete_activities = _get_incomplete_activities_from_model( learner_progress_models[1][0]) - incomplete_exploration_ids = incomplete_activities.exploration_ids - incomplete_collection_ids = incomplete_activities.collection_ids - partially_learnt_topic_ids = ( - incomplete_activities.partially_learnt_topic_ids) + incomplete_exploration_ids: List[str] = ( + incomplete_activities.exploration_ids + ) + incomplete_collection_ids: List[str] = ( + incomplete_activities.collection_ids + ) + partially_learnt_topic_ids: List[str] = ( + incomplete_activities.partially_learnt_topic_ids + ) else: incomplete_exploration_ids = [] incomplete_collection_ids = [] @@ -1714,21 +1930,33 @@ def get_learner_dashboard_activities(user_id): # If learner playlist model is present. if learner_progress_models[2][0]: + # Here assert is used to narrow down the type from Model to + # LearnerPlaylistModel. + assert isinstance( + learner_progress_models[2][0], + user_models.LearnerPlaylistModel + ) learner_playlist = ( learner_playlist_services.get_learner_playlist_from_model( learner_progress_models[2][0])) - exploration_playlist_ids = learner_playlist.exploration_ids - collection_playlist_ids = learner_playlist.collection_ids + exploration_playlist_ids: List[str] = learner_playlist.exploration_ids + collection_playlist_ids: List[str] = learner_playlist.collection_ids else: exploration_playlist_ids = [] collection_playlist_ids = [] # If learner goals model is present. if learner_progress_models[3][0]: + # Here assert is used to narrow down the type from Model to + # LearnerGoalsModel. 
+ assert isinstance( + learner_progress_models[3][0], + user_models.LearnerGoalsModel + ) learner_goals = ( learner_goals_services.get_learner_goals_from_model( learner_progress_models[3][0])) - topic_ids_to_learn = learner_goals.topic_ids_to_learn + topic_ids_to_learn: List[str] = learner_goals.topic_ids_to_learn else: topic_ids_to_learn = [] @@ -1746,7 +1974,11 @@ def get_learner_dashboard_activities(user_id): return activity_ids -def get_topics_and_stories_progress(user_id): +def get_topics_and_stories_progress( + user_id: str +) -> Tuple[ + learner_progress_domain.LearnerProgressInTopicsAndStories, Dict[str, int] +]: """Returns the progress of the learners - the stories and learnt_topics completed by the user and those in progress. @@ -1793,20 +2025,32 @@ def get_topics_and_stories_progress(user_id): ('StorySummaryModel', completed_story_ids) ])) - topic_id_to_model_dict = {} + topic_id_to_model_dict: Dict[str, topic_domain.TopicSummary] = {} for model in activity_models[0]: if model is not None: + # Here assert is used to narrow down the type of modal from Model + # to TopicSummaryModel. + assert isinstance(model, topic_models.TopicSummaryModel) topic_id_to_model_dict[model.id] = ( topic_fetchers.get_topic_summary_from_model(model)) completed_story_models = activity_models[1] + completed_story_summaries: List[Optional[story_domain.StorySummary]] = [] + for model in completed_story_models: + if model is not None: + # Here assert is used to narrow down the type of modal from Model + # to StorySummaryModel. 
+ assert isinstance(model, story_models.StorySummaryModel) + completed_story_summaries.append( + story_fetchers.get_story_summary_from_model(model) + ) + else: + completed_story_summaries.append(None) + partially_learnt_topic_summaries = ( [topic_id_to_model_dict[topic_id] if topic_id in topic_id_to_model_dict else None for topic_id in partially_learnt_topic_ids]) - completed_story_summaries = ( - [story_fetchers.get_story_summary_from_model(model) - if model else None for model in completed_story_models]) learnt_topic_summaries = ( [topic_id_to_model_dict[topic_id] if topic_id in topic_id_to_model_dict else None for topic_id in learnt_topic_ids]) @@ -1902,7 +2146,11 @@ def get_topics_and_stories_progress(user_id): number_of_nonexistent_topics_and_stories) -def get_collection_progress(user_id): +def get_collection_progress( + user_id: str +) -> Tuple[ + learner_progress_domain.LearnerProgressInCollections, Dict[str, int] +]: """Returns the progress of the learners collections completed by the user and those in progress. @@ -1937,7 +2185,9 @@ def get_collection_progress(user_id): datastore_services.fetch_multiple_entities_by_ids_and_models( [('CollectionSummaryModel', unique_collection_ids)])) - collection_id_to_model_dict = {} + collection_id_to_model_dict: Dict[ + str, collection_domain.CollectionSummary + ] = {} for model in activity_models[0]: if model is not None: collection_id_to_model_dict[model.id] = ( @@ -2014,7 +2264,11 @@ def get_collection_progress(user_id): learner_progress_in_collection, number_of_nonexistent_collections) -def get_exploration_progress(user_id): +def get_exploration_progress( + user_id: str +) -> Tuple[ + learner_progress_domain.LearnerProgressInExplorations, Dict[str, int] +]: """Returns the progress of the learners explorations completed by the user and those in progress. 
@@ -2049,7 +2303,9 @@ def get_exploration_progress(user_id): datastore_services.fetch_multiple_entities_by_ids_and_models( [('ExpSummaryModel', unique_exploration_ids)])) - exploration_id_to_model_dict = {} + exploration_id_to_model_dict: Dict[ + str, exp_domain.ExplorationSummary + ] = {} for model in activity_models[0]: if model is not None: exploration_id_to_model_dict[model.id] = ( diff --git a/core/domain/learner_progress_services_test.py b/core/domain/learner_progress_services_test.py index 68d192b4b472..5bf061f7ef85 100644 --- a/core/domain/learner_progress_services_test.py +++ b/core/domain/learner_progress_services_test.py @@ -41,37 +41,51 @@ from core.platform import models from core.tests import test_utils -(user_models,) = models.Registry.import_models([models.NAMES.user]) +from typing import Final, List, TypedDict + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) + + +class IncompleteExplorationDetailsDict(TypedDict): + """Type for the incompletely played exploration's details dictionary.""" + + timestamp: datetime.datetime + state_name: str + version: int class LearnerProgressTests(test_utils.GenericTestBase): """Test the services related to tracking the progress of the learner.""" - EXP_ID_0 = '0_en_arch_bridges_in_england' - EXP_ID_1 = '1_fi_arch_sillat_suomi' - EXP_ID_2 = '2_en_welcome_introduce_oppia' - EXP_ID_3 = '3_welcome_oppia' - EXP_ID_4 = 'exp_4' - EXP_ID_5 = 'exp_5' - EXP_ID_6 = 'exp_6' - EXP_ID_7 = 'exp_7' - COL_ID_0 = '0_arch_bridges_in_england' - COL_ID_1 = '1_welcome_introduce_oppia' - COL_ID_2 = '2_welcome_introduce_oppia_interactions' - COL_ID_3 = '3_welcome_oppia_collection' - STORY_ID_0 = 'story_0' - TOPIC_ID_0 = 'topic_0' - STORY_ID_1 = 'story_1' - STORY_ID_2 = 'story_2' - STORY_ID_3 = 'story_3' - TOPIC_ID_1 = 'topic_1' - TOPIC_ID_2 = 'topic_2' - TOPIC_ID_3 = 'topic_3' - USER_EMAIL = 'user@example.com' - USER_USERNAME = 'user' - 
- def setUp(self): - super(LearnerProgressTests, self).setUp() + EXP_ID_0: Final = '0_en_arch_bridges_in_england' + EXP_ID_1: Final = '1_fi_arch_sillat_suomi' + EXP_ID_2: Final = '2_en_welcome_introduce_oppia' + EXP_ID_3: Final = '3_welcome_oppia' + EXP_ID_4: Final = 'exp_4' + EXP_ID_5: Final = 'exp_5' + EXP_ID_6: Final = 'exp_6' + EXP_ID_7: Final = 'exp_7' + COL_ID_0: Final = '0_arch_bridges_in_england' + COL_ID_1: Final = '1_welcome_introduce_oppia' + COL_ID_2: Final = '2_welcome_introduce_oppia_interactions' + COL_ID_3: Final = '3_welcome_oppia_collection' + STORY_ID_0: Final = 'story_0' + TOPIC_ID_0: Final = 'topic_0' + STORY_ID_1: Final = 'story_1' + STORY_ID_2: Final = 'story_2' + STORY_ID_3: Final = 'story_3' + TOPIC_ID_1: Final = 'topic_1' + TOPIC_ID_2: Final = 'topic_2' + TOPIC_ID_3: Final = 'topic_3' + USER_EMAIL: Final = 'user@example.com' + USER_USERNAME: Final = 'user' + + def setUp(self) -> None: + super().setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) @@ -139,7 +153,7 @@ def setUp(self): # Save new topics and stories. 
topic = topic_domain.Topic.create_default_topic( - self.TOPIC_ID_0, 'topic', 'abbrev', 'description') + self.TOPIC_ID_0, 'topic', 'abbrev', 'description', 'fragm') topic.thumbnail_filename = 'thumbnail.svg' topic.thumbnail_bg_color = '#C6DCDA' topic.subtopics = [ @@ -148,6 +162,7 @@ def setUp(self): constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-url')] topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, self.TOPIC_ID_0)) @@ -156,7 +171,8 @@ def setUp(self): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'dummy-fragment' })] ) topic_services.save_new_topic(self.owner_id, topic) @@ -182,7 +198,7 @@ def setUp(self): self.owner_id, self.STORY_ID_0, changelist, 'Added node.') topic = topic_domain.Topic.create_default_topic( - self.TOPIC_ID_1, 'topic 1', 'abbrev-one', 'description 1') + self.TOPIC_ID_1, 'topic 1', 'abbrev-one', 'description 1', 'fragm') topic.thumbnail_filename = 'thumbnail.svg' topic.thumbnail_bg_color = '#C6DCDA' topic.subtopics = [ @@ -191,6 +207,7 @@ def setUp(self): constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-url-one')] topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, self.TOPIC_ID_1)) @@ -199,7 +216,8 @@ def setUp(self): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'fragment' })] ) topic_services.save_new_topic(self.owner_id, topic) @@ -226,7 +244,7 @@ def setUp(self): self.owner_id, self.STORY_ID_1, changelist, 'Added Node 1.') topic = topic_domain.Topic.create_default_topic( - self.TOPIC_ID_2, 'topic 2', 'abbrev-two', 'description 2') + self.TOPIC_ID_2, 'topic 2', 
'abbrev-two', 'description 2', 'fragm') topic.thumbnail_filename = 'thumbnail.svg' topic.thumbnail_bg_color = '#C6DCDA' topic.subtopics = [ @@ -235,6 +253,7 @@ def setUp(self): constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-url-one')] topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, self.TOPIC_ID_2)) @@ -243,7 +262,8 @@ def setUp(self): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'sample-fragment' })] ) topic_services.save_new_topic(self.owner_id, topic) @@ -252,7 +272,8 @@ def setUp(self): self.owner_id, self.TOPIC_ID_2, self.STORY_ID_2) topic = topic_domain.Topic.create_default_topic( - self.TOPIC_ID_3, 'topic 3', 'abbrev-three', 'description 3') + self.TOPIC_ID_3, 'topic 3', 'abbrev-three', 'description 3', + 'fragm') topic.thumbnail_filename = 'thumbnail.svg' topic.thumbnail_bg_color = '#C6DCDA' topic.subtopics = [ @@ -261,6 +282,7 @@ def setUp(self): constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-url-one')] topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, self.TOPIC_ID_3)) @@ -269,7 +291,8 @@ def setUp(self): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'sample-fragment' })] ) topic_services.save_new_topic(self.owner_id, topic) @@ -294,7 +317,7 @@ def setUp(self): self.TOPIC_ID_3, self.STORY_ID_3, self.admin_id) topic_services.publish_topic(self.TOPIC_ID_3, self.admin_id) - def _get_all_completed_exp_ids(self, user_id): + def _get_all_completed_exp_ids(self, user_id: str) -> List[str]: """Gets the ids of all the explorations completed by the learner corresponding to the given 
user id. """ @@ -302,11 +325,18 @@ def _get_all_completed_exp_ids(self, user_id): user_models.CompletedActivitiesModel.get( user_id, strict=False)) - return ( - completed_activities_model.exploration_ids if - completed_activities_model else []) - - def _get_all_completed_collection_ids(self, user_id): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if completed_activities_model: + exploration_ids: List[str] = ( + completed_activities_model.exploration_ids + ) + return exploration_ids + else: + return [] + + def _get_all_completed_collection_ids(self, user_id: str) -> List[str]: """Gets the ids of all the collections completed by the learner corresponding to the given user id. """ @@ -314,11 +344,18 @@ def _get_all_completed_collection_ids(self, user_id): user_models.CompletedActivitiesModel.get( user_id, strict=False)) - return ( - completed_activities_model.collection_ids if - completed_activities_model else []) - - def _get_all_completed_story_ids(self, user_id): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if completed_activities_model: + collection_ids: List[str] = ( + completed_activities_model.collection_ids + ) + return collection_ids + else: + return [] + + def _get_all_completed_story_ids(self, user_id: str) -> List[str]: """Gets the ids of all the stories completed by the learner corresponding to the given user id. 
""" @@ -326,11 +363,16 @@ def _get_all_completed_story_ids(self, user_id): user_models.CompletedActivitiesModel.get( user_id, strict=False)) - return ( - completed_activities_model.story_ids if - completed_activities_model else []) + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if completed_activities_model: + story_ids: List[str] = completed_activities_model.story_ids + return story_ids + else: + return [] - def _get_all_learnt_topic_ids(self, user_id): + def _get_all_learnt_topic_ids(self, user_id: str) -> List[str]: """Gets the ids of all the topics learnt by the learner corresponding to the given user id. """ @@ -338,22 +380,38 @@ def _get_all_learnt_topic_ids(self, user_id): user_models.CompletedActivitiesModel.get( user_id, strict=False)) - return ( - completed_activities_model.learnt_topic_ids if - completed_activities_model else []) - - def _get_all_incomplete_exp_ids(self, user_id): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if completed_activities_model: + learnt_topic_ids: List[str] = ( + completed_activities_model.learnt_topic_ids + ) + return learnt_topic_ids + else: + return [] + + def _get_all_incomplete_exp_ids(self, user_id: str) -> List[str]: """Gets the ids of all the explorations not fully completed by the learner corresponding to the given user id. """ incomplete_activities_model = ( user_models.IncompleteActivitiesModel.get(user_id, strict=False)) - return ( - incomplete_activities_model.exploration_ids if - incomplete_activities_model else []) - - def _get_incomplete_exp_details(self, user_id, exploration_id): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. 
Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if incomplete_activities_model: + exploration_ids: List[str] = ( + incomplete_activities_model.exploration_ids + ) + return exploration_ids + else: + return [] + + def _get_incomplete_exp_details( + self, user_id: str, exploration_id: str + ) -> IncompleteExplorationDetailsDict: """Returns the dict containing all the exploration details that are incompletely played by the learner corresponding to the given user id. """ @@ -361,6 +419,8 @@ def _get_incomplete_exp_details(self, user_id, exploration_id): user_models.ExpUserLastPlaythroughModel.get( user_id, exploration_id)) + # Ruling out the possibility of None for mypy type checking. + assert incomplete_exploration_user_model is not None return { 'timestamp': ( incomplete_exploration_user_model.last_updated), @@ -370,7 +430,10 @@ def _get_incomplete_exp_details(self, user_id, exploration_id): } def _check_if_exp_details_match( - self, actual_details, details_fetched_from_model): + self, + actual_details: IncompleteExplorationDetailsDict, + details_fetched_from_model: IncompleteExplorationDetailsDict + ) -> None: """Verifies the exploration details fetched from the model matches the actual details. """ @@ -388,40 +451,59 @@ def _check_if_exp_details_match( actual_details['timestamp'] - details_fetched_from_model['timestamp']).total_seconds(), 10) - def _get_all_incomplete_collection_ids(self, user_id): + def _get_all_incomplete_collection_ids(self, user_id: str) -> List[str]: """Returns the list of all the collection ids that are incompletely played by the learner corresponding to the given user id. 
""" incomplete_activities_model = ( user_models.IncompleteActivitiesModel.get(user_id, strict=False)) - return ( - incomplete_activities_model.collection_ids if - incomplete_activities_model else []) - - def _get_all_incomplete_story_ids(self, user_id): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if incomplete_activities_model: + collection_ids: List[str] = ( + incomplete_activities_model.collection_ids + ) + return collection_ids + else: + return [] + + def _get_all_incomplete_story_ids(self, user_id: str) -> List[str]: """Returns the list of all the story ids that are incompletely played by the learner corresponding to the given user id. """ incomplete_activities_model = ( user_models.IncompleteActivitiesModel.get(user_id, strict=False)) - return ( - incomplete_activities_model.story_ids if - incomplete_activities_model else []) + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if incomplete_activities_model: + story_ids: List[str] = incomplete_activities_model.story_ids + return story_ids + else: + return [] - def _get_all_partially_learnt_topic_ids(self, user_id): + def _get_all_partially_learnt_topic_ids(self, user_id: str) -> List[str]: """Returns the list of all the topics ids that are partially learnt by the learner corresponding to the given user id. """ incomplete_activities_model = ( user_models.IncompleteActivitiesModel.get(user_id, strict=False)) - return ( - incomplete_activities_model.partially_learnt_topic_ids if - incomplete_activities_model else []) - - def test_mark_exploration_as_completed(self): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. 
Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if incomplete_activities_model: + learnt_topic_ids: List[str] = ( + incomplete_activities_model.partially_learnt_topic_ids + ) + return learnt_topic_ids + else: + return [] + + def test_mark_exploration_as_completed(self) -> None: self.assertEqual(self._get_all_completed_exp_ids(self.user_id), []) # Add an exploration to the completed list of a learner. @@ -477,7 +559,7 @@ def test_mark_exploration_as_completed(self): self.assertEqual(self._get_all_completed_exp_ids( self.user_id), [self.EXP_ID_0, self.EXP_ID_1, self.EXP_ID_3]) - def test_mark_collection_as_completed(self): + def test_mark_collection_as_completed(self) -> None: self.assertEqual( self._get_all_completed_collection_ids(self.user_id), []) @@ -533,7 +615,7 @@ def test_mark_collection_as_completed(self): self.assertEqual(self._get_all_completed_collection_ids( self.user_id), [self.COL_ID_0, self.COL_ID_1, self.COL_ID_3]) - def test_mark_story_as_completed(self): + def test_mark_story_as_completed(self) -> None: self.assertEqual( self._get_all_completed_story_ids(self.user_id), []) @@ -565,7 +647,7 @@ def test_mark_story_as_completed(self): self.assertEqual(self._get_all_completed_story_ids( self.user_id), [self.STORY_ID_0, self.STORY_ID_1]) - def test_mark_topic_as_learnt(self): + def test_mark_topic_as_learnt(self) -> None: self.assertEqual( self._get_all_learnt_topic_ids(self.user_id), []) @@ -609,14 +691,14 @@ def test_mark_topic_as_learnt(self): learner_goals_services.get_all_topic_ids_to_learn( self.user_id), []) - def test_mark_exploration_as_incomplete(self): + def test_mark_exploration_as_incomplete(self) -> None: self.assertEqual(self._get_all_incomplete_exp_ids( self.user_id), []) state_name = u'state name' version = 1 - exp_details = { + exp_details: IncompleteExplorationDetailsDict = { 'timestamp': datetime.datetime.utcnow(), 'state_name': state_name, 'version': 
version @@ -634,7 +716,7 @@ def test_mark_exploration_as_incomplete(self): state_name = u'new_state_name' version = 2 - modified_exp_details = { + modified_exp_details: IncompleteExplorationDetailsDict = { 'timestamp': datetime.datetime.utcnow(), 'state_name': state_name, 'version': version @@ -682,7 +764,7 @@ def test_mark_exploration_as_incomplete(self): self.assertEqual(self._get_all_incomplete_exp_ids( self.user_id), [self.EXP_ID_0, self.EXP_ID_3]) - def test_mark_collection_as_incomplete(self): + def test_mark_collection_as_incomplete(self) -> None: self.assertEqual(self._get_all_incomplete_collection_ids( self.user_id), []) @@ -731,7 +813,7 @@ def test_mark_collection_as_incomplete(self): self.assertEqual(self._get_all_incomplete_collection_ids( self.user_id), [self.COL_ID_0, self.COL_ID_3]) - def test_record_story_started(self): + def test_record_story_started(self) -> None: self.assertEqual(self._get_all_incomplete_story_ids( self.user_id), []) @@ -756,7 +838,7 @@ def test_record_story_started(self): self.assertEqual(self._get_all_incomplete_story_ids( self.user_id), [self.STORY_ID_0]) - def test_record_topic_started(self): + def test_record_topic_started(self) -> None: self.assertEqual(self._get_all_partially_learnt_topic_ids( self.user_id), []) @@ -781,12 +863,12 @@ def test_record_topic_started(self): self.assertEqual(self._get_all_partially_learnt_topic_ids( self.user_id), [self.TOPIC_ID_0]) - def test_remove_exp_from_incomplete_list(self): + def test_remove_exp_from_incomplete_list(self) -> None: self.assertEqual(self._get_all_incomplete_exp_ids( self.user_id), []) - state_name = 'state name' - version = 1 + state_name: str = 'state name' + version: int = 1 # Add incomplete explorations. 
learner_progress_services.mark_exploration_as_incomplete( @@ -814,7 +896,7 @@ def test_remove_exp_from_incomplete_list(self): self.assertEqual(self._get_all_incomplete_exp_ids( self.user_id), []) - def test_remove_collection_from_incomplete_list(self): + def test_remove_collection_from_incomplete_list(self) -> None: self.assertEqual(self._get_all_incomplete_collection_ids( self.user_id), []) @@ -844,7 +926,7 @@ def test_remove_collection_from_incomplete_list(self): self.assertEqual(self._get_all_incomplete_collection_ids( self.user_id), []) - def test_remove_story_from_incomplete_list(self): + def test_remove_story_from_incomplete_list(self) -> None: self.assertEqual(self._get_all_incomplete_story_ids( self.user_id), []) @@ -874,7 +956,7 @@ def test_remove_story_from_incomplete_list(self): self.assertEqual(self._get_all_incomplete_story_ids( self.user_id), []) - def test_remove_topic_from_partially_learnt_list(self): + def test_remove_topic_from_partially_learnt_list(self) -> None: self.assertEqual(self._get_all_partially_learnt_topic_ids( self.user_id), []) @@ -904,7 +986,7 @@ def test_remove_topic_from_partially_learnt_list(self): self.assertEqual(self._get_all_partially_learnt_topic_ids( self.user_id), []) - def test_remove_story_from_completed_list(self): + def test_remove_story_from_completed_list(self) -> None: self.assertEqual(self._get_all_completed_story_ids( self.user_id), []) @@ -934,7 +1016,7 @@ def test_remove_story_from_completed_list(self): self.assertEqual(self._get_all_completed_story_ids( self.user_id), []) - def test_remove_topic_from_learnt_list(self): + def test_remove_topic_from_learnt_list(self) -> None: self.assertEqual(self._get_all_learnt_topic_ids( self.user_id), []) @@ -964,7 +1046,7 @@ def test_remove_topic_from_learnt_list(self): self.assertEqual(self._get_all_learnt_topic_ids( self.user_id), []) - def test_get_all_completed_exp_ids(self): + def test_get_all_completed_exp_ids(self) -> None: 
self.assertEqual(learner_progress_services.get_all_completed_exp_ids( self.user_id), []) @@ -980,7 +1062,7 @@ def test_get_all_completed_exp_ids(self): self.assertEqual(learner_progress_services.get_all_completed_exp_ids( self.user_id), [self.EXP_ID_0, self.EXP_ID_1]) - def test_unpublishing_completed_exploration_filters_it_out(self): + def test_unpublishing_completed_exploration_filters_it_out(self) -> None: # Add explorations to the completed list. learner_progress_services.mark_exploration_as_completed( self.user_id, self.EXP_ID_0) @@ -1016,7 +1098,9 @@ def test_unpublishing_completed_exploration_filters_it_out(self): completed_exp_summaries[1].id, '1_fi_arch_sillat_suomi') self.assertEqual(len(completed_exp_summaries), 2) - def test_republishing_completed_exploration_filters_as_complete(self): + def test_republishing_completed_exploration_filters_as_complete( + self + ) -> None: # Add exploration to the completed list. learner_progress_services.mark_exploration_as_completed( self.user_id, self.EXP_ID_0) @@ -1061,7 +1145,7 @@ def test_republishing_completed_exploration_filters_as_complete(self): completed_exp_summaries[0].id, '0_en_arch_bridges_in_england') self.assertEqual(len(completed_exp_summaries), 1) - def test_get_all_completed_collection_ids(self): + def test_get_all_completed_collection_ids(self) -> None: self.assertEqual( learner_progress_services.get_all_completed_collection_ids( self.user_id), []) @@ -1080,7 +1164,7 @@ def test_get_all_completed_collection_ids(self): learner_progress_services.get_all_completed_collection_ids( self.user_id), [self.COL_ID_0, self.COL_ID_1]) - def test_get_all_completed_story_ids(self): + def test_get_all_completed_story_ids(self) -> None: self.assertEqual( learner_progress_services.get_all_completed_story_ids( self.user_id), []) @@ -1099,7 +1183,7 @@ def test_get_all_completed_story_ids(self): learner_progress_services.get_all_completed_story_ids( self.user_id), [self.STORY_ID_0, self.STORY_ID_1]) - def 
test_get_all_learnt_topic_ids(self): + def test_get_all_learnt_topic_ids(self) -> None: self.assertEqual( learner_progress_services.get_all_learnt_topic_ids( self.user_id), []) @@ -1118,7 +1202,7 @@ def test_get_all_learnt_topic_ids(self): learner_progress_services.get_all_learnt_topic_ids( self.user_id), [self.TOPIC_ID_0, self.TOPIC_ID_1]) - def test_unpublishing_completed_collection_filters_it_out(self): + def test_unpublishing_completed_collection_filters_it_out(self) -> None: # Add collections to the completed list. learner_progress_services.mark_collection_as_completed( self.user_id, self.COL_ID_0) @@ -1135,6 +1219,8 @@ def test_unpublishing_completed_collection_filters_it_out(self): rights_manager.unpublish_collection(system_user, self.COL_ID_3) private_collection = collection_services.get_collection_summary_by_id( self.COL_ID_3) + # Ruling out the possibility of None for mypy type checking. + assert private_collection is not None self.assertEqual( private_collection.status, constants.ACTIVITY_STATUS_PRIVATE) @@ -1154,7 +1240,9 @@ def test_unpublishing_completed_collection_filters_it_out(self): completed_collection_summaries[1].id, '1_welcome_introduce_oppia') self.assertEqual(len(completed_collection_summaries), 2) - def test_republishing_completed_collection_filters_as_complete(self): + def test_republishing_completed_collection_filters_as_complete( + self + ) -> None: # Add collection to the completed list. learner_progress_services.mark_collection_as_completed( self.user_id, self.COL_ID_0) @@ -1167,6 +1255,8 @@ def test_republishing_completed_collection_filters_as_complete(self): rights_manager.unpublish_collection(system_user, self.COL_ID_0) private_collection = collection_services.get_collection_summary_by_id( self.COL_ID_0) + # Ruling out the possibility of None for mypy type checking. 
+ assert private_collection is not None self.assertEqual( private_collection.status, constants.ACTIVITY_STATUS_PRIVATE) @@ -1185,6 +1275,8 @@ def test_republishing_completed_collection_filters_as_complete(self): self.user_id, self.COL_ID_0) public_collection = collection_services.get_collection_summary_by_id( self.COL_ID_0) + # Ruling out the possibility of None for mypy type checking. + assert public_collection is not None self.assertEqual( public_collection.status, constants.ACTIVITY_STATUS_PUBLIC) @@ -1199,7 +1291,7 @@ def test_republishing_completed_collection_filters_as_complete(self): completed_collection_summaries[0].id, '0_arch_bridges_in_england') self.assertEqual(len(completed_collection_summaries), 1) - def test_unpublishing_completed_story_filters_it_out(self): + def test_unpublishing_completed_story_filters_it_out(self) -> None: # Add stories to the completed list. story_services.record_completed_node_in_story_context( self.user_id, self.STORY_ID_0, 'node_1') @@ -1232,7 +1324,7 @@ def test_unpublishing_completed_story_filters_it_out(self): completed_story_summaries[0].id, self.STORY_ID_0) self.assertEqual(len(completed_story_summaries), 1) - def test_unpublishing_learnt_topic_filters_it_out(self): + def test_unpublishing_learnt_topic_filters_it_out(self) -> None: # Add topics to the learnt list. story_services.record_completed_node_in_story_context( self.user_id, self.STORY_ID_0, 'node_1') @@ -1271,7 +1363,7 @@ def test_unpublishing_learnt_topic_filters_it_out(self): learnt_topic_summaries[0].id, self.TOPIC_ID_0) self.assertEqual(len(learnt_topic_summaries), 1) - def test_deleting_a_story_filters_it_out_from_completed_list(self): + def test_deleting_a_story_filters_it_out_from_completed_list(self) -> None: # Add stories to the completed list. 
story_services.record_completed_node_in_story_context( self.user_id, self.STORY_ID_0, 'node_1') @@ -1303,7 +1395,7 @@ def test_deleting_a_story_filters_it_out_from_completed_list(self): completed_story_summaries[0].id, self.STORY_ID_0) self.assertEqual(len(completed_story_summaries), 1) - def test_deleting_a_topic_filters_it_out_from_learnt_list(self): + def test_deleting_a_topic_filters_it_out_from_learnt_list(self) -> None: # Add topics to the learnt list. story_services.record_completed_node_in_story_context( self.user_id, self.STORY_ID_0, 'node_1') @@ -1339,7 +1431,7 @@ def test_deleting_a_topic_filters_it_out_from_learnt_list(self): learnt_topic_summaries[0].id, self.TOPIC_ID_0) self.assertEqual(len(learnt_topic_summaries), 1) - def test_get_all_incomplete_exp_ids(self): + def test_get_all_incomplete_exp_ids(self) -> None: self.assertEqual( learner_progress_services.get_all_incomplete_exp_ids( self.user_id), []) @@ -1361,7 +1453,7 @@ def test_get_all_incomplete_exp_ids(self): learner_progress_services.get_all_incomplete_exp_ids( self.user_id), [self.EXP_ID_0, self.EXP_ID_1]) - def test_unpublishing_incomplete_exploration_filters_it_out(self): + def test_unpublishing_incomplete_exploration_filters_it_out(self) -> None: state_name = 'state name' version = 1 @@ -1400,7 +1492,9 @@ def test_unpublishing_incomplete_exploration_filters_it_out(self): incomplete_exp_summaries[1].id, '1_fi_arch_sillat_suomi') self.assertEqual(len(incomplete_exp_summaries), 2) - def test_republishing_incomplete_exploration_filters_as_incomplete(self): + def test_republishing_incomplete_exploration_filters_as_incomplete( + self + ) -> None: state_name = 'state name' version = 1 @@ -1448,7 +1542,7 @@ def test_republishing_incomplete_exploration_filters_as_incomplete(self): incomplete_exp_summaries[0].id, '0_en_arch_bridges_in_england') self.assertEqual(len(incomplete_exp_summaries), 1) - def test_get_all_incomplete_collection_ids(self): + def test_get_all_incomplete_collection_ids(self) -> 
None: self.assertEqual( learner_progress_services.get_all_incomplete_collection_ids( self.user_id), []) @@ -1467,7 +1561,7 @@ def test_get_all_incomplete_collection_ids(self): learner_progress_services.get_all_incomplete_collection_ids( self.user_id), [self.COL_ID_0, self.COL_ID_1]) - def test_get_all_incomplete_story_ids(self): + def test_get_all_incomplete_story_ids(self) -> None: self.assertEqual( learner_progress_services.get_all_incomplete_story_ids( self.user_id), []) @@ -1486,7 +1580,7 @@ def test_get_all_incomplete_story_ids(self): learner_progress_services.get_all_incomplete_story_ids( self.user_id), [self.STORY_ID_0, self.STORY_ID_1]) - def test_get_all_partially_learnt_topic_ids(self): + def test_get_all_partially_learnt_topic_ids(self) -> None: self.assertEqual( learner_progress_services.get_all_partially_learnt_topic_ids( self.user_id), []) @@ -1505,7 +1599,7 @@ def test_get_all_partially_learnt_topic_ids(self): learner_progress_services.get_all_partially_learnt_topic_ids( self.user_id), [self.TOPIC_ID_0, self.TOPIC_ID_1]) - def test_get_all_and_untracked_topic_ids(self): + def test_get_all_and_untracked_topic_ids(self) -> None: # Add topics to config_domain. self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) @@ -1583,7 +1677,7 @@ def test_get_all_and_untracked_topic_ids(self): self.assertEqual(len(all_topics), 2) self.assertEqual(len(untracked_topics), 0) - def test_unpublishing_incomplete_collection_filters_it_out(self): + def test_unpublishing_incomplete_collection_filters_it_out(self) -> None: # Add collections to the incomplete list. learner_progress_services.mark_collection_as_incomplete( self.user_id, self.COL_ID_0) @@ -1600,6 +1694,8 @@ def test_unpublishing_incomplete_collection_filters_it_out(self): rights_manager.unpublish_collection(system_user, self.COL_ID_3) private_collection = collection_services.get_collection_summary_by_id( self.COL_ID_3) + # Ruling out the possibility of None for mypy type checking. 
+ assert private_collection is not None self.assertEqual( private_collection.status, constants.ACTIVITY_STATUS_PRIVATE) @@ -1619,7 +1715,9 @@ def test_unpublishing_incomplete_collection_filters_it_out(self): incomplete_collection_summaries[1].id, '1_welcome_introduce_oppia') self.assertEqual(len(incomplete_collection_summaries), 2) - def test_republishing_incomplete_collection_filters_as_incomplete(self): + def test_republishing_incomplete_collection_filters_as_incomplete( + self + ) -> None: # Add collection to the incomplete list. learner_progress_services.mark_collection_as_incomplete( self.user_id, self.COL_ID_0) @@ -1632,6 +1730,8 @@ def test_republishing_incomplete_collection_filters_as_incomplete(self): rights_manager.unpublish_collection(system_user, self.COL_ID_0) private_collection = collection_services.get_collection_summary_by_id( self.COL_ID_0) + # Ruling out the possibility of None for mypy type checking. + assert private_collection is not None self.assertEqual( private_collection.status, constants.ACTIVITY_STATUS_PRIVATE) @@ -1650,6 +1750,8 @@ def test_republishing_incomplete_collection_filters_as_incomplete(self): self.user_id, self.COL_ID_0) public_collection = collection_services.get_collection_summary_by_id( self.COL_ID_0) + # Ruling out the possibility of None for mypy type checking. + assert public_collection is not None self.assertEqual( public_collection.status, constants.ACTIVITY_STATUS_PUBLIC) @@ -1664,7 +1766,7 @@ def test_republishing_incomplete_collection_filters_as_incomplete(self): incomplete_collection_summaries[0].id, '0_arch_bridges_in_england') self.assertEqual(len(incomplete_collection_summaries), 1) - def test_unpublishing_partially_learnt_topic_filters_it_out(self): + def test_unpublishing_partially_learnt_topic_filters_it_out(self) -> None: # Add topics to the partially learnt list. 
learner_progress_services.record_topic_started( self.user_id, self.TOPIC_ID_0) @@ -1695,7 +1797,9 @@ def test_unpublishing_partially_learnt_topic_filters_it_out(self): partially_learnt_topic_summaries[0].id, self.TOPIC_ID_0) self.assertEqual(len(partially_learnt_topic_summaries), 1) - def test_republishing_partially_learnt_topic_filters_as_incomplete(self): + def test_republishing_partially_learnt_topic_filters_as_incomplete( + self + ) -> None: # Add topic to the partially learnt list. learner_progress_services.record_topic_started( self.user_id, self.TOPIC_ID_0) @@ -1742,7 +1846,8 @@ def test_republishing_partially_learnt_topic_filters_as_incomplete(self): self.assertEqual(len(partially_learnt_topic_summaries), 1) def test_removes_a_topic_from_topics_to_learn_list_when_topic_is_learnt( - self): + self + ) -> None: self.assertEqual( learner_goals_services.get_all_topic_ids_to_learn( self.user_id), []) @@ -1769,7 +1874,9 @@ def test_removes_a_topic_from_topics_to_learn_list_when_topic_is_learnt( # Test that topics to learn doesn't include completed topic. self.assertEqual(len(topics_to_learn), 0) - def test_unpublishing_topic_filters_it_out_from_topics_to_learn(self): + def test_unpublishing_topic_filters_it_out_from_topics_to_learn( + self + ) -> None: # Add topics to learn section of the learner goals. learner_progress_services.validate_and_add_topic_to_learn_goal( self.user_id, self.TOPIC_ID_0) @@ -1795,7 +1902,9 @@ def test_unpublishing_topic_filters_it_out_from_topics_to_learn(self): topics_to_learn[0].id, 'topic_1') self.assertEqual(len(topics_to_learn), 1) - def test_unpublishing_exploration_filters_it_out_from_playlist(self): + def test_unpublishing_exploration_filters_it_out_from_playlist( + self + ) -> None: # Add activities to the playlist section. 
learner_progress_services.add_exp_to_learner_playlist( self.user_id, self.EXP_ID_0) @@ -1825,7 +1934,9 @@ def test_unpublishing_exploration_filters_it_out_from_playlist(self): exploration_playlist[0].id, '0_en_arch_bridges_in_england') self.assertEqual(len(exploration_playlist), 1) - def test_republishing_exploration_keeps_it_in_exploration_playlist(self): + def test_republishing_exploration_keeps_it_in_exploration_playlist( + self + ) -> None: # Add activity to the playlist section. learner_progress_services.add_exp_to_learner_playlist( self.user_id, self.EXP_ID_0) @@ -1870,7 +1981,7 @@ def test_republishing_exploration_keeps_it_in_exploration_playlist(self): exploration_playlist[0].id, '0_en_arch_bridges_in_england') self.assertEqual(len(exploration_playlist), 1) - def test_unpublishing_collection_filters_it_out_from_playlist(self): + def test_unpublishing_collection_filters_it_out_from_playlist(self) -> None: # Add activities to the playlist section. learner_progress_services.add_collection_to_learner_playlist( self.user_id, self.COL_ID_0) @@ -1885,6 +1996,8 @@ def test_unpublishing_collection_filters_it_out_from_playlist(self): rights_manager.unpublish_collection(system_user, self.COL_ID_1) private_collection = collection_services.get_collection_summary_by_id( self.COL_ID_1) + # Ruling out the possibility of None for mypy type checking. + assert private_collection is not None self.assertEqual( private_collection.status, constants.ACTIVITY_STATUS_PRIVATE) @@ -1900,7 +2013,9 @@ def test_unpublishing_collection_filters_it_out_from_playlist(self): collection_playlist[0].id, '0_arch_bridges_in_england') self.assertEqual(len(collection_playlist), 1) - def test_republishing_collection_keeps_it_in_collection_playlist(self): + def test_republishing_collection_keeps_it_in_collection_playlist( + self + ) -> None: # Add activity to the playlist section. 
learner_progress_services.add_collection_to_learner_playlist( self.user_id, self.COL_ID_0) @@ -1913,6 +2028,8 @@ def test_republishing_collection_keeps_it_in_collection_playlist(self): rights_manager.unpublish_collection(system_user, self.COL_ID_0) private_collection = collection_services.get_collection_summary_by_id( self.COL_ID_0) + # Ruling out the possibility of None for mypy type checking. + assert private_collection is not None self.assertEqual( private_collection.status, constants.ACTIVITY_STATUS_PRIVATE) @@ -1931,6 +2048,8 @@ def test_republishing_collection_keeps_it_in_collection_playlist(self): self.user_id, self.COL_ID_0) public_collection = collection_services.get_collection_summary_by_id( self.COL_ID_0) + # Ruling out the possibility of None for mypy type checking. + assert public_collection is not None self.assertEqual( public_collection.status, constants.ACTIVITY_STATUS_PUBLIC) @@ -1945,7 +2064,7 @@ def test_republishing_collection_keeps_it_in_collection_playlist(self): collection_playlist[0].id, '0_arch_bridges_in_england') self.assertEqual(len(collection_playlist), 1) - def test_get_ids_of_activities_in_learner_dashboard(self): + def test_get_ids_of_activities_in_learner_dashboard(self) -> None: # Add activities to the completed section. learner_progress_services.mark_exploration_as_completed( self.user_id, self.EXP_ID_0) @@ -2004,7 +2123,7 @@ def test_get_ids_of_activities_in_learner_dashboard(self): self.assertEqual( activity_ids.collection_playlist_ids, [self.COL_ID_3]) - def test_get_all_activity_progress(self): + def test_get_all_activity_progress(self) -> None: # Add topics to config_domain. 
self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) diff --git a/core/domain/moderator_services.py b/core/domain/moderator_services.py index f36e93279946..df7f12554115 100644 --- a/core/domain/moderator_services.py +++ b/core/domain/moderator_services.py @@ -23,7 +23,10 @@ def enqueue_flag_exploration_email_task( - exploration_id, report_text, reporter_id): + exploration_id: str, + report_text: str, + reporter_id: str +) -> None: """Adds a 'send flagged exploration email' task into taskqueue.""" payload = { 'exploration_id': exploration_id, diff --git a/core/domain/moderator_services_test.py b/core/domain/moderator_services_test.py index 84750227f500..711c9261a206 100644 --- a/core/domain/moderator_services_test.py +++ b/core/domain/moderator_services_test.py @@ -26,8 +26,8 @@ class FlagExplorationEmailEnqueueTaskTests(test_utils.EmailTestBase): """Test that flag-exploration-email-tasks works as expected.""" - def setUp(self): - super(FlagExplorationEmailEnqueueTaskTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) @@ -49,7 +49,7 @@ def setUp(self): self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True) - def test_that_flag_exploration_emails_are_correct(self): + def test_that_flag_exploration_emails_are_correct(self) -> None: expected_email_html_body = ( 'Hello Moderator,
    ' diff --git a/core/domain/object_registry.py b/core/domain/object_registry.py index 66332658f0a3..82450c3cfa9c 100644 --- a/core/domain/object_registry.py +++ b/core/domain/object_registry.py @@ -20,19 +20,25 @@ import inspect import json +from core import constants from core import feconf -from core import python_utils from extensions.objects.models import objects +from typing import Dict, List, Optional, Type, Union + +AllowedDefaultValueTypes = Union[ + str, int, float, bool, List[str], Dict[str, Optional[str]] +] + class Registry: """Registry of all objects.""" # Dict mapping object class names to their classes. - objects_dict = {} + objects_dict: Dict[str, Type[objects.BaseObject]] = {} @classmethod - def _refresh_registry(cls): + def _refresh_registry(cls) -> None: """Refreshes the registry by adding new object classes to the registry. """ @@ -51,13 +57,15 @@ def _refresh_registry(cls): cls.objects_dict[clazz.__name__] = clazz @classmethod - def get_all_object_classes(cls): + def get_all_object_classes(cls) -> Dict[str, Type[objects.BaseObject]]: """Get the dict of all object classes.""" cls._refresh_registry() return copy.deepcopy(cls.objects_dict) @classmethod - def get_object_class_by_type(cls, obj_type): + def get_object_class_by_type( + cls, obj_type: str + ) -> Type[objects.BaseObject]: """Gets an object class by its type. Types are CamelCased. Refreshes once if the class is not found; subsequently, throws an @@ -70,9 +78,13 @@ def get_object_class_by_type(cls, obj_type): return cls.objects_dict[obj_type] -def get_default_object_values(): +def get_default_object_values() -> Dict[str, AllowedDefaultValueTypes]: """Returns a dictionary containing the default object values.""" # TODO(wxy): Cache this as it is accessed many times. 
- return json.loads(python_utils.get_package_file_contents( - 'extensions', feconf.OBJECT_DEFAULT_VALUES_EXTENSIONS_MODULE_PATH)) + default_object_values: Dict[str, AllowedDefaultValueTypes] = json.loads( + constants.get_package_file_contents( + 'extensions', feconf.OBJECT_DEFAULT_VALUES_EXTENSIONS_MODULE_PATH + ) + ) + return default_object_values diff --git a/core/domain/object_registry_test.py b/core/domain/object_registry_test.py index d8dec8b03e60..479ae4f14b5b 100644 --- a/core/domain/object_registry_test.py +++ b/core/domain/object_registry_test.py @@ -25,21 +25,21 @@ class ObjectRegistryUnitTests(test_utils.GenericTestBase): """Test the Registry class in object_registry.""" - def test_get_object_class_by_type_method(self): + def test_get_object_class_by_type_method(self) -> None: """Tests the normal behavior of get_object_class_by_type().""" self.assertEqual( object_registry.Registry.get_object_class_by_type('Int').__name__, 'Int') - def test_fake_class_is_not_gettable(self): + def test_fake_class_is_not_gettable(self) -> None: """Tests that trying to retrieve a fake class raises an error.""" - with self.assertRaisesRegexp(TypeError, 'not a valid object class'): + with self.assertRaisesRegex(TypeError, 'not a valid object class'): object_registry.Registry.get_object_class_by_type('FakeClass') - def test_base_object_is_not_gettable(self): + def test_base_object_is_not_gettable(self) -> None: """Tests that BaseObject exists and cannot be set as an obj_type.""" assert getattr(objects, 'BaseObject') - with self.assertRaisesRegexp(TypeError, 'not a valid object class'): + with self.assertRaisesRegex(TypeError, 'not a valid object class'): object_registry.Registry.get_object_class_by_type('BaseObject') @@ -50,7 +50,7 @@ class ObjectDefaultValuesUnitTests(test_utils.GenericTestBase): are used in rules. 
""" - def test_all_rule_input_fields_have_default_values(self): + def test_all_rule_input_fields_have_default_values(self) -> None: """Checks that all rule input fields have a default value, and this is provided in get_default_values(). """ @@ -72,7 +72,7 @@ def test_all_rule_input_fields_have_default_values(self): self.assertEqual( default_value, object_default_vals[param_obj_type_name]) - def test_get_object_default_values_is_valid(self): + def test_get_object_default_values_is_valid(self) -> None: """Checks that the default values provided by get_default_values() correspond to the ones defined in objects.py. """ diff --git a/core/domain/opportunity_domain.py b/core/domain/opportunity_domain.py index 6af867c5769b..423768ae0ca1 100644 --- a/core/domain/opportunity_domain.py +++ b/core/domain/opportunity_domain.py @@ -21,8 +21,7 @@ from core import utils from core.constants import constants -from typing import Dict, List -from typing_extensions import TypedDict +from typing import Dict, List, TypedDict class PartialExplorationOpportunitySummaryDict(TypedDict): diff --git a/core/domain/opportunity_domain_test.py b/core/domain/opportunity_domain_test.py index 75feab3b74b3..a7ef723f72e3 100644 --- a/core/domain/opportunity_domain_test.py +++ b/core/domain/opportunity_domain_test.py @@ -29,7 +29,7 @@ class ExplorationOpportunitySummaryDomainTests(test_utils.GenericTestBase): """Test the ExplorationOpportunitySummary domain.""" def setUp(self) -> None: - super(ExplorationOpportunitySummaryDomainTests, self).setUp() + super().setUp() self.mock_supported_audio_languages = [{ 'id': 'en' }, { @@ -104,7 +104,7 @@ def test_negative_content_count_fails_validation_check(self) -> None: # Object with content_count as int passes the validation check. 
self.valid_exp_opp_summary.validate() self.valid_exp_opp_summary.content_count = -5 - self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_exp_opp_summary, 'Expected content_count to be a non-negative integer, ' 'received -5' @@ -138,7 +138,7 @@ def test_same_language_for_need_and_assigend_voice_artist_fails_validation( set(need_voice_artist_languages).isdisjoint( assigned_voice_artist_languages)) - self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_exp_opp_summary, re.escape( 'Expected voice_artist "needed" and "assigned" list of ' @@ -161,7 +161,7 @@ def test_translation_counts_with_invalid_language_code_fails_validation( 'invalid_language_code': 4 } # Object with chapter_id as boolean fails the validation check. - self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_exp_opp_summary, 'Invalid language_code: invalid_language_code' ) @@ -181,7 +181,7 @@ def test_translation_counts_with_invalid_count_fails_validation( } # Object with invalid language_code in translation_counts fails the # validation. - self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_exp_opp_summary, 'Expected count for language_code hi to be a non-negative ' 'integer, received -5' @@ -203,7 +203,7 @@ def test_translation_counts_with_invalid_count_value_fails_validation( } # Object with invalid count value i.e, more than content_count # in translation_counts fails the validation. 
- self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_exp_opp_summary, 'Expected translation count for language_code hi to be ' r'less than or equal to content_count\(5\), received 8' @@ -223,7 +223,7 @@ def test_invalid_lang_code_in_incomplete_translation_langs_fails_validation( 'invalid_language_code'] # Object with invalid language code inside # incomplete_translation_language_codes fails the validation. - self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_exp_opp_summary, 'Invalid language_code: invalid_language_code' ) @@ -240,7 +240,7 @@ def test_invalid_lang_code_in_need_voice_artist_languages_fails_validation( 'invalid_language_code'] # Object with invalid language code inside # language_codes_needing_voice_artists fails the validation. - self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_exp_opp_summary, 'Invalid language_code: invalid_language_code' ) @@ -260,7 +260,7 @@ def test_invalid_lang_code_in_assigned_voice_artist_langs_fails_validation( 'invalid_language_code'] # Object with invalid language code inside # language_codes_with_assigned_voice_artists fails the validation. 
- self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_exp_opp_summary, 'Invalid language_code: invalid_language_code' ) @@ -277,7 +277,7 @@ def test_all_languages_in_summary_equals_supported_languages(self) -> None: self.valid_exp_opp_summary.validate() self.valid_exp_opp_summary.language_codes_needing_voice_artists = [ 'en'] - self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_exp_opp_summary, re.escape( 'Expected set of all languages available in ' @@ -292,7 +292,7 @@ class SkillOpportunityDomainTest(test_utils.GenericTestBase): """Tests for the SkillOpportunity domain object.""" def setUp(self) -> None: - super(SkillOpportunityDomainTest, self).setUp() + super().setUp() valid_skill_opportunity_dict: ( opportunity_domain.SkillOpportunityDict ) = { @@ -330,7 +330,7 @@ def test_negative_question_count_fails_validation_check(self) -> None: # Object with question_count as int passes the validation check. 
self.valid_skill_opportunity.validate() self.valid_skill_opportunity.question_count = -5 - self._assert_validation_error( # type: ignore[no-untyped-call] + self._assert_validation_error( self.valid_skill_opportunity, 'Expected question_count to be a non-negative integer, ' 'received -5' diff --git a/core/domain/opportunity_services.py b/core/domain/opportunity_services.py index 71a8c378ca4c..2f8c67596489 100644 --- a/core/domain/opportunity_services.py +++ b/core/domain/opportunity_services.py @@ -18,18 +18,33 @@ from __future__ import annotations +import collections import logging +from core import feconf + from core.constants import constants +from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import opportunity_domain from core.domain import question_fetchers +from core.domain import story_domain from core.domain import story_fetchers +from core.domain import suggestion_services +from core.domain import topic_domain from core.domain import topic_fetchers +from core.domain import translation_services from core.platform import models -(opportunity_models, suggestion_models) = models.Registry.import_models( - [models.NAMES.opportunity, models.NAMES.suggestion]) +from typing import Dict, List, Optional, Sequence, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import opportunity_models + +(opportunity_models,) = models.Registry.import_models([ + models.Names.OPPORTUNITY +]) # NOTE TO DEVELOPERS: The functions: # - delete_all_exploration_opportunity_summary_models() @@ -38,7 +53,7 @@ # to that PR if you need to reinstate them. -def is_exploration_available_for_contribution(exp_id): +def is_exploration_available_for_contribution(exp_id: str) -> bool: """Checks whether a given exploration id belongs to a curated list of exploration i.e, whether it's used as the chapter of any story. 
@@ -54,7 +69,9 @@ def is_exploration_available_for_contribution(exp_id): return model is not None -def get_exploration_opportunity_summary_from_model(model): +def get_exploration_opportunity_summary_from_model( + model: opportunity_models.ExplorationOpportunitySummaryModel +) -> opportunity_domain.ExplorationOpportunitySummary: """Returns the ExplorationOpportunitySummary object out of the model. Args: @@ -93,52 +110,21 @@ def get_exploration_opportunity_summary_from_model(model): {}) -def get_exp_opportunity_summary_with_in_review_translations_from_model( - model, translations_in_review): - """Returns the ExplorationOpportunitySummary object out of the model when - there are translations that are in review. - - Args: - model: ExplorationOpportunitySummaryModel. The exploration opportunity - summary model. - translations_in_review: list(SuggestionModel). The list of translations - which are in review. - - Returns: - ExplorationOpportunitySummary. The corresponding - ExplorationOpportunitySummary object. - """ - translation_opportunity = get_exploration_opportunity_summary_from_model( - model) - translation_in_review_counts = {} - - for language_code in constants.SUPPORTED_CONTENT_LANGUAGES: - in_review_count = 0 - for suggestion in translations_in_review: - if ( - suggestion is not None and - suggestion.language_code == language_code['code'] and - suggestion.target_id == model.id): - in_review_count = in_review_count + 1 - if in_review_count > 0: - translation_in_review_counts[ - language_code['code']] = in_review_count - - translation_opportunity.translation_in_review_counts = ( - translation_in_review_counts) - - return translation_opportunity - - -def _save_multi_exploration_opportunity_summary( - exploration_opportunity_summary_list): - """Stores multiple ExplorationOpportunitySummary into datastore as a - ExplorationOpportunitySummaryModel. 
+def _construct_new_opportunity_summary_models( + exploration_opportunity_summary_list: List[ + opportunity_domain.ExplorationOpportunitySummary + ] +) -> List[opportunity_models.ExplorationOpportunitySummaryModel]: + """Create ExplorationOpportunitySummaryModels from domain objects. Args: exploration_opportunity_summary_list: list( ExplorationOpportunitySummary). A list of exploration opportunity summary object. + + Returns: + list(ExplorationOpportunitySummaryModel). A list of + ExplorationOpportunitySummaryModel to be stored in the datastore. """ exploration_opportunity_summary_model_list = [] for opportunity_summary in exploration_opportunity_summary_list: @@ -160,15 +146,40 @@ def _save_multi_exploration_opportunity_summary( ) exploration_opportunity_summary_model_list.append(model) + return exploration_opportunity_summary_model_list + +def _save_multi_exploration_opportunity_summary( + exploration_opportunity_summary_list: List[ + opportunity_domain.ExplorationOpportunitySummary + ] +) -> None: + """Stores multiple ExplorationOpportunitySummary into datastore as a + ExplorationOpportunitySummaryModel. + + Args: + exploration_opportunity_summary_list: list( + ExplorationOpportunitySummary). A list of exploration opportunity + summary object. 
+ """ + exploration_opportunity_summary_model_list = ( + _construct_new_opportunity_summary_models( + exploration_opportunity_summary_list + ) + ) ( opportunity_models.ExplorationOpportunitySummaryModel - .update_timestamps_multi(exploration_opportunity_summary_model_list)) + .update_timestamps_multi(exploration_opportunity_summary_model_list) + ) opportunity_models.ExplorationOpportunitySummaryModel.put_multi( exploration_opportunity_summary_model_list) -def create_exp_opportunity_summary(topic, story, exploration): +def create_exp_opportunity_summary( + topic: topic_domain.Topic, + story: story_domain.Story, + exploration: exp_domain.Exploration +) -> opportunity_domain.ExplorationOpportunitySummary: """Create an ExplorationOpportunitySummary object with the given topic, story and exploration object. @@ -185,7 +196,8 @@ def create_exp_opportunity_summary(topic, story, exploration): # TODO(#13903): Find a way to reduce runtime of computing the complete # languages. complete_translation_language_list = ( - exploration.get_languages_with_complete_translation()) + translation_services.get_languages_with_complete_translation( + exploration)) # TODO(#13912): Revisit voiceover language logic. 
language_codes_needing_voice_artists = set( complete_translation_language_list) @@ -201,7 +213,11 @@ def create_exp_opportunity_summary(topic, story, exploration): language_codes_needing_voice_artists.add(exploration.language_code) content_count = exploration.get_content_count() - translation_counts = exploration.get_translation_counts() + translation_counts = translation_services.get_translation_counts( + feconf.TranslatableEntityType.EXPLORATION, + exploration.id, + exploration.version + ) story_node = story.story_contents.get_node_with_corresponding_exp_id( exploration.id) @@ -222,7 +238,8 @@ def create_exp_opportunity_summary(topic, story, exploration): def _compute_exploration_incomplete_translation_languages( - complete_translation_languages): + complete_translation_languages: List[str] +) -> List[str]: """Computes all languages that are not 100% translated in an exploration. Args: @@ -240,7 +257,9 @@ def _compute_exploration_incomplete_translation_languages( return sorted(list(incomplete_translation_language_codes)) -def add_new_exploration_opportunities(story_id, exp_ids): +def add_new_exploration_opportunities( + story_id: str, exp_ids: List[str] +) -> None: """Adds new exploration opportunity into the model. Args: @@ -254,7 +273,11 @@ def add_new_exploration_opportunities(story_id, exp_ids): _create_exploration_opportunities(story, topic, exp_ids) -def _create_exploration_opportunities(story, topic, exp_ids): +def _create_exploration_opportunities( + story: story_domain.Story, + topic: topic_domain.Topic, + exp_ids: List[str] +) -> None: """Creates new exploration opportunities corresponding to the supplied story, topic, and exploration IDs. 
@@ -274,24 +297,36 @@ def _create_exploration_opportunities(story, topic, exp_ids): create_exp_opportunity_summary( topic, story, exploration)) _save_multi_exploration_opportunity_summary( - exploration_opportunity_summary_list) + exploration_opportunity_summary_list + ) -def update_opportunity_with_updated_exploration(exp_id): +def compute_opportunity_models_with_updated_exploration( + exp_id: str, + content_count: int, + translation_counts: Dict[str, int] +) -> List[opportunity_models.ExplorationOpportunitySummaryModel]: """Updates the opportunities models with the changes made in the exploration. Args: exp_id: str. The exploration id which is also the id of the opportunity model. + content_count: int. The number of contents available in the exploration. + translation_counts: dict(str, int). The number of translations available + for the exploration in different languages. + + Returns: + list(ExplorationOpportunitySummaryModel). A list of opportunity models + which are updated. """ updated_exploration = exp_fetchers.get_exploration_by_id(exp_id) - content_count = updated_exploration.get_content_count() - translation_counts = updated_exploration.get_translation_counts() - # TODO(#13903): Find a way to reduce runtime of computing the complete - # languages. 
- complete_translation_language_list = ( - updated_exploration.get_languages_with_complete_translation()) + + complete_translation_language_list = [] + for language_code, translation_count in translation_counts.items(): + if translation_count == content_count: + complete_translation_language_list.append(language_code) + model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id) exploration_opportunity_summary = ( get_exploration_opportunity_summary_from_model(model)) @@ -329,11 +364,48 @@ def update_opportunity_with_updated_exploration(exp_id): exploration_opportunity_summary.validate() - _save_multi_exploration_opportunity_summary( + return _construct_new_opportunity_summary_models( [exploration_opportunity_summary]) -def update_exploration_opportunities_with_story_changes(story, exp_ids): +def update_translation_opportunity_with_accepted_suggestion( + exploration_id: str, language_code: str +) -> None: + """Updates the translation opportunity for the accepted suggestion in the + ExplorationOpportunitySummaryModel. + + Args: + exploration_id: str. The ID of the exploration. + language_code: str. The language code of the accepted translation + suggestion.
+ """ + model = opportunity_models.ExplorationOpportunitySummaryModel.get( + exploration_id) + exp_opportunity_summary = ( + get_exploration_opportunity_summary_from_model(model)) + + if language_code in exp_opportunity_summary.translation_counts: + exp_opportunity_summary.translation_counts[language_code] += 1 + else: + exp_opportunity_summary.translation_counts[language_code] = 1 + + if ( + exp_opportunity_summary.content_count == + exp_opportunity_summary.translation_counts[language_code] + ): + exp_opportunity_summary.incomplete_translation_language_codes.remove( + language_code) + exp_opportunity_summary.language_codes_needing_voice_artists.append( + language_code + ) + + exp_opportunity_summary.validate() + _save_multi_exploration_opportunity_summary([exp_opportunity_summary]) + + +def update_exploration_opportunities_with_story_changes( + story: story_domain.Story, exp_ids: List[str] +) -> None: """Updates the opportunities models with the story changes. Args: @@ -341,13 +413,15 @@ def update_exploration_opportunities_with_story_changes(story, exp_ids): exp_ids: list(str). A list of exploration IDs whose exploration opportunity summary models need to be updated. """ - exp_opportunity_models = ( + exp_opportunity_models_with_none = ( opportunity_models.ExplorationOpportunitySummaryModel.get_multi( exp_ids)) exploration_opportunity_summary_list = [] - for exp_opportunity_model in exp_opportunity_models: + for exp_opportunity_model in exp_opportunity_models_with_none: + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_opportunity_model is not None exploration_opportunity_summary = ( get_exploration_opportunity_summary_from_model( exp_opportunity_model)) @@ -361,35 +435,11 @@ def update_exploration_opportunities_with_story_changes(story, exp_ids): exploration_opportunity_summary) _save_multi_exploration_opportunity_summary( - exploration_opportunity_summary_list) - - -def update_exploration_voiceover_opportunities( - exp_id, assigned_voice_artist_in_language_code): - """Updates the language_codes_with_assigned_voice_artists of exploration - opportunity model. - - Args: - exp_id: str. The ID of the exploration. - assigned_voice_artist_in_language_code: str. The language code in which - a voice artist is assigned to the exploration. - """ - model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id) - exploration_opportunity_summary = ( - get_exploration_opportunity_summary_from_model(model)) - - exploration_opportunity_summary.language_codes_needing_voice_artists.remove( - assigned_voice_artist_in_language_code) - ( - exploration_opportunity_summary - .language_codes_with_assigned_voice_artists.append( - assigned_voice_artist_in_language_code)) - exploration_opportunity_summary.validate() - _save_multi_exploration_opportunity_summary( - [exploration_opportunity_summary]) + exploration_opportunity_summary_list + ) -def delete_exploration_opportunities(exp_ids): +def delete_exploration_opportunities(exp_ids: List[str]) -> None: """Deletes the ExplorationOpportunitySummaryModel models corresponding to the given exp_ids. @@ -407,7 +457,9 @@ def delete_exploration_opportunities(exp_ids): exp_opportunity_models_to_be_deleted) -def delete_exploration_opportunities_corresponding_to_topic(topic_id): +def delete_exploration_opportunities_corresponding_to_topic( + topic_id: str +) -> None: """Deletes the ExplorationOpportunitySummaryModel models which corresponds to the given topic_id. 
@@ -418,10 +470,13 @@ def delete_exploration_opportunities_corresponding_to_topic(topic_id): opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic( topic_id)) opportunity_models.ExplorationOpportunitySummaryModel.delete_multi( - exp_opportunity_models) + list(exp_opportunity_models)) -def update_exploration_opportunities(old_story, new_story): +def update_exploration_opportunities( + old_story: story_domain.Story, + new_story: story_domain.Story +) -> None: """Updates the opportunities models according to the changes made in the story. @@ -450,11 +505,11 @@ def update_exploration_opportunities(old_story, new_story): update_exploration_opportunities_with_story_changes( new_story, list(model_ids_need_update)) - add_new_exploration_opportunities(new_story.id, new_added_exp_ids) + add_new_exploration_opportunities(new_story.id, list(new_added_exp_ids)) delete_exploration_opportunities(list(deleted_exp_ids)) -def delete_exp_opportunities_corresponding_to_story(story_id): +def delete_exp_opportunities_corresponding_to_story(story_id: str) -> None: """Deletes the ExplorationOpportunitySummaryModel models which corresponds to the given story_id. 
@@ -463,13 +518,23 @@ def delete_exp_opportunities_corresponding_to_story(story_id): """ exp_opprtunity_model_class = ( opportunity_models.ExplorationOpportunitySummaryModel) - exp_opportunity_models = exp_opprtunity_model_class.get_all().filter( + exp_opportunity_models: Sequence[ + opportunity_models.ExplorationOpportunitySummaryModel + ] = exp_opprtunity_model_class.get_all().filter( exp_opprtunity_model_class.story_id == story_id - ) - exp_opprtunity_model_class.delete_multi(exp_opportunity_models) - - -def get_translation_opportunities(language_code, topic_name, cursor): + ).fetch() + exp_opprtunity_model_class.delete_multi(list(exp_opportunity_models)) + + +def get_translation_opportunities( + language_code: str, + topic_name: Optional[str], + cursor: Optional[str] +) -> Tuple[ + List[opportunity_domain.ExplorationOpportunitySummary], + Optional[str], + bool +]: """Returns a list of opportunities available for translation in a specific language. @@ -497,67 +562,61 @@ def get_translation_opportunities(language_code, topic_name, cursor): opportunity_models .ExplorationOpportunitySummaryModel.get_all_translation_opportunities( page_size, cursor, language_code, topic_name)) - opportunities = [] - suggestion_ids = [] - opportunity_ids = [] - translations_in_review = [] - opportunity_ids = [ + opportunity_summaries = [] + opportunity_summary_exp_ids = [ opportunity.id for opportunity in exp_opportunity_summary_models] - if len(opportunity_ids) > 0: - suggestion_ids = ( - suggestion_models - .GeneralSuggestionModel - .get_translation_suggestions_in_review_ids_with_exp_id( - opportunity_ids)) - translations_in_review = ( - suggestion_models - .GeneralSuggestionModel - .get_multiple_suggestions_from_suggestion_ids(suggestion_ids)) + exp_id_to_in_review_count = {} + if len(opportunity_summary_exp_ids) > 0: + exp_id_to_in_review_count = ( + _build_exp_id_to_translation_suggestion_in_review_count( + opportunity_summary_exp_ids, language_code)) for 
exp_opportunity_summary_model in exp_opportunity_summary_models: - exp_opportunity_summary = ( - get_exp_opportunity_summary_with_in_review_translations_from_model( - exp_opportunity_summary_model, translations_in_review)) - opportunities.append(exp_opportunity_summary) - return opportunities, cursor, more - - -def get_voiceover_opportunities(language_code, cursor): - """Returns a list of opportunities available for voiceover in a specific - language. + opportunity_summary = ( + get_exploration_opportunity_summary_from_model( + exp_opportunity_summary_model)) + if opportunity_summary.id in exp_id_to_in_review_count: + # Compute the translation_in_review_counts domain object field + # adhoc. Note that this field is not persisted and is only used in + # the frontend. + # TODO(#14833): Compute this value in the backend controller + # instead. + opportunity_summary.translation_in_review_counts = { + language_code: exp_id_to_in_review_count[opportunity_summary.id] + } + opportunity_summaries.append(opportunity_summary) + return opportunity_summaries, cursor, more + + +def _build_exp_id_to_translation_suggestion_in_review_count( + exp_ids: List[str], language_code: str +) -> Dict[str, int]: + """Returns a dict mapping exploration ID to the count of corresponding + translation suggestions that are currently in review. Args: - cursor: str or None. If provided, the list of returned entities - starts from this datastore cursor. Otherwise, the returned - entities start from the beginning of the full list of entities. - language_code: str. The language for which voiceover opportunities - to be fetched. + exp_ids: list(str). List of exploration IDs for which to count + corresponding translations suggestions. + language_code: str. The language for which translation suggestions + should be fetched. Returns: - 3-tuple(opportunities, cursor, more). where: - opportunities: list(ExplorationOpportunitySummary). A list of - ExplorationOpportunitySummary domain objects. 
- cursor: str or None. A query cursor pointing to the next - batch of results. If there are no more results, this might - be None. - more: bool. If True, there are (probably) more results after - this batch. If False, there are no further results after - this batch. + dict(str, int). Dict of exploration IDs to counts of corresponding + translation suggestions currently in review. """ - page_size = constants.OPPORTUNITIES_PAGE_SIZE - exp_opportunity_summary_models, new_cursor, more = ( - opportunity_models.ExplorationOpportunitySummaryModel - .get_all_voiceover_opportunities(page_size, cursor, language_code)) - - opportunities = [] - for exp_opportunity_summary_model in exp_opportunity_summary_models: - exp_opportunity_summary = ( - get_exploration_opportunity_summary_from_model( - exp_opportunity_summary_model)) - opportunities.append(exp_opportunity_summary) - return opportunities, new_cursor, more - - -def get_exploration_opportunity_summaries_by_ids(ids): + exp_id_to_in_review_count: Dict[str, int] = collections.defaultdict(int) + suggestions_in_review = ( + suggestion_services + .get_translation_suggestions_in_review_by_exp_ids( + exp_ids, language_code)) + for suggestion in suggestions_in_review: + if suggestion is not None: + exp_id_to_in_review_count[suggestion.target_id] += 1 + return exp_id_to_in_review_count + + +def get_exploration_opportunity_summaries_by_ids( + ids: List[str] +) -> Dict[str, Optional[opportunity_domain.ExplorationOpportunitySummary]]: """Returns a dict with key as id and value representing ExplorationOpportunitySummary objects corresponding to the opportunity id. @@ -569,7 +628,9 @@ def get_exploration_opportunity_summaries_by_ids(ids): opportunity id and values representing the ExplorationOpportunitySummary domain objects corresponding to the opportunity id if exist else None. 
""" - opportunities = {opportunity_id: None for opportunity_id in ids} + opportunities: Dict[ + str, Optional[opportunity_domain.ExplorationOpportunitySummary] + ] = {opportunity_id: None for opportunity_id in ids} exp_opportunity_summary_models = ( opportunity_models.ExplorationOpportunitySummaryModel.get_multi(ids)) for exp_opportunity_summary_model in exp_opportunity_summary_models: @@ -580,7 +641,59 @@ def get_exploration_opportunity_summaries_by_ids(ids): return opportunities -def update_opportunities_with_new_topic_name(topic_id, topic_name): +def get_exploration_opportunity_summary_by_id( + opportunity_id: str +) -> Optional[opportunity_domain.ExplorationOpportunitySummary]: + """Returns an ExplorationOpportunitySummary object corresponding to the + opportunity id. + + Args: + opportunity_id: str. An opportunity id. + + Returns: + ExplorationOpportunitySummary|None. An ExplorationOpportunitySummary + domain object corresponding to the opportunity id if it exists, else + None. + """ + exp_opportunity_summary_model = ( + opportunity_models.ExplorationOpportunitySummaryModel.get( + opportunity_id, strict=False)) + if exp_opportunity_summary_model is None: + return None + return get_exploration_opportunity_summary_from_model( + exp_opportunity_summary_model) + + +def get_exploration_opportunity_summaries_by_topic_id( + topic_id: str +) -> List[opportunity_domain.ExplorationOpportunitySummary]: + """Returns a list of all exploration opportunity summaries + with the given topic ID. + + Args: + topic_id: str. The topic for which opportunity summaries + are fetched. + + Returns: + list(ExplorationOpportunitySummary). A list of all + exploration opportunity summaries with the given topic ID. + """ + opportunity_summaries = [] + exp_opportunity_summary_models = ( + opportunity_models. 
+ ExplorationOpportunitySummaryModel.get_by_topic(topic_id) + ) + for exp_opportunity_summary_model in exp_opportunity_summary_models: + opportunity_summary = ( + get_exploration_opportunity_summary_from_model( + exp_opportunity_summary_model)) + opportunity_summaries.append(opportunity_summary) + return opportunity_summaries + + +def update_opportunities_with_new_topic_name( + topic_id: str, topic_name: str +) -> None: """Updates the exploration opportunity summary models with new topic name. Args: @@ -603,10 +716,13 @@ def update_opportunities_with_new_topic_name(topic_id, topic_name): exploration_opportunity_summary) _save_multi_exploration_opportunity_summary( - exploration_opportunity_summary_list) + exploration_opportunity_summary_list + ) -def get_skill_opportunity_from_model(model): +def get_skill_opportunity_from_model( + model: opportunity_models.SkillOpportunityModel +) -> opportunity_domain.SkillOpportunity: """Returns a SkillOpportunity domain object from a SkillOpportunityModel. Args: @@ -619,7 +735,11 @@ def get_skill_opportunity_from_model(model): model.id, model.skill_description, model.question_count) -def get_skill_opportunities(cursor): +def get_skill_opportunities( + cursor: Optional[str] +) -> Tuple[ + List[opportunity_domain.SkillOpportunity], Optional[str], bool +]: """Returns a list of skill opportunities available for questions. Args: @@ -649,7 +769,9 @@ def get_skill_opportunities(cursor): return opportunities, cursor, more -def get_skill_opportunities_by_ids(ids): +def get_skill_opportunities_by_ids( + ids: List[str] +) -> Dict[str, Optional[opportunity_domain.SkillOpportunity]]: """Returns a list of SkillOpportunity domain objects corresponding to the given list of ids. @@ -661,7 +783,9 @@ def get_skill_opportunities_by_ids(ids): opportunity id and values representing the SkillOpportunity domain objects corresponding to the opportunity id if exist else None. 
""" - opportunities = {opportunity_id: None for opportunity_id in ids} + opportunities: Dict[ + str, Optional[opportunity_domain.SkillOpportunity] + ] = {opportunity_id: None for opportunity_id in ids} skill_opportunity_models = ( opportunity_models.SkillOpportunityModel.get_multi(ids)) @@ -672,7 +796,7 @@ def get_skill_opportunities_by_ids(ids): return opportunities -def create_skill_opportunity(skill_id, skill_description): +def create_skill_opportunity(skill_id: str, skill_description: str) -> None: """Creates a SkillOpportunityModel entity in the datastore. Args: @@ -701,7 +825,9 @@ def create_skill_opportunity(skill_id, skill_description): _save_skill_opportunities([skill_opportunity]) -def _save_skill_opportunities(skill_opportunities): +def _save_skill_opportunities( + skill_opportunities: List[opportunity_domain.SkillOpportunity] +) -> None: """Saves SkillOpportunity domain objects into datastore as SkillOpportunityModel objects. @@ -723,7 +849,9 @@ def _save_skill_opportunities(skill_opportunities): opportunity_models.SkillOpportunityModel.put_multi(skill_opportunity_models) -def update_skill_opportunity_skill_description(skill_id, new_description): +def update_skill_opportunity_skill_description( + skill_id: str, new_description: str +) -> None: """Updates the skill_description of the SkillOpportunityModel with new_description. @@ -737,7 +865,9 @@ def update_skill_opportunity_skill_description(skill_id, new_description): _save_skill_opportunities([skill_opportunity]) -def _get_skill_opportunity(skill_id): +def _get_skill_opportunity( + skill_id: str +) -> Optional[opportunity_domain.SkillOpportunity]: """Returns the SkillOpportunity domain object representing a SkillOpportunityModel with the supplied skill_id in the datastore. 
@@ -756,7 +886,7 @@ def _get_skill_opportunity(skill_id): return None -def delete_skill_opportunity(skill_id): +def delete_skill_opportunity(skill_id: str) -> None: """Deletes the SkillOpportunityModel corresponding to the supplied skill_id. Args: @@ -769,7 +899,7 @@ def delete_skill_opportunity(skill_id): opportunity_models.SkillOpportunityModel.delete(skill_opportunity_model) -def increment_question_counts(skill_ids, delta): +def increment_question_counts(skill_ids: List[str], delta: int) -> None: """Increments question_count(s) of SkillOpportunityModel(s) with corresponding skill_ids. @@ -784,7 +914,8 @@ def increment_question_counts(skill_ids, delta): def update_skill_opportunities_on_question_linked_skills_change( - old_skill_ids, new_skill_ids): + old_skill_ids: List[str], new_skill_ids: List[str] +) -> None: """Updates question_count(s) of SkillOpportunityModel(s) corresponding to the change in linked skill IDs for a question from old_skill_ids to new_skill_ids, e.g. if skill_id1 is in old_skill_ids, but not in @@ -806,19 +937,21 @@ def update_skill_opportunities_on_question_linked_skills_change( updated_skill_opportunities = [] updated_skill_opportunities.extend( _get_skill_opportunities_with_updated_question_counts( - new_skill_ids_added_to_question, 1)) + list(new_skill_ids_added_to_question), 1)) updated_skill_opportunities.extend( _get_skill_opportunities_with_updated_question_counts( - skill_ids_removed_from_question, -1)) + list(skill_ids_removed_from_question), -1)) _save_skill_opportunities(updated_skill_opportunities) -def _get_skill_opportunities_with_updated_question_counts(skill_ids, delta): +def _get_skill_opportunities_with_updated_question_counts( + skill_ids: List[str], delta: int +) -> List[opportunity_domain.SkillOpportunity]: """Returns a list of SkillOpportunities with corresponding skill_ids with question_count(s) updated by delta. Args: - skill_ids: iterable(str). The IDs of the matching SkillOpportunityModels + skill_ids: List(str). 
The IDs of the matching SkillOpportunityModels in the datastore. delta: int. The delta by which to update each question_count (can be negative). @@ -833,13 +966,18 @@ def _get_skill_opportunities_with_updated_question_counts(skill_ids, delta): if skill_opportunity_model is not None: skill_opportunity = get_skill_opportunity_from_model( skill_opportunity_model) - skill_opportunity.question_count += delta + # The question count should never be negative. We default to 0 + # if some operation tries to reduce question count down to a + # negative value. + skill_opportunity.question_count = max( + skill_opportunity.question_count + delta, 0) updated_skill_opportunities.append(skill_opportunity) return updated_skill_opportunities def regenerate_opportunities_related_to_topic( - topic_id, delete_existing_opportunities=False): + topic_id: str, delete_existing_opportunities: bool = False +) -> int: """Regenerates opportunity models which belongs to a given topic. Args: @@ -849,13 +987,16 @@ def regenerate_opportunities_related_to_topic( Returns: int. The number of opportunity models created. + + Raises: + Exception. Failure to regenerate opportunities for given topic. """ if delete_existing_opportunities: exp_opportunity_models = ( opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic( topic_id)) opportunity_models.ExplorationOpportunitySummaryModel.delete_multi( - exp_opportunity_models) + list(exp_opportunity_models)) topic = topic_fetchers.get_topic_by_id(topic_id) story_ids = topic.get_canonical_story_ids() @@ -881,6 +1022,9 @@ def regenerate_opportunities_related_to_topic( exploration_opportunity_summary_list = [] for story in stories: + # Ruling out the possibility of None for mypy type checking, because + # above we are already validating that story is not None. 
+ assert story is not None for exp_id in story.story_contents.get_all_linked_exp_ids(): exploration_opportunity_summary_list.append( create_exp_opportunity_summary( diff --git a/core/domain/opportunity_services_test.py b/core/domain/opportunity_services_test.py index 991366ad6d98..3744039f72f1 100644 --- a/core/domain/opportunity_services_test.py +++ b/core/domain/opportunity_services_test.py @@ -23,12 +23,14 @@ from core import feconf from core.constants import constants from core.domain import exp_domain +from core.domain import exp_fetchers from core.domain import exp_services from core.domain import opportunity_domain from core.domain import opportunity_services from core.domain import question_services from core.domain import skill_domain from core.domain import skill_services +from core.domain import state_domain from core.domain import story_domain from core.domain import story_services from core.domain import subtopic_page_domain @@ -36,35 +38,50 @@ from core.domain import suggestion_services from core.domain import topic_domain from core.domain import topic_services +from core.domain import translation_domain from core.domain import user_services from core.platform import models from core.tests import test_utils +from typing import Dict, List, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import feedback_models + from mypy_imports import opportunity_models + from mypy_imports import story_models + from mypy_imports import suggestion_models + ( - feedback_models, opportunity_models, story_models, suggestion_models + feedback_models, + opportunity_models, + story_models, + suggestion_models ) = models.Registry.import_models([ - models.NAMES.feedback, models.NAMES.opportunity, models.NAMES.story, - models.NAMES.suggestion + models.Names.FEEDBACK, + models.Names.OPPORTUNITY, + models.Names.STORY, + models.Names.SUGGESTION ]) class OpportunityServicesIntegrationTest(test_utils.GenericTestBase): """Test the opportunity services 
module.""" - suggestion_target_id = '0' - suggestion_target_version_at_submission = 1 - suggestion_change = { + suggestion_target_id: str = '0' + suggestion_target_version_at_submission: int = 1 + suggestion_change: Dict[str, str] = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': 'End State', - 'content_id': 'content', + 'state_name': 'Introduction', + 'content_id': 'content_0', 'language_code': 'hi', 'content_html': '', 'translation_html': '

    This is translated html.

    ', 'data_format': 'html' } - def setUp(self): - super(OpportunityServicesIntegrationTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) @@ -88,7 +105,7 @@ def setUp(self): '%s' % i, self.owner_id, title='title %d' % i, - category='category%d' % i, + category=constants.ALL_CATEGORIES[i], end_state_name='End State', correctness_feedback_enabled=True ) for i in range(5)] @@ -97,7 +114,7 @@ def setUp(self): self.publish_exploration(self.owner_id, exp.id) topic = topic_domain.Topic.create_default_topic( - self.TOPIC_ID, 'topic', 'abbrev', 'description') + self.TOPIC_ID, 'topic', 'abbrev', 'description', 'fragm') topic.thumbnail_filename = 'thumbnail.svg' topic.thumbnail_bg_color = '#C6DCDA' topic.subtopics = [ @@ -106,6 +123,7 @@ def setUp(self): constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-url')] topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, self.TOPIC_ID)) @@ -114,7 +132,8 @@ def setUp(self): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'dummy-fragment' })] ) topic_services.save_new_topic(self.owner_id, topic) @@ -130,11 +149,16 @@ def setUp(self): self.TOPIC_ID, self.STORY_ID, self.admin_id) def mock_generate_new_thread_id_for_suggestion( - self, unused_entity_type, unused_entity_id): + self, + unused_entity_type: feedback_models.GeneralFeedbackThreadModel, + unused_entity_id: str + ) -> str: """Mock generate_new_thread_id function when creating suggestions.""" return self.THREAD_ID - def create_translation_suggestion_for_exploration_0_and_verify(self): + def create_translation_suggestion_for_exploration_0_and_verify( + self + ) -> None: """Creates a translation suggestion for 
exploration 0 and performs basic assertions. """ @@ -154,7 +178,7 @@ def create_translation_suggestion_for_exploration_0_and_verify(self): self.assertIsNotNone(suggestion) self.assertEqual(suggestion.status, suggestion_models.STATUS_IN_REVIEW) - def add_exploration_0_to_story(self): + def add_exploration_0_to_story(self) -> None: """Adds exploration 0 as a node to the test story.""" story_services.update_story( self.owner_id, self.STORY_ID, [story_domain.StoryChange({ @@ -169,7 +193,9 @@ def add_exploration_0_to_story(self): 'new_value': '0' })], 'Changes.') - def test_new_opportunity_with_adding_exploration_in_story_node(self): + def test_new_opportunity_with_adding_exploration_in_story_node( + self + ) -> None: translation_opportunities, _, _ = ( opportunity_services.get_translation_opportunities( 'hi', 'topic', None)) @@ -186,43 +212,45 @@ def test_new_opportunity_with_adding_exploration_in_story_node(self): self.assertEqual(opportunity.story_title, 'A story') def test_get_translation_opportunities_with_translations_in_review( - self): + self + ) -> None: translation_opportunities, _, _ = ( opportunity_services.get_translation_opportunities( 'hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 0) - self.add_exploration_0_to_story() self.create_translation_suggestion_for_exploration_0_and_verify() translation_opportunities, _, _ = ( opportunity_services.get_translation_opportunities( 'hi', 'topic', None)) + self.assertEqual(len(translation_opportunities), 1) opportunity = translation_opportunities[0] - languages_of_translations_in_review = ( - opportunity.translation_in_review_counts.keys()) - self.assertEqual(len(languages_of_translations_in_review), 1) + self.assertEqual( + opportunity.translation_in_review_counts, + {'hi': 1}) - def test_get_translation_opportunities_with_no_translations_in_review(self): + def test_get_translation_opportunities_with_no_translations_in_review( + self + ) -> None: translation_opportunities, _, _ = ( 
opportunity_services.get_translation_opportunities( 'hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 0) - self.add_exploration_0_to_story() translation_opportunities, _, _ = ( opportunity_services.get_translation_opportunities( 'hi', 'topic', None)) + self.assertEqual(len(translation_opportunities), 1) opportunity = translation_opportunities[0] - languages_of_translations_in_review = ( - opportunity.translation_in_review_counts.keys()) - self.assertEqual(len(languages_of_translations_in_review), 0) + self.assertEqual(opportunity.translation_in_review_counts, {}) def test_opportunity_get_deleted_with_removing_exploration_from_story_node( - self): + self + ) -> None: self.add_exploration_0_to_story() translation_opportunities, _, _ = ( @@ -241,7 +269,7 @@ def test_opportunity_get_deleted_with_removing_exploration_from_story_node( 'hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 0) - def test_opportunity_get_deleted_with_deleting_story(self): + def test_opportunity_get_deleted_with_deleting_story(self) -> None: self.add_exploration_0_to_story() translation_opportunities, _, _ = ( @@ -256,7 +284,7 @@ def test_opportunity_get_deleted_with_deleting_story(self): 'hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 0) - def test_opportunity_get_deleted_with_deleting_topic(self): + def test_opportunity_get_deleted_with_deleting_topic(self) -> None: self.add_exploration_0_to_story() translation_opportunities, _, _ = ( @@ -271,7 +299,7 @@ def test_opportunity_get_deleted_with_deleting_topic(self): 'hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 0) - def test_opportunities_updates_with_updating_topic_name(self): + def test_opportunities_updates_with_updating_topic_name(self) -> None: self.add_exploration_0_to_story() translation_opportunities, _, _ = ( @@ -300,7 +328,7 @@ def test_opportunities_updates_with_updating_topic_name(self): self.assertEqual(opportunity.story_title, 'A story') 
self.assertEqual(opportunity.topic_name, 'A new topic') - def test_opportunities_updates_with_updating_story_title(self): + def test_opportunities_updates_with_updating_story_title(self) -> None: self.add_exploration_0_to_story() translation_opportunities, _, _ = ( @@ -327,7 +355,7 @@ def test_opportunities_updates_with_updating_story_title(self): opportunity = translation_opportunities[0] self.assertEqual(opportunity.story_title, 'A new story') - def test_opportunity_updates_with_updating_story_node_title(self): + def test_opportunity_updates_with_updating_story_node_title(self) -> None: self.add_exploration_0_to_story() translation_opportunities, _, _ = ( @@ -355,20 +383,34 @@ def test_opportunity_updates_with_updating_story_node_title(self): opportunity = translation_opportunities[0] self.assertEqual(opportunity.chapter_title, 'A new Node1') - def test_opportunity_updates_with_updating_exploration(self): + def test_opportunity_updates_with_updating_exploration(self) -> None: self.add_exploration_0_to_story() translation_opportunities, _, _ = ( opportunity_services.get_translation_opportunities( 'hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 1) - self.assertEqual(translation_opportunities[0].content_count, 2) + self.assertEqual(translation_opportunities[0].content_count, 0) + + exp = exp_fetchers.get_exploration_by_id('0') + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index) + answer_group_dict_inputs_value: Dict[str, Union[str, List[str]]] = { + 'contentId': content_id_generator.generate( + translation_domain.ContentType.RULE, + extra_prefix='input' + ), + 'normalizedStrSet': ['Test'] + } - answer_group_dict = { + answer_group_dict: state_domain.AnswerGroupDict = { 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': content_id_generator.generate( + translation_domain.ContentType.FEEDBACK + ), 'html': '

    Feedback

    ' }, 'labelled_as_correct': False, @@ -378,10 +420,7 @@ def test_opportunity_updates_with_updating_exploration(self): }, 'rule_specs': [{ 'inputs': { - 'x': { - 'contentId': 'rule_input_4', - 'normalizedStrSet': ['Test'] - } + 'x': answer_group_dict_inputs_value }, 'rule_type': 'Contains' }], @@ -392,16 +431,20 @@ def test_opportunity_updates_with_updating_exploration(self): hints_list = [] hints_list.append({ 'hint_content': { - 'content_id': 'hint_1', + 'content_id': content_id_generator.generate( + translation_domain.ContentType.HINT + ), 'html': '

    hint one

    ' }, }) - solution_dict = { + solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': False, 'correct_answer': 'helloworld!', 'explanation': { - 'content_id': 'solution', + 'content_id': content_id_generator.generate( + translation_domain.ContentType.SOLUTION + ), 'html': '

    hello_world is a string

    ' }, } @@ -421,11 +464,16 @@ def test_opportunity_updates_with_updating_exploration(self): 'new_value': { 'placeholder': { 'value': { - 'content_id': 'ca_placeholder_0', + 'content_id': content_id_generator.generate( + translation_domain + .ContentType.CUSTOMIZATION_ARG, + extra_prefix='placeholder' + ), 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } }), exp_domain.ExplorationChange({ @@ -435,13 +483,6 @@ def test_opportunity_updates_with_updating_exploration(self): 'state_name': 'Introduction', 'new_value': [answer_group_dict] }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'state_name': 'Introduction', - 'property_name': ( - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX), - 'new_value': 4 - }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': ( @@ -455,15 +496,22 @@ def test_opportunity_updates_with_updating_exploration(self): exp_domain.STATE_PROPERTY_INTERACTION_SOLUTION), 'state_name': 'Introduction', 'new_value': solution_dict + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index, + 'old_value': 0 })], 'Add state name') translation_opportunities, _, _ = ( opportunity_services.get_translation_opportunities( 'hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 1) - self.assertEqual(translation_opportunities[0].content_count, 6) + self.assertEqual(translation_opportunities[0].content_count, 4) def test_completing_translation_removes_language_from_incomplete_language_codes( # pylint: disable=line-too-long - self): + self + ) -> None: story_services.update_story( self.owner_id, self.STORY_ID, [story_domain.StoryChange({ 'cmd': 'add_story_node', @@ -488,40 +536,20 @@ def test_completing_translation_removes_language_from_incomplete_language_codes( 'property_name': 
'content', 'new_value': { 'html': '

    Test content

    ', - 'content_id': 'content', - } - }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': 'Introduction', - 'content_id': 'content', - 'language_code': 'hi', - 'content_html': '

    Test content

    ', - 'translation_html': '

    Translated text

    ', - 'data_format': 'html' - }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'state_name': 'End State', - 'property_name': 'content', - 'new_value': { - 'html': '

    Test content

    ', - 'content_id': 'content', + 'content_id': 'content_0', } - }), - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': 'End State', - 'content_id': 'content', - 'language_code': 'hi', - 'content_html': '

    Test content

    ', - 'translation_html': '

    Translated text

    ', - 'data_format': 'html' - }), + }) ] exp_services.update_exploration( self.owner_id, '0', change_list, 'commit message') + ( + opportunity_services + .update_translation_opportunity_with_accepted_suggestion( + '0', 'hi' + ) + ) + # get_translation_opportunities should no longer return the opportunity # after translation completion. translation_opportunities, _, _ = ( @@ -532,10 +560,8 @@ def test_completing_translation_removes_language_from_incomplete_language_codes( # The translation opportunity should be returned after marking a # translation as stale. translation_needs_update_change_list = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_MARK_WRITTEN_TRANSLATION_AS_NEEDING_UPDATE, - 'state_name': 'Introduction', - 'content_id': 'content', - 'language_code': 'hi' + 'cmd': exp_domain.CMD_MARK_TRANSLATIONS_NEEDS_UPDATE, + 'content_id': 'content_0' })] exp_services.update_exploration( self.owner_id, '0', translation_needs_update_change_list, @@ -545,7 +571,7 @@ def test_completing_translation_removes_language_from_incomplete_language_codes( 'hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 1) - def test_create_new_skill_creates_new_skill_opportunity(self): + def test_create_new_skill_creates_new_skill_opportunity(self) -> None: skill_opportunities, _, _ = ( opportunity_services.get_skill_opportunities(None)) self.assertEqual(len(skill_opportunities), 0) @@ -560,10 +586,15 @@ def test_create_new_skill_creates_new_skill_opportunity(self): self.assertEqual(opportunity.id, self.SKILL_ID) self.assertEqual(opportunity.skill_description, 'skill_description') - def test_create_skill_opportunity_counts_existing_linked_questions(self): + def test_create_skill_opportunity_counts_existing_linked_questions( + self + ) -> None: + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_ID, self.USER_ID, - self._create_valid_question_data('ABC'), [self.SKILL_ID]) + 
self._create_valid_question_data('ABC', content_id_generator), + [self.SKILL_ID], + content_id_generator.next_content_id_index) question_services.create_new_question_skill_link( self.USER_ID, self.QUESTION_ID, self.SKILL_ID, 0.3) @@ -579,17 +610,18 @@ def test_create_skill_opportunity_counts_existing_linked_questions(self): self.assertEqual(opportunity.question_count, 1) def test_create_skill_opportunity_for_existing_opportunity_raises_exception( - self): + self + ) -> None: opportunity_services.create_skill_opportunity( self.SKILL_ID, 'description') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'SkillOpportunity corresponding to skill ID %s already exists.' % self.SKILL_ID): opportunity_services.create_skill_opportunity( self.SKILL_ID, 'description') - def test_update_skill_description_updates_skill_opportunity(self): + def test_update_skill_description_updates_skill_opportunity(self) -> None: self.save_new_skill( self.SKILL_ID, self.USER_ID, description='skill_description') changelist = [ @@ -612,7 +644,9 @@ def test_update_skill_description_updates_skill_opportunity(self): self.assertEqual(opportunity.id, self.SKILL_ID) self.assertEqual(opportunity.skill_description, 'new_description') - def test_update_skill_opportunity_skill_description_invalid_skill_id(self): + def test_update_skill_opportunity_skill_description_invalid_skill_id( + self + ) -> None: opportunity_services.update_skill_opportunity_skill_description( 'bad_skill_id', 'bad_description') @@ -620,7 +654,7 @@ def test_update_skill_opportunity_skill_description_invalid_skill_id(self): opportunity_services.get_skill_opportunities(None)) self.assertEqual(len(skill_opportunities), 0) - def test_delete_skill_deletes_skill_opportunity(self): + def test_delete_skill_deletes_skill_opportunity(self) -> None: self.save_new_skill( self.SKILL_ID, self.USER_ID, description='skill_description') skill_opportunities, _, _ = ( @@ -633,7 +667,7 @@ def 
test_delete_skill_deletes_skill_opportunity(self): opportunity_services.get_skill_opportunities(None)) self.assertEqual(len(skill_opportunities), 0) - def test_publish_story_creates_exploration_opportunity(self): + def test_publish_story_creates_exploration_opportunity(self) -> None: self.add_exploration_0_to_story() # Story is already published, so unpublish first. topic_services.unpublish_story( @@ -652,7 +686,8 @@ def test_publish_story_creates_exploration_opportunity(self): self.assertEqual(len(translation_opportunities), 1) def test_publish_story_creates_exploration_opportunity_if_topic_is_not_published( # pylint: disable=line-too-long - self): + self + ) -> None: self.add_exploration_0_to_story() # Story and topic are already published, so unpublish first. topic_services.unpublish_story( @@ -670,7 +705,7 @@ def test_publish_story_creates_exploration_opportunity_if_topic_is_not_published opportunity_services.get_translation_opportunities('hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 1) - def test_unpublish_story_deletes_exploration_opportunity(self): + def test_unpublish_story_deletes_exploration_opportunity(self) -> None: self.add_exploration_0_to_story() translation_opportunities, _, _ = ( opportunity_services.get_translation_opportunities( @@ -685,7 +720,7 @@ def test_unpublish_story_deletes_exploration_opportunity(self): 'hi', 'topic', None)) self.assertEqual(len(translation_opportunities), 0) - def test_unpublish_story_rejects_translation_suggestions(self): + def test_unpublish_story_rejects_translation_suggestions(self) -> None: self.add_exploration_0_to_story() self.create_translation_suggestion_for_exploration_0_and_verify() @@ -695,13 +730,17 @@ def test_unpublish_story_rejects_translation_suggestions(self): suggestion = suggestion_services.get_suggestion_by_id(self.THREAD_ID) self.assertEqual(suggestion.status, suggestion_models.STATUS_REJECTED) - def test_add_question_increments_skill_opportunity_question_count(self): + def 
test_add_question_increments_skill_opportunity_question_count( + self + ) -> None: opportunity_services.create_skill_opportunity( self.SKILL_ID, 'description') - + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_ID, self.USER_ID, - self._create_valid_question_data('ABC'), [self.SKILL_ID]) + self._create_valid_question_data('ABC', content_id_generator), + [self.SKILL_ID], + content_id_generator.next_content_id_index) skill_opportunities, _, _ = ( opportunity_services.get_skill_opportunities(None)) @@ -709,12 +748,15 @@ def test_add_question_increments_skill_opportunity_question_count(self): self.assertEqual(len(skill_opportunities), 1) self.assertEqual(opportunity.question_count, 1) - def test_create_question_skill_link_increments_question_count(self): + def test_create_question_skill_link_increments_question_count(self) -> None: opportunity_services.create_skill_opportunity( self.SKILL_ID, 'description') + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_ID, self.USER_ID, - self._create_valid_question_data('ABC'), [self.SKILL_ID]) + self._create_valid_question_data('ABC', content_id_generator), + [self.SKILL_ID], + content_id_generator.next_content_id_index) question_services.create_new_question_skill_link( self.USER_ID, self.QUESTION_ID, self.SKILL_ID, 0.3) @@ -724,12 +766,17 @@ def test_create_question_skill_link_increments_question_count(self): opportunity = skill_opportunities[0] self.assertEqual(opportunity.question_count, 1) - def test_link_multiple_skills_for_question_increments_question_count(self): + def test_link_multiple_skills_for_question_increments_question_count( + self + ) -> None: opportunity_services.create_skill_opportunity( self.SKILL_ID, 'description') + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_ID, self.USER_ID, - self._create_valid_question_data('ABC'), ['skill_2']) + 
self._create_valid_question_data('ABC', content_id_generator), + ['skill_2'], + content_id_generator.next_content_id_index) question_services.link_multiple_skills_for_question( self.USER_ID, self.QUESTION_ID, [self.SKILL_ID], [0.3]) @@ -739,12 +786,15 @@ def test_link_multiple_skills_for_question_increments_question_count(self): opportunity = skill_opportunities[0] self.assertEqual(opportunity.question_count, 1) - def test_delete_question_decrements_question_count(self): + def test_delete_question_decrements_question_count(self) -> None: opportunity_services.create_skill_opportunity( self.SKILL_ID, 'description') + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_ID, self.USER_ID, - self._create_valid_question_data('ABC'), [self.SKILL_ID]) + self._create_valid_question_data('ABC', content_id_generator), + [self.SKILL_ID], + content_id_generator.next_content_id_index) question_services.delete_question(self.USER_ID, self.QUESTION_ID) @@ -754,12 +804,17 @@ def test_delete_question_decrements_question_count(self): self.assertEqual(len(skill_opportunities), 1) self.assertEqual(opportunity.question_count, 0) - def test_delete_question_skill_link_decrements_question_count(self): + def test_delete_question_skill_link_decrements_question_count( + self + ) -> None: opportunity_services.create_skill_opportunity( self.SKILL_ID, 'description') + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_ID, self.USER_ID, - self._create_valid_question_data('ABC'), ['skill_2']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_2'], + content_id_generator.next_content_id_index) question_services.create_new_question_skill_link( self.USER_ID, self.QUESTION_ID, self.SKILL_ID, 0.3) @@ -775,8 +830,8 @@ def test_delete_question_skill_link_decrements_question_count(self): class OpportunityServicesUnitTest(test_utils.GenericTestBase): """Test the opportunity services 
methods.""" - def setUp(self): - super(OpportunityServicesUnitTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -791,7 +846,7 @@ def setUp(self): '%s' % i, self.owner_id, title='title %d' % i, - category='category%d' % i, + category=constants.ALL_CATEGORIES[i], end_state_name='End State', correctness_feedback_enabled=True ) for i in range(5)] @@ -800,7 +855,7 @@ def setUp(self): self.publish_exploration(self.owner_id, exp.id) topic = topic_domain.Topic.create_default_topic( - self.TOPIC_ID, 'topic', 'abbrev', 'description') + self.TOPIC_ID, 'topic', 'abbrev', 'description', 'fragm') topic.thumbnail_filename = 'thumbnail.svg' topic.thumbnail_bg_color = '#C6DCDA' topic.subtopics = [ @@ -809,6 +864,7 @@ def setUp(self): constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-url')] topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] topic_services.save_new_topic(self.owner_id, topic) topic_services.publish_topic(self.TOPIC_ID, self.admin_id) @@ -834,7 +890,7 @@ def setUp(self): 'new_value': '0' })], 'Changes.') - def test_get_exploration_opportunity_summaries_by_ids(self): + def test_get_exploration_opportunity_summaries_by_ids(self) -> None: output = ( opportunity_services.get_exploration_opportunity_summaries_by_ids( [])) @@ -845,13 +901,40 @@ def test_get_exploration_opportunity_summaries_by_ids(self): opportunity_services.get_exploration_opportunity_summaries_by_ids( ['0'])) + opportunities_first_value = opportunities['0'] + # Ruling out the possibility of None for mypy type checking. 
+ assert opportunities_first_value is not None self.assertEqual(len(opportunities), 1) self.assertIsInstance( - opportunities['0'], + opportunities_first_value, opportunity_domain.ExplorationOpportunitySummary) - self.assertEqual(opportunities['0'].id, '0') + self.assertEqual(opportunities_first_value.id, '0') + + def test_get_exploration_opportunity_summaries_by_no_topic_id(self) -> None: + opportunity_summaries = ( + opportunity_services + .get_exploration_opportunity_summaries_by_topic_id( + 'None')) + + self.assertEqual(opportunity_summaries, []) + + def test_get_exploration_opportunity_summaries_by_valid_topic_id( + self + ) -> None: + opportunity_summaries = ( + opportunity_services + .get_exploration_opportunity_summaries_by_topic_id( + 'topic')) - def test_get_exploration_opportunity_summaries_by_ids_for_invalid_id(self): + self.assertEqual(len(opportunity_summaries), 1) + self.assertIsInstance( + opportunity_summaries[0], + opportunity_domain.ExplorationOpportunitySummary) + self.assertEqual(opportunity_summaries[0].topic_id, 'topic') + + def test_get_exploration_opportunity_summaries_by_ids_for_invalid_id( + self + ) -> None: opportunities = ( opportunity_services.get_exploration_opportunity_summaries_by_ids( ['badID'])) @@ -860,10 +943,11 @@ def test_get_exploration_opportunity_summaries_by_ids_for_invalid_id(self): self.assertEqual(opportunities['badID'], None) def test_get_exploration_opportunity_summary_from_model_populates_new_lang( - self): + self + ) -> None: observed_log_messages = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.info().""" observed_log_messages.append(msg % args) @@ -874,6 +958,8 @@ def _mock_logging_function(msg, *args): opportunity = opportunities['0'] + # Ruling out the possibility of None for mypy type checking. 
+ assert opportunity is not None self.assertFalse( 'new_lang' in opportunity.incomplete_translation_language_codes) @@ -894,6 +980,8 @@ def _mock_logging_function(msg, *args): opportunity = opportunities['0'] + # Ruling out the possibility of None for mypy type checking. + assert opportunity is not None self.assertTrue( 'new_lang' in opportunity.incomplete_translation_language_codes) self.assertEqual(len(observed_log_messages), 1) @@ -903,8 +991,17 @@ def _mock_logging_function(msg, *args): 'opportunity model with id 0' ) + def test_get_exploration_opportunity_summary_by_id_for_none_result( + self + ) -> None: + self.assertIsNone( + opportunity_services.get_exploration_opportunity_summary_by_id( + 'exp_1') + ) + def test_delete_exp_opportunities_corresponding_to_story_when_story_deleted( - self): + self + ) -> None: opportunity_models.ExplorationOpportunitySummaryModel( id='exp_1', topic_id='topic_id', @@ -939,11 +1036,118 @@ def test_delete_exp_opportunities_corresponding_to_story_when_story_deleted( ) ) - def test_regenerate_opportunities_related_to_topic_when_story_deleted(self): + def test_regenerate_opportunities_related_to_topic_when_story_deleted( + self + ) -> None: story_models.StoryModel.delete_by_id(self.STORY_ID) - self.assertRaisesRegexp( - Exception, 'Failed to regenerate opportunities', - lambda: ( - opportunity_services.regenerate_opportunities_related_to_topic( - self.TOPIC_ID))) + with self.assertRaisesRegex( + Exception, 'Failed to regenerate opportunities' + ): + opportunity_services.regenerate_opportunities_related_to_topic( + self.TOPIC_ID + ) + + +class OpportunityUpdateOnAcceeptingSuggestionUnitTest( + test_utils.GenericTestBase): + """Unit test validating opportunity gets updated after accepting translation + suggetion. 
+ """ + + def setUp(self) -> None: + super().setUp() + supported_language_codes = set( + language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES) + self.new_incomplete_translation_language_codes = list( + supported_language_codes - set(['en'])) + + self.opportunity_model = ( + opportunity_models.ExplorationOpportunitySummaryModel( + id='exp_1', + topic_id='topic_id', + topic_name='topic_name', + story_id='story_id', + story_title='story_title', + chapter_title='chapter_title', + content_count=2, + incomplete_translation_language_codes=( + self.new_incomplete_translation_language_codes), + translation_counts={}, + language_codes_needing_voice_artists=['en'], + language_codes_with_assigned_voice_artists=[] + )) + self.opportunity_model.put() + + def test_update_translation_opportunity_with_accepted_suggestion( + self + ) -> None: + ( + opportunity_services + .update_translation_opportunity_with_accepted_suggestion( + 'exp_1', 'hi' + ) + ) + + opportunity = ( + opportunity_services.get_exploration_opportunity_summaries_by_ids( + ['exp_1'] + ) + ) + assert opportunity['exp_1'] is not None + + self.assertEqual(opportunity['exp_1'].translation_counts, {'hi': 1}) + + def test_fully_translated_content_in_language_updated_in_opportunity( + self + ) -> None: + ( + opportunity_services + .update_translation_opportunity_with_accepted_suggestion( + 'exp_1', 'hi' + ) + ) + + opportunity = ( + opportunity_services.get_exploration_opportunity_summaries_by_ids( + ['exp_1'] + ) + ) + assert opportunity['exp_1'] is not None + + self.assertEqual(opportunity['exp_1'].translation_counts, {'hi': 1}) + self.assertTrue( + 'hi' in opportunity['exp_1'].incomplete_translation_language_codes) + + ( + opportunity_services + .update_translation_opportunity_with_accepted_suggestion( + 'exp_1', 'hi' + ) + ) + + opportunity = ( + opportunity_services.get_exploration_opportunity_summaries_by_ids( + ['exp_1'] + ) + ) + assert opportunity['exp_1'] is not None + + 
self.assertEqual(opportunity['exp_1'].translation_counts, {'hi': 2}) + self.assertFalse( + 'hi' in opportunity['exp_1'].incomplete_translation_language_codes) + + def test_update_opportunity_with_updated_exploration(self) -> None: + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + + self.save_new_default_exploration('exp_1', owner_id) + opportunity = ( + opportunity_services + .compute_opportunity_models_with_updated_exploration( + 'exp_1', 2, {'hi': 2} + ) + )[0] + + self.assertFalse( + 'hi' in opportunity.incomplete_translation_language_codes) diff --git a/core/domain/param_domain.py b/core/domain/param_domain.py index 1923b5f2f008..2db2bf5216d7 100644 --- a/core/domain/param_domain.py +++ b/core/domain/param_domain.py @@ -22,18 +22,49 @@ from core import feconf from core import utils -from core.domain import object_registry from core.domain import value_generators_domain +from typing import Dict, List, TypedDict, Union + + +class CustomizationArgsDict(TypedDict): + """Dictionary representing the customization_args argument.""" + + parse_with_jinja: bool + + +class CustomizationArgsDictWithValue(CustomizationArgsDict): + """Dictionary representing the customization_args argument + containing value key. + """ + + value: str + + +class CustomizationArgsDictWithValueList(CustomizationArgsDict): + """Dictionary representing the customization_args argument + containing list_of_values key. 
+ """ + + list_of_values: List[str] + + +AllowedCustomizationArgsDict = Union[ + CustomizationArgsDictWithValue, + CustomizationArgsDictWithValueList +] + + +class ParamSpecDict(TypedDict): + """Dictionary representing the ParamSpec object.""" + + obj_type: str + class ParamSpec: """Value object for an exploration parameter specification.""" - SUPPORTED_OBJ_TYPES = { - 'UnicodeString', - } - - def __init__(self, obj_type): + def __init__(self, obj_type: str) -> None: """Initializes a ParamSpec object with the specified object type. Args: @@ -42,7 +73,7 @@ def __init__(self, obj_type): """ self.obj_type = obj_type - def to_dict(self): + def to_dict(self) -> ParamSpecDict: """Returns a dict representation of this ParamSpec. Returns: @@ -54,7 +85,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, param_spec_dict): + def from_dict(cls, param_spec_dict: ParamSpecDict) -> ParamSpec: """Creates a ParamSpec object from its dict representation. Args: @@ -68,24 +99,34 @@ def from_dict(cls, param_spec_dict): """ return cls(param_spec_dict['obj_type']) - def validate(self): + def validate(self) -> None: """Validate the existence of the object class.""" - # Ensure that this object class exists. - object_registry.Registry.get_object_class_by_type(self.obj_type) - # Ensure the obj_type is among the supported ParamSpec types. - if self.obj_type not in self.SUPPORTED_OBJ_TYPES: + if self.obj_type not in feconf.SUPPORTED_OBJ_TYPES: raise utils.ValidationError( '%s is not among the supported object types for parameters:' ' {%s}.' 
% - (self.obj_type, ', '.join(sorted(self.SUPPORTED_OBJ_TYPES)))) + (self.obj_type, ', '.join(sorted(feconf.SUPPORTED_OBJ_TYPES)))) + + +class ParamChangeDict(TypedDict): + """Dictionary representing the ParamChange object.""" + + name: str + generator_id: str + customization_args: AllowedCustomizationArgsDict class ParamChange: """Value object for a parameter change.""" - def __init__(self, name, generator_id, customization_args): + def __init__( + self, + name: str, + generator_id: str, + customization_args: AllowedCustomizationArgsDict + ) -> None: """Initialize a ParamChange object with the specified arguments. Args: @@ -107,7 +148,7 @@ def __init__(self, name, generator_id, customization_args): self._customization_args = customization_args @property - def name(self): + def name(self) -> str: """The name of the changing parameter. Returns: @@ -116,7 +157,7 @@ def name(self): return self._name @property - def generator(self): + def generator(self) -> value_generators_domain.BaseValueGenerator: """The value generator used to define the new value of the changing parameter. @@ -128,7 +169,7 @@ def generator(self): self._generator_id)() @property - def customization_args(self): + def customization_args(self) -> AllowedCustomizationArgsDict: """A dict containing several arguments that determine the changing value of the parameter. @@ -142,7 +183,7 @@ def customization_args(self): """ return self._customization_args - def to_dict(self): + def to_dict(self) -> ParamChangeDict: """Returns a dict representing this ParamChange domain object. Returns: @@ -155,7 +196,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, param_change_dict): + def from_dict(cls, param_change_dict: ParamChangeDict) -> ParamChange: """Create a ParamChange object with the specified arguments. 
Args: @@ -181,18 +222,13 @@ def from_dict(cls, param_change_dict): param_change_dict['customization_args'] ) - def _get_value(self, context_params): + def get_value(self, context_params: Dict[str, str]) -> str: """Generates a single value for a parameter change.""" - return self.generator.generate_value( + value: str = self.generator.generate_value( context_params, **self.customization_args) + return value - def get_normalized_value(self, obj_type, context_params): - """Generates a single normalized value for a parameter change.""" - raw_value = self._get_value(context_params) - return object_registry.Registry.get_object_class_by_type( - obj_type).normalize(raw_value) - - def validate(self): + def validate(self) -> None: """Checks that the properties of this ParamChange object are valid.""" if not isinstance(self.name, str): raise utils.ValidationError( @@ -210,9 +246,9 @@ def validate(self): try: hasattr(self, 'generator') - except KeyError: + except KeyError as e: raise utils.ValidationError( - 'Invalid generator ID %s' % self._generator_id) + 'Invalid generator ID %s' % self._generator_id) from e if not isinstance(self.customization_args, dict): raise utils.ValidationError( diff --git a/core/domain/param_domain_test.py b/core/domain/param_domain_test.py index 45a18c23844e..349df55fcb35 100644 --- a/core/domain/param_domain_test.py +++ b/core/domain/param_domain_test.py @@ -18,7 +18,9 @@ from __future__ import annotations +from core import feconf from core import utils +from core.domain import object_registry from core.domain import param_domain from core.tests import test_utils @@ -26,14 +28,19 @@ class ParameterDomainUnitTests(test_utils.GenericTestBase): """Tests for parameter domain objects.""" - def test_param_spec_validation(self): + def setUp(self) -> None: + self.sample_customization_args: ( + param_domain.CustomizationArgsDictWithValue + ) = { + 'value': '5', + 'parse_with_jinja': True, + } + + def test_param_spec_validation(self) -> None: """Test 
validation of param specs.""" - param_spec = param_domain.ParamSpec('FakeType') - with self.assertRaisesRegexp(TypeError, 'is not a valid object class'): - param_spec.validate() - param_spec.obj_type = 'Real' - with self.assertRaisesRegexp( + param_spec = param_domain.ParamSpec('Real') + with self.assertRaisesRegex( utils.ValidationError, 'is not among the supported object types' ): param_spec.validate() @@ -42,54 +49,117 @@ def test_param_spec_validation(self): param_spec.obj_type = 'UnicodeString' param_spec.validate() - def test_param_change_validation(self): + def test_supported_object_types_exist_in_registry(self) -> None: + """Test the supported object types of param specs.""" + + # Ensure that this object class exists. + for obj_type in feconf.SUPPORTED_OBJ_TYPES: + object_registry.Registry.get_object_class_by_type(obj_type) + + def test_param_change_validation(self) -> None: """Test validation of parameter changes.""" # Raise an error because the name is invalid. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Only parameter names' ): - param_domain.ParamChange('¡hola', 'Copier', {}).validate() + param_domain.ParamChange( + '¡hola', + 'Copier', + self.sample_customization_args + ).validate() # Raise an error because generator ID is not string. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected generator ID to be a string' ): - param_domain.ParamChange('abc', 123, {}).validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. + param_domain.ParamChange( + 'abc', + 123, # type: ignore[arg-type] + self.sample_customization_args + ).validate() # Raise an error because no such generator type exists. 
- with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid generator ID' ): - param_domain.ParamChange('abc', 'InvalidGenerator', {}).validate() + param_domain.ParamChange( + 'abc', + 'InvalidGenerator', + self.sample_customization_args + ).validate() # Raise an error because customization_args is not a dict. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected a dict' ): - param_domain.ParamChange('abc', 'Copier', ['a', 'b']).validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. + param_domain.ParamChange('abc', 'Copier', ['a', 'b']).validate() # type: ignore[arg-type] # Raise an error because the param_change name is not a string. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected param_change name to be a string, received' ): - param_domain.ParamChange(3, 'Copier', {}).validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. + param_domain.ParamChange( + 3, # type: ignore[arg-type] + 'Copier', + self.sample_customization_args + ).validate() # Raise an error because the arg names in customization_args are not # strings. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Invalid parameter change customization_arg name:'): - param_domain.ParamChange('abc', 'Copier', {1: '1'}).validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. 
+ customization_args_dict = {1: '1'} + param_domain.ParamChange( + 'abc', 'Copier', customization_args_dict # type: ignore[arg-type] + ).validate() + + def test_param_spec_to_dict(self) -> None: + sample_dict = { + 'obj_type': 'UnicodeString' + } + param_spec = param_domain.ParamSpec(sample_dict['obj_type']) + self.assertEqual(param_spec.to_dict(), sample_dict) - def test_param_change_class(self): + def test_param_spec_from_dict(self) -> None: + sample_dict: param_domain.ParamSpecDict = { + 'obj_type': 'UnicodeString' + } + param_spec = param_domain.ParamSpec.from_dict(sample_dict) + self.assertEqual(param_spec.to_dict(), sample_dict) + + def test_param_change_class(self) -> None: """Test the ParamChange class.""" param_change = param_domain.ParamChange( - 'abc', 'Copier', {'value': '3'}) + 'abc', 'Copier', {'value': '3', 'parse_with_jinja': True}) + param_change.validate() self.assertEqual(param_change.name, 'abc') self.assertEqual(param_change.generator.id, 'Copier') self.assertEqual(param_change.to_dict(), { 'name': 'abc', 'generator_id': 'Copier', - 'customization_args': {'value': '3'} + 'customization_args': {'value': '3', 'parse_with_jinja': True} }) - self.assertEqual(param_change.get_normalized_value('Int', {}), 3) + self.assertEqual(param_change.get_value({}), '3') + + def test_param_change_from_dict(self) -> None: + sample_dict: param_domain.ParamChangeDict = { + 'name': 'abc', + 'generator_id': 'Copier', + 'customization_args': self.sample_customization_args + } + param_change = param_domain.ParamChange.from_dict(sample_dict) + param_change.validate() + self.assertEqual(param_change.to_dict(), sample_dict) diff --git a/core/domain/platform_feature_services.py b/core/domain/platform_feature_services.py index ba6ffa895446..6fe4d3c7c43f 100644 --- a/core/domain/platform_feature_services.py +++ b/core/domain/platform_feature_services.py @@ -37,13 +37,17 @@ from core.domain import platform_parameter_domain from core.domain import platform_parameter_registry 
as registry -ALL_FEATURES_LIST = ( +from typing import Dict, List, Set + +ALL_FEATURES_LIST: List[platform_feature_list.ParamNames] = ( platform_feature_list.DEV_FEATURES_LIST + platform_feature_list.TEST_FEATURES_LIST + platform_feature_list.PROD_FEATURES_LIST ) -ALL_FEATURES_NAMES_SET = set(feature.value for feature in ALL_FEATURES_LIST) +ALL_FEATURES_NAMES_SET: Set[str] = set( + feature.value for feature in ALL_FEATURES_LIST +) class FeatureFlagNotFoundException(Exception): @@ -52,7 +56,9 @@ class FeatureFlagNotFoundException(Exception): pass -def create_evaluation_context_for_client(client_context_dict): +def create_evaluation_context_for_client( + client_context_dict: platform_parameter_domain.ClientSideContextDict +) -> platform_parameter_domain.EvaluationContext: """Returns context instance for evaluation, using the information provided by clients. @@ -70,7 +76,9 @@ def create_evaluation_context_for_client(client_context_dict): ) -def get_all_feature_flag_dicts(): +def get_all_feature_flag_dicts() -> List[ + platform_parameter_domain.PlatformParameterDict +]: """Returns dict representations of all feature flags. This method is used for providing detailed feature flags information to the admin panel. @@ -84,7 +92,9 @@ def get_all_feature_flag_dicts(): ] -def evaluate_all_feature_flag_values_for_client(context): +def evaluate_all_feature_flag_values_for_client( + context: platform_parameter_domain.EvaluationContext +) -> Dict[str, bool]: """Evaluates and returns the values for all feature flags. Args: @@ -98,7 +108,7 @@ def evaluate_all_feature_flag_values_for_client(context): ALL_FEATURES_NAMES_SET, context) -def is_feature_enabled(feature_name): +def is_feature_enabled(feature_name: str) -> bool: """A short-form method for server-side usage. This method evaluates and returns the values of the feature flag, using context from the server only. 
@@ -113,15 +123,19 @@ def is_feature_enabled(feature_name): def update_feature_flag_rules( - feature_name, committer_id, commit_message, new_rule_dicts): + feature_name: str, + committer_id: str, + commit_message: str, + new_rules: List[platform_parameter_domain.PlatformParameterRule] +) -> None: """Updates the feature flag's rules. Args: feature_name: str. The name of the feature to update. committer_id: str. ID of the committer. commit_message: str. The commit message. - new_rule_dicts: list(dict). A list of dict mappings of all fields - of PlatformParameterRule object. + new_rules: list(PlatformParameterRule). A list of PlatformParameterRule + objects to update. Raises: FeatureFlagNotFoundException. The feature_name is not registered in @@ -132,14 +146,14 @@ def update_feature_flag_rules( 'Unknown feature flag: %s.' % feature_name) registry.Registry.update_platform_parameter( - feature_name, committer_id, commit_message, new_rule_dicts) + feature_name, committer_id, commit_message, new_rules) # TODO(#10211): Currently Oppia runs in either of the two modes: # dev or prod. There should be another mode 'test' added for QA testing, # once it is added, this function needs to be updated to take that into # consideration. -def _get_server_mode(): +def _get_server_mode() -> platform_parameter_domain.ServerMode: """Returns the running mode of Oppia. Returns: @@ -147,22 +161,29 @@ def _get_server_mode(): in development mode, prod if in production mode. """ return ( - platform_parameter_domain.SERVER_MODES.dev + platform_parameter_domain.ServerMode.DEV if constants.DEV_MODE - else platform_parameter_domain.SERVER_MODES.prod + else platform_parameter_domain.ServerMode.PROD ) -def _create_evaluation_context_for_server(): +def _create_evaluation_context_for_server() -> ( + platform_parameter_domain.EvaluationContext +): """Returns evaluation context with information of the server. Returns: EvaluationContext. The context for evaluation. 
""" - # TODO(#11208): Properly set app version below using GAE app version as - # part of the server & client context. + # TODO(#11208): Here we use MyPy ignore because due to the missing + # `browser_type` key MyPy throwing missing key error. Also, `app_version` + # key is set as none which forces us to use `.get()` method while fetching + # the values from dictionaries. So, to remove 'type ignore' from here and + # '.get()' method from '.from_dict' method, properly set app version and + # browser type key below using GAE app version as part of the server & + # client context. return platform_parameter_domain.EvaluationContext.from_dict( - { + { # type: ignore[typeddict-item] 'platform_type': 'Backend', 'app_version': None, }, @@ -172,7 +193,10 @@ def _create_evaluation_context_for_server(): ) -def _evaluate_feature_flag_values_for_context(feature_names_set, context): +def _evaluate_feature_flag_values_for_context( + feature_names_set: Set[str], + context: platform_parameter_domain.EvaluationContext +) -> Dict[str, bool]: """Evaluates and returns the values for specified feature flags. Args: @@ -197,11 +221,14 @@ def _evaluate_feature_flag_values_for_context(feature_names_set, context): for feature_name in feature_names_set: param = registry.Registry.get_platform_parameter( feature_name) - result_dict[feature_name] = param.evaluate(context) + feature_name_value = param.evaluate(context) + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(feature_name_value, bool) + result_dict[feature_name] = feature_name_value return result_dict -def _evaluate_feature_flag_value_for_server(feature_name): +def _evaluate_feature_flag_value_for_server(feature_name: str) -> bool: """Evaluates and returns the values of the feature flag, using context from the server only. 
diff --git a/core/domain/platform_feature_services_test.py b/core/domain/platform_feature_services_test.py index 865a740df495..07da3de63613 100644 --- a/core/domain/platform_feature_services_test.py +++ b/core/domain/platform_feature_services_test.py @@ -18,7 +18,8 @@ from __future__ import annotations -from core import python_utils +import enum + from core import utils from core.constants import constants from core.domain import caching_services @@ -27,16 +28,23 @@ from core.domain import platform_parameter_registry as registry from core.tests import test_utils -PARAM_NAMES = python_utils.create_enum('feature_a', 'feature_b') # pylint: disable=invalid-name -SERVER_MODES = platform_parameter_domain.SERVER_MODES -FEATURE_STAGES = platform_parameter_domain.FEATURE_STAGES + +class ParamNames(enum.Enum): + """Enum for parameter names.""" + + FEATURE_A = 'feature_a' + FEATURE_B = 'feature_b' + + +ServerMode = platform_parameter_domain.ServerMode +FeatureStages = platform_parameter_domain.FeatureStages class PlatformFeatureServiceTest(test_utils.GenericTestBase): """Test for the platform feature services.""" - def setUp(self): - super(PlatformFeatureServiceTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.user_id = self.get_user_id_from_email(self.OWNER_EMAIL) @@ -44,67 +52,75 @@ def setUp(self): registry.Registry.parameter_registry.clear() # Parameter names that might be used in following tests. 
param_names = ['feature_a', 'feature_b'] - param_name_enums = [PARAM_NAMES.feature_a, PARAM_NAMES.feature_b] + param_name_enums = [ParamNames.FEATURE_A, ParamNames.FEATURE_B] caching_services.delete_multi( caching_services.CACHE_NAMESPACE_PLATFORM_PARAMETER, None, param_names) self.dev_feature = registry.Registry.create_feature_flag( - PARAM_NAMES.feature_a, 'a feature in dev stage', - platform_parameter_domain.FEATURE_STAGES.dev) + ParamNames.FEATURE_A, 'a feature in dev stage', + FeatureStages.DEV) self.prod_feature = registry.Registry.create_feature_flag( - PARAM_NAMES.feature_b, 'a feature in prod stage', - platform_parameter_domain.FEATURE_STAGES.prod) + ParamNames.FEATURE_B, 'a feature in prod stage', + FeatureStages.PROD) registry.Registry.update_platform_parameter( self.dev_feature.name, self.user_id, 'edit rules', [ - { + platform_parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', 'conditions': [ - ['=', SERVER_MODES.dev.value] + ['=', ServerMode.DEV.value] ] } ], 'value_when_matched': True - } + }) ] ) registry.Registry.update_platform_parameter( self.prod_feature.name, self.user_id, 'edit rules', [ - { + platform_parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', 'conditions': [ - ['=', SERVER_MODES.dev.value], - ['=', SERVER_MODES.test.value], - ['=', SERVER_MODES.prod.value] + ['=', ServerMode.DEV.value], + ['=', ServerMode.TEST.value], + ['=', ServerMode.PROD.value] ] } ], 'value_when_matched': True - } + }) ] ) # Replace feature lists with mocked names. 
self.original_feature_list = feature_services.ALL_FEATURES_LIST self.original_feature_name_set = ( - feature_services.ALL_FEATURES_NAMES_SET) - feature_services.ALL_FEATURES_LIST = param_name_enums + feature_services.ALL_FEATURES_NAMES_SET + ) + # Here we use MyPy ignore because the expected type of ALL_FEATURES_LIST + # is a list of 'PARAM_NAMES' Enum, but here for testing purposes we are + # providing a list of 'ParamNames' enums, which causes MyPy to throw an + # 'Incompatible types in assignment' error. Thus to avoid the error, we + # used ignore here. + feature_services.ALL_FEATURES_LIST = param_name_enums # type: ignore[assignment] feature_services.ALL_FEATURES_NAMES_SET = set(param_names) - def tearDown(self): - super(PlatformFeatureServiceTest, self).tearDown() + def tearDown(self) -> None: + super().tearDown() feature_services.ALL_FEATURES_LIST = self.original_feature_list feature_services.ALL_FEATURES_NAMES_SET = ( self.original_feature_name_set) - def test_create_evaluation_context_for_client_returns_correct_context(self): + def test_create_evaluation_context_for_client_returns_correct_context( + self + ) -> None: with self.swap(constants, 'DEV_MODE', True): context = feature_services.create_evaluation_context_for_client( { @@ -115,12 +131,12 @@ def test_create_evaluation_context_for_client_returns_correct_context(self): ) self.assertEqual( context.server_mode, - platform_parameter_domain.FEATURE_STAGES.dev) + FeatureStages.DEV) self.assertEqual(context.platform_type, 'Android') self.assertEqual(context.browser_type, None) self.assertEqual(context.app_version, '1.0.0') - def test_get_all_feature_flag_dicts_returns_correct_dicts(self): + def test_get_all_feature_flag_dicts_returns_correct_dicts(self) -> None: expected_dicts = [ self.dev_feature.to_dict(), self.prod_feature.to_dict(), @@ -129,7 +145,9 @@ def test_get_all_feature_flag_dicts_returns_correct_dicts(self): feature_services.get_all_feature_flag_dicts(), expected_dicts) - def 
test_get_all_feature_flag_values_in_dev_returns_correct_values(self): + def test_get_all_feature_flag_values_in_dev_returns_correct_values( + self + ) -> None: with self.swap(constants, 'DEV_MODE', True): context = feature_services.create_evaluation_context_for_client({ 'platform_type': 'Android', @@ -144,7 +162,9 @@ def test_get_all_feature_flag_values_in_dev_returns_correct_values(self): self.prod_feature.name: True, }) - def test_get_all_feature_flag_values_in_prod_returns_correct_values(self): + def test_get_all_feature_flag_values_in_prod_returns_correct_values( + self + ) -> None: with self.swap(constants, 'DEV_MODE', False): context = feature_services.create_evaluation_context_for_client({ 'platform_type': 'Android', @@ -159,38 +179,38 @@ def test_get_all_feature_flag_values_in_prod_returns_correct_values(self): self.prod_feature.name: True, }) - def test_evaluate_dev_feature_for_dev_server_returns_true(self): + def test_evaluate_dev_feature_for_dev_server_returns_true(self) -> None: with self.swap(constants, 'DEV_MODE', True): self.assertTrue( feature_services.is_feature_enabled(self.dev_feature.name)) - def test_evaluate_prod_feature_for_dev_server_returns_true(self): + def test_evaluate_prod_feature_for_dev_server_returns_true(self) -> None: with self.swap(constants, 'DEV_MODE', True): self.assertTrue( feature_services.is_feature_enabled(self.prod_feature.name)) - def test_evaluate_dev_feature_for_prod_server_returns_false(self): + def test_evaluate_dev_feature_for_prod_server_returns_false(self) -> None: with self.swap(constants, 'DEV_MODE', False): self.assertFalse( feature_services.is_feature_enabled(self.dev_feature.name)) - def test_evaluate_prod_feature_for_prod_server_returns_true( - self): + def test_evaluate_prod_feature_for_prod_server_returns_true(self) -> None: with self.swap(constants, 'DEV_MODE', False): self.assertTrue( feature_services.is_feature_enabled(self.prod_feature.name)) def 
test_evaluate_feature_for_prod_server_matches_to_backend_filter( - self): + self + ) -> None: registry.Registry.update_platform_parameter( self.prod_feature.name, self.user_id, 'edit rules', [ - { + platform_parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', 'conditions': [ - ['=', SERVER_MODES.prod.value] + ['=', ServerMode.PROD.value] ], }, { @@ -201,33 +221,35 @@ def test_evaluate_feature_for_prod_server_matches_to_backend_filter( } ], 'value_when_matched': True - } + }) ] ) with self.swap(constants, 'DEV_MODE', False): self.assertTrue( feature_services.is_feature_enabled(self.prod_feature.name)) - def test_get_feature_flag_values_with_unknown_name_raises_error(self): - with self.assertRaisesRegexp( + def test_get_feature_flag_values_with_unknown_name_raises_error( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Unknown feature flag'): feature_services.is_feature_enabled('feature_that_does_not_exist') - def test_update_feature_flag_rules_successfully_updates_rules(self): + def test_update_feature_flag_rules_successfully_updates_rules(self) -> None: feature_services.update_feature_flag_rules( self.dev_feature.name, self.user_id, 'test update', [ - { + platform_parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', 'conditions': [ - ['=', FEATURE_STAGES.dev.value] + ['=', FeatureStages.DEV.value] ] } ], 'value_when_matched': False - }, + }) ] ) @@ -235,24 +257,30 @@ def test_update_feature_flag_rules_successfully_updates_rules(self): self.assertFalse( feature_services.is_feature_enabled(self.dev_feature.name)) - def test_update_feature_flag_rules_with_unknown_name_raises_error(self): + def test_update_feature_flag_rules_with_unknown_name_raises_error( + self + ) -> None: unknown_name = 'feature_that_does_not_exist' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unknown feature flag: %s' % unknown_name): feature_services.update_feature_flag_rules( 
unknown_name, self.user_id, 'test update', [ - {'filters': [], 'value_when_matched': False}, + platform_parameter_domain.PlatformParameterRule.from_dict( + {'filters': [], 'value_when_matched': False} + ), ] ) - def test_update_feature_flag_rules_with_invalid_rules_raises_error(self): - with self.assertRaisesRegexp( + def test_update_feature_flag_rules_with_invalid_rules_raises_error( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'must have a server_mode filter'): feature_services.update_feature_flag_rules( self.dev_feature.name, self.user_id, 'test update', [ - { + platform_parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'app_version', @@ -260,9 +288,9 @@ def test_update_feature_flag_rules_with_invalid_rules_raises_error(self): } ], 'value_when_matched': True - }, - { + }), + platform_parameter_domain.PlatformParameterRule.from_dict({ 'filters': [], 'value_when_matched': False - } + }) ] ) diff --git a/core/domain/platform_parameter_domain.py b/core/domain/platform_parameter_domain.py index 7b66dd752b91..65520daa4c35 100644 --- a/core/domain/platform_parameter_domain.py +++ b/core/domain/platform_parameter_domain.py @@ -18,35 +18,68 @@ from __future__ import annotations +import enum import json import re from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.domain import change_domain -SERVER_MODES = python_utils.create_enum('dev', 'test', 'prod') # pylint: disable=invalid-name -FEATURE_STAGES = SERVER_MODES # pylint: disable=invalid-name -DATA_TYPES = python_utils.create_enum('bool', 'string', 'number') # pylint: disable=invalid-name +from typing import ( + Callable, Dict, Final, List, Optional, Pattern, TypedDict, Union) + + +class ServerMode(enum.Enum): + """Enum for server modes.""" + + DEV = 'dev' + TEST = 'test' + PROD = 'prod' + + +FeatureStages = ServerMode + +# Union type defined from allowed types that a platform can contain +# for 
it's data types. +PlatformDataTypes = Union[str, int, bool] -ALLOWED_SERVER_MODES = [ - SERVER_MODES.dev.value, SERVER_MODES.test.value, SERVER_MODES.prod.value] -ALLOWED_FEATURE_STAGES = [ - FEATURE_STAGES.dev.value, - FEATURE_STAGES.test.value, - FEATURE_STAGES.prod.value -] -ALLOWED_PLATFORM_TYPES = constants.PLATFORM_PARAMETER_ALLOWED_PLATFORM_TYPES -ALLOWED_BROWSER_TYPES = constants.PLATFORM_PARAMETER_ALLOWED_BROWSER_TYPES -ALLOWED_APP_VERSION_FLAVORS = ( - constants.PLATFORM_PARAMETER_ALLOWED_APP_VERSION_FLAVORS) -APP_VERSION_WITH_HASH_REGEXP = re.compile( - constants.PLATFORM_PARAMETER_APP_VERSION_WITH_HASH_REGEXP) -APP_VERSION_WITHOUT_HASH_REGEXP = re.compile( - constants.PLATFORM_PARAMETER_APP_VERSION_WITHOUT_HASH_REGEXP) +class DataTypes(enum.Enum): + """Enum for data types.""" + + BOOL = 'bool' + STRING = 'string' + NUMBER = 'number' + + +ALLOWED_SERVER_MODES: Final = [ + ServerMode.DEV.value, + ServerMode.TEST.value, + ServerMode.PROD.value +] +ALLOWED_FEATURE_STAGES: Final = [ + FeatureStages.DEV.value, + FeatureStages.TEST.value, + FeatureStages.PROD.value +] +ALLOWED_PLATFORM_TYPES: List[str] = ( + constants.PLATFORM_PARAMETER_ALLOWED_PLATFORM_TYPES +) +ALLOWED_BROWSER_TYPES: List[str] = ( + constants.PLATFORM_PARAMETER_ALLOWED_BROWSER_TYPES +) +ALLOWED_APP_VERSION_FLAVORS: List[str] = ( + constants.PLATFORM_PARAMETER_ALLOWED_APP_VERSION_FLAVORS +) + +APP_VERSION_WITH_HASH_REGEXP: Pattern[str] = re.compile( + constants.PLATFORM_PARAMETER_APP_VERSION_WITH_HASH_REGEXP +) +APP_VERSION_WITHOUT_HASH_REGEXP: Pattern[str] = re.compile( + constants.PLATFORM_PARAMETER_APP_VERSION_WITHOUT_HASH_REGEXP +) class PlatformParameterChange(change_domain.BaseChange): @@ -56,36 +89,65 @@ class PlatformParameterChange(change_domain.BaseChange): - 'edit_rules' (with new_rules) """ - CMD_EDIT_RULES = 'edit_rules' - ALLOWED_COMMANDS = [{ + CMD_EDIT_RULES: Final = 'edit_rules' + ALLOWED_COMMANDS: List[feconf.ValidCmdDict] = [{ 'name': CMD_EDIT_RULES, 
'required_attribute_names': ['new_rules'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }] +class EditRulesPlatformParameterCmd(PlatformParameterChange): + """Class representing the PlatformParameterChange's + CMD_EDIT_RULES command. + """ + + new_rules: List[str] + + +class ClientSideContextDict(TypedDict): + """Dictionary representing the client's side Context object.""" + + platform_type: Optional[str] + browser_type: Optional[str] + app_version: Optional[str] + + +class ServerSideContextDict(TypedDict): + """Dictionary representing the server's side Context object.""" + + server_mode: ServerMode + + class EvaluationContext: """Domain object representing the context for parameter evaluation.""" def __init__( - self, platform_type, browser_type, app_version, server_mode): + self, + platform_type: Optional[str], + browser_type: Optional[str], + app_version: Optional[str], + server_mode: ServerMode + ) -> None: self._platform_type = platform_type self._browser_type = browser_type self._app_version = app_version self._server_mode = server_mode @property - def platform_type(self): + def platform_type(self) -> Optional[str]: """Returns platform type. Returns: - str. The platform type, e.g. 'Web', 'Android', 'Backend'. + str|None. The platform type, e.g. 'Web', 'Android', 'Backend'. """ return self._platform_type @property - def browser_type(self): + def browser_type(self) -> Optional[str]: """Returns client browser type. Returns: @@ -95,7 +157,7 @@ def browser_type(self): return self._browser_type @property - def app_version(self): + def app_version(self) -> Optional[str]: # TODO(#11208): Update the documentation below to reflect the change # when the GAE app version is used for web & backend. """Returns client application version. 
@@ -107,17 +169,17 @@ def app_version(self): return self._app_version @property - def server_mode(self): + def server_mode(self) -> ServerMode: """Returns the server mode of Oppia. Returns: - Enum(SERVER_MODES). The the server mode of Oppia, + Enum(ServerMode). The the server mode of Oppia, must be one of the following: dev, test, prod. """ return self._server_mode @property - def is_valid(self): + def is_valid(self) -> bool: """Returns whether this context object is valid for evaluating parameters. An invalid context object usually indicates that one of the object's required fields is missing or an unexpected value. Note that @@ -133,7 +195,7 @@ def is_valid(self): self._platform_type is not None and self._platform_type in ALLOWED_PLATFORM_TYPES) - def validate(self): + def validate(self) -> None: """Validates the EvaluationContext domain object, raising an exception if the object is in an irrecoverable error state. """ @@ -150,7 +212,8 @@ def validate(self): raise utils.ValidationError( 'Invalid version \'%s\', expected to match regexp %s.' % ( self._app_version, APP_VERSION_WITH_HASH_REGEXP)) - elif ( + + if ( match.group(2) is not None and match.group(2) not in ALLOWED_APP_VERSION_FLAVORS): raise utils.ValidationError( @@ -164,7 +227,11 @@ def validate(self): self._server_mode.value, ALLOWED_SERVER_MODES)) @classmethod - def from_dict(cls, client_context_dict, server_context_dict): + def from_dict( + cls, + client_context_dict: ClientSideContextDict, + server_context_dict: ServerSideContextDict + ) -> EvaluationContext: """Creates a new EvaluationContext object by combining both client side and server side context. @@ -176,23 +243,34 @@ def from_dict(cls, client_context_dict, server_context_dict): EvaluationContext. The corresponding EvaluationContext domain object. """ + # TODO(#11208): After `app version` and `browser type` are set properly + # in the codebase as a part of the server & client context. 
Please + # convert `.get()` method to `[]`, so that we have a more strict method + # to fetch dictionary keys. return cls( - client_context_dict.get('platform_type'), + client_context_dict['platform_type'], client_context_dict.get('browser_type'), client_context_dict.get('app_version'), - server_context_dict.get('server_mode'), + server_context_dict['server_mode'], ) +class PlatformParameterFilterDict(TypedDict): + """Dictionary representing the PlatformParameterFilter object.""" + + type: str + conditions: List[List[str]] + + class PlatformParameterFilter: """Domain object for filters in platform parameters.""" - SUPPORTED_FILTER_TYPES = [ + SUPPORTED_FILTER_TYPES: Final = [ 'server_mode', 'platform_type', 'browser_type', 'app_version', 'app_version_flavor', ] - SUPPORTED_OP_FOR_FILTERS = { + SUPPORTED_OP_FOR_FILTERS: Final = { 'server_mode': ['='], 'platform_type': ['='], 'browser_type': ['='], @@ -200,12 +278,16 @@ class PlatformParameterFilter: 'app_version': ['=', '<', '<=', '>', '>='], } - def __init__(self, filter_type, conditions): + def __init__( + self, + filter_type: str, + conditions: List[List[str]] + ) -> None: self._type = filter_type self._conditions = conditions @property - def type(self): + def type(self) -> str: """Returns filter type. Returns: @@ -214,17 +296,17 @@ def type(self): return self._type @property - def conditions(self): + def conditions(self) -> List[List[str]]: """Returns filter conditions. Returns: - list((str, str)). The filter conditions. Each element of the list - is a 2-tuple (op, value), where op is the operator for comparison, - value is the value used for comparison. + list(list(str)). The filter conditions. Each element of the list + contain a list with 2-elements [op, value], where op is the operator + for comparison, value is the value used for comparison. 
""" return self._conditions - def evaluate(self, context): + def evaluate(self, context: EvaluationContext) -> bool: """Tries to match the given context with the filter against its value(s). @@ -239,7 +321,12 @@ def evaluate(self, context): for op, value in self._conditions ) - def _evaluate_single_value(self, op, value, context): + def _evaluate_single_value( + self, + op: str, + value: str, + context: EvaluationContext + ) -> bool: """Tries to match the given context with the filter against the given value. @@ -250,8 +337,14 @@ def _evaluate_single_value(self, op, value, context): Returns: bool. True if the filter is matched. + + Raises: + Exception. Given operator is not supported. """ - if op not in self.SUPPORTED_OP_FOR_FILTERS[self._type]: + if ( + self._type in ['server_mode', 'platform_type', 'browser_type'] + and op != '=' + ): raise Exception( 'Unsupported comparison operator \'%s\' for %s filter, ' 'expected one of %s.' % ( @@ -265,6 +358,8 @@ def _evaluate_single_value(self, op, value, context): elif self._type == 'browser_type' and op == '=': matched = context.browser_type == value elif self._type == 'app_version_flavor': + # Ruling out the possibility of None for mypy type checking. + assert context.app_version is not None matched = self._match_version_flavor(op, value, context.app_version) elif self._type == 'app_version': matched = self._match_version_expression( @@ -272,7 +367,7 @@ def _evaluate_single_value(self, op, value, context): return matched - def validate(self): + def validate(self) -> None: """Validates the PlatformParameterFilter domain object.""" if self._type not in self.SUPPORTED_FILTER_TYPES: raise utils.ValidationError( @@ -315,7 +410,7 @@ def validate(self): 'regexp %s.' % ( version, APP_VERSION_WITHOUT_HASH_REGEXP)) - def to_dict(self): + def to_dict(self) -> PlatformParameterFilterDict: """Returns a dict representation of the PlatformParameterFilter domain object. 
@@ -329,7 +424,9 @@ def to_dict(self): } @classmethod - def from_dict(cls, filter_dict): + def from_dict( + cls, filter_dict: PlatformParameterFilterDict + ) -> PlatformParameterFilter: """Returns an PlatformParameterFilter object from a dict. Args: @@ -342,7 +439,12 @@ def from_dict(cls, filter_dict): """ return cls(filter_dict['type'], filter_dict['conditions']) - def _match_version_expression(self, op, value, client_version): + def _match_version_expression( + self, + op: str, + value: str, + client_version: Optional[str] + ) -> bool: """Tries to match the version expression against the client version. Args: @@ -352,11 +454,16 @@ def _match_version_expression(self, op, value, client_version): Returns: bool. True if the expression matches the version. + + Raises: + Exception. Given operator is not supported. """ if client_version is None: return False match = APP_VERSION_WITH_HASH_REGEXP.match(client_version) + # Ruling out the possibility of None for mypy type checking. + assert match is not None client_version_without_hash = match.group(1) is_equal = value == client_version_without_hash @@ -375,8 +482,17 @@ def _match_version_expression(self, op, value, client_version): return is_client_version_larger elif op == '>=': return is_equal or is_client_version_larger + else: + raise Exception( + 'Unsupported comparison operator \'%s\' for %s filter, ' + 'expected one of %s.' % ( + op, self._type, self.SUPPORTED_OP_FOR_FILTERS[self._type])) - def _is_first_version_smaller(self, version_a, version_b): + def _is_first_version_smaller( + self, + version_a: str, + version_b: str + ) -> bool: """Compares two version strings, return True if the first version is smaller. @@ -387,18 +503,25 @@ def _is_first_version_smaller(self, version_a, version_b): Returns: bool. True if the first version is smaller. 
""" - version_a = version_a.split('.') - version_b = version_b.split('.') + splitted_version_a = version_a.split('.') + splitted_version_b = version_b.split('.') - for sub_version_a, sub_version_b in python_utils.ZIP( - version_a, version_b): + for sub_version_a, sub_version_b in zip( + splitted_version_a, + splitted_version_b + ): if int(sub_version_a) < int(sub_version_b): return True elif int(sub_version_a) > int(sub_version_b): return False return False - def _match_version_flavor(self, op, flavor, client_version): + def _match_version_flavor( + self, + op: str, + flavor: str, + client_version: str + ) -> bool: """Matches the client version flavor. Args: @@ -413,8 +536,13 @@ def _match_version_flavor(self, op, flavor, client_version): Returns: bool. True is the client_version matches the given flavor using the operator. + + Raises: + Exception. Given operator is not supported. """ match = APP_VERSION_WITH_HASH_REGEXP.match(client_version) + # Ruling out the possibility of None for mypy type checking. + assert match is not None client_flavor = match.group(2) # An unspecified client flavor means no flavor-based filters should @@ -438,8 +566,17 @@ def _match_version_flavor(self, op, flavor, client_version): return is_client_flavor_larger elif op == '>=': return is_equal or is_client_flavor_larger + else: + raise Exception( + 'Unsupported comparison operator \'%s\' for %s filter, ' + 'expected one of %s.' % ( + op, self._type, self.SUPPORTED_OP_FOR_FILTERS[self._type])) - def _is_first_flavor_smaller(self, flavor_a, flavor_b): + def _is_first_flavor_smaller( + self, + flavor_a: str, + flavor_b: str + ) -> bool: """Compares two version flavors, return True if the first version is smaller in the following ordering: 'test' < 'alpha' < 'beta' < 'release'. 
@@ -457,15 +594,26 @@ def _is_first_flavor_smaller(self, flavor_a, flavor_b): ) +class PlatformParameterRuleDict(TypedDict): + """Dictionary representing the PlatformParameterRule object.""" + + filters: List[PlatformParameterFilterDict] + value_when_matched: PlatformDataTypes + + class PlatformParameterRule: """Domain object for rules in platform parameters.""" - def __init__(self, filters, value_when_matched): + def __init__( + self, + filters: List[PlatformParameterFilter], + value_when_matched: PlatformDataTypes + ) -> None: self._filters = filters self._value_when_matched = value_when_matched @property - def filters(self): + def filters(self) -> List[PlatformParameterFilter]: """Returns the filters of the rule. Returns: @@ -474,7 +622,7 @@ def filters(self): return self._filters @property - def value_when_matched(self): + def value_when_matched(self) -> PlatformDataTypes: """Returns the value outcome if this rule is matched. Returns: @@ -482,7 +630,7 @@ def value_when_matched(self): """ return self._value_when_matched - def evaluate(self, context): + def evaluate(self, context: EvaluationContext) -> bool: """Tries to match the given context with the rule against its filter(s). A rule is matched when all its filters are matched. @@ -496,7 +644,7 @@ def evaluate(self, context): filter_domain.evaluate(context) for filter_domain in self._filters) - def has_server_mode_filter(self): + def has_server_mode_filter(self) -> bool: """Checks if the rule has a filter with type 'server_mode'. Returns: @@ -506,7 +654,7 @@ def has_server_mode_filter(self): filter_domain.type == 'server_mode' for filter_domain in self._filters) - def to_dict(self): + def to_dict(self) -> PlatformParameterRuleDict: """Returns a dict representation of the PlatformParameterRule domain object. 
@@ -520,13 +668,15 @@ def to_dict(self): 'value_when_matched': self._value_when_matched, } - def validate(self): + def validate(self) -> None: """Validates the PlatformParameterRule domain object.""" for filter_domain_object in self._filters: filter_domain_object.validate() @classmethod - def from_dict(cls, rule_dict): + def from_dict( + cls, rule_dict: PlatformParameterRuleDict + ) -> PlatformParameterRule: """Returns an PlatformParameterRule object from a dict. Args: @@ -545,20 +695,43 @@ def from_dict(cls, rule_dict): ) +class PlatformParameterDict(TypedDict): + """Dictionary representing the PlatformParameter object.""" + + name: str + description: str + data_type: str + rules: List[PlatformParameterRuleDict] + rule_schema_version: int + default_value: PlatformDataTypes + is_feature: bool + feature_stage: Optional[str] + + class PlatformParameter: """Domain object for platform parameters.""" - DATA_TYPE_PREDICATES_DICT = { - DATA_TYPES.bool.value: lambda x: isinstance(x, bool), - DATA_TYPES.string.value: lambda x: isinstance(x, str), - DATA_TYPES.number.value: lambda x: isinstance(x, (float, int)), + DATA_TYPE_PREDICATES_DICT: ( + Dict[str, Callable[[PlatformDataTypes], bool]] + ) = { + DataTypes.BOOL.value: lambda x: isinstance(x, bool), + DataTypes.STRING.value: lambda x: isinstance(x, str), + DataTypes.NUMBER.value: lambda x: isinstance(x, (float, int)), } - PARAMETER_NAME_REGEXP = r'^[A-Za-z0-9_]{1,100}$' + PARAMETER_NAME_REGEXP: Final = r'^[A-Za-z0-9_]{1,100}$' def __init__( - self, name, description, data_type, rules, - rule_schema_version, default_value, is_feature, feature_stage): + self, + name: str, + description: str, + data_type: str, + rules: List[PlatformParameterRule], + rule_schema_version: int, + default_value: PlatformDataTypes, + is_feature: bool, + feature_stage: Optional[str] + ) -> None: self._name = name self._description = description self._data_type = data_type @@ -569,7 +742,7 @@ def __init__( self._feature_stage = feature_stage 
@property - def name(self): + def name(self) -> str: """Returns the name of the platform parameter. Returns: @@ -578,7 +751,7 @@ def name(self): return self._name @property - def description(self): + def description(self) -> str: """Returns the description of the platform parameter. Returns: @@ -587,7 +760,7 @@ def description(self): return self._description @property - def data_type(self): + def data_type(self) -> str: """Returns the data type of the platform parameter. Returns: @@ -596,7 +769,7 @@ def data_type(self): return self._data_type @property - def rules(self): + def rules(self) -> List[PlatformParameterRule]: """Returns the rules of the platform parameter. Returns: @@ -604,7 +777,7 @@ def rules(self): """ return self._rules - def set_rules(self, new_rules): + def set_rules(self, new_rules: List[PlatformParameterRule]) -> None: """Sets the rules of the PlatformParameter. Args: @@ -614,7 +787,7 @@ def set_rules(self, new_rules): self._rules = new_rules @property - def rule_schema_version(self): + def rule_schema_version(self) -> int: """Returns the schema version of the rules. Returns: @@ -623,7 +796,7 @@ def rule_schema_version(self): return self._rule_schema_version @property - def default_value(self): + def default_value(self) -> PlatformDataTypes: """Returns the default value of the platform parameter. Returns: @@ -632,7 +805,7 @@ def default_value(self): return self._default_value @property - def is_feature(self): + def is_feature(self) -> bool: """Returns whether this parameter is also a feature flag. Returns: @@ -641,16 +814,16 @@ def is_feature(self): return self._is_feature @property - def feature_stage(self): + def feature_stage(self) -> Optional[str]: """Returns the stage of the feature flag. Returns: - FEATURE_STAGES|None. The stage of the feature flag, None if the + FeatureStages|None. The stage of the feature flag, None if the parameter isn't a feature flag. 
""" return self._feature_stage - def validate(self): + def validate(self) -> None: """Validates the PlatformParameter domain object.""" if re.match(self.PARAMETER_NAME_REGEXP, self._name) is None: raise utils.ValidationError( @@ -679,7 +852,9 @@ def validate(self): if self._is_feature: self._validate_feature_flag() - def evaluate(self, context): + def evaluate( + self, context: EvaluationContext + ) -> PlatformDataTypes: """Evaluates the value of the platform parameter in the given context. The value of first matched rule is returned as the result. @@ -700,7 +875,7 @@ def evaluate(self, context): return rule.value_when_matched return self._default_value - def to_dict(self): + def to_dict(self) -> PlatformParameterDict: """Returns a dict representation of the PlatformParameter domain object. @@ -718,11 +893,11 @@ def to_dict(self): 'feature_stage': self._feature_stage } - def _validate_feature_flag(self): + def _validate_feature_flag(self) -> None: """Validates the PlatformParameter domain object that is a feature flag. """ - if self._data_type != DATA_TYPES.bool.value: + if self._data_type != DataTypes.BOOL.value: raise utils.ValidationError( 'Data type of feature flags must be bool, got \'%s\' ' 'instead.' 
% self._data_type) @@ -742,21 +917,22 @@ def _validate_feature_flag(self): for server_mode_filter in server_mode_filters: server_modes = [ value for _, value in server_mode_filter.conditions] - if self._feature_stage == FEATURE_STAGES.dev.value: + if self._feature_stage == FeatureStages.DEV.value: if ( - SERVER_MODES.test.value in server_modes or - SERVER_MODES.prod.value in server_modes): + ServerMode.TEST.value in server_modes or + ServerMode.PROD.value in server_modes + ): raise utils.ValidationError( 'Feature in dev stage cannot be enabled in test or' ' production environments.') - elif self._feature_stage == FEATURE_STAGES.test.value: - if SERVER_MODES.prod.value in server_modes: + elif self._feature_stage == FeatureStages.TEST.value: + if ServerMode.PROD.value in server_modes: raise utils.ValidationError( 'Feature in test stage cannot be enabled in ' 'production environment.') @classmethod - def from_dict(cls, param_dict): + def from_dict(cls, param_dict: PlatformParameterDict) -> PlatformParameter: """Returns an PlatformParameter object from a dict. Args: @@ -766,6 +942,9 @@ def from_dict(cls, param_dict): Returns: PlatformParameter. The corresponding PlatformParameter domain object. + + Raises: + Exception. Given schema version is not supported. """ if (param_dict['rule_schema_version'] != feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION): @@ -795,7 +974,7 @@ def from_dict(cls, param_dict): param_dict['feature_stage'], ) - def serialize(self): + def serialize(self) -> str: """Returns the object serialized as a JSON string. Returns: @@ -806,7 +985,7 @@ def serialize(self): return json.dumps(platform_parameter_dict) @classmethod - def deserialize(cls, json_string): + def deserialize(cls, json_string: str) -> PlatformParameter: """Returns a PlatformParameter domain object decoded from a JSON string. 
diff --git a/core/domain/platform_parameter_domain_test.py b/core/domain/platform_parameter_domain_test.py index ba4eb9ad3720..24bbe7594c33 100644 --- a/core/domain/platform_parameter_domain_test.py +++ b/core/domain/platform_parameter_domain_test.py @@ -25,27 +25,36 @@ from core.domain import platform_parameter_domain as parameter_domain from core.tests import test_utils -SERVER_MODES = parameter_domain.SERVER_MODES +from typing import Dict, Final, List, Optional, Union + +ServerMode = parameter_domain.ServerMode class PlatformParameterChangeTests(test_utils.GenericTestBase): """Test for the PlatformParameterChange class.""" - CMD_EDIT_RULES = parameter_domain.PlatformParameterChange.CMD_EDIT_RULES + CMD_EDIT_RULES: Final = ( + parameter_domain.PlatformParameterChange.CMD_EDIT_RULES + ) - def test_param_change_object_with_missing_cmd_raises_exception(self): - with self.assertRaisesRegexp( + def test_param_change_object_with_missing_cmd_raises_exception( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Missing cmd key in change dict'): parameter_domain.PlatformParameterChange({'invalid': 'data'}) - def test_param_change_object_with_invalid_cmd_raises_exception(self): - with self.assertRaisesRegexp( + def test_param_change_object_with_invalid_cmd_raises_exception( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Command invalid is not allowed'): parameter_domain.PlatformParameterChange({'cmd': 'invalid'}) def test_param_change_object_missing_attribute_in_cmd_raises_exception( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'The following required attributes are missing: new_rules'): parameter_domain.PlatformParameterChange({ @@ -53,30 +62,33 @@ def test_param_change_object_missing_attribute_in_cmd_raises_exception( }) def test_param_change_object_with_extra_attribute_in_cmd_raises_exception( - self): - with self.assertRaisesRegexp( + self + ) -> 
None: + param_change_dict: Dict[str, Union[str, List[str]]] = { + 'cmd': self.CMD_EDIT_RULES, + 'new_rules': [], + 'invalid': 'invalid' + } + with self.assertRaisesRegex( utils.ValidationError, 'The following extra attributes are present: invalid'): - parameter_domain.PlatformParameterChange({ - 'cmd': self.CMD_EDIT_RULES, - 'new_rules': [], - 'invalid': 'invalid' - }) + parameter_domain.PlatformParameterChange(param_change_dict) - def test_param_change_object_with_valid_data_success(self): + def test_param_change_object_with_valid_data_success(self) -> None: + param_change_dict: Dict[str, Union[str, List[str]]] = { + 'cmd': self.CMD_EDIT_RULES, + 'new_rules': [] + } param_change_object = ( - parameter_domain.PlatformParameterChange({ - 'cmd': self.CMD_EDIT_RULES, - 'new_rules': [] - })) - + parameter_domain.PlatformParameterChange(param_change_dict) + ) self.assertEqual( param_change_object.cmd, self.CMD_EDIT_RULES) self.assertEqual( param_change_object.new_rules, []) - def test_to_dict_returns_correct_dict(self): - param_change_dict = { + def test_to_dict_returns_correct_dict(self) -> None: + param_change_dict: Dict[str, Union[str, List[str]]] = { 'cmd': self.CMD_EDIT_RULES, 'new_rules': [] } @@ -90,7 +102,7 @@ def test_to_dict_returns_correct_dict(self): class EvaluationContextTests(test_utils.GenericTestBase): """Test for the EvaluationContext.""" - def test_create_context_from_dict_returns_correct_instance(self): + def test_create_context_from_dict_returns_correct_instance(self) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Android', @@ -98,15 +110,15 @@ def test_create_context_from_dict_returns_correct_instance(self): 'app_version': '1.0.0', }, { - 'server_mode': 'dev', + 'server_mode': ServerMode.DEV, }, ) self.assertEqual(context.platform_type, 'Android') self.assertEqual(context.browser_type, None) self.assertEqual(context.app_version, '1.0.0') - self.assertEqual(context.server_mode, 'dev') + 
self.assertEqual(context.server_mode, ServerMode.DEV) - def test_is_valid_with_invalid_platform_type_returns_false(self): + def test_is_valid_with_invalid_platform_type_returns_false(self) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'invalid', @@ -114,12 +126,12 @@ def test_is_valid_with_invalid_platform_type_returns_false(self): 'app_version': '1.0.0', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) self.assertFalse(context.is_valid) - def test_is_valid_with_valid_android_context_returns_true(self): + def test_is_valid_with_valid_android_context_returns_true(self) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Android', @@ -127,36 +139,38 @@ def test_is_valid_with_valid_android_context_returns_true(self): 'app_version': '1.0.0', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) self.assertTrue(context.is_valid) - def test_is_valid_with_valid_web_context_returns_true(self): + def test_is_valid_with_valid_web_context_returns_true(self) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Web', 'browser_type': 'Chrome', + 'app_version': None, }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) self.assertTrue(context.is_valid) - def test_is_valid_with_valid_backend_context_returns_true(self): + def test_is_valid_with_valid_backend_context_returns_true(self) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Backend', + 'browser_type': None, 'app_version': '3.0.0', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) self.assertTrue(context.is_valid) - def test_validate_with_valid_context_passes_without_exception(self): + def test_validate_with_valid_context_passes_without_exception(self) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Android', @@ -164,12 +178,14 @@ def 
test_validate_with_valid_context_passes_without_exception(self): 'app_version': '1.0.0', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) context.validate() - def test_validate_with_invalid_platform_type_does_not_raise_exception(self): + def test_validate_with_invalid_platform_type_does_not_raise_exception( + self + ) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'invalid', @@ -177,14 +193,14 @@ def test_validate_with_invalid_platform_type_does_not_raise_exception(self): 'app_version': '1.0.0', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) # No exception should be raised since invalid platform types are # ignored. context.validate() - def test_validate_with_invalid_browser_type_raises_exception(self): + def test_validate_with_invalid_browser_type_raises_exception(self) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Web', @@ -192,14 +208,14 @@ def test_validate_with_invalid_browser_type_raises_exception(self): 'app_version': '1.0.0', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid browser type \'Invalid\''): context.validate() - def test_validate_with_invalid_app_version_raises_exception(self): + def test_validate_with_invalid_app_version_raises_exception(self) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Android', @@ -207,15 +223,16 @@ def test_validate_with_invalid_app_version_raises_exception(self): 'app_version': 'a.a.a', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid version \'a.a.a\''): context.validate() def test_validate_with_invalid_app_sub_version_numbers_raises_exception( - self): + self + ) -> None: context = 
parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Android', @@ -223,14 +240,16 @@ def test_validate_with_invalid_app_sub_version_numbers_raises_exception( 'app_version': '1.0.0.0', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid version \'1.0.0.0\''): context.validate() - def test_validate_with_invalid_app_version_flavor_raises_exception(self): + def test_validate_with_invalid_app_version_flavor_raises_exception( + self + ) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Android', @@ -238,15 +257,15 @@ def test_validate_with_invalid_app_version_flavor_raises_exception(self): 'app_version': '1.0.0-abcedef-invalid', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid version flavor \'invalid\''): context.validate() - def test_validate_with_invalid_server_mode_raises_exception(self): - MockEnum = collections.namedtuple('Enum', ['value']) + def test_validate_with_invalid_server_mode_raises_exception(self) -> None: + MockEnum = collections.namedtuple('MockEnum', ['value']) mock_enum = MockEnum('invalid') context = parameter_domain.EvaluationContext.from_dict( { @@ -255,10 +274,15 @@ def test_validate_with_invalid_server_mode_raises_exception(self): 'app_version': '1.0.0', }, { - 'server_mode': mock_enum, + # Here we use MyPy ignore because the expected type of + # 'server_mode' key is Enum defined under the name ServerMode, + # but for testing purpose we are providing namedtuple (MockEnum) + # which causes MyPy to throw error. Thus to avoid the error, + # we used ignore here. 
+ 'server_mode': mock_enum, # type: ignore[typeddict-item] }, ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid server mode \'invalid\'' ): context.validate() @@ -268,8 +292,12 @@ class PlatformParameterFilterTests(test_utils.GenericTestBase): """Test for the PlatformParameterFilter.""" def _create_example_context( - self, platform_type='Android', browser_type=None, - app_version='1.2.3', mode='dev'): + self, + platform_type: str = 'Android', + browser_type: Optional[str] = None, + app_version: Optional[str] = '1.2.3', + mode: str = 'DEV' + ) -> parameter_domain.EvaluationContext: """Creates and returns an EvaluationContext using the given arguments. """ @@ -280,77 +308,105 @@ def _create_example_context( 'app_version': app_version, }, { - 'server_mode': getattr(SERVER_MODES, mode), + 'server_mode': getattr(ServerMode, mode), }, ) - def _test_flavor_relation_holds(self, version, op, flavor_b): + def _test_flavor_relation_holds( + self, + version: str, + op: str, + flavor_b: str + ) -> None: """Helper method to test relation 'flavor_a flavor_b' hold, where flavor_a is the flavor of the argument 'version'. """ filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict( - {'type': 'app_version_flavor', 'conditions': [(op, flavor_b)]} + {'type': 'app_version_flavor', 'conditions': [[op, flavor_b]]} ) ) self.assertTrue(filter_domain.evaluate( self._create_example_context( app_version=version))) - def _test_flavor_relation_does_not_hold(self, version, op, flavor_b): + def _test_flavor_relation_does_not_hold( + self, + version: str, + op: str, + flavor_b: str + ) -> None: """Helper method to test relation 'flavor_a flavor_b' doesn't holds, where flavor_a is the flavor of the argument 'version'. 
""" filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict( - {'type': 'app_version_flavor', 'conditions': [(op, flavor_b)]} + {'type': 'app_version_flavor', 'conditions': [[op, flavor_b]]} ) ) self.assertFalse(filter_domain.evaluate( self._create_example_context( app_version=version))) - def test_create_from_dict_returns_correct_instance(self): - filter_dict = {'type': 'app_version', 'conditions': [('=', '1.2.3')]} + def test_create_from_dict_returns_correct_instance(self) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) self.assertEqual(filter_domain.type, 'app_version') - self.assertEqual(filter_domain.conditions, [('=', '1.2.3')]) + self.assertEqual(filter_domain.conditions, [['=', '1.2.3']]) - def test_to_dict_returns_correct_dict(self): - filter_dict = {'type': 'app_version', 'conditions': [('=', '1.2.3')]} + def test_to_dict_returns_correct_dict(self) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) self.assertEqual(filter_domain.to_dict(), filter_dict) - def test_evaluate_dev_server_mode_filter_with_dev_env_returns_true(self): - filter_dict = {'type': 'server_mode', 'conditions': [('=', 'dev')]} + def test_evaluate_dev_server_mode_filter_with_dev_env_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'server_mode', + 'conditions': [['=', 'dev']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) - dev_context = self._create_example_context(mode='dev') + dev_context = self._create_example_context(mode='DEV') self.assertTrue(filter_domain.evaluate(dev_context)) - def 
test_evaluate_dev_server_mode_filter_with_prod_env_returns_false(self): - filter_dict = {'type': 'server_mode', 'conditions': [('=', 'dev')]} + def test_evaluate_dev_server_mode_filter_with_prod_env_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'server_mode', + 'conditions': [['=', 'dev']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) - prod_context = self._create_example_context(mode='prod') + prod_context = self._create_example_context(mode='PROD') self.assertFalse(filter_domain.evaluate(prod_context)) - def test_eval_backend_client_filter_with_backend_client_returns_true(self): - filter_dict = { + def test_eval_backend_client_filter_with_backend_client_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { 'type': 'platform_type', - 'conditions': [('=', 'Backend')] + 'conditions': [['=', 'Backend']] } filter_domain = ( parameter_domain @@ -359,8 +415,13 @@ def test_eval_backend_client_filter_with_backend_client_returns_true(self): web_context = self._create_example_context(platform_type='Backend') self.assertTrue(filter_domain.evaluate(web_context)) - def test_evaluate_web_client_filter_with_web_client_returns_true(self): - filter_dict = {'type': 'platform_type', 'conditions': [('=', 'Web')]} + def test_evaluate_web_client_filter_with_web_client_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'platform_type', + 'conditions': [['=', 'Web']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -368,8 +429,13 @@ def test_evaluate_web_client_filter_with_web_client_returns_true(self): web_context = self._create_example_context(platform_type='Web') self.assertTrue(filter_domain.evaluate(web_context)) - def test_evaluate_web_client_filter_with_native_client_returns_false(self): - filter_dict = {'type': 'platform_type', 
'conditions': [('=', 'Web')]} + def test_evaluate_web_client_filter_with_native_client_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'platform_type', + 'conditions': [['=', 'Web']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -377,8 +443,13 @@ def test_evaluate_web_client_filter_with_native_client_returns_false(self): native_context = self._create_example_context(platform_type='Android') self.assertFalse(filter_domain.evaluate(native_context)) - def test_evaluate_chrome_browser_filter_with_chrome_returns_true(self): - filter_dict = {'type': 'browser_type', 'conditions': [('=', 'Chrome')]} + def test_evaluate_chrome_browser_filter_with_chrome_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'browser_type', + 'conditions': [['=', 'Chrome']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -386,8 +457,13 @@ def test_evaluate_chrome_browser_filter_with_chrome_returns_true(self): chrome_context = self._create_example_context(browser_type='Chrome') self.assertTrue(filter_domain.evaluate(chrome_context)) - def test_evaluate_chrome_browser_filter_with_firefox_returns_false(self): - filter_dict = {'type': 'browser_type', 'conditions': [('=', 'Chrome')]} + def test_evaluate_chrome_browser_filter_with_firefox_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'browser_type', + 'conditions': [['=', 'Chrome']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -395,24 +471,39 @@ def test_evaluate_chrome_browser_filter_with_firefox_returns_false(self): firefox_context = self._create_example_context(browser_type='Firefox') self.assertFalse(filter_domain.evaluate(firefox_context)) - def test_evaluate_eq_version_filter_with_same_version_returns_true(self): - filter_dict = 
{'type': 'app_version', 'conditions': [('=', '1.2.3')]} + def test_evaluate_eq_version_filter_with_same_version_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) self.assertTrue(filter_domain.evaluate( self._create_example_context(app_version='1.2.3'))) - def test_evaluate_eq_version_filter_with_diff_version_returns_false(self): - filter_dict = {'type': 'app_version', 'conditions': [('=', '1.2.3')]} + def test_evaluate_eq_version_filter_with_diff_version_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) self.assertFalse(filter_domain.evaluate( self._create_example_context(app_version='1.2.4'))) - def test_evaluate_gt_version_filter_with_small_version_returns_false(self): - filter_dict = {'type': 'app_version', 'conditions': [('>', '1.2.3')]} + def test_evaluate_gt_version_filter_with_small_version_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['>', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -421,16 +512,26 @@ def test_evaluate_gt_version_filter_with_small_version_returns_false(self): self.assertFalse(filter_domain.evaluate( self._create_example_context(app_version='1.1.2'))) - def test_evaluate_gt_version_filter_with_same_version_returns_false(self): - filter_dict = {'type': 'app_version', 'conditions': [('>', '1.2.3')]} + def test_evaluate_gt_version_filter_with_same_version_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['>', '1.2.3']] + } 
filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) self.assertFalse(filter_domain.evaluate( self._create_example_context(app_version='1.2.3'))) - def test_evaluate_gt_version_filter_with_large_version_returns_true(self): - filter_dict = {'type': 'app_version', 'conditions': [('>', '1.2.3')]} + def test_evaluate_gt_version_filter_with_large_version_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['>', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -441,8 +542,13 @@ def test_evaluate_gt_version_filter_with_large_version_returns_true(self): self.assertTrue(filter_domain.evaluate( self._create_example_context(app_version='2.0.0'))) - def test_evaluate_gte_version_filter_with_small_version_returns_false(self): - filter_dict = {'type': 'app_version', 'conditions': [('>=', '1.2.3')]} + def test_evaluate_gte_version_filter_with_small_version_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['>=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -451,16 +557,26 @@ def test_evaluate_gte_version_filter_with_small_version_returns_false(self): self.assertFalse(filter_domain.evaluate( self._create_example_context(app_version='1.1.2'))) - def test_evaluate_gte_version_filter_with_same_version_returns_true(self): - filter_dict = {'type': 'app_version', 'conditions': [('>=', '1.2.3')]} + def test_evaluate_gte_version_filter_with_same_version_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['>=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) self.assertTrue(filter_domain.evaluate( 
self._create_example_context(app_version='1.2.3'))) - def test_evaluate_gte_version_filter_with_large_version_returns_true(self): - filter_dict = {'type': 'app_version', 'conditions': [('>=', '1.2.3')]} + def test_evaluate_gte_version_filter_with_large_version_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['>=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -471,8 +587,13 @@ def test_evaluate_gte_version_filter_with_large_version_returns_true(self): self.assertTrue(filter_domain.evaluate( self._create_example_context(app_version='2.0.0'))) - def test_evaluate_lt_version_filter_with_small_version_returns_true(self): - filter_dict = {'type': 'app_version', 'conditions': [('<', '1.2.3')]} + def test_evaluate_lt_version_filter_with_small_version_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['<', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -485,16 +606,26 @@ def test_evaluate_lt_version_filter_with_small_version_returns_true(self): self.assertTrue(filter_domain.evaluate( self._create_example_context(app_version='1.2.2'))) - def test_evaluate_lt_version_filter_with_same_version_returns_false(self): - filter_dict = {'type': 'app_version', 'conditions': [('<', '1.2.3')]} + def test_evaluate_lt_version_filter_with_same_version_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['<', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) self.assertFalse(filter_domain.evaluate( self._create_example_context(app_version='1.2.3'))) - def test_evaluate_lt_version_filter_with_large_version_returns_false(self): - filter_dict = {'type': 'app_version', 
'conditions': [('<', '1.2.3')]} + def test_evaluate_lt_version_filter_with_large_version_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['<', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -507,8 +638,13 @@ def test_evaluate_lt_version_filter_with_large_version_returns_false(self): self.assertFalse(filter_domain.evaluate( self._create_example_context(app_version='2.0.0'))) - def test_evaluate_lte_version_filter_with_small_version_returns_true(self): - filter_dict = {'type': 'app_version', 'conditions': [('<=', '1.2.3')]} + def test_evaluate_lte_version_filter_with_small_version_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['<=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -519,16 +655,26 @@ def test_evaluate_lte_version_filter_with_small_version_returns_true(self): self.assertTrue(filter_domain.evaluate( self._create_example_context(app_version='1.2.2'))) - def test_evaluate_lte_version_filter_with_same_version_returns_true(self): - filter_dict = {'type': 'app_version', 'conditions': [('<=', '1.2.3')]} + def test_evaluate_lte_version_filter_with_same_version_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['<=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) self.assertTrue(filter_domain.evaluate( self._create_example_context(app_version='1.2.3'))) - def test_evaluate_lte_version_filter_with_large_version_returns_false(self): - filter_dict = {'type': 'app_version', 'conditions': [('<=', '1.2.3')]} + def test_evaluate_lte_version_filter_with_large_version_returns_false( + self + ) -> None: + filter_dict: 
parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['<=', '1.2.3']] + } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) @@ -541,534 +687,638 @@ def test_evaluate_lte_version_filter_with_large_version_returns_false(self): self.assertFalse(filter_domain.evaluate( self._create_example_context(app_version='2.0.0'))) - def test_evaluate_test_version_with_eq_test_cond_returns_true( - self): + def test_evaluate_test_version_with_eq_test_cond_returns_true(self) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-test', '=', 'test') def test_evaluate_test_version_with_eq_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '=', 'alpha') def test_evaluate_test_version_with_eq_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '=', 'beta') def test_evaluate_test_version_with_eq_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '=', 'release') def test_evaluate_test_version_with_lt_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '<', 'test') def test_evaluate_test_version_with_lt_alpha_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-test', '<', 'alpha') def test_evaluate_test_version_with_lt_beta_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-test', '<', 'beta') def test_evaluate_test_version_with_lt_release_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-test', '<', 'release') def test_evaluate_test_version_with_lte_test_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-test', '<=', 'test') def 
test_evaluate_test_version_with_lte_alpha_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-test', '<=', 'alpha') def test_evaluate_test_version_with_lte_beta_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-test', '<=', 'beta') def test_evaluate_test_version_with_lte_release_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-test', '<=', 'release') def test_evaluate_test_version_with_gt_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '>', 'test') def test_evaluate_test_version_with_gt_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '>', 'alpha') def test_evaluate_test_version_with_gt_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '>', 'beta') def test_evaluate_test_version_with_gt_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '>', 'release') def test_evaluate_test_version_with_gte_test_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-test', '>=', 'test') def test_evaluate_test_version_with_gte_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '>=', 'alpha') def test_evaluate_test_version_with_gte_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '>=', 'beta') def test_evaluate_test_version_with_gte_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-test', '>=', 'release') def test_evaluate_alpha_version_with_eq_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( 
'1.0.0-abcdef-alpha', '=', 'test') def test_evaluate_alpha_version_with_eq_alpha_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-alpha', '=', 'alpha') def test_evaluate_alpha_version_with_eq_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '=', 'beta') def test_evaluate_alpha_version_with_eq_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '=', 'release') def test_evaluate_alpha_version_with_lt_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '<', 'test') def test_evaluate_alpha_version_with_lt_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '<', 'alpha') def test_evaluate_alpha_version_with_lt_beta_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-alpha', '<', 'beta') def test_evaluate_alpha_version_with_lt_release_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-alpha', '<', 'release') def test_evaluate_alpha_version_with_lte_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '<=', 'test') def test_evaluate_alpha_version_with_lte_alpha_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-alpha', '<=', 'alpha') def test_evaluate_alpha_version_with_lte_beta_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-alpha', '<=', 'beta') def test_evaluate_alpha_version_with_lte_release_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-alpha', '<=', 'release') def test_evaluate_alpha_version_with_gt_test_cond_returns_true( - self): + self + ) -> None: 
self._test_flavor_relation_holds( '1.0.0-abcdef-alpha', '>', 'test') def test_evaluate_alpha_version_with_gt_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '>', 'alpha') def test_evaluate_alpha_version_with_gt_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '>', 'beta') def test_evaluate_alpha_version_with_gt_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '>', 'release') def test_evaluate_alpha_version_with_gte_test_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-alpha', '>=', 'test') def test_evaluate_alpha_version_with_gte_alpha_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-alpha', '>=', 'alpha') def test_evaluate_alpha_version_with_gte_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '>=', 'beta') def test_evaluate_alpha_version_with_gte_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-alpha', '>=', 'release') def test_evaluate_beta_version_with_eq_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '=', 'test') def test_evaluate_beta_version_with_eq_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '=', 'alpha') def test_evaluate_beta_version_with_eq_beta_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-beta', '=', 'beta') def test_evaluate_beta_version_with_eq_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '=', 'release') def 
test_evaluate_beta_version_with_lt_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '<', 'test') def test_evaluate_beta_version_with_lt_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '<', 'alpha') def test_evaluate_beta_version_with_lt_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '<', 'beta') def test_evaluate_beta_version_with_lt_release_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-beta', '<', 'release') def test_evaluate_beta_version_with_lte_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '<=', 'test') def test_evaluate_beta_version_with_lte_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '<=', 'alpha') def test_evaluate_beta_version_with_lte_beta_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-beta', '<=', 'beta') def test_evaluate_beta_version_with_lte_release_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-beta', '<=', 'release') def test_evaluate_beta_version_with_gt_test_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-beta', '>', 'test') def test_evaluate_beta_version_with_gt_alpha_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-beta', '>', 'alpha') def test_evaluate_beta_version_with_gt_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '>', 'beta') def test_evaluate_beta_version_with_gt_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '>', 
'release') def test_evaluate_beta_version_with_gte_test_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-beta', '>=', 'test') def test_evaluate_beta_version_with_gte_alpha_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-beta', '>=', 'alpha') def test_evaluate_beta_version_with_gte_beta_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-beta', '>=', 'beta') def test_evaluate_beta_version_with_gte_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-beta', '>=', 'release') def test_evaluate_release_version_with_eq_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '=', 'test') def test_evaluate_release_version_with_eq_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '=', 'alpha') def test_evaluate_release_version_with_eq_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '=', 'beta') def test_evaluate_release_version_with_eq_release_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-release', '=', 'release') def test_evaluate_release_version_with_lt_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '<', 'test') def test_evaluate_release_version_with_lt_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '<', 'alpha') def test_evaluate_release_version_with_lt_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '<', 'beta') def test_evaluate_release_version_with_lt_release_cond_returns_false( - self): + self + ) -> 
None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '<', 'release') def test_evaluate_release_version_with_lte_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '<=', 'test') def test_evaluate_release_version_with_lte_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '<=', 'alpha') def test_evaluate_release_version_with_lte_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '<=', 'beta') def test_evaluate_release_version_with_lte_release_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-release', '<=', 'release') def test_evaluate_release_version_with_gt_test_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-release', '>', 'test') def test_evaluate_release_version_with_gt_alpha_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-release', '>', 'alpha') def test_evaluate_release_version_with_gt_beta_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-release', '>', 'beta') def test_evaluate_release_version_with_gt_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef-release', '>', 'release') def test_evaluate_release_version_with_gte_test_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-release', '>=', 'test') def test_evaluate_release_version_with_gte_alpha_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-release', '>=', 'alpha') def test_evaluate_release_version_with_gte_beta_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-release', '>=', 'beta') def 
test_evaluate_release_version_with_gte_release_cond_returns_true( - self): + self + ) -> None: self._test_flavor_relation_holds( '1.0.0-abcdef-release', '>=', 'release') def test_evaluate_unspecified_version_with_eq_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '=', 'test') def test_evaluate_unspecified_version_with_eq_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '=', 'alpha') def test_evaluate_unspecified_version_with_eq_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '=', 'beta') def test_evaluate_unspecified_version_with_eq_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '=', 'release') def test_evaluate_unspecified_version_with_lt_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '<', 'test') def test_evaluate_unspecified_version_with_lt_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '<', 'alpha') def test_evaluate_unspecified_version_with_lt_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '<', 'beta') def test_evaluate_unspecified_version_with_lt_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '<', 'release') def test_evaluate_unspecified_version_with_lte_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '<=', 'test') def test_evaluate_unspecified_version_with_lte_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '<=', 'alpha') def test_evaluate_unspecified_version_with_lte_beta_cond_returns_false( - self): + self + ) -> 
None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '<=', 'beta') def test_evaluate_unspecified_version_with_lte_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '<=', 'release') def test_evaluate_unspecified_version_with_gt_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '>', 'test') def test_evaluate_unspecified_version_with_gt_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '>', 'alpha') def test_evaluate_unspecified_version_with_gt_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '>', 'beta') def test_evaluate_unspecified_version_with_gt_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '>', 'release') def test_evaluate_unspecified_version_with_gte_test_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '>=', 'test') def test_evaluate_unspecified_version_with_gte_alpha_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '>=', 'alpha') def test_evaluate_unspecified_version_with_gte_beta_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '>=', 'beta') def test_evaluate_unspecified_version_with_gte_release_cond_returns_false( - self): + self + ) -> None: self._test_flavor_relation_does_not_hold( '1.0.0-abcdef', '>=', 'release') - def test_evaluate_multi_value_filter_with_one_matched_returns_true(self): - filter_dict = { + def test_evaluate_multi_value_filter_with_one_matched_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { 'type': 'server_mode', - 'conditions': [('=', 'dev'), ('=', 'prod')] + 'conditions': [['=', 'dev'], 
['=', 'prod']] } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) - dev_context = self._create_example_context(mode='dev') + dev_context = self._create_example_context(mode='DEV') self.assertTrue(filter_domain.evaluate(dev_context)) - def test_evaluate_multi_value_filter_with_none_matched_returns_true(self): - filter_dict = { + def test_evaluate_multi_value_filter_with_none_matched_returns_true( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { 'type': 'server_mode', - 'conditions': [('=', 'dev'), ('=', 'prod')] + 'conditions': [['=', 'dev'], ['=', 'prod']] } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) - test_context = self._create_example_context(mode='test') + test_context = self._create_example_context(mode='TEST') self.assertFalse(filter_domain.evaluate(test_context)) - def test_evaluate_app_version_filter_without_version_returns_false(self): - filter_dict = { + def test_evaluate_app_version_filter_without_version_returns_false( + self + ) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { 'type': 'app_version', - 'conditions': [('=', '1.2.3'), ('=', '1.2.4')] + 'conditions': [['=', '1.2.3'], ['=', '1.2.4']] } filter_domain = parameter_domain.PlatformParameterFilter.from_dict( filter_dict) @@ -1076,85 +1326,120 @@ def test_evaluate_app_version_filter_without_version_returns_false(self): self.assertFalse(filter_domain.evaluate( self._create_example_context(app_version=None))) - def test_evaluate_filter_with_unsupported_operation_raises_exception(self): + def test_evaluate_filter_with_unsupported_operation_raises_exception( + self + ) -> None: filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict( - {'type': 'server_mode', 'conditions': [('!=', 'dev')]} + {'type': 'server_mode', 'conditions': [['!=', 'dev']]} )) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unsupported comparison operator 
\'!=\''): filter_domain.evaluate(self._create_example_context()) - def test_validate_filter_passes_without_exception(self): - filter_dict = { + filter_dict: parameter_domain.PlatformParameterFilterDict = { + 'type': 'app_version', + 'conditions': [['>>', '1.2.3']] + } + filter_domain = parameter_domain.PlatformParameterFilter.from_dict( + filter_dict) + + with self.assertRaisesRegex( + Exception, 'Unsupported comparison operator \'>>\''): + self.assertFalse(filter_domain.evaluate( + self._create_example_context(app_version='1.0.0-abcdef-test'))) + + filter_domain = ( + parameter_domain + .PlatformParameterFilter.from_dict( + {'type': 'app_version_flavor', 'conditions': [['==', 'beta']]} + )) + with self.assertRaisesRegex( + Exception, 'Unsupported comparison operator \'==\''): + filter_domain.evaluate( + self._create_example_context(app_version='1.0.0-abcdef-test') + ) + + def test_validate_filter_passes_without_exception(self) -> None: + filter_dict: parameter_domain.PlatformParameterFilterDict = { 'type': 'server_mode', - 'conditions': [('=', 'dev'), ('=', 'prod')] + 'conditions': [['=', 'dev'], ['=', 'prod']] } filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict(filter_dict)) filter_domain.validate() - def test_validate_filter_with_invalid_type_raises_exception(self): + def test_validate_filter_with_invalid_type_raises_exception(self) -> None: filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict( - {'type': 'invalid', 'conditions': [('=', 'value1')]} + {'type': 'invalid', 'conditions': [['=', 'value1']]} )) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Unsupported filter type \'invalid\''): filter_domain.validate() - def test_validate_filter_with_unsupported_operation_raises_exception(self): + def test_validate_filter_with_unsupported_operation_raises_exception( + self + ) -> None: filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict( - {'type': 'server_mode', 
'conditions': [('!=', 'dev')]} + {'type': 'server_mode', 'conditions': [['!=', 'dev']]} )) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Unsupported comparison operator \'!=\''): filter_domain.validate() - def test_validate_filter_with_invalid_server_mode_raises_exception(self): + def test_validate_filter_with_invalid_server_mode_raises_exception( + self + ) -> None: filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict( - {'type': 'server_mode', 'conditions': [('=', 'invalid')]} + {'type': 'server_mode', 'conditions': [['=', 'invalid']]} )) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid server mode \'invalid\''): filter_domain.validate() - def test_validate_filter_with_invalid_platform_type_raises_exception(self): + def test_validate_filter_with_invalid_platform_type_raises_exception( + self + ) -> None: filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict( - {'type': 'platform_type', 'conditions': [('=', 'invalid')]} + {'type': 'platform_type', 'conditions': [['=', 'invalid']]} )) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid platform type \'invalid\''): filter_domain.validate() - def test_validate_filter_with_invalid_version_expr_raises_exception(self): + def test_validate_filter_with_invalid_version_expr_raises_exception( + self + ) -> None: filter_domain = ( parameter_domain .PlatformParameterFilter.from_dict( - {'type': 'app_version', 'conditions': [('=', '1.a.2')]} + {'type': 'app_version', 'conditions': [['=', '1.a.2']]} )) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid version expression \'1.a.2\''): filter_domain.validate() - def test_validate_filter_with_invalid_version_flavor_raises_exception(self): + def test_validate_filter_with_invalid_version_flavor_raises_exception( + self + ) -> None: filter_domain = ( parameter_domain 
.PlatformParameterFilter.from_dict( - {'type': 'app_version_flavor', 'conditions': [('=', 'invalid')]} + {'type': 'app_version_flavor', 'conditions': [['=', 'invalid']]} )) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid app version flavor \'invalid\''): filter_domain.validate() @@ -1162,15 +1447,15 @@ def test_validate_filter_with_invalid_version_flavor_raises_exception(self): class PlatformParameterRuleTests(test_utils.GenericTestBase): """Test for the PlatformParameterRule.""" - def test_create_from_dict_returns_correct_instance(self): - filters = [ + def test_create_from_dict_returns_correct_instance(self) -> None: + filters: List[parameter_domain.PlatformParameterFilterDict] = [ { 'type': 'app_version', - 'conditions': [('=', '1.2.3')] + 'conditions': [['=', '1.2.3']] }, { 'type': 'server_mode', - 'conditions': [('=', 'dev'), ('=', 'test')] + 'conditions': [['=', 'dev'], ['=', 'test']] } ] rule = parameter_domain.PlatformParameterRule.from_dict( @@ -1186,15 +1471,15 @@ def test_create_from_dict_returns_correct_instance(self): filter_domain, parameter_domain.PlatformParameterFilter) self.assertEqual(len(rule.filters), 2) self.assertEqual(filter_domain.type, 'app_version') - self.assertEqual(filter_domain.conditions, [('=', '1.2.3')]) + self.assertEqual(filter_domain.conditions, [['=', '1.2.3']]) self.assertEqual(rule.value_when_matched, False) - def test_to_dict_returns_correct_dict(self): - rule_dict = { + def test_to_dict_returns_correct_dict(self) -> None: + rule_dict: parameter_domain.PlatformParameterRuleDict = { 'filters': [ { 'type': 'app_version', - 'conditions': [('=', '1.2.3')] + 'conditions': [['=', '1.2.3']] } ], 'value_when_matched': False, @@ -1202,34 +1487,36 @@ def test_to_dict_returns_correct_dict(self): rule = parameter_domain.PlatformParameterRule.from_dict(rule_dict) self.assertEqual(rule.to_dict(), rule_dict) - def test_has_server_mode_filter_with_mode_filter_returns_true(self): + def 
test_has_server_mode_filter_with_mode_filter_returns_true(self) -> None: rule = parameter_domain.PlatformParameterRule.from_dict( { 'filters': [ - {'type': 'server_mode', 'conditions': [('=', 'dev')]} + {'type': 'server_mode', 'conditions': [['=', 'dev']]} ], 'value_when_matched': False, }, ) self.assertTrue(rule.has_server_mode_filter()) - def test_has_server_mode_filter_without_mode_filter_returns_false(self): + def test_has_server_mode_filter_without_mode_filter_returns_false( + self + ) -> None: rule = parameter_domain.PlatformParameterRule.from_dict( { 'filters': [ - {'type': 'app_version', 'conditions': [('=', '1.2.3')]} + {'type': 'app_version', 'conditions': [['=', '1.2.3']]} ], 'value_when_matched': False, }, ) self.assertFalse(rule.has_server_mode_filter()) - def test_evaluation_with_matching_context_returns_true(self): + def test_evaluation_with_matching_context_returns_true(self) -> None: rule = parameter_domain.PlatformParameterRule.from_dict( { 'filters': [ - {'type': 'app_version', 'conditions': [('=', '1.2.3')]}, - {'type': 'platform_type', 'conditions': [('=', 'Android')]}, + {'type': 'app_version', 'conditions': [['=', '1.2.3']]}, + {'type': 'platform_type', 'conditions': [['=', 'Android']]}, ], 'value_when_matched': 'matched_val', }, @@ -1241,17 +1528,17 @@ def test_evaluation_with_matching_context_returns_true(self): 'app_version': '1.2.3', }, { - 'server_mode': 'dev', + 'server_mode': ServerMode.DEV, }, ) self.assertTrue(rule.evaluate(context)) - def test_evaluation_with_unmatching_context_returns_false(self): + def test_evaluation_with_unmatching_context_returns_false(self) -> None: rule = parameter_domain.PlatformParameterRule.from_dict( { 'filters': [ - {'type': 'app_version', 'conditions': [('=', '1.2.3')]}, - {'type': 'platform_type', 'conditions': [('=', 'Web')]}, + {'type': 'app_version', 'conditions': [['=', '1.2.3']]}, + {'type': 'platform_type', 'conditions': [['=', 'Web']]}, ], 'value_when_matched': 'matched_val', }, @@ -1263,15 
+1550,15 @@ def test_evaluation_with_unmatching_context_returns_false(self): 'app_version': '1.2.3', }, { - 'server_mode': 'dev', + 'server_mode': ServerMode.DEV, }, ) self.assertFalse(rule.evaluate(context)) - def test_validate_with_invalid_filter_raises_exception(self): - filters = [ - {'type': 'app_version', 'conditions': [('=', '1.2.3')]}, - {'type': 'invalid', 'conditions': [('=', '1.2.3')]}, + def test_validate_with_invalid_filter_raises_exception(self) -> None: + filters: List[parameter_domain.PlatformParameterFilterDict] = [ + {'type': 'app_version', 'conditions': [['=', '1.2.3']]}, + {'type': 'invalid', 'conditions': [['=', '1.2.3']]}, ] rule = parameter_domain.PlatformParameterRule.from_dict( { @@ -1279,7 +1566,7 @@ def test_validate_with_invalid_filter_raises_exception(self): 'value_when_matched': False, } ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Unsupported filter type \'invalid\''): rule.validate() @@ -1287,7 +1574,7 @@ def test_validate_with_invalid_filter_raises_exception(self): class PlatformParameterTests(test_utils.GenericTestBase): """Test for the PlatformParameter.""" - def test_create_from_dict_returns_correct_instance(self): + def test_create_from_dict_returns_correct_instance(self) -> None: param = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1297,7 +1584,7 @@ def test_create_from_dict_returns_correct_instance(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1322,7 +1609,7 @@ def test_create_from_dict_returns_correct_instance(self): param.rule_schema_version, feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION) - def test_validate_with_invalid_name_raises_exception(self): + def test_validate_with_invalid_name_raises_exception(self) -> None: param = parameter_domain.PlatformParameter.from_dict({ 'name': 'Invalid~Name', 'description': 'for test', 
@@ -1332,7 +1619,7 @@ def test_validate_with_invalid_name_raises_exception(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1344,7 +1631,7 @@ def test_validate_with_invalid_name_raises_exception(self): 'is_feature': False, 'feature_stage': None, }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid parameter name \'%s\'' % param.name): param.validate() @@ -1358,7 +1645,7 @@ def test_validate_with_invalid_name_raises_exception(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1370,12 +1657,12 @@ def test_validate_with_invalid_name_raises_exception(self): 'is_feature': False, 'feature_stage': None, }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid parameter name \'%s\'' % param1.name): param1.validate() - def test_validate_with_long_name_raises_exception(self): + def test_validate_with_long_name_raises_exception(self) -> None: long_name = 'Long_' * 50 + 'Name' param = parameter_domain.PlatformParameter.from_dict({ 'name': long_name, @@ -1386,7 +1673,7 @@ def test_validate_with_long_name_raises_exception(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1398,12 +1685,12 @@ def test_validate_with_long_name_raises_exception(self): 'is_feature': False, 'feature_stage': None, }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid parameter name \'%s\'' % long_name): param.validate() - def test_validate_with_unsupported_data_type_raises_exception(self): + def test_validate_with_unsupported_data_type_raises_exception(self) -> None: param = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1413,7 +1700,7 @@ def 
test_validate_with_unsupported_data_type_raises_exception(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1425,12 +1712,13 @@ def test_validate_with_unsupported_data_type_raises_exception(self): 'is_feature': False, 'feature_stage': None, }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Unsupported data type \'InvalidType\''): param.validate() def test_validate_with_inconsistent_data_type_in_rules_raises_exception( - self): + self + ) -> None: param = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1440,7 +1728,7 @@ def test_validate_with_inconsistent_data_type_in_rules_raises_exception( 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1452,13 +1740,14 @@ def test_validate_with_inconsistent_data_type_in_rules_raises_exception( 'is_feature': False, 'feature_stage': None, }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected bool, received \'222\' in value_when_matched'): param.validate() def test_validate_with_inconsistent_default_value_type_raises_exception( - self): + self + ) -> None: param = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1470,15 +1759,15 @@ def test_validate_with_inconsistent_default_value_type_raises_exception( 'is_feature': False, 'feature_stage': None, }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected bool, received \'111\' in default value'): param.validate() - def test_create_with_old_rule_schema_version_failure(self): + def test_create_with_old_rule_schema_version_failure(self) -> None: with self.swap( feconf, 'CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION', 2): - with self.assertRaisesRegexp( + with 
self.assertRaisesRegex( Exception, 'Current platform parameter rule schema version is v2, ' 'received v1'): @@ -1491,7 +1780,7 @@ def test_create_with_old_rule_schema_version_failure(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1503,8 +1792,8 @@ def test_create_with_old_rule_schema_version_failure(self): 'feature_stage': None, }) - def test_to_dict_returns_correct_dict(self): - param_dict = { + def test_to_dict_returns_correct_dict(self) -> None: + param_dict: parameter_domain.PlatformParameterDict = { 'name': 'parameter_a', 'description': 'for test', 'data_type': 'string', @@ -1513,7 +1802,7 @@ def test_to_dict_returns_correct_dict(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1528,7 +1817,7 @@ def test_to_dict_returns_correct_dict(self): parameter = parameter_domain.PlatformParameter.from_dict(param_dict) self.assertDictEqual(parameter.to_dict(), param_dict) - def test_set_rules_correctly_changes_rules(self): + def test_set_rules_correctly_changes_rules(self) -> None: param = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1538,7 +1827,7 @@ def test_set_rules_correctly_changes_rules(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1547,7 +1836,7 @@ def test_set_rules_correctly_changes_rules(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'test')] + 'conditions': [['=', 'test']] } ], 'value_when_matched': '555' @@ -1559,9 +1848,9 @@ def test_set_rules_correctly_changes_rules(self): 'is_feature': False, 'feature_stage': None }) - new_rule_dict = { + new_rule_dict: parameter_domain.PlatformParameterRuleDict = { 'filters': [ - {'type': 'server_mode', 'conditions': [('=', 'test')]} + {'type': 'server_mode', 
'conditions': [['=', 'test']]} ], 'value_when_matched': 'new rule value', } @@ -1572,7 +1861,7 @@ def test_set_rules_correctly_changes_rules(self): self.assertEqual(len(param.rules), 1) self.assertEqual(param.rules[0].to_dict(), new_rule_dict) - def test_evaluate_with_matched_rule_returns_correct_value(self): + def test_evaluate_with_matched_rule_returns_correct_value(self) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1582,7 +1871,7 @@ def test_evaluate_with_matched_rule_returns_correct_value(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1602,12 +1891,12 @@ def test_evaluate_with_matched_rule_returns_correct_value(self): 'app_version': '1.2.3', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) self.assertEqual(parameter.evaluate(dev_context), '222') - def test_evaluate_without_matched_rule_returns_default_value(self): + def test_evaluate_without_matched_rule_returns_default_value(self) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1617,7 +1906,7 @@ def test_evaluate_without_matched_rule_returns_default_value(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1637,12 +1926,14 @@ def test_evaluate_without_matched_rule_returns_default_value(self): 'app_version': '1.2.3', }, { - 'server_mode': SERVER_MODES.prod, + 'server_mode': ServerMode.PROD, }, ) self.assertEqual(parameter.evaluate(prod_context), '111') - def test_evaluate_matching_feature_invalid_platform_type_returns_def(self): + def test_evaluate_matching_feature_invalid_platform_type_returns_def( + self + ) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1652,7 +1943,7 @@ 
def test_evaluate_matching_feature_invalid_platform_type_returns_def(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1672,12 +1963,14 @@ def test_evaluate_matching_feature_invalid_platform_type_returns_def(self): 'app_version': '1.2.3', }, { - 'server_mode': SERVER_MODES.dev, + 'server_mode': ServerMode.DEV, }, ) self.assertEqual(parameter.evaluate(dev_context), '111') - def test_evaluate_matching_feature_missing_platform_type_returns_def(self): + def test_evaluate_matching_feature_missing_platform_type_returns_def( + self + ) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1687,7 +1980,7 @@ def test_evaluate_matching_feature_missing_platform_type_returns_def(self): 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', 'dev')] + 'conditions': [['=', 'dev']] } ], 'value_when_matched': '222' @@ -1702,16 +1995,17 @@ def test_evaluate_matching_feature_missing_platform_type_returns_def(self): dev_context = parameter_domain.EvaluationContext.from_dict( { + 'platform_type': '', 'browser_type': None, 'app_version': '1.2.3', }, { - 'server_mode': 'dev', + 'server_mode': ServerMode.DEV, }, ) self.assertEqual(parameter.evaluate(dev_context), '111') - def test_validate_feature_passes_without_exception(self): + def test_validate_feature_passes_without_exception(self) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1719,7 +2013,7 @@ def test_validate_feature_passes_without_exception(self): 'rules': [ { 'filters': [ - {'type': 'server_mode', 'conditions': [('=', 'dev')]} + {'type': 'server_mode', 'conditions': [['=', 'dev']]} ], 'value_when_matched': False } @@ -1732,7 +2026,7 @@ def test_validate_feature_passes_without_exception(self): }) parameter.validate() - def test_validate_feature_with_invalid_type_raises_exception(self): + 
def test_validate_feature_with_invalid_type_raises_exception(self) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1744,12 +2038,12 @@ def test_validate_feature_with_invalid_type_raises_exception(self): 'is_feature': True, 'feature_stage': 'dev', }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Data type of feature flags must be bool, got \'string\' instead'): parameter.validate() - def test_validate_feature_with_invalid_stage_raises_exception(self): + def test_validate_feature_with_invalid_stage_raises_exception(self) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1761,11 +2055,13 @@ def test_validate_feature_with_invalid_stage_raises_exception(self): 'is_feature': True, 'feature_stage': 'Invalid', }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid feature stage, got \'Invalid\''): parameter.validate() - def test_validate_feature_with_no_mode_filter_raises_exception(self): + def test_validate_feature_with_no_mode_filter_raises_exception( + self + ) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': 'for test', @@ -1782,11 +2078,11 @@ def test_validate_feature_with_no_mode_filter_raises_exception(self): 'is_feature': True, 'feature_stage': 'dev', }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'must have a server_mode filter'): parameter.validate() - def test_validate_dev_feature_for_test_env_raises_exception(self): + def test_validate_dev_feature_for_test_env_raises_exception(self) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': '', @@ -1794,7 +2090,7 @@ def test_validate_dev_feature_for_test_env_raises_exception(self): 'rules': [ { 'filters': [ - {'type': 'server_mode', 
'conditions': [('=', 'test')]}], + {'type': 'server_mode', 'conditions': [['=', 'test']]}], 'value_when_matched': True } ], @@ -1804,11 +2100,11 @@ def test_validate_dev_feature_for_test_env_raises_exception(self): 'is_feature': True, 'feature_stage': 'dev', }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'cannot be enabled in test or production'): parameter.validate() - def test_validate_dev_feature_for_prod_env_raises_exception(self): + def test_validate_dev_feature_for_prod_env_raises_exception(self) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': '', @@ -1816,7 +2112,7 @@ def test_validate_dev_feature_for_prod_env_raises_exception(self): 'rules': [ { 'filters': [ - {'type': 'server_mode', 'conditions': [('=', 'prod')]}], + {'type': 'server_mode', 'conditions': [['=', 'prod']]}], 'value_when_matched': True } ], @@ -1826,12 +2122,13 @@ def test_validate_dev_feature_for_prod_env_raises_exception(self): 'is_feature': True, 'feature_stage': 'dev', }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'cannot be enabled in test or production'): parameter.validate() def test_validate_test_feature_for_prod_env_raises_exception( - self): + self + ) -> None: parameter = parameter_domain.PlatformParameter.from_dict({ 'name': 'parameter_a', 'description': '', @@ -1839,7 +2136,7 @@ def test_validate_test_feature_for_prod_env_raises_exception( 'rules': [ { 'filters': [ - {'type': 'server_mode', 'conditions': [('=', 'prod')]}], + {'type': 'server_mode', 'conditions': [['=', 'prod']]}], 'value_when_matched': True } ], @@ -1849,12 +2146,13 @@ def test_validate_test_feature_for_prod_env_raises_exception( 'is_feature': True, 'feature_stage': 'test', }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'cannot be enabled in production'): parameter.validate() def 
test_serialize_and_deserialize_returns_unchanged_platform_parameter( - self): + self + ) -> None: """Checks that serializing and then deserializing a default parameter works as intended by leaving the parameter unchanged. """ @@ -1865,7 +2163,10 @@ def test_serialize_and_deserialize_returns_unchanged_platform_parameter( 'rules': [ { 'filters': [ - {'type': 'server_mode', 'conditions': [['=', 'prod']]}], + { + 'type': 'server_mode', 'conditions': [['=', 'prod']] + } + ], 'value_when_matched': True } ], diff --git a/core/domain/platform_parameter_list.py b/core/domain/platform_parameter_list.py index 477d96ef8f9a..8491443f3abe 100644 --- a/core/domain/platform_parameter_list.py +++ b/core/domain/platform_parameter_list.py @@ -18,27 +18,68 @@ from __future__ import annotations -from core import python_utils +import enum + from core.domain import platform_parameter_domain from core.domain import platform_parameter_registry as registry Registry = registry.Registry -FEATURE_STAGES = platform_parameter_domain.FEATURE_STAGES # pylint: disable=invalid-name -DATA_TYPES = platform_parameter_domain.DATA_TYPES # pylint: disable=invalid-name -PARAM_NAMES = python_utils.create_enum( # pylint: disable=invalid-name - 'dummy_feature', 'dummy_parameter') + +class ParamNames(enum.Enum): + """Enum for parameter names.""" + + DUMMY_FEATURE = 'dummy_feature' + DUMMY_PARAMETER = 'dummy_parameter' + + END_CHAPTER_CELEBRATION = 'end_chapter_celebration' + CHECKPOINT_CELEBRATION = 'checkpoint_celebration' + CONTRIBUTOR_DASHBOARD_ACCOMPLISHMENTS = ( + 'contributor_dashboard_accomplishments') + ANDROID_BETA_LANDING_PAGE = 'android_beta_landing_page' + BLOG_PAGES = 'blog_pages' + # Platform parameters should all be defined below. 
Registry.create_feature_flag( - PARAM_NAMES.dummy_feature, + ParamNames.DUMMY_FEATURE, 'This is a dummy feature flag.', - FEATURE_STAGES.dev, + platform_parameter_domain.FeatureStages.DEV, ) Registry.create_platform_parameter( - PARAM_NAMES.dummy_parameter, + ParamNames.DUMMY_PARAMETER, 'This is a dummy platform parameter.', - DATA_TYPES.string + platform_parameter_domain.DataTypes.STRING +) + +Registry.create_feature_flag( + ParamNames.END_CHAPTER_CELEBRATION, + 'This flag is for the end chapter celebration feature.', + platform_parameter_domain.FeatureStages.PROD, +) + +Registry.create_feature_flag( + ParamNames.CHECKPOINT_CELEBRATION, + 'This flag is for the checkpoint celebration feature.', + platform_parameter_domain.FeatureStages.PROD, +) + +Registry.create_feature_flag( + ParamNames.CONTRIBUTOR_DASHBOARD_ACCOMPLISHMENTS, + 'This flag enables showing per-contributor accomplishments on the' + + ' contributor dashboard.', + platform_parameter_domain.FeatureStages.PROD, ) + +Registry.create_feature_flag( + ParamNames.ANDROID_BETA_LANDING_PAGE, + 'This flag is for Android beta promo landing page.', + platform_parameter_domain.FeatureStages.PROD) + +Registry.create_feature_flag( + ParamNames.BLOG_PAGES, + 'This flag is for blog home page, blog author profile page and blog post' + + ' page.', + platform_parameter_domain.FeatureStages.PROD) diff --git a/core/domain/platform_parameter_list_test.py b/core/domain/platform_parameter_list_test.py index 780c273db5ba..db7265309a9b 100644 --- a/core/domain/platform_parameter_list_test.py +++ b/core/domain/platform_parameter_list_test.py @@ -27,15 +27,20 @@ class ExistingPlatformParameterValidityTests(test_utils.GenericTestBase): core/domain/platform_parameter_list.py. 
""" - EXPECTED_PARAM_NAMES = ['dummy_feature', 'dummy_parameter'] - - def test_all_defined_parameters_are_valid(self): + EXPECTED_PARAM_NAMES = ['dummy_feature', 'dummy_parameter', + 'end_chapter_celebration', + 'checkpoint_celebration', + 'android_beta_landing_page', + 'blog_pages', + 'contributor_dashboard_accomplishments'] + + def test_all_defined_parameters_are_valid(self) -> None: all_names = params.Registry.get_all_platform_parameter_names() for name in all_names: param = params.Registry.get_platform_parameter(name) param.validate() - def test_number_of_parameters_meets_expectation(self): + def test_number_of_parameters_meets_expectation(self) -> None: """Test that the Registry and EXPECTED_PARAM_NAMES have the same number of platform parameters. @@ -52,7 +57,7 @@ def test_number_of_parameters_meets_expectation(self): len(params.Registry.get_all_platform_parameter_names()), len(self.EXPECTED_PARAM_NAMES)) - def test_all_expected_parameters_are_present_in_registry(self): + def test_all_expected_parameters_are_present_in_registry(self) -> None: """Test that all parameters in EXPECTED_PARAM_NAMES are present in Registry. @@ -73,7 +78,7 @@ def test_all_expected_parameters_are_present_in_registry(self): list(missing_names)) ) - def test_no_unexpected_parameter_in_registry(self): + def test_no_unexpected_parameter_in_registry(self) -> None: """Test that all parameters registered in Registry are expected. 
If this test fails, it means some parameters in diff --git a/core/domain/platform_parameter_registry.py b/core/domain/platform_parameter_registry.py index bf77b95d624e..68c745e444b1 100644 --- a/core/domain/platform_parameter_registry.py +++ b/core/domain/platform_parameter_registry.py @@ -18,48 +18,67 @@ from __future__ import annotations +import enum + from core import feconf from core.domain import caching_services from core.domain import platform_parameter_domain from core.platform import models -(config_models,) = models.Registry.import_models( - [models.NAMES.config]) +from typing import Dict, List, Optional, Union -DATA_TYPES = platform_parameter_domain.DATA_TYPES # pylint: disable=invalid-name +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import config_models + +(config_models,) = models.Registry.import_models( + [models.Names.CONFIG]) class Registry: """Registry of all platform parameters.""" - DEFAULT_VALUE_BY_TYPE_DICT = { - DATA_TYPES.bool: False, - DATA_TYPES.number: 0, - DATA_TYPES.string: '', + DEFAULT_VALUE_BY_TYPE_DICT: Dict[ + platform_parameter_domain.DataTypes, + Union[bool, str, int] + ] = { + platform_parameter_domain.DataTypes.BOOL: False, + platform_parameter_domain.DataTypes.NUMBER: 0, + platform_parameter_domain.DataTypes.STRING: '', } # The keys of parameter_registry are the property names, and the values # are PlatformParameter instances with initial settings defined in this # file. - parameter_registry = {} + parameter_registry: Dict[ + str, platform_parameter_domain.PlatformParameter + ] = {} @classmethod def create_platform_parameter( - cls, name, description, data_type, is_feature=False, - feature_stage=None): + cls, + name: enum.Enum, + description: str, + data_type: platform_parameter_domain.DataTypes, + is_feature: bool = False, + feature_stage: Optional[platform_parameter_domain.FeatureStages] = None + ) -> platform_parameter_domain.PlatformParameter: """Creates, registers and returns a platform parameter. 
Args: name: Enum(PARAMS). The name of the platform parameter. description: str. The description of the platform parameter. - data_type: Enum(DATA_TYPES). The data type of the platform + data_type: Enum(DataTypes). The data type of the platform parameter, must be one of the following: bool, number, string. is_feature: bool. True if the platform parameter is a feature flag. - feature_stage: Enum(FEATURE_STAGES)|None. The stage of the feature, + feature_stage: Enum(FeatureStages)|None. The stage of the feature, required if 'is_feature' is True. Returns: PlatformParameter. The created platform parameter. + + Raises: + Exception. The data type is not supported. """ if data_type in cls.DEFAULT_VALUE_BY_TYPE_DICT: default = cls.DEFAULT_VALUE_BY_TYPE_DICT[data_type] @@ -72,8 +91,8 @@ def create_platform_parameter( 'Unsupported data type \'%s\', must be one of'' %s.' % ( data_type.value, allowed_data_types)) - param_dict = { - 'name': name.value if name else None, + param_dict: platform_parameter_domain.PlatformParameterDict = { + 'name': name.value, 'description': description, 'data_type': data_type.value, 'rules': [], @@ -87,37 +106,50 @@ def create_platform_parameter( @classmethod def create_feature_flag( - cls, name, description, stage): + cls, + name: enum.Enum, + description: str, + stage: platform_parameter_domain.FeatureStages + ) -> platform_parameter_domain.PlatformParameter: """Creates, registers and returns a platform parameter that is also a feature flag. Args: name: Enum(PARAMS). The name of the platform parameter. description: str. The description of the platform parameter. - stage: Enum(FEATURE_STAGES). The stage of the feature. + stage: Enum(FeatureStages). The stage of the feature. Returns: PlatformParameter. The created feature flag. 
""" return cls.create_platform_parameter( - name, description, DATA_TYPES.bool, + name, description, platform_parameter_domain.DataTypes.BOOL, is_feature=True, feature_stage=stage) @classmethod - def init_platform_parameter(cls, name, instance): + def init_platform_parameter( + cls, + name: str, + instance: platform_parameter_domain.PlatformParameter + ) -> None: """Initializes parameter_registry with keys as the parameter names and values as instances of the specified parameter. Args: name: str. The name of the platform parameter. instance: PlatformParameter. The instance of the platform parameter. + + Raises: + Exception. The given name of the platform parameter already exists. """ if cls.parameter_registry.get(name): raise Exception('Parameter with name %s already exists.' % name) cls.parameter_registry[name] = instance @classmethod - def get_platform_parameter(cls, name): + def get_platform_parameter( + cls, name: str + ) -> platform_parameter_domain.PlatformParameter: """Returns the instance of the specified name of the platform parameter. @@ -127,6 +159,9 @@ def get_platform_parameter(cls, name): Returns: PlatformParameter. The instance of the specified platform parameter. + + Raises: + Exception. The given name of the platform parameter doesn't exist. """ parameter_from_cache = cls.load_platform_parameter_from_memcache( name) @@ -150,31 +185,33 @@ def get_platform_parameter(cls, name): @classmethod def update_platform_parameter( - cls, name, committer_id, commit_message, new_rule_dicts): + cls, + name: str, + committer_id: str, + commit_message: str, + new_rules: List[platform_parameter_domain.PlatformParameterRule] + ) -> None: """Updates the platform parameter with new rules. Args: name: str. The name of the platform parameter to update. committer_id: str. ID of the committer. commit_message: str. The commit message. - new_rule_dicts: list(dist). A list of dict mappings of all fields - of PlatformParameterRule object. 
+ new_rules: list(PlatformParameterRule). A list of + PlatformParameterRule objects. """ param = cls.get_platform_parameter(name) # Create a temporary param instance with new rules for validation, # if the new rules are invalid, an exception will be raised in # validate() method. + new_rule_dicts = [rules.to_dict() for rules in new_rules] param_dict = param.to_dict() param_dict['rules'] = new_rule_dicts updated_param = param.from_dict(param_dict) updated_param.validate() model_instance = cls._to_platform_parameter_model(param) - - new_rules = [ - platform_parameter_domain.PlatformParameterRule.from_dict(rule_dict) - for rule_dict in new_rule_dicts] param.set_rules(new_rules) model_instance.rules = [rule.to_dict() for rule in param.rules] @@ -193,7 +230,7 @@ def update_platform_parameter( caching_services.CACHE_NAMESPACE_PLATFORM_PARAMETER, None, [name]) @classmethod - def get_all_platform_parameter_names(cls): + def get_all_platform_parameter_names(cls) -> List[str]: """Return a list of all the platform parameter names. Returns: @@ -202,7 +239,10 @@ def get_all_platform_parameter_names(cls): return list(cls.parameter_registry.keys()) @classmethod - def evaluate_all_platform_parameters(cls, context): + def evaluate_all_platform_parameters( + cls, + context: platform_parameter_domain.EvaluationContext + ) -> Dict[str, Union[str, bool, int]]: """Evaluate all platform parameters with the given context. Args: @@ -219,7 +259,10 @@ def evaluate_all_platform_parameters(cls, context): return result_dict @classmethod - def init_platform_parameter_from_dict(cls, parameter_dict): + def init_platform_parameter_from_dict( + cls, + parameter_dict: platform_parameter_domain.PlatformParameterDict + ) -> platform_parameter_domain.PlatformParameter: """Creates, registers and returns a platform parameter using the given dict representation of a platform parameter. 
@@ -238,7 +281,9 @@ def init_platform_parameter_from_dict(cls, parameter_dict): return parameter @classmethod - def load_platform_parameter_from_storage(cls, name): + def load_platform_parameter_from_storage( + cls, name: str + ) -> Optional[platform_parameter_domain.PlatformParameter]: """Loads platform parameter from storage. Args: @@ -252,7 +297,7 @@ def load_platform_parameter_from_storage(cls, name): name, strict=False) if parameter_model: - param_with_init_settings = cls.parameter_registry.get(name) + param_with_init_settings = cls.parameter_registry[name] return platform_parameter_domain.PlatformParameter.from_dict({ 'name': param_with_init_settings.name, 'description': param_with_init_settings.description, @@ -267,7 +312,9 @@ def load_platform_parameter_from_storage(cls, name): return None @classmethod - def load_platform_parameter_from_memcache(cls, name): + def load_platform_parameter_from_memcache( + cls, name: str + ) -> Optional[platform_parameter_domain.PlatformParameter]: """Loads cached platform parameter from memcache. Args: @@ -283,7 +330,10 @@ def load_platform_parameter_from_memcache(cls, name): return cached_parameter @classmethod - def _to_platform_parameter_model(cls, param): + def _to_platform_parameter_model( + cls, + param: platform_parameter_domain.PlatformParameter + ) -> config_models.PlatformParameterModel: """Returns the platform parameter model corresponding to the given domain object. 
diff --git a/core/domain/platform_parameter_registry_test.py b/core/domain/platform_parameter_registry_test.py index e63619565e77..c069baae50ee 100644 --- a/core/domain/platform_parameter_registry_test.py +++ b/core/domain/platform_parameter_registry_test.py @@ -18,55 +18,57 @@ from __future__ import annotations +import enum + from core import feconf -from core import python_utils from core import utils from core.domain import caching_services from core.domain import platform_parameter_domain as parameter_domain from core.domain import platform_parameter_registry as registry -from core.platform import models from core.tests import test_utils -(config_models,) = models.Registry.import_models( - [models.NAMES.config]) +DataTypes = parameter_domain.DataTypes +FeatureStages = parameter_domain.FeatureStages + + +class ParamNames(enum.Enum): + """Enum for parameter names.""" -DATA_TYPES = parameter_domain.DATA_TYPES # pylint: disable=invalid-name -FEATURE_STAGES = parameter_domain.FEATURE_STAGES # pylint: disable=invalid-name -PARAM_NAMES = python_utils.create_enum('parameter_a') # pylint: disable=invalid-name + PARAMETER_A = 'parameter_a' class PlatformParameterRegistryTests(test_utils.GenericTestBase): """Tests for the platform parameter Registry.""" - def setUp(self): - super(PlatformParameterRegistryTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.original_param_registry = registry.Registry.parameter_registry registry.Registry.parameter_registry.clear() # Parameter names that might be used in following tests. 
- parameter_names = ('parameter_a', 'parameter_b') + parameter_names = ['parameter_a', 'parameter_b'] caching_services.delete_multi( caching_services.CACHE_NAMESPACE_PLATFORM_PARAMETER, None, parameter_names) - def tearDown(self): - super(PlatformParameterRegistryTests, self).tearDown() + def tearDown(self) -> None: + super().tearDown() registry.Registry.parameter_registry = self.original_param_registry - def _create_example_parameter_with_name(self, name): + def _create_example_parameter_with_name(self, name: str) -> None: """Creates and returns an example parameter with the given name.""" registry.Registry.init_platform_parameter_from_dict({ 'name': name, 'description': 'for test', - 'data_type': DATA_TYPES.string.value, + 'data_type': DataTypes.STRING.value, 'rules': [ { 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', FEATURE_STAGES.dev.value)] + 'conditions': [['=', FeatureStages.DEV.value]] } ], 'value_when_matched': '222' @@ -79,64 +81,70 @@ def _create_example_parameter_with_name(self, name): 'feature_stage': None, }) - def test_create_platform_parameter(self): + def test_create_platform_parameter(self) -> None: parameter = registry.Registry.create_platform_parameter( - PARAM_NAMES.parameter_a, 'test', registry.DATA_TYPES.bool) + ParamNames.PARAMETER_A, 'test', DataTypes.BOOL) self.assertIsInstance(parameter, parameter_domain.PlatformParameter) parameter.validate() - def test_create_platform_parameter_with_invalid_type_failure(self): - _data_type = python_utils.create_enum('invalid') - with self.assertRaisesRegexp( + def test_create_platform_parameter_with_invalid_type_failure(self) -> None: + class DataType(enum.Enum): + """Enum for data type.""" + + INVALID = 'invalid' + with self.assertRaisesRegex( Exception, 'Unsupported data type \'invalid\''): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. 
registry.Registry.create_platform_parameter( - PARAM_NAMES.parameter_a, 'test', _data_type.invalid) + ParamNames.PARAMETER_A, 'test', DataType.INVALID) # type: ignore[arg-type] - def test_create_platform_parameter_with_the_same_name_failure(self): + def test_create_platform_parameter_with_the_same_name_failure(self) -> None: param_name = 'parameter_a' self._create_example_parameter_with_name(param_name) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Parameter with name %s already exists' % param_name): self._create_example_parameter_with_name(param_name) - def test_create_feature_flag(self): + def test_create_feature_flag(self) -> None: feature = registry.Registry.create_feature_flag( - PARAM_NAMES.parameter_a, 'test feature', FEATURE_STAGES.dev) - self.assertEqual(feature.data_type, registry.DATA_TYPES.bool.value) + ParamNames.PARAMETER_A, 'test feature', FeatureStages.DEV) + self.assertEqual(feature.data_type, DataTypes.BOOL.value) self.assertTrue(feature.is_feature) - self.assertEqual(feature.feature_stage, FEATURE_STAGES.dev.value) + self.assertEqual(feature.feature_stage, FeatureStages.DEV.value) feature.validate() - def test_default_value_of_bool_platform_parameter(self): + def test_default_value_of_bool_platform_parameter(self) -> None: parameter = registry.Registry.create_platform_parameter( - PARAM_NAMES.parameter_a, 'test feature', registry.DATA_TYPES.bool) + ParamNames.PARAMETER_A, 'test feature', DataTypes.BOOL) parameter.validate() self.assertEqual(parameter.default_value, False) - def test_default_value_of_string_platform_parameter(self): + def test_default_value_of_string_platform_parameter(self) -> None: parameter = registry.Registry.create_platform_parameter( - PARAM_NAMES.parameter_a, 'test', DATA_TYPES.string) + ParamNames.PARAMETER_A, 'test', DataTypes.STRING) parameter.validate() self.assertEqual(parameter.default_value, '') - def test_default_value_of_number_platform_parameter(self): + def 
test_default_value_of_number_platform_parameter(self) -> None: parameter = registry.Registry.create_platform_parameter( - PARAM_NAMES.parameter_a, 'test', DATA_TYPES.number) + ParamNames.PARAMETER_A, 'test', DataTypes.NUMBER) parameter.validate() self.assertEqual(parameter.default_value, 0) - def test_get_platform_parameter(self): + def test_get_platform_parameter(self) -> None: parameter_name = 'parameter_a' self._create_example_parameter_with_name(parameter_name) parameter = registry.Registry.get_platform_parameter(parameter_name) self.assertIsNotNone(parameter) self.assertIsInstance(parameter, parameter_domain.PlatformParameter) - def test_get_non_existing_parameter_failure(self): - with self.assertRaisesRegexp(Exception, 'not found'): + def test_get_non_existing_parameter_failure(self) -> None: + with self.assertRaisesRegex(Exception, 'not found'): registry.Registry.get_platform_parameter('parameter_a') - def test_get_all_parameter_names(self): + def test_get_all_parameter_names(self) -> None: parameter_names = ['parameter_a', 'parameter_b'] for parameter_name in parameter_names: self._create_example_parameter_with_name(parameter_name) @@ -144,7 +152,7 @@ def test_get_all_parameter_names(self): sorted(registry.Registry.get_all_platform_parameter_names()), sorted(parameter_names)) - def test_memcache_is_set_after_getting(self): + def test_memcache_is_set_after_getting(self) -> None: parameter_name = 'parameter_a' self._create_example_parameter_with_name(parameter_name) @@ -156,7 +164,7 @@ def test_memcache_is_set_after_getting(self): registry.Registry.load_platform_parameter_from_memcache( parameter_name)) - def test_update_parameter(self): + def test_update_parameter(self) -> None: parameter_name = 'parameter_a' self._create_example_parameter_with_name(parameter_name) @@ -165,15 +173,15 @@ def test_update_parameter(self): feconf.SYSTEM_COMMITTER_ID, 'commit message', [ - { + parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 
'server_mode', - 'conditions': [('=', FEATURE_STAGES.dev.value)] + 'conditions': [['=', FeatureStages.DEV.value]] } ], 'value_when_matched': 'updated' - } + }) ], ) parameter_updated = registry.Registry.get_platform_parameter( @@ -184,7 +192,7 @@ def test_update_parameter(self): self.assertEqual( parameter_updated.rules[0].value_when_matched, 'updated') - def test_cached_value_is_invalidated_after_update(self): + def test_cached_value_is_invalidated_after_update(self) -> None: parameter_name = 'parameter_a' self._create_example_parameter_with_name(parameter_name) @@ -193,54 +201,55 @@ def test_cached_value_is_invalidated_after_update(self): feconf.SYSTEM_COMMITTER_ID, 'commit message', [ - { + parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', FEATURE_STAGES.dev.value)] + 'conditions': [['=', FeatureStages.DEV.value]] } ], 'value_when_matched': 'updated' - } + }) ], ) self.assertIsNone( registry.Registry.load_platform_parameter_from_memcache( parameter_name)) - def test_update_parameter_with_invalid_rules_failure(self): + def test_update_parameter_with_invalid_rules_failure(self) -> None: parameter_name = 'parameter_a' self._create_example_parameter_with_name(parameter_name) param = registry.Registry.get_platform_parameter(parameter_name) param.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected string'): registry.Registry.update_platform_parameter( parameter_name, feconf.SYSTEM_COMMITTER_ID, 'commit message', [ - { + parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', FEATURE_STAGES.dev.value)] + 'conditions': [['=', FeatureStages.DEV.value]] } ], 'value_when_matched': True - } + }) ], ) def test_update_dev_feature_with_rule_enabled_for_test_raises_exception( - self): + self + ) -> None: parameter_name = 'parameter_a' registry.Registry.create_feature_flag( - PARAM_NAMES.parameter_a, 'dev 
feature', FEATURE_STAGES.dev) + ParamNames.PARAMETER_A, 'dev feature', FeatureStages.DEV) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Feature in dev stage cannot be enabled in test or production ' 'environments.'): @@ -249,25 +258,26 @@ def test_update_dev_feature_with_rule_enabled_for_test_raises_exception( feconf.SYSTEM_COMMITTER_ID, 'commit message', [ - { + parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', FEATURE_STAGES.test.value)] + 'conditions': [['=', FeatureStages.TEST.value]] } ], 'value_when_matched': True - } + }) ], ) def test_update_dev_feature_with_rule_enabled_for_prod_raises_exception( - self): + self + ) -> None: parameter_name = 'parameter_a' registry.Registry.create_feature_flag( - PARAM_NAMES.parameter_a, 'dev feature', FEATURE_STAGES.dev) + ParamNames.PARAMETER_A, 'dev feature', FeatureStages.DEV) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Feature in dev stage cannot be enabled in test or production ' 'environments.'): @@ -276,25 +286,26 @@ def test_update_dev_feature_with_rule_enabled_for_prod_raises_exception( feconf.SYSTEM_COMMITTER_ID, 'commit message', [ - { + parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', FEATURE_STAGES.prod.value)] + 'conditions': [['=', FeatureStages.PROD.value]] } ], 'value_when_matched': True - } + }) ], ) def test_update_test_feature_with_rule_enabled_for_prod_raises_exception( - self): + self + ) -> None: parameter_name = 'parameter_a' registry.Registry.create_feature_flag( - PARAM_NAMES.parameter_a, 'dev feature', FEATURE_STAGES.test) + ParamNames.PARAMETER_A, 'dev feature', FeatureStages.TEST) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Feature in test stage cannot be enabled in production ' 'environment.'): @@ -303,19 +314,19 @@ def 
test_update_test_feature_with_rule_enabled_for_prod_raises_exception( feconf.SYSTEM_COMMITTER_ID, 'commit message', [ - { + parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', FEATURE_STAGES.prod.value)] + 'conditions': [['=', FeatureStages.PROD.value]] } ], 'value_when_matched': True - } + }) ], ) - def test_updated_parameter_is_saved_in_storage(self): + def test_updated_parameter_is_saved_in_storage(self) -> None: parameter_name = 'parameter_a' self._create_example_parameter_with_name(parameter_name) self.assertIsNone( @@ -327,15 +338,15 @@ def test_updated_parameter_is_saved_in_storage(self): feconf.SYSTEM_COMMITTER_ID, 'commit message', [ - { + parameter_domain.PlatformParameterRule.from_dict({ 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', FEATURE_STAGES.dev.value)] + 'conditions': [['=', FeatureStages.DEV.value]] } ], 'value_when_matched': 'updated' - } + }) ], ) @@ -345,7 +356,7 @@ def test_updated_parameter_is_saved_in_storage(self): ) self.assertIsNotNone(parameter_updated) - def test_evaluate_all_parameters(self): + def test_evaluate_all_parameters(self) -> None: context = parameter_domain.EvaluationContext.from_dict( { 'platform_type': 'Android', @@ -353,19 +364,19 @@ def test_evaluate_all_parameters(self): 'app_version': '1.2.3', }, { - 'server_mode': FEATURE_STAGES.dev, + 'server_mode': FeatureStages.DEV, }, ) registry.Registry.init_platform_parameter_from_dict({ 'name': 'parameter_a', 'description': 'for test', - 'data_type': DATA_TYPES.string.value, + 'data_type': DataTypes.STRING.value, 'rules': [ { 'filters': [ { 'type': 'server_mode', - 'conditions': [('=', FEATURE_STAGES.dev.value)] + 'conditions': [['=', FeatureStages.DEV.value]] } ], 'value_when_matched': '222' @@ -375,12 +386,12 @@ def test_evaluate_all_parameters(self): feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION), 'default_value': '333', 'is_feature': True, - 'feature_stage': FEATURE_STAGES.dev.value, + 
'feature_stage': FeatureStages.DEV.value, }) registry.Registry.init_platform_parameter_from_dict({ 'name': 'parameter_b', 'description': 'for test', - 'data_type': registry.DATA_TYPES.bool.value, + 'data_type': DataTypes.BOOL.value, 'rules': [], 'rule_schema_version': ( feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION), diff --git a/core/domain/playthrough_issue_registry.py b/core/domain/playthrough_issue_registry.py index e98bd60e88e7..5f7ea48119ff 100644 --- a/core/domain/playthrough_issue_registry.py +++ b/core/domain/playthrough_issue_registry.py @@ -23,18 +23,25 @@ from core import feconf from core.platform import models +from extensions.issues import base -(stats_models,) = models.Registry.import_models([models.NAMES.statistics]) +from typing import Dict, List + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import stats_models + +(stats_models,) = models.Registry.import_models([models.Names.STATISTICS]) class Registry: """Registry of all issues.""" # Dict mapping issue types to instances of the issues. - _issues = {} + _issues: Dict[str, base.BaseExplorationIssueSpec] = {} @classmethod - def get_all_issue_types(cls): + def get_all_issue_types(cls) -> List[str]: """Get a list of all issue types. Returns: @@ -43,7 +50,7 @@ def get_all_issue_types(cls): return stats_models.ALLOWED_ISSUE_TYPES @classmethod - def _refresh(cls): + def _refresh(cls) -> None: """Initializes the mapping between issue types to instances of the issue classes. """ @@ -62,7 +69,7 @@ def _refresh(cls): cls._issues[clazz.__name__] = clazz() @classmethod - def get_all_issues(cls): + def get_all_issues(cls) -> List[base.BaseExplorationIssueSpec]: """Get a list of instances of all issues. Returns: @@ -74,7 +81,9 @@ def get_all_issues(cls): return list(cls._issues.values()) @classmethod - def get_issue_by_type(cls, issue_type): + def get_issue_by_type( + cls, issue_type: str + ) -> base.BaseExplorationIssueSpec: """Gets an issue by its type. 
Refreshes once if the issue is not found; subsequently, throws a diff --git a/core/domain/playthrough_issue_registry_test.py b/core/domain/playthrough_issue_registry_test.py index 9bfed3a08c33..e066f3f045fb 100644 --- a/core/domain/playthrough_issue_registry_test.py +++ b/core/domain/playthrough_issue_registry_test.py @@ -20,12 +20,48 @@ from core.domain import playthrough_issue_registry from core.tests import test_utils +from extensions.issues.CyclicStateTransitions import CyclicStateTransitions +from extensions.issues.EarlyQuit import EarlyQuit +from extensions.issues.MultipleIncorrectSubmissions import ( + MultipleIncorrectSubmissions) class IssueRegistryUnitTests(test_utils.GenericTestBase): """Test for the issue registry.""" - def test_issue_registry(self): + def setUp(self) -> None: + super().setUp() + self.issues_dict = { + 'EarlyQuit': EarlyQuit.EarlyQuit, + 'CyclicStateTransitions': ( + CyclicStateTransitions.CyclicStateTransitions), + 'MultipleIncorrectSubmissions': ( + MultipleIncorrectSubmissions.MultipleIncorrectSubmissions) + } + self.invalid_issue_type = 'InvalidIssueType' + + def tearDown(self) -> None: + playthrough_issue_registry.Registry._issues = {} # pylint: disable=protected-access + super().tearDown() + + def test_issue_registry(self) -> None: """Do some sanity checks on the issue registry.""" self.assertEqual( len(playthrough_issue_registry.Registry.get_all_issues()), 3) + + def test_correct_issue_registry_types(self) -> None: + """Tests issue registry for fetching of issue instances of correct + issue types. + """ + for issue_type, _class in self.issues_dict.items(): + self.assertIsInstance( + playthrough_issue_registry.Registry.get_issue_by_type( + issue_type), _class) + + def test_incorrect_issue_registry_types(self) -> None: + """Tests that an error is raised when fetching an incorrect issue + type. 
+ """ + with self.assertRaisesRegex(KeyError, self.invalid_issue_type): + playthrough_issue_registry.Registry.get_issue_by_type( + self.invalid_issue_type) diff --git a/core/domain/question_domain.py b/core/domain/question_domain.py index e50f3908e1ce..61d764d12439 100644 --- a/core/domain/question_domain.py +++ b/core/domain/question_domain.py @@ -31,38 +31,45 @@ from core.domain import customization_args_util from core.domain import exp_domain from core.domain import expression_parser -from core.domain import html_cleaner -from core.domain import html_validation_service -from core.domain import interaction_registry from core.domain import state_domain -from core.platform import models +from core.domain import translation_domain from extensions import domain from pylatexenc import latex2text +from typing import ( + Dict, Final, List, Literal, Optional, Set, Tuple, TypedDict, Union, cast, + overload) -(question_models,) = models.Registry.import_models([models.NAMES.question]) +from core.domain import html_cleaner # pylint: disable=invalid-import-from # isort:skip +from core.domain import html_validation_service # pylint: disable=invalid-import-from # isort:skip +from core.domain import interaction_registry # pylint: disable=invalid-import-from # isort:skip + +# TODO(#14537): Refactor this file and remove imports marked +# with 'invalid-import-from'. # Do not modify the values of these constants. This is to preserve backwards # compatibility with previous change dicts. 
-QUESTION_PROPERTY_LANGUAGE_CODE = 'language_code' -QUESTION_PROPERTY_QUESTION_STATE_DATA = 'question_state_data' -QUESTION_PROPERTY_LINKED_SKILL_IDS = 'linked_skill_ids' -QUESTION_PROPERTY_INAPPLICABLE_SKILL_MISCONCEPTION_IDS = ( +QUESTION_PROPERTY_LANGUAGE_CODE: Final = 'language_code' +QUESTION_PROPERTY_QUESTION_STATE_DATA: Final = 'question_state_data' +QUESTION_PROPERTY_LINKED_SKILL_IDS: Final = 'linked_skill_ids' +QUESTION_PROPERTY_INAPPLICABLE_SKILL_MISCONCEPTION_IDS: Final = ( 'inapplicable_skill_misconception_ids') +QUESTION_PROPERTY_NEXT_CONTENT_ID_INDEX: Final = 'next_content_id_index' # This takes additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. -CMD_UPDATE_QUESTION_PROPERTY = 'update_question_property' -CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION = 'create_new_fully_specified_question' -CMD_MIGRATE_STATE_SCHEMA_TO_LATEST_VERSION = ( +CMD_UPDATE_QUESTION_PROPERTY: Final = 'update_question_property' +CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION: Final = ( + 'create_new_fully_specified_question') +CMD_MIGRATE_STATE_SCHEMA_TO_LATEST_VERSION: Final = ( 'migrate_state_schema_to_latest_version') # The following commands are deprecated, as these functionalities will be # handled by a QuestionSkillLink class in the future. -CMD_ADD_QUESTION_SKILL = 'add_question_skill' -CMD_REMOVE_QUESTION_SKILL = 'remove_question_skill' +CMD_ADD_QUESTION_SKILL: Final = 'add_question_skill' +CMD_REMOVE_QUESTION_SKILL: Final = 'remove_question_skill' -CMD_CREATE_NEW = 'create_new' +CMD_CREATE_NEW: Final = 'create_new' class QuestionChange(change_domain.BaseChange): @@ -80,36 +87,143 @@ class QuestionChange(change_domain.BaseChange): # The allowed list of question properties which can be used in # update_question_property command. 
- QUESTION_PROPERTIES = ( + QUESTION_PROPERTIES: List[str] = [ QUESTION_PROPERTY_QUESTION_STATE_DATA, QUESTION_PROPERTY_LANGUAGE_CODE, QUESTION_PROPERTY_LINKED_SKILL_IDS, - QUESTION_PROPERTY_INAPPLICABLE_SKILL_MISCONCEPTION_IDS) + QUESTION_PROPERTY_INAPPLICABLE_SKILL_MISCONCEPTION_IDS, + QUESTION_PROPERTY_NEXT_CONTENT_ID_INDEX] - ALLOWED_COMMANDS = [{ + ALLOWED_COMMANDS: List[feconf.ValidCmdDict] = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_QUESTION_PROPERTY, 'required_attribute_names': ['property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': QUESTION_PROPERTIES} + 'allowed_values': {'property_name': QUESTION_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'required_attribute_names': ['question_dict', 'skill_id'], 'optional_attribute_names': ['topic_name'], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_MIGRATE_STATE_SCHEMA_TO_LATEST_VERSION, 'required_attribute_names': ['from_version', 'to_version'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }] +class CreateNewQuestionCmd(QuestionChange): + """Class representing the QuestionChange's + CMD_CREATE_NEW command. + """ + + pass + + +class UpdateQuestionPropertyQuestionStateDataCmd(QuestionChange): + """Class representing the QuestionChange's + CMD_UPDATE_QUESTION_PROPERTY command with + QUESTION_PROPERTY_QUESTION_STATE_DATA as allowed value. 
+ """ + + property_name: Literal['question_state_data'] + new_value: state_domain.StateDict + old_value: state_domain.StateDict + + +class UpdateQuestionPropertyLanguageCodeCmd(QuestionChange): + """Class representing the QuestionChange's + CMD_UPDATE_QUESTION_PROPERTY command with + QUESTION_PROPERTY_LANGUAGE_CODE as allowed value. + """ + + property_name: Literal['language_code'] + new_value: str + old_value: str + + +class UpdateQuestionPropertyNextContentIdIndexCmd(QuestionChange): + """Class representing the QuestionChange's + CMD_UPDATE_QUESTION_PROPERTY command with + QUESTION_PROPERTY_NEXT_CONTENT_ID_INDEX as allowed value. + """ + + property_name: Literal['next_content_id_index'] + new_value: int + old_value: int + + +class UpdateQuestionPropertyLinkedSkillIdsCmd(QuestionChange): + """Class representing the QuestionChange's + CMD_UPDATE_QUESTION_PROPERTY command with + QUESTION_PROPERTY_LINKED_SKILL_IDS as allowed value. + """ + + property_name: Literal['linked_skill_ids'] + new_value: List[str] + old_value: List[str] + + +class UpdateQuestionPropertySkillMisconceptionIdsCmd(QuestionChange): + """Class representing the QuestionChange's + CMD_UPDATE_QUESTION_PROPERTY command with + QUESTION_PROPERTY_INAPPLICABLE_SKILL_MISCONCEPTION_IDS + as allowed value. + """ + + property_name: Literal['inapplicable_skill_misconception_ids'] + new_value: List[str] + old_value: List[str] + + +class CreateNewFullySpecifiedQuestionCmd(QuestionChange): + """Class representing the QuestionChange's + CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION command. + """ + + question_dict: QuestionDict + skill_id: str + topic_name: str + + +class MigrateStateSchemaToLatestVersion(QuestionChange): + """Class representing the QuestionChange's + CMD_MIGRATE_STATE_SCHEMA_TO_LATEST_VERSION command. 
+ """ + + from_version: str + to_version: str + + +class QuestionSuggestionChangeDict(TypedDict): + """Dictionary representing the QuestionSuggestionChange domain object.""" + + # Note: Here we are defining question's id as None, because while submitting + # question suggestion from the frontend we are never providing question id + # in its payload. + id: None + question_state_data: state_domain.StateDict + question_state_data_schema_version: int + language_code: str + version: int + linked_skill_ids: List[str] + inapplicable_skill_misconception_ids: List[str] + next_content_id_index: int + + class QuestionSuggestionChange(change_domain.BaseChange): """Domain object for changes made to question suggestion object. @@ -124,19 +238,59 @@ class QuestionSuggestionChange(change_domain.BaseChange): 'required_attribute_names': [ 'question_dict', 'skill_id', 'skill_difficulty'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} } ] -class Question: +class CreateNewFullySpecifiedQuestionSuggestionCmd(QuestionSuggestionChange): + """Class representing the QuestionSuggestionChange's + CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION command. 
+ """ + + question_dict: QuestionDict + skill_id: str + skill_difficulty: float + + +class QuestionDict(TypedDict): + """Dictionary representing the Question domain object.""" + + id: str + question_state_data: state_domain.StateDict + question_state_data_schema_version: int + language_code: str + version: int + linked_skill_ids: List[str] + inapplicable_skill_misconception_ids: List[str] + next_content_id_index: int + + +class VersionedQuestionStateDict(TypedDict): + """Dictionary representing the versioned State object for Question.""" + + state_schema_version: int + state: state_domain.StateDict + + +class Question(translation_domain.BaseTranslatableObject): """Domain object for a question.""" def __init__( - self, question_id, question_state_data, - question_state_data_schema_version, language_code, version, - linked_skill_ids, inapplicable_skill_misconception_ids, - created_on=None, last_updated=None): + self, + question_id: str, + question_state_data: state_domain.State, + question_state_data_schema_version: int, + language_code: str, + version: int, + linked_skill_ids: List[str], + inapplicable_skill_misconception_ids: List[str], + next_content_id_index: int, + created_on: Optional[datetime.datetime] = None, + last_updated: Optional[datetime.datetime] = None + ) -> None: """Constructs a Question domain object. Args: @@ -154,6 +308,8 @@ def __init__( inapplicable_skill_misconception_ids: list(str). Optional misconception ids that are marked as not relevant to the question. + next_content_id_index: int. The next content_id index to use for + generation of new content_ids. created_on: datetime.datetime. Date and time when the question was created. last_updated: datetime.datetime. 
Date and time when the @@ -168,10 +324,28 @@ def __init__( self.linked_skill_ids = linked_skill_ids self.inapplicable_skill_misconception_ids = ( inapplicable_skill_misconception_ids) + self.next_content_id_index = next_content_id_index self.created_on = created_on self.last_updated = last_updated - def to_dict(self): + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the question. + + Returns: + translatable_contents_collection: TranslatableContentsCollection. + An instance of TranslatableContentsCollection class. + """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + translatable_contents_collection.add_fields_from_translatable_object( + self.question_state_data) + return translatable_contents_collection + + def to_dict(self) -> QuestionDict: """Returns a dict representing this Question domain object. Returns: @@ -186,11 +360,14 @@ def to_dict(self): 'version': self.version, 'linked_skill_ids': self.linked_skill_ids, 'inapplicable_skill_misconception_ids': ( - self.inapplicable_skill_misconception_ids) + self.inapplicable_skill_misconception_ids), + 'next_content_id_index': self.next_content_id_index, } @classmethod - def create_default_question_state(cls): + def create_default_question_state( + cls, content_id_generator: translation_domain.ContentIdGenerator + ) -> state_domain.State: """Return a State domain object with default value for being used as question state data. @@ -198,10 +375,17 @@ def create_default_question_state(cls): State. The corresponding State domain object. 
""" return state_domain.State.create_default_state( - None, is_initial_state=True) + None, + content_id_generator.generate( + translation_domain.ContentType.CONTENT), + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME), + is_initial_state=True) @classmethod - def _convert_state_v27_dict_to_v28_dict(cls, question_state_dict): + def _convert_state_v27_dict_to_v28_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 27 to 28. Version 28 replaces content_ids_to_audio_translations with recorded_voiceovers. @@ -212,14 +396,23 @@ def _convert_state_v27_dict_to_v28_dict(cls, question_state_dict): Returns: dict. The converted question_state_dict. """ + # Here we use MyPy ignore because in _convert_* functions, we allow less + # strict typing because here we are working with previous versions of + # the domain object and in previous versions of the domain object there + # are some fields that are discontinued in the latest domain object and + # here 'content_ids_to_audio_translations' is discontinued in the + # latest recorded_voiceovers. So, while accessing these discontinued + # fields MyPy throws an error. Thus to avoid the error, we used ignore. question_state_dict['recorded_voiceovers'] = { 'voiceovers_mapping': ( - question_state_dict.pop('content_ids_to_audio_translations')) + question_state_dict.pop('content_ids_to_audio_translations')) # type: ignore[misc] } return question_state_dict @classmethod - def _convert_state_v28_dict_to_v29_dict(cls, question_state_dict): + def _convert_state_v28_dict_to_v29_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 28 to 29. 
Version 29 adds solicit_answer_details boolean variable to the state, which allows the creator to ask for answer details from the learner @@ -236,7 +429,9 @@ def _convert_state_v28_dict_to_v29_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v29_dict_to_v30_dict(cls, question_state_dict): + def _convert_state_v29_dict_to_v30_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 29 to 30. Version 30 replaces tagged_misconception_id with tagged_skill_misconception_id, which is default to None. @@ -252,12 +447,22 @@ def _convert_state_v29_dict_to_v30_dict(cls, question_state_dict): answer_groups = question_state_dict['interaction']['answer_groups'] for answer_group in answer_groups: answer_group['tagged_skill_misconception_id'] = None - del answer_group['tagged_misconception_id'] + # Here we use MyPy ignore because in _convert_* functions, we allow + # less strict typing because here we are working with previous + # versions of the domain object and in previous versions of the + # domain object there are some fields that are discontinued in + # the latest domain object and here 'tagged_misconception_id' is + # discontinued in the latest answer_group. So, while accessing these + # discontinued fields MyPy throws an error. Thus to avoid the + # error, we used ignore here. + del answer_group['tagged_misconception_id'] # type: ignore[misc] return question_state_dict @classmethod - def _convert_state_v30_dict_to_v31_dict(cls, question_state_dict): + def _convert_state_v30_dict_to_v31_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 30 to 31. Version 31 updates the Voiceover model to have an initialized duration_secs attribute of 0.0. 
@@ -283,7 +488,9 @@ def _convert_state_v30_dict_to_v31_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v31_dict_to_v32_dict(cls, question_state_dict): + def _convert_state_v31_dict_to_v32_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 31 to 32. Version 32 adds a new customization arg to SetInput interaction which allows creators to add custom text to the "Add" button. @@ -308,7 +515,9 @@ def _convert_state_v31_dict_to_v32_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v32_dict_to_v33_dict(cls, question_state_dict): + def _convert_state_v32_dict_to_v33_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 32 to 33. Version 33 adds a new customization arg to MultipleChoiceInput Interaction which allows answer choices to be shuffled. @@ -333,7 +542,9 @@ def _convert_state_v32_dict_to_v33_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v33_dict_to_v34_dict(cls, question_state_dict): + def _convert_state_v33_dict_to_v34_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 33 to 34. Version 34 adds a new attribute for math components. The new attribute has an additional field to for storing SVG filenames. @@ -354,7 +565,9 @@ def _convert_state_v33_dict_to_v34_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v34_dict_to_v35_dict(cls, question_state_dict): + def _convert_state_v34_dict_to_v35_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 34 to 35. 
Version 35 upgrades all explorations that use the MathExpressionInput interaction to use one of AlgebraicExpressionInput, NumericExpressionInput, or MathEquationInput @@ -388,11 +601,11 @@ def _convert_state_v34_dict_to_v35_dict(cls, question_state_dict): rule_input) type_of_input = exp_domain.TYPE_INVALID_EXPRESSION - if is_valid_algebraic_expression(rule_input): + if is_valid_numeric_expression(rule_input): + type_of_input = exp_domain.TYPE_VALID_NUMERIC_EXPRESSION + elif is_valid_algebraic_expression(rule_input): type_of_input = ( exp_domain.TYPE_VALID_ALGEBRAIC_EXPRESSION) - elif is_valid_numeric_expression(rule_input): - type_of_input = exp_domain.TYPE_VALID_NUMERIC_EXPRESSION elif is_valid_math_equation(rule_input): type_of_input = exp_domain.TYPE_VALID_MATH_EQUATION @@ -463,14 +676,31 @@ def _convert_state_v34_dict_to_v35_dict(cls, question_state_dict): del question_state_dict['recorded_voiceovers'][ 'voiceovers_mapping'][content_id] if content_id in question_state_dict[ - 'written_translations']['translations_mapping']: - del question_state_dict['written_translations'][ + # Here we use MyPy ignore because this is a + # conversion function for old schema and the + # StateDict doesn't have the writtent translation + # property in the latest schema. + 'written_translations']['translations_mapping']: # type: ignore[misc] + # Here we use MyPy ignore because this is a + # conversion function for old schema and the + # StateDict doesn't have the writtent translation + # property in the latest schema. 
+ del question_state_dict['written_translations'][ # type: ignore[misc] 'translations_mapping'][content_id] question_state_dict['interaction']['id'] = new_interaction_id question_state_dict['interaction']['answer_groups'] = ( new_answer_groups) - if question_state_dict['interaction']['solution']: + if question_state_dict['interaction']['solution'] is not None: + # Ruling out the possibility of any other type for MyPy type + # checking, because for 'ExpressionInput' interactions, the + # correct_answer is formatted as a Dict type. + assert isinstance( + question_state_dict['interaction']['solution'][ + 'correct_answer' + ], + dict + ) correct_answer = question_state_dict['interaction'][ 'solution']['correct_answer']['ascii'] correct_answer = exp_domain.clean_math_expression( @@ -481,7 +711,9 @@ def _convert_state_v34_dict_to_v35_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v35_dict_to_v36_dict(cls, question_state_dict): + def _convert_state_v35_dict_to_v36_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 35 to 36. Version 35 adds translation support for interaction customization arguments. This migration converts customization arguments whose schemas have been changed from unicode to @@ -500,8 +732,10 @@ def _convert_state_v35_dict_to_v36_dict(cls, question_state_dict): dict. The converted question_state_dict. """ max_existing_content_id_index = -1 + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. translations_mapping = question_state_dict[ - 'written_translations']['translations_mapping'] + 'written_translations']['translations_mapping'] # type: ignore[misc] for content_id in translations_mapping: # Find maximum existing content_id index. 
content_id_suffix = content_id.split('_')[-1] @@ -523,11 +757,13 @@ def _convert_state_v35_dict_to_v36_dict(cls, question_state_dict): translations_mapping[ content_id][lang_code]['translation'] = ( translations_mapping[content_id][lang_code]['html']) + # Here we use MyPy ignore because MyPy doesn't allow key + # deletion from TypedDict. del translations_mapping[content_id][lang_code]['html'] interaction_id = question_state_dict['interaction']['id'] if interaction_id is None: - question_state_dict['next_content_id_index'] = ( + question_state_dict['next_content_id_index'] = ( # type: ignore[misc] max_existing_content_id_index + 1) return question_state_dict @@ -539,7 +775,7 @@ class ContentIdCounter: new_content_ids = [] - def __init__(self, next_content_id_index): + def __init__(self, next_content_id_index: int) -> None: """Initializes a ContentIdCounter object. Args: @@ -547,7 +783,7 @@ def __init__(self, next_content_id_index): """ self.next_content_id_index = next_content_id_index - def generate_content_id(self, content_id_prefix): + def generate_content_id(self, content_id_prefix: str) -> str: """Generate a new content_id from the prefix provided and the next content id index. @@ -610,11 +846,29 @@ def generate_content_id(self, content_id_prefix): if is_subtitled_unicode_spec: # Default is a SubtitledHtml dict or SubtitleUnicode dict. - new_value = copy.deepcopy(ca_spec.default_value) + # Here we use cast because in this if is_subtitled_unicode_spec + # clause, default_value can only be of SubtitledUnicodeDict + # type. So, to narrow down the type from various default_value + # types, we used cast here. 
+ default_value = cast( + state_domain.SubtitledUnicodeDict, ca_spec.default_value + ) + new_value = copy.deepcopy(default_value) + # Here we use cast because in this _convert function we are + # converting older versions of customization arg dicts that + # contains Unicode and Html to newer versions of customization + # arg dicts that contains 'SubtitledUnicodeDict' and + # 'SubtitledHtmlDict', and by using cast here we are + # representing an older version of customization arg dictionary. + older_version_unicode_ca_dict = cast( + Dict[str, Dict[str, str]], ca_dict + ) # If available, assign value to html or unicode_str. if ca_name in ca_dict: - new_value['unicode_str'] = ca_dict[ca_name]['value'] + new_value['unicode_str'] = older_version_unicode_ca_dict[ + ca_name + ]['value'] # Assign content_id. new_value['content_id'] = ( @@ -622,30 +876,86 @@ def generate_content_id(self, content_id_prefix): .generate_content_id(content_id_prefix) ) - ca_dict[ca_name] = {'value': new_value} + # Here we use cast because in this _convert function we are + # converting older versions of customization arg dicts that + # contains Unicode and Html to newer versions of customization + # arg dicts that contains 'SubtitledUnicodeDict' and + # 'SubtitledHtmlDict', and by using cast here we are + # representing an newer version of customization arg dictionary. 
+ updated_unicode_cust_arg_dict = cast( + Dict[str, Dict[str, state_domain.SubtitledUnicodeDict]], + ca_dict + ) + + updated_unicode_cust_arg_dict[ca_name] = {'value': new_value} elif is_subtitled_html_list_spec: - new_value = [] + new_subtitled_html_list_value: ( + List[state_domain.SubtitledHtmlDict] + ) = [] + + # Here we use cast because in this _convert function we are + # converting older versions of customization arg dicts that + # contains list of Unicode and Html to newer versions of + # customization arg dicts that contains list of + # 'SubtitledUnicodeDict' and 'SubtitledHtmlDict', and by using + # cast here we are representing an older version of + # customization arg dictionary. + older_version_html_list_ca_dict = cast( + Dict[str, Dict[str, List[str]]], ca_dict + ) if ca_name in ca_dict: # Assign values to html fields. - for html in ca_dict[ca_name]['value']: - new_value.append({ - 'html': html, 'content_id': None + for html in older_version_html_list_ca_dict[ + ca_name + ]['value']: + new_subtitled_html_list_value.append({ + 'html': html, 'content_id': '' }) else: # Default is a list of SubtitledHtml dict. - new_value.extend(copy.deepcopy(ca_spec.default_value)) + # Here we use cast because in this 'else' clause + # default_value can only be of List[SubtitledHtmlDict] + # type. So, to narrow down the type from various + # default_value types, we used cast here. + new_subtitled_html_list_value.extend( + cast( + List[state_domain.SubtitledHtmlDict], + ca_spec.default_value + ) + ) # Assign content_ids. 
- for subtitled_html_dict in new_value: + for subtitled_html_dict in new_subtitled_html_list_value: subtitled_html_dict['content_id'] = ( content_id_counter .generate_content_id(content_id_prefix) ) - ca_dict[ca_name] = {'value': new_value} + # Here we use cast because in this _convert function we are + # converting older versions of customization arg dicts that + # contains list of Unicode and Html to newer versions of + # customization arg dicts that contains list of + # 'SubtitledUnicodeDict' and 'SubtitledHtmlDict', and + # by using cast here we are representing an newer version + # of customization arg dictionary. + updated_html_list_ca_dict = cast( + Dict[str, Dict[str, List[state_domain.SubtitledHtmlDict]]], + ca_dict + ) + + updated_html_list_ca_dict[ca_name] = { + 'value': new_subtitled_html_list_value + } elif ca_name not in ca_dict: - ca_dict[ca_name] = {'value': ca_spec.default_value} + # Here we use cast because we are narrowing down to the types + # of customization arg values that are not altered by above + # if clauses. + ca_default_value = cast( + state_domain.UnionOfCustomizationArgsDictValues, + ca_spec.default_value + ) + ca_dict[ca_name] = {'value': ca_default_value} ( customization_args_util @@ -655,13 +965,15 @@ def generate_content_id(self, content_id_prefix): ca_dict, ca_specs) ) - - question_state_dict['next_content_id_index'] = ( + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + question_state_dict['next_content_id_index'] = ( # type: ignore[misc] content_id_counter.next_content_id_index) for new_content_id in content_id_counter.new_content_ids: - question_state_dict[ - 'written_translations'][ - 'translations_mapping'][new_content_id] = {} + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. 
+ question_state_dict['written_translations'][ # type: ignore[misc] + 'translations_mapping'][new_content_id] = {} question_state_dict[ 'recorded_voiceovers'][ 'voiceovers_mapping'][new_content_id] = {} @@ -669,7 +981,9 @@ def generate_content_id(self, content_id_prefix): return question_state_dict @classmethod - def _convert_state_v36_dict_to_v37_dict(cls, question_state_dict): + def _convert_state_v36_dict_to_v37_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 36 to 37. Version 37 changes all rules with type CaseSensitiveEquals to Equals. @@ -681,18 +995,20 @@ def _convert_state_v36_dict_to_v37_dict(cls, question_state_dict): Returns: dict. The converted question_state_dict. """ - if question_state_dict['interaction']['id'] != 'TextInput': - return question_state_dict - answer_group_dicts = question_state_dict['interaction']['answer_groups'] - for answer_group_dict in answer_group_dicts: - for rule_spec_dict in answer_group_dict['rule_specs']: - if rule_spec_dict['rule_type'] == 'CaseSensitiveEquals': - rule_spec_dict['rule_type'] = 'Equals' + if question_state_dict['interaction']['id'] == 'TextInput': + answer_group_dicts = question_state_dict[ + 'interaction']['answer_groups'] + for answer_group_dict in answer_group_dicts: + for rule_spec_dict in answer_group_dict['rule_specs']: + if rule_spec_dict['rule_type'] == 'CaseSensitiveEquals': + rule_spec_dict['rule_type'] = 'Equals' return question_state_dict @classmethod - def _convert_state_v37_dict_to_v38_dict(cls, question_state_dict): + def _convert_state_v37_dict_to_v38_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 37 to 38. Version 38 adds a customization arg for the Math interactions that allows creators to specify the letters that would be displayed to the learner. 
@@ -712,6 +1028,9 @@ def _convert_state_v37_dict_to_v38_dict(cls, question_state_dict): 'interaction']['answer_groups']: for rule_spec in group['rule_specs']: rule_input = rule_spec['inputs']['x'] + # Ruling out the possibility of any other type for mypy + # type checking. + assert isinstance(rule_input, str) for variable in expression_parser.get_variables( rule_input): # Replacing greek letter names with greek symbols. @@ -732,7 +1051,9 @@ def _convert_state_v37_dict_to_v38_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v38_dict_to_v39_dict(cls, question_state_dict): + def _convert_state_v38_dict_to_v39_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 38 to 39. Version 39 adds a new customization arg to NumericExpressionInput interaction which allows creators to modify the placeholder text. @@ -757,7 +1078,9 @@ def _convert_state_v38_dict_to_v39_dict(cls, question_state_dict): } } }) - question_state_dict['written_translations']['translations_mapping'][ + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + question_state_dict['written_translations']['translations_mapping'][ # type: ignore[misc] 'ca_placeholder_0'] = {} question_state_dict['recorded_voiceovers']['voiceovers_mapping'][ 'ca_placeholder_0'] = {} @@ -765,7 +1088,9 @@ def _convert_state_v38_dict_to_v39_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v39_dict_to_v40_dict(cls, question_state_dict): + def _convert_state_v39_dict_to_v40_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 39 to 40. Version 40 converts TextInput rule inputs from NormalizedString to SetOfNormalizedString. @@ -777,25 +1102,35 @@ def _convert_state_v39_dict_to_v40_dict(cls, question_state_dict): Returns: dict. The converted question_state_dict. 
""" - if question_state_dict['interaction']['id'] != 'TextInput': - return question_state_dict - - answer_group_dicts = question_state_dict['interaction']['answer_groups'] - for answer_group_dict in answer_group_dicts: - rule_type_to_inputs = collections.defaultdict(set) - for rule_spec_dict in answer_group_dict['rule_specs']: - rule_type = rule_spec_dict['rule_type'] - rule_inputs = rule_spec_dict['inputs']['x'] - rule_type_to_inputs[rule_type].add(rule_inputs) - answer_group_dict['rule_specs'] = [{ - 'rule_type': rule_type, - 'inputs': {'x': list(rule_type_to_inputs[rule_type])} - } for rule_type in rule_type_to_inputs] + if question_state_dict['interaction']['id'] == 'TextInput': + answer_group_dicts = question_state_dict[ + 'interaction']['answer_groups'] + for answer_group_dict in answer_group_dicts: + rule_type_to_inputs: Dict[ + str, Set[state_domain.AllowedRuleSpecInputTypes] + ] = collections.defaultdict(set) + for rule_spec_dict in answer_group_dict['rule_specs']: + rule_type = rule_spec_dict['rule_type'] + rule_inputs = rule_spec_dict['inputs']['x'] + rule_type_to_inputs[rule_type].add(rule_inputs) + # Here we use MyPy ignore because in _convert_* functions, we + # allow less strict typing because here we are working with + # previous versions of the domain object and in previous + # versions of the domain object there are some fields whose type + # does not match with the latest domain object's types. So, + # while assigning these old fields MyPy throws an error. Thus + # to avoid the error, we used ignore here. 
+ answer_group_dict['rule_specs'] = [{ + 'rule_type': rule_type, + 'inputs': {'x': list(rule_type_to_inputs[rule_type])} # type: ignore[dict-item] + } for rule_type in rule_type_to_inputs] return question_state_dict @classmethod - def _convert_state_v40_dict_to_v41_dict(cls, question_state_dict): + def _convert_state_v40_dict_to_v41_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 40 to 41. Version 41 adds TranslatableSetOfUnicodeString and TranslatableSetOfNormalizedString objects to RuleSpec domain objects to allow for translations. @@ -814,16 +1149,16 @@ class ContentIdCounter: function to generate new content_ids. """ - def __init__(self, next_content_id_index): + def __init__(self, next_content_id_index: int) -> None: """Initializes a ContentIdCounter object. Args: next_content_id_index: int. The next content id index. """ - self.new_content_ids = [] + self.new_content_ids: List[str] = [] self.next_content_id_index = next_content_id_index - def generate_content_id(self, content_id_prefix): + def generate_content_id(self, content_id_prefix: str) -> str: """Generate a new content_id from the prefix provided and the next content id index. @@ -844,42 +1179,61 @@ def generate_content_id(self, content_id_prefix): # TextInput and SetInput have translatable rule inputs, and every rule # for these interactions takes exactly one translatable input named x. 
interaction_id = question_state_dict['interaction']['id'] - if interaction_id not in ['TextInput', 'SetInput']: - return question_state_dict - - content_id_counter = ContentIdCounter( - question_state_dict['next_content_id_index']) - answer_group_dicts = question_state_dict['interaction']['answer_groups'] - for answer_group_dict in answer_group_dicts: - for rule_spec_dict in answer_group_dict['rule_specs']: - content_id = content_id_counter.generate_content_id( - 'rule_input_') - if interaction_id == 'TextInput': + if interaction_id in ['TextInput', 'SetInput']: + content_id_counter = ContentIdCounter( + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + question_state_dict['next_content_id_index']) # type: ignore[misc] + answer_group_dicts = question_state_dict[ + 'interaction']['answer_groups'] + for answer_group_dict in answer_group_dicts: + for rule_spec_dict in answer_group_dict['rule_specs']: + content_id = content_id_counter.generate_content_id( + 'rule_input_') + # Here we use MyPy ignore because the expected + # type for `rule_spec_dict['inputs']['x']` is + # AllowedRuleSpecInputTypes but here we are providing + # Dict[str, AllowedRuleSpecInputTypes] values which + # causes MyPy to throw `incompatible type` error. Thus + # to avoid the error, we used ignore here. # Convert to TranslatableSetOfNormalizedString. - rule_spec_dict['inputs']['x'] = { - 'contentId': content_id, - 'normalizedStrSet': rule_spec_dict['inputs']['x'] - } - elif interaction_id == 'SetInput': - # Convert to TranslatableSetOfUnicodeString. 
- rule_spec_dict['inputs']['x'] = { - 'contentId': content_id, - 'unicodeStrSet': rule_spec_dict['inputs']['x'] - } - question_state_dict['next_content_id_index'] = ( - content_id_counter.next_content_id_index) - for new_content_id in content_id_counter.new_content_ids: - question_state_dict[ - 'written_translations'][ - 'translations_mapping'][new_content_id] = {} - question_state_dict[ - 'recorded_voiceovers'][ - 'voiceovers_mapping'][new_content_id] = {} + if interaction_id == 'TextInput': + rule_spec_dict['inputs']['x'] = { + 'contentId': content_id, + 'normalizedStrSet': rule_spec_dict['inputs']['x'] # type: ignore[dict-item] + } + # Here we use MyPy ignore because the expected + # type for `rule_spec_dict['inputs']['x']` is + # AllowedRuleSpecInputTypes but here we are providing + # Dict[str, AllowedRuleSpecInputTypes] values which + # causes MyPy to throw `incompatible type` error. Thus + # to avoid the error, we used ignore here. + elif interaction_id == 'SetInput': + # Convert to TranslatableSetOfUnicodeString. + rule_spec_dict['inputs']['x'] = { + 'contentId': content_id, + 'unicodeStrSet': rule_spec_dict['inputs']['x'] # type: ignore[dict-item] + } + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + question_state_dict['next_content_id_index'] = ( # type: ignore[misc] + content_id_counter.next_content_id_index) + for new_content_id in content_id_counter.new_content_ids: + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. 
+ question_state_dict[ + 'written_translations'][ # type: ignore[misc] + 'translations_mapping'][new_content_id] = {} + question_state_dict[ + 'recorded_voiceovers'][ + 'voiceovers_mapping'][new_content_id] = {} return question_state_dict @classmethod - def _convert_state_v41_dict_to_v42_dict(cls, question_state_dict): + def _convert_state_v41_dict_to_v42_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 41 to 42. Version 42 changes rule input types for DragAndDropSortInput and ItemSelectionInput interactions to better support translations. Specifically, the rule inputs will store content @@ -896,7 +1250,38 @@ def _convert_state_v41_dict_to_v42_dict(cls, question_state_dict): dict. The converted question_state_dict. """ - def migrate_rule_inputs_and_answers(new_type, value, choices): + @overload + def migrate_rule_inputs_and_answers( + new_type: str, + value: str, + choices: List[state_domain.SubtitledHtmlDict] + ) -> str: ... + + @overload + def migrate_rule_inputs_and_answers( + new_type: str, + value: List[str], + choices: List[state_domain.SubtitledHtmlDict] + ) -> List[str]: ... + + @overload + def migrate_rule_inputs_and_answers( + new_type: str, + value: List[List[str]], + choices: List[state_domain.SubtitledHtmlDict] + ) -> List[List[str]]: ... + + # Here we use MyPy ignore because MyPy expects a return value in + # every condition when we define a return type but here we are + # returning only in if-else conditions and we are not returning + # when none of the condition matches which causes MyPy to throw + # a 'Missing return statement' error. Thus to avoid the error, + # we used ignore here. 
+ def migrate_rule_inputs_and_answers( # type: ignore[return] + new_type: str, + value: Union[List[List[str]], List[str], str], + choices: List[state_domain.SubtitledHtmlDict] + ) -> Union[List[List[str]], List[str], str]: """Migrates SetOfHtmlString to SetOfTranslatableHtmlContentIds, ListOfSetsOfHtmlStrings to ListOfSetsOfTranslatableHtmlContentIds, and DragAndDropHtmlString to TranslatableHtmlContentId. These @@ -914,7 +1299,7 @@ def migrate_rule_inputs_and_answers(new_type, value, choices): *. The migrated rule input. """ - def extract_content_id_from_choices(html): + def extract_content_id_from_choices(html: str) -> str: """Given a html, find its associated content id in choices, which is a list of subtitled html dicts. @@ -933,105 +1318,172 @@ def extract_content_id_from_choices(html): return feconf.INVALID_CONTENT_ID if new_type == 'TranslatableHtmlContentId': + # Here 'TranslatableHtmlContentId' can only be of str type, thus + # to narrow down the type we used assert here. + assert isinstance(value, str) return extract_content_id_from_choices(value) elif new_type == 'SetOfTranslatableHtmlContentIds': + # Here we use cast because this 'elif' condition forces value + # to have type List[str]. + set_of_content_ids = cast(List[str], value) return [ migrate_rule_inputs_and_answers( 'TranslatableHtmlContentId', html, choices - ) for html in value + ) for html in set_of_content_ids ] elif new_type == 'ListOfSetsOfTranslatableHtmlContentIds': + # Here we use cast because this 'elif' condition forces value + # to have type List[List[str]]. 
+ list_of_set_of_content_ids = cast( + List[List[str]], value + ) return [ migrate_rule_inputs_and_answers( 'SetOfTranslatableHtmlContentIds', html_set, choices - ) for html_set in value + ) for html_set in list_of_set_of_content_ids ] interaction_id = question_state_dict['interaction']['id'] - if interaction_id not in [ - 'DragAndDropSortInput', 'ItemSelectionInput']: - return question_state_dict - - solution = question_state_dict['interaction']['solution'] - choices = question_state_dict['interaction']['customization_args'][ - 'choices']['value'] - - if interaction_id == 'ItemSelectionInput': - # The solution type will be migrated from SetOfHtmlString to - # SetOfTranslatableHtmlContentIds. - if solution is not None: - solution['correct_answer'] = ( - migrate_rule_inputs_and_answers( - 'SetOfTranslatableHtmlContentIds', - solution['correct_answer'], - choices) - ) - if interaction_id == 'DragAndDropSortInput': - # The solution type will be migrated from ListOfSetsOfHtmlString - # to ListOfSetsOfTranslatableHtmlContentIds. - if solution is not None: - solution['correct_answer'] = ( - migrate_rule_inputs_and_answers( - 'ListOfSetsOfTranslatableHtmlContentIds', - solution['correct_answer'], - choices) - ) + if interaction_id in ['DragAndDropSortInput', 'ItemSelectionInput']: + solution = question_state_dict['interaction']['solution'] + # Here we use cast because we are narrowing down the type from + # various customization args value types to List[SubtitledHtmlDict] + # type, and this is done because here we are accessing 'choices' key + # over 'DragAndDropSortInput' and 'ItemSelectionInput' customization + # args and in these customization args 'choices' key will only have + # values of type List[SubtitledHtmlDict]. 
+ choices = cast( + List[state_domain.SubtitledHtmlDict], + question_state_dict['interaction']['customization_args'][ + 'choices']['value'] + ) - answer_group_dicts = question_state_dict['interaction']['answer_groups'] - for answer_group_dict in answer_group_dicts: - for rule_spec_dict in answer_group_dict['rule_specs']: - rule_type = rule_spec_dict['rule_type'] - rule_inputs = rule_spec_dict['inputs'] - - if interaction_id == 'ItemSelectionInput': - # All rule inputs for ItemSelectionInput will be - # migrated from SetOfHtmlString to - # SetOfTranslatableHtmlContentIds. - rule_inputs['x'] = migrate_rule_inputs_and_answers( - 'SetOfTranslatableHtmlContentIds', - rule_inputs['x'], - choices) - if interaction_id == 'DragAndDropSortInput': - rule_types_with_list_of_sets = [ - 'IsEqualToOrdering', - 'IsEqualToOrderingWithOneItemAtIncorrectPosition' - ] - if rule_type in rule_types_with_list_of_sets: - # For rule type IsEqualToOrdering and - # IsEqualToOrderingWithOneItemAtIncorrectPosition, - # the x input will be migrated from - # ListOfSetsOfHtmlStrings to - # ListOfSetsOfTranslatableHtmlContentIds. - rule_inputs['x'] = migrate_rule_inputs_and_answers( + if interaction_id == 'ItemSelectionInput': + # The solution type will be migrated from SetOfHtmlString to + # SetOfTranslatableHtmlContentIds. + if solution is not None: + # Ruling out the possibility of any other type for MyPy type + # checking because for interaction 'ItemSelectionInput', + # the correct_answer is formatted as List[str] type. 
+ assert isinstance(solution['correct_answer'], list) + list_of_html_contents = [] + for html_content in solution['correct_answer']: + assert isinstance(html_content, str) + list_of_html_contents.append(html_content) + solution['correct_answer'] = ( + migrate_rule_inputs_and_answers( + 'SetOfTranslatableHtmlContentIds', + list_of_html_contents, + choices) + ) + if interaction_id == 'DragAndDropSortInput': + # The solution type will be migrated from ListOfSetsOfHtmlString + # to ListOfSetsOfTranslatableHtmlContentIds. + if solution is not None: + # Ruling out the possibility of any other type for MyPy type + # checking because for interaction 'DragAndDropSortInput', + # the correct_answer is formatted as List[List[str]] type. + assert isinstance(solution['correct_answer'], list) + list_of_html_content_list = [] + for html_content_list in solution['correct_answer']: + assert isinstance(html_content_list, list) + list_of_html_content_list.append(html_content_list) + solution['correct_answer'] = ( + migrate_rule_inputs_and_answers( 'ListOfSetsOfTranslatableHtmlContentIds', - rule_inputs['x'], + list_of_html_content_list, choices) - elif rule_type == 'HasElementXAtPositionY': - # For rule type HasElementXAtPositionY, - # the x input will be migrated from - # DragAndDropHtmlString to - # TranslatableHtmlContentId, and the y input will - # remain as DragAndDropPositiveInt. + ) + + answer_group_dicts = question_state_dict[ + 'interaction']['answer_groups'] + for answer_group_dict in answer_group_dicts: + for rule_spec_dict in answer_group_dict['rule_specs']: + rule_type = rule_spec_dict['rule_type'] + rule_inputs = rule_spec_dict['inputs'] + + if interaction_id == 'ItemSelectionInput': + # All rule inputs for ItemSelectionInput will be + # migrated from SetOfHtmlString to + # SetOfTranslatableHtmlContentIds. 
+ # Ruling out the possibility of any other type + # for MyPy type checking because for interaction + # 'ItemSelectionInput', the rule inputs are formatted + # as List[str] type. + assert isinstance(rule_inputs['x'], list) + list_of_html_contents = [] + for html_content in rule_inputs['x']: + assert isinstance(html_content, str) + list_of_html_contents.append(html_content) rule_inputs['x'] = migrate_rule_inputs_and_answers( - 'TranslatableHtmlContentId', - rule_inputs['x'], + 'SetOfTranslatableHtmlContentIds', + list_of_html_contents, choices) - elif rule_type == 'HasElementXBeforeElementY': - # For rule type HasElementXBeforeElementY, - # the x and y inputs will be migrated from - # DragAndDropHtmlString to - # TranslatableHtmlContentId. - for rule_input_name in ['x', 'y']: - rule_inputs[rule_input_name] = ( - migrate_rule_inputs_and_answers( - 'TranslatableHtmlContentId', - rule_inputs[rule_input_name], - choices)) + if interaction_id == 'DragAndDropSortInput': + rule_types_with_list_of_sets = [ + 'IsEqualToOrdering', + 'IsEqualToOrderingWithOneItemAtIncorrectPosition' + ] + if rule_type in rule_types_with_list_of_sets: + # For rule type IsEqualToOrdering and + # IsEqualToOrderingWithOneItemAtIncorrectPosition, + # the x input will be migrated from + # ListOfSetsOfHtmlStrings to + # ListOfSetsOfTranslatableHtmlContentIds. + # Ruling out the possibility of any other type + # for MyPy type checking because for interaction + # 'DragAndDropSortInput', the rule inputs are + # formatted as List[List[str]] type. 
+ assert isinstance(rule_inputs['x'], list) + list_of_html_content_list = [] + for html_content_list in rule_inputs['x']: + assert isinstance(html_content_list, list) + list_of_html_content_list.append( + html_content_list + ) + rule_inputs['x'] = migrate_rule_inputs_and_answers( + 'ListOfSetsOfTranslatableHtmlContentIds', + list_of_html_content_list, + choices) + elif rule_type == 'HasElementXAtPositionY': + # For rule type HasElementXAtPositionY, + # the x input will be migrated from + # DragAndDropHtmlString to + # TranslatableHtmlContentId, and the y input will + # remain as DragAndDropPositiveInt. + # Ruling out the possibility of any other type + # for MyPy type checking because for interaction + # 'HasElementXAtPositionY', the rule inputs are + # formatted as str type. + assert isinstance(rule_inputs['x'], str) + rule_inputs['x'] = migrate_rule_inputs_and_answers( + 'TranslatableHtmlContentId', + rule_inputs['x'], + choices) + elif rule_type == 'HasElementXBeforeElementY': + # For rule type HasElementXBeforeElementY, + # the x and y inputs will be migrated from + # DragAndDropHtmlString to + # TranslatableHtmlContentId. + for rule_input_name in ['x', 'y']: + rule_input_value = rule_inputs[rule_input_name] + # Ruling out the possibility of any other type + # for MyPy type checking because for interaction + # 'HasElementXBeforeElementY', the rule inputs + # are formatted as str type. + assert isinstance(rule_input_value, str) + rule_inputs[rule_input_name] = ( + migrate_rule_inputs_and_answers( + 'TranslatableHtmlContentId', + rule_input_value, + choices)) return question_state_dict @classmethod - def _convert_state_v42_dict_to_v43_dict(cls, question_state_dict): + def _convert_state_v42_dict_to_v43_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 42 to 43. Version 43 adds a new customization arg to NumericExpressionInput, AlgebraicExpressionInput, and MathEquationInput. 
The customization arg will allow creators to choose @@ -1060,7 +1512,9 @@ def _convert_state_v42_dict_to_v43_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v43_dict_to_v44_dict(cls, question_state_dict): + def _convert_state_v43_dict_to_v44_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 43 to version 44. Version 44 adds card_is_checkpoint boolean to the state, which allows creators to mark a state as a checkpoint for the learners. @@ -1076,7 +1530,9 @@ def _convert_state_v43_dict_to_v44_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v44_dict_to_v45_dict(cls, question_state_dict): + def _convert_state_v44_dict_to_v45_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 44 to 45. Version 45 contains linked skil id. @@ -1094,7 +1550,9 @@ def _convert_state_v44_dict_to_v45_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v45_dict_to_v46_dict(cls, question_state_dict): + def _convert_state_v45_dict_to_v46_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 45 to 46. Version 46 ensures that the written translations in a state containing unicode content do not contain HTML tags and the data_format is unicode. This does not affect questions, so @@ -1112,7 +1570,9 @@ def _convert_state_v45_dict_to_v46_dict(cls, question_state_dict): return question_state_dict @classmethod - def _convert_state_v46_dict_to_v47_dict(cls, question_state_dict): + def _convert_state_v46_dict_to_v47_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 46 to 47. Version 52 deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. 
@@ -1128,11 +1588,14 @@ def _convert_state_v46_dict_to_v47_dict(cls, question_state_dict): state_domain.State.convert_html_fields_in_state( question_state_dict, - html_validation_service.convert_svg_diagram_tags_to_image_tags) + html_validation_service.convert_svg_diagram_tags_to_image_tags, + state_schema_version=46) return question_state_dict @classmethod - def _convert_state_v47_dict_to_v48_dict(cls, question_state_dict): + def _convert_state_v47_dict_to_v48_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts draft change list from state version 47 to 48. Version 48 fixes encoding issues in HTML fields. @@ -1147,11 +1610,14 @@ def _convert_state_v47_dict_to_v48_dict(cls, question_state_dict): state_domain.State.convert_html_fields_in_state( question_state_dict, - html_validation_service.fix_incorrectly_encoded_chars) + html_validation_service.fix_incorrectly_encoded_chars, + state_schema_version=48) return question_state_dict @classmethod - def _convert_state_v48_dict_to_v49_dict(cls, question_state_dict): + def _convert_state_v48_dict_to_v49_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: """Converts from version 48 to 49. Version 49 adds requireNonnegativeInput customization arg to NumericInput interaction which allows creators to set input range greater than @@ -1175,9 +1641,184 @@ def _convert_state_v48_dict_to_v49_dict(cls, question_state_dict): }) return question_state_dict + @classmethod + def _convert_state_v49_dict_to_v50_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: + """Converts from version 49 to 50. Version 50 removes rules from + explorations that use one of the following rules: + [ContainsSomeOf, OmitsSomeOf, MatchesWithGeneralForm]. It also renames + `customOskLetters` cust arg to `allowedVariables`. + + Args: + question_state_dict: dict. 
A dict where each key-value pair + represents respectively, a state name and a dict used to + initialize a State domain object. + + Returns: + dict. The converted question_state_dict. + """ + if question_state_dict[ + 'interaction']['id'] in exp_domain.MATH_INTERACTION_TYPES: + filtered_answer_groups = [] + for answer_group_dict in question_state_dict[ + 'interaction']['answer_groups']: + filtered_rule_specs = [] + for rule_spec_dict in answer_group_dict['rule_specs']: + rule_type = rule_spec_dict['rule_type'] + if rule_type not in ( + exp_domain.MATH_INTERACTION_DEPRECATED_RULES): + filtered_rule_specs.append( + copy.deepcopy(rule_spec_dict)) + answer_group_dict['rule_specs'] = filtered_rule_specs + if len(filtered_rule_specs) > 0: + filtered_answer_groups.append( + copy.deepcopy(answer_group_dict)) + question_state_dict[ + 'interaction']['answer_groups'] = filtered_answer_groups + + # Renaming cust arg. + if question_state_dict[ + 'interaction']['id'] in exp_domain.ALGEBRAIC_MATH_INTERACTIONS: + customization_args = question_state_dict[ + 'interaction']['customization_args'] + customization_args['allowedVariables'] = copy.deepcopy( + customization_args['customOskLetters']) + del customization_args['customOskLetters'] + + return question_state_dict + + @classmethod + def _convert_state_v50_dict_to_v51_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: + """Converts from version 50 to 51. Version 51 adds a new + dest_if_really_stuck field to Outcome class to redirect learners + to a state for strengthening concepts when they get really stuck. + + Args: + question_state_dict: dict. A dict where each key-value pair + represents respectively, a state name and a dict used to + initialize a State domain object. + + Returns: + dict. The converted question_state_dict. 
+ """ + + answer_groups = question_state_dict['interaction']['answer_groups'] + for answer_group in answer_groups: + answer_group['outcome']['dest_if_really_stuck'] = None + + if question_state_dict['interaction']['default_outcome'] is not None: + question_state_dict[ + 'interaction']['default_outcome']['dest_if_really_stuck'] = None + + return question_state_dict + + @classmethod + def _convert_state_v51_dict_to_v52_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: + """Converts from version 51 to 52. Version 52 fixes content IDs for + translations and voiceovers in exploration but no action is required in + question dicts. + + Args: + question_state_dict: dict. A dict where each key-value pair + represents respectively, a state name and a dict used to + initialize a State domain object. + + Returns: + dict. The converted question_state_dict. + """ + + return question_state_dict + + @classmethod + def _convert_state_v52_dict_to_v53_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: + """Converts from version 52 to 53. Version 53 fixes errored data present + in exploration state, RTE and interactions. + + Args: + question_state_dict: dict. A dict where each key-value pair + represents respectively, a state name and a dict used to + initialize a State domain object. + + Returns: + dict. The converted question_state_dict. + """ + + # The version 53 only fixes the data for `Exploration` and make + # no changes in the `Question` that is why we are simply returning. + return question_state_dict + + @classmethod + def _convert_state_v53_dict_to_v54_dict( + cls, question_state_dict: state_domain.StateDict + ) -> state_domain.StateDict: + """Converts from version 53 to 54. Version 54 adds + catchMisspellings customization arg to TextInput + interaction which allows creators to detect misspellings. + + Args: + question_state_dict: dict. 
A dict where each key-value pair + represents respectively, a state name and a dict used to + initialize a State domain object. + + Returns: + dict. The converted question_state_dict. + """ + if question_state_dict['interaction']['id'] == 'TextInput': + customization_args = question_state_dict[ + 'interaction']['customization_args'] + customization_args.update({ + 'catchMisspellings': { + 'value': False + } + }) + return question_state_dict + + @classmethod + def _convert_state_v54_dict_to_v55_dict( + cls, + question_state_dict: state_domain.StateDict + ) -> Tuple[state_domain.StateDict, int]: + """Converts from v54 to v55. Version 55 removes next_content_id_index + and WrittenTranslation from State. This version also updates the + content-ids for each translatable field in the state with its new + content-id. + + Args: + question_state_dict: dict. A dict where each key-value pair + represents respectively, a state name and a dict used to + initialize a State domain object. + + Returns: + dict. The converted question_state_dict. + """ + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + del question_state_dict['next_content_id_index'] # type: ignore[misc] + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. 
+ del question_state_dict['written_translations'] # type: ignore[misc] + states_dict, next_content_id_index = ( + state_domain.State + .update_old_content_id_to_new_content_id_in_v54_states({ + 'question_state': question_state_dict + }) + ) + + return states_dict['question_state'], next_content_id_index + @classmethod def update_state_from_model( - cls, versioned_question_state, current_state_schema_version): + cls, + versioned_question_state: VersionedQuestionStateDict, + current_state_schema_version: int + ) -> Optional[int]: """Converts the state object contained in the given versioned_question_state dict from current_state_schema_version to current_state_schema_version + 1. @@ -1192,6 +1833,10 @@ def update_state_from_model( state data. current_state_schema_version: int. The current state schema version. + + Returns: + int|None. The next content id index if the current state schema + version is 53 else None. """ versioned_question_state['state_schema_version'] = ( current_state_schema_version + 1) @@ -1199,10 +1844,19 @@ def update_state_from_model( conversion_fn = getattr(cls, '_convert_state_v%s_dict_to_v%s_dict' % ( current_state_schema_version, current_state_schema_version + 1)) + if current_state_schema_version == 54: + versioned_question_state['state'], next_content_id_index = ( + conversion_fn(versioned_question_state['state']) + ) + assert isinstance(next_content_id_index, int) + return next_content_id_index + versioned_question_state['state'] = conversion_fn( versioned_question_state['state']) - def partial_validate(self): + return None + + def partial_validate(self) -> None: """Validates the Question domain object, but doesn't require the object to contain an ID and a version. To be used to validate the question before it is finalized. 
@@ -1256,6 +1910,14 @@ def partial_validate(self): 'Expected schema version to be an integer, received %s' % self.question_state_data_schema_version) + if self.question_state_data_schema_version != ( + feconf.CURRENT_STATE_SCHEMA_VERSION): + raise utils.ValidationError( + 'Expected question state schema version to be %s, received ' + '%s' % ( + feconf.CURRENT_STATE_SCHEMA_VERSION, + self.question_state_data_schema_version)) + if not isinstance(self.question_state_data, state_domain.State): raise utils.ValidationError( 'Expected question state data to be a State object, ' @@ -1268,19 +1930,31 @@ def partial_validate(self): interaction_specs = interaction_registry.Registry.get_all_specs() at_least_one_correct_answer = False dest_is_specified = False + dest_if_stuck_is_specified = False interaction = self.question_state_data.interaction for answer_group in interaction.answer_groups: if answer_group.outcome.labelled_as_correct: at_least_one_correct_answer = True if answer_group.outcome.dest is not None: dest_is_specified = True - + if answer_group.outcome.dest_if_really_stuck is not None: + dest_if_stuck_is_specified = True + + # Ruling out the possibility of None for MyPy type checking, because + # interaction.default_outcome can be None in the case of explorations + # but while creating the questions we are always providing default + # outcome. So, we are sure that here interaction.default_outcome is + # never going to be None, that's why we used assert here. 
+ assert interaction.default_outcome is not None if interaction.default_outcome.labelled_as_correct: at_least_one_correct_answer = True if interaction.default_outcome.dest is not None: dest_is_specified = True + if interaction.default_outcome.dest_if_really_stuck is not None: + dest_if_stuck_is_specified = True + if not at_least_one_correct_answer: raise utils.ValidationError( 'Expected at least one answer group to have a correct ' + @@ -1292,19 +1966,36 @@ def partial_validate(self): 'Expected all answer groups to have destination as None.' ) + if dest_if_stuck_is_specified: + raise utils.ValidationError( + 'Expected all answer groups to have destination for the ' + 'stuck learner as None.' + ) + if not interaction.hints: raise utils.ValidationError( 'Expected the question to have at least one hint') + # Here, we are asserting that id is never going to be None, because + # None interactions are not allowed to contain questions, so if an + # interaction have questions then it definitely have interaction_id. + assert interaction.id is not None if ( (interaction.solution is None) and (interaction_specs[interaction.id]['can_have_solution'])): raise utils.ValidationError( 'Expected the question to have a solution' ) - self.question_state_data.validate({}, False) - - def validate(self): + # Here the variable `tagged_skill_misconception_id_required` + # represents that the tagged skill misconception id field is + # required for it. + self.question_state_data.validate( + {}, + False, + tagged_skill_misconception_id_required=True) + self.validate_translatable_contents(self.next_content_id_index) + + def validate(self) -> None: """Validates the Question domain object before it is saved.""" if not isinstance(self.id, str): @@ -1319,7 +2010,7 @@ def validate(self): self.partial_validate() @classmethod - def from_dict(cls, question_dict): + def from_dict(cls, question_dict: QuestionDict) -> Question: """Returns a Question domain object from dict. 
Returns: @@ -1331,12 +2022,15 @@ def from_dict(cls, question_dict): question_dict['question_state_data_schema_version'], question_dict['language_code'], question_dict['version'], question_dict['linked_skill_ids'], - question_dict['inapplicable_skill_misconception_ids']) + question_dict['inapplicable_skill_misconception_ids'], + question_dict['next_content_id_index']) return question @classmethod - def create_default_question(cls, question_id, skill_ids): + def create_default_question( + cls, question_id: str, skill_ids: List[str] + ) -> Question: """Returns a Question domain object with default values. Args: @@ -1346,14 +2040,17 @@ def create_default_question(cls, question_id, skill_ids): Returns: Question. A Question domain object with default values. """ - default_question_state_data = cls.create_default_question_state() + content_id_generator = translation_domain.ContentIdGenerator() + default_question_state_data = cls.create_default_question_state( + content_id_generator) return cls( question_id, default_question_state_data, feconf.CURRENT_STATE_SCHEMA_VERSION, - constants.DEFAULT_LANGUAGE_CODE, 0, skill_ids, []) + constants.DEFAULT_LANGUAGE_CODE, 0, skill_ids, [], + content_id_generator.next_content_id_index) - def update_language_code(self, language_code): + def update_language_code(self, language_code: str) -> None: """Updates the language code of the question. Args: @@ -1362,7 +2059,7 @@ def update_language_code(self, language_code): """ self.language_code = language_code - def update_linked_skill_ids(self, linked_skill_ids): + def update_linked_skill_ids(self, linked_skill_ids: List[str]) -> None: """Updates the linked skill ids of the question. 
Args: @@ -1371,7 +2068,8 @@ def update_linked_skill_ids(self, linked_skill_ids): self.linked_skill_ids = list(set(linked_skill_ids)) def update_inapplicable_skill_misconception_ids( - self, inapplicable_skill_misconception_ids): + self, inapplicable_skill_misconception_ids: List[str] + ) -> None: """Updates the optional misconception ids marked as not applicable to the question. @@ -1383,7 +2081,15 @@ def update_inapplicable_skill_misconception_ids( self.inapplicable_skill_misconception_ids = list( set(inapplicable_skill_misconception_ids)) - def update_question_state_data(self, question_state_data): + def update_next_content_id_index( + self, next_content_id_index: int + ) -> None: + """Updates the next content id index for the question.""" + self.next_content_id_index = next_content_id_index + + def update_question_state_data( + self, question_state_data: state_domain.State + ) -> None: """Updates the question data of the question. Args: @@ -1393,20 +2099,36 @@ def update_question_state_data(self, question_state_data): self.question_state_data = question_state_data +class QuestionSummaryDict(TypedDict): + """Dictionary representing the QuestionSummary domain object.""" + + id: str + question_content: str + interaction_id: str + last_updated_msec: float + created_on_msec: float + misconception_ids: List[str] + + class QuestionSummary: """Domain object for Question Summary.""" def __init__( - self, question_id, question_content, misconception_ids, - interaction_id, question_model_created_on=None, - question_model_last_updated=None): + self, + question_id: str, + question_content: str, + misconception_ids: List[str], + interaction_id: str, + question_model_created_on: datetime.datetime, + question_model_last_updated: datetime.datetime + ) -> None: """Constructs a Question Summary domain object. Args: question_id: str. The ID of the question. question_content: str. The static HTML of the question shown to the learner. - misconception_ids: str. 
The misconception ids addressed in + misconception_ids: list(str). The misconception ids addressed in the question. This includes tagged misconceptions ids as well as inapplicable misconception ids in the question. interaction_id: str. The ID of the interaction. @@ -1422,12 +2144,13 @@ def __init__( self.created_on = question_model_created_on self.last_updated = question_model_last_updated - def to_dict(self): + def to_dict(self) -> QuestionSummaryDict: """Returns a dictionary representation of this domain object. Returns: dict. A dict representing this QuestionSummary object. """ + return { 'id': self.id, 'question_content': self.question_content, @@ -1437,7 +2160,7 @@ def to_dict(self): 'misconception_ids': self.misconception_ids } - def validate(self): + def validate(self) -> None: """Validates the Question summary domain object before it is saved. Raises: @@ -1476,6 +2199,15 @@ def validate(self): 'strings, received %s' % self.misconception_ids) +class QuestionSkillLinkDict(TypedDict): + """Dictionary representing the QuestionSkillLink domain object.""" + + question_id: str + skill_id: str + skill_description: str + skill_difficulty: float + + class QuestionSkillLink: """Domain object for Question Skill Link. @@ -1488,7 +2220,12 @@ class QuestionSkillLink: """ def __init__( - self, question_id, skill_id, skill_description, skill_difficulty): + self, + question_id: str, + skill_id: str, + skill_description: str, + skill_difficulty: float + ) -> None: """Constructs a Question Skill Link domain object. Args: @@ -1502,7 +2239,7 @@ def __init__( self.skill_description = skill_description self.skill_difficulty = skill_difficulty - def to_dict(self): + def to_dict(self) -> QuestionSkillLinkDict: """Returns a dictionary representation of this domain object. 
Returns: @@ -1516,6 +2253,15 @@ def to_dict(self): } +class MergedQuestionSkillLinkDict(TypedDict): + """Dictionary representing the MergedQuestionSkillLink domain object.""" + + question_id: str + skill_ids: List[str] + skill_descriptions: List[str] + skill_difficulties: List[float] + + class MergedQuestionSkillLink: """Domain object for the Merged Question Skill Link object, returned to the editors. @@ -1530,8 +2276,12 @@ class MergedQuestionSkillLink: """ def __init__( - self, question_id, skill_ids, skill_descriptions, - skill_difficulties): + self, + question_id: str, + skill_ids: List[str], + skill_descriptions: List[str], + skill_difficulties: List[float] + ) -> None: """Constructs a Merged Question Skill Link domain object. Args: @@ -1547,7 +2297,7 @@ def __init__( self.skill_descriptions = skill_descriptions self.skill_difficulties = skill_difficulties - def to_dict(self): + def to_dict(self) -> MergedQuestionSkillLinkDict: """Returns a dictionary representation of this domain object. 
Returns: diff --git a/core/domain/question_domain_test.py b/core/domain/question_domain_test.py index c10ab4dc3bdd..4f5d3bb41914 100644 --- a/core/domain/question_domain_test.py +++ b/core/domain/question_domain_test.py @@ -16,20 +16,25 @@ from __future__ import annotations +import copy import datetime import re from core import feconf from core import utils +from core.domain import customization_args_util from core.domain import question_domain from core.domain import state_domain +from core.domain import translation_domain from core.tests import test_utils +from typing import Dict, List, Union + class QuestionChangeTest(test_utils.GenericTestBase): """Test for Question Change object.""" - def test_to_dict(self): + def test_to_dict(self) -> None: """Test to verify to_dict method of the Question Change object.""" expected_object_dict = { 'cmd': 'update_question_property', @@ -50,72 +55,72 @@ def test_to_dict(self): self.assertEqual(expected_object_dict, observed_object.to_dict()) - def test_change_dict_without_cmd(self): + def test_change_dict_without_cmd(self) -> None: """Test to verify __init__ method of the Question Change object when change_dict is without cmd key. """ - self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, - 'Missing cmd key in change dict', - callableObj=question_domain.QuestionChange, - change_dict={} - ) + 'Missing cmd key in change dict' + ): + question_domain.QuestionChange({}) - def test_change_dict_with_wrong_cmd(self): + def test_change_dict_with_wrong_cmd(self) -> None: """Test to verify __init__ method of the Question Change object when change_dict is with wrong cmd value. 
""" - self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, - 'Command wrong is not allowed', - callableObj=question_domain.QuestionChange, - change_dict={'cmd': 'wrong', } - ) + 'Command wrong is not allowed' + ): + question_domain.QuestionChange({'cmd': 'wrong'}) - def test_change_dict_with_missing_attributes_in_cmd(self): + def test_change_dict_with_missing_attributes_in_cmd(self) -> None: """Test to verify __init__ method of the Question Change object when change_dict is with missing attributes in cmd. """ - self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, - 'The following required attributes are present: new_value', - callableObj=question_domain.QuestionChange, - change_dict={ + 'The following required attributes are missing: new_value' + ): + question_domain.QuestionChange({ 'cmd': 'update_question_property', 'property_name': 'question_state_data', 'old_value': 'old_value' } ) - def test_change_dict_with_extra_attributes_in_cmd(self): + def test_change_dict_with_extra_attributes_in_cmd(self) -> None: """Test to verify __init__ method of the Question Change object when change_dict is with extra attributes in cmd. """ - self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, - 'The following extra attributes are present: invalid', - callableObj=question_domain.QuestionChange, - change_dict={'cmd': 'create_new', 'invalid': 'invalid'} - ) + 'The following extra attributes are present: invalid' + ): + question_domain.QuestionChange( + {'cmd': 'create_new', 'invalid': 'invalid'} + ) - def test_update_question_property_with_wrong_property_name(self): + def test_update_question_property_with_wrong_property_name(self) -> None: """Test to verify __init__ method of the Question Change object when cmd is update_question_property and wrong property_name is given. 
""" - self.assertRaisesRegexp( - utils.ValidationError, ( - 'Value for property_name in cmd update_question_property: ' - 'wrong is not allowed'), - callableObj=question_domain.QuestionChange, - change_dict={ + with self.assertRaisesRegex( + utils.ValidationError, + 'Value for property_name in cmd update_question_property: ' + 'wrong is not allowed' + ): + question_domain.QuestionChange( + { 'cmd': 'update_question_property', 'property_name': 'wrong', 'new_value': 'new_value', 'old_value': 'old_value' - } - ) + } + ) - def test_create_new(self): + def test_create_new(self) -> None: """Test to verify __init__ method of the Question Change object when cmd is create_new. """ @@ -128,7 +133,7 @@ def test_create_new(self): self.assertEqual('create_new', observed_object.cmd) - def test_update_question_property(self): + def test_update_question_property(self) -> None: """Test to verify __init__ method of the Question Change object when cmd is update_question_property. """ @@ -147,13 +152,14 @@ def test_update_question_property(self): self.assertEqual('new_value', observed_object.new_value) self.assertEqual('old_value', observed_object.old_value) - def test_create_new_fully_specified_question(self): + def test_create_new_fully_specified_question(self) -> None: """Test to verify __init__ method of the Question Change object when cmd is create_new_fully_specified_question. 
""" - change_dict = { + test_question_dict: Dict[str, str] = {} + change_dict: Dict[str, Union[str, Dict[str, str]]] = { 'cmd': 'create_new_fully_specified_question', - 'question_dict': {}, + 'question_dict': test_question_dict, 'skill_id': '10', } observed_object = question_domain.QuestionChange( @@ -165,11 +171,11 @@ def test_create_new_fully_specified_question(self): self.assertEqual('10', observed_object.skill_id) self.assertEqual({}, observed_object.question_dict) - def test_migrate_state_schema_to_latest_version(self): + def test_migrate_state_schema_to_latest_version(self) -> None: """Test to verify __init__ method of the Question Change object when cmd is migrate_state_schema_to_latest_version. """ - change_dict = { + change_dict: Dict[str, Union[str, int]] = { 'cmd': 'migrate_state_schema_to_latest_version', 'from_version': 0, 'to_version': 10, @@ -187,7 +193,7 @@ def test_migrate_state_schema_to_latest_version(self): class QuestionSuggestionChangeTest(test_utils.GenericTestBase): """Test for QuestionSuggestionChange object.""" - def test_to_dict(self): + def test_to_dict(self) -> None: """Test to verify to_dict method of the Question Change object.""" expected_object_dict = { 'cmd': 'create_new_fully_specified_question', @@ -208,64 +214,67 @@ def test_to_dict(self): self.assertEqual(expected_object_dict, observed_object.to_dict()) - def test_change_dict_without_cmd(self): + def test_change_dict_without_cmd(self) -> None: """Test to verify __init__ method of the QuestionSuggestionChange object when change_dict is without cmd key. 
""" - self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, - 'Missing cmd key in change dict', - callableObj=question_domain.QuestionSuggestionChange, - change_dict={} - ) + 'Missing cmd key in change dict' + ): + question_domain.QuestionSuggestionChange({}) - def test_change_dict_with_wrong_cmd(self): + def test_change_dict_with_wrong_cmd(self) -> None: """Test to verify __init__ method of the QuestionSuggestionChange object when change_dict is with wrong cmd value. """ - self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, - 'Command wrong is not allowed', - callableObj=question_domain.QuestionSuggestionChange, - change_dict={'cmd': 'wrong', } - ) + 'Command wrong is not allowed' + ): + question_domain.QuestionSuggestionChange( + {'cmd': 'wrong', } + ) - def test_change_dict_with_missing_attributes_in_cmd(self): + def test_change_dict_with_missing_attributes_in_cmd(self) -> None: """Test to verify __init__ method of the QuestionSuggestionChange object when change_dict is with missing attributes in cmd. """ - self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, - 'The following required attributes are present: new_value', - callableObj=question_domain.QuestionSuggestionChange, - change_dict={ + 'The following required attributes are missing: skill_difficulty,' + ' skill_id' + ): + question_domain.QuestionSuggestionChange( + { 'cmd': 'create_new_fully_specified_question', 'question_dict': 'question_dict', - } - ) + } + ) - def test_change_dict_with_extra_attributes_in_cmd(self): + def test_change_dict_with_extra_attributes_in_cmd(self) -> None: """Test to verify __init__ method of the QuestionSuggestionChange object when change_dict is with extra attributes in cmd. 
""" - self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, - 'The following extra attributes are present: invalid', - callableObj=question_domain.QuestionSuggestionChange, - change_dict={ + 'The following extra attributes are present: invalid' + ): + question_domain.QuestionSuggestionChange( + { 'cmd': 'create_new_fully_specified_question', 'question_dict': 'question_dict', 'skill_id': 'skill_1', 'skill_difficulty': '0.3', 'invalid': 'invalid' - } - ) + } + ) - def test_create_new_fully_specified_question(self): + def test_create_new_fully_specified_question(self) -> None: """Test to verify __init__ method of the QuestionSuggestionChange object when cmd is create_new_fully_specified_question. """ - change_dict = { + change_dict: Dict[str, Union[str, Dict[str, str]]] = { 'cmd': 'create_new_fully_specified_question', 'question_dict': {}, 'skill_id': '10', @@ -284,70 +293,159 @@ def test_create_new_fully_specified_question(self): class QuestionDomainTest(test_utils.GenericTestBase): """Tests for Question domain object.""" - def setUp(self): + def setUp(self) -> None: """Before each individual test, create a question.""" - super(QuestionDomainTest, self).setUp() - question_state_data = self._create_valid_question_data('ABC') + super().setUp() + content_id_generator = translation_domain.ContentIdGenerator() + question_state_data = self._create_valid_question_data( + 'ABC', content_id_generator) self.question = question_domain.Question( 'question_id', question_state_data, feconf.CURRENT_STATE_SCHEMA_VERSION, 'en', 1, ['skill1'], - ['skillId12345-123']) + ['skillId12345-123'], + content_id_generator.next_content_id_index) + + self.content_id_generator = translation_domain.ContentIdGenerator() + self.question_state_dict = ( + question_domain.Question.create_default_question_state( + self.content_id_generator + ).to_dict()) + translation_dict = { + 'content_id_3': translation_domain.TranslatedContent( + 'My name is Nikhil.', + 
translation_domain.TranslatableContentFormat.HTML, + True + ) + } + self.dummy_entity_translations = translation_domain.EntityTranslation( + 'question_id', feconf.TranslatableEntityType.QUESTION, 1, 'hi', + translation_dict) + self.state_answer_group = state_domain.AnswerGroup( + state_domain.Outcome( + None, None, state_domain.SubtitledHtml( + 'feedback_1', 'Feedback'), + False, [], None, None), + [ + state_domain.RuleSpec( + 'Contains', + { + 'x': + { + 'contentId': 'rule_input_Contains', + 'normalizedStrSet': ['Test'] + } + }) + ], + [], + None + ) - def test_to_and_from_dict(self): + def test_to_and_from_dict(self) -> None: """Test to verify to_dict and from_dict methods of Question domain object. """ - default_question_state_data = ( - question_domain.Question.create_default_question_state()) - question_dict = { + question_dict: question_domain.QuestionDict = { 'id': 'col1.random', - 'question_state_data': default_question_state_data.to_dict(), + 'question_state_data': self.question_state_dict, 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'language_code': 'en', 'version': 1, 'linked_skill_ids': ['skill1'], - 'inapplicable_skill_misconception_ids': ['skill1-123'] + 'inapplicable_skill_misconception_ids': ['skill1-123'], + 'next_content_id_index': ( + self.content_id_generator.next_content_id_index) } observed_object = question_domain.Question.from_dict(question_dict) self.assertEqual(question_dict, observed_object.to_dict()) - def _assert_validation_error(self, expected_error_substring): + def _assert_question_domain_validation_error( + self, expected_error_substring: str + ) -> None: """Checks that the skill passes strict validation.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring ): self.question.validate() - def test_strict_validation(self): + def test_tagged_skill_misconception_id(self) -> None: + """Checks the tagged skill misconception id's format.""" + 
state = self.question.question_state_data + state.update_interaction_answer_groups( + [self.state_answer_group]) + state.interaction.answer_groups[0].tagged_skill_misconception_id = ( + 'invalid_tagged_skill_misconception_id' + ) + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected the format of tagged skill misconception id ' + 'to be -, received ' + 'invalid_tagged_skill_misconception_id' + ): + self.question.validate() + + # Here we use MyPy ignore because we want to add a test which would + # check the tagged_skill_misconception_id's format as well as the + # regex. + state.interaction.answer_groups[ + 0].tagged_skill_misconception_id = 1 # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected tagged skill misconception id to be a str, received 1' + ): + self.question.validate() + + def test_strict_validation(self) -> None: """Test to verify validate method of Question domain object with strict as True. """ state = self.question.question_state_data + + # TODO(#13059): After we fully type the codebase we plan to get + # rid of the tests that intentionally test wrong inputs that we + # can normally catch by typing. state.interaction.solution = None - self._assert_validation_error( + self._assert_question_domain_validation_error( 'Expected the question to have a solution') state.interaction.hints = [] - self._assert_validation_error( + self._assert_question_domain_validation_error( 'Expected the question to have at least one hint') + # Ruling out the possibility of None for mypy type checking. + assert state.interaction.default_outcome is not None state.interaction.default_outcome.dest = 'abc' - self._assert_validation_error( + self._assert_question_domain_validation_error( 'Expected all answer groups to have destination as None.') + + # TODO(#13059): After we fully type the codebase we plan to get + # rid of the tests that intentionally test wrong inputs that we + # can normally catch by typing. 
+ state.interaction.default_outcome.dest = None + state.interaction.default_outcome.dest_if_really_stuck = 'pqr' + self._assert_question_domain_validation_error( + 'Expected all answer groups to have destination for the ' + 'stuck learner as None.') state.interaction.default_outcome.labelled_as_correct = False - self._assert_validation_error( + self._assert_question_domain_validation_error( 'Expected at least one answer group to have a correct answer') - def test_strict_validation_for_answer_groups(self): + def test_strict_validation_for_answer_groups(self) -> None: """Test to verify validate method of Question domain object with strict as True for interaction with answer group. """ state = self.question.question_state_data + # Ruling out the possibility of None for mypy type checking. + assert state.interaction.default_outcome is not None state.interaction.default_outcome.labelled_as_correct = False + rule_spec_input_test_dict: Dict[str, Union[str, List[str]]] = { + 'contentId': 'rule_input_4', + 'normalizedStrSet': ['Test'] + } state.interaction.answer_groups = [ state_domain.AnswerGroup.from_dict({ 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -359,10 +457,7 @@ def test_strict_validation_for_answer_groups(self): }, 'rule_specs': [{ 'inputs': { - 'x': { - 'contentId': 'rule_input_4', - 'normalizedStrSet': ['Test'] - } + 'x': rule_spec_input_test_dict }, 'rule_type': 'Contains' }], @@ -371,104 +466,180 @@ def test_strict_validation_for_answer_groups(self): }) ] - self._assert_validation_error( + self._assert_question_domain_validation_error( 'Expected all answer groups to have destination as None.') + state.interaction.answer_groups = [ + state_domain.AnswerGroup.from_dict({ + 'outcome': { + 'dest': None, + 'dest_if_really_stuck': 'pqr', + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'rule_specs': [{ + 'inputs': { + 'x': rule_spec_input_test_dict + }, + 'rule_type': 'Contains' + }], + 'training_data': [], + 'tagged_skill_misconception_id': None + }) + ] + + self._assert_question_domain_validation_error( + 'Expected all answer groups to have destination for the ' + 'stuck learner as None.') + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. def test_validate_invalid_list_of_inapplicable_skill_misconception_ids( - self): + self + ) -> None: """Test to verify that the validation fails when inapplicable_skill_misconception_ids value is an invalid list. """ - self.question.inapplicable_skill_misconception_ids = ['Test', 1] - self._assert_validation_error( + self.question.inapplicable_skill_misconception_ids = ['Test', 1] # type: ignore[list-item] + self._assert_question_domain_validation_error( re.escape( 'Expected inapplicable_skill_misconception_ids to be a list of ' 'strings, received [\'Test\', 1]')) + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. def test_validate_invalid_type_of_inapplicable_skill_misconception_ids( - self): + self + ) -> None: """Test to verify that the validation fails when inapplicable_skill_misconception_ids value is an invalid type. 
""" - self.question.inapplicable_skill_misconception_ids = 123 - self._assert_validation_error( + self.question.inapplicable_skill_misconception_ids = 123 # type: ignore[assignment] + self._assert_question_domain_validation_error( 'Expected inapplicable_skill_misconception_ids to be a list of ' 'strings, received 123') def test_validate_invalid_format_of_inapplicable_skill_misconception_ids( - self): + self + ) -> None: """Test to verify that the validation fails when inapplicable_skill_misconception_ids value is an invalid format i.e. it is not of the form -. """ self.question.inapplicable_skill_misconception_ids = ['abc', 'def'] - self._assert_validation_error( + self._assert_question_domain_validation_error( re.escape( 'Expected inapplicable_skill_misconception_ids to be a list ' 'of strings of the format -, ' 'received [\'abc\', \'def\']')) def test_validate_duplicate_inapplicable_skill_misconception_ids_list( - self): + self + ) -> None: """Test to verify that the validation fails when inapplicable_skill_misconception_ids list is has duplicate values. """ self.question.inapplicable_skill_misconception_ids = [ 'skillid12345-1', 'skillid12345-1'] - self._assert_validation_error( + self._assert_question_domain_validation_error( 'inapplicable_skill_misconception_ids has duplicate values') - def test_strict_validation_passes(self): + def test_strict_validation_passes(self) -> None: """Test to verify validate method of a finalized Question domain object with correct input. """ self.question.validate() - def test_not_strict_validation(self): + def test_not_strict_validation(self) -> None: """Test to verify validate method of Question domain object with strict as False. 
""" self.question.language_code = 'abc' - self._assert_validation_error('Invalid language code') + self._assert_question_domain_validation_error('Invalid language code') - self.question.question_state_data = 'State data' - self._assert_validation_error( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.question.question_state_data = 'State data' # type: ignore[assignment] + self._assert_question_domain_validation_error( 'Expected question state data to be a State object') - self.question.question_state_data_schema_version = 'abc' - self._assert_validation_error( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.question.question_state_data_schema_version = 'abc' # type: ignore[assignment] + self._assert_question_domain_validation_error( 'Expected schema version to be an integer') - self.question.linked_skill_ids = 'Test' - self._assert_validation_error( + self.question.question_state_data_schema_version = 45 + self._assert_question_domain_validation_error( + 'Expected question state schema version to be %s, received ' + '%s' % ( + feconf.CURRENT_STATE_SCHEMA_VERSION, + self.question.question_state_data_schema_version)) + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.question.linked_skill_ids = 'Test' # type: ignore[assignment] + self._assert_question_domain_validation_error( 'Expected linked_skill_ids to be a list of strings') - self.question.linked_skill_ids = None - self._assert_validation_error( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.question.linked_skill_ids = None # type: ignore[assignment] + self._assert_question_domain_validation_error( 'inked_skill_ids is either null or an empty list') self.question.linked_skill_ids = [] - self._assert_validation_error( + self._assert_question_domain_validation_error( 'linked_skill_ids is either null or an empty list') - self.question.linked_skill_ids = ['Test', 1] - self._assert_validation_error( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.question.linked_skill_ids = ['Test', 1] # type: ignore[list-item] + self._assert_question_domain_validation_error( 'Expected linked_skill_ids to be a list of strings') self.question.linked_skill_ids = ['skill1', 'skill1'] - self._assert_validation_error( + self._assert_question_domain_validation_error( 'linked_skill_ids has duplicate skill ids') - self.question.language_code = 1 - self._assert_validation_error('Expected language_code to be a string') + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.question.language_code = 1 # type: ignore[assignment] + self._assert_question_domain_validation_error( + 'Expected language_code to be a string' + ) - self.question.version = 'abc' - self._assert_validation_error('Expected version to be an integer') + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.question.version = 'abc' # type: ignore[assignment] + self._assert_question_domain_validation_error( + 'Expected version to be an integer' + ) - self.question.id = 123 - self._assert_validation_error('Expected ID to be a string') + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.question.id = 123 # type: ignore[assignment] + self._assert_question_domain_validation_error( + 'Expected ID to be a string' + ) - def test_create_default_question(self): + def test_create_default_question(self) -> None: """Test to verify create_default_question method of the Question domain object. 
""" @@ -476,8 +647,11 @@ def test_create_default_question(self): skill_ids = ['test_skill1', 'test_skill2'] question = question_domain.Question.create_default_question( question_id, skill_ids) + content_id_generator = translation_domain.ContentIdGenerator() default_question_data = ( - question_domain.Question.create_default_question_state().to_dict()) + question_domain.Question.create_default_question_state( + content_id_generator + ).to_dict()) self.assertEqual(question.id, question_id) self.assertEqual( @@ -486,7 +660,7 @@ def test_create_default_question(self): self.assertEqual(question.version, 0) self.assertEqual(question.linked_skill_ids, skill_ids) - def test_update_language_code(self): + def test_update_language_code(self) -> None: """Test to verify update_language_code method of the Question domain object. """ @@ -494,7 +668,7 @@ def test_update_language_code(self): self.assertEqual('pl', self.question.language_code) - def test_update_linked_skill_ids(self): + def test_update_linked_skill_ids(self) -> None: """Test to verify update_linked_skill_ids method of the Question domain object. """ @@ -502,7 +676,7 @@ def test_update_linked_skill_ids(self): self.assertEqual(['skill_id1'], self.question.linked_skill_ids) - def test_update_inapplicable_skill_misconception_ids(self): + def test_update_inapplicable_skill_misconception_ids(self) -> None: """Test to verify update_inapplicable_skill_misconception_ids method of the Question domain object. """ @@ -515,25 +689,1579 @@ def test_update_inapplicable_skill_misconception_ids(self): ['skillid-misconceptionid'], self.question.inapplicable_skill_misconception_ids) - def test_update_question_state_data(self): + def test_update_question_state_data(self) -> None: """Test to verify update_question_state_data method of the Question domain object. 
""" - question_state_data = self._create_valid_question_data('Test') + content_id_generator = translation_domain.ContentIdGenerator() + question_state_data = self._create_valid_question_data( + 'Test', content_id_generator) self.question.update_question_state_data(question_state_data) + self.question.update_next_content_id_index( + content_id_generator.next_content_id_index) self.assertEqual( question_state_data.to_dict(), self.question.question_state_data.to_dict() ) + def test_question_state_dict_conversion_from_v27_to_v28(self) -> None: + test_data = self.question_state_dict['recorded_voiceovers'] + # Here we use MyPy ignore because we are defining an older version + # dictionary of state which contains `content_ids_to_audio_translations` + # key, but question_data is of type StateDict (latest version dictionary + # for state) and StateDict do not contain this older key. So, because of + # this MyPy throws an `TypedDict "StateDict" has no key` error. Thus to + # avoid the error, we used ignore here. + self.question_state_dict['content_ids_to_audio_translations'] = ( # type: ignore[misc] + test_data['voiceovers_mapping']) + + # Here we use MyPy ignore because MyPy doesn't allow key deletion + # from TypedDict. + # Removing 'recorded_voiceovers' from question_data. 
+ del self.question_state_dict['recorded_voiceovers'] # type: ignore[misc] + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 27 + } + + self.assertNotIn('recorded_voiceovers', test_value['state']) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 28) + self.assertIn('recorded_voiceovers', test_value['state']) + self.assertEqual( + test_value['state']['recorded_voiceovers'], test_data) + + def test_question_state_dict_conversion_from_v28_to_v29(self) -> None: + + # Here we use MyPy ignore because MyPy doesn't allow key deletion + # from TypedDict. + # Removing 'solicit_answer_details' from question_data. + del self.question_state_dict['solicit_answer_details'] # type: ignore[misc] + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 28 + } + + self.assertNotIn('solicit_answer_details', test_value['state']) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 29) + self.assertIn('solicit_answer_details', test_value['state']) + self.assertEqual( + test_value['state']['solicit_answer_details'], False) + + def test_question_state_dict_conversion_from_v29_to_v30(self) -> None: + # Here we use MyPy ignore because the expected type for `answer_groups` + # key is AnswerGroupDict but for testing purposes we are providing + # a dictionary which contains `tagged_misconception_id` key and this + # `tagged_misconception_id` key is not defined in AnswerGroupDict. + # So, due to this MyPy throws an `Extra key 'tagged_misconception_id' + # for TypedDict "AnswerGroupDict"` error. Thus to avoid the error, + # we used ignore here. 
+ self.question_state_dict['interaction']['answer_groups'] = [ + { # type: ignore[typeddict-item] + 'tagged_misconception_id': 1 + } + ] + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 29 + } + + self.assertIn( + 'tagged_misconception_id', + test_value['state']['interaction']['answer_groups'][0] + ) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 30) + self.assertNotIn( + 'tagged_misconception_id', + test_value['state']['interaction']['answer_groups'][0] + ) + self.assertIn( + 'tagged_skill_misconception_id', + test_value['state']['interaction']['answer_groups'][0] + ) + self.assertIsNone(test_value['state']['interaction'][ + 'answer_groups'][0]['tagged_skill_misconception_id']) + + def test_question_state_dict_conversion_from_v30_to_v31(self) -> None: + # Here we use MyPy ignore because here we are defining an empty + # VoiceoverDict, for checking when this dict passes throw conversion + # functions, keys are populated automatically or not. So, due to the + # absence of keys MyPy throws an `Missing key` error. Thus to avoid + # the error, we used ignore here. 
+ self.question_state_dict[ + 'recorded_voiceovers']['voiceovers_mapping'] = { + 'content': { + 'audio_metadata': {} # type: ignore[typeddict-item] + } + } + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 30 + } + + self.assertNotIn( + 'duration_secs', + test_value['state']['recorded_voiceovers']['voiceovers_mapping'][ + 'content']['audio_metadata'] + ) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 31) + self.assertIn( + 'duration_secs', + test_value['state']['recorded_voiceovers']['voiceovers_mapping'][ + 'content']['audio_metadata'] + ) + self.assertEqual( + test_value['state']['recorded_voiceovers']['voiceovers_mapping'][ + 'content']['audio_metadata']['duration_secs'], + 0.0 + ) + + def test_question_state_dict_conversion_from_v31_to_v32(self) -> None: + + self.question_state_dict['interaction']['id'] = 'SetInput' + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 31 + } + + self.assertEqual( + self.question_state_dict['interaction']['customization_args'], {}) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 32) + self.assertEqual( + self.question_state_dict['interaction']['customization_args'], + { + 'buttonText': { + 'value': 'Add item' + } + } + ) + + def test_question_state_dict_conversion_from_v32_to_v33(self) -> None: + + self.question_state_dict['interaction']['id'] = 'MultipleChoiceInput' + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 32 + } + + self.assertEqual( + self.question_state_dict['interaction']['customization_args'], {}) + + question_domain.Question.update_state_from_model( + test_value, 
test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 33) + self.assertEqual( + self.question_state_dict['interaction']['customization_args'], + { + 'showChoicesInShuffledOrder': { + 'value': True + } + } + ) + + def test_question_state_dict_conversion_from_v33_to_v34(self) -> None: + + # Ruling out the possibility of None for mypy type checking. + assert self.question_state_dict['interaction'][ + 'default_outcome'] is not None + self.question_state_dict['content']['html'] = '
    ' + self.question_state_dict['interaction']['default_outcome'][ + 'feedback']['html'] = '
    ' + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 33 + } + + # Ruling out the possibility of None for mypy type checking. + assert test_value['state']['interaction']['default_outcome'] is not None + self.assertEqual( + test_value['state']['content']['html'], '
    ') + self.assertEqual( + test_value['state']['interaction']['default_outcome'][ + 'feedback']['html'], + '
    ' + ) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 34) + self.assertEqual( + test_value['state']['content']['html'], '
    ') + self.assertEqual( + test_value['state']['interaction']['default_outcome'][ + 'feedback']['html'], + '
    ' + ) + + def test_question_state_dict_conversion_from_v34_to_v35(self) -> None: + + self.question_state_dict['interaction']['id'] = 'MathExpressionInput' + self.question_state_dict['interaction']['solution'] = { + 'answer_is_exclusive': False, + 'correct_answer': { + 'ascii': '1' + }, + 'explanation': { + 'content_id': 'temp_id', + 'html': '

    This is a solution.

    ' + } + } + self.question_state_dict['interaction']['answer_groups'] = [ + # Here we use MyPy ignore because here we are defining + # AnswerGroupDict and while defining AnswerGroupDict MyPy + # expects that all keys are defined, but for testing purposes + # here we are defining only rule_specs and outcome key which + # causes MyPy to throw `Missing keys' error. Thus to avoid the + # error, we used ignore here. + { # type: ignore[typeddict-item] + 'rule_specs': [{ + 'inputs': { + 'x': '1', + 'y': None + }, + 'rule_type': None + }], + 'outcome': { + 'feedback': { + 'content_id': 'temp_id' + } + }, + }, + # Here we use MyPy ignore because here we are defining + # AnswerGroupDict and while defining AnswerGroupDict MyPy + # expects that all keys are defined, but for testing purposes + # here we are defining only rule_specs and outcome key which + # causes MyPy to throw `Missing keys' error. Thus to avoid the + # error, we used ignore here. + { # type: ignore[typeddict-item] + 'rule_specs': [{ + 'inputs': { + 'x': 'x+1', + 'y': None + }, + 'rule_type': None + }], + 'outcome': { + 'feedback': { + 'content_id': 'temp_id_2' + } + }, + }, + # Here we use MyPy ignore because here we are defining + # AnswerGroupDict and while defining AnswerGroupDict MyPy + # expects that all keys are defined, but for testing purposes + # here we are defining only rule_specs and outcome key which + # causes MyPy to throw `Missing keys' error. Thus to avoid the + # error, we used ignore here. + { # type: ignore[typeddict-item] + 'rule_specs': [{ + 'inputs': { + 'x': 'x=1', + 'y': None + }, + 'rule_type': None + }], + 'outcome': { + 'feedback': { + 'content_id': 'temp_id_3' + } + }, + }, + # Here we use MyPy ignore because here we are defining + # AnswerGroupDict and while defining AnswerGroupDict MyPy + # expects that all keys are defined, but for testing purposes + # here we are defining only rule_specs and outcome key which + # causes MyPy to throw `Missing keys' error. 
Thus to avoid the + # error, we used ignore here. + { # type: ignore[typeddict-item] + 'rule_specs': [], + 'outcome': { + 'feedback': { + 'content_id': 'temp_id_4' + } + }, + } + ] + self.question_state_dict[ + 'recorded_voiceovers']['voiceovers_mapping'] = { + 'temp_id': {}, 'temp_id_2': {}, 'temp_id_3': {}, 'temp_id_4': {} + } + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + self.question_state_dict['written_translations'] = { # type: ignore[misc] + 'translations_mapping': { + 'temp_id': {}, 'temp_id_2': {}, 'temp_id_3': {}, 'temp_id_4': {} + } + } + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 34 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 35) + self.assertEqual( + test_value['state']['interaction']['id'], + 'MathEquationInput' + ) + self.assertEqual( + test_value['state']['recorded_voiceovers'][ + 'voiceovers_mapping'], + {'temp_id_3': {}} + ) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0]['inputs']['y'], + 'both' + ) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0]['rule_type'], + 'MatchesExactlyWith' + ) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'outcome']['feedback']['content_id'], + 'temp_id_3' + ) + # Ruling out the possibility of None for mypy type checking. + assert test_value['state']['interaction']['solution'] is not None + assert isinstance( + test_value['state']['interaction']['solution']['correct_answer'], + str + ) + self.assertNotIn( + 'ascii', + test_value['state']['interaction']['solution']['correct_answer'] + ) + + # Testing with only AlgebraicExpressionInput i.e ('x': 'x+1'). 
+ test_value['state']['interaction']['id'] = 'MathExpressionInput' + test_value['state']['interaction']['solution'] = { + 'answer_is_exclusive': False, + 'correct_answer': { + 'ascii': '1' + }, + 'explanation': { + 'content_id': 'temp_id', + 'html': '

    This is a solution.

    ' + } + } + # Here we use MyPy ignore because we are defining AnswerGroupDict + # and while defining AnswerGroupDict MyPy expects that all keys are + # defined, but for testing purposes here we are defining only rule_specs + # and outcome key which causes MyPy to throw `Missing keys' error. Thus + # to avoid the error, we used ignore here. + test_value['state']['interaction']['answer_groups'] = [ + { # type: ignore[typeddict-item] + 'rule_specs': [{ + 'inputs': { + 'x': 'x+1', + 'y': None + }, + 'rule_type': None + }], + 'outcome': { + 'feedback': { + 'content_id': 'temp_id' + } + }, + } + ] + test_value['state']['recorded_voiceovers']['voiceovers_mapping'] = { + 'temp_id': {} + } + test_value['state_schema_version'] = 34 + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 35) + self.assertEqual( + test_value['state']['interaction']['id'], + 'AlgebraicExpressionInput' + ) + self.assertNotIn( + 'ascii', + test_value['state']['interaction']['solution']['correct_answer'] + ) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0]['rule_type'], + 'MatchesExactlyWith' + ) + + # Testing with only NumericExpressionInput i.e ('x': '1'). + test_value['state']['interaction']['id'] = 'MathExpressionInput' + test_value['state']['interaction']['solution'] = { + 'answer_is_exclusive': False, + 'correct_answer': { + 'ascii': '1' + }, + 'explanation': { + 'content_id': 'temp_id', + 'html': '

    This is a solution.

    ' + } + } + # Here we use MyPy ignore because we are defining AnswerGroupDict + # and while defining AnswerGroupDict MyPy expects that all keys are + # defined, but for testing purposes here we are defining only rule_specs + # and outcome key which causes MyPy to throw `Missing keys' error. Thus + # to avoid the error, we used ignore here. + test_value['state']['interaction']['answer_groups'] = [ + { # type: ignore[typeddict-item] + 'rule_specs': [{ + 'inputs': { + 'x': '1', + 'y': None + }, + 'rule_type': None + }], + 'outcome': { + 'feedback': { + 'content_id': 'temp_id' + } + }, + } + ] + test_value['state']['recorded_voiceovers']['voiceovers_mapping'] = { + 'temp_id': {} + } + test_value['state_schema_version'] = 34 + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 35) + self.assertEqual( + test_value['state']['interaction']['id'], + 'NumericExpressionInput' + ) + self.assertNotIn( + 'ascii', + test_value['state']['interaction']['solution']['correct_answer'] + ) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0]['rule_type'], + 'MatchesExactlyWith' + ) + + def test_question_state_dict_conversion_from_v35_to_v36(self) -> None: + # Here we use MyPy ignore because we are defining WrittenTranslationDict + # and WrittenTranslationDict do not accept 'html' key, because the + # latest version of WrittenTranslation does not have any `html` + # attribute, but for testing purposes here we are defining an older + # version of WrittenTranslation for which we have to provide `html` + # key. So, due to this MyPy throws an `Extra key 'html' for TypedDict` + # error. Thus to avoid the error, we used ignore here. 
+ self.question_state_dict['written_translations'] = { # type: ignore[misc] + 'translations_mapping': { + 'temp_id_1': { + 'en': { + 'html': 'html_body_1' + } + }, + 'temp_id_2': { + 'en': { + 'html': 'html_body_2' + } + } + } + } + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 35 + } + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + test_value['state']['next_content_id_index'] = 0 # type: ignore[misc] + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 36) + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + self.assertEqual(test_value['state']['next_content_id_index'], 3) # type: ignore[misc] + + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + t_map = test_value['state']['written_translations'][ # type: ignore[misc] + 'translations_mapping'] + self.assertEqual(t_map['temp_id_1']['en']['data_format'], 'html') + self.assertEqual(t_map['temp_id_2']['en']['data_format'], 'html') + self.assertEqual( + t_map['temp_id_1']['en']['translation'], 'html_body_1') + self.assertEqual( + t_map['temp_id_2']['en']['translation'], 'html_body_2') + self.assertNotIn('html', t_map['temp_id_1']['en']) + self.assertNotIn('html', t_map['temp_id_2']['en']) + + # Testing with interaction id 'PencilCodeEditor'. 
+ test_value['state']['interaction']['id'] = 'PencilCodeEditor' + test_value['state']['interaction']['customization_args'] = { + 'initial_code': {} + } + + # Here we use MyPy ignore because we are defining WrittenTranslationDict + # and WrittenTranslationDict do not accept 'html' key, because the + # latest version of WrittenTranslation does not have any `html` + # attribute, but for testing purposes here we are defining an older + # version of WrittenTranslation for which we have to provide `html` + # key. So, due to this MyPy throws an `Extra key 'html' for TypedDict` + # error. Thus to avoid the error, we used ignore here. + test_value['state']['written_translations']['translations_mapping'] = { # type: ignore[misc] + 'temp_id_1': { + 'en': { + 'html': 'html_body_1' + } + }, + 'temp_id_2': { + 'en': { + 'html': 'html_body_2' + } + } + } + test_value['state_schema_version'] = 35 + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 36) + self.assertEqual( + test_value['state']['interaction']['customization_args'], + {'initialCode': {}} + ) + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + self.assertEqual( + test_value['state']['written_translations']['translations_mapping'], # type: ignore[misc] + { + 'temp_id_1': { + 'en': {'data_format': 'html', 'translation': 'html_body_1'} + }, + 'temp_id_2': { + 'en': {'data_format': 'html', 'translation': 'html_body_2'} + } + } + ) + + # Testing with interaction id 'TextInput'. 
+ test_value['state']['interaction']['id'] = 'TextInput' + test_value['state']['interaction']['customization_args'] = { + 'placeholder': { + 'value': 'temp_value_1' + } + } + + # Here we use MyPy ignore because we are defining WrittenTranslationDict + # and WrittenTranslationDict do not accept 'html' key, because the + # latest version of WrittenTranslation does not have any `html` + # attribute, but for testing purposes here we are defining an older + # version of WrittenTranslation for which we have to provide `html` + # key. So, due to this MyPy throws an `Extra key 'html' for TypedDict` + # error. Thus to avoid the error, we used ignore here. + test_value['state']['written_translations']['translations_mapping'] = { # type: ignore[misc] + 'temp_id_1': { + 'en': { + 'html': 'html_body_1' + } + }, + 'temp_id_2': { + 'en': { + 'html': 'html_body_2' + } + } + } + test_value['state_schema_version'] = 35 + + with self.swap_to_always_return( + customization_args_util, 'validate_customization_args_and_values', + value=True): + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 36) + + # Testing with interaction id 'MultipleChoiceInput'. + test_value['state']['interaction']['id'] = 'MultipleChoiceInput' + test_value['state']['interaction']['customization_args'] = { + 'choices': { + 'value': 'value_1' + } + } + + # Here we use MyPy ignore because we are defining WrittenTranslationDict + # and WrittenTranslationDict do not accept 'html' key, because the + # latest version of WrittenTranslation does not have any `html` + # attribute, but for testing purposes here we are defining an older + # version of WrittenTranslation for which we have to provide `html` + # key. So, due to this MyPy throws an `Extra key 'html' for TypedDict` + # error. Thus to avoid the error, we used ignore here. 
+ test_value['state']['written_translations']['translations_mapping'] = { # type: ignore[misc] + 'temp_id_1': { + 'en': { + 'html': 'html_body_1' + } + }, + 'temp_id_2': { + 'en': { + 'html': 'html_body_2' + } + } + } + test_value['state']['recorded_voiceovers']['voiceovers_mapping'] = {} + test_value['state_schema_version'] = 35 + + with self.swap_to_always_return( + customization_args_util, 'validate_customization_args_and_values', + value=True): + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 36) + self.assertEqual( + test_value['state']['interaction']['customization_args'], + { + 'choices': { + 'value': [ + {'content_id': 'ca_choices_3', 'html': 'v'}, + {'content_id': 'ca_choices_4', 'html': 'a'}, + {'content_id': 'ca_choices_5', 'html': 'l'}, + {'content_id': 'ca_choices_6', 'html': 'u'}, + {'content_id': 'ca_choices_7', 'html': 'e'}, + {'content_id': 'ca_choices_8', 'html': '_'}, + {'content_id': 'ca_choices_9', 'html': '1'} + ] + }, + 'showChoicesInShuffledOrder': {'value': True} + } + ) + self.assertEqual( + test_value['state']['recorded_voiceovers']['voiceovers_mapping'], + { + 'ca_choices_3': {}, 'ca_choices_4': {}, 'ca_choices_5': {}, + 'ca_choices_6': {}, 'ca_choices_7': {}, 'ca_choices_8': {}, + 'ca_choices_9': {} + } + ) + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + self.assertEqual( + test_value['state']['written_translations']['translations_mapping'], # type: ignore[misc] + { + 'temp_id_1': { + 'en': {'data_format': 'html', 'translation': 'html_body_1'} + }, + 'temp_id_2': { + 'en': {'data_format': 'html', 'translation': 'html_body_2'} + }, + 'ca_choices_3': {}, 'ca_choices_4': {}, 'ca_choices_5': {}, + 'ca_choices_6': {}, 'ca_choices_7': {}, 'ca_choices_8': {}, + 'ca_choices_9': {} + } + ) + + # Testing with interaction id 'ItemSelectionInput'. 
+ test_value['state']['interaction']['id'] = 'ItemSelectionInput' + test_value['state']['interaction']['customization_args'] = {} + + # Here we use MyPy ignore because we are defining WrittenTranslationDict + # and WrittenTranslationDict do not accept 'html' key, because the + # latest version of WrittenTranslation does not have any `html` + # attribute, but for testing purposes here we are defining an older + # version of WrittenTranslation for which we have to provide `html` + # key. So, due to this MyPy throws an `Extra key 'html' for TypedDict` + # error. Thus to avoid the error, we used ignore here. + test_value['state']['written_translations']['translations_mapping'] = { # type: ignore[misc] + 'temp_id_1': { + 'en': { + 'html': 'html_body_1' + } + }, + 'temp_id_2': { + 'en': { + 'html': 'html_body_2' + } + } + } + test_value['state']['recorded_voiceovers']['voiceovers_mapping'] = {} + test_value['state_schema_version'] = 35 + + self.assertEqual( + test_value['state']['interaction']['customization_args'], {} + ) + + with self.swap_to_always_return( + customization_args_util, 'validate_customization_args_and_values', + value=True): + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 36) + self.assertEqual( + test_value['state']['interaction']['customization_args'], + { + 'choices': { + 'value': [{'content_id': 'ca_choices_3', 'html': ''}] + }, + 'maxAllowableSelectionCount': {'value': 1}, + 'minAllowableSelectionCount': {'value': 1} + } + ) + self.assertEqual( + test_value['state']['recorded_voiceovers']['voiceovers_mapping'], + {'ca_choices_3': {}} + ) + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. 
+ self.assertEqual( + test_value['state']['written_translations']['translations_mapping'], # type: ignore[misc] + { + 'temp_id_1': { + 'en': {'data_format': 'html', 'translation': 'html_body_1'} + }, + 'temp_id_2': { + 'en': {'data_format': 'html', 'translation': 'html_body_2'} + }, + 'ca_choices_3': {} + } + ) + + def test_question_state_dict_conversion_from_v36_to_v37(self) -> None: + + self.question_state_dict['interaction']['id'] = 'TextInput' + self.question_state_dict['interaction']['answer_groups'] = [{ + 'rule_specs': [{ + 'rule_type': 'CaseSensitiveEquals', + 'inputs': {'x': ''} + }], + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }] + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 36 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 37) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0]['rule_type'], + 'Equals' + ) + + def test_question_state_dict_conversion_from_v37_to_v38(self) -> None: + + self.question_state_dict['interaction']['id'] = 'MathEquationInput' + self.question_state_dict['interaction']['answer_groups'] = [{ + 'rule_specs': [{ + 'inputs': { + 'x': 'variable=pi' + }, + 'rule_type': '' + }], + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }] + self.question_state_dict['interaction']['customization_args'] = {} + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 37 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 38) + self.assertEqual( + self.question_state_dict['interaction']['customization_args'], + { + 'customOskLetters': { + 'value': ['a', 'b', 'e', 'i', 'l', 'r', 'v', 'π'] + } + } + ) + + def test_question_state_dict_conversion_from_v38_to_v39(self) -> None: + + self.question_state_dict['interaction']['id'] = 'NumericExpressionInput' + self.question_state_dict['interaction']['customization_args'] = {} + self.question_state_dict[ + 'recorded_voiceovers']['voiceovers_mapping'] = {} + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. 
+ self.question_state_dict['written_translations'] = { # type: ignore[misc] + 'translations_mapping': {} + } + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 38 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 39) + self.assertEqual( + test_value['state']['interaction']['customization_args'], + { + 'placeholder': { + 'value': { + 'content_id': 'ca_placeholder_0', + 'unicode_str': ( + 'Type an expression here, using only numbers.') + } + } + } + ) + self.assertEqual( + test_value['state']['recorded_voiceovers']['voiceovers_mapping'], + {'ca_placeholder_0': {}} + ) + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + self.assertEqual( + test_value['state']['written_translations']['translations_mapping'], # type: ignore[misc] + {'ca_placeholder_0': {}} + ) + + def test_question_state_dict_conversion_from_v39_to_v40(self) -> None: + + self.question_state_dict['interaction']['id'] = 'TextInput' + self.question_state_dict['interaction']['answer_groups'] = [{ + 'rule_specs': [{ + 'inputs': { + 'x': 'variable=pi' + }, + 'rule_type': 'standard' + }], + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }] + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 39 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 40) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0], + { + 'rule_type': 'standard', + 'inputs': {'x': ['variable=pi']} + } + ) + + def test_question_state_dict_conversion_from_v40_to_v41(self) -> None: + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains written_translations property. + self.question_state_dict['written_translations'] = { # type: ignore[misc] + 'translations_mapping': {} + } + self.question_state_dict['interaction']['id'] = 'TextInput' + self.question_state_dict['interaction']['answer_groups'] = [{ + 'rule_specs': [{ + 'rule_type': 'standard', + 'inputs': { + 'x': 'text' + }, + }], + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }] + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + self.question_state_dict['next_content_id_index'] = 0 # type: ignore[misc] + self.question_state_dict[ + 'recorded_voiceovers']['voiceovers_mapping'] = {} + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 40 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 41) + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + self.assertEqual(test_value['state']['next_content_id_index'], 1) # type: ignore[misc] + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0]['inputs']['x'], + { + 'contentId': 'rule_input_0', + 'normalizedStrSet': 'text' + } + ) + self.assertEqual( + test_value['state']['recorded_voiceovers']['voiceovers_mapping'], + {'rule_input_0': {}} + ) + + # Testing with interaction id 'SetInput'. + test_value['state']['interaction']['id'] = 'SetInput' + test_value['state']['interaction']['answer_groups'] = [{ + 'rule_specs': [{ + 'rule_type': 'standard', + 'inputs': { + 'x': 'text' + } + }], + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }] + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + test_value['state']['next_content_id_index'] = 0 # type: ignore[misc] + test_value['state']['recorded_voiceovers']['voiceovers_mapping'] = {} + test_value['state_schema_version'] = 40 + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 41) + # Here we use MyPy ignore because the latest schema of state + # dict doesn't contains next_content_id_index property. + self.assertEqual(test_value['state']['next_content_id_index'], 1) # type: ignore[misc] + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0]['inputs']['x'], + { + 'contentId': 'rule_input_0', + 'unicodeStrSet': 'text' + } + ) + self.assertEqual( + test_value['state']['recorded_voiceovers']['voiceovers_mapping'], + {'rule_input_0': {}} + ) + + def test_question_state_dict_conversion_from_v41_to_v42(self) -> None: + test_solution_dict: state_domain.SolutionDict = { + 'correct_answer': ['correct_value'], + 'explanation': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'answer_is_exclusive': False + } + + ca_choices_dicts: List[state_domain.SubtitledHtmlDict] = [ + {'html': 'correct_value', 'content_id': 'content_id_1'}, + {'html': 'value_2', 'content_id': 'content_id_2'}, + {'html': 'value_3', 'content_id': 'content_id_3'} + ] + + self.question_state_dict['interaction']['id'] = 'ItemSelectionInput' + self.question_state_dict['interaction']['solution'] = test_solution_dict + self.question_state_dict['interaction']['customization_args'] = { + 'choices': { + 'value': ca_choices_dicts + } + } + self.question_state_dict['interaction']['answer_groups'] = [{ + 'rule_specs': [{ + 'inputs': { + 'x': ['correct_value'], + }, + 'rule_type': 'IsEqualToOrdering' + }], + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }] + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 41 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 42) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0]['inputs']['x'], + ['content_id_1'] + ) + self.assertEqual( + test_value['state']['interaction']['solution'], + test_solution_dict + ) + + ca_choices_dicts = [ + {'html': 'correct_value', 'content_id': 'content_id_1'}, + ] + + # Testing with invalid 'x' input. + test_value['state']['interaction']['id'] = 'ItemSelectionInput' + test_value['state']['interaction']['solution'] = test_solution_dict + test_value['state']['interaction']['customization_args'] = { + 'choices': { + 'value': ca_choices_dicts + } + } + test_value['state']['interaction']['answer_groups'] = [{ + 'rule_specs': [{ + 'inputs': { + 'x': ['invalid_value'], + }, + 'rule_type': 'IsEqualToOrdering' + }], + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }] + test_value['state_schema_version'] = 41 + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 42) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0][ + 'rule_specs'][0]['inputs']['x'], + ['invalid_content_id'] + ) + self.assertEqual( + test_value['state']['interaction']['solution'], + test_solution_dict + ) + + drag_and_drop_test_solution_dict = copy.deepcopy(test_solution_dict) + drag_and_drop_test_solution_dict['correct_answer'] = [ + ['correct_value'] + ] + + # Testing with interaction id 'DragAndDropSortInput'. + test_value['state']['interaction']['id'] = 'DragAndDropSortInput' + test_value['state']['interaction']['solution'] = ( + drag_and_drop_test_solution_dict + ) + ca_choices_dicts = [ + {'html': 'correct_value', 'content_id': 'content_id_1'}, + {'html': 'value_2', 'content_id': 'content_id_2'}, + {'html': 'value_3', 'content_id': 'content_id_3'} + ] + test_value['state']['interaction']['customization_args'] = { + 'choices': { + 'value': ca_choices_dicts + } + } + test_value['state']['interaction']['answer_groups'] = [{ + 'rule_specs': [ + { + 'inputs': { + 'x': [['value_2']], + }, + 'rule_type': 'IsEqualToOrdering' + }, + { + 'inputs': { + 'x': 'correct_value', + }, + 'rule_type': 'HasElementXAtPositionY' + }, + { + 'inputs': { + 'x': 'correct_value', + 'y': 'value_3' + }, + 'rule_type': 'HasElementXBeforeElementY' + } + ], + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + }] + test_value['state_schema_version'] = 41 + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 42) + self.assertEqual( + test_value['state']['interaction']['answer_groups'][0], + { + 'rule_specs': [ + { + 'inputs': { + 'x': [['content_id_2']] + }, + 'rule_type': 'IsEqualToOrdering' + }, + { + 'inputs': {'x': 'content_id_1'}, + 'rule_type': 'HasElementXAtPositionY' + }, + { + 'inputs': { + 'x': 'content_id_1', + 'y': 'content_id_3' + }, + 'rule_type': 'HasElementXBeforeElementY' + } + ], + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + } + ) + self.assertEqual( + test_value['state']['interaction']['solution'], + drag_and_drop_test_solution_dict + ) + + def test_question_state_dict_conversion_from_v42_to_v43(self) -> None: + + self.question_state_dict['interaction']['id'] = 'NumericExpressionInput' + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 42 + } + + self.assertEqual( + test_value['state']['interaction']['customization_args'], + {} + ) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 43) + self.assertEqual( + test_value['state']['interaction']['customization_args'], + { + 'useFractionForDivision': { + 'value': True + } + } + ) + + def test_question_state_dict_conversion_from_v43_to_v44(self) -> None: + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 43 + } + # Here we use MyPy ignore because MyPy doesn't allow key deletion + # from TypedDict. + del test_value['state']['card_is_checkpoint'] # type: ignore[misc] + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 44) + self.assertEqual(test_value['state']['card_is_checkpoint'], False) + + def test_question_state_dict_conversion_from_v44_to_v45(self) -> None: + # Here we use MyPy ignore because MyPy doesn't allow key deletion + # from TypedDict. 
+ del self.question_state_dict['linked_skill_id'] # type: ignore[misc] + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 44 + } + + self.assertNotIn('linked_skill_id', test_value['state']) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 45) + self.assertIsNone(test_value['state']['linked_skill_id']) + + def test_question_state_dict_conversion_from_v45_to_v46(self) -> None: + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 45 + } + + initial_json = copy.deepcopy(test_value['state']) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 46) + self.assertEqual(test_value['state'], initial_json) + + def test_question_state_dict_conversion_from_v46_to_v47(self) -> None: + + self.question_state_dict['content']['html'] = ( + '' + '' + ) + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 46 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 47) + self.assertEqual( + test_value['state']['content']['html'], + '' + '' + ) + + def test_question_state_dict_conversion_from_v47_to_v48(self) -> None: + + self.question_state_dict['content']['html'] = ' ' + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 47 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 48) + self.assertEqual(test_value['state']['content']['html'], ' ') + + def 
test_question_state_dict_conversion_from_v48_to_v49(self) -> None: + self.question_state_dict['interaction']['id'] = 'NumericInput' + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 48 + } + + self.assertEqual( + test_value['state']['interaction']['customization_args'], + {} + ) + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 49) + self.assertEqual( + test_value['state']['interaction']['customization_args'], + { + 'requireNonnegativeInput': { + 'value': False + } + } + ) + + def test_question_state_dict_conversion_from_v49_to_v50(self) -> None: + self.question_state_dict['interaction']['id'] = ( + 'AlgebraicExpressionInput') + self.question_state_dict['interaction']['customization_args'] = { + 'customOskLetters': { + 'value': ['a', 'b', 'c'] + } + } + inputs_variable_test_dict: List[str] = [] + self.question_state_dict['interaction']['answer_groups'] = [{ + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'rule_specs': [{ + 'inputs': { + 'x': 'a - b' + }, + 'rule_type': 'ContainsSomeOf' + }, { + 'inputs': { + 'x': 'a - b', + 'y': inputs_variable_test_dict + }, + 'rule_type': 'MatchesExactlyWith' + }], + 'training_data': [], + 'tagged_skill_misconception_id': None + }] + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': self.question_state_dict, + 'state_schema_version': 49 + } + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + self.assertEqual(test_value['state_schema_version'], 50) + rule_specs = test_value[ + 'state']['interaction']['answer_groups'][0]['rule_specs'] + self.assertEqual(len(rule_specs), 1) + self.assertEqual(rule_specs[0]['rule_type'], 'MatchesExactlyWith') + self.assertEqual( + test_value['state']['interaction']['customization_args'], { + 'allowedVariables': { + 'value': ['a', 'b', 'c'] + } + } + ) + + def test_get_all_translatable_content_for_question(self) -> None: + """Get all translatable fields from exploration.""" + translatable_contents = [ + translatable_content.content_value + for translatable_content in + self.question.get_all_contents_which_need_translations( + self.dummy_entity_translations).values() + ] + + self.assertItemsEqual( + translatable_contents, + [ + 'Enter text here', + '

    This is a hint.

    ', + '

    This is a solution.

    ' + ]) + + def test_question_state_dict_conversion_from_v50_to_v51(self) -> None: + content_id_generator = translation_domain.ContentIdGenerator() + question_data = ( + question_domain.Question.create_default_question_state( + content_id_generator + ).to_dict()) + + # Here we use MyPy ignore because we are defining AnswerGroupDict + # and while defining AnswerGroupDict MyPy expects that all keys + # are defined, but for testing purposes here we are defining only + # outcome key which causes MyPy to throw `Missing keys' error. + # Thus to avoid the error, we used ignore here. + question_data['interaction']['answer_groups'] = [ + { # type: ignore[typeddict-item] + 'outcome': { + 'feedback': { + 'content_id': 'content_id' + } + } + } + ] + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': question_data, + 'state_schema_version': 50 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 51) + + default_outcome_dict = test_value['state']['interaction']['default_outcome'] # pylint: disable=line-too-long + outcome_dict = test_value['state']['interaction']['answer_groups'][0]['outcome'] # pylint: disable=line-too-long + + # Ruling out the possibility of None for mypy type checking. 
+ assert default_outcome_dict is not None + self.assertIn('dest_if_really_stuck', default_outcome_dict) + self.assertEqual(default_outcome_dict['dest_if_really_stuck'], None) + + self.assertIn('dest_if_really_stuck', outcome_dict) + self.assertEqual(outcome_dict['dest_if_really_stuck'], None) + + def test_question_state_dict_conversion_from_v51_to_v52(self) -> None: + content_id_generator = translation_domain.ContentIdGenerator() + question_data = ( + question_domain.Question.create_default_question_state( + content_id_generator + ).to_dict()) + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': question_data, + 'state_schema_version': 51 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 52) + + def test_question_state_dict_conversion_from_v52_to_v53(self) -> None: + content_id_generator = translation_domain.ContentIdGenerator() + question_data = question_domain.Question.create_default_question_state( + content_id_generator + ).to_dict() + + test_value: question_domain.VersionedQuestionStateDict = { + 'state': question_data, + 'state_schema_version': 52 + } + + question_domain.Question.update_state_from_model( + test_value, test_value['state_schema_version']) + + self.assertEqual(test_value['state_schema_version'], 53) + class QuestionSummaryTest(test_utils.GenericTestBase): """Test for Question Summary object.""" - def setUp(self): - super(QuestionSummaryTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.fake_date_created = datetime.datetime( 2018, 11, 17, 20, 2, 45, 0) self.fake_date_updated = datetime.datetime( @@ -547,7 +2275,7 @@ def setUp(self): misconception_ids=['skill1-1', 'skill2-2'] ) - def test_to_dict(self): + def test_to_dict(self) -> None: """Test to verify to_dict method of the Question Summary object. 
""" @@ -564,61 +2292,82 @@ def test_to_dict(self): self.assertEqual(expected_object_dict, self.observed_object.to_dict()) - def test_validation_with_valid_properties(self): + def test_validation_with_valid_properties(self) -> None: self.observed_object.validate() - def test_validation_with_invalid_id(self): - self.observed_object.id = 1 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_with_invalid_id(self) -> None: + self.observed_object.id = 1 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected id to be a string, received 1'): self.observed_object.validate() - def test_validation_with_invalid_interaction_id(self): - self.observed_object.interaction_id = 1 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_with_invalid_interaction_id(self) -> None: + self.observed_object.interaction_id = 1 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected interaction id to be a string, received 1'): self.observed_object.validate() - def test_validation_with_invalid_question_content(self): - self.observed_object.question_content = 1 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validation_with_invalid_question_content(self) -> None: + self.observed_object.question_content = 1 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected question content to be a string, received 1'): self.observed_object.validate() - def test_validation_with_invalid_created_on(self): - self.observed_object.created_on = 1 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_with_invalid_created_on(self) -> None: + self.observed_object.created_on = 1 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected created on to be a datetime, received 1'): self.observed_object.validate() - def test_validation_with_invalid_last_updated(self): - self.observed_object.last_updated = 1 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_with_invalid_last_updated(self) -> None: + self.observed_object.last_updated = 1 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected last updated to be a datetime, received 1'): self.observed_object.validate() - def test_validate_invalid_list_of_misconception_ids(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_invalid_list_of_misconception_ids(self) -> None: """Test to verify that the validation fails when misconception_ids value is an invalid list. 
""" - self.observed_object.misconception_ids = ['Test', 1] - with self.assertRaisesRegexp( + self.observed_object.misconception_ids = ['Test', 1] # type: ignore[list-item] + with self.assertRaisesRegex( utils.ValidationError, re.escape( 'Expected misconception ids to be a list of strings, ' 'received [\'Test\', 1]')): self.observed_object.validate() - def test_validate_invalid_type_of_misconception_ids(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_invalid_type_of_misconception_ids(self) -> None: """Test to verify that the validation fails when misconception_ids value is an invalid type. """ - self.observed_object.misconception_ids = 123 - with self.assertRaisesRegexp( + self.observed_object.misconception_ids = 123 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected misconception ids to be a list of strings, ' 'received 123'): @@ -628,7 +2377,7 @@ def test_validate_invalid_type_of_misconception_ids(self): class QuestionSkillLinkDomainTest(test_utils.GenericTestBase): """Test for Question Skill Link Domain object.""" - def test_to_dict(self): + def test_to_dict(self) -> None: """Test to verify to_dict method of the Question Skill Link Domain object. """ @@ -646,7 +2395,7 @@ def test_to_dict(self): class MergedQuestionSkillLinkDomainTest(test_utils.GenericTestBase): """Test for Merged Question Skill Link Domain object.""" - def test_to_dict(self): + def test_to_dict(self) -> None: """Test to verify to_dict method of the Merged Question Skill Link Domain object. 
""" diff --git a/core/domain/question_fetchers.py b/core/domain/question_fetchers.py index a5c4a970ea35..61623a1cc494 100644 --- a/core/domain/question_fetchers.py +++ b/core/domain/question_fetchers.py @@ -25,12 +25,28 @@ from core.domain import state_domain from core.platform import models +from typing import List, Optional, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import question_models + from mypy_imports import skill_models + (question_models, skill_models) = models.Registry.import_models( - [models.NAMES.question, models.NAMES.skill]) + [models.Names.QUESTION, models.Names.SKILL]) + + +QuestionAndSkillDescriptionsType = Tuple[ + List[Optional[question_domain.Question]], + List[List[Optional[str]]] +] def get_questions_and_skill_descriptions_by_skill_ids( - question_count, skill_ids, offset): + question_count: int, + skill_ids: List[str], + offset: int +) -> QuestionAndSkillDescriptionsType: """Returns the questions linked to the given skill ids. Args: @@ -40,9 +56,9 @@ def get_questions_and_skill_descriptions_by_skill_ids( offset: int. Number of query results to skip. Returns: - list(Question), list(list(str)). The list of questions and the - corresponding linked skill descriptions which are linked to the - given skill ids. + list(Question|None), list(list(str|None)). The list of questions, and + the corresponding linked skill descriptions which are linked to the + given skill ids and None when skill are not available. """ if not skill_ids: return [], [] @@ -70,7 +86,9 @@ def get_questions_and_skill_descriptions_by_skill_ids( return questions, grouped_skill_descriptions -def get_questions_by_ids(question_ids): +def get_questions_by_ids( + question_ids: List[str] +) -> List[Optional[question_domain.Question]]: """Returns a list of domain objects representing questions. Args: @@ -81,7 +99,7 @@ def get_questions_by_ids(question_ids): with the given ids or None when the id is not valid. 
""" question_model_list = question_models.QuestionModel.get_multi(question_ids) - questions = [] + questions: List[Optional[question_domain.Question]] = [] for question_model in question_model_list: if question_model is not None: questions.append(get_question_from_model(question_model)) @@ -90,7 +108,9 @@ def get_questions_by_ids(question_ids): return questions -def get_question_from_model(question_model): +def get_question_from_model( + question_model: question_models.QuestionModel +) -> question_domain.Question: """Returns domain object representing the given question model. Args: @@ -102,29 +122,37 @@ def get_question_from_model(question_model): """ # Ensure the original question model does not get altered. - versioned_question_state = { + versioned_question_state: question_domain.VersionedQuestionStateDict = { 'state_schema_version': ( question_model.question_state_data_schema_version), 'state': copy.deepcopy( question_model.question_state_data) } + next_content_id_index = None # Migrate the question if it is not using the latest schema version. 
if (question_model.question_state_data_schema_version != feconf.CURRENT_STATE_SCHEMA_VERSION): - _migrate_state_schema(versioned_question_state) + next_content_id_index = migrate_state_schema(versioned_question_state) + + if next_content_id_index is not None: + question_model.next_content_id_index = next_content_id_index return question_domain.Question( question_model.id, - state_domain.State.from_dict(versioned_question_state['state']), + state_domain.State.from_dict( + versioned_question_state['state'], validate=False), versioned_question_state['state_schema_version'], question_model.language_code, question_model.version, question_model.linked_skill_ids, question_model.inapplicable_skill_misconception_ids, + question_model.next_content_id_index, question_model.created_on, question_model.last_updated) -def _migrate_state_schema(versioned_question_state): +def migrate_state_schema( + versioned_question_state: question_domain.VersionedQuestionStateDict +) -> Optional[int]: """Holds the responsibility of performing a step-by-step, sequential update of the state structure based on the schema version of the input state dictionary. If the current State schema changes, a new @@ -138,6 +166,9 @@ def _migrate_state_schema(versioned_question_state): state: The State domain object representing the question state data. + Returns: + int. The next content id index for generating content id. + Raises: Exception. The given state_schema_version is invalid. """ @@ -152,7 +183,21 @@ def _migrate_state_schema(versioned_question_state): 'Sorry, we can only process v25-v%d state schemas at present.' 
% feconf.CURRENT_STATE_SCHEMA_VERSION) + next_content_id_index = None while state_schema_version < feconf.CURRENT_STATE_SCHEMA_VERSION: - question_domain.Question.update_state_from_model( - versioned_question_state, state_schema_version) + if state_schema_version == 54: + # State conversion function from 54 to 55 removes + # next_content_id_index from the state level, hence this "if" case + # populates the next_content_id_index from the old state, which will + # be used for introducing next_content_id_index into + # question level. + next_content_id_index = ( + question_domain.Question.update_state_from_model( + versioned_question_state, state_schema_version) + ) + else: + question_domain.Question.update_state_from_model( + versioned_question_state, state_schema_version) state_schema_version += 1 + + return next_content_id_index diff --git a/core/domain/question_fetchers_test.py b/core/domain/question_fetchers_test.py index b8e6d35b1290..5bceab6f3257 100644 --- a/core/domain/question_fetchers_test.py +++ b/core/domain/question_fetchers_test.py @@ -22,18 +22,23 @@ from core.domain import question_domain from core.domain import question_fetchers from core.domain import question_services +from core.domain import translation_domain from core.domain import user_services from core.platform import models from core.tests import test_utils -(question_models,) = models.Registry.import_models([models.NAMES.question]) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import question_models + +(question_models,) = models.Registry.import_models([models.Names.QUESTION]) class QuestionFetchersUnitTests(test_utils.GenericTestBase): """Tests for question fetchers.""" - def setUp(self): - super(QuestionFetchersUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -52,11 +57,14 @@ def setUp(self): 'skill_2', self.admin_id, 
description='Skill Description 2') self.question_id = question_services.get_new_question_id() + self.content_id_generator = translation_domain.ContentIdGenerator() self.question = self.save_new_question( self.question_id, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', self.content_id_generator), + ['skill_1'], + self.content_id_generator.next_content_id_index) - def test_get_questions_and_skill_descriptions_by_skill_ids(self): + def test_get_questions_and_skill_descriptions_by_skill_ids(self) -> None: question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.3) @@ -64,21 +72,26 @@ def test_get_questions_and_skill_descriptions_by_skill_ids(self): question_fetchers.get_questions_and_skill_descriptions_by_skill_ids( 2, ['skill_1'], 0)) + # Ruling out the possibility of None for mypy type checking. + assert questions[0] is not None self.assertEqual(len(questions), 1) self.assertEqual( questions[0].to_dict(), self.question.to_dict()) - def test_get_no_questions_with_no_skill_ids(self): + def test_get_no_questions_with_no_skill_ids(self) -> None: questions, _ = ( question_fetchers.get_questions_and_skill_descriptions_by_skill_ids( 1, [], 0)) self.assertEqual(len(questions), 0) - def test_get_questions_with_multi_skill_ids(self): + def test_get_questions_with_multi_skill_ids(self) -> None: question_id_1 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() question_1 = self.save_new_question( question_id_1, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1', 'skill_2']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1', 'skill_2'], + content_id_generator.next_content_id_index) question_services.create_new_question_skill_link( self.editor_id, question_id_1, 'skill_1', 0.3) question_services.create_new_question_skill_link( @@ -88,23 +101,34 @@ def 
test_get_questions_with_multi_skill_ids(self): question_fetchers.get_questions_and_skill_descriptions_by_skill_ids( 2, ['skill_1', 'skill_2'], 0)) + # Ruling out the possibility of None for mypy type checking. + assert questions[0] is not None self.assertEqual(len(questions), 1) self.assertEqual( questions[0].to_dict(), question_1.to_dict()) - def test_get_questions_by_ids(self): + def test_get_questions_by_ids(self) -> None: question_id_2 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_2, self.editor_id, - self._create_valid_question_data('DEF'), ['skill_1']) + self._create_valid_question_data('DEF', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) questions = question_fetchers.get_questions_by_ids( [self.question_id, 'invalid_question_id', question_id_2]) self.assertEqual(len(questions), 3) + # Ruling out the possibility of None for mypy type checking. + assert questions[0] is not None self.assertEqual(questions[0].id, self.question_id) self.assertIsNone(questions[1]) + # Ruling out the possibility of None for mypy type checking. + assert questions[2] is not None self.assertEqual(questions[2].id, question_id_2) - def test_cannot_get_question_from_model_with_invalid_schema_version(self): + def test_cannot_get_question_from_model_with_invalid_schema_version( + self + ) -> None: # Delete all question models. 
all_question_models = question_models.QuestionModel.get_all() question_models.QuestionModel.delete_multi( @@ -115,13 +139,15 @@ def test_cannot_get_question_from_model_with_invalid_schema_version(self): self.assertEqual(all_question_models.count(), 0) question_id = question_services.get_new_question_id() - + content_id_generator = translation_domain.ContentIdGenerator() question_model = question_models.QuestionModel( id=question_id, question_state_data=( - self._create_valid_question_data('ABC').to_dict()), + self._create_valid_question_data( + 'ABC', content_id_generator).to_dict()), language_code='en', version=0, + next_content_id_index=content_id_generator.next_content_id_index, question_state_data_schema_version=0) question_model.commit( @@ -130,10 +156,66 @@ def test_cannot_get_question_from_model_with_invalid_schema_version(self): all_question_models = question_models.QuestionModel.get_all() self.assertEqual(all_question_models.count(), 1) - question_model = all_question_models.get() + fetched_question_models = all_question_models.get() + # Ruling out the possibility of None for mypy type checking. + assert fetched_question_models is not None - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v25-v%d state schemas at present.' % feconf.CURRENT_STATE_SCHEMA_VERSION): - question_fetchers.get_question_from_model(question_model) + question_fetchers.get_question_from_model(fetched_question_models) + + def test_get_question_from_model_with_current_valid_schema_version( + self + ) -> None: + # Delete all question models. 
+ all_question_models = question_models.QuestionModel.get_all() + question_models.QuestionModel.delete_multi( + [question_model.id for question_model in all_question_models], + feconf.SYSTEM_COMMITTER_ID, '', force_deletion=True) + + all_question_models = question_models.QuestionModel.get_all() + self.assertEqual(all_question_models.count(), 0) + + question_id = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() + question_model = question_models.QuestionModel( + id=question_id, + question_state_data=( + self._create_valid_question_data( + 'ABC', content_id_generator).to_dict()), + language_code='en', + version=0, + next_content_id_index=content_id_generator.next_content_id_index, + question_state_data_schema_version=( + feconf.CURRENT_STATE_SCHEMA_VERSION)) + + question_model.commit( + self.editor_id, 'question model created', + [{'cmd': question_domain.CMD_CREATE_NEW}]) + + all_question_models = question_models.QuestionModel.get_all() + self.assertEqual(all_question_models.count(), 1) + fetched_question_models = all_question_models.get() + # Ruling out the possibility of None for mypy type checking. 
+ assert fetched_question_models is not None + updated_question_model = question_fetchers.get_question_from_model( + fetched_question_models + ) + self.assertEqual( + updated_question_model.question_state_data_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + def test_get_questions_by_ids_with_latest_schema_version(self) -> None: + question_id = question_services.get_new_question_id() + self.save_new_question_with_state_data_schema_v27( + question_id, self.editor_id, []) + + question = question_fetchers.get_questions_by_ids([question_id])[0] + + assert question is not None + self.assertEqual( + question.question_state_data_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION + ) diff --git a/core/domain/question_services.py b/core/domain/question_services.py index 2e9339739b52..7033cb4bc08d 100644 --- a/core/domain/question_services.py +++ b/core/domain/question_services.py @@ -25,16 +25,30 @@ from core.domain import opportunity_services from core.domain import question_domain from core.domain import question_fetchers +from core.domain import skill_domain from core.domain import skill_fetchers from core.domain import state_domain from core.platform import models +from typing import Dict, List, Literal, Optional, Tuple, Union, cast, overload + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import question_models + from mypy_imports import skill_models + from mypy_imports import transaction_services + (question_models, skill_models) = models.Registry.import_models( - [models.NAMES.question, models.NAMES.skill]) + [models.Names.QUESTION, models.Names.SKILL]) + transaction_services = models.Registry.import_transaction_services() -def create_new_question(committer_id, question, commit_message): +def create_new_question( + committer_id: str, + question: question_domain.Question, + commit_message: str +) -> None: """Creates a new question. 
Args: @@ -52,7 +66,8 @@ def create_new_question(committer_id, question, commit_message): question_state_data_schema_version=( question.question_state_data_schema_version), inapplicable_skill_misconception_ids=( - question.inapplicable_skill_misconception_ids) + question.inapplicable_skill_misconception_ids), + next_content_id_index=question.next_content_id_index ) model.commit( committer_id, commit_message, [{'cmd': question_domain.CMD_CREATE_NEW}]) @@ -62,7 +77,11 @@ def create_new_question(committer_id, question, commit_message): def link_multiple_skills_for_question( - user_id, question_id, skill_ids, skill_difficulties): + user_id: str, + question_id: str, + skill_ids: List[str], + skill_difficulties: List[float] +) -> None: """Links multiple skill IDs to a question. To do that, it creates multiple new QuestionSkillLink models. It also adds the skill ids to the linked_skill_ids of the Question. @@ -76,8 +95,8 @@ def link_multiple_skills_for_question( 0 and 1 (inclusive). Raises: - Exception. The lengths of the skill_ids and skill_difficulties - lists are different. + Exception. Number of elements in skill ids and skill difficulties + are different. """ if len(skill_ids) != len(skill_difficulties): raise Exception( @@ -102,7 +121,11 @@ def link_multiple_skills_for_question( def create_new_question_skill_link( - user_id, question_id, skill_id, skill_difficulty): + user_id: str, + question_id: str, + skill_id: str, + skill_difficulty: float +) -> None: """Creates a new QuestionSkillLink model and adds the skill id to the linked skill ids for the Question model. @@ -128,7 +151,10 @@ def create_new_question_skill_link( def update_question_skill_link_difficulty( - question_id, skill_id, new_difficulty): + question_id: str, + skill_id: str, + new_difficulty: float +) -> None: """Updates the difficulty value of question skill link. 
Args: @@ -153,7 +179,11 @@ def update_question_skill_link_difficulty( def _update_linked_skill_ids_of_question( - user_id, question_id, new_linked_skill_ids, old_linked_skill_ids): + user_id: str, + question_id: str, + new_linked_skill_ids: List[str], + old_linked_skill_ids: List[str] +) -> None: """Updates the question linked_skill ids in the Question model. Args: @@ -163,7 +193,7 @@ def _update_linked_skill_ids_of_question( old_linked_skill_ids: list(str). Current linked skill IDs of the question. """ - change_dict = { + change_dict: Dict[str, Union[str, List[str]]] = { 'cmd': 'update_question_property', 'property_name': 'linked_skill_ids', 'new_value': new_linked_skill_ids, @@ -178,7 +208,9 @@ def _update_linked_skill_ids_of_question( old_linked_skill_ids, new_linked_skill_ids)) -def delete_question_skill_link(user_id, question_id, skill_id): +def delete_question_skill_link( + user_id: str, question_id: str, skill_id: str +) -> None: """Deleted a QuestionSkillLink model and removes the linked skill id from the Question model of question_id. @@ -207,7 +239,7 @@ def delete_question_skill_link(user_id, question_id, skill_id): question_skill_link_model.delete() -def get_total_question_count_for_skill_ids(skill_ids): +def get_total_question_count_for_skill_ids(skill_ids: List[str]) -> int: """Returns the number of questions assigned to the given skill_ids. Args: @@ -226,7 +258,10 @@ def get_total_question_count_for_skill_ids(skill_ids): def get_questions_by_skill_ids( - total_question_count, skill_ids, require_medium_difficulty): + total_question_count: int, + skill_ids: List[str], + require_medium_difficulty: bool +) -> List[question_domain.Question]: """Returns constant number of questions linked to each given skill id. Args: @@ -247,6 +282,9 @@ def get_questions_by_skill_ids( skill is random when require_medium_difficulty is false, otherwise the order is sorted by absolute value of the difference between skill difficulty and the medium difficulty. 
+ + Raises: + Exception. Question count is higher than the maximum limit. """ if total_question_count > feconf.MAX_QUESTIONS_FETCHABLE_AT_ONE_TIME: @@ -267,11 +305,17 @@ def get_questions_by_skill_ids( total_question_count, skill_ids)) question_ids = [model.question_id for model in question_skill_link_models] - questions = question_fetchers.get_questions_by_ids(question_ids) + questions_with_none = question_fetchers.get_questions_by_ids(question_ids) + questions: List[question_domain.Question] = [] + for question in questions_with_none: + # Ruling out the possibility of None for mypy type checking. + assert question is not None + questions.append(question) + return questions -def get_new_question_id(): +def get_new_question_id() -> str: """Returns a new question id. Returns: @@ -280,7 +324,9 @@ def get_new_question_id(): return question_models.QuestionModel.get_new_id('') -def add_question(committer_id, question): +def add_question( + committer_id: str, question: question_domain.Question +) -> None: """Saves a new question. Args: @@ -292,7 +338,8 @@ def add_question(committer_id, question): def delete_question( - committer_id, question_id, force_deletion=False): + committer_id: str, question_id: str, force_deletion: bool = False +) -> None: """Deletes the question with the given question_id. 
Args: @@ -307,7 +354,8 @@ def delete_question( @transaction_services.run_in_transaction_wrapper def delete_question_model_transactional( - question_id, committer_id, force_deletion): + question_id: str, committer_id: str, force_deletion: bool + ) -> None: """Inner function that is to be done in a transaction.""" question_model = question_models.QuestionModel.get_by_id(question_id) if question_model is not None: @@ -322,13 +370,15 @@ def delete_question_model_transactional( question_id, committer_id, force_deletion) question_summary_model = ( - question_models.QuestionSummaryModel.get(question_id, False)) + question_models.QuestionSummaryModel.get(question_id, strict=False)) if question_summary_model is not None: question_summary_model.delete() def get_question_skill_link_from_model( - question_skill_link_model, skill_description): + question_skill_link_model: question_models.QuestionSkillLinkModel, + skill_description: str +) -> question_domain.QuestionSkillLink: """Returns domain object representing the given question skill link model. Args: @@ -347,7 +397,27 @@ def get_question_skill_link_from_model( question_skill_link_model.skill_difficulty) -def get_question_by_id(question_id, strict=True): +@overload +def get_question_by_id( + question_id: str +) -> question_domain.Question: ... + + +@overload +def get_question_by_id( + question_id: str, *, strict: Literal[True] +) -> question_domain.Question: ... + + +@overload +def get_question_by_id( + question_id: str, *, strict: Literal[False] +) -> Optional[question_domain.Question]: ... + + +def get_question_by_id( + question_id: str, strict: bool = True +) -> Optional[question_domain.Question]: """Returns a domain object representing a question. 
Args: @@ -368,7 +438,9 @@ def get_question_by_id(question_id, strict=True): return None -def get_question_skill_links_of_skill(skill_id, skill_description): +def get_question_skill_links_of_skill( + skill_id: str, skill_description: str +) -> List[question_domain.QuestionSkillLink]: """Returns a list of QuestionSkillLinks of a particular skill ID. @@ -390,7 +462,7 @@ def get_question_skill_links_of_skill(skill_id, skill_description): return question_skill_links -def get_skills_linked_to_question(question_id): +def get_skills_linked_to_question(question_id: str) -> List[skill_domain.Skill]: """Returns a list of skills linked to a particular question. Args: @@ -405,7 +477,8 @@ def get_skills_linked_to_question(question_id): def replace_skill_id_for_all_questions( - curr_skill_id, curr_skill_description, new_skill_id): + curr_skill_id: str, curr_skill_description: str, new_skill_id: str +) -> None: """Updates the skill ID of QuestionSkillLinkModels to the superseding skill ID. @@ -429,13 +502,15 @@ def replace_skill_id_for_all_questions( question_skill_link.skill_difficulty) ) question_models.QuestionSkillLinkModel.delete_multi_question_skill_links( - old_question_skill_link_models) + list(old_question_skill_link_models)) question_models.QuestionSkillLinkModel.put_multi_question_skill_links( new_question_skill_link_models) old_questions = question_models.QuestionModel.get_multi(list(question_ids)) new_questions = [] for question in old_questions: + # Ruling out the possibility of None for mypy type checking. 
+ assert question is not None new_question = copy.deepcopy(question) new_question.linked_skill_ids.remove(curr_skill_id) new_question.linked_skill_ids.append(new_skill_id) @@ -444,7 +519,11 @@ def replace_skill_id_for_all_questions( def get_displayable_question_skill_link_details( - question_count, skill_ids, offset): + question_count: int, skill_ids: List[str], offset: int +) -> Tuple[ + List[Optional[question_domain.QuestionSummary]], + List[question_domain.MergedQuestionSkillLink] +]: """Returns the list of question summaries and corresponding skill descriptions linked to all the skills given by skill_ids. @@ -503,7 +582,9 @@ def get_displayable_question_skill_link_details( return (question_summaries, merged_question_skill_links) -def get_question_summaries_by_ids(question_ids): +def get_question_summaries_by_ids( + question_ids: List[str] +) -> List[Optional[question_domain.QuestionSummary]]: """Returns a list of domain objects representing question summaries. Args: @@ -515,7 +596,7 @@ def get_question_summaries_by_ids(question_ids): """ question_summary_model_list = ( question_models.QuestionSummaryModel.get_multi(question_ids)) - question_summaries = [] + question_summaries: List[Optional[question_domain.QuestionSummary]] = [] for question_summary_model in question_summary_model_list: if question_summary_model is not None: question_summaries.append( @@ -525,7 +606,9 @@ def get_question_summaries_by_ids(question_ids): return question_summaries -def apply_change_list(question_id, change_list): +def apply_change_list( + question_id: str, change_list: List[question_domain.QuestionChange] +) -> question_domain.Question: """Applies a changelist to a pristine question and returns the result. Args: @@ -536,6 +619,9 @@ def apply_change_list(question_id, change_list): Returns: Question. The resulting question domain object. + + Raises: + Exception. The change list is not applicable to the question. 
""" question = get_question_by_id(question_id) question_property_inapplicable_skill_misconception_ids = ( @@ -545,19 +631,62 @@ def apply_change_list(question_id, change_list): if change.cmd == question_domain.CMD_UPDATE_QUESTION_PROPERTY: if (change.property_name == question_domain.QUESTION_PROPERTY_LANGUAGE_CODE): - question.update_language_code(change.new_value) + # Here we use cast because this 'if' condition forces + # change to have type UpdateQuestionPropertyLanguageCodeCmd. + update_language_code_cmd = cast( + question_domain.UpdateQuestionPropertyLanguageCodeCmd, + change + ) + question.update_language_code( + update_language_code_cmd.new_value + ) elif (change.property_name == question_domain.QUESTION_PROPERTY_QUESTION_STATE_DATA): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateQuestionPropertyQuestionStateDataCmd. + update_question_state_data_cmd = cast( + question_domain.UpdateQuestionPropertyQuestionStateDataCmd, # pylint: disable=line-too-long + change + ) state_domain_object = state_domain.State.from_dict( - change.new_value) + update_question_state_data_cmd.new_value + ) question.update_question_state_data(state_domain_object) elif (change.property_name == question_domain.QUESTION_PROPERTY_LINKED_SKILL_IDS): - question.update_linked_skill_ids(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateQuestionPropertyLinkedSkillIdsCmd. + update_linked_skill_ids_cmd = cast( + question_domain.UpdateQuestionPropertyLinkedSkillIdsCmd, + change + ) + question.update_linked_skill_ids( + update_linked_skill_ids_cmd.new_value + ) elif (change.property_name == question_property_inapplicable_skill_misconception_ids): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateQuestionPropertySkillMisconceptionIdsCmd. 
+ update_skill_misconception_ids_cmd = cast( + question_domain.UpdateQuestionPropertySkillMisconceptionIdsCmd, # pylint: disable=line-too-long + change + ) question.update_inapplicable_skill_misconception_ids( - change.new_value) + update_skill_misconception_ids_cmd.new_value) + elif (change.property_name == + question_domain.QUESTION_PROPERTY_NEXT_CONTENT_ID_INDEX): + # Here we use cast because this 'if' condition forces + # change to have type + # UpdateQuestionPropertyNextContentIdIndexCmd. + cmd = cast( + question_domain + .UpdateQuestionPropertyNextContentIdIndexCmd, + change + ) + question.update_next_content_id_index(cmd.new_value) return question @@ -569,7 +698,12 @@ def apply_change_list(question_id, change_list): raise e -def _save_question(committer_id, question, change_list, commit_message): +def _save_question( + committer_id: str, + question: question_domain.Question, + change_list: List[question_domain.QuestionChange], + commit_message: str +) -> None: """Validates a question and commits it to persistent storage. Args: @@ -579,7 +713,7 @@ def _save_question(committer_id, question, change_list, commit_message): change_list: list(QuestionChange). A list of QuestionChange objects. These changes are applied in sequence to produce the resulting question. - commit_message: str or None. A description of changes made to the + commit_message: str. A description of changes made to the question. 
Raises: @@ -599,13 +733,18 @@ def _save_question(committer_id, question, change_list, commit_message): question_model.linked_skill_ids = question.linked_skill_ids question_model.inapplicable_skill_misconception_ids = ( question.inapplicable_skill_misconception_ids) + question_model.next_content_id_index = question.next_content_id_index change_dicts = [change.to_dict() for change in change_list] question_model.commit(committer_id, commit_message, change_dicts) question.version += 1 def update_question( - committer_id, question_id, change_list, commit_message): + committer_id: str, + question_id: str, + change_list: List[question_domain.QuestionChange], + commit_message: str +) -> None: """Updates a question. Commits changes. Args: @@ -615,7 +754,7 @@ def update_question( change_list: list(QuestionChange). A list of QuestionChange objects. These changes are applied in sequence to produce the resulting question. - commit_message: str or None. A description of changes made to the + commit_message: str. A description of changes made to the question. Raises: @@ -630,7 +769,7 @@ def update_question( create_question_summary(question_id) -def create_question_summary(question_id): +def create_question_summary(question_id: str) -> None: """Creates and stores a summary of the given question. Args: @@ -641,7 +780,9 @@ def create_question_summary(question_id): save_question_summary(question_summary) -def compute_summary_of_question(question): +def compute_summary_of_question( + question: question_domain.Question +) -> question_domain.QuestionSummary: """Create a QuestionSummary domain object for a given Question domain object and return it. @@ -651,22 +792,40 @@ def compute_summary_of_question(question): Returns: QuestionSummary. The computed summary for the given question. + + Raises: + Exception. No interaction_id found for the given question. + Exception. No data available for when the question was last_updated on. 
""" question_content = question.question_state_data.content.html answer_groups = question.question_state_data.interaction.answer_groups - misconception_ids = [ - answer_group.to_dict()['tagged_skill_misconception_id'] - for answer_group in answer_groups - if answer_group.to_dict()['tagged_skill_misconception_id']] + misconception_ids = [] + for answer_group in answer_groups: + misconception_id = answer_group.to_dict()[ + 'tagged_skill_misconception_id' + ] + if misconception_id is not None: + misconception_ids.append(misconception_id) misconception_ids.extend(question.inapplicable_skill_misconception_ids) interaction_id = question.question_state_data.interaction.id + if interaction_id is None: + raise Exception( + 'No interaction_id found for the given question.' + ) + + if question.created_on is None or question.last_updated is None: + raise Exception( + 'No data available for when the question was last_updated on.' + ) question_summary = question_domain.QuestionSummary( question.id, question_content, misconception_ids, interaction_id, question.created_on, question.last_updated) return question_summary -def save_question_summary(question_summary): +def save_question_summary( + question_summary: question_domain.QuestionSummary +) -> None: """Save a question summary domain object as a QuestionSummaryModel entity in the datastore. @@ -687,7 +846,9 @@ def save_question_summary(question_summary): question_summary_model.put() -def get_question_summary_from_model(question_summary_model): +def get_question_summary_from_model( + question_summary_model: question_models.QuestionSummaryModel +) -> question_domain.QuestionSummary: """Returns a domain object for an Oppia question summary given a question summary model. @@ -709,7 +870,7 @@ def get_question_summary_from_model(question_summary_model): ) -def get_interaction_id_for_question(question_id): +def get_interaction_id_for_question(question_id: str) -> Optional[str]: """Returns the interaction id for the given question. 
Args: @@ -728,8 +889,11 @@ def get_interaction_id_for_question(question_id): def untag_deleted_misconceptions( - committer_id, skill_id, skill_description, - deleted_skill_misconception_ids): + committer_id: str, + skill_id: str, + skill_description: str, + deleted_skill_misconception_ids: List[str] +) -> None: """Untags deleted misconceptions from questions belonging to a skill with the provided skill_id. @@ -744,8 +908,10 @@ def untag_deleted_misconceptions( question_skill_links = get_question_skill_links_of_skill( skill_id, skill_description) question_ids = [model.question_id for model in question_skill_links] - questions = question_fetchers.get_questions_by_ids(question_ids) - for question in questions: + questions_with_none = question_fetchers.get_questions_by_ids(question_ids) + for question in questions_with_none: + # Ruling out the possibility of None for mypy type checking. + assert question is not None change_list = [] inapplicable_skill_misconception_ids = ( question.inapplicable_skill_misconception_ids) diff --git a/core/domain/question_services_test.py b/core/domain/question_services_test.py index edbae4be0c71..80439b2f1b5b 100644 --- a/core/domain/question_services_test.py +++ b/core/domain/question_services_test.py @@ -30,19 +30,26 @@ from core.domain import state_domain from core.domain import topic_domain from core.domain import topic_fetchers +from core.domain import translation_domain from core.domain import user_services from core.platform import models from core.tests import test_utils -(question_models,) = models.Registry.import_models([models.NAMES.question]) +from typing import Callable, Dict, List, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import question_models + +(question_models,) = models.Registry.import_models([models.Names.QUESTION]) class QuestionServicesUnitTest(test_utils.GenericTestBase): """Test the question services module.""" - def setUp(self): + def setUp(self) -> None: """Before each individual 
test, create dummy user.""" - super(QuestionServicesUnitTest, self).setUp() + super().setUp() self.signup(self.TOPIC_MANAGER_EMAIL, self.TOPIC_MANAGER_USERNAME) self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) @@ -63,7 +70,7 @@ def setUp(self): self.topic_id = topic_fetchers.get_new_topic_id() subtopic_1 = topic_domain.Subtopic.create_default_subtopic( - 1, 'Subtopic Title 1') + 1, 'Subtopic Title 1', 'url-frag-one') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' self.save_new_topic( @@ -85,36 +92,47 @@ def setUp(self): 'skill_3', self.admin_id, description='Skill Description 3') self.question_id = question_services.get_new_question_id() + self.content_id_generator = translation_domain.ContentIdGenerator() self.question = self.save_new_question( self.question_id, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1'], + self._create_valid_question_data('ABC', self.content_id_generator), + ['skill_1'], + self.content_id_generator.next_content_id_index, inapplicable_skill_misconception_ids=[ 'skillid12345-1', 'skillid12345-2']) self.question_id_1 = question_services.get_new_question_id() + self.content_id_generator_1 = translation_domain.ContentIdGenerator() self.question_1 = self.save_new_question( self.question_id_1, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_2']) + self._create_valid_question_data( + 'ABC', self.content_id_generator_1), + ['skill_2'], + self.content_id_generator_1.next_content_id_index) self.question_id_2 = question_services.get_new_question_id() + self.content_id_generator_2 = translation_domain.ContentIdGenerator() self.question_2 = self.save_new_question( self.question_id_2, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_2']) + self._create_valid_question_data( + 'ABC', self.content_id_generator_2), + ['skill_2'], + self.content_id_generator_2.next_content_id_index) - def 
test_get_question_by_id(self): + def test_get_question_by_id(self) -> None: question = question_services.get_question_by_id(self.question_id) self.assertEqual(question.id, self.question_id) - question = question_services.get_question_by_id( + question_with_none = question_services.get_question_by_id( 'question_id', strict=False) - self.assertIsNone(question) + self.assertIsNone(question_with_none) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class QuestionModel with id question_id ' 'not found'): question_services.get_question_by_id('question_id') - def test_get_questions_by_skill_ids_with_fetch_by_difficulty(self): + def test_get_questions_by_skill_ids_with_fetch_by_difficulty(self) -> None: question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.3) question_services.create_new_question_skill_link( @@ -124,13 +142,17 @@ def test_get_questions_by_skill_ids_with_fetch_by_difficulty(self): questions = question_services.get_questions_by_skill_ids( 2, ['skill_1', 'skill_2'], True) - questions.sort(key=lambda question: question.last_updated) + sort_fn: Callable[[question_domain.Question], float] = ( + lambda question: question.last_updated.timestamp() + if question.last_updated else 0 + ) + questions.sort(key=sort_fn) self.assertEqual(len(questions), 2) self.assertEqual(questions[0].to_dict(), self.question.to_dict()) self.assertEqual(questions[1].to_dict(), self.question_2.to_dict()) - def test_get_total_question_count_for_skill_ids(self): + def test_get_total_question_count_for_skill_ids(self) -> None: question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.3) question_services.create_new_question_skill_link( @@ -163,7 +185,7 @@ def test_get_total_question_count_for_skill_ids(self): ['skill_1', 'skill_1', 'skill_2'])) self.assertEqual(question_count, 3) - def test_update_question_skill_link_difficulty(self): + def 
test_update_question_skill_link_difficulty(self) -> None: question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.3) @@ -182,12 +204,14 @@ def test_update_question_skill_link_difficulty(self): self.assertEqual( merged_question_skill_links[0].skill_difficulties, [0.9]) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The given question and skill are not linked.'): question_services.update_question_skill_link_difficulty( self.question_id, 'skill_10', 0.9) - def test_get_questions_by_skill_ids_without_fetch_by_difficulty(self): + def test_get_questions_by_skill_ids_without_fetch_by_difficulty( + self + ) -> None: question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.3) question_services.create_new_question_skill_link( @@ -197,7 +221,11 @@ def test_get_questions_by_skill_ids_without_fetch_by_difficulty(self): questions = question_services.get_questions_by_skill_ids( 4, ['skill_1', 'skill_2'], False) - questions.sort(key=lambda question: question.last_updated) + sort_fn: Callable[[question_domain.Question], float] = ( + lambda question: question.last_updated.timestamp() + if question.last_updated else 0 + ) + questions.sort(key=sort_fn) self.assertEqual(len(questions), 3) self.assertEqual(questions[0].to_dict(), self.question.to_dict()) @@ -205,19 +233,23 @@ def test_get_questions_by_skill_ids_without_fetch_by_difficulty(self): self.assertEqual(questions[2].to_dict(), self.question_2.to_dict()) def test_get_questions_by_skill_ids_raise_error_with_high_question_count( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Question count is too high, please limit the question ' 'count to %d.' 
% feconf.MAX_QUESTIONS_FETCHABLE_AT_ONE_TIME): question_services.get_questions_by_skill_ids( 25, ['skill_1', 'skill_2'], False) - def test_create_multi_question_skill_links_for_question(self): + def test_create_multi_question_skill_links_for_question(self) -> None: + content_id_generator = translation_domain.ContentIdGenerator() self.question = self.save_new_question( self.question_id, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Skill difficulties and skill ids should match. ' 'The lengths of the two lists are different.'): question_services.link_multiple_skills_for_question( @@ -231,7 +263,7 @@ def test_create_multi_question_skill_links_for_question(self): self.question_id)] self.assertItemsEqual(skill_ids, ['skill_1', 'skill_2']) - def test_delete_question_skill_link(self): + def test_delete_question_skill_link(self) -> None: question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.3) question_services.create_new_question_skill_link( @@ -250,11 +282,14 @@ def test_delete_question_skill_link(self): self.question_id, strict=False) self.assertIsNone(question) - def test_linking_same_skill_to_question_twice(self): + def test_linking_same_skill_to_question_twice(self) -> None: question_id_2 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_2, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) skill_ids = [skill.id for skill in question_services.get_skills_linked_to_question( question_id_2)] @@ -275,9 +310,9 @@ def test_linking_same_skill_to_question_twice(self): 
self.assertEqual(len(skill_ids), 2) self.assertItemsEqual(skill_ids, ['skill_1', 'skill_2']) - def test_create_and_get_question_skill_link(self): + def test_create_and_get_question_skill_link(self) -> None: question_id_2 = question_services.get_new_question_id() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape( 'Entity for class QuestionModel with id %s not found' % ( @@ -285,14 +320,20 @@ def test_create_and_get_question_skill_link(self): question_services.create_new_question_skill_link( self.editor_id, question_id_2, 'skill_1', 0.5) + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_2, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) question_id_3 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_3, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_2']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_2'], + content_id_generator.next_content_id_index) question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.5) question_services.create_new_question_skill_link( @@ -302,16 +343,20 @@ def test_create_and_get_question_skill_link(self): question_services.create_new_question_skill_link( self.editor_id, question_id_3, 'skill_2', 0.2) - question_summaries, merged_question_skill_links = ( + question_summaries_with_none, merged_question_skill_links = ( question_services.get_displayable_question_skill_link_details( 5, ['skill_1', 'skill_2', 'skill_3'], 0)) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Querying linked question summaries for more than 3 ' 'skills at a time is not supported currently.'): 
question_services.get_displayable_question_skill_link_details( 5, ['skill_1', 'skill_2', 'skill_3', 'skill_4'], 0) - question_ids = [summary.id for summary in question_summaries] + question_ids = [] + for summary in question_summaries_with_none: + # Ruling out the possibility of None for mypy type checking. + assert summary is not None + question_ids.append(summary.id) self.assertEqual(len(question_ids), 3) self.assertEqual(len(merged_question_skill_links), 3) @@ -342,25 +387,37 @@ def test_create_and_get_question_skill_link(self): self.assertEqual( [0.2], link_object.skill_difficulties) - question_summaries, merged_question_skill_links = ( + question_summaries_with_none, merged_question_skill_links = ( question_services.get_displayable_question_skill_link_details( 5, ['skill_1', 'skill_3'], 0)) - question_ids = [summary.id for summary in question_summaries] + question_ids = [] + for summary in question_summaries_with_none: + # Ruling out the possibility of None for mypy type checking. + assert summary is not None + question_ids.append(summary.id) self.assertEqual(len(question_ids), 2) self.assertItemsEqual( question_ids, [self.question_id, question_id_2]) - with self.assertRaisesRegexp( - Exception, 'The given question is already linked to given skill'): + with self.assertRaisesRegex( + Exception, + 'The question with ID %s is already linked to skill skill_1' % ( + self.question_id + ) + ): question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.3) def test_get_displayable_question_skill_link_details_with_no_skill_ids( - self): + self + ) -> None: question_id = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) 
question_services.create_new_question_skill_link( self.editor_id, question_id, 'skill_1', 0.5) @@ -372,7 +429,7 @@ def test_get_displayable_question_skill_link_details_with_no_skill_ids( self.assertEqual(question_summaries, []) self.assertEqual(merged_question_skill_links, []) - def test_get_question_skill_links_of_skill(self): + def test_get_question_skill_links_of_skill(self) -> None: # If the skill id doesnt exist at all, it returns an empty list. question_skill_links = ( question_services.get_question_skill_links_of_skill( @@ -386,14 +443,20 @@ def test_get_question_skill_links_of_skill(self): self.assertEqual(len(question_skill_links), 0) question_id_2 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_2, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) question_id_3 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_3, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_2']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_2'], + content_id_generator.next_content_id_index) # Setting skill difficulty for self.question_id. question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.5) @@ -418,10 +481,12 @@ def test_get_question_skill_links_of_skill(self): if question_skill.question_id == self.question_id: self.assertEqual(question_skill.skill_difficulty, 0.5) - def test_get_question_summaries_by_ids(self): + def test_get_question_summaries_by_ids(self) -> None: question_summaries = question_services.get_question_summaries_by_ids([ self.question_id, 'invalid_question_id']) + # Ruling out the possibility of None for mypy type checking. 
+ assert question_summaries[0] is not None self.assertEqual(len(question_summaries), 2) self.assertEqual(question_summaries[0].id, self.question_id) self.assertEqual( @@ -429,24 +494,24 @@ def test_get_question_summaries_by_ids(self): feconf.DEFAULT_INIT_STATE_CONTENT_STR) self.assertIsNone(question_summaries[1]) - def test_delete_question(self): + def test_delete_question(self) -> None: question_summary_model = question_models.QuestionSummaryModel.get( self.question_id) self.assertFalse(question_summary_model is None) question_services.delete_question(self.editor_id, self.question_id) - with self.assertRaisesRegexp(Exception, ( + with self.assertRaisesRegex(Exception, ( 'Entity for class QuestionModel with id %s not found' % ( self.question_id))): question_models.QuestionModel.get(self.question_id) - with self.assertRaisesRegexp(Exception, ( + with self.assertRaisesRegex(Exception, ( 'Entity for class QuestionSummaryModel with id %s not found' % ( self.question_id))): question_models.QuestionSummaryModel.get(self.question_id) - def test_delete_question_marked_deleted(self): + def test_delete_question_marked_deleted(self) -> None: question_models.QuestionModel.delete_multi( [self.question_id], self.editor_id, feconf.COMMIT_MESSAGE_QUESTION_DELETED, force_deletion=False) @@ -463,13 +528,16 @@ def test_delete_question_marked_deleted(self): question_models.QuestionSummaryModel.get( self.question_id, strict=False), None) - def test_delete_question_model_with_deleted_summary_model(self): + def test_delete_question_model_with_deleted_summary_model(self) -> None: question_summary_model = ( question_models.QuestionSummaryModel.get(self.question_id)) question_summary_model.delete() - question_summary_model = ( - question_models.QuestionSummaryModel.get(self.question_id, False)) - self.assertIsNone(question_summary_model) + question_summary_model_with_none = ( + question_models.QuestionSummaryModel.get( + self.question_id, strict=False + ) + ) + 
self.assertIsNone(question_summary_model_with_none) question_services.delete_question( self.editor_id, self.question_id, force_deletion=True) @@ -480,16 +548,20 @@ def test_delete_question_model_with_deleted_summary_model(self): question_models.QuestionSummaryModel.get( self.question_id, strict=False), None) - def test_update_question(self): - new_question_data = self._create_valid_question_data('DEF') - change_dict = { + def test_update_question(self) -> None: + new_question_data = self._create_valid_question_data( + 'DEF', self.content_id_generator) + change_list = [question_domain.QuestionChange({ + 'cmd': 'update_question_property', + 'property_name': 'next_content_id_index', + 'old_value': 0, + 'new_value': self.content_id_generator.next_content_id_index, + }), question_domain.QuestionChange({ 'cmd': 'update_question_property', 'property_name': 'question_state_data', 'new_value': new_question_data.to_dict(), 'old_value': self.question.question_state_data.to_dict() - } - change_list = [question_domain.QuestionChange(change_dict)] - + })] question_services.update_question( self.editor_id, self.question_id, change_list, 'updated question data') @@ -499,23 +571,31 @@ def test_update_question(self): question.question_state_data.to_dict(), new_question_data.to_dict()) self.assertEqual(question.version, 2) - def test_cannot_update_question_with_no_commit_message(self): - new_question_data = self._create_valid_question_data('DEF') - change_dict = { + def test_cannot_update_question_with_no_commit_message(self) -> None: + new_question_data = self._create_valid_question_data( + 'DEF', self.content_id_generator) + change_list = [question_domain.QuestionChange({ 'cmd': 'update_question_property', 'property_name': 'question_state_data', 'new_value': new_question_data.to_dict(), 'old_value': self.question.question_state_data.to_dict() - } - change_list = [question_domain.QuestionChange(change_dict)] - - with self.assertRaisesRegexp( + }), question_domain.QuestionChange({ + 
'cmd': 'update_question_property', + 'property_name': 'next_content_id_index', + 'old_value': 0, + 'new_value': self.content_id_generator.next_content_id_index, + })] + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + with self.assertRaisesRegex( Exception, 'Expected a commit message, received none.'): question_services.update_question( - self.editor_id, self.question_id, change_list, None) + self.editor_id, self.question_id, change_list, None) # type: ignore[arg-type] - def test_cannot_update_question_with_no_change_list(self): - with self.assertRaisesRegexp( + def test_cannot_update_question_with_no_change_list(self) -> None: + with self.assertRaisesRegex( Exception, 'Unexpected error: received an invalid change list when trying to ' 'save question'): @@ -523,7 +603,7 @@ def test_cannot_update_question_with_no_change_list(self): self.editor_id, self.question_id, [], 'updated question data') - def test_update_question_language_code(self): + def test_update_question_language_code(self) -> None: self.assertEqual(self.question.language_code, 'en') change_dict = { 'cmd': 'update_question_property', @@ -541,11 +621,11 @@ def test_update_question_language_code(self): self.assertEqual(question.language_code, 'bn') self.assertEqual(question.version, 2) - def test_update_inapplicable_skill_misconception_ids(self): + def test_update_inapplicable_skill_misconception_ids(self) -> None: self.assertEqual( self.question.inapplicable_skill_misconception_ids, ['skillid12345-1', 'skillid12345-2']) - change_dict = { + change_dict: Dict[str, Union[str, List[str]]] = { 'cmd': 'update_question_property', 'property_name': 'inapplicable_skill_misconception_ids', 'new_value': ['skillid12345-1'], @@ -562,38 +642,47 @@ def test_update_inapplicable_skill_misconception_ids(self): question.inapplicable_skill_misconception_ids, 
['skillid12345-1']) self.assertEqual(question.version, 2) - def test_cannot_update_question_with_invalid_change_list(self): + def test_cannot_update_question_with_invalid_change_list(self) -> None: observed_log_messages = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.error().""" observed_log_messages.append(msg % args) logging_swap = self.swap(logging, 'error', _mock_logging_function) - assert_raises_context_manager = self.assertRaisesRegexp( + assert_raises_context_manager = self.assertRaisesRegex( Exception, '\'str\' object has no attribute \'cmd\'') + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. with logging_swap, assert_raises_context_manager: question_services.update_question( - self.editor_id, self.question_id, 'invalid_change_list', + self.editor_id, self.question_id, 'invalid_change_list', # type: ignore[arg-type] 'updated question language code') self.assertEqual(len(observed_log_messages), 1) - self.assertRegexpMatches( + self.assertRegex( observed_log_messages[0], 'object has no attribute \'cmd\' %s ' 'invalid_change_list' % self.question_id) - def test_replace_skill_id_for_all_questions(self): + def test_replace_skill_id_for_all_questions(self) -> None: question_id_2 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_2, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) question_id_3 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_3, self.editor_id, - self._create_valid_question_data('ABC'), 
['skill_2']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_2'], + content_id_generator.next_content_id_index) question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.5) question_services.create_new_question_skill_link( @@ -637,36 +726,70 @@ def test_replace_skill_id_for_all_questions(self): questions = question_fetchers.get_questions_by_ids( [self.question_id, question_id_2, question_id_3]) for question in questions: + # Ruling out the possibility of None for mypy type checking. + assert question is not None if question.id in ([self.question_id, question_id_2]): self.assertItemsEqual(question.linked_skill_ids, ['skill_3']) else: self.assertItemsEqual(question.linked_skill_ids, ['skill_2']) - def test_compute_summary_of_question(self): + def test_compute_summary_of_question(self) -> None: + question = question_services.get_question_by_id(self.question_id) question_summary = question_services.compute_summary_of_question( - self.question) + question) self.assertEqual(question_summary.id, self.question_id) self.assertEqual( question_summary.question_content, feconf.DEFAULT_INIT_STATE_CONTENT_STR) - def test_get_skills_of_question(self): + def test_raises_error_while_computing_summary_if_interaction_id_is_none( + self + ) -> None: + question = question_services.get_question_by_id(self.question_id) + question.question_state_data.interaction.id = None + + with self.assertRaisesRegex( + Exception, + 'No interaction_id found for the given question.' 
+ ): + question_services.compute_summary_of_question(question) + + def test_raises_error_when_the_question_provided_with_no_created_on_data( + self + ) -> None: + + question = question_services.get_question_by_id(self.question_id) + question.created_on = None + + with self.assertRaisesRegex( + Exception, + 'No data available for when the question was last_updated' + ): + question_services.compute_summary_of_question(question) + + def test_get_skills_of_question(self) -> None: # If the question id doesnt exist at all, it returns an empty list. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class QuestionModel with id ' 'non_existent_question_id not found'): question_services.get_skills_linked_to_question( 'non_existent_question_id') question_id_2 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_2, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) question_id_3 = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( question_id_3, self.editor_id, - self._create_valid_question_data('ABC'), ['skill_2']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_2'], + content_id_generator.next_content_id_index) question_services.create_new_question_skill_link( self.editor_id, self.question_id, 'skill_1', 0.5) question_services.create_new_question_skill_link( @@ -686,14 +809,14 @@ def test_get_skills_of_question(self): self.assertItemsEqual( skill_ids, ['skill_1', 'skill_2']) - def test_get_interaction_id_for_question(self): + def test_get_interaction_id_for_question(self) -> None: self.assertEqual( question_services.get_interaction_id_for_question( self.question_id), 'TextInput') - with 
self.assertRaisesRegexp(Exception, 'No questions exists with'): + with self.assertRaisesRegex(Exception, 'No questions exists with'): question_services.get_interaction_id_for_question('fake_q_id') - def test_untag_deleted_misconceptions_on_no_change_to_skill(self): + def test_untag_deleted_misconceptions_on_no_change_to_skill(self) -> None: misconceptions = [ skill_domain.Misconception( 0, 'misconception-name', '

    description

    ', @@ -717,13 +840,24 @@ def test_untag_deleted_misconceptions_on_no_change_to_skill(self): misconceptions=misconceptions) self.question_id = question_services.get_new_question_id() - question_state_data = self._create_valid_question_data('state name') + content_id_generator = translation_domain.ContentIdGenerator() + question_state_data = self._create_valid_question_data( + 'state name', content_id_generator) + feedback_content_ids = [ + content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + for _ in range(3)] + rule_content_ids = [ + content_id_generator.generate( + translation_domain.ContentType.RULE, extra_prefix='input') + for _ in range(3)] question_state_data.interaction.answer_groups = [ state_domain.AnswerGroup.from_dict({ 'outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': feedback_content_ids[0], 'html': '

    Feedback

    ' }, 'labelled_as_correct': True, @@ -734,8 +868,8 @@ def test_untag_deleted_misconceptions_on_no_change_to_skill(self): 'rule_specs': [{ 'inputs': { 'x': { - 'contentId': 'rule_input_3', - 'normalizedStrSet': ['Test'] + 'contentId': rule_content_ids[0], + 'normalizedStrSet': ['Test0'] } }, 'rule_type': 'Contains' @@ -746,8 +880,9 @@ def test_untag_deleted_misconceptions_on_no_change_to_skill(self): state_domain.AnswerGroup.from_dict({ 'outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_2', + 'content_id': feedback_content_ids[1], 'html': '

    Feedback

    ' }, 'labelled_as_correct': True, @@ -758,8 +893,8 @@ def test_untag_deleted_misconceptions_on_no_change_to_skill(self): 'rule_specs': [{ 'inputs': { 'x': { - 'contentId': 'rule_input_4', - 'normalizedStrSet': ['Test'] + 'contentId': rule_content_ids[1], + 'normalizedStrSet': ['Test1'] } }, 'rule_type': 'Contains' @@ -770,8 +905,9 @@ def test_untag_deleted_misconceptions_on_no_change_to_skill(self): state_domain.AnswerGroup.from_dict({ 'outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_0', + 'content_id': feedback_content_ids[2], 'html': '

    Feedback

    ' }, 'labelled_as_correct': True, @@ -782,8 +918,8 @@ def test_untag_deleted_misconceptions_on_no_change_to_skill(self): 'rule_specs': [{ 'inputs': { 'x': { - 'contentId': 'rule_input_5', - 'normalizedStrSet': ['Test'] + 'contentId': rule_content_ids[2], + 'normalizedStrSet': ['Test2'] } }, 'rule_type': 'Contains' @@ -792,23 +928,11 @@ def test_untag_deleted_misconceptions_on_no_change_to_skill(self): 'tagged_skill_misconception_id': 'skillid12345-2' }) ] - question_state_data.written_translations.translations_mapping.update({ - 'feedback_0': {}, - 'feedback_1': {}, - 'feedback_2': {}, - 'rule_input_3': {}, - 'rule_input_4': {}, - 'rule_input_5': {} - }) question_state_data.recorded_voiceovers.voiceovers_mapping.update({ - 'feedback_0': {}, - 'feedback_1': {}, - 'feedback_2': {}, - 'rule_input_3': {}, - 'rule_input_4': {}, - 'rule_input_5': {} + content_id: {} for content_id in ( + feedback_content_ids + rule_content_ids) }) - question_state_data.next_content_id_index = 5 + inapplicable_skill_misconception_ids = [ 'skillid12345-3', 'skillid12345-4' @@ -816,6 +940,7 @@ def test_untag_deleted_misconceptions_on_no_change_to_skill(self): self.question = self.save_new_question( self.question_id, self.editor_id, question_state_data, ['skillid12345'], + content_id_generator.next_content_id_index, inapplicable_skill_misconception_ids=( inapplicable_skill_misconception_ids)) question_services.create_new_question_skill_link( @@ -848,7 +973,9 @@ def test_untag_deleted_misconceptions_on_no_change_to_skill(self): inapplicable_skill_misconception_ids) self.assertEqual(actual_misconception_ids, expected_misconception_ids) - def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): + def test_untag_deleted_misconceptions_correctly_on_updating_skill( + self + ) -> None: misconceptions = [ skill_domain.Misconception( 0, 'misconception-name', '

    description

    ', @@ -872,13 +999,24 @@ def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): misconceptions=misconceptions) self.question_id = question_services.get_new_question_id() - question_state_data = self._create_valid_question_data('state name') + content_id_generator = translation_domain.ContentIdGenerator() + question_state_data = self._create_valid_question_data( + 'state name', content_id_generator) + feedback_content_ids = [ + content_id_generator.generate( + translation_domain.ContentType.FEEDBACK) + for _ in range(3)] + rule_content_ids = [ + content_id_generator.generate( + translation_domain.ContentType.RULE, extra_prefix='input') + for _ in range(3)] question_state_data.interaction.answer_groups = [ state_domain.AnswerGroup.from_dict({ 'outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': feedback_content_ids[0], 'html': '

    Feedback

    ' }, 'labelled_as_correct': True, @@ -889,8 +1027,8 @@ def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): 'rule_specs': [{ 'inputs': { 'x': { - 'contentId': 'rule_input_3', - 'normalizedStrSet': ['Test'] + 'contentId': rule_content_ids[0], + 'normalizedStrSet': ['Test0'] } }, 'rule_type': 'Contains' @@ -901,8 +1039,9 @@ def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): state_domain.AnswerGroup.from_dict({ 'outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_2', + 'content_id': feedback_content_ids[1], 'html': '

    Feedback

    ' }, 'labelled_as_correct': True, @@ -913,8 +1052,8 @@ def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): 'rule_specs': [{ 'inputs': { 'x': { - 'contentId': 'rule_input_4', - 'normalizedStrSet': ['Test'] + 'contentId': rule_content_ids[1], + 'normalizedStrSet': ['Test1'] } }, 'rule_type': 'Contains' @@ -925,8 +1064,9 @@ def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): state_domain.AnswerGroup.from_dict({ 'outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_0', + 'content_id': feedback_content_ids[2], 'html': '

    Feedback

    ' }, 'labelled_as_correct': True, @@ -937,8 +1077,8 @@ def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): 'rule_specs': [{ 'inputs': { 'x': { - 'contentId': 'rule_input_5', - 'normalizedStrSet': ['Test'] + 'contentId': rule_content_ids[2], + 'normalizedStrSet': ['Test2'] } }, 'rule_type': 'Contains' @@ -947,23 +1087,10 @@ def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): 'tagged_skill_misconception_id': 'skillid12345-2' }) ] - question_state_data.written_translations.translations_mapping.update({ - 'feedback_0': {}, - 'feedback_1': {}, - 'feedback_2': {}, - 'rule_input_3': {}, - 'rule_input_4': {}, - 'rule_input_5': {} - }) question_state_data.recorded_voiceovers.voiceovers_mapping.update({ - 'feedback_0': {}, - 'feedback_1': {}, - 'feedback_2': {}, - 'rule_input_3': {}, - 'rule_input_4': {}, - 'rule_input_5': {} + content_id: {} for content_id in ( + feedback_content_ids + rule_content_ids) }) - question_state_data.next_content_id_index = 5 inapplicable_skill_misconception_ids = [ 'skillid12345-3', 'skillid12345-4' @@ -971,6 +1098,7 @@ def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): self.question = self.save_new_question( self.question_id, self.editor_id, question_state_data, ['skillid12345'], + content_id_generator.next_content_id_index, inapplicable_skill_misconception_ids=( inapplicable_skill_misconception_ids)) question_services.create_new_question_skill_link( @@ -1030,10 +1158,11 @@ def test_untag_deleted_misconceptions_correctly_on_updating_skill(self): class QuestionMigrationTests(test_utils.GenericTestBase): - def test_migrate_question_state_from_v29_to_latest(self): + def test_migrate_question_state_from_v29_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1054,15 +1183,25 @@ def test_migrate_question_state_from_v29_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -1071,8 +1210,9 @@ def test_migrate_question_state_from_v29_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -1122,10 +1262,11 @@ def test_migrate_question_state_from_v29_to_latest(self): answer_groups = question.question_state_data.interaction.answer_groups self.assertEqual(answer_groups[0].tagged_skill_misconception_id, None) - def test_migrate_question_state_from_v30_to_latest(self): + def test_migrate_question_state_from_v30_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1146,7 +1287,7 @@ def test_migrate_question_state_from_v30_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { @@ -1157,12 +1298,20 @@ def test_migrate_question_state_from_v30_to_latest(self): 'file_size_bytes': 100, 'needs_update': False } - } + }, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -1171,8 +1320,9 @@ def test_migrate_question_state_from_v30_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -1223,19 +1373,28 @@ def test_migrate_question_state_from_v30_to_latest(self): question.question_state_data .recorded_voiceovers.to_dict(), { 'voiceovers_mapping': { - 'ca_placeholder_0': {}, - 'content': { + 'ca_placeholder_6': {}, + 'content_0': { 'en': { 'filename': 'test.mp3', 'file_size_bytes': 100, 'needs_update': False, - 'duration_secs': 0.0}}, - 'rule_input_1': {}}}) + 'duration_secs': 0.0 + } + }, + 'rule_input_3': {}, + 'hint_4': {}, + 'default_outcome_1': {}, + 'feedback_2': {}, + 'solution_5': {} + } + }) - def test_migrate_question_state_from_v31_to_latest(self): + def test_migrate_question_state_from_v31_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1256,15 +1415,25 @@ def test_migrate_question_state_from_v31_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -1273,8 +1442,9 @@ def test_migrate_question_state_from_v31_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -1288,7 +1458,7 @@ def test_migrate_question_state_from_v31_to_latest(self): 'html': 'Hint 1' } }], - 'solution': {}, + 'solution': None, 'id': 'SetInput' }, 'param_changes': [], @@ -1315,14 +1485,20 @@ def test_migrate_question_state_from_v31_to_latest(self): feconf.CURRENT_STATE_SCHEMA_VERSION) cust_args = question.question_state_data.interaction.customization_args + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance( + cust_args['buttonText'].value, + state_domain.SubtitledUnicode + ) self.assertEqual( cust_args['buttonText'].value.unicode_str, 'Add item') - def test_migrate_question_state_from_v32_to_latest(self): + def test_migrate_question_state_from_v32_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1343,15 +1519,25 @@ def test_migrate_question_state_from_v32_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -1364,8 +1550,9 @@ def test_migrate_question_state_from_v32_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -1379,7 +1566,7 @@ def test_migrate_question_state_from_v32_to_latest(self): 'html': 'Hint 1' } }], - 'solution': {}, + 'solution': None, 'id': 'MultipleChoiceInput' }, 'param_changes': [], @@ -1409,13 +1596,15 @@ def test_migrate_question_state_from_v32_to_latest(self): self.assertEqual(cust_args['choices'].value, []) self.assertEqual(cust_args['showChoicesInShuffledOrder'].value, True) - def test_migrate_question_state_from_v33_to_latest(self): + def test_migrate_question_state_from_v33_to_latest(self) -> None: feedback_html_content = ( - '

    Feedback

    ') + '

    Value

    ') answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': feedback_html_content @@ -1436,15 +1625,25 @@ def test_migrate_question_state_from_v33_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -1460,8 +1659,9 @@ def test_migrate_question_state_from_v33_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -1475,7 +1675,7 @@ def test_migrate_question_state_from_v33_to_latest(self): 'html': 'Hint 1' } }], - 'solution': {}, + 'solution': None, 'id': 'MultipleChoiceInput' }, 'param_changes': [], @@ -1483,10 +1683,10 @@ def test_migrate_question_state_from_v33_to_latest(self): 'classifier_model_id': None } expected_feeedback_html_content = ( - '

    Feedback

    ') + '

    Value

    ' + '') question_model = ( question_models.QuestionModel( id='question_id', @@ -1514,10 +1714,11 @@ def test_migrate_question_state_from_v33_to_latest(self): migrated_answer_group.outcome.feedback.html, expected_feeedback_html_content) - def test_migrate_question_state_from_v34_to_latest(self): + def test_migrate_question_state_from_v34_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1543,15 +1744,25 @@ def test_migrate_question_state_from_v34_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -1560,8 +1771,9 @@ def test_migrate_question_state_from_v34_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -1583,7 +1795,7 @@ def test_migrate_question_state_from_v34_to_latest(self): 'answer_is_exclusive': False, 'explanation': { 'html': 'Solution explanation', - 'content_id': 'content_2' + 'content_id': 'explanation_1' } }, 'id': 'MathExpressionInput' @@ -1624,6 +1836,7 @@ def test_migrate_question_state_from_v34_to_latest(self): answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1649,15 +1862,25 @@ def test_migrate_question_state_from_v34_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -1666,8 +1889,9 @@ def test_migrate_question_state_from_v34_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -1689,7 +1913,7 @@ def test_migrate_question_state_from_v34_to_latest(self): 'answer_is_exclusive': False, 'explanation': { 'html': 'Solution explanation', - 'content_id': 'content_2' + 'content_id': 'explanation_1' } }, 'id': 'MathExpressionInput' @@ -1723,7 +1947,7 @@ def test_migrate_question_state_from_v34_to_latest(self): self.assertEqual( question.question_state_data.interaction.id, 'AlgebraicExpressionInput') - self.assertEqual(len(answer_groups[0].rule_specs), 1) + self.assertEqual(len(answer_groups[0].rule_specs), 2) self.assertEqual( answer_groups[0].rule_specs[0].rule_type, 'MatchesExactlyWith') self.assertEqual( @@ -1732,6 +1956,7 @@ def test_migrate_question_state_from_v34_to_latest(self): answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1752,15 +1977,25 @@ def test_migrate_question_state_from_v34_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -1769,8 +2004,9 @@ def test_migrate_question_state_from_v34_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -1792,7 +2028,7 @@ def test_migrate_question_state_from_v34_to_latest(self): 'answer_is_exclusive': False, 'explanation': { 'html': 'Solution explanation', - 'content_id': 'content_2' + 'content_id': 'explanation_1' } }, 'id': 'MathExpressionInput' @@ -1835,6 +2071,7 @@ def test_migrate_question_state_from_v34_to_latest(self): answer_groups_list = [{ 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1846,7 +2083,7 @@ def test_migrate_question_state_from_v34_to_latest(self): }, 'rule_specs': [{ 'inputs': { - 'x': 'x+y' + 'x': 'x=y' }, 'rule_type': 'IsMathematicallyEquivalentTo' }], @@ -1855,6 +2092,7 @@ def test_migrate_question_state_from_v34_to_latest(self): }, { 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_2', 'html': '

    Feedback

    ' @@ -1875,12 +2113,12 @@ def test_migrate_question_state_from_v34_to_latest(self): }] question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { 'voiceovers_mapping': { - 'content_1': {}, + 'content': {}, 'feedback_1': {}, 'feedback_2': {}, 'feedback_3': {} @@ -1888,7 +2126,7 @@ def test_migrate_question_state_from_v34_to_latest(self): }, 'written_translations': { 'translations_mapping': { - 'content_1': {}, + 'content': {}, 'feedback_1': {}, 'feedback_2': {}, 'feedback_3': {} @@ -1900,6 +2138,7 @@ def test_migrate_question_state_from_v34_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_3', 'html': 'Correct Answer' @@ -1941,29 +2180,32 @@ def test_migrate_question_state_from_v34_to_latest(self): answer_groups = question.question_state_data.interaction.answer_groups self.assertEqual( question.question_state_data.interaction.id, - 'AlgebraicExpressionInput') + 'MathEquationInput') self.assertEqual(len(answer_groups), 1) self.assertEqual( answer_groups[0].rule_specs[0].rule_type, 'MatchesExactlyWith') self.assertEqual( - answer_groups[0].rule_specs[0].inputs, {'x': 'x+y'}) + answer_groups[0].rule_specs[0].inputs, {'x': 'x=y', 'y': 'both'}) state_data = question.question_state_data self.assertEqual(sorted( state_data.recorded_voiceovers.voiceovers_mapping.keys()), [ - 'content_1', 'feedback_1', 'feedback_3']) - self.assertEqual(sorted( - state_data.written_translations.translations_mapping.keys()), [ - 'content_1', 'feedback_1', 'feedback_3']) + 'content_0', 'default_outcome_1', 'feedback_2']) - def test_migrate_question_state_from_v35_to_latest(self): + def test_migrate_question_state_from_v35_to_latest(self) -> None: # Test restructuring of written_translations. 
question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { @@ -1981,8 +2223,9 @@ def test_migrate_question_state_from_v35_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -1991,7 +2234,7 @@ def test_migrate_question_state_from_v35_to_latest(self): 'missing_prerequisite_skill_id': None }, 'hints': [], - 'solution': {}, + 'solution': None, 'id': None }, 'param_changes': [], @@ -2020,36 +2263,29 @@ def test_migrate_question_state_from_v35_to_latest(self): question.question_state_data_schema_version, feconf.CURRENT_STATE_SCHEMA_VERSION) - migrated_translations_mapping = ( - question - .question_state_data.written_translations.to_dict()) - self.assertEqual( - migrated_translations_mapping, - { - 'translations_mapping': { - 'explanation': { - 'en': { - 'data_format': 'html', - 'translation': '

    test

    ', - 'needs_update': True - } - } - } - }) - # Test migration of PencilCodeEditor customization argument from # intial_code to intialCode. question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -2062,8 +2298,9 @@ def test_migrate_question_state_from_v35_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2072,7 +2309,7 @@ def test_migrate_question_state_from_v35_to_latest(self): 'missing_prerequisite_skill_id': None }, 'hints': [], - 'solution': {}, + 'solution': None, 'id': 'PencilCodeEditor' }, 'param_changes': [], @@ -2113,15 +2350,25 @@ def test_migrate_question_state_from_v35_to_latest(self): # Test population of default value of SubtitledHtml list. 
question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -2130,8 +2377,9 @@ def test_migrate_question_state_from_v35_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2140,7 +2388,7 @@ def test_migrate_question_state_from_v35_to_latest(self): 'missing_prerequisite_skill_id': None }, 'hints': [], - 'solution': {}, + 'solution': None, 'id': 'MultipleChoiceInput' }, 'param_changes': [], @@ -2174,7 +2422,7 @@ def test_migrate_question_state_from_v35_to_latest(self): migrated_ca, { 'choices': { - 'value': [{'content_id': 'ca_choices_0', 'html': ''}] + 'value': [{'content_id': 'ca_choices_2', 'html': ''}] }, 'showChoicesInShuffledOrder': {'value': True} }) @@ -2182,11 +2430,17 @@ def test_migrate_question_state_from_v35_to_latest(self): # Test migration of html list to SubtitledHtml list. 
question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': {} @@ -2201,8 +2455,9 @@ def test_migrate_question_state_from_v35_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2211,7 +2466,7 @@ def test_migrate_question_state_from_v35_to_latest(self): 'missing_prerequisite_skill_id': None }, 'hints': [], - 'solution': {}, + 'solution': None, 'id': 'MultipleChoiceInput' }, 'param_changes': [], @@ -2246,39 +2501,54 @@ def test_migrate_question_state_from_v35_to_latest(self): { 'choices': { 'value': [{ - 'content_id': 'ca_choices_0', + 'content_id': 'ca_choices_2', 'html': 'one' }, { - 'content_id': 'ca_choices_1', + 'content_id': 'ca_choices_3', 'html': 'two' }, { - 'content_id': 'ca_choices_2', + 'content_id': 'ca_choices_4', 'html': 'three' }] }, 'showChoicesInShuffledOrder': {'value': True} }) - def test_migrate_question_state_from_v36_to_latest(self): + def test_migrate_question_state_from_v36_to_latest(self) -> None: # Test restructuring of written_translations. 
question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {}, + 'ca_placeholder_0': {}, + } }, 'written_translations': { - 'translations_mapping': {} + 'translations_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {}, + 'ca_placeholder_0': {}, + } }, 'interaction': { 'answer_groups': [{ 'outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', - 'html': 'Correct Answer' + 'content_id': 'default_outcome_2', + 'html': 'Correct Ans2er' }, 'param_changes': [], 'refresher_exploration_id': None, @@ -2304,8 +2574,9 @@ def test_migrate_question_state_from_v36_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2314,7 +2585,7 @@ def test_migrate_question_state_from_v36_to_latest(self): 'missing_prerequisite_skill_id': None }, 'hints': [], - 'solution': {}, + 'solution': None, 'id': 'TextInput' }, 'next_content_id_index': 2, @@ -2353,16 +2624,17 @@ def test_migrate_question_state_from_v36_to_latest(self): migrated_rule_spec, { 'inputs': {'x': { - 'contentId': 'rule_input_2', + 'contentId': 'rule_input_3', 'normalizedStrSet': ['test'] }}, 'rule_type': 'Equals' }) - def test_migrate_question_state_from_v37_to_latest(self): + def test_migrate_question_state_from_v37_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -2383,15 +2655,25 @@ def test_migrate_question_state_from_v37_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -2400,8 +2682,9 @@ def test_migrate_question_state_from_v37_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2415,7 +2698,7 @@ def test_migrate_question_state_from_v37_to_latest(self): 'html': 'Hint 1' } }], - 'solution': {}, + 'solution': None, 'id': 'AlgebraicExpressionInput' }, 'next_content_id_index': 3, @@ -2444,12 +2727,13 @@ def test_migrate_question_state_from_v37_to_latest(self): cust_args = question.question_state_data.interaction.customization_args self.assertEqual( - cust_args['customOskLetters'].value, ['x', 'α', 'β']) + cust_args['allowedVariables'].value, ['x', 'α', 'β']) - def test_migrate_question_state_from_v38_to_latest(self): + def test_migrate_question_state_from_v38_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -2470,15 +2754,25 @@ def test_migrate_question_state_from_v38_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -2487,8 +2781,9 @@ def test_migrate_question_state_from_v38_to_latest(self): 'customization_args': {}, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2502,7 +2797,7 @@ def test_migrate_question_state_from_v38_to_latest(self): 'html': 'Hint 1' } }], - 'solution': {}, + 'solution': None, 'id': 'NumericExpressionInput' }, 'next_content_id_index': 3, @@ -2530,14 +2825,22 @@ def test_migrate_question_state_from_v38_to_latest(self): feconf.CURRENT_STATE_SCHEMA_VERSION) cust_args = question.question_state_data.interaction.customization_args + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance( + cust_args['placeholder'].value, + state_domain.SubtitledUnicode + ) self.assertEqual( cust_args['placeholder'].value.unicode_str, 'Type an expression here, using only numbers.') - def test_migrate_question_state_with_text_input_from_v40_to_latest(self): + def test_migrate_question_state_with_text_input_from_v40_to_latest( + self + ) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -2558,15 +2861,27 @@ def test_migrate_question_state_with_text_input_from_v40_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_placeholder_0': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_placeholder_0': {}, + 'hint_1': {} } }, 'interaction': { @@ -2583,8 +2898,9 @@ def test_migrate_question_state_with_text_input_from_v40_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2593,7 +2909,7 @@ def test_migrate_question_state_with_text_input_from_v40_to_latest(self): 'missing_prerequisite_skill_id': None }, 'hints': [], - 'solution': {}, + 'solution': None, 'id': 'TextInput' }, 'next_content_id_index': 4, @@ -2620,20 +2936,24 @@ def test_migrate_question_state_with_text_input_from_v40_to_latest(self): question.question_state_data_schema_version, feconf.CURRENT_STATE_SCHEMA_VERSION) - answer_group = question.question_state_data.interaction.answer_groups[0] - rule_spec = answer_group.rule_specs[0] + answer_group_object = ( + question.question_state_data.interaction.answer_groups[0] + ) + rule_spec = answer_group_object.rule_specs[0] self.assertEqual( rule_spec.inputs['x'], { - 'contentId': 'rule_input_4', + 'contentId': 'rule_input_3', 'normalizedStrSet': ['Test'] }) - self.assertEqual(question.question_state_data.next_content_id_index, 5) - def test_migrate_question_state_with_set_input_from_v40_to_latest(self): + def test_migrate_question_state_with_set_input_from_v40_to_latest( + self + ) -> None: 
answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -2654,15 +2974,27 @@ def test_migrate_question_state_with_set_input_from_v40_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_buttonText_0': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'ca_buttonText_0': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -2678,8 +3010,9 @@ def test_migrate_question_state_with_set_input_from_v40_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2688,7 +3021,7 @@ def test_migrate_question_state_with_set_input_from_v40_to_latest(self): 'missing_prerequisite_skill_id': None }, 'hints': [], - 'solution': {}, + 'solution': None, 'id': 'SetInput' }, 'next_content_id_index': 4, @@ -2715,20 +3048,24 @@ def test_migrate_question_state_with_set_input_from_v40_to_latest(self): question.question_state_data_schema_version, feconf.CURRENT_STATE_SCHEMA_VERSION) - answer_group = question.question_state_data.interaction.answer_groups[0] - rule_spec = answer_group.rule_specs[0] + answer_group_object = ( + question.question_state_data.interaction.answer_groups[0] + ) + rule_spec = answer_group_object.rule_specs[0] self.assertEqual( rule_spec.inputs['x'], { - 'contentId': 'rule_input_4', + 'contentId': 'rule_input_3', 'unicodeStrSet': ['Test'] }) - self.assertEqual(question.question_state_data.next_content_id_index, 5) - def test_migrate_question_state_from_v41_with_item_selection_input_interaction_to_latest(self): # pylint: disable=line-too-long + def 
test_migrate_question_state_from_v41_with_item_selection_input_interaction_to_latest( # pylint: disable=line-too-long + self + ) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -2749,15 +3086,29 @@ def test_migrate_question_state_from_v41_with_item_selection_input_interaction_t } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_choices_2': {}, + 'ca_choices_3': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_choices_2': {}, + 'ca_choices_3': {}, + 'hint_1': {} } }, 'interaction': { @@ -2778,8 +3129,9 @@ def test_migrate_question_state_from_v41_with_item_selection_input_interaction_t }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2792,7 +3144,7 @@ def test_migrate_question_state_from_v41_with_item_selection_input_interaction_t 'answer_is_exclusive': True, 'correct_answer': ['

    Choice 1

    '], 'explanation': { - 'content_id': 'solution', + 'content_id': 'explanation_1', 'html': 'This is solution for state1' } }, @@ -2822,19 +3174,24 @@ def test_migrate_question_state_from_v41_with_item_selection_input_interaction_t question.question_state_data_schema_version, feconf.CURRENT_STATE_SCHEMA_VERSION) - answer_group = question.question_state_data.interaction.answer_groups[0] + answer_group_object = question.question_state_data.interaction.answer_groups[0] solution = question.question_state_data.interaction.solution - rule_spec = answer_group.rule_specs[0] + # Ruling out the possibility of None for mypy type checking. + assert solution is not None + rule_spec = answer_group_object.rule_specs[0] self.assertEqual( rule_spec.inputs['x'], - ['ca_choices_2', 'ca_choices_3']) + ['ca_choices_4', 'ca_choices_5']) self.assertEqual( - solution.correct_answer, ['ca_choices_2']) + solution.correct_answer, ['ca_choices_4']) - def test_migrate_question_state_from_v41_with_drag_and_drop_sort_input_interaction_to_latest(self): # pylint: disable=line-too-long + def test_migrate_question_state_from_v41_with_drag_and_drop_sort_input_interaction_to_latest( # pylint: disable=line-too-long + self + ) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -2872,15 +3229,29 @@ def test_migrate_question_state_from_v41_with_drag_and_drop_sort_input_interacti } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_choices_2': {}, + 'ca_choices_3': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_choices_2': {}, + 'ca_choices_3': {}, + 'hint_1': {} } }, 'interaction': { @@ -2900,8 +3271,9 @@ def test_migrate_question_state_from_v41_with_drag_and_drop_sort_input_interacti }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -2914,7 +3286,7 @@ def test_migrate_question_state_from_v41_with_drag_and_drop_sort_input_interacti 'answer_is_exclusive': True, 'correct_answer': [['

    Choice 1

    ', '

    Choice 2

    ']], 'explanation': { - 'content_id': 'solution', + 'content_id': 'explanation_1', 'html': 'This is solution for state1' } }, @@ -2944,27 +3316,31 @@ def test_migrate_question_state_from_v41_with_drag_and_drop_sort_input_interacti question.question_state_data_schema_version, feconf.CURRENT_STATE_SCHEMA_VERSION) - answer_group = question.question_state_data.interaction.answer_groups[0] + answer_group_object = ( + question.question_state_data.interaction.answer_groups[0]) solution = question.question_state_data.interaction.solution + # Ruling out the possibility of None for mypy type checking. + assert solution is not None self.assertEqual( - answer_group.rule_specs[0].inputs['x'], - [['ca_choices_2', 'ca_choices_3', 'invalid_content_id']]) + answer_group_object.rule_specs[0].inputs['x'], + [['ca_choices_4', 'ca_choices_5', 'invalid_content_id']]) self.assertEqual( - answer_group.rule_specs[1].inputs['x'], - [['ca_choices_2']]) + answer_group_object.rule_specs[1].inputs['x'], + [['ca_choices_4']]) self.assertEqual( - answer_group.rule_specs[2].inputs['x'], - 'ca_choices_2') + answer_group_object.rule_specs[2].inputs['x'], + 'ca_choices_4') self.assertEqual( - answer_group.rule_specs[3].inputs, - {'x': 'ca_choices_2', 'y': 'ca_choices_3'}) + answer_group_object.rule_specs[3].inputs, + {'x': 'ca_choices_4', 'y': 'ca_choices_5'}) self.assertEqual( - solution.correct_answer, [['ca_choices_2', 'ca_choices_3']]) + solution.correct_answer, [['ca_choices_4', 'ca_choices_5']]) - def test_migrate_question_state_from_v42_to_latest(self): + def test_migrate_question_state_from_v42_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -2985,15 +3361,27 @@ def test_migrate_question_state_from_v42_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_placeholder_0': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_placeholder_0': {}, + 'hint_1': {} } }, 'interaction': { @@ -3010,8 +3398,9 @@ def test_migrate_question_state_from_v42_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -3025,7 +3414,7 @@ def test_migrate_question_state_from_v42_to_latest(self): 'html': 'Hint 1' } }], - 'solution': {}, + 'solution': None, 'id': 'NumericExpressionInput' }, 'next_content_id_index': 3, @@ -3056,10 +3445,11 @@ def test_migrate_question_state_from_v42_to_latest(self): self.assertEqual( cust_args['useFractionForDivision'].value, True) - def test_migrate_question_state_from_v43_to_latest(self): + def test_migrate_question_state_from_v43_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -3080,15 +3470,27 @@ def test_migrate_question_state_from_v43_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_placeholder_0': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'ca_placeholder_0': {}, + 'hint_1': {} } }, 'interaction': { @@ -3105,8 +3507,9 @@ def test_migrate_question_state_from_v43_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -3115,7 +3518,7 @@ def test_migrate_question_state_from_v43_to_latest(self): 'missing_prerequisite_skill_id': None }, 'hints': [], - 'solution': {}, + 'solution': None, 'id': 'TextInput' }, 'next_content_id_index': 4, @@ -3146,10 +3549,11 @@ def test_migrate_question_state_from_v43_to_latest(self): self.assertEqual( linked_skill_id, None) - def test_migrate_question_state_from_v44_to_latest(self): + def test_migrate_question_state_from_v44_to_latest(self) -> None: answer_group = { 'outcome': { 'dest': 'abc', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -3170,15 +3574,25 @@ def test_migrate_question_state_from_v44_to_latest(self): } question_state_dict = { 'content': { - 'content_id': 'content_1', + 'content_id': 'content', 'html': 'Question 1' }, 'recorded_voiceovers': { - 'voiceovers_mapping': {} + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } }, 'written_translations': { 'translations_mapping': { - 'explanation': {} + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} } }, 'interaction': { @@ -3192,8 +3606,9 @@ def test_migrate_question_state_from_v44_to_latest(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'feedback_1', + 'content_id': 'default_outcome_2', 'html': 'Correct Answer' }, 'param_changes': [], @@ -3202,7 +3617,7 @@ def test_migrate_question_state_from_v44_to_latest(self): 'missing_prerequisite_skill_id': None }, 'hints': [], - 'solution': {}, + 'solution': None, 'id': 'NumericInput' }, 'next_content_id_index': 4, @@ -3234,3 +3649,156 @@ def test_migrate_question_state_from_v44_to_latest(self): cust_args = question.question_state_data.interaction.customization_args self.assertEqual( cust_args['requireNonnegativeInput'].value, False) + + def test_migrate_question_state_from_v45_to_latest(self) -> None: + answer_group1 = { + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'rule_specs': [{ + 'inputs': { + 'x': 'a - b' + }, + 'rule_type': 'ContainsSomeOf' + }, { + 'inputs': { + 'x': 'a - b' + }, + 'rule_type': 'MatchesExactlyWith' + }, { + 'inputs': { + 'x': 'a - b' + }, + 'rule_type': 'OmitsSomeOf' + }, { + 'inputs': { + 'x': 'a - b', + 'y': [] + }, + 'rule_type': 'MatchesWithGeneralForm' + }], + 'training_data': [], + 'tagged_skill_misconception_id': None + } + answer_group2 = { + 'outcome': { + 'dest': 'abc', + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'feedback_2', + 'html': '

    Feedback

    ' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'rule_specs': [{ + 'inputs': { + 'x': 'a - b' + }, + 'rule_type': 'ContainsSomeOf' + }, { + 'inputs': { + 'x': 'a - b', + 'y': [] + }, + 'rule_type': 'MatchesWithGeneralForm' + }], + 'training_data': [], + 'tagged_skill_misconception_id': None + } + question_state_dict = { + 'content': { + 'content_id': 'content', + 'html': 'Question 1' + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } + }, + 'written_translations': { + 'translations_mapping': { + 'content': {}, + 'explanation_1': {}, + 'feedback_1': {}, + 'default_outcome_2': {}, + 'hint_1': {} + } + }, + 'interaction': { + 'answer_groups': [answer_group1, answer_group2], + 'confirmed_unclassified_answers': [], + 'customization_args': { + 'customOskLetters': { + 'value': ['a', 'b'] + }, + 'useFractionForDivision': { + 'value': False + } + }, + 'default_outcome': { + 'dest': None, + 'dest_if_really_stuck': None, + 'feedback': { + 'content_id': 'default_outcome_2', + 'html': 'Correct Answer' + }, + 'param_changes': [], + 'refresher_exploration_id': None, + 'labelled_as_correct': True, + 'missing_prerequisite_skill_id': None + }, + 'hints': [], + 'solution': None, + 'id': 'AlgebraicExpressionInput' + }, + 'next_content_id_index': 4, + 'param_changes': [], + 'solicit_answer_details': False, + 'card_is_checkpoint': False, + 'linked_skill_id': None, + 'classifier_model_id': None + } + question_model = question_models.QuestionModel( + id='question_id', + question_state_data=question_state_dict, + language_code='en', + version=0, + linked_skill_ids=['skill_id'], + question_state_data_schema_version=45) + commit_cmd = question_domain.QuestionChange({ + 'cmd': question_domain.CMD_CREATE_NEW + }) + commit_cmd_dicts = [commit_cmd.to_dict()] + question_model.commit( + 
'user_id_admin', 'question model created', commit_cmd_dicts) + + question = question_fetchers.get_question_from_model(question_model) + self.assertEqual( + question.question_state_data_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + answer_groups = question.question_state_data.interaction.answer_groups + self.assertEqual(len(answer_groups), 1) + rule_specs = answer_groups[0].rule_specs + self.assertEqual(len(rule_specs), 1) + self.assertEqual(rule_specs[0].rule_type, 'MatchesExactlyWith') + + cust_args = question.question_state_data.interaction.customization_args + self.assertNotIn('customOskLetters', cust_args) + self.assertIn('allowedVariables', cust_args) diff --git a/core/domain/rating_services.py b/core/domain/rating_services.py index 634f2dd71e0e..da5bf62b616f 100644 --- a/core/domain/rating_services.py +++ b/core/domain/rating_services.py @@ -26,14 +26,25 @@ from core.domain import exp_services from core.platform import models +from typing import Dict, Optional + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import transaction_services + from mypy_imports import user_models + (exp_models, user_models,) = models.Registry.import_models([ - models.NAMES.exploration, models.NAMES.user]) + models.Names.EXPLORATION, models.Names.USER]) transaction_services = models.Registry.import_transaction_services() ALLOWED_RATINGS = [1, 2, 3, 4, 5] -def assign_rating_to_exploration(user_id, exploration_id, new_rating): +def assign_rating_to_exploration( + user_id: str, + exploration_id: str, + new_rating: int +) -> None: """Records the rating awarded by the user to the exploration in both the user-specific data and exploration summary. @@ -45,6 +56,11 @@ def assign_rating_to_exploration(user_id, exploration_id, new_rating): assigned a rating. new_rating: int. Value of assigned rating, should be between 1 and 5 inclusive. + + Raises: + ValueError. The assigned rating is not of type int. + ValueError. 
The assigned rating is lower than 1 or higher than 5. + ValueError. The exploration does not exist. """ if not isinstance(new_rating, int): @@ -60,14 +76,14 @@ def assign_rating_to_exploration(user_id, exploration_id, new_rating): raise ValueError('Invalid exploration id %s' % exploration_id) @transaction_services.run_in_transaction_wrapper - def _update_user_rating_transactional(): + def _update_user_rating_transactional() -> Optional[int]: """Updates the user rating of the exploration. Returns the old rating before updation. """ exp_user_data_model = user_models.ExplorationUserDataModel.get( user_id, exploration_id) if exp_user_data_model: - old_rating = exp_user_data_model.rating + old_rating: Optional[int] = exp_user_data_model.rating else: old_rating = None exp_user_data_model = user_models.ExplorationUserDataModel.create( @@ -98,7 +114,9 @@ def _update_user_rating_transactional(): exp_services.save_exploration_summary(exploration_summary) -def get_user_specific_rating_for_exploration(user_id, exploration_id): +def get_user_specific_rating_for_exploration( + user_id: str, exploration_id: str +) -> Optional[int]: """Fetches a rating for the specified exploration from the specified user if one exists. @@ -115,7 +133,9 @@ def get_user_specific_rating_for_exploration(user_id, exploration_id): return exp_user_data_model.rating if exp_user_data_model else None -def get_when_exploration_rated(user_id, exploration_id): +def get_when_exploration_rated( + user_id: str, exploration_id: str +) -> Optional[datetime.datetime]: """Fetches the datetime the exploration was last rated by this user, or None if no rating has been awarded. @@ -135,7 +155,7 @@ def get_when_exploration_rated(user_id, exploration_id): return exp_user_data_model.rated_on if exp_user_data_model else None -def get_overall_ratings_for_exploration(exploration_id): +def get_overall_ratings_for_exploration(exploration_id: str) -> Dict[str, int]: """Fetches all ratings for an exploration. 
Args: diff --git a/core/domain/rating_services_test.py b/core/domain/rating_services_test.py index 5f906211afb4..495f09ac037a 100644 --- a/core/domain/rating_services_test.py +++ b/core/domain/rating_services_test.py @@ -27,17 +27,23 @@ from core.platform import models from core.tests import test_utils -(exp_models,) = models.Registry.import_models([models.NAMES.exploration]) +from typing import Final, Literal, Optional, overload + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) class RatingServicesTests(test_utils.GenericTestBase): """Test functions in rating_services.""" - EXP_ID = 'exp_id' - USER_ID_1 = 'user_1' - USER_ID_2 = 'user_2' + EXP_ID: Final = 'exp_id' + USER_ID_1: Final = 'user_1' + USER_ID_2: Final = 'user_2' - def test_rating_assignation(self): + def test_rating_assignation(self) -> None: """Check ratings are correctly assigned to an exploration.""" exp_services.save_new_exploration( @@ -84,7 +90,7 @@ def test_rating_assignation(self): rating_services.get_overall_ratings_for_exploration(self.EXP_ID), {'1': 0, '2': 0, '3': 0, '4': 2, '5': 0}) - def test_time_of_ratings_recorded(self): + def test_time_of_ratings_recorded(self) -> None: """Check that the time a rating is given is recorded correctly.""" time_allowed_for_computation = datetime.timedelta(seconds=10) @@ -102,17 +108,19 @@ def test_time_of_ratings_recorded(self): second_rating_time = rating_services.get_when_exploration_rated( self.USER_ID_1, self.EXP_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert first_rating_time is not None self.assertLess( datetime.datetime.utcnow(), first_rating_time + time_allowed_for_computation) self.assertLess(first_rating_time, second_rating_time) self.assertLess(second_rating_time, datetime.datetime.utcnow()) - def test_rating_assignations_do_not_conflict(self): - """Check that ratings of different explorations are independant.""" + def test_rating_assignations_do_not_conflict(self) -> None: + """Check that ratings of different explorations are independent.""" - exp_id_a = 'exp_id_A' - exp_id_b = 'exp_id_B' + exp_id_a: Final = 'exp_id_A' + exp_id_b: Final = 'exp_id_B' exp_services.save_new_exploration( exp_id_a, @@ -150,49 +158,82 @@ def test_rating_assignations_do_not_conflict(self): rating_services.get_overall_ratings_for_exploration(exp_id_b), {'1': 0, '2': 0, '3': 1, '4': 0, '5': 1}) - def test_invalid_ratings_are_forbidden(self): - with self.assertRaisesRegexp( + def test_invalid_ratings_are_forbidden(self) -> None: + with self.assertRaisesRegex( ValueError, 'Expected a rating 1-5, received 0' ): rating_services.assign_rating_to_exploration( self.USER_ID_1, self.EXP_ID, 0) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( ValueError, 'Expected a rating 1-5, received 7' ): rating_services.assign_rating_to_exploration( self.USER_ID_1, self.EXP_ID, 7) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( ValueError, 'Expected the rating to be an integer, received 2' ): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. 
rating_services.assign_rating_to_exploration( - self.USER_ID_1, self.EXP_ID, '2') + self.USER_ID_1, self.EXP_ID, '2') # type: ignore[arg-type] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( ValueError, 'Expected the rating to be an integer, received aaa' ): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. rating_services.assign_rating_to_exploration( - self.USER_ID_1, self.EXP_ID, 'aaa') + self.USER_ID_1, self.EXP_ID, 'aaa') # type: ignore[arg-type] - def test_invalid_exploration_ids_are_forbidden(self): - with self.assertRaisesRegexp( + def test_invalid_exploration_ids_are_forbidden(self) -> None: + with self.assertRaisesRegex( Exception, 'Invalid exploration id invalid_id' ): rating_services.assign_rating_to_exploration( self.USER_ID_1, 'invalid_id', 3) - def test_rating_assignation_with_no_exploration_summary_ratings(self): - - def _mock_get_exploration_summary_by_id(exp_id): + def test_rating_assignation_with_no_exploration_summary_ratings( + self + ) -> None: + @overload + def _mock_get_exploration_summary_by_id( + exp_id: str, + ) -> exp_domain.ExplorationSummary: ... + + @overload + def _mock_get_exploration_summary_by_id( + exp_id: str, *, strict: Literal[True] + ) -> exp_domain.ExplorationSummary: ... + + @overload + def _mock_get_exploration_summary_by_id( + exp_id: str, *, strict: Literal[False] + ) -> Optional[exp_domain.ExplorationSummary]: ... 
+ + def _mock_get_exploration_summary_by_id( + exp_id: str, strict: bool = True + ) -> Optional[exp_domain.ExplorationSummary]: """Assign None to exploration summary ratings.""" - exp_summary_model = exp_models.ExpSummaryModel.get(exp_id) - exp_summary_model.ratings = None - return exp_summary_model + exp_summary_model = exp_models.ExpSummaryModel.get( + exp_id, strict=strict + ) + if exp_summary_model: + exp_summary = exp_fetchers.get_exploration_summary_from_model( + exp_summary_model) + else: + return None + exp_summary.ratings = {} + return exp_summary with self.swap( - exp_fetchers, 'get_exploration_summary_by_id', - _mock_get_exploration_summary_by_id): + exp_fetchers, + 'get_exploration_summary_by_id', + _mock_get_exploration_summary_by_id + ): exp_services.save_new_exploration( 'exp_id_a', exp_domain.Exploration.create_default_exploration('exp_id_a')) diff --git a/core/domain/recommendations_services.py b/core/domain/recommendations_services.py index 94c68636a640..eb874a43b3e7 100644 --- a/core/domain/recommendations_services.py +++ b/core/domain/recommendations_services.py @@ -20,18 +20,27 @@ import csv import datetime +import io import json from core import feconf -from core import python_utils +from core.domain import exp_domain from core.domain import rights_domain from core.platform import models -(exp_models, recommendations_models,) = models.Registry.import_models([ - models.NAMES.exploration, models.NAMES.recommendations]) +from typing import Dict, Final, List, Sequence, cast + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import recommendations_models + +(recommendations_models,) = models.Registry.import_models([ + models.Names.RECOMMENDATIONS +]) + # pylint: disable=line-too-long, single-line-pragma -DEFAULT_TOPIC_SIMILARITIES_STRING = ( +DEFAULT_TOPIC_SIMILARITIES_STRING: Final = ( """Architecture,Art,Biology,Business,Chemistry,Computing,Economics,Education,Engineering,Environment,Geography,Government,Hobbies,Languages,Law,Life 
Skills,Mathematics,Medicine,Music,Philosophy,Physics,Programming,Psychology,Puzzles,Reading,Religion,Sport,Statistics,Welcome 1.0,0.9,0.2,0.4,0.1,0.2,0.3,0.3,0.6,0.6,0.4,0.2,0.5,0.5,0.5,0.3,0.5,0.3,0.3,0.5,0.4,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3 0.9,1.0,0.1,0.6,0.1,0.1,0.6,0.6,0.2,0.3,0.3,0.2,0.5,0.7,0.6,0.2,0.3,0.2,0.9,0.7,0.3,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3 @@ -64,7 +73,7 @@ 0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,1.0""") # pylint: enable=line-too-long, single-line-pragma -RECOMMENDATION_CATEGORIES = [ +RECOMMENDATION_CATEGORIES: Final = [ 'Architecture', 'Art', 'Biology', @@ -97,7 +106,7 @@ ] -def get_topic_similarities_dict(): +def get_topic_similarities_dict() -> Dict[str, Dict[str, float]]: """Returns a 2d dict of topic similarities. Creates the default similarity dict if it does not exist yet. """ @@ -108,31 +117,42 @@ def get_topic_similarities_dict(): if topic_similarities_entity is None: topic_similarities_entity = create_default_topic_similarities() - return json.loads(topic_similarities_entity.content) + # TODO(#15610): Here we use cast because the return type of json.loads() + # method is Dict[str, Any] but from the implementation we know it only + # returns the values of type Dict[str, Dict[str, float]. So to narrow down + # the type from Dict[str, Any], we used cast here. + return cast( + Dict[str, Dict[str, float]], + json.loads(topic_similarities_entity.content) + ) -def save_topic_similarities(topic_similarities): +def save_topic_similarities( + topic_similarities: Dict[str, Dict[str, float]] +) -> recommendations_models.TopicSimilaritiesModel: """Stores topic similarities in the datastore. Returns the newly created or changed entity. 
""" - topic_similarities_entity = ( + retrieved_topic_similarities_entity = ( recommendations_models.TopicSimilaritiesModel.get( recommendations_models.TOPIC_SIMILARITIES_ID, strict=False)) - if topic_similarities_entity is None: - topic_similarities_entity = ( - recommendations_models.TopicSimilaritiesModel( - id=recommendations_models.TOPIC_SIMILARITIES_ID, - content=json.dumps(topic_similarities))) - else: - topic_similarities_entity.content = json.dumps(topic_similarities) + topic_similarities_entity = ( + retrieved_topic_similarities_entity + if retrieved_topic_similarities_entity is not None + else recommendations_models.TopicSimilaritiesModel( + id=recommendations_models.TOPIC_SIMILARITIES_ID + ) + ) + topic_similarities_entity.content = json.dumps(topic_similarities) topic_similarities_entity.update_timestamps() topic_similarities_entity.put() return topic_similarities_entity -def create_default_topic_similarities(): +def create_default_topic_similarities( +) -> recommendations_models.TopicSimilaritiesModel: """Creates the default topic similarities, and stores them in the datastore. The keys are names of the default categories, and values are DEFAULT_TOPIC_SIMILARITY if the keys are different and @@ -141,11 +161,11 @@ def create_default_topic_similarities(): Returns the newly created TopicSimilaritiesModel. 
""" - topic_similarities_dict = { + topic_similarities_dict: Dict[str, Dict[str, float]] = { topic: {} for topic in RECOMMENDATION_CATEGORIES } - data = DEFAULT_TOPIC_SIMILARITIES_STRING.splitlines() - data = list(csv.reader(data)) + raw_data = DEFAULT_TOPIC_SIMILARITIES_STRING.splitlines() + data = list(csv.reader(raw_data)) topics_list = data[0] topic_similarities_values = data[1:] for row_ind, topic_1 in enumerate(topics_list): @@ -156,7 +176,7 @@ def create_default_topic_similarities(): return save_topic_similarities(topic_similarities_dict) -def get_topic_similarity(topic_1, topic_2): +def get_topic_similarity(topic_1: str, topic_2: str) -> float: """Gets the similarity between two topics, as a float between 0 and 1. It checks whether the two topics are in the list of default topics. If @@ -177,14 +197,14 @@ def get_topic_similarity(topic_1, topic_2): return feconf.DEFAULT_TOPIC_SIMILARITY -def get_topic_similarities_as_csv(): +def get_topic_similarities_as_csv() -> str: """Downloads all similarities corresponding to the current topics as a string which contains the contents of a csv file. The first line is a list of the current topics. The next lines are an adjacency matrix of similarities. """ - output = python_utils.string_io() + output = io.StringIO() writer = csv.writer(output) writer.writerow(RECOMMENDATION_CATEGORIES) @@ -197,7 +217,7 @@ def get_topic_similarities_as_csv(): return output.getvalue() -def validate_topic_similarities(data): +def validate_topic_similarities(csv_data: str) -> None: """Validates topic similarities given by data, which should be a string of comma-separated values. @@ -208,8 +228,8 @@ def validate_topic_similarities(data): This function checks whether topics belong in the current list of known topics, and if the adjacency matrix is valid. 
""" - data = data.splitlines() - data = list(csv.reader(data)) + raw_data = csv_data.splitlines() + data = list(csv.reader(raw_data)) topics_list = data[0] topics_length = len(topics_list) topic_similarities_values = data[1:] @@ -233,14 +253,15 @@ def validate_topic_similarities(data): for row_ind in range(topics_length): for col_ind in range(topics_length): - similarity = topic_similarities_values[row_ind][col_ind] + similarity_value = topic_similarities_values[row_ind][col_ind] try: - similarity = float(similarity) - except ValueError: + float(similarity_value) + except ValueError as e: raise ValueError( 'Expected similarity to be a float, received %s' % ( - similarity)) + similarity_value)) from e + similarity = float(similarity_value) if similarity < 0.0 or similarity > 1.0: raise ValueError( 'Expected similarity to be between 0.0 and ' @@ -253,7 +274,7 @@ def validate_topic_similarities(data): raise Exception('Expected topic similarities to be symmetric.') -def update_topic_similarities(data): +def update_topic_similarities(csv_data: str) -> None: """Updates all topic similarity pairs given by data, which should be a string of comma-separated values. @@ -266,10 +287,10 @@ def update_topic_similarities(data): similarities remain as the previous value or the default. """ - validate_topic_similarities(data) + validate_topic_similarities(csv_data) - data = data.splitlines() - data = list(csv.reader(data)) + raw_data = csv_data.splitlines() + data = list(csv.reader(raw_data)) topics_list = data[0] topic_similarities_values = data[1:] @@ -282,7 +303,10 @@ def update_topic_similarities(data): save_topic_similarities(topic_similarities_dict) -def get_item_similarity(reference_exp_summary, compared_exp_summary): +def get_item_similarity( + reference_exp_summary: exp_domain.ExplorationSummary, + compared_exp_summary: exp_domain.ExplorationSummary +) -> float: """Returns the ranking of compared_exp to reference_exp as a recommendation. 
This returns a value between 0.0 to 10.0. A higher value indicates the compared_exp is a better recommendation as an exploration to @@ -293,10 +317,10 @@ def get_item_similarity(reference_exp_summary, compared_exp_summary): returns 0.0 if compared_exp is private. Args: - reference_exp_summary: ExpSummaryModel. The reference exploration + reference_exp_summary: ExplorationSummary. The reference exploration summary. The similarity score says how similar is the compared summary to this summary. - compared_exp_summary: ExpSummaryModel. The compared exploration + compared_exp_summary: ExplorationSummary. The compared exploration summary. The similarity score says how similar is this summary to the reference summary. @@ -331,7 +355,9 @@ def get_item_similarity(reference_exp_summary, compared_exp_summary): return similarity_score -def set_exploration_recommendations(exp_id, new_recommendations): +def set_exploration_recommendations( + exp_id: str, new_recommendations: List[str] +) -> None: """Stores a list of exploration ids of recommended explorations to play after completing the exploration keyed by exp_id. @@ -346,7 +372,7 @@ def set_exploration_recommendations(exp_id, new_recommendations): id=exp_id, recommended_exploration_ids=new_recommendations).put() -def get_exploration_recommendations(exp_id): +def get_exploration_recommendations(exp_id: str) -> List[str]: """Gets a list of ids of at most 10 recommended explorations to play after completing the exploration keyed by exp_id. @@ -364,10 +390,16 @@ def get_exploration_recommendations(exp_id): if recommendations_model is None: return [] else: - return recommendations_model.recommended_exploration_ids + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. 
+ recommended_exploration_ids: List[str] = ( + recommendations_model.recommended_exploration_ids + ) + return recommended_exploration_ids -def delete_explorations_from_recommendations(exp_ids): +def delete_explorations_from_recommendations(exp_ids: List[str]) -> None: """Deletes explorations from recommendations. This deletes both the recommendations for the given explorations, as well as @@ -390,7 +422,9 @@ def delete_explorations_from_recommendations(exp_ids): # objects. all_recommending_models = {} for exp_id in exp_ids: - recommending_models = recs_model_class.query( + recommending_models: Sequence[ + recommendations_models.ExplorationRecommendationsModel + ] = recs_model_class.query( recs_model_class.recommended_exploration_ids == exp_id ).fetch() for recommending_model in recommending_models: diff --git a/core/domain/recommendations_services_test.py b/core/domain/recommendations_services_test.py index 1e80742243ea..a3204761989c 100644 --- a/core/domain/recommendations_services_test.py +++ b/core/domain/recommendations_services_test.py @@ -26,15 +26,22 @@ from core.platform import models from core.tests import test_utils -(recommendations_models, exp_models,) = models.Registry.import_models([ - models.NAMES.recommendations, models.NAMES.exploration]) +from typing import Dict, Final + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import recommendations_models + +(recommendations_models,) = models.Registry.import_models( + [models.Names.RECOMMENDATIONS] +) class TopicSimilarityUnitTests(test_utils.GenericTestBase): """Tests of the recommendation services module.""" # pylint: disable=line-too-long, single-line-pragma - TOPIC_SIMILARITIES_DEFAULT = ( + TOPIC_SIMILARITIES_DEFAULT: Final = ( """Architecture,Art,Biology,Business,Chemistry,Computing,Economics,Education,Engineering,Environment,Geography,Government,Hobbies,Languages,Law,Life 
Skills,Mathematics,Medicine,Music,Philosophy,Physics,Programming,Psychology,Puzzles,Reading,Religion,Sport,Statistics,Welcome 1.0,0.9,0.2,0.4,0.1,0.2,0.3,0.3,0.6,0.6,0.4,0.2,0.5,0.5,0.5,0.3,0.5,0.3,0.3,0.5,0.4,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3 0.9,1.0,0.1,0.6,0.1,0.1,0.6,0.6,0.2,0.3,0.3,0.2,0.5,0.7,0.6,0.2,0.3,0.2,0.9,0.7,0.3,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3 @@ -66,7 +73,7 @@ class TopicSimilarityUnitTests(test_utils.GenericTestBase): 0.1,0.1,0.6,0.5,0.3,0.6,0.7,0.2,0.5,0.3,0.2,0.4,0.2,0.1,0.2,0.4,0.8,0.1,0.1,0.3,0.4,0.6,0.4,0.5,0.1,0.1,0.3,1.0,0.3 0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,1.0""") - TOPIC_SIMILARITIES_UPDATED = ( + TOPIC_SIMILARITIES_UPDATED: Final = ( """Architecture,Art,Biology,Business,Chemistry,Computing,Economics,Education,Engineering,Environment,Geography,Government,Hobbies,Languages,Law,Life Skills,Mathematics,Medicine,Music,Philosophy,Physics,Programming,Psychology,Puzzles,Reading,Religion,Sport,Statistics,Welcome 1.0,0.9,0.2,0.4,0.1,0.2,0.3,0.3,0.6,0.6,0.4,0.2,0.5,0.5,0.5,0.3,0.5,0.3,0.3,0.5,0.4,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3 0.9,1.0,0.2,0.6,0.1,0.1,0.6,0.6,0.2,0.3,0.3,0.2,0.5,0.7,0.6,0.2,0.3,0.2,0.9,0.7,0.3,0.1,0.6,0.1,0.1,0.1,0.1,0.1,0.3 @@ -99,18 +106,18 @@ class TopicSimilarityUnitTests(test_utils.GenericTestBase): 0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,1.0""") # pylint: enable=line-too-long, single-line-pragma - def test_validate_default_similarities(self): + def test_validate_default_similarities(self) -> None: recommendations_services.validate_topic_similarities( recommendations_services.DEFAULT_TOPIC_SIMILARITIES_STRING) - def test_update_topic_similarities(self): + def test_update_topic_similarities(self) -> None: recommendations_services.update_topic_similarities( 'Art,Biology,Chemistry\n' '1.0,0.2,0.1\n' '0.2,1.0,0.8\n' '0.1,0.8,1.0') - with self.assertRaisesRegexp( + with 
self.assertRaisesRegex( Exception, ( 'Length of topic similarities columns: 2 does not match ' 'length of topic list: 3.') @@ -120,7 +127,7 @@ def test_update_topic_similarities(self): '1.0,0.2,0.1\n' '0.2,1.0,0.8') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, ( 'Length of topic similarities rows: 2 does not match ' 'length of topic list: 3.') @@ -131,7 +138,7 @@ def test_update_topic_similarities(self): '0.2,1.0\n' '0.1,0.8,1.0') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( ValueError, 'Expected similarity to be between 0.0 and 1.0, received 800' ): @@ -141,7 +148,7 @@ def test_update_topic_similarities(self): '0.2,1.0,800\n' '0.1,0.8,1.0') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( ValueError, 'Expected similarity to be a float, received string' ): @@ -151,7 +158,7 @@ def test_update_topic_similarities(self): '0.2,1.0,0.8\n' '0.1,0.8,1.0') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Topic Fake Topic not in list of known topics.' ): recommendations_services.update_topic_similarities( @@ -160,7 +167,7 @@ def test_update_topic_similarities(self): '0.2,1.0,0.8\n' '0.1,0.8,1.0') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected topic similarities to be symmetric.' 
): recommendations_services.update_topic_similarities( @@ -169,7 +176,7 @@ def test_update_topic_similarities(self): '0.3,1.0,0.8\n' '0.8,0.1,1.0') - def test_get_topic_similarity(self): + def test_get_topic_similarity(self) -> None: self.assertEqual(recommendations_services.get_topic_similarity( 'Art', 'Biology'), 0.1) self.assertEqual(recommendations_services.get_topic_similarity( @@ -187,7 +194,7 @@ def test_get_topic_similarity(self): self.assertEqual(recommendations_services.get_topic_similarity( 'Art', 'Biology'), 0.2) - def test_get_topic_similarities_as_csv(self): + def test_get_topic_similarities_as_csv(self) -> None: # The splitlines() is needed because a carriage return is added in # the returned string. topic_similarities = ( @@ -212,7 +219,7 @@ def test_get_topic_similarities_as_csv(self): class RecommendationsServicesUnitTests(test_utils.GenericTestBase): """Test recommendations services relating to exploration comparison.""" - EXP_DATA = { + EXP_DATA: Dict[str, Dict[str, str]] = { 'exp_id_1': { 'category': 'Art', }, @@ -226,7 +233,7 @@ class RecommendationsServicesUnitTests(test_utils.GenericTestBase): 'category': 'Art', } } - USER_DATA = { + USER_DATA: Dict[str, Dict[str, str]] = { 'alice': { 'email': 'alice@example.com' }, @@ -238,9 +245,9 @@ class RecommendationsServicesUnitTests(test_utils.GenericTestBase): }, } - def setUp(self): + def setUp(self) -> None: """Before each individual test, set up dummy explorations and users.""" - super(RecommendationsServicesUnitTests, self).setUp() + super().setUp() for name, user in self.USER_DATA.items(): self.signup(user['email'], name) @@ -259,7 +266,7 @@ def setUp(self): owner = user_services.get_user_actions_info(exp['owner_id']) rights_manager.publish_exploration(owner, exp_id) - def test_recommendation_categories_and_matrix_headers_match(self): + def test_recommendation_categories_and_matrix_headers_match(self) -> None: topic_similarities_lines = ( 
recommendations_services.DEFAULT_TOPIC_SIMILARITIES_STRING.split( '\n')) @@ -268,7 +275,7 @@ def test_recommendation_categories_and_matrix_headers_match(self): matrix_categories, sorted(recommendations_services.RECOMMENDATION_CATEGORIES)) - def test_get_item_similarity(self): + def test_get_item_similarity(self) -> None: exp_summaries = exp_services.get_all_exploration_summaries() self.assertEqual( @@ -291,7 +298,7 @@ def test_get_item_similarity(self): 0.0 ) - def test_get_and_set_exploration_recommendations(self): + def test_get_and_set_exploration_recommendations(self) -> None: recommended_exp_ids = ['exp_id_2', 'exp_id_3'] recommendations_services.set_exploration_recommendations( 'exp_id_1', recommended_exp_ids) @@ -308,7 +315,12 @@ def test_get_and_set_exploration_recommendations(self): 'exp_id_1')) self.assertEqual(recommended_exp_ids, saved_recommendation_ids) - def test_delete_recommendations_for_exploration(self): + saved_recommendation_ids = ( + recommendations_services.get_exploration_recommendations( + 'exp_id_0')) + self.assertEqual(saved_recommendation_ids, []) + + def test_delete_recommendations_for_exploration(self) -> None: recommendations_services.delete_explorations_from_recommendations([ 'exp_id_1', 'exp_id_2']) self.assertIsNone( @@ -318,7 +330,7 @@ def test_delete_recommendations_for_exploration(self): recommendations_models.ExplorationRecommendationsModel.get_by_id( 'exp_id_2')) - def test_delete_exploration_from_recommendations(self): + def test_delete_exploration_from_recommendations(self) -> None: recommendations_services.set_exploration_recommendations( 'exp_id_1', ['exp_id_3', 'exp_id_4']) recommendations_services.set_exploration_recommendations( diff --git a/core/domain/rights_domain.py b/core/domain/rights_domain.py index 7554b7d81f6e..89d25d8fa175 100644 --- a/core/domain/rights_domain.py +++ b/core/domain/rights_domain.py @@ -17,14 +17,16 @@ from __future__ import annotations from core import feconf -from core import python_utils 
from core import utils from core.constants import constants from core.domain import change_domain -from core.domain import user_services -from typing import List, Optional -from typing_extensions import TypedDict +from typing import List, Optional, TypedDict + +from core.domain import user_services # pylint: disable=invalid-import-from # isort:skip + +# TODO(#14537): Refactor this file and remove imports marked +# with 'invalid-import-from'. # IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve # backward-compatibility with previous exploration snapshots in the datastore. @@ -38,8 +40,8 @@ CMD_RELEASE_OWNERSHIP = feconf.CMD_RELEASE_OWNERSHIP CMD_UPDATE_FIRST_PUBLISHED_MSEC = feconf.CMD_UPDATE_FIRST_PUBLISHED_MSEC -ACTIVITY_STATUS_PRIVATE = constants.ACTIVITY_STATUS_PRIVATE -ACTIVITY_STATUS_PUBLIC = constants.ACTIVITY_STATUS_PUBLIC +ACTIVITY_STATUS_PRIVATE: str = constants.ACTIVITY_STATUS_PRIVATE +ACTIVITY_STATUS_PUBLIC: str = constants.ACTIVITY_STATUS_PUBLIC ROLE_OWNER = feconf.ROLE_OWNER ROLE_EDITOR = feconf.ROLE_EDITOR @@ -82,7 +84,7 @@ def __init__( cloned_from: Optional[str] = None, status: str = ACTIVITY_STATUS_PRIVATE, viewable_if_private: bool = False, - first_published_msec: Optional[str] = None + first_published_msec: Optional[float] = None ) -> None: self.id = exploration_id self.owner_ids = owner_ids @@ -176,13 +178,13 @@ def to_dict(self) -> ActivityRightsDict: 'cloned_from': self.cloned_from, 'status': self.status, 'community_owned': False, - 'owner_names': user_services.get_human_readable_user_ids(# type: ignore[no-untyped-call] + 'owner_names': user_services.get_human_readable_user_ids( self.owner_ids), - 'editor_names': user_services.get_human_readable_user_ids(# type: ignore[no-untyped-call] + 'editor_names': user_services.get_human_readable_user_ids( self.editor_ids), - 'voice_artist_names': user_services.get_human_readable_user_ids(# type: ignore[no-untyped-call] + 'voice_artist_names': 
user_services.get_human_readable_user_ids( self.voice_artist_ids), - 'viewer_names': user_services.get_human_readable_user_ids(# type: ignore[no-untyped-call] + 'viewer_names': user_services.get_human_readable_user_ids( self.viewer_ids), 'viewable_if_private': self.viewable_if_private, } @@ -237,7 +239,7 @@ def is_published(self) -> bool: Returns: bool. Whether activity is published. """ - return bool(self.status == ACTIVITY_STATUS_PUBLIC) + return self.status == ACTIVITY_STATUS_PUBLIC def is_private(self) -> bool: """Checks whether activity is private. @@ -245,7 +247,7 @@ def is_private(self) -> bool: Returns: bool. Whether activity is private. """ - return bool(self.status == ACTIVITY_STATUS_PRIVATE) + return self.status == ACTIVITY_STATUS_PRIVATE def is_solely_owned_by_user(self, user_id: str) -> bool: """Checks whether the activity is solely owned by the user. @@ -267,6 +269,9 @@ def assign_new_role(self, user_id: str, new_role: str) -> str: Returns: str. The previous role of the user. + + Raises: + Exception. If previous role is assigned again. 
""" old_role = ROLE_NONE if new_role == ROLE_VIEWER: @@ -274,7 +279,7 @@ def assign_new_role(self, user_id: str, new_role: str) -> str: raise Exception( 'Public explorations can be viewed by anyone.') - for role, user_ids in python_utils.ZIP( + for role, user_ids in zip( [ROLE_OWNER, ROLE_EDITOR, ROLE_VIEWER, ROLE_VOICE_ARTIST], [self.owner_ids, self.editor_ids, self.viewer_ids, self.voice_artist_ids]): @@ -289,13 +294,16 @@ def assign_new_role(self, user_id: str, new_role: str) -> str: if old_role == ROLE_OWNER: raise Exception( 'This user already owns this exploration.') - elif old_role == ROLE_EDITOR: + + if old_role == ROLE_EDITOR: raise Exception( 'This user already can edit this exploration.') - elif old_role == ROLE_VOICE_ARTIST: + + if old_role == ROLE_VOICE_ARTIST: raise Exception( 'This user already can voiceover this exploration.') - elif old_role == ROLE_VIEWER: + + if old_role == ROLE_VIEWER: raise Exception( 'This user already can view this exploration.') @@ -321,6 +329,76 @@ class ExplorationRightsChange(change_domain.BaseChange): ALLOWED_COMMANDS = feconf.EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS +class CreateNewExplorationRightsCmd(ExplorationRightsChange): + """Class representing the ExplorationRightsChange's + CMD_CREATE_NEW command. + """ + + pass + + +class ChangeRoleExplorationRightsCmd(ExplorationRightsChange): + """Class representing the ExplorationRightsChange's + CMD_CHANGE_ROLE command. + """ + + assignee_id: str + old_role: str + new_role: str + + +class RemoveRoleExplorationRightsCmd(ExplorationRightsChange): + """Class representing the ExplorationRightsChange's + CMD_REMOVE_ROLE command. + """ + + removed_user_id: str + old_role: str + + +class ChangePrivateViewabilityExplorationRightsCmd(ExplorationRightsChange): + """Class representing the ExplorationRightsChange's + CMD_CHANGE_PRIVATE_VIEWABILITY command. 
+ """ + + old_viewable_if_private: bool + new_viewable_if_private: bool + + +class ReleaseOwnershipExplorationRightsCmd(ExplorationRightsChange): + """Class representing the ExplorationRightsChange's + CMD_RELEASE_OWNERSHIP command. + """ + + pass + + +class UpdateFirstPublishedMsecExplorationRightsCmd(ExplorationRightsChange): + """Class representing the ExplorationRightsChange's + CMD_UPDATE_FIRST_PUBLISHED_MSEC command. + """ + + old_first_published_msec: float + new_first_published_msec: float + + +class DeleteCommitExplorationRightsCmd(ExplorationRightsChange): + """Class representing the ExplorationRightsChange's + CMD_DELETE_COMMIT command. + """ + + pass + + +class ChangeExplorationStatus(ExplorationRightsChange): + """Class representing the ExplorationRightsChange's + CMD_CHANGE_EXPLORATION_STATUS command. + """ + + old_status: str + new_Status: str + + class CollectionRightsChange(change_domain.BaseChange): """Domain object class for an collection rights change. @@ -338,3 +416,73 @@ class CollectionRightsChange(change_domain.BaseChange): """ ALLOWED_COMMANDS = feconf.COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS + + +class CreateNewCollectionRightsCmd(CollectionRightsChange): + """Class representing the CollectionRightsChange's + CMD_CREATE_NEW command. + """ + + pass + + +class ChangeRoleCollectionRightsCmd(CollectionRightsChange): + """Class representing the CollectionRightsChange's + CMD_CHANGE_ROLE command. + """ + + assignee_id: str + old_role: str + new_role: str + + +class RemoveRoleCollectionRightsCmd(CollectionRightsChange): + """Class representing the CollectionRightsChange's + CMD_REMOVE_ROLE command. + """ + + removed_user_id: str + old_role: str + + +class ChangePrivateViewabilityCollectionRightsCmd(CollectionRightsChange): + """Class representing the CollectionRightsChange's + CMD_CHANGE_PRIVATE_VIEWABILITY command. 
+ """ + + old_viewable_if_private: str + new_viewable_if_private: str + + +class ReleaseOwnershipCollectionRightsCmd(CollectionRightsChange): + """Class representing the CollectionRightsChange's + CMD_RELEASE_OWNERSHIP command. + """ + + pass + + +class UpdateFirstPublishedMsecCollectionRightsCmd(CollectionRightsChange): + """Class representing the CollectionRightsChange's + CMD_UPDATE_FIRST_PUBLISHED_MSEC command. + """ + + old_first_published_msec: float + new_first_published_msec: float + + +class DeleteCommitCollectionRightsCmd(CollectionRightsChange): + """Class representing the CollectionRightsChange's + CMD_DELETE_COMMIT command. + """ + + pass + + +class ChangeCollectionStatus(CollectionRightsChange): + """Class representing the CollectionRightsChange's + CMD_CHANGE_EXPLORATION_STATUS command. + """ + + old_status: str + new_Status: str diff --git a/core/domain/rights_domain_test.py b/core/domain/rights_domain_test.py index ee5c18915fa1..464a457c4a0a 100644 --- a/core/domain/rights_domain_test.py +++ b/core/domain/rights_domain_test.py @@ -21,7 +21,6 @@ import logging from core import utils -from core.constants import constants from core.domain import rights_domain from core.domain import rights_manager from core.domain import user_services @@ -33,23 +32,23 @@ class ActivityRightsTests(test_utils.GenericTestBase): def setUp(self) -> None: - super(ActivityRightsTests, self).setUp() + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) - self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) # type: ignore[no-untyped-call] - self.owner = user_services.get_user_actions_info(self.owner_id) # type: ignore[no-untyped-call] + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + self.owner = user_services.get_user_actions_info(self.owner_id) self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) - self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL) # type: ignore[no-untyped-call] - self.viewer = 
user_services.get_user_actions_info(self.viewer_id) # type: ignore[no-untyped-call] + self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL) + self.viewer = user_services.get_user_actions_info(self.viewer_id) self.exp_id = 'exp_id' - self.save_new_valid_exploration(self.exp_id, self.owner_id) # type: ignore[no-untyped-call] - rights_manager.publish_exploration(self.owner, self.exp_id) # type: ignore[no-untyped-call] - self.activity_rights = rights_manager.get_exploration_rights( # type: ignore[no-untyped-call] + self.save_new_valid_exploration(self.exp_id, self.owner_id) + rights_manager.publish_exploration(self.owner, self.exp_id) + self.activity_rights = rights_manager.get_exploration_rights( self.exp_id) def test_validate_community_owned_explorations(self) -> None: self.activity_rights.community_owned = True - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Community-owned explorations should have no owners, ' 'editors, voice artists or viewers specified.'): @@ -57,32 +56,32 @@ def test_validate_community_owned_explorations(self) -> None: self.activity_rights.owner_ids = [] self.activity_rights.status = rights_domain.ACTIVITY_STATUS_PRIVATE - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Community-owned explorations cannot be private'): self.activity_rights.validate() def test_validate_private_explorations(self) -> None: self.activity_rights.viewer_ids = [self.viewer_id] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Public explorations should have no viewers specified.'): self.activity_rights.validate() def test_validate_owner_cannot_be_editor(self) -> None: self.activity_rights.editor_ids = [self.owner_id] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'A user cannot be both an owner and an editor.'): 
self.activity_rights.validate() def test_validate_owner_cannot_be_voice_artist(self) -> None: self.activity_rights.voice_artist_ids = [self.owner_id] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'A user cannot be both an owner and a voice artist.'): self.activity_rights.validate() def test_validate_owner_cannot_be_viewer(self) -> None: self.activity_rights.viewer_ids = [self.owner_id] self.activity_rights.status = rights_domain.ACTIVITY_STATUS_PRIVATE - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'A user cannot be both an owner and a viewer.'): self.activity_rights.validate() @@ -90,7 +89,7 @@ def test_validate_editor_cannot_be_voice_artist(self) -> None: self.activity_rights.voice_artist_ids = [self.viewer_id] self.activity_rights.editor_ids = [self.viewer_id] self.activity_rights.status = rights_domain.ACTIVITY_STATUS_PRIVATE - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'A user cannot be both an editor and a voice artist.'): self.activity_rights.validate() @@ -98,7 +97,7 @@ def test_validate_editor_cannot_be_viewer(self) -> None: self.activity_rights.viewer_ids = [self.viewer_id] self.activity_rights.editor_ids = [self.viewer_id] self.activity_rights.status = rights_domain.ACTIVITY_STATUS_PRIVATE - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'A user cannot be both an editor and a viewer.'): self.activity_rights.validate() @@ -106,74 +105,59 @@ def test_validate_voice_artist_cannot_be_viewer(self) -> None: self.activity_rights.viewer_ids = [self.viewer_id] self.activity_rights.voice_artist_ids = [self.viewer_id] self.activity_rights.status = rights_domain.ACTIVITY_STATUS_PRIVATE - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'A user cannot be both a voice artist and a 
viewer.'): self.activity_rights.validate() - def test_update_activity_first_published_msec(self) -> None: - rights_manager.update_activity_first_published_msec( # type: ignore[no-untyped-call] - constants.ACTIVITY_TYPE_EXPLORATION, self.exp_id, 0.0) - - activity_rights = rights_manager.get_exploration_rights(self.exp_id) # type: ignore[no-untyped-call] - self.assertEqual(activity_rights.first_published_msec, 0.0) - - def test_cannot_update_activity_first_published_msec_for_invalid_activity( - self - ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] - Exception, 'Cannot get activity rights for unknown activity type'): - rights_manager.update_activity_first_published_msec( # type: ignore[no-untyped-call] - 'invalid_activity', 'activity_id', 0.0) - def test_check_cannot_access_activity_with_no_activity_rights(self) -> None: - self.assertFalse(rights_manager.check_can_access_activity( # type: ignore[no-untyped-call] + self.assertFalse(rights_manager.check_can_access_activity( self.owner, None)) def test_check_cannot_edit_activity_with_no_activity_rights(self) -> None: - self.assertFalse(rights_manager.check_can_edit_activity( # type: ignore[no-untyped-call] + self.assertFalse(rights_manager.check_can_edit_activity( self.owner, None)) def test_check_cannot_voiceover_activity_with_no_activity_rights( self ) -> None: - self.assertFalse(rights_manager.check_can_voiceover_activity( # type: ignore[no-untyped-call] + self.assertFalse(rights_manager.check_can_voiceover_activity( self.owner, None)) def test_cannot_save_activity_with_no_activity_rights(self) -> None: - self.assertFalse(rights_manager.check_can_save_activity( # type: ignore[no-untyped-call] + self.assertFalse(rights_manager.check_can_save_activity( self.owner, None)) def test_check_cannot_delete_activity_with_no_activity_rights(self) -> None: - self.assertFalse(rights_manager.check_can_delete_activity( # type: ignore[no-untyped-call] + 
self.assertFalse(rights_manager.check_can_delete_activity( self.owner, None)) def test_check_cannot_modify_activity_roles_with_no_activity_rights( self ) -> None: - self.assertFalse(rights_manager.check_can_modify_core_activity_roles( # type: ignore[no-untyped-call] + self.assertFalse(rights_manager.check_can_modify_core_activity_roles( self.owner, None)) def test_check_cannot_release_ownership_with_no_activity_rights( self ) -> None: - self.assertFalse(rights_manager.check_can_release_ownership( # type: ignore[no-untyped-call] + self.assertFalse(rights_manager.check_can_release_ownership( self.owner, None)) def test_check_cannnot_publish_activity_with_no_activity_rights( self ) -> None: - self.assertFalse(rights_manager.check_can_publish_activity( # type: ignore[no-untyped-call] + self.assertFalse(rights_manager.check_can_publish_activity( self.owner, None)) def test_check_cannot_publish_activity_with_cloned_from(self) -> None: - self.activity_rights.cloned_from = True - self.assertFalse(rights_manager.check_can_publish_activity( # type: ignore[no-untyped-call] + self.activity_rights.cloned_from = 'abcdefg' + self.assertFalse(rights_manager.check_can_publish_activity( self.owner, self.activity_rights)) def test_check_cannot_unpublish_activity_with_no_activity_rights( self ) -> None: - self.assertFalse(rights_manager.check_can_unpublish_activity( # type: ignore[no-untyped-call] + self.assertFalse(rights_manager.check_can_unpublish_activity( self.owner, None)) def test_cannot_release_ownership_of_exploration_with_insufficient_rights( @@ -187,11 +171,11 @@ def _mock_logging_function(msg: str, *args: Sequence[str]) -> None: logging_swap = self.swap(logging, 'error', _mock_logging_function) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( # type: ignore[no-untyped-call] + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'The ownership of this exploration cannot be released.') with logging_swap, 
assert_raises_regexp_context_manager: - rights_manager.release_ownership_of_exploration( # type: ignore[no-untyped-call] + rights_manager.release_ownership_of_exploration( self.viewer, self.exp_id) self.assertEqual(len(observed_log_messages), 1) @@ -204,11 +188,55 @@ def test_activity_should_have_atlest_one_owner(self) -> None: self.activity_rights.community_owned = False self.activity_rights.owner_ids = [] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, 'Activity should have atleast one owner.'): self.activity_rights.validate() + def test_to_dict(self) -> None: + sample_activity_rights_dict: rights_domain.ActivityRightsDict = { + 'cloned_from': None, + 'status': rights_domain.ACTIVITY_STATUS_PUBLIC, + 'community_owned': False, + 'owner_names': ['owner'], + 'editor_names': [], + 'voice_artist_names': [], + 'viewer_names': [], + 'viewable_if_private': False, + } + self.assertEqual( + self.activity_rights.to_dict(), sample_activity_rights_dict + ) + + self.activity_rights.community_owned = True + sample_activity_rights_dict['community_owned'] = True + sample_activity_rights_dict['owner_names'] = [] + self.assertEqual( + self.activity_rights.to_dict(), sample_activity_rights_dict + ) + + def test_is_editor(self) -> None: + self.activity_rights.editor_ids = ['123456'] + self.assertTrue(self.activity_rights.is_editor('123456')) + self.assertFalse(self.activity_rights.is_editor('123457')) + + def test_is_voice_artist(self) -> None: + self.activity_rights.voice_artist_ids = ['123456'] + self.assertTrue(self.activity_rights.is_voice_artist('123456')) + self.assertFalse(self.activity_rights.is_voice_artist('123457')) + + def test_is_viewer(self) -> None: + self.activity_rights.viewer_ids = ['123456'] + self.assertTrue(self.activity_rights.is_viewer('123456')) + self.assertFalse(self.activity_rights.is_viewer('123457')) + + def test_is_solely_owned_by_user(self) -> None: + self.activity_rights.owner_ids = 
['123456'] + self.assertTrue(self.activity_rights.is_solely_owned_by_user('123456')) + + self.activity_rights.owner_ids = ['123456', '1234567'] + self.assertFalse(self.activity_rights.is_solely_owned_by_user('123456')) + def test_assign_role_replaces_old_role(self) -> None: self.activity_rights.owner_ids = ['123456'] self.activity_rights.editor_ids = [] @@ -233,25 +261,40 @@ def test_cannot_assign_same_role(self) -> None: self.activity_rights.editor_ids = [] self.activity_rights.viewer_ids = [] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'This user already owns this exploration.'): self.activity_rights.assign_new_role( '123456', rights_domain.ROLE_OWNER) self.activity_rights.assign_new_role( '123456', rights_domain.ROLE_EDITOR) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'This user already can edit this exploration.'): self.activity_rights.assign_new_role( '123456', rights_domain.ROLE_EDITOR) + self.activity_rights.assign_new_role( + '123456', rights_domain.ROLE_VOICE_ARTIST) + with self.assertRaisesRegex( + Exception, 'This user already can voiceover this exploration.'): + self.activity_rights.assign_new_role( + '123456', rights_domain.ROLE_VOICE_ARTIST) + + self.activity_rights.status = rights_domain.ACTIVITY_STATUS_PRIVATE + self.activity_rights.assign_new_role( + '123456', rights_domain.ROLE_VIEWER) + with self.assertRaisesRegex( + Exception, 'This user already can view this exploration.'): + self.activity_rights.assign_new_role( + '123456', rights_domain.ROLE_VIEWER) + def test_cannot_assign_viewer_to_public_exp(self) -> None: self.activity_rights.owner_ids = [] self.activity_rights.editor_ids = [] self.activity_rights.viewer_ids = [] self.activity_rights.status = rights_domain.ACTIVITY_STATUS_PUBLIC - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Public explorations can be viewed 
by anyone.'): self.activity_rights.assign_new_role( '123456', rights_domain.ROLE_VIEWER) @@ -260,23 +303,23 @@ def test_cannot_assign_viewer_to_public_exp(self) -> None: class ExplorationRightsChangeTests(test_utils.GenericTestBase): def test_exploration_rights_change_object_with_missing_cmd(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, 'Missing cmd key in change dict'): - rights_domain.ExplorationRightsChange({'invalid': 'data'}) # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({'invalid': 'data'}) def test_exploration_rights_change_object_with_invalid_cmd(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, 'Command invalid is not allowed'): - rights_domain.ExplorationRightsChange({'cmd': 'invalid'}) # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({'cmd': 'invalid'}) def test_exploration_rights_change_object_with_missing_attribute_in_cmd( self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, ( 'The following required attributes are missing: ' 'new_role, old_role')): - rights_domain.ExplorationRightsChange({ # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({ 'cmd': 'change_role', 'assignee_id': 'assignee_id', }) @@ -284,10 +327,10 @@ def test_exploration_rights_change_object_with_missing_attribute_in_cmd( def test_exploration_rights_change_object_with_extra_attribute_in_cmd( self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, ( 'The following extra attributes are present: invalid')): - rights_domain.ExplorationRightsChange({ # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({ 'cmd': 'change_private_viewability', 'old_viewable_if_private': 
'old_viewable_if_private', 'new_viewable_if_private': 'new_viewable_if_private', @@ -295,11 +338,11 @@ def test_exploration_rights_change_object_with_extra_attribute_in_cmd( }) def test_exploration_rights_change_object_with_invalid_role(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for new_role in cmd change_role: ' 'invalid is not allowed')): - rights_domain.ExplorationRightsChange({ # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({ 'cmd': 'change_role', 'assignee_id': 'assignee_id', 'old_role': rights_domain.ROLE_OWNER, @@ -307,11 +350,11 @@ def test_exploration_rights_change_object_with_invalid_role(self) -> None: }) def test_exploration_rights_change_object_with_invalid_status(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for new_status in cmd change_exploration_status: ' 'invalid is not allowed')): - rights_domain.ExplorationRightsChange({ # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({ 'cmd': 'change_exploration_status', 'old_status': rights_domain.ACTIVITY_STATUS_PRIVATE, 'new_status': 'invalid' @@ -319,13 +362,12 @@ def test_exploration_rights_change_object_with_invalid_status(self) -> None: def test_exploration_rights_change_object_with_create_new(self) -> None: exploration_rights_change_object = ( - rights_domain.ExplorationRightsChange({'cmd': 'create_new'})) # type: ignore[no-untyped-call] - + rights_domain.ExplorationRightsChange({'cmd': 'create_new'})) self.assertEqual(exploration_rights_change_object.cmd, 'create_new') def test_exploration_rights_change_object_with_change_role(self) -> None: exploration_rights_change_object = ( - rights_domain.ExplorationRightsChange({ # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({ 'cmd': 'change_role', 'assignee_id': 'assignee_id', 'old_role': 
rights_domain.ROLE_OWNER, @@ -349,7 +391,7 @@ def test_exploration_rights_change_object_with_release_ownership( self ) -> None: exploration_rights_change_object = ( - rights_domain.ExplorationRightsChange( # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange( {'cmd': 'release_ownership'} ) ) @@ -361,7 +403,7 @@ def test_exploration_rights_change_object_with_change_private_viewability( self ) -> None: exploration_rights_change_object = ( - rights_domain.ExplorationRightsChange({ # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({ 'cmd': 'change_private_viewability', 'old_viewable_if_private': 'old_viewable_if_private', 'new_viewable_if_private': 'new_viewable_if_private' @@ -381,7 +423,7 @@ def test_exploration_rights_change_object_with_update_first_published_msec( self ) -> None: exploration_rights_change_object = ( - rights_domain.ExplorationRightsChange({ # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({ 'cmd': 'update_first_published_msec', 'old_first_published_msec': 'old_first_published_msec', 'new_first_published_msec': 'new_first_published_msec' @@ -401,7 +443,7 @@ def test_exploration_rights_change_object_with_change_exploration_status( self ) -> None: exploration_rights_change_object = ( - rights_domain.ExplorationRightsChange({ # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange({ 'cmd': 'change_exploration_status', 'old_status': rights_domain.ACTIVITY_STATUS_PRIVATE, 'new_status': rights_domain.ACTIVITY_STATUS_PUBLIC @@ -424,33 +466,33 @@ def test_to_dict(self) -> None: 'new_viewable_if_private': 'new_viewable_if_private' } exploration_rights_change_object = ( - rights_domain.ExplorationRightsChange( # type: ignore[no-untyped-call] + rights_domain.ExplorationRightsChange( exploration_rights_change_dict)) self.assertEqual( - exploration_rights_change_object.to_dict(), # type: ignore[no-untyped-call] + exploration_rights_change_object.to_dict(), 
exploration_rights_change_dict) class CollectionRightsChangeTests(test_utils.GenericTestBase): def test_collection_rights_change_object_with_missing_cmd(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, 'Missing cmd key in change dict'): - rights_domain.CollectionRightsChange({'invalid': 'data'}) # type: ignore[no-untyped-call] + rights_domain.CollectionRightsChange({'invalid': 'data'}) def test_collection_rights_change_object_with_invalid_cmd(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, 'Command invalid is not allowed'): - rights_domain.CollectionRightsChange({'cmd': 'invalid'}) # type: ignore[no-untyped-call] + rights_domain.CollectionRightsChange({'cmd': 'invalid'}) def test_collection_rights_change_object_with_missing_attribute_in_cmd( self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, ( 'The following required attributes are missing: ' 'new_role, old_role')): - rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + rights_domain.CollectionRightsChange({ 'cmd': 'change_role', 'assignee_id': 'assignee_id', }) @@ -458,10 +500,10 @@ def test_collection_rights_change_object_with_missing_attribute_in_cmd( def test_collection_rights_change_object_with_extra_attribute_in_cmd( self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, ( 'The following extra attributes are present: invalid')): - rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + rights_domain.CollectionRightsChange({ 'cmd': 'change_private_viewability', 'old_viewable_if_private': 'old_viewable_if_private', 'new_viewable_if_private': 'new_viewable_if_private', @@ -469,11 +511,11 @@ def 
test_collection_rights_change_object_with_extra_attribute_in_cmd( }) def test_collection_rights_change_object_with_invalid_role(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for new_role in cmd change_role: ' 'invalid is not allowed')): - rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + rights_domain.CollectionRightsChange({ 'cmd': 'change_role', 'assignee_id': 'assignee_id', 'old_role': rights_domain.ROLE_OWNER, @@ -481,25 +523,25 @@ def test_collection_rights_change_object_with_invalid_role(self) -> None: }) def test_collection_rights_change_object_with_invalid_status(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for new_status in cmd change_collection_status: ' 'invalid is not allowed')): - rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + rights_domain.CollectionRightsChange({ 'cmd': 'change_collection_status', 'old_status': rights_domain.ACTIVITY_STATUS_PRIVATE, 'new_status': 'invalid' }) def test_collection_rights_change_object_with_create_new(self) -> None: - collection_rights_change_object = rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + collection_rights_change_object = rights_domain.CollectionRightsChange({ 'cmd': 'create_new' }) self.assertEqual(collection_rights_change_object.cmd, 'create_new') def test_collection_rights_change_object_with_change_role(self) -> None: - collection_rights_change_object = rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + collection_rights_change_object = rights_domain.CollectionRightsChange({ 'cmd': 'change_role', 'assignee_id': 'assignee_id', 'old_role': rights_domain.ROLE_OWNER, @@ -517,7 +559,7 @@ def test_collection_rights_change_object_with_change_role(self) -> None: def test_collection_rights_change_object_with_release_ownership( self 
) -> None: - collection_rights_change_object = rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + collection_rights_change_object = rights_domain.CollectionRightsChange({ 'cmd': 'release_ownership' }) @@ -527,7 +569,7 @@ def test_collection_rights_change_object_with_release_ownership( def test_collection_rights_change_object_with_change_private_viewability( self ) -> None: - collection_rights_change_object = rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + collection_rights_change_object = rights_domain.CollectionRightsChange({ 'cmd': 'change_private_viewability', 'old_viewable_if_private': 'old_viewable_if_private', 'new_viewable_if_private': 'new_viewable_if_private' @@ -545,7 +587,7 @@ def test_collection_rights_change_object_with_change_private_viewability( def test_collection_rights_change_object_with_update_first_published_msec( self ) -> None: - collection_rights_change_object = rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + collection_rights_change_object = rights_domain.CollectionRightsChange({ 'cmd': 'update_first_published_msec', 'old_first_published_msec': 'old_first_published_msec', 'new_first_published_msec': 'new_first_published_msec' @@ -564,7 +606,7 @@ def test_collection_rights_change_object_with_change_collection_status( self ) -> None: collection_rights_change_object = ( - rights_domain.CollectionRightsChange({ # type: ignore[no-untyped-call] + rights_domain.CollectionRightsChange({ 'cmd': 'change_collection_status', 'old_status': rights_domain.ACTIVITY_STATUS_PRIVATE, 'new_status': rights_domain.ACTIVITY_STATUS_PUBLIC @@ -586,8 +628,8 @@ def test_to_dict(self) -> None: 'old_viewable_if_private': 'old_viewable_if_private', 'new_viewable_if_private': 'new_viewable_if_private' } - collection_rights_change_object = rights_domain.CollectionRightsChange( # type: ignore[no-untyped-call] + collection_rights_change_object = rights_domain.CollectionRightsChange( 
collection_rights_change_dict) self.assertEqual( - collection_rights_change_object.to_dict(), # type: ignore[no-untyped-call] + collection_rights_change_object.to_dict(), collection_rights_change_dict) diff --git a/core/domain/rights_manager.py b/core/domain/rights_manager.py index b814b984af53..199275859f61 100644 --- a/core/domain/rights_manager.py +++ b/core/domain/rights_manager.py @@ -20,24 +20,41 @@ import logging -from core import feconf from core import utils from core.constants import constants from core.domain import activity_services +from core.domain import change_domain from core.domain import rights_domain from core.domain import role_services from core.domain import subscription_services from core.domain import taskqueue_services +from core.domain import user_domain from core.domain import user_services from core.platform import models -datastore_services = models.Registry.import_datastore_services() +from typing import ( + Dict, List, Literal, Mapping, Optional, Sequence, Union, overload) + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import collection_models + from mypy_imports import datastore_services + from mypy_imports import exp_models + (collection_models, exp_models) = models.Registry.import_models([ - models.NAMES.collection, models.NAMES.exploration + models.Names.COLLECTION, models.Names.EXPLORATION ]) +datastore_services = models.Registry.import_datastore_services() -def get_activity_rights_from_model(activity_rights_model, activity_type): + +def get_activity_rights_from_model( + activity_rights_model: Union[ + collection_models.CollectionRightsModel, + exp_models.ExplorationRightsModel + ], + activity_type: str +) -> rights_domain.ActivityRights: """Constructs an ActivityRights object from the given activity rights model. Args: @@ -50,6 +67,15 @@ def get_activity_rights_from_model(activity_rights_model, activity_type): Returns: ActivityRights. The rights object created from the model. 
""" + cloned_from_value = None + if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: + # Ruling out the possibility of CollectionRightsModel for mypy + # type checking. + assert isinstance( + activity_rights_model, + exp_models.ExplorationRightsModel + ) + cloned_from_value = activity_rights_model.cloned_from return rights_domain.ActivityRights( activity_rights_model.id, @@ -58,9 +84,7 @@ def get_activity_rights_from_model(activity_rights_model, activity_type): activity_rights_model.voice_artist_ids, activity_rights_model.viewer_ids, community_owned=activity_rights_model.community_owned, - cloned_from=( - activity_rights_model.cloned_from - if activity_type == constants.ACTIVITY_TYPE_EXPLORATION else None), + cloned_from=cloned_from_value, status=activity_rights_model.status, viewable_if_private=activity_rights_model.viewable_if_private, first_published_msec=activity_rights_model.first_published_msec @@ -68,8 +92,12 @@ def get_activity_rights_from_model(activity_rights_model, activity_type): def _save_activity_rights( - committer_id, activity_rights, activity_type, commit_message, - commit_cmds): + committer_id: str, + activity_rights: rights_domain.ActivityRights, + activity_type: str, + commit_message: str, + commit_cmds: Sequence[Mapping[str, change_domain.AcceptableChangeDictTypes]] +) -> None: """Saves an ExplorationRights or CollectionRights domain object to the datastore. @@ -86,11 +114,25 @@ def _save_activity_rights( """ activity_rights.validate() + # Ruling out the possibility of any other activity type. 
+ assert ( + activity_type in ( + constants.ACTIVITY_TYPE_COLLECTION, + constants.ACTIVITY_TYPE_EXPLORATION + ) + ) + if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: - model_cls = exp_models.ExplorationRightsModel + model: Union[ + exp_models.ExplorationRightsModel, + collection_models.CollectionRightsModel + ] = exp_models.ExplorationRightsModel.get( + activity_rights.id, strict=True + ) elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: - model_cls = collection_models.CollectionRightsModel - model = model_cls.get(activity_rights.id, strict=False) + model = collection_models.CollectionRightsModel.get( + activity_rights.id, strict=True + ) model.owner_ids = activity_rights.owner_ids model.editor_ids = activity_rights.editor_ids @@ -104,7 +146,9 @@ def _save_activity_rights( model.commit(committer_id, commit_message, commit_cmds) -def _update_exploration_summary(activity_rights): +def _update_exploration_summary( + activity_rights: rights_domain.ActivityRights +) -> None: """Updates the exploration summary for the activity associated with the given rights object. @@ -120,7 +164,9 @@ def _update_exploration_summary(activity_rights): activity_rights.id) -def _update_collection_summary(activity_rights): +def _update_collection_summary( + activity_rights: rights_domain.ActivityRights +) -> None: """Updates the collection summary for the given activity associated with the given rights object. @@ -135,7 +181,10 @@ def _update_collection_summary(activity_rights): activity_rights.id) -def _update_activity_summary(activity_type, activity_rights): +def _update_activity_summary( + activity_type: str, + activity_rights: rights_domain.ActivityRights +) -> None: """Updates the activity summary for the given activity associated with the given rights object. 
@@ -154,34 +203,9 @@ def _update_activity_summary(activity_type, activity_rights): _update_collection_summary(activity_rights) -def update_activity_first_published_msec( - activity_type, activity_id, first_published_msec): - """Updates the first_published_msec field for the given activity. - - The caller is responsible for ensuring that this value is not already - set before updating it. - - Args: - activity_type: str. The type of activity. Possible values: - constants.ACTIVITY_TYPE_EXPLORATION, - constants.ACTIVITY_TYPE_COLLECTION. - activity_id: str. ID of the activity. - first_published_msec: float. First publication time in milliseconds - since the Epoch. - """ - activity_rights = _get_activity_rights(activity_type, activity_id) - commit_cmds = [{ - 'cmd': rights_domain.CMD_UPDATE_FIRST_PUBLISHED_MSEC, - 'old_first_published_msec': activity_rights.first_published_msec, - 'new_first_published_msec': first_published_msec - }] - activity_rights.first_published_msec = first_published_msec - _save_activity_rights( - feconf.SYSTEM_COMMITTER_ID, activity_rights, activity_type, - 'set first published time in msec', commit_cmds) - - -def create_new_exploration_rights(exploration_id, committer_id): +def create_new_exploration_rights( + exploration_id: str, committer_id: str +) -> None: """Creates a new exploration rights object and saves it to the datastore. Subscribes the committer to the new exploration. 
@@ -191,7 +215,7 @@ def create_new_exploration_rights(exploration_id, committer_id): """ exploration_rights = rights_domain.ActivityRights( exploration_id, [committer_id], [], [], []) - commit_cmds = [{'cmd': rights_domain.CMD_CREATE_NEW}] + commit_cmds: List[Dict[str, str]] = [{'cmd': rights_domain.CMD_CREATE_NEW}] exp_models.ExplorationRightsModel( id=exploration_rights.id, @@ -209,7 +233,33 @@ def create_new_exploration_rights(exploration_id, committer_id): committer_id, exploration_id) -def get_exploration_rights(exploration_id, strict=True): +@overload +def get_exploration_rights( + exploration_id: str +) -> rights_domain.ActivityRights: ... + + +@overload +def get_exploration_rights( + exploration_id: str, *, strict: Literal[True] +) -> rights_domain.ActivityRights: ... + + +@overload +def get_exploration_rights( + exploration_id: str, *, strict: Literal[False] +) -> Optional[rights_domain.ActivityRights]: ... + + +@overload +def get_exploration_rights( + exploration_id: str, *, strict: bool = False +) -> Optional[rights_domain.ActivityRights]: ... + + +def get_exploration_rights( + exploration_id: str, strict: bool = True +) -> Optional[rights_domain.ActivityRights]: """Retrieves the rights for this exploration from the datastore. Args: @@ -232,7 +282,9 @@ def get_exploration_rights(exploration_id, strict=True): model, constants.ACTIVITY_TYPE_EXPLORATION) -def get_multiple_exploration_rights_by_ids(exp_ids): +def get_multiple_exploration_rights_by_ids( + exp_ids: List[str] +) -> List[Optional[rights_domain.ActivityRights]]: """Returns a list of ActivityRights objects for given exploration ids. 
Args: @@ -244,20 +296,22 @@ def get_multiple_exploration_rights_by_ids(exp_ids): """ exp_rights_models = exp_models.ExplorationRightsModel.get_multi( exp_ids) - exp_models_list = [] + activity_rights_list: List[Optional[rights_domain.ActivityRights]] = [] for model in exp_rights_models: if model is None: - exp_models_list.append(None) + activity_rights_list.append(None) else: - exp_models_list.append( + activity_rights_list.append( get_activity_rights_from_model( model, constants.ACTIVITY_TYPE_EXPLORATION)) - return exp_models_list + return activity_rights_list -def _get_activity_rights_where_user_is_owner(activity_type, user_id): +def _get_activity_rights_where_user_is_owner( + activity_type: str, user_id: str +) -> List[rights_domain.ActivityRights]: """Returns a list of activity rights where the user is the owner. Args: @@ -270,23 +324,41 @@ def _get_activity_rights_where_user_is_owner(activity_type, user_id): list(ActivityRights). List of domain objects where the user has some role. """ + # Ruling out the possibility of any other activity type. 
+ assert ( + activity_type in ( + constants.ACTIVITY_TYPE_COLLECTION, + constants.ACTIVITY_TYPE_EXPLORATION + ) + ) + if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: - rights_model_class = exp_models.ExplorationRightsModel + activity_rights_models: Sequence[ + Union[ + collection_models.CollectionRightsModel, + exp_models.ExplorationRightsModel + ] + ] = exp_models.ExplorationRightsModel.query( + datastore_services.any_of( + exp_models.ExplorationRightsModel.owner_ids == user_id + ) + ).fetch() elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: - rights_model_class = collection_models.CollectionRightsModel + activity_rights_models = collection_models.CollectionRightsModel.query( + datastore_services.any_of( + collection_models.CollectionRightsModel.owner_ids == user_id + ) + ).fetch() - activity_rights_models = rights_model_class.query( - datastore_services.any_of( - rights_model_class.owner_ids == user_id - ) - ).fetch() return [ get_activity_rights_from_model(activity_rights_model, activity_type) for activity_rights_model in activity_rights_models ] -def get_exploration_rights_where_user_is_owner(user_id): +def get_exploration_rights_where_user_is_owner( + user_id: str +) -> List[rights_domain.ActivityRights]: """Returns a list of exploration rights where the user is the owner. Args: @@ -301,7 +373,9 @@ def get_exploration_rights_where_user_is_owner(user_id): ) -def get_collection_rights_where_user_is_owner(user_id): +def get_collection_rights_where_user_is_owner( + user_id: str +) -> List[rights_domain.ActivityRights]: """Returns a list of collection rights where the user is the owner. Args: @@ -316,7 +390,7 @@ def get_collection_rights_where_user_is_owner(user_id): ) -def is_exploration_private(exploration_id): +def is_exploration_private(exploration_id: str) -> bool: """Returns whether exploration is private. 
Args: @@ -329,7 +403,7 @@ def is_exploration_private(exploration_id): return exploration_rights.status == rights_domain.ACTIVITY_STATUS_PRIVATE -def is_exploration_public(exploration_id): +def is_exploration_public(exploration_id: str) -> bool: """Returns whether exploration is public. Args: @@ -342,7 +416,7 @@ def is_exploration_public(exploration_id): return exploration_rights.status == rights_domain.ACTIVITY_STATUS_PUBLIC -def is_exploration_cloned(exploration_id): +def is_exploration_cloned(exploration_id: str) -> bool: """Returns whether the exploration is a clone of another exploration. Args: @@ -355,7 +429,9 @@ def is_exploration_cloned(exploration_id): return bool(exploration_rights.cloned_from) -def create_new_collection_rights(collection_id, committer_id): +def create_new_collection_rights( + collection_id: str, committer_id: str +) -> None: """Creates a new collection rights object and saves it to the datastore. Subscribes the committer to the new collection. @@ -382,7 +458,33 @@ def create_new_collection_rights(collection_id, committer_id): subscription_services.subscribe_to_collection(committer_id, collection_id) -def get_collection_rights(collection_id, strict=True): +@overload +def get_collection_rights( + collection_id: str +) -> rights_domain.ActivityRights: ... + + +@overload +def get_collection_rights( + collection_id: str, *, strict: Literal[True] +) -> rights_domain.ActivityRights: ... + + +@overload +def get_collection_rights( + collection_id: str, *, strict: Literal[False] +) -> Optional[rights_domain.ActivityRights]: ... + + +@overload +def get_collection_rights( + collection_id: str, *, strict: bool = False +) -> Optional[rights_domain.ActivityRights]: ... + + +def get_collection_rights( + collection_id: str, strict: bool = True +) -> Optional[rights_domain.ActivityRights]: """Retrieves the rights for this collection from the datastore. 
Args: @@ -404,7 +506,7 @@ def get_collection_rights(collection_id, strict=True): model, constants.ACTIVITY_TYPE_COLLECTION) -def get_collection_owner_names(collection_id): +def get_collection_owner_names(collection_id: str) -> List[str]: """Retrieves the owners for this collection from the datastore. Args: @@ -419,7 +521,7 @@ def get_collection_owner_names(collection_id): collection_rights.owner_ids) -def is_collection_private(collection_id): +def is_collection_private(collection_id: str) -> bool: """Returns whether the collection is private. Args: @@ -432,7 +534,7 @@ def is_collection_private(collection_id): return collection_rights.status == rights_domain.ACTIVITY_STATUS_PRIVATE -def is_collection_public(collection_id): +def is_collection_public(collection_id: str) -> bool: """Returns whether the collection is public. Args: @@ -445,7 +547,27 @@ def is_collection_public(collection_id): return collection_rights.status == rights_domain.ACTIVITY_STATUS_PUBLIC -def _get_activity_rights(activity_type, activity_id): +@overload +def _get_activity_rights( + activity_type: str, activity_id: str, *, strict: Literal[True] +) -> rights_domain.ActivityRights: ... + + +@overload +def _get_activity_rights( + activity_type: str, activity_id: str +) -> Optional[rights_domain.ActivityRights]: ... + + +@overload +def _get_activity_rights( + activity_type: str, activity_id: str, *, strict: Literal[False] +) -> Optional[rights_domain.ActivityRights]: ... + + +def _get_activity_rights( + activity_type: str, activity_id: str, strict: bool = False +) -> Optional[rights_domain.ActivityRights]: """Retrieves the rights object for the given activity based on its type. @@ -454,24 +576,31 @@ def _get_activity_rights(activity_type, activity_id): constants.ACTIVITY_TYPE_EXPLORATION, constants.ACTIVITY_TYPE_COLLECTION. activity_id: str. ID of the activity. + strict: bool. Whether to fail noisily if the activity_rights + doesn't exist for the given activity_id. Returns: - ActivityRights. 
The rights object associated with the given activity. + ActivityRights|None. The rights object associated with the given + activity, or None if no rights object exists. Raises: Exception. The activity_type provided is unknown. """ if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: - return get_exploration_rights(activity_id, strict=False) + activity_rights = get_exploration_rights(activity_id, strict=strict) elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: - return get_collection_rights(activity_id, strict=False) + activity_rights = get_collection_rights(activity_id, strict=strict) else: raise Exception( 'Cannot get activity rights for unknown activity type: %s' % ( activity_type)) + return activity_rights -def check_can_access_activity(user, activity_rights): +def check_can_access_activity( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: """Checks whether the user can access given activity. Args: @@ -490,15 +619,24 @@ def check_can_access_activity(user, activity_rights): role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY in user.actions) elif activity_rights.is_private(): return bool( - (role_services.ACTION_PLAY_ANY_PRIVATE_ACTIVITY in user.actions) or - activity_rights.is_viewer(user.user_id) or - activity_rights.is_owner(user.user_id) or - activity_rights.is_editor(user.user_id) or - activity_rights.is_voice_artist(user.user_id) or - activity_rights.viewable_if_private) + role_services.ACTION_PLAY_ANY_PRIVATE_ACTIVITY in user.actions or + ( + user.user_id and ( + activity_rights.is_viewer(user.user_id) or + activity_rights.is_owner(user.user_id) or + activity_rights.is_editor(user.user_id) or + activity_rights.is_voice_artist(user.user_id) or + activity_rights.viewable_if_private + ) + ) + ) + return False -def check_can_edit_activity(user, activity_rights): +def check_can_edit_activity( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: 
"""Checks whether the user can edit given activity. Args: @@ -516,8 +654,12 @@ def check_can_edit_activity(user, activity_rights): if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions: return False - if (activity_rights.is_owner(user.user_id) or - activity_rights.is_editor(user.user_id)): + if ( + user.user_id and ( + activity_rights.is_owner(user.user_id) or + activity_rights.is_editor(user.user_id) + ) + ): return True if (activity_rights.community_owned or @@ -531,7 +673,10 @@ def check_can_edit_activity(user, activity_rights): return False -def check_can_voiceover_activity(user, activity_rights): +def check_can_voiceover_activity( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: """Checks whether the user can voiceover given activity. Args: @@ -549,9 +694,13 @@ def check_can_voiceover_activity(user, activity_rights): if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions: return False - if (activity_rights.is_owner(user.user_id) or + if ( + user.user_id and ( + activity_rights.is_owner(user.user_id) or activity_rights.is_editor(user.user_id) or - activity_rights.is_voice_artist(user.user_id)): + activity_rights.is_voice_artist(user.user_id) + ) + ): return True if (activity_rights.community_owned or @@ -565,8 +714,13 @@ def check_can_voiceover_activity(user, activity_rights): return False -def check_can_manage_voice_artist_in_activity(user, activity_rights): +def check_can_manage_voice_artist_in_activity( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: """Check whether the user can manage voice artist for an activity. + Callers are expected to ensure that the activity is published when we are + adding voice artists. Args: user: UserActionInfo. 
Object having user_id, role, and actions for @@ -579,14 +733,14 @@ def check_can_manage_voice_artist_in_activity(user, activity_rights): """ if activity_rights is None: return False - elif (role_services.ACTION_CAN_MANAGE_VOICE_ARTIST in user.actions and ( - activity_rights.community_owned or activity_rights.is_published())): - return True - else: - return False + + return role_services.ACTION_CAN_MANAGE_VOICE_ARTIST in user.actions -def check_can_save_activity(user, activity_rights): +def check_can_save_activity( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: """Checks whether the user can save given activity. Args: @@ -603,7 +757,10 @@ def check_can_save_activity(user, activity_rights): check_can_voiceover_activity(user, activity_rights))) -def check_can_delete_activity(user, activity_rights): +def check_can_delete_activity( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: """Checks whether the user can delete given activity. 
Args: @@ -618,11 +775,16 @@ def check_can_delete_activity(user, activity_rights): if activity_rights is None: return False + if user.user_id is None: + return False + if role_services.ACTION_DELETE_ANY_ACTIVITY in user.actions: return True - elif (activity_rights.is_private() and - (role_services.ACTION_DELETE_OWNED_PRIVATE_ACTIVITY in user.actions) - and activity_rights.is_owner(user.user_id)): + elif ( + activity_rights.is_private() and + role_services.ACTION_DELETE_OWNED_PRIVATE_ACTIVITY in user.actions and + activity_rights.is_owner(user.user_id) + ): return True elif (activity_rights.is_published() and (role_services.ACTION_DELETE_ANY_PUBLIC_ACTIVITY in user.actions)): @@ -631,7 +793,10 @@ def check_can_delete_activity(user, activity_rights): return False -def check_can_modify_core_activity_roles(user, activity_rights): +def check_can_modify_core_activity_roles( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: """Checks whether the user can modify core roles for the given activity. The core roles for an activity includes owner, editor etc. @@ -647,6 +812,9 @@ def check_can_modify_core_activity_roles(user, activity_rights): if activity_rights is None: return False + if user.user_id is None: + return False + if activity_rights.community_owned or activity_rights.cloned_from: return False @@ -659,7 +827,10 @@ def check_can_modify_core_activity_roles(user, activity_rights): return False -def check_can_release_ownership(user, activity_rights): +def check_can_release_ownership( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: """Checks whether the user can release ownership for given activity. 
Args: @@ -681,7 +852,10 @@ def check_can_release_ownership(user, activity_rights): user, activity_rights) -def check_can_publish_activity(user, activity_rights): +def check_can_publish_activity( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: """Checks whether the user can publish given activity. Args: @@ -706,13 +880,16 @@ def check_can_publish_activity(user, activity_rights): return True if role_services.ACTION_PUBLISH_OWNED_ACTIVITY in user.actions: - if activity_rights.is_owner(user.user_id): + if user.user_id and activity_rights.is_owner(user.user_id): return True return False -def check_can_unpublish_activity(user, activity_rights): +def check_can_unpublish_activity( + user: user_domain.UserActionsInfo, + activity_rights: Optional[rights_domain.ActivityRights] +) -> bool: """Checks whether the user can unpublish given activity. Args: @@ -737,8 +914,13 @@ def check_can_unpublish_activity(user, activity_rights): def _assign_role( - committer, assignee_id, new_role, activity_id, activity_type, - allow_assigning_any_role=False): + committer: user_domain.UserActionsInfo, + assignee_id: str, + new_role: str, + activity_id: str, + activity_type: str, + allow_assigning_any_role: bool = False +) -> None: """Assigns a new role to the user. Args: @@ -768,15 +950,32 @@ def _assign_role( Exception. The user can already view the activity. Exception. The activity is already publicly viewable. Exception. The role is invalid. + Exception. No activity_rights exists for the given activity id. + Exception. Guest user is not allowed to assign roles. """ committer_id = committer.user_id + if committer_id is None: + raise Exception( + 'Guest user is not allowed to assign roles.' 
+ ) activity_rights = _get_activity_rights(activity_type, activity_id) - user_can_assign_role = False - if new_role == rights_domain.ROLE_VOICE_ARTIST and ( - activity_type == constants.ACTIVITY_TYPE_EXPLORATION): - user_can_assign_role = check_can_manage_voice_artist_in_activity( - committer, activity_rights) + if activity_rights is None: + raise Exception( + 'No activity_rights exists for the given activity_id: %s' % + activity_id + ) + + if ( + new_role == rights_domain.ROLE_VOICE_ARTIST and + activity_type == constants.ACTIVITY_TYPE_EXPLORATION + ): + if activity_rights.is_published(): + user_can_assign_role = check_can_manage_voice_artist_in_activity( + committer, activity_rights) + else: + raise Exception( + 'Could not assign voice artist to private activity.') else: user_can_assign_role = check_can_modify_core_activity_roles( committer, activity_rights) @@ -799,6 +998,7 @@ def _assign_role( rights_domain.ROLE_VIEWER ]: raise Exception('Invalid role: %s' % new_role) + # TODO(#12369): Currently, only exploration allows reassigning users to # any role. We are expecting to remove the below check and allow this # function to assign any role in general once the collection is removed. @@ -881,7 +1081,12 @@ def _assign_role( _update_activity_summary(activity_type, activity_rights) -def _deassign_role(committer, removed_user_id, activity_id, activity_type): +def _deassign_role( + committer: user_domain.UserActionsInfo, + removed_user_id: str, + activity_id: str, + activity_type: str +) -> None: """Deassigns given user from their current role in the activity. Args: @@ -897,13 +1102,26 @@ def _deassign_role(committer, removed_user_id, activity_id, activity_type): Raises: Exception. UnauthorizedUserException: Could not deassign role. Exception. This user does not have any role for the given activity. + Exception. No activity_rights exists for the given activity id. + Exception. Guest user is not allowed to deassign roles. 
""" committer_id = committer.user_id + if committer_id is None: + raise Exception( + 'Guest user is not allowed to deassign roles.' + ) activity_rights = _get_activity_rights(activity_type, activity_id) - user_can_deassign_role = False - if activity_rights.is_voice_artist(removed_user_id) and ( - activity_type == constants.ACTIVITY_TYPE_EXPLORATION): + if activity_rights is None: + raise Exception( + 'No activity_rights exists for the given activity_id: %s' % + activity_id + ) + + if ( + activity_rights.is_voice_artist(removed_user_id) and + activity_type == constants.ACTIVITY_TYPE_EXPLORATION + ): user_can_deassign_role = check_can_manage_voice_artist_in_activity( committer, activity_rights) else: @@ -917,6 +1135,7 @@ def _deassign_role(committer, removed_user_id, activity_id, activity_type): committer_id, removed_user_id, activity_id)) raise Exception( 'UnauthorizedUserException: Could not deassign role.') + if activity_rights.is_owner(removed_user_id): old_role = rights_domain.ROLE_OWNER activity_rights.owner_ids.remove(removed_user_id) @@ -934,7 +1153,7 @@ def _deassign_role(committer, removed_user_id, activity_id, activity_type): 'This user does not have any role in %s with ID %s' % (activity_type, activity_id)) - assignee_username = user_services.get_usernames(removed_user_id)[0] + assignee_username = user_services.get_usernames([removed_user_id])[0] if assignee_username is None: assignee_username = 'ANONYMOUS' commit_message = 'Remove %s from role %s for %s' % ( @@ -955,7 +1174,11 @@ def _deassign_role(committer, removed_user_id, activity_id, activity_type): _update_activity_summary(activity_type, activity_rights) -def _release_ownership_of_activity(committer, activity_id, activity_type): +def _release_ownership_of_activity( + committer: user_domain.UserActionsInfo, + activity_id: str, + activity_type: str +) -> None: """Releases ownership of the given activity to the community. 
Args: @@ -966,11 +1189,19 @@ def _release_ownership_of_activity(committer, activity_id, activity_type): constants.ACTIVITY_TYPE_EXPLORATION, constants.ACTIVITY_TYPE_COLLECTION. - Raise: + Raises: Exception. The committer does not have release rights. + Exception. The activity rights does not exist for the given activity_id. + Exception. Guest user is not allowed to release ownership of activity. """ committer_id = committer.user_id - activity_rights = _get_activity_rights(activity_type, activity_id) + if committer_id is None: + raise Exception( + 'Guest user is not allowed to release ownership of activity.' + ) + activity_rights = _get_activity_rights( + activity_type, activity_id, strict=True + ) if not check_can_release_ownership(committer, activity_rights): logging.error( @@ -995,7 +1226,12 @@ def _release_ownership_of_activity(committer, activity_id, activity_type): def _change_activity_status( - committer_id, activity_id, activity_type, new_status, commit_message): + committer_id: str, + activity_id: str, + activity_type: str, + new_status: str, + commit_message: str +) -> None: """Changes the status of the given activity. Args: @@ -1006,8 +1242,14 @@ def _change_activity_status( constants.ACTIVITY_TYPE_COLLECTION. new_status: str. The new status of the activity. commit_message: str. The human-written commit message for this change. + + Raises: + Exception. The activity rights does not exist for the given activity_id. 
""" - activity_rights = _get_activity_rights(activity_type, activity_id) + activity_rights = _get_activity_rights( + activity_type, activity_id, strict=True + ) + old_status = activity_rights.status activity_rights.status = new_status if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: @@ -1032,7 +1274,11 @@ def _change_activity_status( _update_activity_summary(activity_type, activity_rights) -def _publish_activity(committer, activity_id, activity_type): +def _publish_activity( + committer: user_domain.UserActionsInfo, + activity_id: str, + activity_type: str +) -> None: """Publishes the given activity. Args: @@ -1045,8 +1291,13 @@ def _publish_activity(committer, activity_id, activity_type): Raises: Exception. The committer does not have rights to publish the activity. + Exception. Guest user is not allowed to publish activities. """ committer_id = committer.user_id + if committer_id is None: + raise Exception( + 'Guest user is not allowed to publish activities.' + ) activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_publish_activity(committer, activity_rights): @@ -1064,7 +1315,11 @@ def _publish_activity(committer, activity_id, activity_type): ) -def _unpublish_activity(committer, activity_id, activity_type): +def _unpublish_activity( + committer: user_domain.UserActionsInfo, + activity_id: str, + activity_type: str +) -> None: """Unpublishes the given activity. Args: @@ -1077,8 +1332,13 @@ def _unpublish_activity(committer, activity_id, activity_type): Raises: Exception. The committer does not have rights to unpublish the activity. + Exception. Guest user is not allowed to unpublish activities. """ committer_id = committer.user_id + if committer_id is None: + raise Exception( + 'Guest user is not allowed to unpublish activities.' 
+ ) activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_unpublish_activity(committer, activity_rights): @@ -1100,7 +1360,11 @@ def _unpublish_activity(committer, activity_id, activity_type): # Rights functions for activities. def assign_role_for_exploration( - committer, exploration_id, assignee_id, new_role): + committer: user_domain.UserActionsInfo, + exploration_id: str, + assignee_id: str, + new_role: str +) -> None: """Assigns a user to the given role and subscribes the assignee to future exploration updates. @@ -1133,7 +1397,11 @@ def assign_role_for_exploration( assignee_id, exploration_id) -def deassign_role_for_exploration(committer, exploration_id, removed_user_id): +def deassign_role_for_exploration( + committer: user_domain.UserActionsInfo, + exploration_id: str, + removed_user_id: str +) -> None: """Deassigns a user from a given exploration. The caller should ensure that assignee_id corresponds to a valid user in @@ -1158,7 +1426,10 @@ def deassign_role_for_exploration(committer, exploration_id, removed_user_id): ) -def release_ownership_of_exploration(committer, exploration_id): +def release_ownership_of_exploration( + committer: user_domain.UserActionsInfo, + exploration_id: str +) -> None: """Releases ownership of the given exploration to the community. Args: @@ -1174,7 +1445,10 @@ def release_ownership_of_exploration(committer, exploration_id): def set_private_viewability_of_exploration( - committer, exploration_id, viewable_if_private): + committer: user_domain.UserActionsInfo, + exploration_id: str, + viewable_if_private: bool +) -> None: """Sets the viewable_if_private attribute for the given exploration's rights object. @@ -1191,8 +1465,13 @@ def set_private_viewability_of_exploration( Exception. The committer does not have the permission to perform change action. Exception. If the viewable_if_private property is already as desired. + Exception. Guest user is not allowed to set viewability of exploration. 
""" committer_id = committer.user_id + if committer_id is None: + raise Exception( + 'Guest user is not allowed to set viewability of exploration.' + ) exploration_rights = get_exploration_rights(exploration_id) # The user who can publish activity can change its private viewability. @@ -1210,7 +1489,7 @@ def set_private_viewability_of_exploration( 'but that is already the current value.' % viewable_if_private) exploration_rights.viewable_if_private = viewable_if_private - commit_cmds = [{ + commit_cmds: List[Dict[str, Union[str, bool]]] = [{ 'cmd': rights_domain.CMD_CHANGE_PRIVATE_VIEWABILITY, 'old_viewable_if_private': old_viewable_if_private, 'new_viewable_if_private': viewable_if_private, @@ -1226,7 +1505,10 @@ def set_private_viewability_of_exploration( _update_exploration_summary(exploration_rights) -def publish_exploration(committer, exploration_id): +def publish_exploration( + committer: user_domain.UserActionsInfo, + exploration_id: str +) -> None: """Publishes the given exploration. It is the responsibility of the caller to check that the exploration is @@ -1244,7 +1526,10 @@ def publish_exploration(committer, exploration_id): committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION) -def unpublish_exploration(committer, exploration_id): +def unpublish_exploration( + committer: user_domain.UserActionsInfo, + exploration_id: str +) -> None: """Unpublishes the given exploration. Args: @@ -1264,7 +1549,11 @@ def unpublish_exploration(committer, exploration_id): # Rights functions for collections. def assign_role_for_collection( - committer, collection_id, assignee_id, new_role): + committer: user_domain.UserActionsInfo, + collection_id: str, + assignee_id: str, + new_role: str +) -> None: """Assign the given user to the given role and subscribes the assignee to future collection updates. 
@@ -1291,7 +1580,11 @@ def assign_role_for_collection( assignee_id, collection_id) -def deassign_role_for_collection(committer, collection_id, removed_user_id): +def deassign_role_for_collection( + committer: user_domain.UserActionsInfo, + collection_id: str, + removed_user_id: str +) -> None: """Deassigns a user from a given collection. The caller should ensure that assignee_id corresponds to a valid user in @@ -1316,7 +1609,10 @@ def deassign_role_for_collection(committer, collection_id, removed_user_id): ) -def release_ownership_of_collection(committer, collection_id): +def release_ownership_of_collection( + committer: user_domain.UserActionsInfo, + collection_id: str +) -> None: """Releases ownership of the given collection to the community. Args: @@ -1331,7 +1627,10 @@ def release_ownership_of_collection(committer, collection_id): committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION) -def publish_collection(committer, collection_id): +def publish_collection( + committer: user_domain.UserActionsInfo, + collection_id: str +) -> None: """Publishes the given collection. It is the responsibility of the caller to check that the collection is @@ -1349,7 +1648,10 @@ def publish_collection(committer, collection_id): committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION) -def unpublish_collection(committer, collection_id): +def unpublish_collection( + committer: user_domain.UserActionsInfo, + collection_id: str +) -> None: """Unpublishes the given collection. 
Args: diff --git a/core/domain/rights_manager_test.py b/core/domain/rights_manager_test.py index 4104d85b9b29..513efbd632dd 100644 --- a/core/domain/rights_manager_test.py +++ b/core/domain/rights_manager_test.py @@ -26,16 +26,27 @@ from core.domain import role_services from core.domain import user_domain from core.domain import user_services +from core.platform import models from core.tests import test_utils +from typing import Final + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + +(exp_models,) = models.Registry.import_models([ + models.Names.EXPLORATION +]) + class ExplorationRightsTests(test_utils.GenericTestBase): """Test that rights for actions on explorations work as expected.""" - EXP_ID = 'exp_id' + EXP_ID: Final = 'exp_id' - def setUp(self): - super(ExplorationRightsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup('a@example.com', 'A') self.signup('b@example.com', 'B') self.signup('c@example.com', 'C') @@ -73,10 +84,10 @@ def setUp(self): self.user_voiceover_admin = user_services.get_user_actions_info( self.user_id_voiceover_admin) - def test_get_exploration_rights_for_nonexistent_exploration(self): + def test_get_exploration_rights_for_nonexistent_exploration(self) -> None: non_exp_id = 'this_exp_does_not_exist_id' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class ExplorationRightsModel with id ' 'this_exp_does_not_exist_id not found' @@ -86,7 +97,7 @@ def test_get_exploration_rights_for_nonexistent_exploration(self): self.assertIsNone( rights_manager.get_exploration_rights(non_exp_id, strict=False)) - def test_demo_exploration(self): + def test_demo_exploration(self) -> None: exp_services.load_demo('1') rights_manager.release_ownership_of_exploration( self.system_user, '1') @@ -126,7 +137,14 @@ def test_demo_exploration(self): rights_manager.check_can_manage_voice_artist_in_activity( self.user_a, None)) - def 
test_non_splash_page_demo_exploration(self): + def test_check_can_modify_core_activity_roles_for_none_activity( + self + ) -> None: + self.assertFalse( + rights_manager.check_can_modify_core_activity_roles( + self.user_a, None)) + + def test_non_splash_page_demo_exploration(self) -> None: # Note: there is no difference between permissions for demo # explorations, whether or not they are on the splash page. exp_services.load_demo('3') @@ -156,7 +174,7 @@ def test_non_splash_page_demo_exploration(self): self.assertTrue(rights_manager.check_can_delete_activity( self.user_moderator, exp_rights)) - def test_ownership_of_exploration(self): + def test_ownership_of_exploration(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) @@ -169,7 +187,7 @@ def test_ownership_of_exploration(self): self.assertFalse(exp_rights.is_owner(self.user_id_b)) self.assertFalse(exp_rights.is_owner(self.user_id_moderator)) - def test_newly_created_exploration(self): + def test_newly_created_exploration(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) @@ -207,7 +225,7 @@ def test_newly_created_exploration(self): self.assertFalse(rights_manager.check_can_delete_activity( self.user_b, exp_rights)) - def test_inviting_collaborator_to_exploration(self): + def test_inviting_collaborator_to_exploration(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) @@ -239,7 +257,7 @@ def test_inviting_collaborator_to_exploration(self): self.assertFalse(rights_manager.check_can_delete_activity( self.user_b, exp_rights)) - def test_inviting_voice_artist_to_exploration(self): + def test_inviting_voice_artist_to_exploration(self) 
-> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) @@ -272,7 +290,15 @@ def test_inviting_voice_artist_to_exploration(self): self.assertFalse(rights_manager.check_can_delete_activity( self.user_b, exp_rights)) - def test_inviting_playtester_to_exploration(self): + def test_get_activity_rights_raise_error_for_invalid_activity_type( + self + ) -> None: + with self.assertRaisesRegex( + Exception, 'Cannot get activity rights for unknown activity' + ): + rights_manager._get_activity_rights('invalid_type', self.user_id_a) # pylint: disable=protected-access + + def test_inviting_playtester_to_exploration(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) @@ -304,7 +330,7 @@ def test_inviting_playtester_to_exploration(self): self.assertFalse(rights_manager.check_can_delete_activity( self.user_b, exp_rights)) - def test_user_with_rights_to_edit_any_public_activity(self): + def test_user_with_rights_to_edit_any_public_activity(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) rights_manager.publish_exploration(self.user_a, self.EXP_ID) @@ -324,7 +350,7 @@ def test_user_with_rights_to_edit_any_public_activity(self): self.assertFalse(rights_manager.check_can_delete_activity( user_with_public_activity_rights, exp_rights)) - def test_user_with_rights_to_delete_any_public_activity(self): + def test_user_with_rights_to_delete_any_public_activity(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) rights_manager.publish_exploration(self.user_a, self.EXP_ID) @@ -344,7 +370,18 @@ def 
test_user_with_rights_to_delete_any_public_activity(self): self.assertTrue(rights_manager.check_can_delete_activity( user_with_public_activity_rights, exp_rights)) - def test_setting_rights_of_exploration(self): + def test_assign_role_for_exploration_raises_error_for_invalid_activity_id( + self + ) -> None: + exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) + exp_services.save_new_exploration(self.user_id_a, exp) + + with self.assertRaisesRegex(Exception, 'No activity_rights exists'): + rights_manager.assign_role_for_exploration( + self.user_b, 'abcdefg', self.user_id_c, + rights_domain.ROLE_VIEWER) + + def test_setting_rights_of_exploration(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) @@ -352,7 +389,7 @@ def test_setting_rights_of_exploration(self): self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VIEWER) - with self.assertRaisesRegexp(Exception, 'Could not assign new role.'): + with self.assertRaisesRegex(Exception, 'Could not assign new role.'): rights_manager.assign_role_for_exploration( self.user_b, self.EXP_ID, self.user_id_c, rights_domain.ROLE_VIEWER) @@ -361,7 +398,7 @@ def test_setting_rights_of_exploration(self): self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_EDITOR) - with self.assertRaisesRegexp(Exception, 'Could not assign new role.'): + with self.assertRaisesRegex(Exception, 'Could not assign new role.'): rights_manager.assign_role_for_exploration( self.user_b, self.EXP_ID, self.user_id_c, rights_domain.ROLE_VIEWER) @@ -380,7 +417,7 @@ def test_setting_rights_of_exploration(self): self.user_b, self.EXP_ID, self.user_id_f, rights_domain.ROLE_VIEWER) - def test_publishing_and_unpublishing_exploration(self): + def test_publishing_and_unpublishing_exploration(self) -> None: exp = exp_domain.Exploration.create_default_exploration( self.EXP_ID, title='A title', category='A category') 
exp_services.save_new_exploration(self.user_id_a, exp) @@ -405,7 +442,9 @@ def test_publishing_and_unpublishing_exploration(self): self.assertFalse(rights_manager.check_can_access_activity( self.user_b, exp_rights)) - def test_unpublished_exploration_is_removed_from_completed_activities(self): + def test_unpublished_exploration_is_removed_from_completed_activities( + self + ) -> None: exp = exp_domain.Exploration.create_default_exploration( self.EXP_ID, title='A title', category='A category') exp_services.save_new_exploration(self.user_id_a, exp) @@ -429,7 +468,8 @@ def test_unpublished_exploration_is_removed_from_completed_activities(self): ) def test_unpublished_exploration_is_removed_from_incomplete_activities( - self): + self + ) -> None: exp = exp_domain.Exploration.create_default_exploration( self.EXP_ID, title='A title', category='A category') exp_services.save_new_exploration(self.user_id_a, exp) @@ -451,7 +491,7 @@ def test_unpublished_exploration_is_removed_from_incomplete_activities( [] ) - def test_can_only_delete_unpublished_explorations(self): + def test_can_only_delete_unpublished_explorations(self) -> None: exp = exp_domain.Exploration.create_default_exploration( self.EXP_ID, title='A title', category='A category') exp_services.save_new_exploration(self.user_id_a, exp) @@ -472,7 +512,7 @@ def test_can_only_delete_unpublished_explorations(self): self.assertTrue(rights_manager.check_can_delete_activity( self.user_a, exp_rights)) - def test_changing_viewability_of_exploration(self): + def test_changing_viewability_of_exploration(self) -> None: exp = exp_domain.Exploration.create_default_exploration( self.EXP_ID, title='A title', category='A category') exp_services.save_new_exploration(self.user_id_a, exp) @@ -481,10 +521,10 @@ def test_changing_viewability_of_exploration(self): self.assertFalse(rights_manager.check_can_access_activity( self.user_b, exp_rights)) - with self.assertRaisesRegexp(Exception, 'already the current value'): + with 
self.assertRaisesRegex(Exception, 'already the current value'): rights_manager.set_private_viewability_of_exploration( self.user_a, self.EXP_ID, False) - with self.assertRaisesRegexp(Exception, 'cannot be changed'): + with self.assertRaisesRegex(Exception, 'cannot be changed'): rights_manager.set_private_viewability_of_exploration( self.user_b, self.EXP_ID, True) @@ -504,7 +544,7 @@ def test_changing_viewability_of_exploration(self): self.assertFalse(rights_manager.check_can_access_activity( self.user_b, exp_rights)) - def test_reassign_higher_role_to_exploration(self): + def test_reassign_higher_role_to_exploration(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) @@ -522,7 +562,7 @@ def test_reassign_higher_role_to_exploration(self): exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) self.assertTrue(exp_rights.is_owner(self.user_id_b)) - def test_reassign_lower_role_to_exploration(self): + def test_reassign_lower_role_to_exploration(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) @@ -540,7 +580,7 @@ def test_reassign_lower_role_to_exploration(self): exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) self.assertTrue(exp_rights.is_viewer(self.user_id_b)) - def test_check_exploration_rights(self): + def test_check_exploration_rights(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) rights_manager.assign_role_for_exploration( @@ -569,7 +609,7 @@ def test_check_exploration_rights(self): self.assertTrue(exp_rights.is_voice_artist(self.user_id_d)) self.assertFalse(exp_rights.is_voice_artist(self.user_id_b)) - def test_get_multiple_exploration_rights(self): + def test_get_multiple_exploration_rights(self) -> None: exp_ids = ['exp1', 'exp2', 'exp3', 'exp4'] # Saving only first 3 
explorations to check that None is returned for @@ -584,16 +624,16 @@ def test_get_multiple_exploration_rights(self): self.assertIsNotNone(rights_object) self.assertIsNone(exp_rights[3]) - def test_owner_cannot_be_reassigned_as_owner(self): + def test_owner_cannot_be_reassigned_as_owner(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) - with self.assertRaisesRegexp(Exception, 'This user already owns this'): + with self.assertRaisesRegex(Exception, 'This user already owns this'): rights_manager.assign_role_for_exploration( self.user_a, self.EXP_ID, self.user_id_a, rights_domain.ROLE_OWNER) - def test_assign_viewer_to_role_owner(self): + def test_assign_viewer_to_role_owner(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) @@ -610,17 +650,17 @@ def test_assign_viewer_to_role_owner(self): self.assertTrue(exp_rights.is_owner(self.user_id_b)) - def test_owner_cannot_assign_voice_artist(self): + def test_owner_cannot_assign_voice_artist(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) rights_manager.publish_exploration(self.user_a, self.EXP_ID) - with self.assertRaisesRegexp(Exception, 'Could not assign new role.'): + with self.assertRaisesRegex(Exception, 'Could not assign new role.'): rights_manager.assign_role_for_exploration( self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VOICE_ARTIST) - def test_voiceover_admin_can_modify_voice_artist_role(self): + def test_voiceover_admin_can_modify_voice_artist_role(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) rights_manager.publish_exploration(self.user_a, self.EXP_ID) @@ -628,16 +668,50 @@ def test_voiceover_admin_can_modify_voice_artist_role(self): 
rights_manager.assign_role_for_exploration( self.user_voiceover_admin, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VOICE_ARTIST) + exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) + self.assertTrue(exp_rights.is_voice_artist(self.user_id_b)) + + rights_manager.deassign_role_for_exploration( + self.user_voiceover_admin, self.EXP_ID, self.user_id_b) + + self.assertFalse(exp_rights.is_voice_artist(self.user_id_b)) + + def test_voice_artist_cannot_be_assigned_to_private_exploration( + self + ) -> None: + exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) + exp_services.save_new_exploration(self.user_id_a, exp) + + with self.assertRaisesRegex( + Exception, 'Could not assign voice artist to private activity.' + ): + rights_manager.assign_role_for_exploration( + self.user_voiceover_admin, self.EXP_ID, self.user_id_b, + rights_domain.ROLE_VOICE_ARTIST) + def test_voice_artist_can_be_unassigned_from_private_exploration( + self + ) -> None: + exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) + exp_services.save_new_exploration(self.user_id_a, exp) + rights_manager.publish_exploration(self.user_a, self.EXP_ID) + + rights_manager.assign_role_for_exploration( + self.user_voiceover_admin, self.EXP_ID, self.user_id_b, + rights_domain.ROLE_VOICE_ARTIST) + + exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) self.assertTrue(exp_rights.is_voice_artist(self.user_id_b)) + rights_manager.unpublish_exploration(self.user_moderator, self.EXP_ID) rights_manager.deassign_role_for_exploration( self.user_voiceover_admin, self.EXP_ID, self.user_id_b) + exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) self.assertFalse(exp_rights.is_voice_artist(self.user_id_b)) - def test_owner_cannot_assign_voice_artist_to_core_role(self): + def test_owner_cannot_assign_voice_artist_to_core_role(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, 
exp) rights_manager.publish_exploration(self.user_a, self.EXP_ID) @@ -648,12 +722,12 @@ def test_owner_cannot_assign_voice_artist_to_core_role(self): exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) self.assertFalse(exp_rights.is_owner(self.user_id_b)) - with self.assertRaisesRegexp(Exception, 'Could not assign new role.'): + with self.assertRaisesRegex(Exception, 'Could not assign new role.'): rights_manager.assign_role_for_exploration( self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VOICE_ARTIST) - def test_voice_artist_cannot_be_reassigned_as_voice_artist(self): + def test_voice_artist_cannot_be_reassigned_as_voice_artist(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) rights_manager.publish_exploration(self.user_a, self.EXP_ID) @@ -662,13 +736,13 @@ def test_voice_artist_cannot_be_reassigned_as_voice_artist(self): self.user_voiceover_admin, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VOICE_ARTIST) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This user already can voiceover this'): rights_manager.assign_role_for_exploration( self.user_voiceover_admin, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VOICE_ARTIST) - def test_viewer_cannot_be_reassigned_as_viewer(self): + def test_viewer_cannot_be_reassigned_as_viewer(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) @@ -676,42 +750,56 @@ def test_viewer_cannot_be_reassigned_as_viewer(self): self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VIEWER) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This user already can view this'): rights_manager.assign_role_for_exploration( self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VIEWER) - def test_public_explorations_cannot_be_assigned_role_viewer(self): + def 
test_public_explorations_cannot_be_assigned_role_viewer( + self + ) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) rights_manager.publish_exploration(self.user_a, self.EXP_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Public explorations can be viewed by anyone.'): rights_manager.assign_role_for_exploration( self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VIEWER) - def test_cannot_assign_invalid_role(self): + def test_cannot_assign_invalid_role(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) - with self.assertRaisesRegexp(Exception, 'Invalid role: invalid_role'): + with self.assertRaisesRegex(Exception, 'Invalid role: invalid_role'): rights_manager.assign_role_for_exploration( self.user_a, self.EXP_ID, self.user_id_b, 'invalid_role') - def test_deassign_without_rights_fails(self): + def test_deassign_role_for_exploration_raise_error_with_invalid_activity_id( + self + ) -> None: + exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) + exp_services.save_new_exploration(self.user_id_a, exp) + + with self.assertRaisesRegex( + Exception, 'No activity_rights exists for the given activity_id' + ): + rights_manager.deassign_role_for_exploration( + self.user_b, 'abcdefg', self.user_id_a) + + def test_deassign_without_rights_fails(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Could not deassign role'): rights_manager.deassign_role_for_exploration( self.user_b, self.EXP_ID, self.user_id_a) - def test_deassign_viewer_is_successful(self): + def test_deassign_viewer_is_successful(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) 
exp_services.save_new_exploration(self.user_id_a, exp) @@ -725,7 +813,7 @@ def test_deassign_viewer_is_successful(self): exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) self.assertFalse(exp_rights.is_viewer(self.user_id_b)) - def test_deassign_editor_is_successful(self): + def test_deassign_editor_is_successful(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) @@ -739,7 +827,24 @@ def test_deassign_editor_is_successful(self): exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) self.assertFalse(exp_rights.is_editor(self.user_id_b)) - def test_deassign_owner_is_successful(self): + def test_deassign_editor_is_successful_with_commit_message_having_anonymous( + self + ) -> None: + exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) + exp_services.save_new_exploration(self.user_id_a, exp) + + rights_manager.assign_role_for_exploration( + self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_EDITOR) + exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) + self.assertTrue(exp_rights.is_editor(self.user_id_b)) + + with self.swap_to_always_return(user_services, 'get_usernames', [None]): + rights_manager.deassign_role_for_exploration( + self.user_a, self.EXP_ID, self.user_id_b) + exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) + self.assertFalse(exp_rights.is_editor(self.user_id_b)) + + def test_deassign_owner_is_successful(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) exp_services.save_new_exploration(self.user_id_a, exp) @@ -753,24 +858,163 @@ def test_deassign_owner_is_successful(self): exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) self.assertFalse(exp_rights.is_owner(self.user_id_b)) - def test_deassign_non_existent_fails(self): + def test_deassign_non_existent_fails(self) -> None: exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) 
exp_services.save_new_exploration(self.user_id_a, exp) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This user does not have any role in'): rights_manager.deassign_role_for_exploration( self.user_a, self.EXP_ID, self.user_id_b) + def test_deassign_editor_is_successful_with_all_valid_commit_messages( + self + ) -> None: + self.signup('testuser@example.com', 'TestUser') + test_user = self.get_user_id_from_email('testuser@example.com') + editor_username = 'TestUser' + self.assertEqual( + user_services.get_username(test_user), + editor_username + ) + + # Creating new exploration. + exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID) + exp_services.save_new_exploration(self.user_id_a, exp) + + snapshots_data = ( + exp_models.ExplorationRightsModel.get_snapshots_metadata( + self.EXP_ID, [1] + ) + ) + self.assertEqual( + snapshots_data[0]['commit_message'], + 'Created new exploration' + ) + + # Assigning editor role to editor_username ('TestUser'). + rights_manager.assign_role_for_exploration( + self.user_a, self.EXP_ID, test_user, rights_domain.ROLE_EDITOR) + exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) + self.assertTrue(exp_rights.is_editor(test_user)) + + snapshots_data = ( + exp_models.ExplorationRightsModel.get_snapshots_metadata( + self.EXP_ID, [2] + ) + ) + self.assertEqual( + snapshots_data[0]['commit_message'], + 'Changed role of TestUser from none to editor' + ) + + # De-assigning editor role from editor_username ('TestUser'). 
+ rights_manager.deassign_role_for_exploration( + self.user_a, self.EXP_ID, test_user) + exp_rights = rights_manager.get_exploration_rights(self.EXP_ID) + self.assertFalse(exp_rights.is_editor(test_user)) + + snapshots_data = ( + exp_models.ExplorationRightsModel.get_snapshots_metadata( + self.EXP_ID, [3] + ) + ) + self.assertEqual( + snapshots_data[0]['commit_message'], + 'Remove TestUser from role editor for exploration' + ) + + def test_guest_user_cannot_assign_roles_for_exploration(self) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'Guest user is not allowed to assign roles.' + ): + rights_manager.assign_role_for_exploration( + guest_user, 'exp_id', 'assignee_id', rights_domain.ROLE_VIEWER + ) + + def test_guest_user_cannot_deassign_roles_for_exploration(self) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'Guest user is not allowed to deassign roles.' + ): + rights_manager.deassign_role_for_exploration( + guest_user, 'exp_id', 'assignee_id' + ) + + def test_guest_user_cannot_release_ownership_of_exploration(self) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'Guest user is not allowed to release ownership of activity.' + ): + rights_manager.release_ownership_of_exploration( + guest_user, 'exp_id' + ) + + def test_guest_user_cannot_publish_exploration_activities(self) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'Guest user is not allowed to publish activities.' + ): + rights_manager.publish_exploration(guest_user, 'exp_id') + + def test_guest_user_cannot_unpublish_exploration_activities(self) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'Guest user is not allowed to unpublish activities.' 
+ ): + rights_manager.unpublish_exploration(guest_user, 'exp_id') + + def test_guest_user_cannot_set_viewability_of_exploration(self) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'Guest user is not allowed to set viewability of exploration.' + ): + rights_manager.set_private_viewability_of_exploration( + guest_user, 'exp_id', False + ) + + def test_guest_user_cannot_perform_activity_actions(self) -> None: + collection_services.load_demo('0') + collection_rights = rights_manager.get_collection_rights('0') + guest_user = user_services.get_user_actions_info(None) + + # Testing guest user is not allowed to delete activity. + self.assertFalse( + rights_manager.check_can_delete_activity( + guest_user, collection_rights + ) + ) + + # Testing guest user is not allowed to modify core activity roles. + self.assertFalse( + rights_manager.check_can_modify_core_activity_roles( + guest_user, collection_rights + ) + ) + + # Testing guest user is not allowed to publish activity. 
+ self.assertFalse( + rights_manager.check_can_publish_activity( + guest_user, collection_rights + ) + ) + class CollectionRightsTests(test_utils.GenericTestBase): """Test that rights for actions on collections work as expected.""" - COLLECTION_ID = 'collection_id' - EXP_ID_FOR_COLLECTION = 'exp_id_for_collection' + COLLECTION_ID: Final = 'collection_id' + EXP_ID_FOR_COLLECTION: Final = 'exp_id_for_collection' - def setUp(self): - super(CollectionRightsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup('a@example.com', 'A') self.signup('b@example.com', 'B') self.signup('c@example.com', 'C') @@ -796,10 +1040,10 @@ def setUp(self): self.system_user = user_services.get_system_user() self.login(self.MODERATOR_EMAIL) - def test_get_collection_rights_for_nonexistent_collection(self): + def test_get_collection_rights_for_nonexistent_collection(self) -> None: non_col_id = 'this_collection_does_not_exist_id' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity for class CollectionRightsModel with id ' 'this_collection_does_not_exist_id not found' @@ -809,7 +1053,7 @@ def test_get_collection_rights_for_nonexistent_collection(self): self.assertIsNone( rights_manager.get_collection_rights(non_col_id, strict=False)) - def test_demo_collection(self): + def test_demo_collection(self) -> None: collection_services.load_demo('0') rights_manager.release_ownership_of_collection( self.system_user, '0') @@ -828,8 +1072,11 @@ def test_demo_collection(self): self.user_moderator, collection_rights)) self.assertTrue(rights_manager.check_can_delete_activity( self.user_moderator, collection_rights)) + collection_rights.status = 'invalid_status' + self.assertFalse(rights_manager.check_can_access_activity( + self.user_moderator, collection_rights)) - def test_ownership_of_collection(self): + def test_ownership_of_collection(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) 
rights_manager.assign_role_for_collection( @@ -848,7 +1095,7 @@ def test_ownership_of_collection(self): self.assertFalse(collection_rights.is_owner(self.user_id_moderator)) - def test_newly_created_collection(self): + def test_newly_created_collection(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) self.assertListEqual( @@ -879,17 +1126,17 @@ def test_newly_created_collection(self): self.assertFalse(rights_manager.check_can_delete_activity( self.user_b, collection_rights)) - def test_owner_cannot_be_reassigned_as_owner(self): + def test_owner_cannot_be_reassigned_as_owner(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) - with self.assertRaisesRegexp(Exception, 'This user already owns this'): + with self.assertRaisesRegex(Exception, 'This user already owns this'): rights_manager.assign_role_for_collection( self.user_a, self.COLLECTION_ID, self.user_id_a, rights_domain.ROLE_OWNER) - def test_editor_can_be_reassigned_as_owner(self): + def test_editor_can_be_reassigned_as_owner(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) @@ -909,7 +1156,7 @@ def test_editor_can_be_reassigned_as_owner(self): self.assertTrue(collection_rights.is_owner(self.user_id_b)) self.assertFalse(collection_rights.is_editor(self.user_id_b)) - def test_voiceartist_can_be_reassigned_as_owner(self): + def test_voiceartist_can_be_reassigned_as_owner(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) @@ -929,7 +1176,7 @@ def test_voiceartist_can_be_reassigned_as_owner(self): self.assertTrue(collection_rights.is_owner(self.user_id_b)) self.assertFalse(collection_rights.is_voice_artist(self.user_id_b)) - 
def test_viewer_can_be_reassigned_as_owner(self): + def test_viewer_can_be_reassigned_as_owner(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) @@ -949,7 +1196,7 @@ def test_viewer_can_be_reassigned_as_owner(self): self.assertTrue(collection_rights.is_owner(self.user_id_b)) self.assertFalse(collection_rights.is_viewer(self.user_id_b)) - def test_viewer_can_be_reassigned_as_editor(self): + def test_viewer_can_be_reassigned_as_editor(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) @@ -969,7 +1216,7 @@ def test_viewer_can_be_reassigned_as_editor(self): self.assertTrue(collection_rights.is_editor(self.user_id_b)) self.assertFalse(collection_rights.is_viewer(self.user_id_b)) - def test_voiceartist_can_be_reassigned_as_editor(self): + def test_voiceartist_can_be_reassigned_as_editor(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) @@ -989,7 +1236,7 @@ def test_voiceartist_can_be_reassigned_as_editor(self): self.assertTrue(collection_rights.is_editor(self.user_id_b)) self.assertFalse(collection_rights.is_voice_artist(self.user_id_b)) - def test_viewer_can_be_reassigned_as_voiceartist(self): + def test_viewer_can_be_reassigned_as_voiceartist(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) @@ -1009,7 +1256,7 @@ def test_viewer_can_be_reassigned_as_voiceartist(self): self.assertTrue(collection_rights.is_voice_artist(self.user_id_b)) self.assertFalse(collection_rights.is_viewer(self.user_id_b)) - def test_editor_cannot_be_reassigned_as_editor(self): + def 
test_editor_cannot_be_reassigned_as_editor(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) @@ -1018,13 +1265,13 @@ def test_editor_cannot_be_reassigned_as_editor(self): self.user_a, self.COLLECTION_ID, self.user_id_b, rights_domain.ROLE_EDITOR) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This user already can edit this'): rights_manager.assign_role_for_collection( self.user_a, self.COLLECTION_ID, self.user_id_b, rights_domain.ROLE_EDITOR) - def test_voice_artist_cannot_be_reassigned_as_voice_artist(self): + def test_voice_artist_cannot_be_reassigned_as_voice_artist(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) @@ -1033,13 +1280,13 @@ def test_voice_artist_cannot_be_reassigned_as_voice_artist(self): self.user_a, self.COLLECTION_ID, self.user_id_b, rights_domain.ROLE_VOICE_ARTIST) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This user already can voiceover this'): rights_manager.assign_role_for_collection( self.user_a, self.COLLECTION_ID, self.user_id_b, rights_domain.ROLE_VOICE_ARTIST) - def test_viewer_cannot_be_reassigned_as_viewer(self): + def test_viewer_cannot_be_reassigned_as_viewer(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) @@ -1048,26 +1295,26 @@ def test_viewer_cannot_be_reassigned_as_viewer(self): self.user_a, self.COLLECTION_ID, self.user_id_b, rights_domain.ROLE_VIEWER) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This user already can view this'): rights_manager.assign_role_for_collection( self.user_a, self.COLLECTION_ID, self.user_id_b, rights_domain.ROLE_VIEWER) - def 
test_public_collection_cannot_be_assigned_role_viewer(self): + def test_public_collection_cannot_be_assigned_role_viewer(self) -> None: collection = collection_domain.Collection.create_default_collection( self.COLLECTION_ID) collection_services.save_new_collection(self.user_id_a, collection) rights_manager.publish_collection(self.user_a, self.COLLECTION_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Public collections can be viewed by anyone.'): rights_manager.assign_role_for_collection( self.user_a, self.COLLECTION_ID, self.user_id_b, rights_domain.ROLE_VIEWER) - def test_inviting_collaborator_to_collection(self): + def test_inviting_collaborator_to_collection(self) -> None: self.save_new_valid_collection( self.COLLECTION_ID, self.user_id_a, exploration_id=self.EXP_ID_FOR_COLLECTION) @@ -1117,7 +1364,7 @@ def test_inviting_collaborator_to_collection(self): self.assertFalse(rights_manager.check_can_edit_activity( self.user_b, exp_for_collection_rights)) - def test_inviting_playtester_to_collection(self): + def test_inviting_playtester_to_collection(self) -> None: self.save_new_valid_collection( self.COLLECTION_ID, self.user_id_a, exploration_id=self.EXP_ID_FOR_COLLECTION) @@ -1161,14 +1408,14 @@ def test_inviting_playtester_to_collection(self): self.assertFalse(rights_manager.check_can_edit_activity( self.user_b, exp_for_collection_rights)) - def test_setting_rights_of_collection(self): + def test_setting_rights_of_collection(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) rights_manager.assign_role_for_collection( self.user_a, self.COLLECTION_ID, self.user_id_b, rights_domain.ROLE_VIEWER) - with self.assertRaisesRegexp(Exception, 'Could not assign new role.'): + with self.assertRaisesRegex(Exception, 'Could not assign new role.'): rights_manager.assign_role_for_collection( self.user_b, self.COLLECTION_ID, self.user_id_c, rights_domain.ROLE_VIEWER) @@ -1177,7 +1424,7 @@ def 
test_setting_rights_of_collection(self): self.user_a, self.COLLECTION_ID, self.user_id_b, rights_domain.ROLE_EDITOR) - with self.assertRaisesRegexp(Exception, 'Could not assign new role.'): + with self.assertRaisesRegex(Exception, 'Could not assign new role.'): rights_manager.assign_role_for_collection( self.user_b, self.COLLECTION_ID, self.user_id_c, rights_domain.ROLE_VIEWER) @@ -1196,7 +1443,7 @@ def test_setting_rights_of_collection(self): self.user_b, self.COLLECTION_ID, self.user_id_e, rights_domain.ROLE_VIEWER) - def test_publishing_and_unpublishing_collection(self): + def test_publishing_and_unpublishing_collection(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) collection_rights = rights_manager.get_collection_rights( self.COLLECTION_ID) @@ -1223,7 +1470,7 @@ def test_publishing_and_unpublishing_collection(self): self.assertFalse(rights_manager.check_can_access_activity( self.user_b, collection_rights)) - def test_can_only_delete_unpublished_collections(self): + def test_can_only_delete_unpublished_collections(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) collection_rights = rights_manager.get_collection_rights( self.COLLECTION_ID) @@ -1246,15 +1493,15 @@ def test_can_only_delete_unpublished_collections(self): self.assertTrue(rights_manager.check_can_delete_activity( self.user_a, collection_rights)) - def test_deassign_without_rights_fails(self): + def test_deassign_without_rights_fails(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Could not deassign role'): rights_manager.deassign_role_for_collection( self.user_b, self.COLLECTION_ID, self.user_id_a) - def test_deassign_viewer_is_successful(self): + def test_deassign_viewer_is_successful(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) rights_manager.assign_role_for_collection( @@ -1272,7 
+1519,7 @@ def test_deassign_viewer_is_successful(self): col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID) self.assertFalse(col_rights.is_viewer(self.user_id_b)) - def test_deassign_voice_artist_is_successful(self): + def test_deassign_voice_artist_is_successful(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) rights_manager.assign_role_for_collection( @@ -1289,7 +1536,7 @@ def test_deassign_voice_artist_is_successful(self): col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID) self.assertFalse(col_rights.is_voice_artist(self.user_id_b)) - def test_deassign_editor_is_successful(self): + def test_deassign_editor_is_successful(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) rights_manager.assign_role_for_collection( @@ -1306,7 +1553,7 @@ def test_deassign_editor_is_successful(self): col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID) self.assertFalse(col_rights.is_editor(self.user_id_b)) - def test_deassign_owner_is_successful(self): + def test_deassign_owner_is_successful(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) rights_manager.assign_role_for_collection( @@ -1323,10 +1570,10 @@ def test_deassign_owner_is_successful(self): col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID) self.assertFalse(col_rights.is_owner(self.user_id_b)) - def test_deassign_non_existent_fails(self): + def test_deassign_non_existent_fails(self) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This user does not have any role in'): rights_manager.deassign_role_for_collection( self.user_a, self.COLLECTION_ID, self.user_id_b) @@ -1335,11 +1582,11 @@ def test_deassign_non_existent_fails(self): class CheckCanReleaseOwnershipTest(test_utils.GenericTestBase): """Tests for check_can_release_ownership function.""" - 
published_exp_id = 'exp_id_1' - private_exp_id = 'exp_id_2' + published_exp_id: str = 'exp_id_1' + private_exp_id: str = 'exp_id_2' - def setUp(self): - super(CheckCanReleaseOwnershipTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.set_moderators([self.MODERATOR_USERNAME]) @@ -1353,22 +1600,30 @@ def setUp(self): self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) - def test_moderator_can_release_ownership_of_published_exploration(self): + def test_moderator_can_release_ownership_of_published_exploration( + self + ) -> None: self.assertTrue(rights_manager.check_can_release_ownership( self.moderator, rights_manager.get_exploration_rights(self.published_exp_id))) - def test_owner_can_release_ownership_of_published_exploration(self): + def test_owner_can_release_ownership_of_published_exploration( + self + ) -> None: self.assertTrue(rights_manager.check_can_release_ownership( self.owner, rights_manager.get_exploration_rights(self.published_exp_id))) - def test_moderator_cannot_release_ownership_of_private_exploration(self): + def test_moderator_cannot_release_ownership_of_private_exploration( + self + ) -> None: self.assertFalse(rights_manager.check_can_release_ownership( self.moderator, rights_manager.get_exploration_rights(self.private_exp_id))) - def test_owner_cannot_release_ownership_of_private_exploration(self): + def test_owner_cannot_release_ownership_of_private_exploration( + self + ) -> None: self.assertFalse(rights_manager.check_can_release_ownership( self.owner, rights_manager.get_exploration_rights(self.private_exp_id))) @@ -1377,13 +1632,13 @@ def test_owner_cannot_release_ownership_of_private_exploration(self): class CheckCanUnpublishActivityTest(test_utils.GenericTestBase): """Tests for check_can_unpublish_activity function.""" - published_exp_id = 'exp_id_1' - 
private_exp_id = 'exp_id_2' - private_col_id = 'col_id_1' - published_col_id = 'col_id_2' + published_exp_id: str = 'exp_id_1' + private_exp_id: str = 'exp_id_2' + private_col_id: str = 'col_id_1' + published_col_id: str = 'col_id_2' - def setUp(self): - super(CheckCanUnpublishActivityTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL) @@ -1404,32 +1659,32 @@ def setUp(self): rights_manager.publish_exploration(self.owner, self.published_exp_id) rights_manager.publish_collection(self.owner, self.published_col_id) - def test_moderator_can_unpublish_published_collection(self): + def test_moderator_can_unpublish_published_collection(self) -> None: self.assertTrue(rights_manager.check_can_unpublish_activity( self.moderator, rights_manager.get_collection_rights(self.published_col_id))) - def test_owner_cannot_unpublish_published_collection(self): + def test_owner_cannot_unpublish_published_collection(self) -> None: self.assertFalse(rights_manager.check_can_unpublish_activity( self.owner, rights_manager.get_collection_rights(self.published_col_id))) - def test_moderator_cannot_unpublish_private_collection(self): + def test_moderator_cannot_unpublish_private_collection(self) -> None: self.assertFalse(rights_manager.check_can_unpublish_activity( self.moderator, rights_manager.get_collection_rights(self.private_col_id))) - def test_moderator_can_unpublish_published_exploration(self): + def test_moderator_can_unpublish_published_exploration(self) -> None: self.assertTrue(rights_manager.check_can_unpublish_activity( self.moderator, rights_manager.get_exploration_rights(self.published_exp_id))) - def test_owner_cannot_unpublish_published_exploration(self): + def test_owner_cannot_unpublish_published_exploration(self) -> None: self.assertFalse(rights_manager.check_can_unpublish_activity( 
self.owner, rights_manager.get_exploration_rights(self.published_exp_id))) - def test_moderator_cannot_unpublish_private_exploration(self): + def test_moderator_cannot_unpublish_private_exploration(self) -> None: self.assertFalse(rights_manager.check_can_unpublish_activity( self.moderator, rights_manager.get_exploration_rights(self.private_exp_id))) diff --git a/core/domain/role_services.py b/core/domain/role_services.py index 62e22bb7449b..a94f0ae9d7ad 100644 --- a/core/domain/role_services.py +++ b/core/domain/role_services.py @@ -29,12 +29,16 @@ from core import feconf from core.platform import models -(audit_models,) = models.Registry.import_models([models.NAMES.audit]) +from typing import Dict, List, Optional + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import audit_models + +(audit_models,) = models.Registry.import_models([models.Names.AUDIT]) # Actions that can be performed in the system. ACTION_ACCEPT_ANY_SUGGESTION = 'ACCEPT_ANY_SUGGESTION' -ACTION_ACCEPT_ANY_VOICEOVER_APPLICATION = ( - 'ACTION_ACCEPT_ANY_VOICEOVER_APPLICATION') ACTION_ACCESS_CREATOR_DASHBOARD = 'ACCESS_CREATOR_DASHBOARD' ACTION_ACCESS_LEARNER_DASHBOARD = 'ACCESS_LEARNER_DASHBOARD' ACTION_ACCESS_MODERATOR_PAGE = 'ACCESS_MODERATOR_PAGE' @@ -90,13 +94,13 @@ ACTION_PUBLISH_OWNED_SKILL = 'PUBLISH_OWNED_SKILL' ACTION_RATE_ANY_PUBLIC_EXPLORATION = 'RATE_ANY_PUBLIC_EXPLORATION' ACTION_SEND_MODERATOR_EMAILS = 'SEND_MODERATOR_EMAILS' -ACTION_SUBMIT_VOICEOVER_APPLICATION = 'ACTION_SUBMIT_VOICEOVER_APPLICATION' ACTION_SUBSCRIBE_TO_USERS = 'SUBSCRIBE_TO_USERS' ACTION_SUGGEST_CHANGES = 'SUGGEST_CHANGES' ACTION_UNPUBLISH_ANY_PUBLIC_ACTIVITY = 'UNPUBLISH_ANY_PUBLIC_ACTIVITY' ACTION_VISIT_ANY_QUESTION_EDITOR_PAGE = 'VISIT_ANY_QUESTION_EDITOR_PAGE' ACTION_VISIT_ANY_TOPIC_EDITOR_PAGE = 'VISIT_ANY_TOPIC_EDITOR_PAGE' ACTION_CAN_MANAGE_VOICE_ARTIST = 'CAN_MANAGE_VOICE_ARTIST' +ACTION_ACCESS_LEARNER_GROUPS = 'ACCESS_LEARNER_GROUPS' # Users can be updated to the following list of role IDs via 
admin interface. # @@ -154,7 +158,6 @@ _ROLE_ACTIONS = { feconf.ROLE_ID_CURRICULUM_ADMIN: [ ACTION_ACCEPT_ANY_SUGGESTION, - ACTION_ACCEPT_ANY_VOICEOVER_APPLICATION, ACTION_ACCESS_TOPICS_AND_SKILLS_DASHBOARD, ACTION_CHANGE_STORY_STATUS, ACTION_CHANGE_TOPIC_STATUS, @@ -186,6 +189,7 @@ feconf.ROLE_ID_FULL_USER: [ ACTION_ACCESS_CREATOR_DASHBOARD, ACTION_ACCESS_LEARNER_DASHBOARD, + ACTION_ACCESS_LEARNER_GROUPS, ACTION_CREATE_EXPLORATION, ACTION_DELETE_OWNED_PRIVATE_ACTIVITY, ACTION_EDIT_OWNED_ACTIVITY, @@ -196,8 +200,7 @@ ACTION_PUBLISH_OWNED_ACTIVITY, ACTION_RATE_ANY_PUBLIC_EXPLORATION, ACTION_SUBSCRIBE_TO_USERS, - ACTION_SUGGEST_CHANGES, - ACTION_SUBMIT_VOICEOVER_APPLICATION + ACTION_SUGGEST_CHANGES ], feconf.ROLE_ID_GUEST: [ ACTION_PLAY_ANY_PUBLIC_ACTIVITY @@ -255,7 +258,7 @@ } -def get_all_actions(roles): +def get_all_actions(roles: List[str]) -> List[str]: """Returns a list of all actions that can be performed by the given role. Args: @@ -277,7 +280,7 @@ def get_all_actions(roles): return list(role_actions) -def get_role_actions(): +def get_role_actions() -> Dict[str, List[str]]: """Returns the possible role to actions items in the application. 
Returns: @@ -287,7 +290,12 @@ def get_role_actions(): return copy.deepcopy(_ROLE_ACTIONS) -def log_role_query(user_id, intent, role=None, username=None): +def log_role_query( + user_id: str, + intent: str, + role: Optional[str] = None, + username: Optional[str] = None +) -> None: """Stores the query to role structure in RoleQueryAuditModel.""" model_id = '%s.%s.%s.%s' % ( user_id, int(math.floor(time.time())), intent, random.randint(0, 1000) diff --git a/core/domain/role_services_test.py b/core/domain/role_services_test.py index a5dc58d120c0..49a01d3f2f8e 100644 --- a/core/domain/role_services_test.py +++ b/core/domain/role_services_test.py @@ -20,13 +20,14 @@ from core import feconf from core.domain import role_services +from core.storage.audit import gae_models from core.tests import test_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): """Tests for roles and actions.""" - def test_get_role_actions_return_value_in_correct_schema(self): + def test_get_role_actions_return_value_in_correct_schema(self) -> None: role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) @@ -37,8 +38,8 @@ def test_get_role_actions_return_value_in_correct_schema(self): for action_name in allotted_actions: self.assertTrue(isinstance(action_name, str)) - def test_get_all_actions(self): - with self.assertRaisesRegexp( + def test_get_all_actions(self) -> None: + with self.assertRaisesRegex( Exception, 'Role TEST_ROLE does not exist.'): role_services.get_all_actions(['TEST_ROLE']) @@ -46,7 +47,20 @@ def test_get_all_actions(self): role_services.get_all_actions([feconf.ROLE_ID_GUEST]), [role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY]) - def test_action_allocated_to_all_allowed_roles(self): + def test_action_allocated_to_all_allowed_roles(self) -> None: role_actions = role_services.get_role_actions() - self.assertItemsEqual(list(role_actions), feconf.ALLOWED_USER_ROLES) + self.assertItemsEqual( + list(role_actions), 
feconf.ALLOWED_USER_ROLES) + + def test_log_role_query(self) -> None: + self.assertEqual( + gae_models.RoleQueryAuditModel.has_reference_to_user_id( + 'TEST_USER'), + False) + role_services.log_role_query( + 'TEST_USER', feconf.ROLE_ACTION_ADD, role='GUEST') + self.assertEqual( + gae_models.RoleQueryAuditModel.has_reference_to_user_id( + 'TEST_USER'), + True) diff --git a/core/domain/rte_component_registry.py b/core/domain/rte_component_registry.py index 87ee37dc8f6b..2f728da3b756 100644 --- a/core/domain/rte_component_registry.py +++ b/core/domain/rte_component_registry.py @@ -24,33 +24,67 @@ from core import constants from core import feconf -from core import python_utils from core import utils +from typing import Any, Dict, List, Type, TypedDict, Union + +MYPY = False +if MYPY: # pragma: no cover + # Here, we are importing 'components' from rich_text_components only + # for type checking. + from extensions.rich_text_components import components + + +class CustomizationArgSpecDict(TypedDict): + """Dictionary representing the customization_arg_specs object.""" + + name: str + description: str + # Here we use type Any because values in schema dictionary can be of + # type str, List, Dict and other types too. 
+ schema: Dict[str, Any] + default_value: Union[str, int, List[str], Dict[str, str]] + + +class RteComponentDict(TypedDict): + """Dictionary representing the RTE component's definition.""" + + backend_id: str + category: str + description: str + frontend_id: str + tooltip: str + icon_data_url: str + is_complex: bool + requires_internet: bool + requires_fs: bool + is_block_element: bool + customization_arg_specs: List[CustomizationArgSpecDict] + class Registry: """Registry of all custom rich-text components.""" - _rte_components = {} + _rte_components: Dict[str, RteComponentDict] = {} @classmethod - def _refresh(cls): + def _refresh(cls) -> None: """Repopulate the registry.""" cls._rte_components.clear() package, filepath = os.path.split( feconf.RTE_EXTENSIONS_DEFINITIONS_PATH) cls._rte_components = constants.parse_json_from_ts( - python_utils.get_package_file_contents(package, filepath)) + constants.get_package_file_contents(package, filepath)) @classmethod - def get_all_rte_components(cls): + def get_all_rte_components(cls) -> Dict[str, RteComponentDict]: """Get a dictionary mapping RTE component IDs to their definitions.""" if not cls._rte_components: cls._refresh() return cls._rte_components @classmethod - def get_tag_list_with_attrs(cls): + def get_tag_list_with_attrs(cls) -> Dict[str, List[str]]: """Returns a dict of HTML tag names and attributes for RTE components. The keys are tag names starting with 'oppia-noninteractive-', followed @@ -75,7 +109,9 @@ def get_tag_list_with_attrs(cls): return component_tags @classmethod - def get_component_types_to_component_classes(cls): + def get_component_types_to_component_classes( + cls + ) -> Dict[str, Type[components.BaseRteComponent]]: """Get component classes mapping for component types. 
Returns: @@ -86,7 +122,10 @@ def get_component_types_to_component_classes(cls): for loader, name, _ in pkgutil.iter_modules(path=rte_path): if name == 'components': - module = loader.find_module(name).load_module(name) + fetched_module = loader.find_module(name) + # Ruling out the possibility of None for mypy type checking. + assert fetched_module is not None + module = fetched_module.load_module(name) break component_types_to_component_classes = {} @@ -101,7 +140,9 @@ def get_component_types_to_component_classes(cls): return component_types_to_component_classes @classmethod - def get_component_tag_names(cls, key, expected_value): + def get_component_tag_names( + cls, key: str, expected_value: bool + ) -> List[str]: """Get a list of component tag names which have the expected value of a key. @@ -117,13 +158,13 @@ def get_component_tag_names(cls, key, expected_value): rich_text_components_specs = cls.get_all_rte_components() component_tag_names = [] for component_spec in rich_text_components_specs.values(): - if component_spec[key] == expected_value: + if component_spec.get(key) == expected_value: component_tag_names.append( 'oppia-noninteractive-%s' % component_spec['frontend_id']) return component_tag_names @classmethod - def get_inline_component_tag_names(cls): + def get_inline_component_tag_names(cls) -> List[str]: """Get a list of inline component tag names. Returns: @@ -132,7 +173,7 @@ def get_inline_component_tag_names(cls): return cls.get_component_tag_names('is_block_element', False) @classmethod - def get_block_component_tag_names(cls): + def get_block_component_tag_names(cls) -> List[str]: """Get a list of block component tag names. Returns: @@ -141,7 +182,7 @@ def get_block_component_tag_names(cls): return cls.get_component_tag_names('is_block_element', True) @classmethod - def get_simple_component_tag_names(cls): + def get_simple_component_tag_names(cls) -> List[str]: """Get a list of simple component tag names. 
Returns: @@ -150,7 +191,7 @@ def get_simple_component_tag_names(cls): return cls.get_component_tag_names('is_complex', False) @classmethod - def get_complex_component_tag_names(cls): + def get_complex_component_tag_names(cls) -> List[str]: """Get a list of complex component tag names. Returns: diff --git a/core/domain/rte_component_registry_test.py b/core/domain/rte_component_registry_test.py index 57c65b438d28..6ffd0bb600f2 100644 --- a/core/domain/rte_component_registry_test.py +++ b/core/domain/rte_component_registry_test.py @@ -26,7 +26,6 @@ import struct from core import feconf -from core import python_utils from core import schema_utils from core import schema_utils_test from core import utils @@ -35,35 +34,45 @@ from core.domain import rte_component_registry from core.tests import test_utils +from typing import Final, List, Tuple, Type + # File names ending in any of these suffixes will be ignored when checking for # RTE component validity. -IGNORED_FILE_SUFFIXES = ['.pyc', '.DS_Store'] -RTE_THUMBNAIL_HEIGHT_PX = 16 -RTE_THUMBNAIL_WIDTH_PX = 16 +IGNORED_FILE_SUFFIXES: Final = ['.pyc', '.DS_Store'] +RTE_THUMBNAIL_HEIGHT_PX: Final = 16 +RTE_THUMBNAIL_WIDTH_PX: Final = 16 -_COMPONENT_CONFIG_SCHEMA = [ +# Here we use object because every type is inherited from object class. 
+_COMPONENT_CONFIG_SCHEMA: List[Tuple[str, Type[object]]] = [ ('backend_id', str), ('category', str), ('description', str), ('frontend_id', str), ('tooltip', str), ('icon_data_url', str), - ('requires_fs', bool), ('is_block_element', bool), - ('customization_arg_specs', list)] + ('requires_fs', bool), + ('is_block_element', bool), + ('customization_arg_specs', list) +] class RteComponentUnitTests(test_utils.GenericTestBase): """Tests that all the default RTE components are valid.""" - def _is_camel_cased(self, name): + def _is_camel_cased(self, name: str) -> bool: """Check whether a name is in CamelCase.""" - return name and (name[0] in string.ascii_uppercase) + return bool(name and name[0] in string.ascii_uppercase) - def _is_alphanumeric_string(self, input_string): + def _is_alphanumeric_string(self, input_string: str) -> bool: """Check whether a string is alphanumeric.""" return bool(re.compile('^[a-zA-Z0-9_]+$').match(input_string)) - def _validate_customization_arg_specs(self, customization_arg_specs): + def _validate_customization_arg_specs( + self, + customization_arg_specs: List[ + rte_component_registry.CustomizationArgSpecDict + ] + ) -> None: """Validates the given customization arg specs.""" for ca_spec in customization_arg_specs: self.assertEqual(set(ca_spec.keys()), set([ @@ -101,7 +110,7 @@ def _validate_customization_arg_specs(self, customization_arg_specs): ca_spec['default_value'], obj_class.normalize(ca_spec['default_value'])) - def _listdir_omit_ignored(self, directory): + def _listdir_omit_ignored(self, directory: str) -> List[str]: """List all files and directories within 'directory', omitting the ones whose name ends in one of the IGNORED_FILE_SUFFIXES. 
""" @@ -110,7 +119,7 @@ def _listdir_omit_ignored(self, directory): names = [name for name in names if not name.endswith(suffix)] return names - def test_image_thumbnails_for_rte_components(self): + def test_image_thumbnails_for_rte_components(self) -> None: """Test the thumbnails for the RTE component icons.""" rte_components = ( rte_component_registry.Registry.get_all_rte_components()) @@ -124,14 +133,14 @@ def test_image_thumbnails_for_rte_components(self): 'extensions', relative_icon_data_url) self.assertEqual(generated_image_filepath, defined_image_filepath) - with python_utils.open_file( + with utils.open_file( generated_image_filepath, 'rb', encoding=None) as f: img_data = f.read() width, height = struct.unpack('>LL', img_data[16:24]) self.assertEqual(int(width), RTE_THUMBNAIL_WIDTH_PX) self.assertEqual(int(height), RTE_THUMBNAIL_HEIGHT_PX) - def test_rte_components_are_valid(self): + def test_rte_components_are_valid(self) -> None: """Test that the default RTE components are valid.""" rte_components = ( @@ -149,20 +158,21 @@ def test_rte_components_are_valid(self): self.assertTrue(os.path.isdir(component_dir)) # In this directory there should be a /directives directory, an - # an icon .png file and a protractor.js file, and an optional - # preview .png file. + # an icon .png file, webdriverio.js file, + # and an optional preview .png file. # In /directives directory should be HTML file, a JS file, # there could be multiple JS and HTML files. 
dir_contents = self._listdir_omit_ignored(component_dir) - self.assertLessEqual(len(dir_contents), 4) + self.assertLessEqual(len(dir_contents), 5) directives_dir = os.path.join(component_dir, 'directives') png_file = os.path.join(component_dir, '%s.png' % component_id) - protractor_file = os.path.join(component_dir, 'protractor.js') + + webdriverio_file = os.path.join(component_dir, 'webdriverio.js') self.assertTrue(os.path.isdir(directives_dir)) self.assertTrue(os.path.isfile(png_file)) - self.assertTrue(os.path.isfile(protractor_file)) + self.assertTrue(os.path.isfile(webdriverio_file)) main_ts_file = os.path.join( directives_dir, 'oppia-noninteractive-%s.component.ts' @@ -183,20 +193,20 @@ def test_rte_components_are_valid(self): # top-level keys, and that these keys have the correct types. for item, item_type in _COMPONENT_CONFIG_SCHEMA: self.assertTrue(isinstance( - component_specs[item], item_type)) + component_specs.get(item), item_type)) # The string attributes should be non-empty. if item_type == str: - self.assertTrue(component_specs[item]) + self.assertTrue(component_specs.get(item)) self._validate_customization_arg_specs( component_specs['customization_arg_specs']) # pylint: disable=protected-access - def test_require_file_contains_all_imports(self): + def test_require_file_contains_all_imports(self) -> None: """Test that the rich_text_components.html file contains script-imports for all directives of all RTE components. 
""" - rtc_ts_filenames = [] + rtc_ts_filenames: List[str] = [] for component_id in feconf.ALLOWED_RTE_EXTENSIONS: component_dir = os.path.join( feconf.RTE_EXTENSIONS_DIR, component_id) @@ -212,7 +222,7 @@ def test_require_file_contains_all_imports(self): rtc_ts_file = os.path.join( feconf.RTE_EXTENSIONS_DIR, 'richTextComponentsRequires.ts') - with python_utils.open_file(rtc_ts_file, 'r') as f: + with utils.open_file(rtc_ts_file, 'r') as f: rtc_require_file_contents = f.read() for rtc_ts_filename in rtc_ts_filenames: @@ -222,7 +232,7 @@ def test_require_file_contains_all_imports(self): class RteComponentRegistryUnitTests(test_utils.GenericTestBase): """Tests the methods in RteComponentRegistry.""" - def test_get_all_rte_components(self): + def test_get_all_rte_components(self) -> None: """Test get_all_rte_components method.""" obtained_components = list( rte_component_registry.Registry.get_all_rte_components().keys()) @@ -235,7 +245,7 @@ def test_get_all_rte_components(self): self.assertEqual(set(obtained_components), set(actual_components)) - def test_get_tag_list_with_attrs(self): + def test_get_tag_list_with_attrs(self) -> None: """Test get_tag_list_with_attrs method.""" obtained_tag_list_with_attrs = ( rte_component_registry.Registry.get_tag_list_with_attrs()) @@ -254,7 +264,7 @@ def test_get_tag_list_with_attrs(self): for key, attrs in obtained_tag_list_with_attrs.items(): self.assertEqual(set(attrs), set(actual_tag_list_with_attrs[key])) - def test_get_component_types_to_component_classes(self): + def test_get_component_types_to_component_classes(self) -> None: """Test get_component_types_to_component_classes method.""" component_types_to_component_classes = rte_component_registry.Registry.get_component_types_to_component_classes() # pylint: disable=line-too-long component_specs = ( @@ -279,7 +289,10 @@ def test_get_component_types_to_component_classes(self): for loader, name, _ in pkgutil.iter_modules(path=rte_path): if name == 'components': - module = 
loader.find_module(name).load_module(name) + fetched_module = loader.find_module(name) + # Ruling out the possibility of None for mypy type checking. + assert fetched_module is not None + module = fetched_module.load_module(name) break for name, obj in inspect.getmembers(module): @@ -290,7 +303,7 @@ def test_get_component_types_to_component_classes(self): set(obtained_component_class_names), set(actual_component_class_names)) - def test_get_component_tag_names(self): + def test_get_component_tag_names(self) -> None: """Test get_component_tag_names method.""" component_specs = ( rte_component_registry.Registry.get_all_rte_components()) @@ -301,7 +314,7 @@ def test_get_component_tag_names(self): actual_component_tag_names = [ 'oppia-noninteractive-%s' % component_spec['frontend_id'] for component_spec in component_specs.values() - if component_spec[key] == expected_value] + if component_spec.get(key) == expected_value] obtained_component_tag_names = ( rte_component_registry.Registry.get_component_tag_names( key, expected_value)) @@ -309,7 +322,7 @@ def test_get_component_tag_names(self): set(actual_component_tag_names), set(obtained_component_tag_names)) - def test_get_inline_component_tag_names(self): + def test_get_inline_component_tag_names(self) -> None: """Test get_inline_component_tag_names method.""" component_specs = ( rte_component_registry.Registry.get_all_rte_components()) @@ -325,7 +338,7 @@ def test_get_inline_component_tag_names(self): set(actual_inline_component_tag_names), set(obtained_inline_component_tag_names)) - def test_inline_rte_components_list(self): + def test_inline_rte_components_list(self) -> None: inline_component_tag_names = ( rte_component_registry.Registry.get_inline_component_tag_names()) inline_component_tag_names_from_constant = [ @@ -335,7 +348,7 @@ def test_inline_rte_components_list(self): set(inline_component_tag_names), set(inline_component_tag_names_from_constant)) - def test_get_block_component_tag_names(self): + def 
test_get_block_component_tag_names(self) -> None: """Test get_block_component_tag_names method.""" component_specs = ( rte_component_registry.Registry.get_all_rte_components()) @@ -351,7 +364,7 @@ def test_get_block_component_tag_names(self): set(actual_block_component_tag_names), set(obtained_block_component_tag_names)) - def test_get_simple_component_tag_names(self): + def test_get_simple_component_tag_names(self) -> None: """Test get_simple_component_tag_names method.""" component_specs = ( rte_component_registry.Registry.get_all_rte_components()) @@ -367,7 +380,7 @@ def test_get_simple_component_tag_names(self): set(actual_simple_component_tag_names), set(obtained_simple_component_tag_names)) - def test_get_complex_component_tag_names(self): + def test_get_complex_component_tag_names(self) -> None: """Test get_complex_component_tag_names method.""" component_specs = ( rte_component_registry.Registry.get_all_rte_components()) diff --git a/core/domain/rules_registry.py b/core/domain/rules_registry.py index 7632e1b1d415..9ce71cf2804c 100644 --- a/core/domain/rules_registry.py +++ b/core/domain/rules_registry.py @@ -21,19 +21,34 @@ import json import os +from core import constants from core import feconf -from core import python_utils + +from typing import Dict, List, Optional, TypedDict + + +class RuleSpecsExtensionDict(TypedDict): + """Dictionary representation of rule specs of an extension.""" + + interactionId: str + format: str + ruleTypes: Dict[str, Dict[str, List[str]]] class Registry: """Registry of rules.""" - _state_schema_version_to_html_field_types_to_rule_specs = {} + _state_schema_version_to_html_field_types_to_rule_specs: Dict[ + Optional[int], + Dict[str, RuleSpecsExtensionDict] + ] = {} @classmethod - def get_html_field_types_to_rule_specs(cls, state_schema_version=None): + def get_html_field_types_to_rule_specs( + cls, state_schema_version: Optional[int] = None + ) -> Dict[str, RuleSpecsExtensionDict]: """Returns a dict containing a 
html_field_types_to_rule_specs dict of - the specified state schema verison, if available. + the specified state schema version, if available. Args: state_schema_version: int|None. The state schema version to retrieve @@ -49,36 +64,46 @@ def get_html_field_types_to_rule_specs(cls, state_schema_version=None): Exception. No html_field_types_to_rule_specs json file found for the given state schema version. """ + specs_from_json: Dict[str, RuleSpecsExtensionDict] = {} cached = ( state_schema_version in cls._state_schema_version_to_html_field_types_to_rule_specs) - if not cached and state_schema_version is None: - cls._state_schema_version_to_html_field_types_to_rule_specs[ - state_schema_version] = json.loads( - python_utils.get_package_file_contents( - 'extensions', - feconf - .HTML_FIELD_TYPES_TO_RULE_SPECS_EXTENSIONS_MODULE_PATH)) - elif not cached: - file_name = 'html_field_types_to_rule_specs_state_v%i.json' % ( - state_schema_version) - spec_file = os.path.join( - feconf - .LEGACY_HTML_FIELD_TYPES_TO_RULE_SPECS_EXTENSIONS_MODULE_DIR, - file_name) - - try: + if not cached: + if state_schema_version is None: specs_from_json = json.loads( - python_utils.get_package_file_contents( - 'extensions', spec_file)) - except: - raise Exception( - 'No specs json file found for state schema v%i' % - state_schema_version) - - cls._state_schema_version_to_html_field_types_to_rule_specs[ - state_schema_version] = specs_from_json + constants.get_package_file_contents( + 'extensions', + feconf. 
+ HTML_FIELD_TYPES_TO_RULE_SPECS_EXTENSIONS_MODULE_PATH + ) + ) + cls._state_schema_version_to_html_field_types_to_rule_specs[ + state_schema_version + ] = specs_from_json + else: + file_name = 'html_field_types_to_rule_specs_state_v%i.json' % ( + state_schema_version + ) + spec_file = os.path.join( + feconf + .LEGACY_HTML_FIELD_TYPES_TO_RULE_SPECS_EXTENSIONS_MODULE_DIR, # pylint: disable=line-too-long + file_name + ) + + try: + specs_from_json = json.loads( + constants.get_package_file_contents( + 'extensions', spec_file + ) + ) + except Exception as e: + raise Exception( + 'No specs json file found for state schema v%i' % + state_schema_version) from e + + cls._state_schema_version_to_html_field_types_to_rule_specs[ + state_schema_version] = specs_from_json return cls._state_schema_version_to_html_field_types_to_rule_specs[ state_schema_version] diff --git a/core/domain/rules_registry_test.py b/core/domain/rules_registry_test.py index fee00db73b79..e53cdf915632 100644 --- a/core/domain/rules_registry_test.py +++ b/core/domain/rules_registry_test.py @@ -21,7 +21,7 @@ import json import os -from core import python_utils +from core import utils from core.domain import rules_registry from core.tests import test_utils @@ -29,20 +29,24 @@ class RulesRegistryUnitTests(test_utils.GenericTestBase): """Test for the rules registry.""" - def test_get_html_field_types_to_rule_specs_for_current_state_schema_version(self): # pylint: disable=line-too-long + def test_get_html_field_types_to_rule_specs_for_current_state_schema_version( # pylint: disable=line-too-long + self + ) -> None: html_field_types_to_rule_specs = ( rules_registry.Registry.get_html_field_types_to_rule_specs()) spec_file = os.path.join( 'extensions', 'interactions', 'html_field_types_to_rule_specs.json') - with python_utils.open_file(spec_file, 'r') as f: + with utils.open_file(spec_file, 'r') as f: specs_from_json = json.loads(f.read()) self.assertDictEqual( html_field_types_to_rule_specs, specs_from_json) - 
def test_get_html_field_types_to_rule_specs_for_previous_state_schema_version(self): # pylint: disable=line-too-long + def test_get_html_field_types_to_rule_specs_for_previous_state_schema_version( # pylint: disable=line-too-long + self + ) -> None: html_field_types_to_rule_specs_v41 = ( rules_registry.Registry.get_html_field_types_to_rule_specs( state_schema_version=41)) @@ -51,15 +55,17 @@ def test_get_html_field_types_to_rule_specs_for_previous_state_schema_version(se 'extensions', 'interactions', 'legacy_html_field_types_to_rule_specs_by_state_version', 'html_field_types_to_rule_specs_state_v41.json') - with python_utils.open_file(spec_file_v41, 'r') as f: + with utils.open_file(spec_file_v41, 'r') as f: specs_from_json_v41 = json.loads(f.read()) self.assertDictEqual( html_field_types_to_rule_specs_v41, specs_from_json_v41) - def test_get_html_field_types_to_rule_specs_for_unsaved_state_schema_version(self): # pylint: disable=line-too-long - with self.assertRaisesRegexp( + def test_get_html_field_types_to_rule_specs_for_unsaved_state_schema_version( # pylint: disable=line-too-long + self + ) -> None: + with self.assertRaisesRegex( Exception, 'No specs json file found for state schema' ): diff --git a/core/domain/search_services.py b/core/domain/search_services.py index 19154820fcbb..23e9466d2990 100644 --- a/core/domain/search_services.py +++ b/core/domain/search_services.py @@ -14,32 +14,73 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Commands for operating on the search status of activities.""" +"""Commands for operating on the search status of activities and blog posts.""" from __future__ import annotations +import math + +from core import utils +from core.domain import blog_domain +from core.domain import collection_domain +from core.domain import exp_domain from core.domain import rights_domain from core.domain import rights_manager from core.platform import models +from typing import Final, List, Optional, Tuple, TypedDict + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import search_services as platform_search_services + platform_search_services = models.Registry.import_search_services() +# "NOTE TO DEVELOPERS: If you change any of these index names or add any new +# indexes, please contact Sean to update permissions on the ElasticSearch +# production servers, otherwise search operations will fail in production. +# Please do this before merging the PR. Thanks!" # Name for the exploration search index. -SEARCH_INDEX_EXPLORATIONS = 'explorations' +SEARCH_INDEX_EXPLORATIONS: Final = 'explorations' +# "NOTE TO DEVELOPERS: If you change any of these index names or add any new +# indexes, please contact Sean to update permissions on the ElasticSearch +# production servers, otherwise search operations will fail in production. +# Please do this before merging the PR. Thanks!" # Name for the collection search index. -SEARCH_INDEX_COLLECTIONS = 'collections' +SEARCH_INDEX_COLLECTIONS: Final = 'collections' + +# "NOTE TO DEVELOPERS: If you change any of these index names or add any new +# indexes, please contact Sean to update permissions on the ElasticSearch +# production servers, otherwise search operations will fail in production. +# Please do this before merging the PR. Thanks!" +# Name for the blog post search index. +SEARCH_INDEX_BLOG_POSTS: Final = 'blog-posts' # This is done to prevent the rank hitting 0 too easily. 
Note that # negative ranks are disallowed in the Search API. -_DEFAULT_RANK = 20 +_DEFAULT_RANK: Final = 20 + + +class DomainSearchDict(TypedDict): + """Dictionary representing the search dictionary of a domain object.""" + + id: str + language_code: str + title: str + category: str + tags: List[str] + objective: str + rank: int -def index_exploration_summaries(exp_summaries): +def index_exploration_summaries( + exp_summaries: List[exp_domain.ExplorationSummary] +) -> None: """Adds the explorations to the search index. Args: - exp_summaries: list(ExpSummaryModel). List of Exp Summary domain + exp_summaries: list(ExplorationSummary). List of Exp Summary domain objects to be indexed. """ platform_search_services.add_documents_to_index([ @@ -49,18 +90,20 @@ def index_exploration_summaries(exp_summaries): ], SEARCH_INDEX_EXPLORATIONS) -def _exp_summary_to_search_dict(exp_summary): +def _exp_summary_to_search_dict( + exp_summary: exp_domain.ExplorationSummary +) -> DomainSearchDict: """Updates the dict to be returned, whether the given exploration is to be indexed for further queries or not. Args: - exp_summary: ExpSummaryModel. ExplorationSummary domain object. + exp_summary: ExplorationSummary. ExplorationSummary domain object. Returns: dict. The representation of the given exploration, in a form that can be used by the search index. """ - doc = { + doc: DomainSearchDict = { 'id': exp_summary.id, 'language_code': exp_summary.language_code, 'title': exp_summary.title, @@ -72,12 +115,14 @@ def _exp_summary_to_search_dict(exp_summary): return doc -def _should_index_exploration(exp_summary): +def _should_index_exploration( + exp_summary: exp_domain.ExplorationSummary +) -> bool: """Returns whether the given exploration should be indexed for future search queries. Args: - exp_summary: ExpSummaryModel. ExplorationSummary domain object. + exp_summary: ExplorationSummary. ExplorationSummary domain object. Returns: bool. 
Whether the given exploration should be indexed for future @@ -89,7 +134,9 @@ def _should_index_exploration(exp_summary): ) -def get_search_rank_from_exp_summary(exp_summary): +def get_search_rank_from_exp_summary( + exp_summary: exp_domain.ExplorationSummary +) -> int: """Returns an integer determining the document's rank in search. Featured explorations get a ranking bump, and so do explorations that @@ -117,12 +164,14 @@ def get_search_rank_from_exp_summary(exp_summary): return max(rank, 0) -def index_collection_summaries(collection_summaries): +def index_collection_summaries( + collection_summaries: List[collection_domain.CollectionSummary] +) -> None: """Adds the collections to the search index. Args: - collection_summaries: list(CollectionSummaryModel). List of - Collection Summary domain objects to be indexed. + collection_summaries: list(CollectionSummary). List of collection + summary domain objects to be indexed. """ platform_search_services.add_documents_to_index([ _collection_summary_to_search_dict(collection_summary) @@ -131,17 +180,19 @@ def index_collection_summaries(collection_summaries): ], SEARCH_INDEX_COLLECTIONS) -def _collection_summary_to_search_dict(collection_summary): +def _collection_summary_to_search_dict( + collection_summary: collection_domain.CollectionSummary +) -> DomainSearchDict: """Converts a collection domain object to a search dict. Args: - collection_summary: CollectionSummaryModel. The collection + collection_summary: CollectionSummary. The collection summary object to be converted. Returns: dict. The search dict of the collection domain object. 
""" - doc = { + doc: DomainSearchDict = { 'id': collection_summary.id, 'title': collection_summary.title, 'category': collection_summary.category, @@ -153,11 +204,13 @@ def _collection_summary_to_search_dict(collection_summary): return doc -def _should_index_collection(collection): +def _should_index_collection( + collection: collection_domain.CollectionSummary +) -> bool: """Checks if a particular collection should be indexed. Args: - collection: CollectionSummaryModel. The collection summary model object. + collection: CollectionSummary. CollectionSummary domain object. Returns: bool. Whether a particular collection should be indexed. @@ -166,11 +219,17 @@ def _should_index_collection(collection): return rights.status != rights_domain.ACTIVITY_STATUS_PRIVATE -def search_explorations(query, categories, language_codes, size, offset=None): +def search_explorations( + query: str, + categories: List[str], + language_codes: List[str], + size: int, + offset: Optional[int] = None +) -> Tuple[List[str], Optional[int]]: """Searches through the available explorations. Args: - query: str or None. The query string to search for. + query: str. The query string to search for. categories: list(str). The list of categories to query for. If it is empty, no category filter is applied to the results. If it is not empty, then a result is considered valid if it matches at least one @@ -191,12 +250,15 @@ def search_explorations(query, categories, language_codes, size, offset=None): fetch, None otherwise. If an offset is returned, it will be a web-safe string that can be used in URLs. 
""" - return platform_search_services.search( - query, SEARCH_INDEX_EXPLORATIONS, categories, language_codes, - offset=offset, size=size, ids_only=True) + result_ids, result_offset = platform_search_services.search( + query, SEARCH_INDEX_EXPLORATIONS, + categories, language_codes, + offset=offset, size=size + ) + return result_ids, result_offset -def delete_explorations_from_search_index(exploration_ids): +def delete_explorations_from_search_index(exploration_ids: List[str]) -> None: """Deletes the documents corresponding to these exploration_ids from the search index. @@ -208,18 +270,24 @@ def delete_explorations_from_search_index(exploration_ids): exploration_ids, SEARCH_INDEX_EXPLORATIONS) -def clear_exploration_search_index(): +def clear_exploration_search_index() -> None: """WARNING: This runs in-request, and may therefore fail if there are too many entries in the index. """ platform_search_services.clear_index(SEARCH_INDEX_EXPLORATIONS) -def search_collections(query, categories, language_codes, size, offset=None): +def search_collections( + query: str, + categories: List[str], + language_codes: List[str], + size: int, + offset: Optional[int] = None +) -> Tuple[List[str], Optional[int]]: """Searches through the available collections. Args: - query: str or None. The query string to search for. + query: str. The query string to search for. categories: list(str). The list of categories to query for. If it is empty, no category filter is applied to the results. If it is not empty, then a result is considered valid if it matches at least one @@ -240,12 +308,15 @@ def search_collections(query, categories, language_codes, size, offset=None): otherwise. If an offset is returned, it will be a web-safe string that can be used in URLs. 
""" - return platform_search_services.search( - query, SEARCH_INDEX_COLLECTIONS, categories, language_codes, - offset=offset, size=size, ids_only=True) + result_ids, result_offset = platform_search_services.search( + query, SEARCH_INDEX_COLLECTIONS, + categories, language_codes, + offset=offset, size=size + ) + return result_ids, result_offset -def delete_collections_from_search_index(collection_ids): +def delete_collections_from_search_index(collection_ids: List[str]) -> None: """Removes the given collections from the search index. Args: @@ -256,10 +327,128 @@ def delete_collections_from_search_index(collection_ids): collection_ids, SEARCH_INDEX_COLLECTIONS) -def clear_collection_search_index(): +def clear_collection_search_index() -> None: """Clears the search index. WARNING: This runs in-request, and may therefore fail if there are too many entries in the index. """ platform_search_services.clear_index(SEARCH_INDEX_COLLECTIONS) + + +class BlogPostSummaryDomainSearchDict(TypedDict): + """Dictionary representing the search dictionary of a blog post summary + domain object. + """ + + id: str + title: str + tags: List[str] + rank: int + + +def index_blog_post_summaries( + blog_post_summaries: List[blog_domain.BlogPostSummary] +) -> None: + """Adds the blog post summaries to the search index. + + Args: + blog_post_summaries: list(BlogPostSummary). List of BlogPostSummary + domain objects to be indexed. + """ + + docs_to_index = [ + _blog_post_summary_to_search_dict(blog_post_summary) + for blog_post_summary in blog_post_summaries + ] + platform_search_services.add_documents_to_index([ + doc for doc in docs_to_index if doc + ], SEARCH_INDEX_BLOG_POSTS) + + +def _blog_post_summary_to_search_dict( + blog_post_summary: blog_domain.BlogPostSummary +) -> Optional[BlogPostSummaryDomainSearchDict]: + """Updates the dict to be returned, whether the given blog post summary is + to be indexed for further queries or not. + + Args: + blog_post_summary: BlogPostSummary. 
BlogPostSummary domain object. + + Returns: + dict. The representation of the given blog post summary, in a form that + can be used by the search index. + """ + if ( + not blog_post_summary.deleted and + blog_post_summary.published_on is not None + ): + doc: BlogPostSummaryDomainSearchDict = { + 'id': blog_post_summary.id, + 'title': blog_post_summary.title, + 'tags': blog_post_summary.tags, + 'rank': math.floor( + utils.get_time_in_millisecs(blog_post_summary.published_on)) + } + return doc + return None + + +def search_blog_post_summaries( + query: str, + tags: List[str], + size: int, + offset: Optional[int] = None +) -> Tuple[List[str], Optional[int]]: + """Searches through the available blog post summaries. + + Args: + query: str. The query string to search for. + tags: list(str). The list of tags to query for. If it is + empty, no tags filter is applied to the results. If it is not + empty, then a result is considered valid if it matches at least one + of these tags. + size: int. The maximum number of results to return. + offset: int or None. A marker that is used to get the next page of + results. If there are more documents that match the query than + 'size', this function will return an offset to get the next page. + + Returns: + tuple. A 2-tuple consisting of: + - list(str). A list of blog post ids that match the query. + - int or None. An offset if there are more matching blog post + summaries to fetch, None otherwise. If an offset is returned, it + will be a web-safe string that can be used in URLs. + """ + result_ids, result_offset = ( + platform_search_services.blog_post_summaries_search( + query, + tags, + offset=offset, + size=size + ) + ) + return result_ids, result_offset + + +def delete_blog_post_summary_from_search_index(blog_post_id: str) -> None: + """Deletes the documents corresponding to the blog_id from the + search index. + + Args: + blog_post_id: str. Blog post id whose document are to be deleted from + the search index. 
+ """ + # The argument type of delete_documents_from_index() is List[str], + # therefore, we provide [blog_post_id] as argument. + platform_search_services.delete_documents_from_index( + [blog_post_id], SEARCH_INDEX_BLOG_POSTS) + + +def clear_blog_post_summaries_search_index() -> None: + """Clears the blog post search index. + + WARNING: This runs in-request, and may therefore fail if there are too + many entries in the index. + """ + platform_search_services.clear_index(SEARCH_INDEX_BLOG_POSTS) diff --git a/core/domain/search_services_test.py b/core/domain/search_services_test.py index d344721e7bfe..9e256a5288e3 100644 --- a/core/domain/search_services_test.py +++ b/core/domain/search_services_test.py @@ -18,6 +18,7 @@ from __future__ import annotations +from core.domain import blog_services from core.domain import collection_services from core.domain import exp_fetchers from core.domain import exp_services @@ -28,17 +29,19 @@ from core.platform import models from core.tests import test_utils +from typing import Final, List, Optional, Tuple + gae_search_services = models.Registry.import_search_services() class SearchServicesUnitTests(test_utils.GenericTestBase): """Test the search services module.""" - EXP_ID = 'An_exploration_id' - COLLECTION_ID = 'A_collection_id' + EXP_ID: Final = 'An_exploration_id' + COLLECTION_ID: Final = 'A_collection_id' - def setUp(self): - super(SearchServicesUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) @@ -58,7 +61,7 @@ def setUp(self): self.user_id_admin = ( self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) - def test_get_search_rank(self): + def test_get_search_rank(self) -> None: self.save_new_valid_exploration(self.EXP_ID, self.owner_id) exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID) @@ -87,7 +90,7 @@ def test_get_search_rank(self): 
search_services.get_search_rank_from_exp_summary(exp_summary), base_search_rank + 8) - def test_search_ranks_cannot_be_negative(self): + def test_search_ranks_cannot_be_negative(self) -> None: self.save_new_valid_exploration(self.EXP_ID, self.owner_id) exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID) @@ -115,7 +118,7 @@ def test_search_ranks_cannot_be_negative(self): self.assertEqual(search_services.get_search_rank_from_exp_summary( exp_summary), 0) - def test_search_explorations(self): + def test_search_explorations(self) -> None: expected_query_string = 'a query string' expected_offset = 0 expected_size = 30 @@ -123,15 +126,20 @@ def test_search_explorations(self): doc_ids = ['id1', 'id2'] def mock_search( - query_string, index, categories, language_codes, offset=None, - size=20, ids_only=False, retries=3): + query_string: str, + index: str, + categories: List[str], + language_codes: List[str], + offset: Optional[int] = None, + size: int = 20, + retries: int = 3 + ) -> Tuple[List[str], Optional[int]]: self.assertEqual(query_string, expected_query_string) self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS) self.assertEqual(categories, []) self.assertEqual(language_codes, []) self.assertEqual(offset, expected_offset) self.assertEqual(size, expected_size) - self.assertEqual(ids_only, True) self.assertEqual(retries, 3) return doc_ids, expected_result_offset @@ -145,7 +153,7 @@ def mock_search( self.assertEqual(result_offset, expected_result_offset) self.assertEqual(result, doc_ids) - def test_search_collections(self): + def test_search_collections(self) -> None: expected_query_string = 'a query string' expected_offset = 0 expected_size = 30 @@ -153,8 +161,14 @@ def test_search_collections(self): doc_ids = ['id1', 'id2'] def mock_search( - query_string, index, categories, language_codes, offset=None, - size=20, ids_only=False, retries=3): + query_string: str, + index: str, + categories: List[str], + language_codes: List[str], + 
offset: Optional[int] = None, + size: int = 20, + retries: int = 3 + ) -> Tuple[List[str], Optional[int]]: self.assertEqual(query_string, expected_query_string) self.assertEqual( index, collection_services.SEARCH_INDEX_COLLECTIONS) @@ -162,7 +176,6 @@ def mock_search( self.assertEqual(language_codes, []) self.assertEqual(offset, expected_offset) self.assertEqual(size, expected_size) - self.assertEqual(ids_only, True) self.assertEqual(retries, 3) return doc_ids, expected_result_offset @@ -176,7 +189,7 @@ def mock_search( self.assertEqual(result_offset, expected_result_offset) self.assertEqual(result, doc_ids) - def test_demo_collections_are_added_to_search_index(self): + def test_demo_collections_are_added_to_search_index(self) -> None: results = search_services.search_collections('Welcome', [], [], 2)[0] self.assertEqual(results, []) @@ -184,7 +197,7 @@ def test_demo_collections_are_added_to_search_index(self): results = search_services.search_collections('Welcome', [], [], 2)[0] self.assertEqual(results, ['0']) - def test_demo_explorations_are_added_to_search_index(self): + def test_demo_explorations_are_added_to_search_index(self) -> None: results, _ = search_services.search_explorations('Welcome', [], [], 2) self.assertEqual(results, []) @@ -192,7 +205,7 @@ def test_demo_explorations_are_added_to_search_index(self): results, _ = search_services.search_explorations('Welcome', [], [], 2) self.assertEqual(results, ['0']) - def test_clear_exploration_search_index(self): + def test_clear_exploration_search_index(self) -> None: exp_services.load_demo('0') result = search_services.search_explorations('Welcome', [], [], 2)[0] self.assertEqual(result, ['0']) @@ -200,7 +213,7 @@ def test_clear_exploration_search_index(self): result = search_services.search_explorations('Welcome', [], [], 2)[0] self.assertEqual(result, []) - def test_clear_collection_search_index(self): + def test_clear_collection_search_index(self) -> None: collection_services.load_demo('0') result = 
search_services.search_collections('Welcome', [], [], 2)[0] self.assertEqual(result, ['0']) @@ -208,9 +221,9 @@ def test_clear_collection_search_index(self): result = search_services.search_collections('Welcome', [], [], 2)[0] self.assertEqual(result, []) - def test_delete_explorations_from_search_index(self): + def test_delete_explorations_from_search_index(self) -> None: - def _mock_delete_docs(ids, index): + def _mock_delete_docs(ids: List[str], index: str) -> None: """Mocks delete_documents_from_index().""" self.assertEqual(ids, [self.EXP_ID]) self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS) @@ -226,9 +239,9 @@ def _mock_delete_docs(ids, index): self.assertEqual(delete_docs_counter.times_called, 1) - def test_delete_collections_from_search_index(self): + def test_delete_collections_from_search_index(self) -> None: - def _mock_delete_docs(ids, index): + def _mock_delete_docs(ids: List[str], index: str) -> None: """Mocks delete_documents_from_index().""" self.assertEqual(ids, [self.COLLECTION_ID]) self.assertEqual(index, search_services.SEARCH_INDEX_COLLECTIONS) @@ -244,3 +257,123 @@ def _mock_delete_docs(ids, index): [self.COLLECTION_ID]) self.assertEqual(delete_docs_counter.times_called, 1) + + +class BlogPostSearchServicesUnitTests(test_utils.GenericTestBase): + + def setUp(self) -> None: + super().setUp() + + self.signup('a@example.com', 'A') + self.signup('b@example.com', 'B') + self.user_id_a = self.get_user_id_from_email('a@example.com') + self.user_id_b = self.get_user_id_from_email('b@example.com') + + self.blog_post_a = blog_services.create_new_blog_post(self.user_id_a) + self.blog_post_b = blog_services.create_new_blog_post(self.user_id_b) + self.blog_post_a_id = self.blog_post_a.id + self.blog_post_b_id = self.blog_post_b.id + + self.change_dict_one: blog_services.BlogPostChangeDict = { + 'title': 'Sample title one', + 'thumbnail_filename': 'thummbnail.svg', + 'content': '

    Hello

    ', + 'tags': ['one', 'two'] + } + + self.change_dict_two: blog_services.BlogPostChangeDict = { + 'title': 'Sample title two', + 'thumbnail_filename': 'thummbnail.svg', + 'content': '

    Hello

    ', + 'tags': ['two'] + } + + blog_services.update_blog_post( + self.blog_post_a_id, self.change_dict_one) + blog_services.update_blog_post( + self.blog_post_b_id, self.change_dict_two) + blog_services.publish_blog_post(self.blog_post_a_id) + blog_services.publish_blog_post(self.blog_post_b_id) + + def test_search_blog_post_summaries(self) -> None: + expected_query_string = 'a query string' + expected_offset = 0 + expected_size = 30 + expected_result_offset = 30 + doc_ids = ['id1', 'id2'] + + def mock_search( + query_string: str, + tags: List[str], + offset: Optional[int] = None, + size: int = 20, + retries: int = 3 + ) -> Tuple[List[str], Optional[int]]: + self.assertEqual(query_string, expected_query_string) + self.assertEqual(tags, []) + self.assertEqual(offset, expected_offset) + self.assertEqual(size, expected_size) + self.assertEqual(retries, 3) + + return doc_ids, expected_result_offset + + with self.swap( + gae_search_services, 'blog_post_summaries_search', mock_search + ): + result, result_offset = ( + search_services.search_blog_post_summaries( + expected_query_string, [], expected_size, + offset=expected_offset, + ) + ) + + self.assertEqual(result_offset, expected_result_offset) + self.assertEqual(result, doc_ids) + + def test_clear_blog_post_search_index(self) -> None: + result = search_services.search_blog_post_summaries( + 'title', [], 2)[0] + self.assertEqual(result, [self.blog_post_a_id, self.blog_post_b_id]) + search_services.clear_blog_post_summaries_search_index() + result = search_services.search_blog_post_summaries( + 'title', [], 2)[0] + self.assertEqual(result, []) + + def test_delete_blog_posts_from_search_index(self) -> None: + + def _mock_delete_docs(ids: List[str], index: str) -> None: + """Mocks delete_documents_from_index().""" + self.assertEqual(ids, [self.blog_post_a_id]) + self.assertEqual( + index, search_services.SEARCH_INDEX_BLOG_POSTS) + + delete_docs_counter = test_utils.CallCounter(_mock_delete_docs) + + delete_docs_swap = 
self.swap( + gae_search_services, 'delete_documents_from_index', + delete_docs_counter) + + with delete_docs_swap: + search_services.delete_blog_post_summary_from_search_index(self.blog_post_a_id) # pylint: disable=line-too-long + + self.assertEqual(delete_docs_counter.times_called, 1) + + def test_should_not_index_draft_blog_post(self) -> None: + result = search_services.search_blog_post_summaries( + 'title', [], 2)[0] + self.assertEqual(result, [self.blog_post_a_id, self.blog_post_b_id]) + + # Unpublishing a blog post removes it from the search index. + blog_services.unpublish_blog_post(self.blog_post_a_id) + result = search_services.search_blog_post_summaries( + 'title', [], 2)[0] + self.assertEqual(result, [self.blog_post_b_id]) + + # Trying indexing draft blog post. + draft_blog_post = blog_services.get_blog_post_summary_by_id( + self.blog_post_a_id) + search_services.index_blog_post_summaries([draft_blog_post]) + + result = search_services.search_blog_post_summaries( + 'title', [], 2)[0] + self.assertEqual(result, [self.blog_post_b_id]) diff --git a/core/domain/skill_domain.py b/core/domain/skill_domain.py index 4aaf9659a361..17c6a9db76dd 100644 --- a/core/domain/skill_domain.py +++ b/core/domain/skill_domain.py @@ -17,6 +17,7 @@ from __future__ import annotations import copy +import datetime import json from core import android_validation_constants @@ -24,47 +25,54 @@ from core import utils from core.constants import constants from core.domain import change_domain -from core.domain import html_cleaner -from core.domain import html_validation_service from core.domain import state_domain +from core.domain import translation_domain + +from typing import Callable, Dict, Final, List, Literal, Optional, TypedDict + +from core.domain import html_cleaner # pylint: disable=invalid-import-from # isort:skip +from core.domain import html_validation_service # pylint: disable=invalid-import-from # isort:skip + +# TODO(#14537): Refactor this file and remove imports marked 
+# with 'invalid-import-from'. # Do not modify the values of these constants. This is to preserve backwards # compatibility with previous change dicts. -SKILL_PROPERTY_DESCRIPTION = 'description' -SKILL_PROPERTY_LANGUAGE_CODE = 'language_code' -SKILL_PROPERTY_SUPERSEDING_SKILL_ID = 'superseding_skill_id' -SKILL_PROPERTY_ALL_QUESTIONS_MERGED = 'all_questions_merged' -SKILL_PROPERTY_PREREQUISITE_SKILL_IDS = 'prerequisite_skill_ids' +SKILL_PROPERTY_DESCRIPTION: Final = 'description' +SKILL_PROPERTY_LANGUAGE_CODE: Final = 'language_code' +SKILL_PROPERTY_SUPERSEDING_SKILL_ID: Final = 'superseding_skill_id' +SKILL_PROPERTY_ALL_QUESTIONS_MERGED: Final = 'all_questions_merged' +SKILL_PROPERTY_PREREQUISITE_SKILL_IDS: Final = 'prerequisite_skill_ids' -SKILL_CONTENTS_PROPERTY_EXPLANATION = 'explanation' -SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES = 'worked_examples' +SKILL_CONTENTS_PROPERTY_EXPLANATION: Final = 'explanation' +SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES: Final = 'worked_examples' -SKILL_MISCONCEPTIONS_PROPERTY_NAME = 'name' -SKILL_MISCONCEPTIONS_PROPERTY_NOTES = 'notes' -SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK = 'feedback' -SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED = 'must_be_addressed' +SKILL_MISCONCEPTIONS_PROPERTY_NAME: Final = 'name' +SKILL_MISCONCEPTIONS_PROPERTY_NOTES: Final = 'notes' +SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK: Final = 'feedback' +SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED: Final = 'must_be_addressed' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. 
-CMD_UPDATE_SKILL_PROPERTY = 'update_skill_property' -CMD_UPDATE_SKILL_CONTENTS_PROPERTY = 'update_skill_contents_property' -CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY = ( +CMD_UPDATE_SKILL_PROPERTY: Final = 'update_skill_property' +CMD_UPDATE_SKILL_CONTENTS_PROPERTY: Final = 'update_skill_contents_property' +CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY: Final = ( 'update_skill_misconceptions_property') -CMD_UPDATE_RUBRICS = 'update_rubrics' +CMD_UPDATE_RUBRICS: Final = 'update_rubrics' -CMD_ADD_SKILL_MISCONCEPTION = 'add_skill_misconception' -CMD_DELETE_SKILL_MISCONCEPTION = 'delete_skill_misconception' +CMD_ADD_SKILL_MISCONCEPTION: Final = 'add_skill_misconception' +CMD_DELETE_SKILL_MISCONCEPTION: Final = 'delete_skill_misconception' -CMD_ADD_PREREQUISITE_SKILL = 'add_prerequisite_skill' -CMD_DELETE_PREREQUISITE_SKILL = 'delete_prerequisite_skill' +CMD_ADD_PREREQUISITE_SKILL: Final = 'add_prerequisite_skill' +CMD_DELETE_PREREQUISITE_SKILL: Final = 'delete_prerequisite_skill' -CMD_CREATE_NEW = 'create_new' -CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION = ( +CMD_CREATE_NEW: Final = 'create_new' +CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION: Final = ( 'migrate_contents_schema_to_latest_version') -CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION = ( +CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION: Final = ( 'migrate_misconceptions_schema_to_latest_version') -CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION = ( +CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION: Final = ( 'migrate_rubrics_schema_to_latest_version') @@ -89,99 +97,351 @@ class SkillChange(change_domain.BaseChange): # The allowed list of skill properties which can be used in # update_skill_property command. 
- SKILL_PROPERTIES = ( + SKILL_PROPERTIES: List[str] = [ SKILL_PROPERTY_DESCRIPTION, SKILL_PROPERTY_LANGUAGE_CODE, SKILL_PROPERTY_SUPERSEDING_SKILL_ID, SKILL_PROPERTY_ALL_QUESTIONS_MERGED, - SKILL_PROPERTY_PREREQUISITE_SKILL_IDS) + SKILL_PROPERTY_PREREQUISITE_SKILL_IDS + ] # The allowed list of skill contents properties which can be used in # update_skill_contents_property command. - SKILL_CONTENTS_PROPERTIES = ( + SKILL_CONTENTS_PROPERTIES: List[str] = [ SKILL_CONTENTS_PROPERTY_EXPLANATION, - SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES) + SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES + ] # The allowed list of misconceptions properties which can be used in # update_skill_misconceptions_property command. - SKILL_MISCONCEPTIONS_PROPERTIES = ( + SKILL_MISCONCEPTIONS_PROPERTIES: List[str] = [ SKILL_MISCONCEPTIONS_PROPERTY_NAME, SKILL_MISCONCEPTIONS_PROPERTY_NOTES, SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK, SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED - ) + ] - ALLOWED_COMMANDS = [{ + ALLOWED_COMMANDS: List[feconf.ValidCmdDict] = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_ADD_SKILL_MISCONCEPTION, 'required_attribute_names': ['new_misconception_dict'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_DELETE_SKILL_MISCONCEPTION, 'required_attribute_names': ['misconception_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_ADD_PREREQUISITE_SKILL, 'required_attribute_names': ['skill_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_DELETE_PREREQUISITE_SKILL, 
'required_attribute_names': ['skill_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_RUBRICS, 'required_attribute_names': ['difficulty', 'explanations'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'required_attribute_names': [ 'misconception_id', 'property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': SKILL_MISCONCEPTIONS_PROPERTIES} + 'allowed_values': {'property_name': SKILL_MISCONCEPTIONS_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_SKILL_PROPERTY, 'required_attribute_names': ['property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': SKILL_PROPERTIES} + 'allowed_values': {'property_name': SKILL_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_SKILL_CONTENTS_PROPERTY, 'required_attribute_names': ['property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': SKILL_CONTENTS_PROPERTIES} + 'allowed_values': {'property_name': SKILL_CONTENTS_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION, 'required_attribute_names': ['from_version', 'to_version'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION, 'required_attribute_names': ['from_version', 'to_version'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 
'deprecated_values': {} }, { 'name': CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION, 'required_attribute_names': ['from_version', 'to_version'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }] +class CreateNewSkillCmd(SkillChange): + """Class representing the SkillChange's + CMD_CREATE_NEW command. + """ + + pass + + +class AddSkillMisconceptionCmd(SkillChange): + """Class representing the SkillChange's + CMD_ADD_SKILL_MISCONCEPTION command. + """ + + new_misconception_dict: MisconceptionDict + + +class DeleteSkillMisconceptionCmd(SkillChange): + """Class representing the SkillChange's + CMD_DELETE_SKILL_MISCONCEPTION command. + """ + + misconception_id: int + + +class AddPrerequisiteSkillCmd(SkillChange): + """Class representing the SkillChange's + CMD_ADD_PREREQUISITE_SKILL command. + """ + + skill_id: str + + +class DeletePrerequisiteSkillCmd(SkillChange): + """Class representing the SkillChange's + CMD_DELETE_PREREQUISITE_SKILL command. + """ + + skill_id: str + + +class UpdateRubricsCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_RUBRICS command. + """ + + difficulty: str + explanations: List[str] + + +class UpdateSkillMisconceptionPropertyNameCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY command with + SKILL_MISCONCEPTIONS_PROPERTY_NAME as allowed value. + """ + + misconception_id: int + property_name: Literal['name'] + new_value: str + old_value: str + + +class UpdateSkillMisconceptionPropertyNotesCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY command with + SKILL_MISCONCEPTIONS_PROPERTY_NOTES as allowed value. 
+ """ + + misconception_id: int + property_name: Literal['notes'] + new_value: str + old_value: str + + +class UpdateSkillMisconceptionPropertyFeedbackCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY command with + SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK as allowed value. + """ + + misconception_id: int + property_name: Literal['feedback'] + new_value: str + old_value: str + + +class UpdateSkillMisconceptionPropertyMustBeAddressedCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY command with + SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED as allowed value. + """ + + misconception_id: int + property_name: Literal['must_be_addressed'] + new_value: bool + old_value: bool + + +class UpdateSkillPropertyDescriptionCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_PROPERTY command with + SKILL_PROPERTY_DESCRIPTION as allowed value. + """ + + property_name: Literal['description'] + new_value: str + old_value: str + + +class UpdateSkillPropertyLanguageCodeCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_PROPERTY command with + SKILL_PROPERTY_LANGUAGE_CODE as allowed value. + """ + + property_name: Literal['language_code'] + new_value: str + old_value: str + + +class UpdateSkillPropertySupersedingSkillIdCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_PROPERTY command with + SKILL_PROPERTY_SUPERSEDING_SKILL_ID as + allowed value. + """ + + property_name: Literal['superseding_skill_id'] + new_value: str + old_value: str + + +class UpdateSkillPropertyAllQuestionsMergedCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_PROPERTY command with + SKILL_PROPERTY_ALL_QUESTIONS_MERGED as + allowed value. 
+ """ + + property_name: Literal['all_questions_merged'] + new_value: bool + old_value: bool + + +class UpdateSkillPropertyPrerequisiteSkillIdsCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_PROPERTY command with + SKILL_PROPERTY_PREREQUISITE_SKILL_IDS as + allowed value. + """ + + property_name: Literal['prerequisite_skill_ids'] + new_value: List[str] + old_value: List[str] + + +class UpdateSkillContentsPropertyExplanationCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_CONTENTS_PROPERTY command + with SKILL_CONTENTS_PROPERTY_EXPLANATION as + allowed value. + """ + + property_name: Literal['explanation'] + new_value: state_domain.SubtitledHtmlDict + old_value: state_domain.SubtitledHtmlDict + + +class UpdateSkillContentsPropertyWorkedExamplesCmd(SkillChange): + """Class representing the SkillChange's + CMD_UPDATE_SKILL_CONTENTS_PROPERTY command + with SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES + as allowed value. + """ + + property_name: Literal['worked_examples'] + new_value: List[WorkedExampleDict] + old_value: List[WorkedExampleDict] + + +class MigrateContentsSchemaToLatestVersionCmd(SkillChange): + """Class representing the SkillChange's + CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION command. + """ + + from_version: str + to_version: str + + +class MigrateMisconceptionsSchemaToLatestVersionCmd(SkillChange): + """Class representing the SkillChange's + CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION command. + """ + + from_version: str + to_version: str + + +class MigrateRubricsSchemaToLatestVersionCmd(SkillChange): + """Class representing the SkillChange's + CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION command. 
+ """ + + from_version: str + to_version: str + + +class MisconceptionDict(TypedDict): + """Dictionary representing the Misconception object.""" + + id: int + name: str + notes: str + feedback: str + must_be_addressed: bool + + +class VersionedMisconceptionDict(TypedDict): + """Dictionary representing the versioned Misconception object.""" + + schema_version: int + misconceptions: List[MisconceptionDict] + + class Misconception: """Domain object describing a skill misconception.""" def __init__( - self, misconception_id, name, notes, feedback, must_be_addressed): + self, + misconception_id: int, + name: str, + notes: str, + feedback: str, + must_be_addressed: bool + ) -> None: """Initializes a Misconception domain object. Args: @@ -202,7 +462,7 @@ def __init__( self.feedback = html_cleaner.clean(feedback) self.must_be_addressed = must_be_addressed - def to_dict(self): + def to_dict(self) -> MisconceptionDict: """Returns a dict representing this Misconception domain object. Returns: @@ -217,7 +477,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, misconception_dict): + def from_dict(cls, misconception_dict: MisconceptionDict) -> Misconception: """Returns a Misconception domain object from a dict. Args: @@ -235,7 +495,7 @@ def from_dict(cls, misconception_dict): return misconception @classmethod - def require_valid_misconception_id(cls, misconception_id): + def require_valid_misconception_id(cls, misconception_id: int) -> None: """Validates the misconception id for a Misconception object. Args: @@ -249,7 +509,12 @@ def require_valid_misconception_id(cls, misconception_id): 'Expected misconception ID to be an integer, received %s' % misconception_id) - def validate(self): + if misconception_id < 0: + raise utils.ValidationError( + 'Expected misconception ID to be >= 0, received %s' % + misconception_id) + + def validate(self) -> None: """Validates various properties of the Misconception object. 
Raises: @@ -285,10 +550,28 @@ def validate(self): self.feedback) +class RubricDict(TypedDict): + """Dictionary representing the Rubric object.""" + + difficulty: str + explanations: List[str] + + +class VersionedRubricDict(TypedDict): + """Dictionary representing the versioned Rubric object.""" + + schema_version: int + rubrics: List[RubricDict] + + class Rubric: """Domain object describing a skill rubric.""" - def __init__(self, difficulty, explanations): + def __init__( + self, + difficulty: str, + explanations: List[str] + ) -> None: """Initializes a Rubric domain object. Args: @@ -300,7 +583,7 @@ def __init__(self, difficulty, explanations): self.explanations = [ html_cleaner.clean(explanation) for explanation in explanations] - def to_dict(self): + def to_dict(self) -> RubricDict: """Returns a dict representing this Rubric domain object. Returns: @@ -312,7 +595,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, rubric_dict): + def from_dict(cls, rubric_dict: RubricDict) -> Rubric: """Returns a Rubric domain object from a dict. Args: @@ -326,7 +609,7 @@ def from_dict(cls, rubric_dict): return rubric - def validate(self): + def validate(self) -> None: """Validates various properties of the Rubric object. 
Raises: @@ -352,11 +635,39 @@ def validate(self): 'Expected each explanation to be a string, received %s' % explanation) + if len(self.explanations) > 10: + raise utils.ValidationError( + 'Expected number of explanations to be less than or equal ' + 'to 10, received %d' % len(self.explanations)) + + for explanation in self.explanations: + if len(explanation) > 300: + raise utils.ValidationError( + 'Explanation should be less than or equal to 300 chars, ' + 'received %d chars' % len(explanation)) + if ( + self.difficulty == constants.SKILL_DIFFICULTIES[1] and + len(self.explanations) == 0 + ): + raise utils.ValidationError( + 'Expected at least one explanation in medium level rubrics') + + +class WorkedExampleDict(TypedDict): + """Dictionary representing the WorkedExample object.""" + + question: state_domain.SubtitledHtmlDict + explanation: state_domain.SubtitledHtmlDict + class WorkedExample: """Domain object for representing the worked_example dict.""" - def __init__(self, question, explanation): + def __init__( + self, + question: state_domain.SubtitledHtml, + explanation: state_domain.SubtitledHtml + ) -> None: """Constructs a WorkedExample domain object. Args: @@ -367,7 +678,7 @@ def __init__(self, question, explanation): self.question = question self.explanation = explanation - def validate(self): + def validate(self) -> None: """Validates various properties of the WorkedExample object. Raises: @@ -385,7 +696,7 @@ def validate(self): 'received %s' % self.question) self.explanation.validate() - def to_dict(self): + def to_dict(self) -> WorkedExampleDict: """Returns a dict representing this WorkedExample domain object. Returns: @@ -397,7 +708,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, worked_example_dict): + def from_dict(cls, worked_example_dict: WorkedExampleDict) -> WorkedExample: """Return a WorkedExample domain object from a dict. 
Args: @@ -415,18 +726,36 @@ def from_dict(cls, worked_example_dict): worked_example_dict['explanation']['content_id'], worked_example_dict['explanation']['html']) ) - worked_example.question.validate() - worked_example.explanation.validate() return worked_example +class SkillContentsDict(TypedDict): + """Dictionary representing the SkillContents object.""" + + explanation: state_domain.SubtitledHtmlDict + worked_examples: List[WorkedExampleDict] + recorded_voiceovers: state_domain.RecordedVoiceoversDict + written_translations: translation_domain.WrittenTranslationsDict + + +class VersionedSkillContentsDict(TypedDict): + """Dictionary representing the versioned SkillContents object.""" + + schema_version: int + skill_contents: SkillContentsDict + + class SkillContents: """Domain object representing the skill_contents dict.""" def __init__( - self, explanation, worked_examples, recorded_voiceovers, - written_translations): + self, + explanation: state_domain.SubtitledHtml, + worked_examples: List[WorkedExample], + recorded_voiceovers: state_domain.RecordedVoiceovers, + written_translations: translation_domain.WrittenTranslations + ) -> None: """Constructs a SkillContents domain object. Args: @@ -445,7 +774,7 @@ def __init__( self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations - def validate(self): + def validate(self) -> None: """Validates various properties of the SkillContents object. Raises: @@ -480,10 +809,10 @@ def validate(self): available_content_ids.add(example.question.content_id) available_content_ids.add(example.explanation.content_id) - self.recorded_voiceovers.validate(available_content_ids) - self.written_translations.validate(available_content_ids) + self.recorded_voiceovers.validate(list(available_content_ids)) + self.written_translations.validate(list(available_content_ids)) - def to_dict(self): + def to_dict(self) -> SkillContentsDict: """Returns a dict representing this SkillContents domain object. 
Returns: @@ -498,7 +827,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, skill_contents_dict): + def from_dict(cls, skill_contents_dict: SkillContentsDict) -> SkillContents: """Return a SkillContents domain object from a dict. Args: @@ -516,24 +845,61 @@ def from_dict(cls, skill_contents_dict): for example in skill_contents_dict['worked_examples']], state_domain.RecordedVoiceovers.from_dict(skill_contents_dict[ 'recorded_voiceovers']), - state_domain.WrittenTranslations.from_dict(skill_contents_dict[ - 'written_translations']) + translation_domain.WrittenTranslations.from_dict( + skill_contents_dict['written_translations']) ) - skill_contents.explanation.validate() return skill_contents +class SkillDict(TypedDict): + """Dictionary representing the Skill object.""" + + id: str + description: str + misconceptions: List[MisconceptionDict] + rubrics: List[RubricDict] + skill_contents: SkillContentsDict + misconceptions_schema_version: int + rubric_schema_version: int + skill_contents_schema_version: int + language_code: str + version: int + next_misconception_id: int + superseding_skill_id: Optional[str] + all_questions_merged: bool + prerequisite_skill_ids: List[str] + + +class SerializableSkillDict(SkillDict): + """Dictionary representing the serializable Skill object.""" + + created_on: str + last_updated: str + + class Skill: """Domain object for an Oppia Skill.""" def __init__( - self, skill_id, description, misconceptions, rubrics, - skill_contents, misconceptions_schema_version, - rubric_schema_version, skill_contents_schema_version, - language_code, version, next_misconception_id, superseding_skill_id, - all_questions_merged, prerequisite_skill_ids, - created_on=None, last_updated=None): + self, + skill_id: str, + description: str, + misconceptions: List[Misconception], + rubrics: List[Rubric], + skill_contents: SkillContents, + misconceptions_schema_version: int, + rubric_schema_version: int, + skill_contents_schema_version: int, + language_code: 
str, + version: int, + next_misconception_id: int, + superseding_skill_id: Optional[str], + all_questions_merged: bool, + prerequisite_skill_ids: List[str], + created_on: Optional[datetime.datetime] = None, + last_updated: Optional[datetime.datetime] = None + ) -> None: """Constructs a Skill domain object. Args: @@ -587,7 +953,7 @@ def __init__( self.prerequisite_skill_ids = prerequisite_skill_ids @classmethod - def require_valid_skill_id(cls, skill_id): + def require_valid_skill_id(cls, skill_id: str) -> None: """Checks whether the skill id is a valid one. Args: @@ -600,7 +966,7 @@ def require_valid_skill_id(cls, skill_id): raise utils.ValidationError('Invalid skill id.') @classmethod - def require_valid_description(cls, description): + def require_valid_description(cls, description: str) -> None: """Checks whether the description of the skill is a valid one. Args: @@ -619,7 +985,7 @@ def require_valid_description(cls, description): 'Skill description should be less than %d chars, received %s' % (description_length_limit, description)) - def validate(self): + def validate(self) -> None: """Validates various properties of the Skill object. Raises: @@ -755,7 +1121,7 @@ def validate(self): 'Expected a value for all_questions_merged when ' 'superseding_skill_id is set.') - def to_dict(self): + def to_dict(self) -> SkillDict: """Returns a dict representing this Skill domain object. Returns: @@ -781,14 +1147,21 @@ def to_dict(self): 'prerequisite_skill_ids': self.prerequisite_skill_ids } - def serialize(self): + def serialize(self) -> str: """Returns the object serialized as a JSON string. Returns: str. JSON-encoded str encoding all of the information composing the object. 
""" - skill_dict = self.to_dict() + # Here we use MyPy ignore because to_dict() method returns a general + # dictionary representation of domain object (SkillDict) which + # does not contain properties like created_on and last_updated but + # MyPy expects skill_dict, a dictionary which contains all the + # properties of domain object. That's why we are explicitly changing + # the type of skill_dict, here which causes MyPy to throw an + # error. Thus, to silence the error, we added an ignore here. + skill_dict: SerializableSkillDict = self.to_dict() # type: ignore[assignment] # The only reason we add the version parameter separately is that our # yaml encoding/decoding of this object does not handle the version # parameter. @@ -810,7 +1183,7 @@ def serialize(self): return json.dumps(skill_dict) @classmethod - def deserialize(cls, json_string): + def deserialize(cls, json_string: str) -> Skill: """Returns a Skill domain object decoded from a JSON string. Args: @@ -840,8 +1213,12 @@ def deserialize(cls, json_string): @classmethod def from_dict( - cls, skill_dict, skill_version=0, skill_created_on=None, - skill_last_updated=None): + cls, + skill_dict: SkillDict, + skill_version: int = 0, + skill_created_on: Optional[datetime.datetime] = None, + skill_last_updated: Optional[datetime.datetime] = None + ) -> Skill: """Returns a Skill domain object from a dict. Args: @@ -883,7 +1260,12 @@ def from_dict( return skill @classmethod - def create_default_skill(cls, skill_id, description, rubrics): + def create_default_skill( + cls, + skill_id: str, + description: str, + rubrics: List[Rubric] + ) -> Skill: """Returns a skill domain object with default values. This is for the frontend where a default blank skill would be shown to the user when the skill is created for the first time. 
@@ -905,7 +1287,7 @@ def create_default_skill(cls, skill_id, description, rubrics): explanation_content_id: {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { explanation_content_id: {} } @@ -918,7 +1300,7 @@ def create_default_skill(cls, skill_id, description, rubrics): feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0, 0, None, False, []) - def generate_skill_misconception_id(self, misconception_id): + def generate_skill_misconception_id(self, misconception_id: int) -> str: """Given a misconception id, it returns the skill-misconception-id. It is of the form -. @@ -934,7 +1316,10 @@ def generate_skill_misconception_id(self, misconception_id): @classmethod def convert_html_fields_in_skill_contents( - cls, skill_contents_dict, conversion_fn): + cls, + skill_contents_dict: SkillContentsDict, + conversion_fn: Callable[[str], str] + ) -> SkillContentsDict: """Applies a conversion function on all the html strings in a skill to migrate them to a desired state. @@ -949,10 +1334,6 @@ def convert_html_fields_in_skill_contents( """ skill_contents_dict['explanation']['html'] = conversion_fn( skill_contents_dict['explanation']['html']) - skill_contents_dict['written_translations'] = ( - state_domain.WrittenTranslations. - convert_html_in_written_translations( - skill_contents_dict['written_translations'], conversion_fn)) for value_index, value in enumerate( skill_contents_dict['worked_examples']): @@ -964,7 +1345,9 @@ def convert_html_fields_in_skill_contents( return skill_contents_dict @classmethod - def _convert_skill_contents_v1_dict_to_v2_dict(cls, skill_contents_dict): + def _convert_skill_contents_v1_dict_to_v2_dict( + cls, skill_contents_dict: SkillContentsDict + ) -> SkillContentsDict: """Converts v1 skill contents to the v2 schema. In the v2 schema, the new Math components schema is introduced. 
@@ -979,7 +1362,9 @@ def _convert_skill_contents_v1_dict_to_v2_dict(cls, skill_contents_dict): html_validation_service.add_math_content_to_math_rte_components) @classmethod - def _convert_skill_contents_v2_dict_to_v3_dict(cls, skill_contents_dict): + def _convert_skill_contents_v2_dict_to_v3_dict( + cls, skill_contents_dict: SkillContentsDict + ) -> SkillContentsDict: """Converts v2 skill contents to the v3 schema. The v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. @@ -995,7 +1380,9 @@ def _convert_skill_contents_v2_dict_to_v3_dict(cls, skill_contents_dict): html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod - def _convert_skill_contents_v3_dict_to_v4_dict(cls, skill_contents_dict): + def _convert_skill_contents_v3_dict_to_v4_dict( + cls, skill_contents_dict: SkillContentsDict + ) -> SkillContentsDict: """Converts v3 skill contents to the v4 schema. The v4 schema fixes HTML encoding issues. @@ -1011,7 +1398,10 @@ def _convert_skill_contents_v3_dict_to_v4_dict(cls, skill_contents_dict): @classmethod def update_skill_contents_from_model( - cls, versioned_skill_contents, current_version): + cls, + versioned_skill_contents: VersionedSkillContentsDict, + current_version: int + ) -> None: """Converts the skill_contents blob contained in the given versioned_skill_contents dict from current_version to current_version + 1. Note that the versioned_skill_contents being @@ -1035,7 +1425,10 @@ def update_skill_contents_from_model( @classmethod def update_misconceptions_from_model( - cls, versioned_misconceptions, current_version): + cls, + versioned_misconceptions: VersionedMisconceptionDict, + current_version: int + ) -> None: """Converts the misconceptions blob contained in the given versioned_misconceptions dict from current_version to current_version + 1. 
Note that the versioned_misconceptions being @@ -1062,7 +1455,9 @@ def update_misconceptions_from_model( versioned_misconceptions['misconceptions'] = updated_misconceptions @classmethod - def _convert_misconception_v1_dict_to_v2_dict(cls, misconception_dict): + def _convert_misconception_v1_dict_to_v2_dict( + cls, misconception_dict: MisconceptionDict + ) -> MisconceptionDict: """Converts v1 misconception schema to the v2 schema. In the v2 schema, the field must_be_addressed has been added. @@ -1076,7 +1471,9 @@ def _convert_misconception_v1_dict_to_v2_dict(cls, misconception_dict): return misconception_dict @classmethod - def _convert_misconception_v2_dict_to_v3_dict(cls, misconception_dict): + def _convert_misconception_v2_dict_to_v3_dict( + cls, misconception_dict: MisconceptionDict + ) -> MisconceptionDict: """Converts v2 misconception schema to the v3 schema. In the v3 schema, the new Math components schema is introduced. @@ -1095,7 +1492,9 @@ def _convert_misconception_v2_dict_to_v3_dict(cls, misconception_dict): return misconception_dict @classmethod - def _convert_misconception_v3_dict_to_v4_dict(cls, misconception_dict): + def _convert_misconception_v3_dict_to_v4_dict( + cls, misconception_dict: MisconceptionDict + ) -> MisconceptionDict: """Converts v3 misconception schema to the v4 schema. The v4 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. @@ -1115,7 +1514,9 @@ def _convert_misconception_v3_dict_to_v4_dict(cls, misconception_dict): return misconception_dict @classmethod - def _convert_misconception_v4_dict_to_v5_dict(cls, misconception_dict): + def _convert_misconception_v4_dict_to_v5_dict( + cls, misconception_dict: MisconceptionDict + ) -> MisconceptionDict: """Converts v4 misconception schema to the v5 schema. The v5 schema fixes HTML encoding issues. 
@@ -1134,7 +1535,9 @@ def _convert_misconception_v4_dict_to_v5_dict(cls, misconception_dict): return misconception_dict @classmethod - def _convert_rubric_v1_dict_to_v2_dict(cls, rubric_dict): + def _convert_rubric_v1_dict_to_v2_dict( + cls, rubric_dict: RubricDict + ) -> RubricDict: """Converts v1 rubric schema to the v2 schema. In the v2 schema, multiple explanations have been added for each difficulty. @@ -1144,13 +1547,23 @@ def _convert_rubric_v1_dict_to_v2_dict(cls, rubric_dict): Returns: dict. The converted rubric_dict. """ - explanation = rubric_dict['explanation'] - del rubric_dict['explanation'] + # Here we use MyPy ignore because in convert functions, we allow less + # strict typing because here we are working with previous versions of + # the domain object and in previous versions of the domain object there + # are some fields that are discontinued in the latest domain object + # (eg. explanation). So, while accessing these discontinued fields MyPy + # throws an error. Thus, to avoid the error, we used ignore here. + explanation = rubric_dict['explanation'] # type: ignore[misc] + # Here we use MyPy ignore because MyPy doesn't allow key deletion from + # TypedDict. + del rubric_dict['explanation'] # type: ignore[misc] rubric_dict['explanations'] = [explanation] return rubric_dict @classmethod - def _convert_rubric_v2_dict_to_v3_dict(cls, rubric_dict): + def _convert_rubric_v2_dict_to_v3_dict( + cls, rubric_dict: RubricDict + ) -> RubricDict: """Converts v2 rubric schema to the v3 schema. In the v3 schema, the new Math components schema is introduced. @@ -1168,7 +1581,9 @@ def _convert_rubric_v2_dict_to_v3_dict(cls, rubric_dict): return rubric_dict @classmethod - def _convert_rubric_v3_dict_to_v4_dict(cls, rubric_dict): + def _convert_rubric_v3_dict_to_v4_dict( + cls, rubric_dict: RubricDict + ) -> RubricDict: """Converts v3 rubric schema to the v4 schema. 
The v4 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. @@ -1187,7 +1602,9 @@ def _convert_rubric_v3_dict_to_v4_dict(cls, rubric_dict): return rubric_dict @classmethod - def _convert_rubric_v4_dict_to_v5_dict(cls, rubric_dict): + def _convert_rubric_v4_dict_to_v5_dict( + cls, rubric_dict: RubricDict + ) -> RubricDict: """Converts v4 rubric schema to the v5 schema. The v4 schema fixes HTML encoding issues. @@ -1205,7 +1622,11 @@ def _convert_rubric_v4_dict_to_v5_dict(cls, rubric_dict): return rubric_dict @classmethod - def update_rubrics_from_model(cls, versioned_rubrics, current_version): + def update_rubrics_from_model( + cls, + versioned_rubrics: VersionedRubricDict, + current_version: int + ) -> None: """Converts the rubrics blob contained in the given versioned_rubrics dict from current_version to current_version + 1. Note that the versioned_rubrics being @@ -1231,7 +1652,7 @@ def update_rubrics_from_model(cls, versioned_rubrics, current_version): versioned_rubrics['rubrics'] = updated_rubrics - def get_all_html_content_strings(self): + def get_all_html_content_strings(self) -> List[str]: """Returns all html strings that are part of the skill (or any of its subcomponents). @@ -1254,7 +1675,7 @@ def get_all_html_content_strings(self): return html_content_strings - def update_description(self, description): + def update_description(self, description: str) -> None: """Updates the description of the skill. Args: @@ -1262,7 +1683,7 @@ def update_description(self, description): """ self.description = description - def update_language_code(self, language_code): + def update_language_code(self, language_code: str) -> None: """Updates the language code of the skill. 
Args: @@ -1270,7 +1691,7 @@ def update_language_code(self, language_code): """ self.language_code = language_code - def update_superseding_skill_id(self, superseding_skill_id): + def update_superseding_skill_id(self, superseding_skill_id: str) -> None: """Updates the superseding skill ID of the skill. Args: @@ -1278,7 +1699,9 @@ def update_superseding_skill_id(self, superseding_skill_id): """ self.superseding_skill_id = superseding_skill_id - def record_that_all_questions_are_merged(self, all_questions_merged): + def record_that_all_questions_are_merged( + self, all_questions_merged: bool + ) -> None: """Updates the flag value which indicates if all questions are merged. Args: @@ -1287,7 +1710,9 @@ def record_that_all_questions_are_merged(self, all_questions_merged): """ self.all_questions_merged = all_questions_merged - def update_explanation(self, explanation): + def update_explanation( + self, explanation: state_domain.SubtitledHtml + ) -> None: """Updates the explanation of the skill. Args: @@ -1302,7 +1727,9 @@ def update_explanation(self, explanation): new_content_ids = [self.skill_contents.explanation.content_id] self._update_content_ids_in_assets(old_content_ids, new_content_ids) - def update_worked_examples(self, worked_examples): + def update_worked_examples( + self, worked_examples: List[WorkedExample] + ) -> None: """Updates the worked examples list of the skill by performing a copy of the provided list. @@ -1324,7 +1751,11 @@ def update_worked_examples(self, worked_examples): self._update_content_ids_in_assets(old_content_ids, new_content_ids) - def _update_content_ids_in_assets(self, old_ids_list, new_ids_list): + def _update_content_ids_in_assets( + self, + old_ids_list: List[str], + new_ids_list: List[str] + ) -> None: """Adds or deletes content ids in recorded_voiceovers and written_translations. 
@@ -1349,7 +1780,7 @@ def _update_content_ids_in_assets(self, old_ids_list, new_ids_list): recorded_voiceovers.add_content_id_for_voiceover(content_id) written_translations.add_content_id_for_translation(content_id) - def _find_misconception_index(self, misconception_id): + def _find_misconception_index(self, misconception_id: int) -> Optional[int]: """Returns the index of the misconception with the given misconception id, or None if it is not in the misconceptions list. @@ -1365,7 +1796,7 @@ def _find_misconception_index(self, misconception_id): return ind return None - def add_misconception(self, misconception): + def add_misconception(self, misconception: Misconception) -> None: """Adds a new misconception to the skill. Args: @@ -1376,7 +1807,9 @@ def add_misconception(self, misconception): self.next_misconception_id = self.get_incremented_misconception_id( misconception.id) - def _find_prerequisite_skill_id_index(self, skill_id_to_find): + def _find_prerequisite_skill_id_index( + self, skill_id_to_find: str + ) -> Optional[int]: """Returns the index of the skill_id in the prerequisite_skill_ids array. @@ -1391,7 +1824,7 @@ def _find_prerequisite_skill_id_index(self, skill_id_to_find): return ind return None - def add_prerequisite_skill(self, skill_id): + def add_prerequisite_skill(self, skill_id: str) -> None: """Adds a prerequisite skill to the skill. Args: @@ -1404,7 +1837,7 @@ def add_prerequisite_skill(self, skill_id): raise ValueError('The skill is already a prerequisite skill.') self.prerequisite_skill_ids.append(skill_id) - def delete_prerequisite_skill(self, skill_id): + def delete_prerequisite_skill(self, skill_id: str) -> None: """Removes a prerequisite skill from the skill. 
Args: @@ -1418,12 +1851,17 @@ def delete_prerequisite_skill(self, skill_id): raise ValueError('The skill to remove is not a prerequisite skill.') del self.prerequisite_skill_ids[index] - def update_rubric(self, difficulty, explanations): + def update_rubric( + self, difficulty: str, explanations: List[str] + ) -> None: """Adds or updates the rubric of the given difficulty. Args: difficulty: str. The difficulty of the rubric. explanations: list(str). The explanations for the rubric. + + Raises: + ValueError. No rubric for given difficulty. """ for rubric in self.rubrics: if rubric.difficulty == difficulty: @@ -1432,7 +1870,7 @@ def update_rubric(self, difficulty, explanations): raise ValueError( 'There is no rubric for the given difficulty.') - def get_incremented_misconception_id(self, misconception_id): + def get_incremented_misconception_id(self, misconception_id: int) -> int: """Returns the incremented misconception id. Args: @@ -1444,7 +1882,7 @@ def get_incremented_misconception_id(self, misconception_id): """ return misconception_id + 1 - def delete_misconception(self, misconception_id): + def delete_misconception(self, misconception_id: int) -> None: """Removes a misconception with the given id. Args: @@ -1459,7 +1897,9 @@ def delete_misconception(self, misconception_id): 'There is no misconception with the given id.') del self.misconceptions[index] - def update_misconception_name(self, misconception_id, name): + def update_misconception_name( + self, misconception_id: int, name: str + ) -> None: """Updates the name of the misconception with the given id. Args: @@ -1476,7 +1916,8 @@ def update_misconception_name(self, misconception_id, name): self.misconceptions[index].name = name def update_misconception_must_be_addressed( - self, misconception_id, must_be_addressed): + self, misconception_id: int, must_be_addressed: bool + ) -> None: """Updates the must_be_addressed value of the misconception with the given id. 
@@ -1497,7 +1938,9 @@ def update_misconception_must_be_addressed( 'There is no misconception with the given id.') self.misconceptions[index].must_be_addressed = must_be_addressed - def update_misconception_notes(self, misconception_id, notes): + def update_misconception_notes( + self, misconception_id: int, notes: str + ) -> None: """Updates the notes of the misconception with the given id. Args: @@ -1513,7 +1956,9 @@ def update_misconception_notes(self, misconception_id, notes): 'There is no misconception with the given id.') self.misconceptions[index].notes = notes - def update_misconception_feedback(self, misconception_id, feedback): + def update_misconception_feedback( + self, misconception_id: int, feedback: str + ) -> None: """Updates the feedback of the misconception with the given id. Args: @@ -1531,13 +1976,33 @@ def update_misconception_feedback(self, misconception_id, feedback): self.misconceptions[index].feedback = feedback +class SkillSummaryDict(TypedDict): + """Dictionary representing the SkillSummary object.""" + + id: str + description: str + language_code: str + version: int + misconception_count: int + worked_examples_count: int + skill_model_created_on: float + skill_model_last_updated: float + + class SkillSummary: """Domain object for Skill Summary.""" def __init__( - self, skill_id, description, language_code, version, - misconception_count, worked_examples_count, skill_model_created_on, - skill_model_last_updated): + self, + skill_id: str, + description: str, + language_code: str, + version: int, + misconception_count: int, + worked_examples_count: int, + skill_model_created_on: datetime.datetime, + skill_model_last_updated: datetime.datetime + ) -> None: """Constructs a SkillSummary domain object. 
Args: @@ -1563,7 +2028,7 @@ def __init__( self.skill_model_created_on = skill_model_created_on self.skill_model_last_updated = skill_model_last_updated - def validate(self): + def validate(self) -> None: """Validates various properties of the Skill Summary object. Raises: @@ -1604,7 +2069,7 @@ def validate(self): 'Expected worked_examples_count to be non-negative, ' 'received \'%s\'' % self.worked_examples_count) - def to_dict(self): + def to_dict(self) -> SkillSummaryDict: """Returns a dictionary representation of this domain object. Returns: @@ -1624,6 +2089,21 @@ def to_dict(self): } +class AugmentedSkillSummaryDict(TypedDict): + """Dictionary representing the AugmentedSkillSummary object.""" + + id: str + description: str + language_code: str + version: int + misconception_count: int + worked_examples_count: int + topic_names: List[str] + classroom_names: List[str] + skill_model_created_on: float + skill_model_last_updated: float + + class AugmentedSkillSummary: """Domain object for Augmented Skill Summary, which has all the properties of SkillSummary along with the topic names to which the skill is assigned @@ -1631,9 +2111,18 @@ class AugmentedSkillSummary: """ def __init__( - self, skill_id, description, language_code, version, - misconception_count, worked_examples_count, topic_names, - classroom_names, skill_model_created_on, skill_model_last_updated): + self, + skill_id: str, + description: str, + language_code: str, + version: int, + misconception_count: int, + worked_examples_count: int, + topic_names: List[str], + classroom_names: List[str], + skill_model_created_on: datetime.datetime, + skill_model_last_updated: datetime.datetime + ) -> None: """Constructs an AugmentedSkillSummary domain object. Args: @@ -1665,7 +2154,7 @@ def __init__( self.topic_names = topic_names self.classroom_names = classroom_names - def to_dict(self): + def to_dict(self) -> AugmentedSkillSummaryDict: """Returns a dictionary representation of this domain object. 
Returns: @@ -1687,6 +2176,15 @@ def to_dict(self): } +class TopicAssignmentDict(TypedDict): + """Dictionary representing the TopicAssignment object.""" + + topic_id: str + topic_name: str + topic_version: int + subtopic_id: Optional[int] + + class TopicAssignment: """Domain object for Topic Assignment, which provides the details of a single topic (and, if applicable, the subtopic within that topic) to which @@ -1694,7 +2192,12 @@ class TopicAssignment: """ def __init__( - self, topic_id, topic_name, topic_version, subtopic_id): + self, + topic_id: str, + topic_name: str, + topic_version: int, + subtopic_id: Optional[int] + ) -> None: """Constructs a TopicAssignment domain object. Args: @@ -1702,7 +2205,7 @@ def __init__( topic_name: str. The name of the topic. topic_version: int. The current version of the topic to which the skill is assigned. - subtopic_id: str or None. The id of the subtopic to which the skill + subtopic_id: int or None. The id of the subtopic to which the skill is assigned, or None if the skill is not assigned to any subtopic. """ @@ -1711,7 +2214,7 @@ def __init__( self.topic_version = topic_version self.subtopic_id = subtopic_id - def to_dict(self): + def to_dict(self) -> TopicAssignmentDict: """Returns a dictionary representation of this domain object. Returns: @@ -1725,10 +2228,23 @@ def to_dict(self): } +class UserSkillMasteryDict(TypedDict): + """Dictionary representing the UserSkillMastery object.""" + + user_id: str + skill_id: str + degree_of_mastery: float + + class UserSkillMastery: """Domain object for a user's mastery of a particular skill.""" - def __init__(self, user_id, skill_id, degree_of_mastery): + def __init__( + self, + user_id: str, + skill_id: str, + degree_of_mastery: float + ) -> None: """Constructs a SkillMastery domain object for a user. 
Args: @@ -1741,7 +2257,7 @@ def __init__(self, user_id, skill_id, degree_of_mastery): self.skill_id = skill_id self.degree_of_mastery = degree_of_mastery - def to_dict(self): + def to_dict(self) -> UserSkillMasteryDict: """Returns a dictionary representation of this domain object. Returns: @@ -1754,7 +2270,9 @@ def to_dict(self): } @classmethod - def from_dict(cls, skill_mastery_dict): + def from_dict( + cls, skill_mastery_dict: UserSkillMasteryDict + ) -> UserSkillMastery: """Returns a UserSkillMastery domain object from the given dict. Args: @@ -1769,3 +2287,185 @@ def from_dict(cls, skill_mastery_dict): skill_mastery_dict['skill_id'], skill_mastery_dict['degree_of_mastery'] ) + + +class CategorizedSkills: + """Domain object for representing categorized skills' ids and + descriptions. Here, 'categorized skill' means that the skill is assigned + to some topic. If a skill is assigned to a topic but not a + subtopic, then it is termed as 'uncategorized' which also comes under + CategorizedSkills because it is at least assigned to a topic. + + Attributes: + categorized_skills: dict[str, dict[str, list(ShortSkillSummary)]. + The parent dict contains keys as topic names. The children dicts + contain keys as subtopic titles and values as list of short skill + summaries. An extra key called 'uncategorized' is present in every + child dict to represent the skills that are not assigned to any + subtopic but are assigned to the parent topic. + """ + + def __init__(self) -> None: + """Constructs a CategorizedSkills domain object.""" + self.categorized_skills: Dict[ + str, Dict[str, List[ShortSkillSummary]] + ] = {} + + def add_topic(self, topic_name: str, subtopic_titles: List[str]) -> None: + """Adds a topic to the categorized skills and initializes its + 'uncategorized' and subtopic skills as empty lists. + + Args: + topic_name: str. The name of the topic. + subtopic_titles: list(str). The list of subtopic titles of the + topic. + + Raises: + ValidationError. 
Topic name is already added. + """ + if topic_name in self.categorized_skills: + raise utils.ValidationError( + 'Topic name \'%s\' is already added.' % topic_name) + + self.categorized_skills[topic_name] = {} + self.categorized_skills[topic_name]['uncategorized'] = [] + for subtopic_title in subtopic_titles: + self.categorized_skills[topic_name][subtopic_title] = [] + + def add_uncategorized_skill( + self, + topic_name: str, + skill_id: str, + skill_description: str + ) -> None: + """Adds an uncategorized skill id and description for the given topic. + + Args: + topic_name: str. The name of the topic. + skill_id: str. The id of the skill. + skill_description: str. The description of the skill. + """ + self.require_topic_name_to_be_added(topic_name) + self.categorized_skills[topic_name]['uncategorized'].append( + ShortSkillSummary(skill_id, skill_description)) + + def add_subtopic_skill( + self, + topic_name: str, + subtopic_title: str, + skill_id: str, + skill_description: str + ) -> None: + """Adds a subtopic skill id and description for the given topic. + + Args: + topic_name: str. The name of the topic. + subtopic_title: str. The title of the subtopic. + skill_id: str. The id of the skill. + skill_description: str. The description of the skill. + """ + self.require_topic_name_to_be_added(topic_name) + self.require_subtopic_title_to_be_added(topic_name, subtopic_title) + self.categorized_skills[topic_name][subtopic_title].append( + ShortSkillSummary(skill_id, skill_description)) + + def require_topic_name_to_be_added(self, topic_name: str) -> None: + """Checks whether the given topic name is valid i.e. added to the + categorized skills dict. + + Args: + topic_name: str. The name of the topic. + + Raises: + ValidationError. Topic name is not added. + """ + if not topic_name in self.categorized_skills: + raise utils.ValidationError( + 'Topic name \'%s\' is not added.' 
% topic_name) + + def require_subtopic_title_to_be_added( + self, topic_name: str, subtopic_title: str + ) -> None: + """Checks whether the given subtopic title is added to the + categorized skills dict under the given topic name. + + Args: + topic_name: str. The name of the topic. + subtopic_title: str. The title of the subtopic. + + Raises: + ValidationError. Subtopic title is not added. + """ + if not subtopic_title in self.categorized_skills[topic_name]: + raise utils.ValidationError( + 'Subtopic title \'%s\' is not added.' % subtopic_title) + + def to_dict(self) -> Dict[str, Dict[str, List[ShortSkillSummaryDict]]]: + """Returns a dictionary representation of this domain object.""" + categorized_skills_dict = copy.deepcopy(self.categorized_skills) + + result_categorized_skills_dict: Dict[ + str, Dict[str, List[ShortSkillSummaryDict]] + ] = {} + for topic_name in categorized_skills_dict: + # The key 'uncategorized' will also be covered by this loop. + result_categorized_skills_dict[topic_name] = {} + for subtopic_title in categorized_skills_dict[topic_name]: + result_categorized_skills_dict[topic_name][subtopic_title] = [ + short_skill_summary.to_dict() for short_skill_summary in + categorized_skills_dict[topic_name][subtopic_title] + ] + return result_categorized_skills_dict + + +class ShortSkillSummaryDict(TypedDict): + """Dictionary representing the ShortSkillSummary object.""" + + skill_id: str + skill_description: str + + +class ShortSkillSummary: + """Domain object for a short skill summary. It contains the id and + description of the skill. It is different from the SkillSummary in the + sense that the latter contains many other properties of the skill along with + the skill id and description. + """ + + def __init__(self, skill_id: str, skill_description: str) -> None: + """Constructs a ShortSkillSummary domain object. + + Args: + skill_id: str. The id of the skill. + skill_description: str. The description of the skill. 
+ """ + self.skill_id = skill_id + self.skill_description = skill_description + + def to_dict(self) -> ShortSkillSummaryDict: + """Returns a dictionary representation of this domain object. + + Returns: + dict. A dict representing this ShortSkillSummary object. + """ + return { + 'skill_id': self.skill_id, + 'skill_description': self.skill_description + } + + @classmethod + def from_skill_summary( + cls, skill_summary: SkillSummary + ) -> ShortSkillSummary: + """Returns a ShortSkillSummary domain object from the given skill + summary. + + Args: + skill_summary: SkillSummary. The skill summary domain object. + + Returns: + ShortSkillSummary. The ShortSkillSummary domain object. + """ + return cls( + skill_summary.id, + skill_summary.description) diff --git a/core/domain/skill_domain_test.py b/core/domain/skill_domain_test.py index e18588746276..daf73d81efd7 100644 --- a/core/domain/skill_domain_test.py +++ b/core/domain/skill_domain_test.py @@ -23,17 +23,20 @@ from core.constants import constants from core.domain import skill_domain from core.domain import state_domain +from core.domain import translation_domain from core.tests import test_utils +from typing import Final, List + class SkillDomainUnitTests(test_utils.GenericTestBase): """Test the skill domain object.""" - SKILL_ID = 'skill_id' - MISCONCEPTION_ID = 0 + SKILL_ID: Final = 'skill_id' + MISCONCEPTION_ID: Final = 0 - def setUp(self): - super(SkillDomainUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '

    Example Question 1

    '), state_domain.SubtitledHtml('3', '

    Example Explanation 1

    ') @@ -46,7 +49,7 @@ def setUp(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -67,38 +70,59 @@ def setUp(self): skill_contents, feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION, feconf.CURRENT_RUBRIC_SCHEMA_VERSION, feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION, 'en', 0, 1, - None, False, ['skill_id_2'] - ) + None, False, ['skill_id_2'], + created_on=datetime.datetime.now(), + last_updated=datetime.datetime.now()) - def _assert_validation_error(self, expected_error_substring): + # Here we use MyPy ignore because the signature of this method + # doesn't match with TestBase._assert_validation_error(). + def _assert_validation_error(self, expected_error_substring: str) -> None: # type: ignore[override] """Checks that the skill passes strict validation.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): self.skill.validate() - def test_skill_id_validation_fails_with_invalid_skill_id_type(self): - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_skill_id_validation_fails_with_invalid_skill_id_type(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Skill id should be a string'): - skill_domain.Skill.require_valid_skill_id(10) + skill_domain.Skill.require_valid_skill_id(10) # type: ignore[arg-type] - def test_skill_id_validation_fails_with_invalid_skill_id_length(self): - with self.assertRaisesRegexp( + def test_skill_id_validation_fails_with_invalid_skill_id_length( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Invalid skill id'): skill_domain.Skill.require_valid_skill_id('abc') - def test_valid_misconception_id(self): - self.skill.next_misconception_id = 'invalid_id' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_valid_misconception_id(self) -> None: + self.skill.next_misconception_id = 'invalid_id' # type: ignore[assignment] self._assert_validation_error( 'Expected misconception ID to be an integer') - def test_get_all_html_content_strings(self): + def test_valid_misconception_id_greater_than_zero(self) -> None: + self.skill.next_misconception_id = -12 + self._assert_validation_error( + 'Expected misconception ID to be >= 0') + + def test_get_all_html_content_strings(self) -> None: html_strings = self.skill.get_all_html_content_strings() self.assertEqual(len(html_strings), 8) - def test_valid_misconception_name(self): + def test_valid_misconception_name(self) -> None: misconception_name = 'This string is smaller than 50' self.skill.update_misconception_name(0, misconception_name) self.skill.validate() + with self.assertRaisesRegex( + ValueError, + 'There is no misconception with the given id.' 
+ ): + self.skill.update_misconception_name(1, misconception_name) misconception_name = ( 'etiam non quam lacus suspendisse faucibus interdum posuere lorem ' 'ipsum dolor sit amet consectetur adipiscing elit duis tristique ' @@ -107,25 +131,317 @@ def test_valid_misconception_name(self): self._assert_validation_error( 'Misconception name should be less than 100 chars' ) + self.assertEqual(self.skill.get_incremented_misconception_id(0), 1) - def test_valid_misconception_must_be_addressed(self): + def test_update_contents_from_model(self) -> None: + versioned_skill_contents: skill_domain.VersionedSkillContentsDict = { + 'schema_version': 1, + 'skill_contents': { + 'explanation': { + 'content_id': '1', + 'html': '

    Feedback

    ' + '' + '', + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'explanation': {} + } + }, + 'written_translations': { + 'translations_mapping': { + 'explanation': {} + } + }, + 'worked_examples': [ + { + 'question': { + 'html': '

    A Question

    ', + 'content_id': 'id' + }, + 'explanation': { + 'html': '

    An explanation

    ', + 'content_id': 'id' + } + } + ] + } + } + self.skill.update_skill_contents_from_model( + versioned_skill_contents, + versioned_skill_contents['schema_version'] + ) + self.skill.validate() + self.assertEqual(versioned_skill_contents['schema_version'], 2) + self.assertEqual( + versioned_skill_contents['skill_contents']['explanation'], + { + 'content_id': '1', + 'html': '

    Feedback

    ', + } + ) + versioned_skill_contents['skill_contents']['explanation'] = { + 'content_id': '1', + 'html': '' + '' + } + self.skill.update_skill_contents_from_model( + versioned_skill_contents, + versioned_skill_contents['schema_version'] + ) + self.skill.validate() + self.assertEqual(versioned_skill_contents['schema_version'], 3) + self.assertEqual( + versioned_skill_contents['skill_contents']['explanation'], + { + 'content_id': '1', + 'html': '' + '', + } + ) + versioned_skill_contents['skill_contents']['explanation']['html'] = ( + '

    Test 

    ' + ) + self.skill.update_skill_contents_from_model( + versioned_skill_contents, + versioned_skill_contents['schema_version'] + ) + self.skill.validate() + self.assertEqual(versioned_skill_contents['schema_version'], 4) + self.assertEqual( + versioned_skill_contents['skill_contents']['explanation'], + { + 'content_id': '1', + 'html': '

    Test

    ', + } + ) + + def test_update_misconceptions_from_model(self) -> None: + versioned_misconceptions: skill_domain.VersionedMisconceptionDict = { + 'schema_version': 1, + 'misconceptions': [ + { + 'id': self.MISCONCEPTION_ID, + 'name': 'name', + 'notes': '

    notes

    ', + 'feedback': '

    feedback

    ', + 'must_be_addressed': True + } + ] + } + self.skill.update_misconceptions_from_model( + versioned_misconceptions, + versioned_misconceptions['schema_version'] + ) + self.skill.validate() + self.assertEqual(versioned_misconceptions['schema_version'], 2) + self.assertEqual( + versioned_misconceptions['misconceptions'][0]['must_be_addressed'], + True + ) + versioned_misconceptions['misconceptions'][0]['feedback'] = ( + '

    ' + 'Feedback

    ' + '' + ) + expected_feedback = ( + '

    Feedback

    ' + '' + ) + self.skill.update_misconceptions_from_model( + versioned_misconceptions, + versioned_misconceptions['schema_version'] + ) + self.skill.validate() + self.assertEqual(versioned_misconceptions['schema_version'], 3) + self.assertEqual( + versioned_misconceptions['misconceptions'][0]['feedback'], + expected_feedback + ) + self.skill.update_misconceptions_from_model( + versioned_misconceptions, + versioned_misconceptions['schema_version'] + ) + self.skill.validate() + self.assertEqual(versioned_misconceptions['schema_version'], 4) + versioned_misconceptions['misconceptions'][0]['feedback'] = ( + '' + 'feedback ' + ) + self.skill.update_misconceptions_from_model( + versioned_misconceptions, + versioned_misconceptions['schema_version'] + ) + self.assertEqual(versioned_misconceptions['schema_version'], 5) + self.assertEqual( + versioned_misconceptions['misconceptions'][0]['feedback'], + 'feedback ' + ) + + def test_update_misconception_feedback(self) -> None: + feedback = '

    new_feedback

    ' + self.skill.update_misconception_feedback( + 0, feedback) + self.skill.validate() + self.assertEqual(self.skill.misconceptions[0].feedback, feedback) + with self.assertRaisesRegex( + ValueError, + 'There is no misconception with the given id.' + ): + self.skill.update_misconception_feedback(1, feedback) + + def test_update_misconception_notes(self) -> None: + new_notes = '

    Update notes

    ' + self.skill.update_misconception_notes( + 0, new_notes) + self.skill.validate() + self.assertEqual(self.skill.misconceptions[0].notes, new_notes) + with self.assertRaisesRegex( + ValueError, + 'There is no misconception with the given id.' + ): + self.skill.update_misconception_notes(1, new_notes) + + def test_update_misconception_must_be_addressed(self) -> None: + must_be_addressed = False + self.skill.update_misconception_must_be_addressed( + 0, must_be_addressed) + self.skill.validate() + self.assertEqual( + self.skill.misconceptions[0].must_be_addressed, + must_be_addressed + ) + with self.assertRaisesRegex( + ValueError, + 'There is no misconception with the given id.' + ): + self.skill.update_misconception_must_be_addressed( + 1, must_be_addressed) + + def test_delete_misconceptions(self) -> None: + self.skill.delete_misconception(0) + self.assertEqual(len(self.skill.misconceptions), 0) + with self.assertRaisesRegex( + ValueError, + 'There is no misconception with the given id.' + ): + self.skill.delete_misconception(0) + + def test_add_misconception(self) -> None: + misconception = skill_domain.Misconception( + self.MISCONCEPTION_ID + 1, 'name_2', '

    notes_2

    ', + '

    default_feedback_2

    ', True) + self.skill.add_misconception(misconception) + self.skill.validate() + self.assertEqual(self.skill.misconceptions[1], misconception) + + def test_delete_prerequisite_skill(self) -> None: + with self.assertRaisesRegex( + ValueError, + 'The skill to remove is not a prerequisite skill.' + ): + self.skill.delete_prerequisite_skill('some_id') + self.skill.delete_prerequisite_skill('skill_id_2') + self.assertEqual(len(self.skill.prerequisite_skill_ids), 0) + + def test_add_prerequisite_skill(self) -> None: + self.skill.add_prerequisite_skill('skill_id_3') + self.assertEqual(len(self.skill.prerequisite_skill_ids), 2) + self.assertEqual(self.skill.prerequisite_skill_ids[1], 'skill_id_3') + with self.assertRaisesRegex( + ValueError, + 'The skill is already a prerequisite skill.' + ): + self.skill.add_prerequisite_skill('skill_id_2') + + def test_find_prerequisite_skill_id_index(self) -> None: + # Disabling pylint protected access because this is a test. + self.assertEqual( + self.skill._find_prerequisite_skill_id_index('skill_id_2'), # pylint: disable=protected-access + 0 + ) + self.assertEqual( + self.skill._find_prerequisite_skill_id_index('skill_id_3'), # pylint: disable=protected-access + None + ) + + def test_update_explanation(self) -> None: + new_explanation = state_domain.SubtitledHtml( + '1', + '

    New Explanation

    ' + ) + self.skill.update_explanation(new_explanation) + self.skill.validate() + self.assertEqual( + self.skill.skill_contents.explanation, + new_explanation + ) + + def test_update_rubric(self) -> None: + difficulty = constants.SKILL_DIFFICULTIES[0] + explanations = ['explanation1'] + self.skill.update_rubric(difficulty, explanations) + with self.assertRaisesRegex( + ValueError, + 'There is no rubric for the given difficulty.' + ): + self.skill.update_rubric('difficulty', explanations) + + def test_updates_on_skill(self) -> None: + self.skill.update_description('Update Description') + self.skill.update_language_code('de') + self.skill.update_superseding_skill_id('1') + self.skill.record_that_all_questions_are_merged(True) + self.skill.validate() + self.assertEqual(self.skill.description, 'Update Description') + self.assertEqual(self.skill.language_code, 'de') + self.assertEqual(self.skill.superseding_skill_id, '1') + self.assertEqual(self.skill.all_questions_merged, True) + + def test_valid_misconception_must_be_addressed(self) -> None: self.skill.validate() must_be_addressed = 'False' - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + with self.assertRaisesRegex( ValueError, 'must_be_addressed should be a bool value'): self.skill.update_misconception_must_be_addressed( - 0, must_be_addressed) + 0, must_be_addressed) # type: ignore[arg-type] - self.skill.misconceptions[0].must_be_addressed = 'False' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.skill.misconceptions[0].must_be_addressed = 'False' # type: ignore[assignment] self._assert_validation_error( 'Expected must_be_addressed to be a bool' ) - def test_rubrics_validation(self): - self.skill.rubrics = 'rubric' + def test_rubrics_validation(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.rubrics = 'rubric' # type: ignore[assignment] self._assert_validation_error('Expected rubrics to be a list') - self.skill.rubrics = ['rubric'] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.rubrics = ['rubric'] # type: ignore[list-item] self._assert_validation_error( 'Expected each rubric to be a Rubric object') @@ -137,24 +453,58 @@ def test_rubrics_validation(self): ] self._assert_validation_error('Duplicate rubric found') - def test_valid_rubric_difficulty(self): + self.skill.rubrics = [ + skill_domain.Rubric( + constants.SKILL_DIFFICULTIES[0], + ['

    ' + 'Explanation' * 30 + '

    '] + ) + ] + self._assert_validation_error( + 'Explanation should be less than or equal to 300 chars, ' + 'received 337 chars') + + self.skill.rubrics = [ + skill_domain.Rubric( + constants.SKILL_DIFFICULTIES[0], + ['

    Explanation

    '] * 15 + ) + ] + self._assert_validation_error( + 'Expected number of explanations to be less than or equal ' + 'to 10, received 15') + + self.skill.rubrics = [skill_domain.Rubric( + constants.SKILL_DIFFICULTIES[1], [])] + self._assert_validation_error( + 'Expected at least one explanation in medium level rubrics') + + def test_valid_rubric_difficulty(self) -> None: self.skill.rubrics = [skill_domain.Rubric( 'invalid_difficulty', ['

    Explanation

    '])] self._assert_validation_error('Invalid difficulty received for rubric') - def test_valid_rubric_difficulty_type(self): - self.skill.rubrics = [skill_domain.Rubric(10, ['

    Explanation

    '])] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_valid_rubric_difficulty_type(self) -> None: + self.skill.rubrics = [skill_domain.Rubric(10, ['

    Explanation

    '])] # type: ignore[arg-type] self._assert_validation_error('Expected difficulty to be a string') - def test_valid_rubric_explanation(self): - self.skill.rubrics[0].explanations = 0 + def test_valid_rubric_explanation(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.rubrics[0].explanations = 0 # type: ignore[assignment] self._assert_validation_error('Expected explanations to be a list') - self.skill.rubrics[0].explanations = [0] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.rubrics[0].explanations = [0] # type: ignore[list-item] self._assert_validation_error( 'Expected each explanation to be a string') - def test_rubric_present_for_all_difficulties(self): + def test_rubric_present_for_all_difficulties(self) -> None: self.skill.validate() self.skill.rubrics = [ skill_domain.Rubric( @@ -165,7 +515,7 @@ def test_rubric_present_for_all_difficulties(self): self._assert_validation_error( 'All 3 difficulties should be addressed in rubrics') - def test_order_of_rubrics(self): + def test_order_of_rubrics(self) -> None: self.skill.rubrics = [ skill_domain.Rubric( constants.SKILL_DIFFICULTIES[1], ['

    Explanation 1

    ']), @@ -177,8 +527,11 @@ def test_order_of_rubrics(self): self._assert_validation_error( 'The difficulties should be ordered as follows') - def test_description_validation(self): - self.skill.description = 0 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_description_validation(self) -> None: + self.skill.description = 0 # type: ignore[assignment] self._assert_validation_error('Description should be a string') self.skill.description = ( @@ -188,28 +541,41 @@ def test_description_validation(self): self._assert_validation_error( 'Skill description should be less than 100 chars') - def test_prerequisite_skill_ids_validation(self): - self.skill.prerequisite_skill_ids = 0 + def test_prerequisite_skill_ids_validation(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.prerequisite_skill_ids = 0 # type: ignore[assignment] self._assert_validation_error( 'Expected prerequisite_skill_ids to be a list') - self.skill.prerequisite_skill_ids = [0] + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.prerequisite_skill_ids = [0] # type: ignore[list-item] self._assert_validation_error( 'Expected each skill ID to be a string') - def test_language_code_validation(self): - self.skill.language_code = 0 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_language_code_validation(self) -> None: + self.skill.language_code = 0 # type: ignore[assignment] self._assert_validation_error('Expected language code to be a string') self.skill.language_code = 'xz' self._assert_validation_error('Invalid language code') - def test_schema_versions_validation(self): + def test_schema_versions_validation(self) -> None: self.skill.skill_contents_schema_version = 100 self._assert_validation_error( 'Expected skill contents schema version to be %s' % feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION) - self.skill.skill_contents_schema_version = 'a' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.skill_contents_schema_version = 'a' # type: ignore[assignment] self._assert_validation_error( 'Expected skill contents schema version to be an integer') @@ -218,7 +584,10 @@ def test_schema_versions_validation(self): 'Expected misconceptions schema version to be %s' % feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION) - self.skill.misconceptions_schema_version = 'a' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.misconceptions_schema_version = 'a' # type: ignore[assignment] self._assert_validation_error( 'Expected misconceptions schema version to be an integer') @@ -228,59 +597,95 @@ def test_schema_versions_validation(self): 'Expected rubric schema version to be %s' % feconf.CURRENT_RUBRIC_SCHEMA_VERSION) - self.skill.rubric_schema_version = 'a' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.skill.rubric_schema_version = 'a' # type: ignore[assignment] self._assert_validation_error( 'Expected rubric schema version to be an integer') - def test_misconception_validation(self): - self.skill.misconceptions[0].feedback = 0 + def test_misconception_validation(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.misconceptions[0].feedback = 0 # type: ignore[assignment] self._assert_validation_error( 'Expected misconception feedback to be a string') - self.skill.misconceptions[0].notes = 0 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.misconceptions[0].notes = 0 # type: ignore[assignment] self._assert_validation_error( 'Expected misconception notes to be a string') - self.skill.misconceptions[0].name = 0 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.misconceptions[0].name = 0 # type: ignore[assignment] self._assert_validation_error( 'Expected misconception name to be a string') - self.skill.misconceptions = [''] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.skill.misconceptions = [''] # type: ignore[list-item] self._assert_validation_error( 'Expected each misconception to be a Misconception object') - self.skill.misconceptions = '' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.misconceptions = '' # type: ignore[assignment] self._assert_validation_error('Expected misconceptions to be a list') - def test_skill_contents_validation(self): - self.skill.skill_contents.worked_examples = '' + def test_skill_contents_validation(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.skill_contents.worked_examples = '' # type: ignore[assignment] self._assert_validation_error('Expected worked examples to be a list') - self.skill.skill_contents.worked_examples = [1] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.skill_contents.worked_examples = [1] # type: ignore[list-item] self._assert_validation_error( 'Expected worked example to be a WorkedExample object') - example = skill_domain.WorkedExample('question', 'explanation') + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ example = skill_domain.WorkedExample('question', 'explanation') # type: ignore[arg-type] self.skill.skill_contents.worked_examples = [example] self._assert_validation_error( 'Expected example question to be a SubtitledHtml object') + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. example = skill_domain.WorkedExample( state_domain.SubtitledHtml( - '2', '

    Example Question 1

    '), 'explanation') + '2', '

    Example Question 1

    '), 'explanation') # type: ignore[arg-type] self.skill.skill_contents.worked_examples = [example] self._assert_validation_error( 'Expected example explanation to be a SubtitledHtml object') - self.skill.skill_contents.explanation = 'explanation' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.skill_contents.explanation = 'explanation' # type: ignore[assignment] self._assert_validation_error( 'Expected skill explanation to be a SubtitledHtml object') - self.skill.skill_contents = '' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.skill.skill_contents = '' # type: ignore[assignment] self._assert_validation_error( 'Expected skill_contents to be a SkillContents object') - def test_validate_duplicate_content_id(self): + def test_validate_duplicate_content_id(self) -> None: self.skill.skill_contents.worked_examples = ( [skill_domain.WorkedExample( self.skill.skill_contents.explanation, @@ -294,7 +699,7 @@ def test_validate_duplicate_content_id(self): self.skill.skill_contents.worked_examples = [example_1] self._assert_validation_error('Found a duplicate content id 1') - def test_misconception_id_validation(self): + def test_misconception_id_validation(self) -> None: self.skill.misconceptions = [ skill_domain.Misconception( self.MISCONCEPTION_ID, 'name', '

    notes

    ', @@ -304,9 +709,12 @@ def test_misconception_id_validation(self): '

    default_feedback

    ', True)] self._assert_validation_error('Duplicate misconception ID found') - def test_skill_migration_validation(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_skill_migration_validation(self) -> None: self.skill.superseding_skill_id = 'TestSkillId' - self.skill.all_questions_merged = None + self.skill.all_questions_merged = None # type: ignore[assignment] self._assert_validation_error( 'Expected a value for all_questions_merged when ' 'superseding_skill_id is set.') @@ -316,7 +724,7 @@ def test_skill_migration_validation(self): 'Expected a value for superseding_skill_id when ' 'all_questions_merged is True.') - def test_create_default_skill(self): + def test_create_default_skill(self) -> None: """Test the create_default_skill function.""" rubrics = [ skill_domain.Rubric( @@ -370,7 +778,7 @@ def test_create_default_skill(self): } self.assertEqual(skill.to_dict(), expected_skill_dict) - def test_conversion_to_and_from_dict(self): + def test_conversion_to_and_from_dict(self) -> None: """Test that to_dict and from_dict preserve all data within a skill_contents and misconception object. 
""" @@ -385,7 +793,7 @@ def test_conversion_to_and_from_dict(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -413,11 +821,11 @@ def test_conversion_to_and_from_dict(self): self.assertEqual( rubric_from_dict.to_dict(), rubric_dict) - def test_skill_mastery_to_dict(self): - expected_skill_mastery_dict = { + def test_skill_mastery_to_dict(self) -> None: + expected_skill_mastery_dict: skill_domain.UserSkillMasteryDict = { 'user_id': 'user', 'skill_id': 'skill_id', - 'degree_of_mastery': '0.5' + 'degree_of_mastery': 0.5 } observed_skill_mastery = skill_domain.UserSkillMastery.from_dict( expected_skill_mastery_dict) @@ -425,24 +833,24 @@ def test_skill_mastery_to_dict(self): expected_skill_mastery_dict, observed_skill_mastery.to_dict()) - def test_update_worked_examples(self): - question_1 = { + def test_update_worked_examples(self) -> None: + question_1: state_domain.SubtitledHtmlDict = { 'content_id': 'question_1', 'html': '

    Worked example question 1

    ' } - explanation_1 = { + explanation_1: state_domain.SubtitledHtmlDict = { 'content_id': 'explanation_1', 'html': '

    Worked example explanation 1

    ' } - question_2 = { + question_2: state_domain.SubtitledHtmlDict = { 'content_id': 'question_2', 'html': '

    Worked example question 2

    ' } - explanation_2 = { + explanation_2: state_domain.SubtitledHtmlDict = { 'content_id': 'explanation_2', 'html': '

    Worked example explanation 2

    ' } - worked_examples_dict_list = [{ + worked_examples_dict_list: List[skill_domain.WorkedExampleDict] = [{ 'question': question_1, 'explanation': explanation_1 }, { @@ -463,17 +871,19 @@ def test_update_worked_examples(self): self.skill.update_worked_examples(worked_examples_object_list) self.skill.validate() - def test_require_valid_description_with_empty_description_raise_error(self): - with self.assertRaisesRegexp( + def test_require_valid_description_with_empty_description_raise_error( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Description field should not be empty'): self.skill.require_valid_description('') - def test_misconception_id_range(self): + def test_misconception_id_range(self) -> None: self.skill.misconceptions[0].id = 5 self._assert_validation_error( 'The misconception with id 5 is out of bounds') - def test_skill_export_import_returns_original_object(self): + def test_skill_export_import_returns_original_object(self) -> None: """Checks that to_dict and from_dict preserves all the data within a Skill during export and import. """ @@ -481,7 +891,7 @@ def test_skill_export_import_returns_original_object(self): skill_from_dict = skill_domain.Skill.from_dict(skill_dict) self.assertEqual(skill_from_dict.to_dict(), skill_dict) - def test_serialize_and_deserialize_returns_unchanged_skill(self): + def test_serialize_and_deserialize_returns_unchanged_skill(self) -> None: """Checks that serializing and then deserializing a default skill works as intended by leaving the skill unchanged. 
""" @@ -490,7 +900,7 @@ def test_serialize_and_deserialize_returns_unchanged_skill(self): skill_domain.Skill.deserialize( self.skill.serialize()).to_dict()) - def test_generate_skill_misconception_id(self): + def test_generate_skill_misconception_id(self) -> None: """Checks that skill misconception id is generated correctly.""" self.assertEqual( self.skill.generate_skill_misconception_id(0), @@ -499,13 +909,25 @@ def test_generate_skill_misconception_id(self): self.skill.generate_skill_misconception_id(1), '%s-%d' % (self.skill.id, 1)) - def test_update_rubrics_from_model(self): + def test_update_rubrics_from_model(self) -> None: """Checks that skill misconception id is generated correctly.""" - versioned_rubrics = { + versioned_rubrics: skill_domain.VersionedRubricDict = { 'schema_version': 1, 'rubrics': [ - {'explanation': 'explanation1'}, - {'explanation': 'explanation2'} + # Here we use MyPy ignore because we are defining a + # VersionedRubricDict and in VersionedRubricDict there + # is no key exists with the name 'explanation', but here + # for testing purposes we are defining 'explanation' key + # which causes MyPy to throw a error. Thus to avoid the error, + # we used ignore here. + {'explanation': 'explanation1'}, # type: ignore[typeddict-item] + # Here we use MyPy ignore because we are defining a + # VersionedRubricDict and in VersionedRubricDict there + # is no key exists with the name 'explanation', but here + # for testing purposes we are defining 'explanation' key + # which causes MyPy to throw a error. Thus to avoid the error, + # we used ignore here. + {'explanation': 'explanation2'} # type: ignore[typeddict-item] ] } @@ -518,22 +940,87 @@ def test_update_rubrics_from_model(self): {'explanations': ['explanation2']} ] }) + versioned_rubrics['rubrics'][0]['explanations'] = [ + '

    Explanation

    ' + '' + ] + skill_domain.Skill.update_rubrics_from_model( + versioned_rubrics, 2) + self.skill.validate() + self.assertEqual(versioned_rubrics, { + 'schema_version': 3, + 'rubrics': [ + { + 'explanations': [ + ( + '

    Explanation

    ' + '' + ) + ] + }, + {'explanations': ['explanation2']} + ] + }) + versioned_rubrics['rubrics'][0]['explanations'] = [( + '' + '' + )] + skill_domain.Skill.update_rubrics_from_model( + versioned_rubrics, 3) + self.skill.validate() + self.assertEqual(versioned_rubrics, { + 'schema_version': 4, + 'rubrics': [ + { + 'explanations': [ + '' + '' + ] + }, + {'explanations': ['explanation2']} + ] + }) + versioned_rubrics['rubrics'][0]['explanations'] = [ + 'explanation '] + skill_domain.Skill.update_rubrics_from_model( + versioned_rubrics, 4) + self.skill.validate() + self.assertEqual(versioned_rubrics, { + 'schema_version': 5, + 'rubrics': [ + { + 'explanations': ['explanation '] + }, + {'explanations': ['explanation2']} + ] + }) class SkillChangeTests(test_utils.GenericTestBase): - def test_skill_change_object_with_missing_cmd(self): - with self.assertRaisesRegexp( + def test_skill_change_object_with_missing_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Missing cmd key in change dict'): skill_domain.SkillChange({'invalid': 'data'}) - def test_skill_change_object_with_invalid_cmd(self): - with self.assertRaisesRegexp( + def test_skill_change_object_with_invalid_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Command invalid is not allowed'): skill_domain.SkillChange({'cmd': 'invalid'}) - def test_skill_change_object_with_missing_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_skill_change_object_with_missing_attribute_in_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following required attributes are missing: ' 'new_value, old_value')): @@ -542,8 +1029,8 @@ def test_skill_change_object_with_missing_attribute_in_cmd(self): 'property_name': 'name', }) - def test_skill_change_object_with_extra_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_skill_change_object_with_extra_attribute_in_cmd(self) -> None: + with self.assertRaisesRegex( 
utils.ValidationError, ( 'The following extra attributes are present: invalid')): skill_domain.SkillChange({ @@ -554,8 +1041,8 @@ def test_skill_change_object_with_extra_attribute_in_cmd(self): 'invalid': 'invalid' }) - def test_skill_change_object_with_invalid_skill_property(self): - with self.assertRaisesRegexp( + def test_skill_change_object_with_invalid_skill_property(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd update_skill_property: ' 'invalid is not allowed')): @@ -567,8 +1054,9 @@ def test_skill_change_object_with_invalid_skill_property(self): }) def test_skill_change_object_with_invalid_skill_misconception_property( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd ' 'update_skill_misconceptions_property: invalid is not ' @@ -582,8 +1070,9 @@ def test_skill_change_object_with_invalid_skill_misconception_property( }) def test_skill_change_object_with_invalid_skill_contents_property( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd ' 'update_skill_contents_property: invalid is not allowed')): @@ -594,7 +1083,7 @@ def test_skill_change_object_with_invalid_skill_contents_property( 'new_value': 'new_value', }) - def test_skill_change_object_with_add_skill_misconception(self): + def test_skill_change_object_with_add_skill_misconception(self) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'add_skill_misconception', 'new_misconception_dict': { @@ -608,7 +1097,7 @@ def test_skill_change_object_with_add_skill_misconception(self): 'id': 0, 'name': 'name', 'notes': '

    notes

    ', 'feedback': '

    default_feedback

    '}) - def test_skill_change_object_with_update_rubrics(self): + def test_skill_change_object_with_update_rubrics(self) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'update_rubrics', 'difficulty': constants.SKILL_DIFFICULTIES[0], @@ -621,7 +1110,7 @@ def test_skill_change_object_with_update_rubrics(self): self.assertEqual( skill_change_object.explanations, ['

    Explanation

    ']) - def test_skill_change_object_with_delete_skill_misconception(self): + def test_skill_change_object_with_delete_skill_misconception(self) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'delete_skill_misconception', 'misconception_id': 'id' @@ -632,7 +1121,8 @@ def test_skill_change_object_with_delete_skill_misconception(self): self.assertEqual(skill_change_object.misconception_id, 'id') def test_skill_change_object_with_update_skill_misconceptions_property( - self): + self + ) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'update_skill_misconceptions_property', 'misconception_id': 'id', @@ -648,8 +1138,7 @@ def test_skill_change_object_with_update_skill_misconceptions_property( self.assertEqual(skill_change_object.new_value, 'new_value') self.assertEqual(skill_change_object.old_value, 'old_value') - def test_skill_change_object_with_update_skill_property( - self): + def test_skill_change_object_with_update_skill_property(self) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'update_skill_property', 'property_name': 'description', @@ -663,7 +1152,8 @@ def test_skill_change_object_with_update_skill_property( self.assertEqual(skill_change_object.old_value, 'old_value') def test_skill_change_object_with_update_skill_contents_property( - self): + self + ) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'update_skill_contents_property', 'property_name': 'explanation', @@ -677,7 +1167,7 @@ def test_skill_change_object_with_update_skill_contents_property( self.assertEqual(skill_change_object.new_value, 'new_value') self.assertEqual(skill_change_object.old_value, 'old_value') - def test_skill_change_object_with_create_new(self): + def test_skill_change_object_with_create_new(self) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'create_new' }) @@ -685,7 +1175,8 @@ def test_skill_change_object_with_create_new(self): self.assertEqual(skill_change_object.cmd, 'create_new') def 
test_skill_change_object_with_migrate_contents_schema_to_latest_version( - self): + self + ) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'migrate_contents_schema_to_latest_version', 'from_version': 'from_version', @@ -699,7 +1190,8 @@ def test_skill_change_object_with_migrate_contents_schema_to_latest_version( self.assertEqual(skill_change_object.to_version, 'to_version') def test_skill_change_object_with_migrate_misconceptions_schema_to_latest_version( # pylint: disable=line-too-long - self): + self + ) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'migrate_misconceptions_schema_to_latest_version', 'from_version': 'from_version', @@ -713,7 +1205,8 @@ def test_skill_change_object_with_migrate_misconceptions_schema_to_latest_versio self.assertEqual(skill_change_object.to_version, 'to_version') def test_skill_change_object_with_migrate_rubrics_schema_to_latest_version( - self): + self + ) -> None: skill_change_object = skill_domain.SkillChange({ 'cmd': 'migrate_rubrics_schema_to_latest_version', 'from_version': 'from_version', @@ -726,7 +1219,7 @@ def test_skill_change_object_with_migrate_rubrics_schema_to_latest_version( self.assertEqual(skill_change_object.from_version, 'from_version') self.assertEqual(skill_change_object.to_version, 'to_version') - def test_to_dict(self): + def test_to_dict(self) -> None: skill_change_dict = { 'cmd': 'migrate_misconceptions_schema_to_latest_version', 'from_version': 'from_version', @@ -738,8 +1231,8 @@ def test_to_dict(self): class SkillSummaryTests(test_utils.GenericTestBase): - def setUp(self): - super(SkillSummaryTests, self).setUp() + def setUp(self) -> None: + super().setUp() current_time = datetime.datetime.utcnow() time_in_millisecs = utils.get_time_in_millisecs(current_time) self.skill_summary_dict = { @@ -757,63 +1250,75 @@ def setUp(self): 'skill_id', 'description', 'en', 1, 1, 1, current_time, current_time) - def test_skill_summary_gets_created(self): + def 
test_skill_summary_gets_created(self) -> None: self.assertEqual( self.skill_summary.to_dict(), self.skill_summary_dict) - def test_validation_passes_with_valid_properties(self): + def test_validation_passes_with_valid_properties(self) -> None: self.skill_summary.validate() - def test_validation_fails_with_invalid_description(self): - self.skill_summary.description = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_description(self) -> None: + self.skill_summary.description = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Description should be a string.'): self.skill_summary.validate() - def test_validation_fails_with_empty_description(self): + def test_validation_fails_with_empty_description(self) -> None: self.skill_summary.description = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Description field should not be empty'): self.skill_summary.validate() - def test_validation_fails_with_invalid_language_code(self): - self.skill_summary.language_code = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_language_code(self) -> None: + self.skill_summary.language_code = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected language code to be a string, received 0'): self.skill_summary.validate() - def test_validation_fails_with_unallowed_language_code(self): + def test_validation_fails_with_unallowed_language_code(self) -> None: self.skill_summary.language_code = 'invalid' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid language code: invalid'): self.skill_summary.validate() - def test_validation_fails_with_invalid_misconception_count(self): - self.skill_summary.misconception_count = '10' - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_misconception_count(self) -> None: + self.skill_summary.misconception_count = '10' # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected misconception_count to be an int, received \'10\''): self.skill_summary.validate() - def test_validation_fails_with_negative_misconception_count(self): + def test_validation_fails_with_negative_misconception_count(self) -> None: self.skill_summary.misconception_count = -1 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, ( 'Expected misconception_count to be non-negative, ' 'received \'-1\'')): self.skill_summary.validate() - def test_validation_fails_with_invalid_worked_examples_count(self): - self.skill_summary.worked_examples_count = '10' - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_worked_examples_count(self) -> None: + self.skill_summary.worked_examples_count = '10' # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected worked_examples_count to be an int, received \'10\''): self.skill_summary.validate() - def test_validation_fails_with_negative_worked_examples_count(self): + def test_validation_fails_with_negative_worked_examples_count(self) -> None: self.skill_summary.worked_examples_count = -1 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, ( 'Expected worked_examples_count to be non-negative, ' 'received \'-1\'')): @@ -822,8 +1327,8 @@ def test_validation_fails_with_negative_worked_examples_count(self): class AugmentedSkillSummaryTests(test_utils.GenericTestBase): - def setUp(self): - super(AugmentedSkillSummaryTests, self).setUp() + def setUp(self) -> None: + super().setUp() current_time = datetime.datetime.utcnow() self.time_in_millisecs = utils.get_time_in_millisecs(current_time) @@ -831,7 +1336,7 @@ def setUp(self): 'skill_id', 'description', 'en', 1, 1, 1, ['topic1'], ['math'], current_time, current_time) - def test_augmented_skill_summary_gets_created(self): + def test_augmented_skill_summary_gets_created(self) -> None: augmented_skill_summary_dict = { 'id': 'skill_id', 'description': 'description', @@ -851,18 +1356,110 @@ def test_augmented_skill_summary_gets_created(self): class TopicAssignmentTests(test_utils.GenericTestBase): - def setUp(self): - super(TopicAssignmentTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.topic_assignments = skill_domain.TopicAssignment( 'topic_id1', 'Topic1', 2, 1) - def test_topic_assignments_gets_created(self): + def test_topic_assignments_gets_created(self) -> None: topic_assignments_dict = { 'topic_id': 'topic_id1', 'topic_name': 'Topic1', 'topic_version': 2, - 'subtopic_id': 1, + 'subtopic_id': 1 } self.assertEqual( self.topic_assignments.to_dict(), 
topic_assignments_dict) + + +class CategorizedSkillsTests(test_utils.GenericTestBase): + + def setUp(self) -> None: + super().setUp() + self.categorized_skills = skill_domain.CategorizedSkills() + self.subtopic_titles = ['Subtopic Title 1', 'Subtopic Title 2'] + self.categorized_skills.add_topic('Topic Name', self.subtopic_titles) + + def test_validation_fails_with_duplicate_topic_name(self) -> None: + with self.assertRaisesRegex( + utils.ValidationError, + 'Topic name \'Topic Name\' is already added.'): + self.categorized_skills.add_topic('Topic Name', []) + + def test_uncategorized_skill_gets_added(self) -> None: + self.categorized_skills.add_uncategorized_skill( + 'Topic Name', 'skill_1', 'Description 1') + + self.assertEqual(self.categorized_skills.to_dict(), { + 'Topic Name': { + 'uncategorized': [{ + 'skill_id': 'skill_1', + 'skill_description': 'Description 1', + }], + 'Subtopic Title 1': [], + 'Subtopic Title 2': [] + } + }) + + def test_validation_fails_with_topic_name_not_added(self) -> None: + with self.assertRaisesRegex( + utils.ValidationError, + 'Topic name \'Topic Name 1\' is not added.'): + self.categorized_skills.add_uncategorized_skill( + 'Topic Name 1', 'skill_1', 'Description 1') + + def test_subtopic_skill_gets_added(self) -> None: + self.categorized_skills.add_subtopic_skill( + 'Topic Name', 'Subtopic Title 1', 'skill_2', 'Description 2') + self.categorized_skills.add_subtopic_skill( + 'Topic Name', 'Subtopic Title 2', 'skill_3', 'Description 3') + + self.assertEqual(self.categorized_skills.to_dict(), { + 'Topic Name': { + 'uncategorized': [], + 'Subtopic Title 1': [{ + 'skill_id': 'skill_2', + 'skill_description': 'Description 2' + }], + 'Subtopic Title 2': [{ + 'skill_id': 'skill_3', + 'skill_description': 'Description 3' + }] + } + }) + + def test_validation_fails_with_subtopic_title_not_added(self) -> None: + with self.assertRaisesRegex( + utils.ValidationError, + 'Subtopic title \'Subtopic Title 3\' is not added.'): + 
self.categorized_skills.add_subtopic_skill( + 'Topic Name', 'Subtopic Title 3', 'skill_1', 'Description 1') + + +class ShortSkillSummaryTests(test_utils.GenericTestBase): + + def setUp(self) -> None: + super().setUp() + self.skill_summary = skill_domain.SkillSummary( + 'skill_1', 'Description 1', 'en', 1, + 0, 0, datetime.datetime.now(), datetime.datetime.now()) + self.short_skill_summary = skill_domain.ShortSkillSummary( + 'skill_1', 'Description 1') + + def test_short_skill_summary_gets_created(self) -> None: + short_skill_summary_dict = { + 'skill_id': 'skill_1', + 'skill_description': 'Description 1', + } + self.assertEqual( + self.short_skill_summary.to_dict(), + short_skill_summary_dict) + + def test_short_skill_summary_gets_created_from_skill_summary(self) -> None: + short_skill_summary = ( + skill_domain.ShortSkillSummary.from_skill_summary( + self.skill_summary)) + self.assertEqual( + short_skill_summary.to_dict(), + self.short_skill_summary.to_dict()) diff --git a/core/domain/skill_fetchers.py b/core/domain/skill_fetchers.py index 9628ba834c18..e247ebefe276 100644 --- a/core/domain/skill_fetchers.py +++ b/core/domain/skill_fetchers.py @@ -21,15 +21,23 @@ import copy from core import feconf -from core import python_utils from core.domain import caching_services from core.domain import skill_domain from core.platform import models -(skill_models,) = models.Registry.import_models([models.NAMES.skill]) +from typing import List, Literal, Optional, overload +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import skill_models -def get_multi_skills(skill_ids, strict=True): +(skill_models,) = models.Registry.import_models([models.Names.SKILL]) + + +def get_multi_skills( + skill_ids: List[str], + strict: bool = True +) -> List[skill_domain.Skill]: """Returns a list of skills matching the skill IDs provided. Args: @@ -38,10 +46,12 @@ def get_multi_skills(skill_ids, strict=True): Returns: list(Skill). The list of skills matching the provided IDs. 
+ + Raises: + Exception. No skill exists for given ID. """ local_skill_models = skill_models.SkillModel.get_multi(skill_ids) - for skill_id, skill_model in python_utils.ZIP( - skill_ids, local_skill_models): + for skill_id, skill_model in zip(skill_ids, local_skill_models): if strict and skill_model is None: raise Exception('No skill exists for ID %s' % skill_id) skills = [ @@ -51,7 +61,43 @@ def get_multi_skills(skill_ids, strict=True): return skills -def get_skill_by_id(skill_id, strict=True, version=None): +@overload +def get_skill_by_id( + skill_id: str, +) -> skill_domain.Skill: ... + + +@overload +def get_skill_by_id( + skill_id: str, + *, + version: Optional[int] = None +) -> skill_domain.Skill: ... + + +@overload +def get_skill_by_id( + skill_id: str, + *, + strict: Literal[True], + version: Optional[int] = None +) -> skill_domain.Skill: ... + + +@overload +def get_skill_by_id( + skill_id: str, + *, + strict: Literal[False], + version: Optional[int] = None +) -> Optional[skill_domain.Skill]: ... + + +def get_skill_by_id( + skill_id: str, + strict: bool = True, + version: Optional[int] = None +) -> Optional[skill_domain.Skill]: """Returns a domain object representing a skill. Args: @@ -87,7 +133,9 @@ def get_skill_by_id(skill_id, strict=True, version=None): return None -def get_skill_from_model(skill_model): +def get_skill_from_model( + skill_model: skill_models.SkillModel +) -> skill_domain.Skill: """Returns a skill domain object given a skill model loaded from the datastore. @@ -99,17 +147,17 @@ def get_skill_from_model(skill_model): """ # Ensure the original skill model does not get altered. 
- versioned_skill_contents = { + versioned_skill_contents: skill_domain.VersionedSkillContentsDict = { 'schema_version': skill_model.skill_contents_schema_version, 'skill_contents': copy.deepcopy(skill_model.skill_contents) } - versioned_misconceptions = { + versioned_misconceptions: skill_domain.VersionedMisconceptionDict = { 'schema_version': skill_model.misconceptions_schema_version, 'misconceptions': copy.deepcopy(skill_model.misconceptions) } - versioned_rubrics = { + versioned_rubrics: skill_domain.VersionedRubricDict = { 'schema_version': skill_model.rubric_schema_version, 'rubrics': copy.deepcopy(skill_model.rubrics) } @@ -147,7 +195,7 @@ def get_skill_from_model(skill_model): skill_model.last_updated) -def get_skill_by_description(description): +def get_skill_by_description(description: str) -> Optional[skill_domain.Skill]: """Returns a domain object representing a skill. Args: @@ -162,7 +210,9 @@ def get_skill_by_description(description): return get_skill_from_model(skill_model) if skill_model else None -def _migrate_skill_contents_to_latest_schema(versioned_skill_contents): +def _migrate_skill_contents_to_latest_schema( + versioned_skill_contents: skill_domain.VersionedSkillContentsDict +) -> None: """Holds the responsibility of performing a step-by-step, sequential update of the skill contents structure based on the schema version of the input skill contents dictionary. If the current skill_contents schema changes, a @@ -193,7 +243,9 @@ def _migrate_skill_contents_to_latest_schema(versioned_skill_contents): skill_contents_schema_version += 1 -def _migrate_misconceptions_to_latest_schema(versioned_misconceptions): +def _migrate_misconceptions_to_latest_schema( + versioned_misconceptions: skill_domain.VersionedMisconceptionDict +) -> None: """Holds the responsibility of performing a step-by-step, sequential update of the misconceptions structure based on the schema version of the input misconceptions dictionary. 
If the current misconceptions schema changes, a @@ -225,7 +277,9 @@ def _migrate_misconceptions_to_latest_schema(versioned_misconceptions): misconception_schema_version += 1 -def _migrate_rubrics_to_latest_schema(versioned_rubrics): +def _migrate_rubrics_to_latest_schema( + versioned_rubrics: skill_domain.VersionedRubricDict +) -> None: """Holds the responsibility of performing a step-by-step, sequential update of the rubrics structure based on the schema version of the input rubrics dictionary. If the current rubrics schema changes, a diff --git a/core/domain/skill_fetchers_test.py b/core/domain/skill_fetchers_test.py index e6a591ae6276..da3eb8efc997 100644 --- a/core/domain/skill_fetchers_test.py +++ b/core/domain/skill_fetchers_test.py @@ -23,21 +23,27 @@ from core.domain import skill_fetchers from core.domain import skill_services from core.domain import state_domain +from core.domain import translation_domain from core.platform import models from core.tests import test_utils -(skill_models,) = models.Registry.import_models([models.NAMES.skill]) +from typing import Final + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import skill_models + +(skill_models,) = models.Registry.import_models([models.Names.SKILL]) class SkillFetchersUnitTests(test_utils.GenericTestBase): """Tests for skill fetchers.""" - SKILL_ID = None - USER_ID = 'user' - MISCONCEPTION_ID_1 = 1 + USER_ID: Final = 'user' + MISCONCEPTION_ID_1: Final = 1 - def setUp(self): - super(SkillFetchersUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '

    Example Question 1

    '), state_domain.SubtitledHtml('3', '

    Example Explanation 1

    ') @@ -49,7 +55,7 @@ def setUp(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -58,7 +64,7 @@ def setUp(self): misconceptions = [skill_domain.Misconception( self.MISCONCEPTION_ID_1, 'name', '

    description

    ', '

    default_feedback

    ', True)] - self.SKILL_ID = skill_services.get_new_skill_id() + self.skill_id = skill_services.get_new_skill_id() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.user_id_admin = ( @@ -66,12 +72,12 @@ def setUp(self): self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.skill = self.save_new_skill( - self.SKILL_ID, self.USER_ID, description='Description', + self.skill_id, self.USER_ID, description='Description', misconceptions=misconceptions, skill_contents=skill_contents, prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) - def test_get_multi_skills(self): + def test_get_multi_skills(self) -> None: example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '

    Example Question 1

    '), state_domain.SubtitledHtml('3', '

    Example Explanation 1

    ') @@ -87,7 +93,7 @@ def test_get_multi_skills(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -105,7 +111,7 @@ def test_get_multi_skills(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -125,17 +131,21 @@ def test_get_multi_skills(self): self.assertEqual(skills[1].description, 'Description B') self.assertEqual(skills[1].misconceptions, []) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'No skill exists for ID skill_c'): skill_fetchers.get_multi_skills(['skill_a', 'skill_c']) - def test_get_skill_by_id(self): + def test_get_skill_by_id(self) -> None: expected_skill = self.skill.to_dict() - skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) + skill = skill_fetchers.get_skill_by_id(self.skill_id) self.assertEqual(skill.to_dict(), expected_skill) + self.assertEqual( + skill_fetchers.get_skill_by_id('Does Not Exist', strict=False), None + ) def test_get_skill_from_model_with_invalid_skill_contents_schema_version( - self): + self + ) -> None: commit_cmd = skill_domain.SkillChange({ 'cmd': skill_domain.CMD_CREATE_NEW }) @@ -155,14 +165,15 @@ def test_get_skill_from_model_with_invalid_skill_contents_schema_version( model.commit( self.user_id_admin, 'skill model created', commit_cmd_dicts) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d skill schemas at ' 'present.' 
% feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION): skill_fetchers.get_skill_from_model(model) def test_get_skill_from_model_with_invalid_misconceptions_schema_version( - self): + self + ) -> None: commit_cmd = skill_domain.SkillChange({ 'cmd': skill_domain.CMD_CREATE_NEW }) @@ -189,7 +200,7 @@ def test_get_skill_from_model_with_invalid_misconceptions_schema_version( '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -200,13 +211,15 @@ def test_get_skill_from_model_with_invalid_misconceptions_schema_version( model.commit( self.user_id_admin, 'skill model created', commit_cmd_dicts) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d misconception schemas at ' 'present.' % feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION): skill_fetchers.get_skill_from_model(model) - def test_get_skill_from_model_with_invalid_rubric_schema_version(self): + def test_get_skill_from_model_with_invalid_rubric_schema_version( + self + ) -> None: commit_cmd = skill_domain.SkillChange({ 'cmd': skill_domain.CMD_CREATE_NEW }) @@ -233,7 +246,7 @@ def test_get_skill_from_model_with_invalid_rubric_schema_version(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -244,15 +257,18 @@ def test_get_skill_from_model_with_invalid_rubric_schema_version(self): model.commit( self.user_id_admin, 'skill model created', commit_cmd_dicts) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d rubric schemas at ' 'present.' 
% feconf.CURRENT_RUBRIC_SCHEMA_VERSION): skill_fetchers.get_skill_from_model(model) - def test_get_skill_from_model_with_description(self): + def test_get_skill_from_model_with_description(self) -> None: + skill = skill_fetchers.get_skill_by_description('Description') + # Ruling out the possibility of None for mypy type checking. + assert skill is not None self.assertEqual( - skill_fetchers.get_skill_by_description('Description').to_dict(), + skill.to_dict(), self.skill.to_dict() ) self.assertEqual( @@ -260,7 +276,7 @@ def test_get_skill_from_model_with_description(self): None ) - def test_get_skill_by_id_with_different_versions(self): + def test_get_skill_by_id_with_different_versions(self) -> None: changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, @@ -270,12 +286,64 @@ def test_get_skill_by_id_with_different_versions(self): }) ] skill_services.update_skill( - self.USER_ID, self.SKILL_ID, changelist, 'update language code') + self.USER_ID, self.skill_id, changelist, 'update language code') - skill = skill_fetchers.get_skill_by_id(self.SKILL_ID, version=1) - self.assertEqual(skill.id, self.SKILL_ID) + skill = skill_fetchers.get_skill_by_id(self.skill_id, version=1) + self.assertEqual(skill.id, self.skill_id) self.assertEqual(skill.language_code, 'en') - skill = skill_fetchers.get_skill_by_id(self.SKILL_ID, version=2) - self.assertEqual(skill.id, self.SKILL_ID) + skill = skill_fetchers.get_skill_by_id(self.skill_id, version=2) + self.assertEqual(skill.id, self.skill_id) self.assertEqual(skill.language_code, 'bn') + + def test_get_skill_from_model_with_latest_schemas_version(self) -> None: + commit_cmd = skill_domain.SkillChange({ + 'cmd': skill_domain.CMD_CREATE_NEW + }) + example_1 = skill_domain.WorkedExample( + state_domain.SubtitledHtml('2', '

    Example Question 1

    '), + state_domain.SubtitledHtml('3', '

    Example Explanation 1

    ') + ) + model = skill_models.SkillModel( + id='skill_id', + description='description', + language_code='en', + misconceptions=[], + rubrics=[], + next_misconception_id=0, + misconceptions_schema_version=2, + rubric_schema_version=2, + skill_contents_schema_version=2, + all_questions_merged=False, + skill_contents=skill_domain.SkillContents( + state_domain.SubtitledHtml('1', '

    Explanation

    '), + [example_1], + state_domain.RecordedVoiceovers.from_dict({ + 'voiceovers_mapping': { + '1': {}, '2': {}, '3': {} + } + }), + translation_domain.WrittenTranslations.from_dict({ + 'translations_mapping': { + '1': {}, '2': {}, '3': {} + } + }) + ).to_dict() + ) + commit_cmd_dicts = [commit_cmd.to_dict()] + model.commit( + self.user_id_admin, 'skill model created', commit_cmd_dicts) + + skill = skill_fetchers.get_skill_from_model(model) + self.assertEqual( + skill.misconceptions_schema_version, + feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION + ) + self.assertEqual( + skill.skill_contents_schema_version, + feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION + ) + self.assertEqual( + skill.rubric_schema_version, + feconf.CURRENT_RUBRIC_SCHEMA_VERSION + ) diff --git a/core/domain/skill_services.py b/core/domain/skill_services.py index 8b924b0a0c8a..5a921f339aa9 100644 --- a/core/domain/skill_services.py +++ b/core/domain/skill_services.py @@ -17,10 +17,10 @@ from __future__ import annotations import collections +import itertools import logging from core import feconf -from core import python_utils from core.constants import constants from core.domain import caching_services from core.domain import config_domain @@ -38,15 +38,24 @@ from core.domain import user_services from core.platform import models +from typing import ( + Callable, Dict, List, Literal, Optional, Set, Tuple, cast, overload) + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import question_models + from mypy_imports import skill_models + from mypy_imports import topic_models + from mypy_imports import user_models + (skill_models, user_models, question_models, topic_models) = ( models.Registry.import_models([ - models.NAMES.skill, models.NAMES.user, models.NAMES.question, - models.NAMES.topic])) -datastore_services = models.Registry.import_datastore_services() + models.Names.SKILL, models.Names.USER, models.Names.QUESTION, + models.Names.TOPIC])) # Repository GET methods. 
-def get_merged_skill_ids(): +def get_merged_skill_ids() -> List[str]: """Returns the skill IDs of skills that have been merged. Returns: @@ -55,7 +64,7 @@ def get_merged_skill_ids(): return [skill.id for skill in skill_models.SkillModel.get_merged_skills()] -def get_all_skill_summaries(): +def get_all_skill_summaries() -> List[skill_domain.SkillSummary]: """Returns the summaries of all skills present in the datastore. Returns: @@ -70,13 +79,17 @@ def get_all_skill_summaries(): def _get_skill_summaries_in_batches( - num_skills_to_fetch, urlsafe_start_cursor, sort_by): + num_skills_to_fetch: int, + urlsafe_start_cursor: Optional[str], + sort_by: Optional[str] +) -> Tuple[List[skill_domain.SkillSummary], Optional[str], bool]: """Returns the summaries of skills present in the datastore. Args: num_skills_to_fetch: int. Number of skills to fetch. urlsafe_start_cursor: str or None. The cursor to the next page. - sort_by: str. A string indicating how to sort the result. + sort_by: str|None. A string indicating how to sort the result, or None + if no sort is required. Returns: 3-tuple(skill_summaries, new_urlsafe_start_cursor, more). where: @@ -106,18 +119,25 @@ def _get_skill_summaries_in_batches( def get_filtered_skill_summaries( - num_skills_to_fetch, status, classroom_name, keywords, - sort_by, urlsafe_start_cursor): + num_skills_to_fetch: int, + status: Optional[str], + classroom_name: Optional[str], + keywords: List[str], + sort_by: Optional[str], + urlsafe_start_cursor: Optional[str] +) -> Tuple[List[skill_domain.AugmentedSkillSummary], Optional[str], bool]: """Returns all the skill summary dicts after filtering. Args: num_skills_to_fetch: int. Number of skills to fetch. - status: str. The status of the skill. - classroom_name: str. The classroom_name of the topic to which the skill - is assigned to. + status: str|None. The status of the skill, or None if no status is + provided to filter skills id. + classroom_name: str|None. 
The classroom_name of the topic to which + the skill is assigned to. keywords: list(str). The keywords to look for in the skill description. - sort_by: str. A string indicating how to sort the result. + sort_by: str|None. A string indicating how to sort the result, or None + if no sorting is required. urlsafe_start_cursor: str or None. The cursor to the next page. Returns: @@ -134,7 +154,7 @@ def get_filtered_skill_summaries( more: bool. If True, there are (probably) more results after this batch. If False, there are no further results after this batch. """ - augmented_skill_summaries = [] + augmented_skill_summaries: List[skill_domain.AugmentedSkillSummary] = [] new_urlsafe_start_cursor = urlsafe_start_cursor more = True @@ -155,7 +175,10 @@ def get_filtered_skill_summaries( def _get_augmented_skill_summaries_in_batches( - num_skills_to_fetch, urlsafe_start_cursor, sort_by): + num_skills_to_fetch: int, + urlsafe_start_cursor: Optional[str], + sort_by: Optional[str] +) -> Tuple[List[skill_domain.AugmentedSkillSummary], Optional[str], bool]: """Returns all the Augmented skill summaries after attaching topic and classroom. 
@@ -173,15 +196,17 @@ def _get_augmented_skill_summaries_in_batches( _get_skill_summaries_in_batches( num_skills_to_fetch, urlsafe_start_cursor, sort_by)) - assigned_skill_ids = collections.defaultdict(lambda: { + assigned_skill_ids: Dict[ + str, Dict[str, List[str]] + ] = collections.defaultdict(lambda: { 'topic_names': [], 'classroom_names': [] }) all_topic_models = topic_models.TopicModel.get_all() all_topics = [topic_fetchers.get_topic_from_model(topic_model) - if topic_model is not None else None - for topic_model in all_topic_models] + for topic_model in all_topic_models + if topic_model is not None] topic_classroom_dict = {} all_classrooms_dict = config_domain.CLASSROOM_PAGES_DATA.value @@ -221,13 +246,17 @@ def _get_augmented_skill_summaries_in_batches( return augmented_skill_summaries, new_urlsafe_start_cursor, more -def _filter_skills_by_status(augmented_skill_summaries, status): +def _filter_skills_by_status( + augmented_skill_summaries: List[skill_domain.AugmentedSkillSummary], + status: Optional[str] +) -> List[skill_domain.AugmentedSkillSummary]: """Returns the skill summary dicts after filtering by status. Args: augmented_skill_summaries: list(AugmentedSkillSummary). The list of augmented skill summaries. - status: str. The status of the skill. + status: str|None. The status of the skill, or None if no status is + provided to filter skills id. Returns: list(AugmentedSkillSummary). The list of AugmentedSkillSummaries @@ -254,15 +283,20 @@ def _filter_skills_by_status(augmented_skill_summaries, status): augmented_skill_summary) return assigned_augmented_skill_summaries + return [] -def _filter_skills_by_classroom(augmented_skill_summaries, classroom_name): + +def _filter_skills_by_classroom( + augmented_skill_summaries: List[skill_domain.AugmentedSkillSummary], + classroom_name: Optional[str] +) -> List[skill_domain.AugmentedSkillSummary]: """Returns the skill summary dicts after filtering by classroom_name. 
Args: augmented_skill_summaries: list(AugmentedSkillSummary). The list of augmented skill summaries. - classroom_name: str. The classroom_name of the topic to which the skill - is assigned to. + classroom_name: str|None. The classroom_name of the topic to which + the skill is assigned to. Returns: list(AugmentedSkillSummary). The list of augmented skill summaries with @@ -281,7 +315,10 @@ def _filter_skills_by_classroom(augmented_skill_summaries, classroom_name): return augmented_skill_summaries_with_classroom_name -def _filter_skills_by_keywords(augmented_skill_summaries, keywords): +def _filter_skills_by_keywords( + augmented_skill_summaries: List[skill_domain.AugmentedSkillSummary], + keywords: List[str] +) -> List[skill_domain.AugmentedSkillSummary]: """Returns whether the keywords match the skill description. Args: @@ -306,7 +343,9 @@ def _filter_skills_by_keywords(augmented_skill_summaries, keywords): return filtered_augmented_skill_summaries -def get_multi_skill_summaries(skill_ids): +def get_multi_skill_summaries( + skill_ids: List[str] +) -> List[skill_domain.SkillSummary]: """Returns a list of skill summaries matching the skill IDs provided. Args: @@ -324,7 +363,9 @@ def get_multi_skill_summaries(skill_ids): return skill_summaries -def get_rubrics_of_skills(skill_ids): +def get_rubrics_of_skills( + skill_ids: List[str] +) -> Tuple[Dict[str, Optional[List[skill_domain.RubricDict]]], List[str]]: """Returns a list of rubrics corresponding to given skills. Args: @@ -335,7 +376,9 @@ def get_rubrics_of_skills(skill_ids): corresponding ids and the list of deleted skill ids, if any. 
""" skills = skill_fetchers.get_multi_skills(skill_ids, strict=False) - skill_id_to_rubrics_dict = {} + skill_id_to_rubrics_dict: Dict[ + str, Optional[List[skill_domain.RubricDict]] + ] = {} for skill in skills: if skill is not None: @@ -351,7 +394,9 @@ def get_rubrics_of_skills(skill_ids): return skill_id_to_rubrics_dict, deleted_skill_ids -def get_descriptions_of_skills(skill_ids): +def get_descriptions_of_skills( + skill_ids: List[str] +) -> Tuple[Dict[str, str], List[str]]: """Returns a list of skill descriptions corresponding to the given skills. Args: @@ -362,7 +407,7 @@ def get_descriptions_of_skills(skill_ids): corresponding ids and the list of deleted skill ids, if any. """ skill_summaries = get_multi_skill_summaries(skill_ids) - skill_id_to_description_dict = {} + skill_id_to_description_dict: Dict[str, str] = {} for skill_summary in skill_summaries: if skill_summary is not None: @@ -372,13 +417,14 @@ def get_descriptions_of_skills(skill_ids): deleted_skill_ids = [] for skill_id in skill_ids: if skill_id not in skill_id_to_description_dict: - skill_id_to_description_dict[skill_id] = None deleted_skill_ids.append(skill_id) return skill_id_to_description_dict, deleted_skill_ids -def get_skill_summary_from_model(skill_summary_model): +def get_skill_summary_from_model( + skill_summary_model: skill_models.SkillSummaryModel +) -> skill_domain.SkillSummary: """Returns a domain object for an Oppia skill summary given a skill summary model. @@ -401,7 +447,7 @@ def get_skill_summary_from_model(skill_summary_model): ) -def get_image_filenames_from_skill(skill): +def get_image_filenames_from_skill(skill: skill_domain.Skill) -> List[str]: """Get the image filenames from the skill. 
Args: @@ -414,7 +460,9 @@ def get_image_filenames_from_skill(skill): return html_cleaner.get_image_filenames_from_html_strings(html_list) -def get_all_topic_assignments_for_skill(skill_id): +def get_all_topic_assignments_for_skill( + skill_id: str +) -> List[skill_domain.TopicAssignment]: """Returns a list containing all the topics to which the given skill is assigned along with topic details. @@ -440,13 +488,39 @@ def get_all_topic_assignments_for_skill(skill_id): return topic_assignments -def replace_skill_id_in_all_topics(user_id, old_skill_id, new_skill_id): +def get_topic_names_with_given_skill_in_diagnostic_test( + skill_id: str +) -> List[str]: + """Returns a list of topic names for which the given skill is assigned + to that topic's diagnostic test. + + Args: + skill_id: str. ID of the skill. + + Returns: + list(str). A list of topic names for which the given skill is assigned + to that topic's diagnostic test. + """ + topics = topic_fetchers.get_all_topics() + topic_names = [] + for topic in topics: + if skill_id in topic.skill_ids_for_diagnostic_test: + topic_names.append(topic.name) + return topic_names + + +def replace_skill_id_in_all_topics( + user_id: str, old_skill_id: str, new_skill_id: str +) -> None: """Replaces the old skill id with the new one in all the associated topics. Args: user_id: str. The unique user ID of the user. old_skill_id: str. The old skill id. new_skill_id: str. The new skill id. + + Raises: + Exception. The new skill already present. """ all_topics = topic_fetchers.get_all_topics() for topic in all_topics: @@ -490,7 +564,7 @@ def replace_skill_id_in_all_topics(user_id, old_skill_id, new_skill_id): old_skill_id, new_skill_id)) -def remove_skill_from_all_topics(user_id, skill_id): +def remove_skill_from_all_topics(user_id: str, skill_id: str) -> None: """Deletes the skill with the given id from all the associated topics. 
Args: @@ -521,7 +595,27 @@ def remove_skill_from_all_topics(user_id, skill_id): skill_id, skill_name)) -def get_skill_summary_by_id(skill_id, strict=True): +@overload +def get_skill_summary_by_id( + skill_id: str +) -> skill_domain.SkillSummary: ... + + +@overload +def get_skill_summary_by_id( + skill_id: str, *, strict: Literal[True] +) -> skill_domain.SkillSummary: ... + + +@overload +def get_skill_summary_by_id( + skill_id: str, *, strict: Literal[False] +) -> Optional[skill_domain.SkillSummary]: ... + + +def get_skill_summary_by_id( + skill_id: str, strict: bool = True +) -> Optional[skill_domain.SkillSummary]: """Returns a domain object representing a skill summary. Args: @@ -543,7 +637,7 @@ def get_skill_summary_by_id(skill_id, strict=True): return None -def get_new_skill_id(): +def get_new_skill_id() -> str: """Returns a new skill id. Returns: @@ -552,7 +646,12 @@ def get_new_skill_id(): return skill_models.SkillModel.get_new_id('') -def _create_skill(committer_id, skill, commit_message, commit_cmds): +def _create_skill( + committer_id: str, + skill: skill_domain.Skill, + commit_message: str, + commit_cmds: List[skill_domain.SkillChange] +) -> None: """Creates a new skill. Args: @@ -593,7 +692,7 @@ def _create_skill(committer_id, skill, commit_message, commit_cmds): skill.description) -def does_skill_with_description_exist(description): +def does_skill_with_description_exist(description: str) -> bool: """Checks if skill with provided description exists. Args: @@ -607,7 +706,7 @@ def does_skill_with_description_exist(description): return existing_skill is not None -def save_new_skill(committer_id, skill): +def save_new_skill(committer_id: str, skill: skill_domain.Skill) -> None: """Saves a new skill. 
Args: @@ -621,7 +720,11 @@ def save_new_skill(committer_id, skill): })]) -def apply_change_list(skill_id, change_list, committer_id): +def apply_change_list( + skill_id: str, + change_list: List[skill_domain.SkillChange], + committer_id: str +) -> skill_domain.Skill: """Applies a changelist to a skill and returns the result. Args: @@ -632,6 +735,11 @@ def apply_change_list(skill_id, change_list, committer_id): Returns: Skill. The resulting skill domain object. + + Raises: + Exception. The user does not have enough rights to edit the + skill description. + Exception. Invalid change dict. """ skill = skill_fetchers.get_skill_by_id(skill_id) user = user_services.get_user_actions_info(committer_id) @@ -645,72 +753,196 @@ def apply_change_list(skill_id, change_list, committer_id): raise Exception( 'The user does not have enough rights to edit the ' 'skill description.') - skill.update_description(change.new_value) + # Here we use cast because this 'if' condition forces + # change to have type UpdateSkillPropertyDescriptionCmd. + update_description_cmd = cast( + skill_domain.UpdateSkillPropertyDescriptionCmd, + change + ) + skill.update_description(update_description_cmd.new_value) ( opportunity_services .update_skill_opportunity_skill_description( - skill.id, change.new_value)) + skill.id, update_description_cmd.new_value)) elif (change.property_name == skill_domain.SKILL_PROPERTY_LANGUAGE_CODE): - skill.update_language_code(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type UpdateSkillPropertyLanguageCodeCmd. 
+ update_language_code_cmd = cast( + skill_domain.UpdateSkillPropertyLanguageCodeCmd, + change + ) + skill.update_language_code( + update_language_code_cmd.new_value + ) elif (change.property_name == skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID): - skill.update_superseding_skill_id(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateSkillPropertySupersedingSkillIdCmd. + update_superseding_skill_id_cmd = cast( + skill_domain.UpdateSkillPropertySupersedingSkillIdCmd, + change + ) + skill.update_superseding_skill_id( + update_superseding_skill_id_cmd.new_value + ) elif (change.property_name == skill_domain.SKILL_PROPERTY_ALL_QUESTIONS_MERGED): - skill.record_that_all_questions_are_merged(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateSkillPropertyAllQuestionsMergedCmd. + update_all_questions_merged_cmd = cast( + skill_domain.UpdateSkillPropertyAllQuestionsMergedCmd, + change + ) + skill.record_that_all_questions_are_merged( + update_all_questions_merged_cmd.new_value + ) elif change.cmd == skill_domain.CMD_UPDATE_SKILL_CONTENTS_PROPERTY: if (change.property_name == skill_domain.SKILL_CONTENTS_PROPERTY_EXPLANATION): + # Here we use cast because this 'if' + # condition forces change to have type + # UpdateSkillContentsPropertyExplanationCmd. 
+ update_explanation_cmd = cast( + skill_domain.UpdateSkillContentsPropertyExplanationCmd, + change + ) explanation = ( - state_domain.SubtitledHtml.from_dict(change.new_value)) + state_domain.SubtitledHtml.from_dict( + update_explanation_cmd.new_value + ) + ) explanation.validate() skill.update_explanation(explanation) elif (change.property_name == skill_domain.SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES): - worked_examples_list = [ - skill_domain.WorkedExample.from_dict(worked_example) - for worked_example in change.new_value] + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateSkillContentsPropertyWorkedExamplesCmd. + update_worked_examples_cmd = cast( + skill_domain.UpdateSkillContentsPropertyWorkedExamplesCmd, # pylint: disable=line-too-long + change + ) + worked_examples_list: List[skill_domain.WorkedExample] = [] + for worked_example in update_worked_examples_cmd.new_value: + worked_examples_list.append( + skill_domain.WorkedExample.from_dict(worked_example) + ) skill.update_worked_examples(worked_examples_list) elif change.cmd == skill_domain.CMD_ADD_SKILL_MISCONCEPTION: + # Here we use cast because we are narrowing down the type from + # SkillChange to a specific change command. + add_skill_misconception_cmd = cast( + skill_domain.AddSkillMisconceptionCmd, + change + ) misconception = skill_domain.Misconception.from_dict( - change.new_misconception_dict) + add_skill_misconception_cmd.new_misconception_dict) skill.add_misconception(misconception) elif change.cmd == skill_domain.CMD_DELETE_SKILL_MISCONCEPTION: - skill.delete_misconception(change.misconception_id) + # Here we use cast because we are narrowing down the type from + # SkillChange to a specific change command. 
+ delete_misconception_cmd = cast( + skill_domain.DeleteSkillMisconceptionCmd, + change + ) + skill.delete_misconception( + delete_misconception_cmd.misconception_id + ) elif change.cmd == skill_domain.CMD_ADD_PREREQUISITE_SKILL: - skill.add_prerequisite_skill(change.skill_id) + # Here we use cast because we are narrowing down the type from + # SkillChange to a specific change command. + add_prerequisite_skill_cmd = cast( + skill_domain.AddPrerequisiteSkillCmd, + change + ) + skill.add_prerequisite_skill( + add_prerequisite_skill_cmd.skill_id + ) elif change.cmd == skill_domain.CMD_DELETE_PREREQUISITE_SKILL: - skill.delete_prerequisite_skill(change.skill_id) + # Here we use cast because we are narrowing down the type from + # SkillChange to a specific change command. + delete_prerequisite_skill_cmd = cast( + skill_domain.DeletePrerequisiteSkillCmd, + change + ) + skill.delete_prerequisite_skill( + delete_prerequisite_skill_cmd.skill_id + ) elif change.cmd == skill_domain.CMD_UPDATE_RUBRICS: + # Here we use cast because we are narrowing down the type from + # SkillChange to a specific change command. + update_rubric_cmd = cast( + skill_domain.UpdateRubricsCmd, + change + ) skill.update_rubric( - change.difficulty, change.explanations) + update_rubric_cmd.difficulty, + update_rubric_cmd.explanations + ) elif (change.cmd == skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY): if (change.property_name == skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME): + # Here we use cast because this 'if' + # condition forces change to have type + # UpdateSkillMisconceptionPropertyNameCmd. 
+ update_property_name_cmd = cast( + skill_domain.UpdateSkillMisconceptionPropertyNameCmd, + change + ) skill.update_misconception_name( - change.misconception_id, change.new_value) + update_property_name_cmd.misconception_id, + update_property_name_cmd.new_value + ) elif (change.property_name == skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateSkillMisconceptionPropertyNotesCmd. + update_property_notes_cmd = cast( + skill_domain.UpdateSkillMisconceptionPropertyNotesCmd, + change + ) skill.update_misconception_notes( - change.misconception_id, change.new_value) + update_property_notes_cmd.misconception_id, + update_property_notes_cmd.new_value + ) elif (change.property_name == skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateSkillMisconceptionPropertyFeedbackCmd. + update_property_feedback_cmd = cast( + skill_domain.UpdateSkillMisconceptionPropertyFeedbackCmd, # pylint: disable=line-too-long + change + ) skill.update_misconception_feedback( - change.misconception_id, change.new_value) + update_property_feedback_cmd.misconception_id, + update_property_feedback_cmd.new_value + ) elif (change.property_name == - skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED): # pylint: disable=line-too-long + skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED): # pylint: disable=line-too-long + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateSkillMisconceptionPropertyMustBeAddressedCmd. 
+ update_property_must_be_addressed_cmd = cast( + skill_domain.UpdateSkillMisconceptionPropertyMustBeAddressedCmd, # pylint: disable=line-too-long + change + ) skill.update_misconception_must_be_addressed( - change.misconception_id, change.new_value) + update_property_must_be_addressed_cmd.misconception_id, + update_property_must_be_addressed_cmd.new_value + ) else: raise Exception('Invalid change dict.') - elif (change.cmd == - skill_domain.CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION - or change.cmd == - skill_domain.CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION # pylint: disable=line-too-long - or change.cmd == - skill_domain.CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION): + elif (change.cmd in ( + skill_domain.CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION, + skill_domain.CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION, # pylint: disable=line-too-long + skill_domain.CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION + )): # Loading the skill model from the datastore into a # skill domain object automatically converts it to use the # latest schema version. As a result, simply resaving the @@ -727,7 +959,47 @@ def apply_change_list(skill_id, change_list, committer_id): raise e -def _save_skill(committer_id, skill, commit_message, change_list): +def populate_skill_model_fields( + skill_model: skill_models.SkillModel, skill: skill_domain.Skill +) -> skill_models.SkillModel: + """Populate skill model with the data from skill object. + + Args: + skill_model: SkillModel. The model to populate. + skill: Skill. The skill domain object which should be used to + populate the model. + + Returns: + SkillModel. Populated model. 
+ """ + skill_model.description = skill.description + skill_model.language_code = skill.language_code + skill_model.superseding_skill_id = skill.superseding_skill_id + skill_model.all_questions_merged = skill.all_questions_merged + skill_model.prerequisite_skill_ids = skill.prerequisite_skill_ids + skill_model.misconceptions_schema_version = ( + skill.misconceptions_schema_version) + skill_model.rubric_schema_version = ( + skill.rubric_schema_version) + skill_model.skill_contents_schema_version = ( + skill.skill_contents_schema_version) + skill_model.skill_contents = skill.skill_contents.to_dict() + skill_model.misconceptions = [ + misconception.to_dict() for misconception in skill.misconceptions + ] + skill_model.rubrics = [ + rubric.to_dict() for rubric in skill.rubrics + ] + skill_model.next_misconception_id = skill.next_misconception_id + return skill_model + + +def _save_skill( + committer_id: str, + skill: skill_domain.Skill, + commit_message: str, + change_list: List[skill_domain.SkillChange] +) -> None: """Validates a skill and commits it to persistent storage. If successful, increments the version number of the incoming skill domain object by 1. @@ -752,38 +1024,21 @@ def _save_skill(committer_id, skill, commit_message, change_list): # Skill model cannot be None as skill is passed as parameter here and that # is only possible if a skill model with that skill id exists. skill_model = skill_models.SkillModel.get( - skill.id, strict=False) + skill.id, strict=True) if skill.version > skill_model.version: raise Exception( 'Unexpected error: trying to update version %s of skill ' 'from version %s. Please reload the page and try again.' % (skill_model.version, skill.version)) - elif skill.version < skill_model.version: + + if skill.version < skill_model.version: raise Exception( 'Trying to update version %s of skill from version %s, ' 'which is too old. Please reload the page and try again.' 
% (skill_model.version, skill.version)) - skill_model.description = skill.description - skill_model.language_code = skill.language_code - skill_model.superseding_skill_id = skill.superseding_skill_id - skill_model.all_questions_merged = skill.all_questions_merged - skill_model.prerequisite_skill_ids = skill.prerequisite_skill_ids - skill_model.misconceptions_schema_version = ( - skill.misconceptions_schema_version) - skill_model.rubric_schema_version = ( - skill.rubric_schema_version) - skill_model.skill_contents_schema_version = ( - skill.skill_contents_schema_version) - skill_model.skill_contents = skill.skill_contents.to_dict() - skill_model.misconceptions = [ - misconception.to_dict() for misconception in skill.misconceptions - ] - skill_model.rubrics = [ - rubric.to_dict() for rubric in skill.rubrics - ] - skill_model.next_misconception_id = skill.next_misconception_id + skill_model = populate_skill_model_fields(skill_model, skill) change_dicts = [change.to_dict() for change in change_list] skill_model.commit(committer_id, commit_message, change_dicts) caching_services.delete_multi( @@ -791,7 +1046,12 @@ def _save_skill(committer_id, skill, commit_message, change_list): skill.version += 1 -def update_skill(committer_id, skill_id, change_list, commit_message): +def update_skill( + committer_id: str, + skill_id: str, + change_list: List[skill_domain.SkillChange], + commit_message: Optional[str] +) -> None: """Updates a skill. Commits changes. 
Args: @@ -819,11 +1079,21 @@ def update_skill(committer_id, skill_id, change_list, commit_message): for change in change_list ) if misconception_is_deleted: - deleted_skill_misconception_ids = [ - skill.generate_skill_misconception_id(change.misconception_id) - for change in change_list - if change.cmd == skill_domain.CMD_DELETE_SKILL_MISCONCEPTION - ] + deleted_skill_misconception_ids: List[str] = [] + for change in change_list: + if change.cmd == skill_domain.CMD_DELETE_SKILL_MISCONCEPTION: + # Here we use cast because we are narrowing down the type of + # 'change' from SkillChange to a specific change command + # DeleteSkillMisconceptionCmd. + delete_skill_misconception_cmd = cast( + skill_domain.DeleteSkillMisconceptionCmd, + change + ) + deleted_skill_misconception_ids.append( + skill.generate_skill_misconception_id( + delete_skill_misconception_cmd.misconception_id + ) + ) taskqueue_services.defer( taskqueue_services.FUNCTION_ID_UNTAG_DELETED_MISCONCEPTIONS, taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS, @@ -831,7 +1101,11 @@ def update_skill(committer_id, skill_id, change_list, commit_message): deleted_skill_misconception_ids) -def delete_skill(committer_id, skill_id, force_deletion=False): +def delete_skill( + committer_id: str, + skill_id: str, + force_deletion: bool = False +) -> None: """Deletes the skill with the given skill_id. Args: @@ -859,7 +1133,7 @@ def delete_skill(committer_id, skill_id, force_deletion=False): skill_id) -def delete_skill_summary(skill_id): +def delete_skill_summary(skill_id: str) -> None: """Delete a skill summary model. 
Args: @@ -868,12 +1142,14 @@ def delete_skill_summary(skill_id): """ skill_summary_model = ( - skill_models.SkillSummaryModel.get(skill_id, False)) + skill_models.SkillSummaryModel.get(skill_id, strict=False)) if skill_summary_model is not None: skill_summary_model.delete() -def compute_summary_of_skill(skill): +def compute_summary_of_skill( + skill: skill_domain.Skill +) -> skill_domain.SkillSummary: """Create a SkillSummary domain object for a given Skill domain object and return it. @@ -882,11 +1158,24 @@ def compute_summary_of_skill(skill): Returns: SkillSummary. The computed summary for the given skill. + + Raises: + Exception. No data available for when the skill was last_updated. + Exception. No data available for when the skill was created. """ skill_model_misconception_count = len(skill.misconceptions) skill_model_worked_examples_count = len( skill.skill_contents.worked_examples) + if skill.created_on is None: + raise Exception( + 'No data available for when the skill was created.' + ) + + if skill.last_updated is None: + raise Exception( + 'No data available for when the skill was last_updated.' + ) skill_summary = skill_domain.SkillSummary( skill.id, skill.description, skill.language_code, skill.version, skill_model_misconception_count, @@ -897,7 +1186,7 @@ def compute_summary_of_skill(skill): return skill_summary -def create_skill_summary(skill_id): +def create_skill_summary(skill_id: str) -> None: """Creates and stores a summary of the given skill. Args: @@ -908,13 +1197,19 @@ def create_skill_summary(skill_id): save_skill_summary(skill_summary) -def save_skill_summary(skill_summary): - """Save a skill summary domain object as a SkillSummaryModel - entity in the datastore. +def populate_skill_summary_model_fields( + skill_summary_model: skill_models.SkillSummaryModel, + skill_summary: skill_domain.SkillSummary +) -> skill_models.SkillSummaryModel: + """Populate skill summary model with the data from skill summary object. 
Args: - skill_summary: SkillSummaryModel. The skill summary object to be saved - in the datastore. + skill_summary_model: SkillSummaryModel. The model to populate. + skill_summary: SkillSummary. The skill summary domain object which + should be used to populate the model. + + Returns: + SkillSummaryModel. Populated model. """ skill_summary_dict = { 'description': skill_summary.description, @@ -922,26 +1217,39 @@ def save_skill_summary(skill_summary): 'version': skill_summary.version, 'misconception_count': skill_summary.misconception_count, 'worked_examples_count': skill_summary.worked_examples_count, - 'skill_model_last_updated': ( - skill_summary.skill_model_last_updated), - 'skill_model_created_on': ( - skill_summary.skill_model_created_on) + 'skill_model_last_updated': skill_summary.skill_model_last_updated, + 'skill_model_created_on': skill_summary.skill_model_created_on } - - skill_summary_model = ( - skill_models.SkillSummaryModel.get_by_id(skill_summary.id)) if skill_summary_model is not None: skill_summary_model.populate(**skill_summary_dict) - skill_summary_model.update_timestamps() - skill_summary_model.put() else: skill_summary_dict['id'] = skill_summary.id - model = skill_models.SkillSummaryModel(**skill_summary_dict) - model.update_timestamps() - model.put() + skill_summary_model = skill_models.SkillSummaryModel( + **skill_summary_dict) + + return skill_summary_model + + +def save_skill_summary(skill_summary: skill_domain.SkillSummary) -> None: + """Save a skill summary domain object as a SkillSummaryModel + entity in the datastore. + + Args: + skill_summary: SkillSummaryModel. The skill summary object to be saved + in the datastore. 
+ """ + existing_skill_summary_model = ( + skill_models.SkillSummaryModel.get_by_id(skill_summary.id)) + skill_summary_model = populate_skill_summary_model_fields( + existing_skill_summary_model, skill_summary + ) + skill_summary_model.update_timestamps() + skill_summary_model.put() -def create_user_skill_mastery(user_id, skill_id, degree_of_mastery): +def create_user_skill_mastery( + user_id: str, skill_id: str, degree_of_mastery: float +) -> None: """Creates skill mastery of a user. Args: @@ -955,7 +1263,9 @@ def create_user_skill_mastery(user_id, skill_id, degree_of_mastery): save_user_skill_mastery(user_skill_mastery) -def save_user_skill_mastery(user_skill_mastery): +def save_user_skill_mastery( + user_skill_mastery: skill_domain.UserSkillMastery +) -> None: """Stores skill mastery of a user. Args: @@ -972,7 +1282,9 @@ def save_user_skill_mastery(user_skill_mastery): user_skill_mastery_model.put() -def create_multi_user_skill_mastery(user_id, degrees_of_mastery): +def create_multi_user_skill_mastery( + user_id: str, degrees_of_mastery: Dict[str, float] +) -> None: """Creates the mastery of a user in multiple skills. Args: @@ -994,7 +1306,7 @@ def create_multi_user_skill_mastery(user_id, degrees_of_mastery): user_models.UserSkillMasteryModel.put_multi(user_skill_mastery_models) -def get_user_skill_mastery(user_id, skill_id): +def get_user_skill_mastery(user_id: str, skill_id: str) -> Optional[float]: """Fetches the mastery of user in a particular skill. Args: @@ -1013,10 +1325,17 @@ def get_user_skill_mastery(user_id, skill_id): if not user_skill_mastery_model: return None - return user_skill_mastery_model.degree_of_mastery + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. 
+ degree_of_mastery: float = user_skill_mastery_model.degree_of_mastery + return degree_of_mastery -def get_multi_user_skill_mastery(user_id, skill_ids): + +def get_multi_user_skill_mastery( + user_id: str, skill_ids: List[str] +) -> Dict[str, Optional[float]]: """Fetches the mastery of user in multiple skills. Args: @@ -1029,7 +1348,7 @@ def get_multi_user_skill_mastery(user_id, skill_ids): are the corresponding mastery degree of the user or None if UserSkillMasteryModel does not exist for the skill. """ - degrees_of_mastery = {} + degrees_of_mastery: Dict[str, Optional[float]] = {} model_ids = [] for skill_id in skill_ids: @@ -1039,8 +1358,7 @@ def get_multi_user_skill_mastery(user_id, skill_ids): skill_mastery_models = user_models.UserSkillMasteryModel.get_multi( model_ids) - for skill_id, skill_mastery_model in python_utils.ZIP( - skill_ids, skill_mastery_models): + for skill_id, skill_mastery_model in zip(skill_ids, skill_mastery_models): if skill_mastery_model is None: degrees_of_mastery[skill_id] = None else: @@ -1049,7 +1367,49 @@ def get_multi_user_skill_mastery(user_id, skill_ids): return degrees_of_mastery -def skill_has_associated_questions(skill_id): +def get_multi_users_skills_mastery( + user_ids: List[str], skill_ids: List[str] +) -> Dict[str, Dict[str, Optional[float]]]: + """Fetches the mastery of user in multiple skills. + + Args: + user_ids: list(str). The user IDs of the users. + skill_ids: list(str). Skill IDs of the skill for which mastery degree is + requested. + + Returns: + dict(str, dict(str, float|None)). The keys are the user IDs and values + are dictionaries with keys as requested skill IDs and values + as the corresponding mastery degree of the user or None if + UserSkillMasteryModel does not exist for the skill. + """ + # We need to convert the resultant object of itertools product to a list + # to be able to use it multiple times as it otherwise gets exhausted after + # being iterated over once. 
+ all_combinations = list(itertools.product(user_ids, skill_ids)) + model_ids = [] + for (user_id, skill_id) in all_combinations: + model_ids.append(user_models.UserSkillMasteryModel.construct_model_id( + user_id, skill_id)) + + skill_mastery_models = user_models.UserSkillMasteryModel.get_multi( + model_ids) + degrees_of_masteries: Dict[ + str, Dict[str, Optional[float]] + ] = {user_id: {} for user_id in user_ids} + for i, (user_id, skill_id) in enumerate(all_combinations): + skill_mastery_model = skill_mastery_models[i] + if skill_mastery_model is None: + degrees_of_masteries[user_id][skill_id] = None + else: + degrees_of_masteries[user_id][skill_id] = ( + skill_mastery_model.degree_of_mastery + ) + + return degrees_of_masteries + + +def skill_has_associated_questions(skill_id: str) -> bool: """Returns whether or not any question has this skill attached. Args: @@ -1064,7 +1424,9 @@ def skill_has_associated_questions(skill_id): return len(question_ids) > 0 -def get_sorted_skill_ids(degrees_of_mastery): +def get_sorted_skill_ids( + degrees_of_mastery: Dict[str, Optional[float]] +) -> List[str]: """Sort the dict based on the mastery value. 
Args: @@ -1079,8 +1441,12 @@ def get_sorted_skill_ids(degrees_of_mastery): skill_id: degree for skill_id, degree in degrees_of_mastery.items() if degree is not None} + sort_fn: Callable[[str], float] = ( + lambda skill_id: skill_dict_with_float_value[skill_id] + if skill_dict_with_float_value.get(skill_id) else 0 + ) sorted_skill_ids_with_float_value = sorted( - skill_dict_with_float_value, key=skill_dict_with_float_value.get) + skill_dict_with_float_value, key=sort_fn) skill_ids_with_none_value = [ skill_id for skill_id, degree in degrees_of_mastery.items() if degree is None] @@ -1090,7 +1456,7 @@ def get_sorted_skill_ids(degrees_of_mastery): return sorted_skill_ids[:feconf.MAX_NUMBER_OF_SKILL_IDS] -def filter_skills_by_mastery(user_id, skill_ids): +def filter_skills_by_mastery(user_id: str, skill_ids: List[str]) -> List[str]: """Given a list of skill_ids, it returns a list of feconf.MAX_NUMBER_OF_SKILL_IDS skill_ids in which the user has the least mastery.(Please note that python 2.7 considers the None @@ -1112,3 +1478,76 @@ def filter_skills_by_mastery(user_id, skill_ids): if skill_id in filtered_skill_ids: arranged_filtered_skill_ids.append(skill_id) return arranged_filtered_skill_ids + + +def get_untriaged_skill_summaries( + skill_summaries: List[skill_domain.SkillSummary], + skill_ids_assigned_to_some_topic: Set[str], + merged_skill_ids: List[str] +) -> List[skill_domain.SkillSummary]: + """Returns a list of skill summaries for all skills that are untriaged. + + Args: + skill_summaries: list(SkillSummary). The list of all skill summary + domain objects. + skill_ids_assigned_to_some_topic: set(str). The set of skill ids which + are assigned to some topic. + merged_skill_ids: list(str). List of skill IDs of merged skills. + + Returns: + list(SkillSummary). A list of skill summaries for all skills that + are untriaged. 
+ """ + untriaged_skill_summaries = [] + + for skill_summary in skill_summaries: + skill_id = skill_summary.id + if (skill_id not in skill_ids_assigned_to_some_topic) and ( + skill_id not in merged_skill_ids): + untriaged_skill_summaries.append(skill_summary) + + return untriaged_skill_summaries + + +def get_categorized_skill_ids_and_descriptions( +) -> skill_domain.CategorizedSkills: + """Returns a CategorizedSkills domain object for all the skills that are + categorized. + + Returns: + CategorizedSkills. An instance of the CategorizedSkills domain object + for all the skills that are categorized. + """ + topics = topic_fetchers.get_all_topics() + + categorized_skills = skill_domain.CategorizedSkills() + + skill_ids = [] + + for topic in topics: + subtopics = topic.subtopics + subtopic_titles = [subtopic.title for subtopic in subtopics] + categorized_skills.add_topic(topic.name, subtopic_titles) + for skill_id in topic.uncategorized_skill_ids: + skill_ids.append(skill_id) + for subtopic in subtopics: + for skill_id in subtopic.skill_ids: + skill_ids.append(skill_id) + + skill_descriptions = get_descriptions_of_skills(skill_ids)[0] + + for topic in topics: + subtopics = topic.subtopics + for skill_id in topic.uncategorized_skill_ids: + description = skill_descriptions[skill_id] + categorized_skills.add_uncategorized_skill( + topic.name, skill_id, + description) + for subtopic in subtopics: + for skill_id in subtopic.skill_ids: + description = skill_descriptions[skill_id] + categorized_skills.add_subtopic_skill( + topic.name, subtopic.title, + skill_id, description) + + return categorized_skills diff --git a/core/domain/skill_services_test.py b/core/domain/skill_services_test.py index 73b0de2406cd..8a5977494992 100644 --- a/core/domain/skill_services_test.py +++ b/core/domain/skill_services_test.py @@ -29,24 +29,41 @@ from core.domain import suggestion_services from core.domain import topic_domain from core.domain import topic_fetchers +from core.domain import 
topic_services +from core.domain import translation_domain from core.domain import user_services from core.platform import models from core.tests import test_utils +from typing import Dict, Final, List, Union -(skill_models, suggestion_models) = models.Registry.import_models( - [models.NAMES.skill, models.NAMES.suggestion]) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import question_models + from mypy_imports import skill_models + +(skill_models, question_models) = models.Registry.import_models([ + models.Names.SKILL, models.Names.QUESTION +]) + +SuggestionChangeDictType = Dict[ + str, + Union[ + str, + Dict[str, Union[state_domain.StateDict, int, str, List[str]]], + float + ] +] class SkillServicesUnitTests(test_utils.GenericTestBase): """Test the skill services module.""" - SKILL_ID = None - USER_ID = 'user' - MISCONCEPTION_ID_1 = 1 - MISCONCEPTION_ID_2 = 2 + USER_ID: Final = 'user' + MISCONCEPTION_ID_1: Final = 1 + MISCONCEPTION_ID_2: Final = 2 - def setUp(self): - super(SkillServicesUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '

    Example Question 1

    '), state_domain.SubtitledHtml('3', '

    Example Explanation 1

    ') @@ -58,7 +75,7 @@ def setUp(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -93,9 +110,9 @@ def setUp(self): skill_contents=skill_contents, prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) - def test_apply_change_list_with_invalid_property_name(self): + def test_apply_change_list_with_invalid_property_name(self) -> None: class MockSkillChange: - def __init__(self, cmd, property_name): + def __init__(self, cmd: str, property_name: str) -> None: self.cmd = cmd self.property_name = property_name @@ -103,19 +120,47 @@ def __init__(self, cmd, property_name): skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'invalid_property_name')] - with self.assertRaisesRegexp(Exception, 'Invalid change dict.'): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex(Exception, 'Invalid change dict.'): skill_services.apply_change_list( - self.SKILL_ID, invalid_skill_change_list, self.user_id_a) + self.SKILL_ID, invalid_skill_change_list, self.user_id_a) # type: ignore[arg-type] - def test_compute_summary(self): - skill_summary = skill_services.compute_summary_of_skill(self.skill) + def test_compute_summary(self) -> None: + skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) + skill_summary = skill_services.compute_summary_of_skill(skill) self.assertEqual(skill_summary.id, self.SKILL_ID) self.assertEqual(skill_summary.description, 'Description') self.assertEqual(skill_summary.misconception_count, 1) self.assertEqual(skill_summary.worked_examples_count, 1) - def test_get_image_filenames_from_skill(self): + def test_raises_error_when_the_skill_provided_with_no_created_on_data( + self + ) -> None: + skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) + skill.created_on = None + + with self.assertRaisesRegex( + Exception, + 'No data available for when the skill was created.' + ): + skill_services.compute_summary_of_skill(skill) + + def test_raises_error_when_the_skill_provided_with_no_last_updated_data( + self + ) -> None: + skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) + skill.last_updated = None + + with self.assertRaisesRegex( + Exception, + 'No data available for when the skill was last_updated.' + ): + skill_services.compute_summary_of_skill(skill) + + def test_get_image_filenames_from_skill(self) -> None: explanation_html = ( 'Explanation with image: None: new_skill_id = skill_services.get_new_skill_id() self.assertEqual(len(new_skill_id), 12) self.assertEqual(skill_models.SkillModel.get_by_id(new_skill_id), None) - def test_get_descriptions_of_skills(self): + def test_get_descriptions_of_skills(self) -> None: example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '

    Example Question 1

    '), state_domain.SubtitledHtml('3', '

    Example Explanation 1

    ') @@ -170,7 +215,7 @@ def test_get_descriptions_of_skills(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -188,7 +233,7 @@ def test_get_descriptions_of_skills(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -203,12 +248,11 @@ def test_get_descriptions_of_skills(self): self.assertEqual(deleted_skill_ids, ['skill_id_2']) self.assertEqual( skill_descriptions, { - 'skill_id_1': 'Description 1', - 'skill_id_2': None + 'skill_id_1': 'Description 1' } ) - def test_get_rubrics_of_linked_skills(self): + def test_get_rubrics_of_linked_skills(self) -> None: example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '

    Example Question 1

    '), state_domain.SubtitledHtml('3', '

    Example Explanation 1

    ') @@ -224,7 +268,7 @@ def test_get_rubrics_of_linked_skills(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -242,7 +286,7 @@ def test_get_rubrics_of_linked_skills(self): '1': {}, '2': {}, '3': {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } @@ -272,13 +316,13 @@ def test_get_rubrics_of_linked_skills(self): } ) - def test_get_skill_from_model(self): + def test_get_skill_from_model(self) -> None: skill_model = skill_models.SkillModel.get(self.SKILL_ID) skill = skill_fetchers.get_skill_from_model(skill_model) self.assertEqual(skill.to_dict(), self.skill.to_dict()) - def test_get_skill_summary_from_model(self): + def test_get_skill_summary_from_model(self) -> None: skill_summary_model = skill_models.SkillSummaryModel.get(self.SKILL_ID) skill_summary = skill_services.get_skill_summary_from_model( skill_summary_model) @@ -288,7 +332,7 @@ def test_get_skill_summary_from_model(self): self.assertEqual(skill_summary.misconception_count, 1) self.assertEqual(skill_summary.worked_examples_count, 1) - def test_get_all_skill_summaries(self): + def test_get_all_skill_summaries(self) -> None: skill_summaries = skill_services.get_all_skill_summaries() self.assertEqual(len(skill_summaries), 1) @@ -297,28 +341,30 @@ def test_get_all_skill_summaries(self): self.assertEqual(skill_summaries[0].misconception_count, 1) self.assertEqual(skill_summaries[0].worked_examples_count, 1) - def test_commit_log_entry(self): + def test_commit_log_entry(self) -> None: skill_commit_log_entry = ( skill_models.SkillCommitLogEntryModel.get_commit(self.SKILL_ID, 1) ) + # Ruling out the possibility of None for mypy type checking. 
+ assert skill_commit_log_entry is not None self.assertEqual(skill_commit_log_entry.commit_type, 'create') self.assertEqual(skill_commit_log_entry.skill_id, self.SKILL_ID) self.assertEqual(skill_commit_log_entry.user_id, self.USER_ID) - def test_get_skill_summary_by_id(self): + def test_get_skill_summary_by_id(self) -> None: skill_summary = skill_services.get_skill_summary_by_id(self.SKILL_ID) self.assertEqual(skill_summary.id, self.SKILL_ID) self.assertEqual(skill_summary.description, 'Description') self.assertEqual(skill_summary.misconception_count, 1) - def test_get_filtered_skill_summaries(self): + def test_get_filtered_skill_summaries(self) -> None: self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, None, None, None, None, None)) + self.num_queries_to_fetch, None, None, [], None, None)) self.assertEqual(next_cursor, None) self.assertFalse(more) @@ -328,13 +374,13 @@ def test_get_filtered_skill_summaries(self): augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - 1, None, 'english', None, None, None)) + 1, None, 'english', [], None, None)) self.assertEqual(len(augmented_skill_summaries), 0) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, None, None, None, + self.num_queries_to_fetch, None, None, [], 'Oldest Created', None)) self.assertEqual(len(augmented_skill_summaries), 2) @@ -343,7 +389,7 @@ def test_get_filtered_skill_summaries(self): augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, None, None, None, + self.num_queries_to_fetch, None, None, [], 'Most Recently Updated', None)) self.assertEqual(len(augmented_skill_summaries), 2) @@ -352,14 +398,16 @@ def 
test_get_filtered_skill_summaries(self): augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, None, None, None, + self.num_queries_to_fetch, None, None, [], 'Least Recently Updated', None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID) self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID2) - def test_cursor_behaves_correctly_when_fetching_skills_in_batches(self): + def test_cursor_behaves_correctly_when_fetching_skills_in_batches( + self + ) -> None: self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=[]) @@ -369,26 +417,26 @@ def test_cursor_behaves_correctly_when_fetching_skills_in_batches(self): augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - 1, None, None, None, None, None)) + 1, None, None, [], None, None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertIsInstance(next_cursor, str) self.assertTrue(more) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, None, None, None, None, next_cursor)) + self.num_queries_to_fetch, None, None, [], None, next_cursor)) self.assertEqual(len(augmented_skill_summaries), 1) self.assertIsNone(next_cursor) self.assertFalse(more) - def test_filter_skills_by_status_all(self): + def test_filter_skills_by_status_all(self) -> None: self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, None, None, None, + self.num_queries_to_fetch, None, None, [], None, None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(next_cursor, None) @@ -396,20 +444,20 @@ def 
test_filter_skills_by_status_all(self): augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, 'All', None, None, + self.num_queries_to_fetch, 'All', None, [], None, None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(next_cursor, None) self.assertFalse(more) - def test_filter_skills_by_status_assigned(self): + def test_filter_skills_by_status_assigned(self) -> None: self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, 'Assigned', None, None, None, None)) + self.num_queries_to_fetch, 'Assigned', None, [], None, None)) self.assertEqual(len(augmented_skill_summaries), 0) self.assertEqual(next_cursor, None) self.assertFalse(more) @@ -427,29 +475,29 @@ def test_filter_skills_by_status_assigned(self): augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, 'Assigned', None, - None, None, None)) + [], None, None)) self.assertEqual(augmented_skill_summaries[0].topic_names, ['topic1']) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2) self.assertEqual(next_cursor, None) self.assertFalse(more) - def test_filter_skills_by_status_unassigned(self): + def test_filter_skills_by_status_unassigned(self) -> None: self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, 'Unassigned', None, None, + self.num_queries_to_fetch, 'Unassigned', None, [], None, None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(next_cursor, None) self.assertFalse(more) - def test_filter_skills_by_classroom_name(self): + 
def test_filter_skills_by_classroom_name(self) -> None: augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, None, 'english', None, None, None)) + self.num_queries_to_fetch, None, 'english', [], None, None)) self.assertEqual(len(augmented_skill_summaries), 0) self.assertEqual(next_cursor, None) self.assertFalse(more) @@ -480,7 +528,7 @@ def test_filter_skills_by_classroom_name(self): augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, None, 'math', None, + self.num_queries_to_fetch, None, 'math', [], None, None)) self.assertEqual(augmented_skill_summaries[0].topic_names, ['topic1']) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2) @@ -489,7 +537,7 @@ def test_filter_skills_by_classroom_name(self): self.assertEqual(next_cursor, None) self.assertFalse(more) - def test_filter_skills_by_keywords(self): + def test_filter_skills_by_keywords(self) -> None: self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Alpha', misconceptions=None, @@ -503,7 +551,7 @@ def test_filter_skills_by_keywords(self): augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( - self.num_queries_to_fetch, None, None, None, None, None)) + self.num_queries_to_fetch, None, None, [], None, None)) self.assertEqual(len(augmented_skill_summaries), 3) self.assertEqual(next_cursor, None) @@ -560,7 +608,16 @@ def test_filter_skills_by_keywords(self): self.assertEqual(next_cursor, None) self.assertFalse(more) - def test_get_all_topic_assignments_for_skill(self): + augmented_skill_summaries, next_cursor, more = ( + skill_services.get_filtered_skill_summaries( + self.num_queries_to_fetch, 'invalid_status', None, + ['alp', 'bet'], None, None)) + + self.assertEqual(len(augmented_skill_summaries), 0) + self.assertEqual(next_cursor, None) + self.assertFalse(more) + + def 
test_get_all_topic_assignments_for_skill(self) -> None: topic_id = topic_fetchers.get_new_topic_id() topic_id_1 = topic_fetchers.get_new_topic_id() self.save_new_topic( @@ -604,7 +661,7 @@ def test_get_all_topic_assignments_for_skill(self): self.assertEqual(topic_assignments[1].topic_version, 1) self.assertEqual(topic_assignments[1].subtopic_id, 1) - def test_remove_skill_from_all_topics(self): + def test_remove_skill_from_all_topics(self) -> None: topic_id = topic_fetchers.get_new_topic_id() topic_id_1 = topic_fetchers.get_new_topic_id() self.save_new_topic( @@ -638,7 +695,7 @@ def test_remove_skill_from_all_topics(self): skill_services.get_all_topic_assignments_for_skill(self.SKILL_ID)) self.assertEqual(len(topic_assignments_dict), 0) - def test_successfully_replace_skill_id_in_all_topics(self): + def test_successfully_replace_skill_id_in_all_topics(self) -> None: topic_id = topic_fetchers.get_new_topic_id() topic_id_1 = topic_fetchers.get_new_topic_id() self.save_new_topic( @@ -676,7 +733,7 @@ def test_successfully_replace_skill_id_in_all_topics(self): skill_services.get_all_topic_assignments_for_skill('new_skill_id')) self.assertEqual(len(topic_assignments_dict), 2) - def test_failure_replace_skill_id_in_all_topics(self): + def test_failure_replace_skill_id_in_all_topics(self) -> None: topic_id = topic_fetchers.get_new_topic_id() self.save_new_topic( topic_id, self.USER_ID, name='Topic1', @@ -690,11 +747,11 @@ def test_failure_replace_skill_id_in_all_topics(self): 'Found topic \'Topic1\' contains the two skills to be merged. 
' 'Please unassign one of these skills from topic ' 'and retry this operation.') - with self.assertRaisesRegexp(Exception, error_message): + with self.assertRaisesRegex(Exception, error_message): skill_services.replace_skill_id_in_all_topics( self.USER_ID, self.SKILL_ID, 'new_skill_id') - def test_update_skill(self): + def test_update_skill(self) -> None: changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_ADD_SKILL_MISCONCEPTION, @@ -760,7 +817,7 @@ def test_update_skill(self): '

    New Explanation 1

    ', '

    New Explanation 2

    ']) self.assertEqual(skill.rubrics[1].explanations, ['

    Explanation

    ']) - def test_merge_skill(self): + def test_merge_skill(self) -> None: changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, @@ -785,7 +842,7 @@ def test_merge_skill(self): self.assertEqual(skill.superseding_skill_id, 'TestSkillId') self.assertEqual(skill.all_questions_merged, False) - def test_set_merge_complete_for_skill(self): + def test_set_merge_complete_for_skill(self) -> None: changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, @@ -809,7 +866,7 @@ def test_set_merge_complete_for_skill(self): self.assertEqual(skill.version, 2) self.assertEqual(skill.all_questions_merged, True) - def test_get_merged_skill_ids(self): + def test_get_merged_skill_ids(self) -> None: skill_ids = skill_services.get_merged_skill_ids() self.assertEqual(len(skill_ids), 0) changelist = [ @@ -828,7 +885,7 @@ def test_get_merged_skill_ids(self): self.assertEqual(len(skill_ids), 1) self.assertEqual(skill_ids[0], self.SKILL_ID) - def test_delete_skill(self): + def test_delete_skill(self) -> None: skill_services.delete_skill(self.USER_ID, self.SKILL_ID) self.assertEqual( skill_fetchers.get_skill_by_id(self.SKILL_ID, strict=False), None) @@ -836,7 +893,7 @@ def test_delete_skill(self): skill_services.get_skill_summary_by_id( self.SKILL_ID, strict=False), None) - def test_delete_skill_marked_deleted(self): + def test_delete_skill_marked_deleted(self) -> None: skill_models.SkillModel.delete_multi( [self.SKILL_ID], self.USER_ID, '', force_deletion=False) skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID) @@ -850,13 +907,13 @@ def test_delete_skill_marked_deleted(self): skill_services.get_skill_summary_by_id( self.SKILL_ID, strict=False), None) - def test_delete_skill_model_with_deleted_summary_model(self): + def test_delete_skill_model_with_deleted_summary_model(self) -> None: skill_summary_model = ( skill_models.SkillSummaryModel.get(self.SKILL_ID)) skill_summary_model.delete() - skill_summary_model = ( - 
skill_models.SkillSummaryModel.get(self.SKILL_ID, False)) - self.assertIsNone(skill_summary_model) + skill_summary_model_with_none = ( + skill_models.SkillSummaryModel.get(self.SKILL_ID, strict=False)) + self.assertIsNone(skill_summary_model_with_none) skill_services.delete_skill( self.USER_ID, self.SKILL_ID, force_deletion=True) @@ -866,19 +923,22 @@ def test_delete_skill_model_with_deleted_summary_model(self): skill_services.get_skill_summary_by_id( self.SKILL_ID, strict=False), None) - def test_delete_skill_model_with_linked_suggestion(self): - suggestion_change = { + def test_delete_skill_model_with_linked_suggestion(self) -> None: + content_id_generator = translation_domain.ContentIdGenerator() + suggestion_change: SuggestionChangeDictType = { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': self.SKILL_ID, 'skill_difficulty': 0.3 @@ -894,13 +954,13 @@ def test_delete_skill_model_with_linked_suggestion(self): skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID) self.assertEqual(skill_model, None) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The suggestion with id %s has already been accepted/' 'rejected.' 
% suggestion.suggestion_id): suggestion_services.auto_reject_question_suggestions_for_skill_id( self.SKILL_ID) - def test_cannot_update_skill_with_no_commit_message(self): + def test_cannot_update_skill_with_no_commit_message(self) -> None: changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, @@ -910,20 +970,20 @@ def test_cannot_update_skill_with_no_commit_message(self): }) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected a commit message, received none.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, '') - def test_cannot_update_skill_with_empty_changelist(self): - with self.assertRaisesRegexp( + def test_cannot_update_skill_with_empty_changelist(self) -> None: + with self.assertRaisesRegex( Exception, 'Unexpected error: received an invalid change list when trying to ' 'save skill'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, [], 'No changes made.') - def test_mismatch_of_skill_versions(self): + def test_mismatch_of_skill_versions(self) -> None: changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, @@ -935,7 +995,7 @@ def test_mismatch_of_skill_versions(self): skill_model = skill_models.SkillModel.get(self.SKILL_ID) skill_model.version = 0 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unexpected error: trying to update version 0 of skill ' 'from version 1. Please reload the page and try again.'): @@ -944,7 +1004,7 @@ def test_mismatch_of_skill_versions(self): 'Change language code.') skill_model.version = 2 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Trying to update version 2 of skill from version 1, which is too ' 'old. 
Please reload the page and try again.'): @@ -952,7 +1012,7 @@ def test_mismatch_of_skill_versions(self): self.USER_ID, self.SKILL_ID, changelist, 'Change language code.') - def test_normal_user_cannot_update_skill_property(self): + def test_normal_user_cannot_update_skill_property(self) -> None: changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, @@ -962,7 +1022,7 @@ def test_normal_user_cannot_update_skill_property(self): }) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The user does not have enough rights to edit the ' 'skill description.'): @@ -970,7 +1030,33 @@ def test_normal_user_cannot_update_skill_property(self): self.user_id_a, self.SKILL_ID, changelist, 'Change description.') - def test_update_skill_explanation(self): + def test_update_skill_property(self) -> None: + skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) + old_description = 'Description' + new_description = 'New description' + + self.assertEqual( + skill.description, old_description) + + changelist = [ + skill_domain.SkillChange({ + 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, + 'property_name': skill_domain.SKILL_PROPERTY_DESCRIPTION, + 'old_value': old_description, + 'new_value': new_description + }) + ] + skill_services.update_skill( + self.user_id_admin, + self.SKILL_ID, changelist, + 'Change description.' + ) + + skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) + self.assertEqual( + skill.description, new_description) + + def test_update_skill_explanation(self) -> None: skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) old_explanation = {'content_id': '1', 'html': '

    Explanation

    '} new_explanation = {'content_id': '1', 'html': '

    New explanation

    '} @@ -994,7 +1080,7 @@ def test_update_skill_explanation(self): self.assertEqual( skill.skill_contents.explanation.to_dict(), new_explanation) - def test_update_skill_worked_examples(self): + def test_update_skill_worked_examples(self) -> None: skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) old_worked_example = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '

    Example Question 1

    '), @@ -1028,7 +1114,7 @@ def test_update_skill_worked_examples(self): skill.skill_contents.worked_examples[0].to_dict(), new_worked_example) - def test_delete_skill_misconception(self): + def test_delete_skill_misconception(self) -> None: skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(len(skill.misconceptions), 1) @@ -1047,7 +1133,17 @@ def test_delete_skill_misconception(self): self.assertEqual(skill.misconceptions, []) - def test_update_skill_misconception_notes(self): + def test_does_skill_with_description_exist(self) -> None: + self.assertEqual( + skill_services.does_skill_with_description_exist('Description'), + True + ) + self.assertEqual( + skill_services.does_skill_with_description_exist('Does not exist'), + False + ) + + def test_update_skill_misconception_notes(self) -> None: skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(len(skill.misconceptions), 1) @@ -1075,7 +1171,7 @@ def test_update_skill_misconception_notes(self): self.assertEqual( skill.misconceptions[0].notes, '

    new description

    ') - def test_update_skill_misconception_feedback(self): + def test_update_skill_misconception_feedback(self) -> None: skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(len(skill.misconceptions), 1) @@ -1104,7 +1200,35 @@ def test_update_skill_misconception_feedback(self): self.assertEqual( skill.misconceptions[0].feedback, '

    new feedback

    ') - def test_update_skill_schema(self): + def test_skill_has_associated_questions(self) -> None: + skill_id_1 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_1, 'user', description='Description 1') + + # Testing that no question is linked to a skill. + self.assertEqual( + skill_services.skill_has_associated_questions(skill_id_1), + False + ) + + questionskilllink_model1 = ( + question_models.QuestionSkillLinkModel.create( + 'question_id1', skill_id_1, 0.1) + ) + questionskilllink_model2 = ( + question_models.QuestionSkillLinkModel.create( + 'question_id2', skill_id_1, 0.2) + ) + + question_models.QuestionSkillLinkModel.put_multi_question_skill_links( + [questionskilllink_model1, questionskilllink_model2] + ) + + self.assertEqual( + skill_services.skill_has_associated_questions(skill_id_1), + True + ) + + def test_update_skill_schema(self) -> None: orig_skill_dict = ( skill_fetchers.get_skill_by_id(self.SKILL_ID).to_dict()) @@ -1125,153 +1249,251 @@ def test_update_skill_schema(self): self.assertEqual(new_skill_dict['version'], 2) # Delete version and check that the two dicts are the same. - del orig_skill_dict['version'] - del new_skill_dict['version'] + # Here we use MyPy ignore because MyPy doesn't allow key deletion from + # TypedDict, thus we add an ignore. + del orig_skill_dict['version'] # type: ignore[misc] + # Here we use MyPy ignore because MyPy doesn't allow key deletion from + # TypedDict, thus we add an ignore. 
+ del new_skill_dict['version'] # type: ignore[misc] self.assertEqual(orig_skill_dict, new_skill_dict) - def test_cannot_update_skill_with_invalid_change_list(self): + def test_cannot_update_skill_with_invalid_change_list(self) -> None: observed_log_messages = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.error().""" observed_log_messages.append(msg % args) logging_swap = self.swap(logging, 'error', _mock_logging_function) - assert_raises_context_manager = self.assertRaisesRegexp( + assert_raises_context_manager = self.assertRaisesRegex( Exception, '\'str\' object has no attribute \'cmd\'') + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. with logging_swap, assert_raises_context_manager: skill_services.update_skill( - self.USER_ID, self.SKILL_ID, 'invalid_change_list', + self.USER_ID, self.SKILL_ID, 'invalid_change_list', # type: ignore[arg-type] 'commit message') self.assertEqual(len(observed_log_messages), 1) - self.assertRegexpMatches( + self.assertRegex( observed_log_messages[0], 'object has no' ' attribute \'cmd\' %s invalid_change_list' % self.SKILL_ID) - def test_cannot_update_misconception_name_with_invalid_id(self): + def test_cannot_update_misconception_name_with_invalid_id(self) -> None: changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME), - 'misconception_id': 'invalid_id', + 'misconception_id': 0, 'old_value': 'test name', 'new_value': 'Name' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Updated misconception name.') def 
test_cannot_update_misconception_must_be_addressed_with_invalid_id( - self): + self + ) -> None: changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED), - 'misconception_id': 'invalid_id', + 'misconception_id': 0, 'old_value': False, 'new_value': True })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Updated misconception must_be_addressed.') - def test_cannot_add_already_existing_prerequisite_skill(self): + def test_cannot_add_already_existing_prerequisite_skill(self) -> None: changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_ADD_PREREQUISITE_SKILL, 'skill_id': 'skill_id_1' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The skill is already a prerequisite skill.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Added prereq skill.') - def test_cannot_delete_non_existent_prerequisite_skill(self): + def test_cannot_delete_non_existent_prerequisite_skill(self) -> None: changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_DELETE_PREREQUISITE_SKILL, 'skill_id': 'skill_id_5' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The skill to remove is not a prerequisite skill.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Removed prereq skill.') - def test_cannot_add_rubric_with_invalid_difficulty(self): + def test_cannot_add_rubric_with_invalid_difficulty(self) -> None: changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_RUBRICS, 'difficulty': 'invalid_difficulty', 'explanations': ['

    Explanation

    '] })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'There is no rubric for the given difficulty.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Added rubric.') - def test_cannot_delete_misconception_with_invalid_id(self): + def test_cannot_delete_misconception_with_invalid_id(self) -> None: changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION, - 'misconception_id': 'invalid_id' + 'misconception_id': 0 })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Delete misconception') - def test_cannot_update_misconception_notes_with_invalid_id(self): + def test_cannot_update_misconception_notes_with_invalid_id(self) -> None: changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES), - 'misconception_id': 'invalid_id', + 'misconception_id': 0, 'old_value': 'description', 'new_value': 'new description' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Updated misconception notes.') - def test_cannot_update_misconception_feedback_with_invalid_id(self): + def test_cannot_update_misconception_feedback_with_invalid_id(self) -> None: changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK), - 'misconception_id': 'invalid_id', + 'misconception_id': 0, 'old_value': 'default_feedback', 'new_value': 'new feedback' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( 
self.USER_ID, self.SKILL_ID, changelist, 'Updated misconception feedback.') + def test_get_untriaged_skill_summaries(self) -> None: + skill_summaries = skill_services.get_all_skill_summaries() + skill_ids_assigned_to_some_topic = ( + topic_fetchers.get_all_skill_ids_assigned_to_some_topic()) + merged_skill_ids = skill_services.get_merged_skill_ids() + + untriaged_skill_summaries = ( + skill_services.get_untriaged_skill_summaries( + skill_summaries, skill_ids_assigned_to_some_topic, + merged_skill_ids)) + + untriaged_skill_summary_dicts = [ + skill_summary.to_dict() + for skill_summary in untriaged_skill_summaries] + + skill_summary = skill_services.get_skill_summary_by_id(self.SKILL_ID) + skill_summary_dict = skill_summary.to_dict() + expected_untriaged_skill_summary_dicts = [skill_summary_dict] + + self.assertEqual( + untriaged_skill_summary_dicts, + expected_untriaged_skill_summary_dicts) + + def test_get_categorized_skill_ids_and_descriptions(self) -> None: + topic_id = topic_fetchers.get_new_topic_id() + linked_skill_id = skill_services.get_new_skill_id() + self.save_new_skill( + linked_skill_id, self.user_id_admin, description='Description 3') + subtopic_skill_id = skill_services.get_new_skill_id() + self.save_new_skill( + subtopic_skill_id, self.user_id_admin, + description='Subtopic Skill') + + subtopic = topic_domain.Subtopic.create_default_subtopic( + 1, 'Subtopic Title', 'url-frag') + subtopic.skill_ids = [subtopic_skill_id] + + self.save_new_topic( + topic_id, self.user_id_admin, name='Topic Name', + abbreviated_name='topic', url_fragment='topic-name', + description='Description', canonical_story_ids=[], + additional_story_ids=[], + uncategorized_skill_ids=[linked_skill_id], + subtopics=[subtopic], next_subtopic_id=2) + + expected_categorized_skills_dict = { + 'Topic Name': { + 'uncategorized': [{ + 'skill_id': linked_skill_id, + 'skill_description': 'Description 3', + }], + 'Subtopic Title': [{ + 'skill_id': subtopic_skill_id, + 'skill_description': 
'Subtopic Skill' + }] + } + } + categorized_skills = ( + skill_services.get_categorized_skill_ids_and_descriptions()) + + self.assertEqual( + categorized_skills.to_dict(), + expected_categorized_skills_dict) + + def test_get_topic_names_with_given_skill_in_diagnostic_test(self) -> None: + """Checks whether a skill is assigned for the diagnostic test in + any of the existing topics. + """ + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + + owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + topic = topic_domain.Topic.create_default_topic( + 'topic_id', 'topic', 'abbrev', 'description', 'fragm') + topic.thumbnail_filename = 'thumbnail.svg' + topic.thumbnail_bg_color = '#C6DCDA' + topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-three')] + topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] + topic_services.save_new_topic(owner_id, topic) + + self.assertEqual( + skill_services.get_topic_names_with_given_skill_in_diagnostic_test( + 'skill_id_1'), ['topic']) + self.assertEqual( + skill_services.get_topic_names_with_given_skill_in_diagnostic_test( + 'incorrect_skill_id'), []) + class SkillMasteryServicesUnitTests(test_utils.GenericTestBase): """Test the skill mastery services module.""" - SKILL_IDS = [] - USER_ID = 'user' - DEGREE_OF_MASTERY_1 = 0.0 - DEGREE_OF_MASTERY_2 = 0.5 + USER_ID: Final = 'user' + DEGREE_OF_MASTERY_1: Final = 0.0 + DEGREE_OF_MASTERY_2: Final = 0.5 - def setUp(self): - super(SkillMasteryServicesUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.SKILL_ID_1 = skill_services.get_new_skill_id() self.SKILL_ID_2 = skill_services.get_new_skill_id() self.SKILL_ID_3 = skill_services.get_new_skill_id() @@ -1281,7 +1503,7 @@ def setUp(self): skill_services.create_user_skill_mastery( self.USER_ID, self.SKILL_ID_2, self.DEGREE_OF_MASTERY_2) - def 
test_get_user_skill_mastery(self): + def test_get_user_skill_mastery(self) -> None: degree_of_mastery = skill_services.get_user_skill_mastery( self.USER_ID, self.SKILL_ID_1) @@ -1292,7 +1514,7 @@ def test_get_user_skill_mastery(self): self.assertEqual(degree_of_mastery, None) - def test_get_multi_user_skill_mastery(self): + def test_get_multi_user_skill_mastery(self) -> None: degree_of_mastery = skill_services.get_multi_user_skill_mastery( self.USER_ID, self.SKILL_IDS) @@ -1303,7 +1525,7 @@ def test_get_multi_user_skill_mastery(self): self.SKILL_ID_3: None }) - def test_create_multi_user_skill_mastery(self): + def test_create_multi_user_skill_mastery(self) -> None: skill_id_4 = skill_services.get_new_skill_id() skill_id_5 = skill_services.get_new_skill_id() skill_services.create_multi_user_skill_mastery( @@ -1315,7 +1537,7 @@ def test_create_multi_user_skill_mastery(self): self.assertEqual( degrees_of_mastery, {skill_id_4: 0.3, skill_id_5: 0.5}) - def test_get_sorted_skill_ids(self): + def test_get_sorted_skill_ids(self) -> None: degrees_of_masteries = skill_services.get_multi_user_skill_mastery( self.USER_ID, self.SKILL_IDS) with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 2): @@ -1332,7 +1554,7 @@ def test_get_sorted_skill_ids(self): self.SKILL_ID_3, self.SKILL_ID_1, self.SKILL_ID_2] self.assertEqual(sorted_skill_ids, expected_sorted_skill_ids) - def test_filter_skills_by_mastery(self): + def test_filter_skills_by_mastery(self) -> None: with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 2): arranged_filtered_skill_ids = ( skill_services.filter_skills_by_mastery( @@ -1347,30 +1569,46 @@ def test_filter_skills_by_mastery(self): self.USER_ID, self.SKILL_IDS)) self.assertEqual(arranged_filtered_skill_ids, self.SKILL_IDS) + def test_get_multi_users_skills_mastery(self) -> None: + user_ids = [self.USER_ID, 'user_2'] + skill_ids = [self.SKILL_ID_1, self.SKILL_ID_2] + degrees_of_mastery = { + self.USER_ID: { + self.SKILL_ID_1: self.DEGREE_OF_MASTERY_1, + self.SKILL_ID_2: 
self.DEGREE_OF_MASTERY_2 + }, + 'user_2': { + self.SKILL_ID_1: None, + self.SKILL_ID_2: None + } + } + user_skill_mastery = skill_services.get_multi_users_skills_mastery( + user_ids, skill_ids) + self.assertEqual(user_skill_mastery, degrees_of_mastery) + class SkillMigrationTests(test_utils.GenericTestBase): - def test_migrate_skill_contents_to_latest_schema(self): + def test_migrate_skill_contents_to_latest_schema(self) -> None: commit_cmd = skill_domain.SkillChange({ 'cmd': skill_domain.CMD_CREATE_NEW }) explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID - html_content = ( - '

    Value

    ') - expected_html_content = ( + html_content = ( '

    Value

    ') + 'amp;quot;svg_filename&quot;: &quot;image.svg&quot;}">' + '') - written_translations_dict = { + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { 'translations_mapping': { 'content1': { 'en': { 'data_format': 'html', - 'translation': '', + 'translation': html_content, 'needs_update': True }, 'hi': { @@ -1381,53 +1619,27 @@ def test_migrate_skill_contents_to_latest_schema(self): } } } - written_translations_dict_math = { - 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': expected_html_content, - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - } - } - } - } - worked_example_dict = { + worked_example_dict_math: skill_domain.WorkedExampleDict = { 'question': { 'content_id': 'question1', - 'html': '' + 'html': html_content }, 'explanation': { 'content_id': 'explanation1', - 'html': '' - } - } - worked_example_dict_math = { - 'question': { - 'content_id': 'question1', - 'html': expected_html_content - }, - 'explanation': { - 'content_id': 'explanation1', - 'html': expected_html_content + 'html': html_content } } skill_contents = skill_domain.SkillContents( state_domain.SubtitledHtml( explanation_content_id, ''), - [skill_domain.WorkedExample.from_dict(worked_example_dict)], + [skill_domain.WorkedExample.from_dict(worked_example_dict_math)], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { explanation_content_id: {} } }), - state_domain.WrittenTranslations.from_dict( + translation_domain.WrittenTranslations.from_dict( written_translations_dict)) skill_contents_dict = skill_contents.to_dict() skill_contents_dict['explanation']['html'] = html_content @@ -1464,29 +1676,25 @@ def test_migrate_skill_contents_to_latest_schema(self): self.assertEqual(skill.skill_contents_schema_version, 4) self.assertEqual( - skill.skill_contents.explanation.html, - expected_html_content) + skill.skill_contents.explanation.html, 
html_content) self.assertEqual( skill.skill_contents.written_translations.to_dict(), - written_translations_dict_math) + written_translations_dict) self.assertEqual( skill.skill_contents.worked_examples[0].to_dict(), worked_example_dict_math) - def test_migrate_misconceptions_to_latest_schema(self): + def test_migrate_misconceptions_to_latest_schema(self) -> None: commit_cmd = skill_domain.SkillChange({ 'cmd': skill_domain.CMD_CREATE_NEW }) explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID - html_content = ( - '

    Value

    ') - expected_html_content = ( + html_content = ( '

    Value

    ') + 'amp;quot;svg_filename&quot;: &quot;image.svg&quot;}">' + '') skill_contents = skill_domain.SkillContents( state_domain.SubtitledHtml( @@ -1496,7 +1704,7 @@ def test_migrate_misconceptions_to_latest_schema(self): explanation_content_id: {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { explanation_content_id: {} } @@ -1531,23 +1739,20 @@ def test_migrate_misconceptions_to_latest_schema(self): self.assertEqual(skill.misconceptions_schema_version, 5) self.assertEqual(skill.misconceptions[0].must_be_addressed, True) - self.assertEqual(skill.misconceptions[0].notes, expected_html_content) + self.assertEqual(skill.misconceptions[0].notes, html_content) self.assertEqual( - skill.misconceptions[0].feedback, expected_html_content) + skill.misconceptions[0].feedback, html_content) - def test_migrate_rubrics_to_latest_schema(self): + def test_migrate_rubrics_to_latest_schema(self) -> None: commit_cmd = skill_domain.SkillChange({ 'cmd': skill_domain.CMD_CREATE_NEW }) explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID html_content = ( - '

    Value

    ') - expected_html_content = ( '

    Value

    ') + 'amp;quot;svg_filename&quot;: &quot;image.svg&quot;}">' + '') skill_contents = skill_domain.SkillContents( state_domain.SubtitledHtml( explanation_content_id, feconf.DEFAULT_SKILL_EXPLANATION), [], @@ -1556,7 +1761,7 @@ def test_migrate_rubrics_to_latest_schema(self): explanation_content_id: {} } }), - state_domain.WrittenTranslations.from_dict({ + translation_domain.WrittenTranslations.from_dict({ 'translations_mapping': { explanation_content_id: {} } @@ -1602,4 +1807,4 @@ def test_migrate_rubrics_to_latest_schema(self): self.assertEqual(skill.rubrics[2].difficulty, 'Hard') self.assertEqual( skill.rubrics[2].explanations, - ['Hard explanation', expected_html_content]) + ['Hard explanation', html_content]) diff --git a/core/domain/state_domain.py b/core/domain/state_domain.py index 07ff2f26ad87..e5364ff480d5 100644 --- a/core/domain/state_domain.py +++ b/core/domain/state_domain.py @@ -18,28 +18,88 @@ from __future__ import annotations -import collections import copy import itertools import logging +import math import re from core import android_validation_constants from core import feconf -from core import python_utils from core import schema_utils from core import utils from core.constants import constants from core.domain import customization_args_util -from core.domain import html_cleaner -from core.domain import interaction_registry from core.domain import param_domain -from core.domain import rules_registry -from core.domain import translatable_object_registry +from core.domain import translation_domain +from extensions import domain from extensions.objects.models import objects +from typing import ( + Any, Callable, Dict, Iterator, List, Literal, Mapping, Optional, Tuple, + Type, TypedDict, TypeVar, Union, cast, overload +) -class AnswerGroup: +from core.domain import html_cleaner # pylint: disable=invalid-import-from # isort:skip +from core.domain import interaction_registry # pylint: disable=invalid-import-from # isort:skip +from core.domain 
import rules_registry # pylint: disable=invalid-import-from # isort:skip + +MYPY = False +if MYPY: # pragma: no cover + from extensions.interactions import base + +_GenericCustomizationArgType = TypeVar('_GenericCustomizationArgType') + +# TODO(#14537): Refactor this file and remove imports marked +# with 'invalid-import-from'. + + +# The `AllowedRuleSpecInputTypes` is union of allowed types that a +# RuleSpec's inputs dictionary can accept for it's values. +AllowedRuleSpecInputTypes = Union[ + str, + int, + float, + List[str], + List[List[str]], + # Here we use type Any because some rule specs have deeply nested types, + # such as for the `NumberWithUnits` interaction. + Mapping[ + str, Union[str, List[str], int, bool, float, Dict[str, int], List[Any]] + ], +] + + +class TrainingDataDict(TypedDict): + """Type for the training data dictionary.""" + + answer_group_index: int + answers: List[str] + + +class AnswerGroupDict(TypedDict): + """Dictionary representing the AnswerGroup object.""" + + outcome: OutcomeDict + rule_specs: List[RuleSpecDict] + training_data: List[str] + tagged_skill_misconception_id: Optional[str] + + +class StateVersionHistoryDict(TypedDict): + """Dictionary representing the StateVersionHistory object.""" + + previously_edited_in_version: Optional[int] + state_name_in_previous_version: Optional[str] + committer_id: str + + +AcceptableCorrectAnswerTypes = Union[ + List[List[str]], List[str], str, Dict[str, str], int, None +] + + +class AnswerGroup(translation_domain.BaseTranslatableObject): """Value object for an answer group. Answer groups represent a set of rules dictating whether a shared feedback should be shared with the user. These rules are ORed together. 
Answer groups may also support a classifier @@ -48,8 +108,12 @@ class AnswerGroup: """ def __init__( - self, outcome, rule_specs, training_data, - tagged_skill_misconception_id): + self, + outcome: Outcome, + rule_specs: List[RuleSpec], + training_data: List[str], + tagged_skill_misconception_id: Optional[str] + ) -> None: """Initializes a AnswerGroup domain object. Args: @@ -71,7 +135,38 @@ def __init__( self.training_data = training_data self.tagged_skill_misconception_id = tagged_skill_misconception_id - def to_dict(self): + def get_translatable_contents_collection( + self, **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the answer group. + + Returns: + translatable_contents_collection: TranslatableContentsCollection. + An instance of TranslatableContentsCollection class. + """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + if self.outcome is not None: + ( + translatable_contents_collection + .add_fields_from_translatable_object(self.outcome) + ) + # TODO(#16256): Instead of hardcoding interactions name here, + # Interaction can have a flag indicating whether the rule_specs can have + # translations. + for rule_spec in self.rule_specs: + if kwargs['interaction_id'] not in ['TextInput', 'SetInput']: + break + ( + translatable_contents_collection + .add_fields_from_translatable_object( + rule_spec, + interaction_id=kwargs['interaction_id']) + ) + return translatable_contents_collection + + def to_dict(self) -> AnswerGroupDict: """Returns a dict representing this AnswerGroup domain object. Returns: @@ -85,26 +180,40 @@ def to_dict(self): 'tagged_skill_misconception_id': self.tagged_skill_misconception_id } + # TODO(#16467): Remove `validate` argument after validating all Question + # states by writing a migration and audit job. 
As the validation for + # answer group is common between Exploration and Question and the Question + # data is not yet migrated, we do not want to call the validations + # while we load the Question. @classmethod - def from_dict(cls, answer_group_dict): + def from_dict( + cls, answer_group_dict: AnswerGroupDict, validate: bool = True + ) -> AnswerGroup: """Return a AnswerGroup domain object from a dict. Args: answer_group_dict: dict. The dict representation of AnswerGroup object. + validate: bool. False, when the validations should not be called. Returns: AnswerGroup. The corresponding AnswerGroup domain object. """ return cls( - Outcome.from_dict(answer_group_dict['outcome']), + Outcome.from_dict(answer_group_dict['outcome'], validate=validate), [RuleSpec.from_dict(rs) for rs in answer_group_dict['rule_specs']], answer_group_dict['training_data'], answer_group_dict['tagged_skill_misconception_id'] ) - def validate(self, interaction, exp_param_specs_dict): + def validate( + self, + interaction: base.BaseInteraction, + exp_param_specs_dict: Dict[str, param_domain.ParamSpec], + *, + tagged_skill_misconception_id_required: bool = False, + ) -> None: """Verifies that all rule classes are valid, and that the AnswerGroup only has one classifier rule. @@ -113,23 +222,38 @@ def validate(self, interaction, exp_param_specs_dict): exp_param_specs_dict: dict. A dict of all parameters used in the exploration. Keys are parameter names and values are ParamSpec value objects with an object type property (obj_type). + tagged_skill_misconception_id_required: bool. The 'tagged_skill_ + misconception_id' is required or not. Raises: ValidationError. One or more attributes of the AnswerGroup are invalid. ValidationError. The AnswerGroup contains more than one classifier rule. + ValidationError. The tagged_skill_misconception_id is not valid. 
""" if not isinstance(self.rule_specs, list): raise utils.ValidationError( 'Expected answer group rules to be a list, received %s' % self.rule_specs) - if self.tagged_skill_misconception_id is not None: + if ( + self.tagged_skill_misconception_id is not None and + not tagged_skill_misconception_id_required + ): + raise utils.ValidationError( + 'Expected tagged skill misconception id to be None, ' + 'received %s' % self.tagged_skill_misconception_id) + + if ( + self.tagged_skill_misconception_id is not None and + tagged_skill_misconception_id_required + ): if not isinstance(self.tagged_skill_misconception_id, str): raise utils.ValidationError( 'Expected tagged skill misconception id to be a str, ' 'received %s' % self.tagged_skill_misconception_id) + if not re.match( constants.VALID_SKILL_MISCONCEPTION_ID_REGEX, self.tagged_skill_misconception_id): @@ -138,10 +262,9 @@ def validate(self, interaction, exp_param_specs_dict): 'to be -, received %s' % self.tagged_skill_misconception_id) - if len(self.rule_specs) == 0 and len(self.training_data) == 0: + if len(self.rule_specs) == 0: raise utils.ValidationError( - 'There must be at least one rule or training data for each' - ' answer group.') + 'There must be at least one rule for each answer group.') for rule_spec in self.rule_specs: if rule_spec.rule_type not in interaction.rules_dict: @@ -153,83 +276,14 @@ def validate(self, interaction, exp_param_specs_dict): self.outcome.validate() - def get_all_html_content_strings(self, interaction_id): - """Get all html content strings in the AnswerGroup. - - Args: - interaction_id: str. The interaction id that the answer group is - associated with. - - Returns: - list(str). The list of all html content strings in the interaction. - """ - html_list = [] - - # TODO(#9413): Find a way to include a reference to the interaction - # type in the Draft change lists. - # See issue: https://github.com/oppia/oppia/issues/9413. 
We cannot use - # the interaction-id from the rules_index_dict until issue-9413 has - # been fixed, because this method has no reference to the interaction - # type and draft changes use this method. The rules_index_dict below - # is used to figure out the assembly of the html in the rulespecs. - - outcome_html = self.outcome.feedback.html - html_list += [outcome_html] - - html_field_types_to_rule_specs = ( - rules_registry.Registry.get_html_field_types_to_rule_specs()) - for rule_spec in self.rule_specs: - for interaction_and_rule_details in ( - html_field_types_to_rule_specs.values()): - # Check that the value corresponds to the answer group's - # associated interaction id. - if ( - interaction_and_rule_details['interactionId'] != - interaction_id): - continue - - rule_type_has_html = ( - rule_spec.rule_type in - interaction_and_rule_details['ruleTypes'].keys()) - if rule_type_has_html: - html_type_format = interaction_and_rule_details['format'] - input_variables_from_html_mapping = ( - interaction_and_rule_details['ruleTypes'][ - rule_spec.rule_type][ - 'htmlInputVariables']) - input_variable_match_found = False - for input_variable in rule_spec.inputs.keys(): - if input_variable in input_variables_from_html_mapping: - input_variable_match_found = True - rule_input_variable = ( - rule_spec.inputs[input_variable]) - if (html_type_format == - feconf.HTML_RULE_VARIABLE_FORMAT_STRING): - html_list += [rule_input_variable] - elif (html_type_format == - feconf.HTML_RULE_VARIABLE_FORMAT_SET): - for value in rule_input_variable: - if isinstance(value, str): - html_list += [value] - elif (html_type_format == - feconf. 
- HTML_RULE_VARIABLE_FORMAT_LIST_OF_SETS): - for rule_spec_html in rule_input_variable: - html_list += rule_spec_html - else: - raise Exception( - 'The rule spec does not belong to a valid' - ' format.') - if not input_variable_match_found: - raise Exception( - 'Rule spec should have at least one valid input ' - 'variable with Html in it.') - - return html_list - @staticmethod def convert_html_in_answer_group( - answer_group_dict, conversion_fn, html_field_types_to_rule_specs): + answer_group_dict: AnswerGroupDict, + conversion_fn: Callable[[str], str], + html_field_types_to_rule_specs: Dict[ + str, rules_registry.RuleSpecsExtensionDict + ] + ) -> AnswerGroupDict: """Checks for HTML fields in an answer group dict and converts it according to the conversion function. @@ -258,10 +312,19 @@ def convert_html_in_answer_group( return answer_group_dict -class Hint: +class HintDict(TypedDict): + """Dictionary representing the Hint object.""" + + hint_content: SubtitledHtmlDict + + +class Hint(translation_domain.BaseTranslatableObject): """Value object representing a hint.""" - def __init__(self, hint_content): + def __init__( + self, + hint_content: SubtitledHtml + ) -> None: """Constructs a Hint domain object. Args: @@ -270,7 +333,27 @@ def __init__(self, hint_content): """ self.hint_content = hint_content - def to_dict(self): + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the hint. + + Returns: + translatable_contents_collection: TranslatableContentsCollection. + An instance of TranslatableContentsCollection class. 
+ """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + translatable_contents_collection.add_translatable_field( + self.hint_content.content_id, + translation_domain.ContentType.HINT, + translation_domain.TranslatableContentFormat.HTML, + self.hint_content.html) + return translatable_contents_collection + + def to_dict(self) -> HintDict: """Returns a dict representing this Hint domain object. Returns: @@ -280,26 +363,35 @@ def to_dict(self): 'hint_content': self.hint_content.to_dict(), } + # TODO(#16467): Remove `validate` argument after validating all Question + # states by writing a migration and audit job. As the validation for + # hint is common between Exploration and Question and the Question + # data is not yet migrated, we do not want to call the validations + # while we load the Question. @classmethod - def from_dict(cls, hint_dict): + def from_dict(cls, hint_dict: HintDict, validate: bool = True) -> Hint: """Return a Hint domain object from a dict. Args: hint_dict: dict. The dict representation of Hint object. + validate: bool. False, when the validations should not be called. Returns: Hint. The corresponding Hint domain object. """ hint_content = SubtitledHtml.from_dict(hint_dict['hint_content']) - hint_content.validate() + if validate: + hint_content.validate() return cls(hint_content) - def validate(self): + def validate(self) -> None: """Validates all properties of Hint.""" self.hint_content.validate() @staticmethod - def convert_html_in_hint(hint_dict, conversion_fn): + def convert_html_in_hint( + hint_dict: HintDict, conversion_fn: Callable[[str], str] + ) -> HintDict: """Checks for HTML fields in the hints and converts it according to the conversion function. 
@@ -316,7 +408,15 @@ def convert_html_in_hint(hint_dict, conversion_fn): return hint_dict -class Solution: +class SolutionDict(TypedDict): + """Dictionary representing the Solution object.""" + + answer_is_exclusive: bool + correct_answer: AcceptableCorrectAnswerTypes + explanation: SubtitledHtmlDict + + +class Solution(translation_domain.BaseTranslatableObject): """Value object representing a solution. A solution consists of answer_is_exclusive, correct_answer and an @@ -328,8 +428,12 @@ class Solution: """ def __init__( - self, interaction_id, answer_is_exclusive, - correct_answer, explanation): + self, + interaction_id: str, + answer_is_exclusive: bool, + correct_answer: AcceptableCorrectAnswerTypes, + explanation: SubtitledHtml + ) -> None: """Constructs a Solution domain object. Args: @@ -350,7 +454,27 @@ def __init__( interaction_id).normalize_answer(correct_answer)) self.explanation = explanation - def to_dict(self): + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the solution. + + Returns: + translatable_contents_collection: TranslatableContentsCollection. + An instance of TranslatableContentsCollection class. + """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + translatable_contents_collection.add_translatable_field( + self.explanation.content_id, + translation_domain.ContentType.SOLUTION, + translation_domain.TranslatableContentFormat.HTML, + self.explanation.html) + return translatable_contents_collection + + def to_dict(self) -> SolutionDict: """Returns a dict representing this Solution domain object. Returns: @@ -362,19 +486,31 @@ def to_dict(self): 'explanation': self.explanation.to_dict(), } + # TODO(#16467): Remove `validate` argument after validating all Question + # states by writing a migration and audit job. 
As the validation for + # solution is common between Exploration and Question and the Question + # data is not yet migrated, we do not want to call the validations + # while we load the Question. @classmethod - def from_dict(cls, interaction_id, solution_dict): + def from_dict( + cls, + interaction_id: str, + solution_dict: SolutionDict, + validate: bool = True + ) -> Solution: """Return a Solution domain object from a dict. Args: interaction_id: str. The interaction id. solution_dict: dict. The dict representation of Solution object. + validate: bool. False, when the validations should not be called. Returns: Solution. The corresponding Solution domain object. """ explanation = SubtitledHtml.from_dict(solution_dict['explanation']) - explanation.validate() + if validate: + explanation.validate() return cls( interaction_id, solution_dict['answer_is_exclusive'], @@ -383,7 +519,7 @@ def from_dict(cls, interaction_id, solution_dict): solution_dict['correct_answer']), explanation) - def validate(self, interaction_id): + def validate(self, interaction_id: str) -> None: """Validates all properties of Solution. Args: @@ -403,13 +539,19 @@ def validate(self, interaction_id): @staticmethod def convert_html_in_solution( - interaction_id, solution_dict, conversion_fn, - html_field_types_to_rule_specs, interaction_spec): + interaction_id: Optional[str], + solution_dict: SolutionDict, + conversion_fn: Callable[[str], str], + html_field_types_to_rule_specs: Dict[ + str, rules_registry.RuleSpecsExtensionDict + ], + interaction_spec: base.BaseInteractionDict + ) -> SolutionDict: """Checks for HTML fields in a solution and convert it according to the conversion function. Args: - interaction_id: str. The interaction id. + interaction_id: Optional[str]. The interaction id. solution_dict: dict. The Solution dict. conversion_fn: function. The function to be used for converting the HTML. @@ -422,6 +564,9 @@ def convert_html_in_solution( Returns: dict. The converted Solution dict. 
+ + Raises: + Exception. The Solution dict has an invalid answer type. """ if interaction_id is None: return solution_dict @@ -438,17 +583,45 @@ def convert_html_in_solution( html_type == feconf.ANSWER_TYPE_LIST_OF_SETS_OF_HTML): + # Here correct_answer can only be of type + # List[List[str]] because here html_type is + # 'ListOfSetsOfHtmlStrings'. + assert isinstance( + solution_dict['correct_answer'], list + ) for list_index, html_list in enumerate( solution_dict['correct_answer']): + assert isinstance(html_list, list) for answer_html_index, answer_html in enumerate( html_list): - solution_dict['correct_answer'][list_index][ + # Here we use cast because above assert + # conditions forces correct_answer to be of + # type List[List[str]]. + correct_answer = cast( + List[List[str]], + solution_dict['correct_answer'] + ) + correct_answer[list_index][ answer_html_index] = ( conversion_fn(answer_html)) elif html_type == feconf.ANSWER_TYPE_SET_OF_HTML: + # Here correct_answer can only be of type + # List[str] because here html_type is + # 'SetOfHtmlString'. + assert isinstance( + solution_dict['correct_answer'], list + ) for answer_html_index, answer_html in enumerate( solution_dict['correct_answer']): - solution_dict['correct_answer'][ + assert isinstance(answer_html, str) + # Here we use cast because above assert + # conditions forces correct_answer to be of + # type List[str]. 
+ set_of_html_correct_answer = cast( + List[str], + solution_dict['correct_answer'] + ) + set_of_html_correct_answer[ answer_html_index] = ( conversion_fn(answer_html)) else: @@ -459,13 +632,134 @@ def convert_html_in_solution( return solution_dict -class InteractionInstance: +class InteractionInstanceDict(TypedDict): + """Dictionary representing the InteractionInstance object.""" + + id: Optional[str] + customization_args: CustomizationArgsDictType + answer_groups: List[AnswerGroupDict] + default_outcome: Optional[OutcomeDict] + confirmed_unclassified_answers: List[AnswerGroup] + hints: List[HintDict] + solution: Optional[SolutionDict] + + +class InteractionInstance(translation_domain.BaseTranslatableObject): """Value object for an instance of an interaction.""" + class RangeVariableDict(TypedDict): + """Dictionary representing the range variable for the NumericInput + interaction. + """ + + ans_group_index: int + rule_spec_index: int + lower_bound: Optional[float] + upper_bound: Optional[float] + lb_inclusive: bool + ub_inclusive: bool + + class MatchedDenominatorDict(TypedDict): + """Dictionary representing the matched denominator variable for the + FractionInput interaction. + """ + + ans_group_index: int + rule_spec_index: int + denominator: int + # The default interaction used for a new state. _DEFAULT_INTERACTION_ID = None - def to_dict(self): + def __init__( + self, + interaction_id: Optional[str], + customization_args: Dict[str, InteractionCustomizationArg], + answer_groups: List[AnswerGroup], + default_outcome: Optional[Outcome], + confirmed_unclassified_answers: List[AnswerGroup], + hints: List[Hint], + solution: Optional[Solution] + ) -> None: + """Initializes a InteractionInstance domain object. + + Args: + interaction_id: Optional[str]. The interaction id. + customization_args: dict. The customization dict. 
The keys are + names of customization_args and the values are dicts with a + single key, 'value', whose corresponding value is the value of + the customization arg. + answer_groups: list(AnswerGroup). List of answer groups of the + interaction instance. + default_outcome: Optional[Outcome]. The default outcome of the + interaction instance, or None if no default outcome exists + for the interaction. + confirmed_unclassified_answers: list(*). List of answers which have + been confirmed to be associated with the default outcome. + hints: list(Hint). List of hints for this interaction. + solution: Solution|None. A possible solution for the question asked + in this interaction, or None if no solution exists for the + interaction. + """ + self.id = interaction_id + # Customization args for the interaction's view. Parts of these + # args may be Jinja templates that refer to state parameters. + # This is a dict: the keys are names of customization_args and the + # values are dicts with a single key, 'value', whose corresponding + # value is the value of the customization arg. + self.customization_args = customization_args + self.answer_groups = answer_groups + self.default_outcome = default_outcome + self.confirmed_unclassified_answers = confirmed_unclassified_answers + self.hints = hints + self.solution = solution + + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the interaction instance. + + Returns: + translatable_contents_collection: TranslatableContentsCollection. + An instance of TranslatableContentsCollection class. 
+ """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + if self.default_outcome is not None: + ( + translatable_contents_collection + .add_fields_from_translatable_object(self.default_outcome) + ) + for answer_group in self.answer_groups: + ( + translatable_contents_collection + .add_fields_from_translatable_object( + answer_group, + interaction_id=self.id + ) + ) + for customization_arg in self.customization_args.values(): + ( + translatable_contents_collection + .add_fields_from_translatable_object( + customization_arg, + interaction_id=self.id) + ) + for hint in self.hints: + ( + translatable_contents_collection + .add_fields_from_translatable_object(hint) + ) + if self.solution is not None: + ( + translatable_contents_collection + .add_fields_from_translatable_object(self.solution) + ) + return translatable_contents_collection + + def to_dict(self) -> InteractionInstanceDict: """Returns a dict representing this InteractionInstance domain object. Returns: @@ -501,25 +795,38 @@ def to_dict(self): 'solution': self.solution.to_dict() if self.solution else None, } + # TODO(#16467): Remove `validate` argument after validating all Question + # states by writing a migration and audit job. As the validation for + # interaction is common between Exploration and Question and the Question + # data is not yet migrated, we do not want to call the validations + # while we load the Question. @classmethod - def from_dict(cls, interaction_dict): + def from_dict( + cls, interaction_dict: InteractionInstanceDict, validate: bool = True + ) -> InteractionInstance: """Return a InteractionInstance domain object from a dict. Args: interaction_dict: dict. The dict representation of InteractionInstance object. + validate: bool. False, when the validations should not be called. Returns: InteractionInstance. The corresponding InteractionInstance domain object. 
""" default_outcome_dict = ( - Outcome.from_dict(interaction_dict['default_outcome']) + Outcome.from_dict( + interaction_dict['default_outcome'], validate=validate) if interaction_dict['default_outcome'] is not None else None) solution_dict = ( Solution.from_dict( - interaction_dict['id'], interaction_dict['solution']) - if (interaction_dict['solution'] and interaction_dict['id']) + interaction_dict['id'], interaction_dict['solution'], + validate=validate) + if ( + interaction_dict['solution'] is not None and + interaction_dict['id'] is not None + ) else None) customization_args = ( @@ -533,59 +840,43 @@ def from_dict(cls, interaction_dict): return cls( interaction_dict['id'], customization_args, - [AnswerGroup.from_dict(h) - for h in interaction_dict['answer_groups']], + ( + [AnswerGroup.from_dict(h, validate=validate) + for h in interaction_dict['answer_groups']] + ), default_outcome_dict, interaction_dict['confirmed_unclassified_answers'], - [Hint.from_dict(h) for h in interaction_dict['hints']], + ( + [Hint.from_dict(h, validate=validate) + for h in interaction_dict['hints']] + ), solution_dict) - def __init__( - self, interaction_id, customization_args, answer_groups, - default_outcome, confirmed_unclassified_answers, hints, solution): - """Initializes a InteractionInstance domain object. - - Args: - interaction_id: str. The interaction id. - customization_args: dict. The customization dict. The keys are - names of customization_args and the values are dicts with a - single key, 'value', whose corresponding value is the value of - the customization arg. - answer_groups: list(AnswerGroup). List of answer groups of the - interaction instance. - default_outcome: Outcome. The default outcome of the interaction - instance. - confirmed_unclassified_answers: list(*). List of answers which have - been confirmed to be associated with the default outcome. - hints: list(Hint). List of hints for this interaction. - solution: Solution. 
A possible solution for the question asked in - this interaction. - """ - self.id = interaction_id - # Customization args for the interaction's view. Parts of these - # args may be Jinja templates that refer to state parameters. - # This is a dict: the keys are names of customization_args and the - # values are dicts with a single key, 'value', whose corresponding - # value is the value of the customization arg. - self.customization_args = customization_args - self.answer_groups = answer_groups - self.default_outcome = default_outcome - self.confirmed_unclassified_answers = confirmed_unclassified_answers - self.hints = hints - self.solution = solution - @property - def is_terminal(self): + def is_terminal(self) -> bool: """Determines if this interaction type is terminal. If no ID is set for this interaction, it is assumed to not be terminal. Returns: bool. Whether the interaction is terminal. """ - return self.id and interaction_registry.Registry.get_interaction_by_id( - self.id).is_terminal + return bool( + self.id and interaction_registry.Registry.get_interaction_by_id( + self.id + ).is_terminal + ) + + @property + def is_linear(self) -> bool: + """Determines if this interaction type is linear. + + Returns: + bool. Whether the interaction is linear. + """ + return interaction_registry.Registry.get_interaction_by_id( + self.id).is_linear - def is_supported_on_android_app(self): + def is_supported_on_android_app(self) -> bool: """Determines whether the interaction is a valid interaction that is supported by the Android app. @@ -598,7 +889,8 @@ def is_supported_on_android_app(self): ) def is_rte_content_supported_on_android( - self, require_valid_component_names): + self, require_valid_component_names: Callable[[str], bool] + ) -> bool: """Determines whether the RTE content in interaction answer groups, hints and solution is supported by Android app. 
@@ -632,7 +924,7 @@ def is_rte_content_supported_on_android( return True - def get_all_outcomes(self): + def get_all_outcomes(self) -> List[Outcome]: """Returns a list of all outcomes of this interaction, taking into consideration every answer group and the default outcome. @@ -646,101 +938,1163 @@ def get_all_outcomes(self): outcomes.append(self.default_outcome) return outcomes - def validate(self, exp_param_specs_dict): - """Validates various properties of the InteractionInstance. + def _validate_continue_interaction(self) -> None: + """Validates Continue interaction.""" + # Here we use cast because we are narrowing down the type from various + # customization args value types to 'SubtitledUnicode' type, and this + # is done because here we are accessing 'buttontext' key from continue + # customization arg whose value is always of SubtitledUnicode type. + button_text_subtitled_unicode = cast( + SubtitledUnicode, + self.customization_args['buttonText'].value + ) + text_value = button_text_subtitled_unicode.unicode_str + if len(text_value) > 20: + raise utils.ValidationError( + 'The `continue` interaction text length should be atmost ' + '20 characters.' + ) + + def _validate_end_interaction(self) -> None: + """Validates End interaction.""" + # Here we use cast because we are narrowing down the type + # from various customization args value types to List[str] + # type, and this is done because here we are accessing + # 'recommendedExplorationIds' key from EndExploration + # customization arg whose value is always of List[str] type. + recc_exp_ids = cast( + List[str], + self.customization_args['recommendedExplorationIds'].value + ) + if len(recc_exp_ids) > 3: + raise utils.ValidationError( + 'The total number of recommended explorations inside End ' + 'interaction should be atmost 3.' + ) + + def _validates_choices_should_be_unique_and_nonempty( + self, choices: List[SubtitledHtml] + ) -> None: + """Validates that the choices should be unique and non empty. 
Args: - exp_param_specs_dict: dict. A dict of specified parameters used in - the exploration. Keys are parameter names and values are - ParamSpec value objects with an object type property(obj_type). - Is used to validate AnswerGroup objects. + choices: List[state_domain.SubtitledHtml]. Choices that needs to + be validated. Raises: - ValidationError. One or more attributes of the InteractionInstance - are invalid. + utils.ValidationError. Choice is empty. + utils.ValidationError. Choice is duplicate. """ - if not isinstance(self.id, str): - raise utils.ValidationError( - 'Expected interaction id to be a string, received %s' % - self.id) - try: - interaction = interaction_registry.Registry.get_interaction_by_id( - self.id) - except KeyError: - raise utils.ValidationError('Invalid interaction id: %s' % self.id) - - self._validate_customization_args() + seen_choices = [] + for choice in choices: + if html_cleaner.is_html_empty(choice.html): + raise utils.ValidationError( + 'Choices should be non empty.' + ) - if not isinstance(self.answer_groups, list): - raise utils.ValidationError( - 'Expected answer groups to be a list, received %s.' - % self.answer_groups) - if not self.is_terminal and self.default_outcome is None: - raise utils.ValidationError( - 'Non-terminal interactions must have a default outcome.') - if self.is_terminal and self.default_outcome is not None: - raise utils.ValidationError( - 'Terminal interactions must not have a default outcome.') - if self.is_terminal and self.answer_groups: - raise utils.ValidationError( - 'Terminal interactions must not have any answer groups.') + if choice.html not in seen_choices: + seen_choices.append(choice.html) + else: + raise utils.ValidationError( + 'Choices should be unique.' 
+ ) - for answer_group in self.answer_groups: - answer_group.validate(interaction, exp_param_specs_dict) - if self.default_outcome is not None: - self.default_outcome.validate() + def _set_lower_and_upper_bounds( + self, + range_var: RangeVariableDict, + lower_bound: float, + upper_bound: float, + *, + lb_inclusive: bool, + ub_inclusive: bool + ) -> None: + """Sets the lower and upper bounds for the range_var. - if not isinstance(self.hints, list): - raise utils.ValidationError( - 'Expected hints to be a list, received %s' - % self.hints) - for hint in self.hints: - hint.validate() + Args: + range_var: RangeVariableDict. Variable used to keep track of each + range. + lower_bound: float. The lower bound. + upper_bound: float. The upper bound. + lb_inclusive: bool. If lower bound is inclusive. + ub_inclusive: bool. If upper bound is inclusive. + """ + range_var['lower_bound'] = lower_bound + range_var['upper_bound'] = upper_bound + range_var['lb_inclusive'] = lb_inclusive + range_var['ub_inclusive'] = ub_inclusive - if self.solution: - self.solution.validate(self.id) + def _is_enclosed_by( + self, test_range: RangeVariableDict, base_range: RangeVariableDict + ) -> bool: + """Returns `True` when `test_range` variable lies within + `base_range` variable. - if self.solution and not self.hints: - raise utils.ValidationError( - 'Hint(s) must be specified if solution is specified') + Args: + test_range: RangeVariableDictDict. It represents the variable for + which we have to check the range. + base_range: RangeVariableDictDict. It is the variable to which + the range is compared. - def _validate_customization_args(self): - """Validates the customization arguments keys and values using - customization_args_util.validate_customization_args_and_values(). + Returns: + bool. Returns True if test_range lies + within base_range. 
""" - # Because validate_customization_args_and_values() takes in - # customization argument values that are dictionaries, we first convert - # the InteractionCustomizationArg domain objects into dictionaries - # before passing it to the method. + if ( + base_range['lower_bound'] is None or + test_range['lower_bound'] is None or + base_range['upper_bound'] is None or + test_range['upper_bound'] is None + ): + return False - # First, do some basic validation. - if not isinstance(self.customization_args, dict): - raise utils.ValidationError( - 'Expected customization args to be a dict, received %s' - % self.customization_args) + lb_satisfied = ( + base_range['lower_bound'] < test_range['lower_bound'] or + ( + base_range['lower_bound'] == test_range['lower_bound'] and + (not test_range['lb_inclusive'] or base_range['lb_inclusive']) + ) + ) + ub_satisfied = ( + base_range['upper_bound'] > test_range['upper_bound'] or + ( + base_range['upper_bound'] == test_range['upper_bound'] and + (not test_range['ub_inclusive'] or base_range['ub_inclusive']) + ) + ) + return lb_satisfied and ub_satisfied - # customization_args_dict here indicates a dict that maps customization - # argument names to a customization argument dict, the dict - # representation of InteractionCustomizationArg. - customization_args_dict = {} - if self.id: - for ca_name in self.customization_args: - try: - customization_args_dict[ca_name] = ( - self.customization_args[ - ca_name].to_customization_arg_dict() - ) - except AttributeError: - raise utils.ValidationError( - 'Expected customization arg value to be a ' - 'InteractionCustomizationArg domain object, ' - 'received %s' % self.customization_args[ca_name]) + def _should_check_range_criteria( + self, earlier_rule: RuleSpec, later_rule: RuleSpec + ) -> bool: + """Compares the rule types of two rule specs to determine whether + to check for range enclosure. 
- interaction = interaction_registry.Registry.get_interaction_by_id( - self.id) - customization_args_util.validate_customization_args_and_values( - 'interaction', self.id, customization_args_dict, - interaction.customization_arg_specs) + Args: + earlier_rule: RuleSpec. Previous rule. + later_rule: RuleSpec. Current rule. + + Returns: + bool. Returns True if the rules passes the range criteria check. + """ + if earlier_rule.rule_type in ( + 'HasDenominatorEqualTo', 'IsEquivalentTo', 'IsLessThan', + 'IsEquivalentToAndInSimplestForm', 'IsGreaterThan' + ): + return True + + return later_rule.rule_type in ( + 'HasDenominatorEqualTo', 'IsLessThan', 'IsGreaterThan' + ) + + def _get_rule_value_of_fraction_interaction( + self, rule_spec: RuleSpec + ) -> float: + """Returns rule value of the rule_spec of FractionInput interaction so + that we can keep track of rule's range. + + Args: + rule_spec: RuleSpec. Rule spec of an answer group. + + Returns: + rule_value_f: float. The value of the rule spec. + """ + rule_value_f = rule_spec.inputs['f'] + value: float = ( + rule_value_f['wholeNumber'] + + float(rule_value_f['numerator']) / rule_value_f['denominator'] + ) + return value + + def _validate_numeric_input(self, strict: bool = False) -> None: + """Validates the NumericInput interaction. + + Args: + strict: bool. If True, the exploration is assumed to be published. + + Raises: + ValidationError. Duplicate rules are present. + ValidationError. Rule having a solution that is subset of previous + rules' solution. + ValidationError. The 'tol' value in 'IsWithinTolerance' is negetive. + ValidationError. The 'a' is greater than or equal to 'b' in + 'IsInclusivelyBetween' rule. 
+ """ + lower_infinity = float('-inf') + upper_infinity = float('inf') + ranges: List[InteractionInstance.RangeVariableDict] = [] + rule_spec_till_now: List[RuleSpecDict] = [] + + for ans_group_index, answer_group in enumerate(self.answer_groups): + for rule_spec_index, rule_spec in enumerate( + answer_group.rule_specs + ): + # Rule should not be duplicate. + if rule_spec.to_dict() in rule_spec_till_now and strict: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of answer group ' + f'\'{ans_group_index}\' of NumericInput ' + f'interaction is already present.' + ) + rule_spec_till_now.append(rule_spec.to_dict()) + # All rules should have solutions that is not subset of + # previous rules' solutions. + range_var: InteractionInstance.RangeVariableDict = { + 'ans_group_index': int(ans_group_index), + 'rule_spec_index': int(rule_spec_index), + 'lower_bound': None, + 'upper_bound': None, + 'lb_inclusive': False, + 'ub_inclusive': False + } + + if rule_spec.rule_type == 'IsLessThanOrEqualTo': + rule_value = float(rule_spec.inputs['x']) + self._set_lower_and_upper_bounds( + range_var, + lower_infinity, + rule_value, + lb_inclusive=False, + ub_inclusive=True + ) + + elif rule_spec.rule_type == 'IsGreaterThanOrEqualTo': + rule_value = float(rule_spec.inputs['x']) + self._set_lower_and_upper_bounds( + range_var, + rule_value, + upper_infinity, + lb_inclusive=True, + ub_inclusive=False + ) + + elif rule_spec.rule_type == 'Equals': + rule_value = float(rule_spec.inputs['x']) + self._set_lower_and_upper_bounds( + range_var, + rule_value, + rule_value, + lb_inclusive=True, + ub_inclusive=True + ) + + elif rule_spec.rule_type == 'IsLessThan': + rule_value = float(rule_spec.inputs['x']) + self._set_lower_and_upper_bounds( + range_var, + lower_infinity, + rule_value, + lb_inclusive=False, + ub_inclusive=False + ) + + elif rule_spec.rule_type == 'IsGreaterThan': + rule_value = float(rule_spec.inputs['x']) + self._set_lower_and_upper_bounds( + range_var, + 
rule_value, + upper_infinity, + lb_inclusive=False, + ub_inclusive=False + ) + + elif rule_spec.rule_type == 'IsWithinTolerance': + rule_value_x = float(rule_spec.inputs['x']) + rule_value_tol = float(rule_spec.inputs['tol']) + if rule_value_tol <= 0.0: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of answer ' + f'group \'{ans_group_index}\' having ' + f'rule type \'IsWithinTolerance\' ' + f'have \'tol\' value less than or equal to ' + f'zero in NumericInput interaction.' + ) + self._set_lower_and_upper_bounds( + range_var, + rule_value_x - rule_value_tol, + rule_value_x + rule_value_tol, + lb_inclusive=True, + ub_inclusive=True + ) + + elif rule_spec.rule_type == 'IsInclusivelyBetween': + rule_value_a = float(rule_spec.inputs['a']) + rule_value_b = float(rule_spec.inputs['b']) + if rule_value_a >= rule_value_b and strict: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of answer ' + f'group \'{ans_group_index}\' having ' + f'rule type \'IsInclusivelyBetween\' ' + f'have `a` value greater than `b` value ' + f'in NumericInput interaction.' + ) + self._set_lower_and_upper_bounds( + range_var, + rule_value_a, + rule_value_b, + lb_inclusive=True, + ub_inclusive=True + ) + + for range_ele in ranges: + if self._is_enclosed_by(range_var, range_ele) and strict: + raise utils.ValidationError( + f'Rule \'{rule_spec_index}\' from answer ' + f'group \'{ans_group_index}\' will never be ' + f'matched because it is made redundant ' + f'by the above rules' + ) + + ranges.append(range_var) + + def _validate_fraction_input(self, strict: bool = False) -> None: + """Validates the FractionInput interaction. + + Args: + strict: bool. If True, the exploration is assumed to be published. + + Raises: + ValidationError. Duplicate rules are present. + ValidationError. Solution is not in simplest form when the + 'simplest form' setting is turned on. + ValidationError. 
Solution is not in proper form, having values + like 1 2/3 when the 'proper form' setting is turned on. + ValidationError. Solution is not in proper form, when the 'proper + form' setting is turned on. + ValidationError. The 'IsExactlyEqualTo' rule have integral value + when 'allow non zero integers' setting is off. + ValidationError. Rule have solution that is subset of previous + rules' solutions. + ValidationError. The 'HasFractionalPartExactlyEqualTo' rule comes + after 'HasDenominatorEqualTo' rule where the fractional + denominator is equal to 'HasDenominatorEqualTo' rule value. + """ + ranges: List[InteractionInstance.RangeVariableDict] = [] + matched_denominator_list: List[ + InteractionInstance.MatchedDenominatorDict] = [] + rule_spec_till_now: List[RuleSpecDict] = [] + inputs_without_fractions = [ + 'HasDenominatorEqualTo', + 'HasNumeratorEqualTo', + 'HasIntegerPartEqualTo', + 'HasNoFractionalPart' + ] + rules_that_can_have_improper_fractions = [ + 'IsExactlyEqualTo', + 'HasFractionalPartExactlyEqualTo' + ] + lower_infinity = float('-inf') + upper_infinity = float('inf') + allow_non_zero_integ_part = ( + self.customization_args['allowNonzeroIntegerPart'].value) + allow_imp_frac = self.customization_args['allowImproperFraction'].value + require_simple_form = ( + self.customization_args['requireSimplestForm'].value) + + for ans_group_index, answer_group in enumerate(self.answer_groups): + for rule_spec_index, rule_spec in enumerate( + answer_group.rule_specs + ): + # Rule should not be duplicate. + if rule_spec.to_dict() in rule_spec_till_now and strict: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of answer group ' + f'\'{ans_group_index}\' of FractionInput ' + f'interaction is already present.' 
+ ) + rule_spec_till_now.append(rule_spec.to_dict()) + + if rule_spec.rule_type not in inputs_without_fractions: + num = rule_spec.inputs['f']['numerator'] + den = rule_spec.inputs['f']['denominator'] + whole = rule_spec.inputs['f']['wholeNumber'] + + # Solution should be in simplest form if the `simplest form` + # setting is turned on. + if require_simple_form and strict: + d = math.gcd(num, den) + val_num = num // d + val_den = den // d + if val_num != num and val_den != den: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of ' + f'answer group \'{ans_group_index}\' do ' + f'not have value in simple form ' + f'in FractionInput interaction.' + ) + + if ( + strict and + not allow_imp_frac and + den <= num and + ( + rule_spec.rule_type in + rules_that_can_have_improper_fractions + ) + ): + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of ' + f'answer group \'{ans_group_index}\' do ' + f'not have value in proper fraction ' + f'in FractionInput interaction.' + ) + + # All rules should have solutions that is not subset of + # previous rules' solutions. + range_var: InteractionInstance.RangeVariableDict = { + 'ans_group_index': int(ans_group_index), + 'rule_spec_index': int(rule_spec_index), + 'lower_bound': None, + 'upper_bound': None, + 'lb_inclusive': False, + 'ub_inclusive': False + } + matched_denominator: ( + InteractionInstance.MatchedDenominatorDict + ) = { + 'ans_group_index': int(ans_group_index), + 'rule_spec_index': int(rule_spec_index), + 'denominator': 0 + } + + if rule_spec.rule_type in ( + 'IsEquivalentTo', 'IsExactlyEqualTo', + 'IsEquivalentToAndInSimplestForm' + ): + if ( + rule_spec.rule_type == 'IsExactlyEqualTo' and + not allow_non_zero_integ_part and + whole != 0 and + strict + ): + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of ' + f'answer group \'{ans_group_index}\' has ' + f'non zero integer part ' + f'in FractionInput interaction.' 
+ ) + rule_value_f = ( + self._get_rule_value_of_fraction_interaction(rule_spec)) + self._set_lower_and_upper_bounds( + range_var, + rule_value_f, + rule_value_f, + lb_inclusive=True, + ub_inclusive=True + ) + + if rule_spec.rule_type == 'IsGreaterThan': + rule_value_f = ( + self._get_rule_value_of_fraction_interaction(rule_spec)) + self._set_lower_and_upper_bounds( + range_var, + rule_value_f, + upper_infinity, + lb_inclusive=False, + ub_inclusive=False + ) + + if rule_spec.rule_type == 'IsLessThan': + rule_value_f = ( + self._get_rule_value_of_fraction_interaction(rule_spec)) + self._set_lower_and_upper_bounds( + range_var, + lower_infinity, + rule_value_f, + lb_inclusive=False, + ub_inclusive=False + ) + + if rule_spec.rule_type == 'HasDenominatorEqualTo': + rule_value_x = int(rule_spec.inputs['x']) + matched_denominator['denominator'] = rule_value_x + + for range_ele in ranges: + earlier_rule = ( + self.answer_groups[range_ele['ans_group_index']] + .rule_specs[range_ele['rule_spec_index']] + ) + if ( + self._should_check_range_criteria( + earlier_rule, rule_spec) and + self._is_enclosed_by(range_var, range_ele) and + strict + ): + raise utils.ValidationError( + f'Rule \'{rule_spec_index}\' from answer ' + f'group \'{ans_group_index}\' of ' + f'FractionInput interaction will ' + f'never be matched because it is ' + f'made redundant by the above rules' + ) + + # `HasFractionalPartExactlyEqualTo` rule should always come + # before `HasDenominatorEqualTo` rule where the fractional + # denominator is equal to `HasDenominatorEqualTo` rule value. 
+ for den in matched_denominator_list: + if ( + den is not None and rule_spec.rule_type == + 'HasFractionalPartExactlyEqualTo' and + den['denominator'] == + rule_spec.inputs['f']['denominator'] + ): + raise utils.ValidationError( + f'Rule \'{rule_spec_index}\' from answer ' + f'group \'{ans_group_index}\' of ' + f'FractionInput interaction having ' + f'rule type HasFractionalPart' + f'ExactlyEqualTo will ' + f'never be matched because it is ' + f'made redundant by the above rules' + ) + + ranges.append(range_var) + matched_denominator_list.append(matched_denominator) + + def _validate_number_with_units_input(self, strict: bool = False) -> None: + """Validates the NumberWithUnitsInput interaction. + + Args: + strict: bool. If True, the exploration is assumed to be published. + + Raises: + ValidationError. Duplicate rules are present. + ValidationError. The 'IsEqualTo' rule comes after 'IsEquivalentTo' + rule having same values. + """ + number_with_units_rules = [] + rule_spec_till_now: List[RuleSpecDict] = [] + + for ans_group_index, answer_group in enumerate(self.answer_groups): + for rule_spec_index, rule_spec in enumerate( + answer_group.rule_specs + ): + # Rule should not be duplicate. + if rule_spec.to_dict() in rule_spec_till_now and strict: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of answer group ' + f'\'{ans_group_index}\' of NumberWithUnitsInput ' + f'interaction is already present.' + ) + rule_spec_till_now.append(rule_spec.to_dict()) + + # `IsEqualTo` rule should not come after `IsEquivalentTo` rule. 
+ if rule_spec.rule_type == 'IsEquivalentTo': + number_with_units_rules.append(rule_spec.inputs['f']) + if ( + rule_spec.rule_type == 'IsEqualTo' and + rule_spec.inputs['f'] in number_with_units_rules and + strict + ): + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of answer ' + f'group \'{ans_group_index}\' has ' + f'rule type equal is coming after ' + f'rule type equivalent having same value ' + f'in FractionInput interaction.' + ) + + def _validate_multi_choice_input(self, strict: bool = False) -> None: + """Validates the MultipleChoiceInput interaction. + + Args: + strict: bool. If True, the exploration is assumed to be published. + + Raises: + ValidationError. Duplicate rules are present. + ValidationError. Answer choices are empty or duplicate. + """ + rule_spec_till_now: List[RuleSpecDict] = [] + + # Here we use cast because we are narrowing the down the + # type from various types of cust. args values, and here + # we sure that the type is always going to be List[SubtitledHtml] + # because 'MultipleChoiceInput' cust. arg objects always contain + # 'choices' key with List[SubtitledHtml] types of values. + choices = cast( + List[SubtitledHtml], + self.customization_args['choices'].value + ) + self._validates_choices_should_be_unique_and_nonempty(choices) + + for ans_group_index, answer_group in enumerate(self.answer_groups): + for rule_spec_index, rule_spec in enumerate( + answer_group.rule_specs + ): + # Rule should not be duplicate. + if rule_spec.to_dict() in rule_spec_till_now and strict: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of answer group ' + f'\'{ans_group_index}\' of MultipleChoiceInput ' + f'interaction is already present.' + ) + rule_spec_till_now.append(rule_spec.to_dict()) + + def _validate_item_selec_input(self, strict: bool = False) -> None: + """Validates the ItemSelectionInput interaction. + + Args: + strict: bool. If True, the exploration is assumed to be published. 
+ + Raises: + ValidationError. Duplicate rules are present. + ValidationError. The 'Equals' rule does not have value between min + and max number of selections. + ValidationError. Minimum number of selections value is greater + than maximum number of selections value. + ValidationError. Not enough choices to have minimum number of + selections. + ValidationError. Answer choices are empty or duplicate. + """ + # Here we use cast because we are narrowing down the type from + # various allowed cust. arg types to 'int', and here we are sure + # that the type is always going to be int because 'ItemInputSelection' + # customization args always contains 'minAllowableSelectionCount' key + # with int type of values. + min_value = cast( + int, + self.customization_args['minAllowableSelectionCount'].value + ) + # Here we use cast because we are narrowing down the type from + # various allowed cust. arg types to 'int', and here we are sure + # that the type is always going to be int because 'ItemInputSelection' + # customization args always contains 'maxAllowableSelectionCount' key + # with int type of values. + max_value = cast( + int, + self.customization_args['maxAllowableSelectionCount'].value + ) + rule_spec_till_now: List[RuleSpecDict] = [] + + # Here we use cast because we are narrowing down the type from + # various allowed cust. arg types to 'List[SubtitledHtml]', + # and here we are sure that the type is always going to be + # List[SubtitledHtml] because 'ItemInputSelection' customization + # args always contains 'choices' key with List[SubtitledHtml] + # type of values. + choices = cast( + List[SubtitledHtml], self.customization_args['choices'].value + ) + self._validates_choices_should_be_unique_and_nonempty(choices) + + # Minimum number of selections should be no greater than maximum + # number of selections. 
+ if min_value > max_value: + raise utils.ValidationError( + f'Min value which is {str(min_value)} ' + f'is greater than max value ' + f'which is {str(max_value)} ' + f'in ItemSelectionInput interaction.' + ) + + # There should be enough choices to have minimum number + # of selections. + if len(choices) < min_value: + raise utils.ValidationError( + f'Number of choices which is {str(len(choices))} ' + f'is lesser than the ' + f'min value selection which is {str(min_value)} ' + f'in ItemSelectionInput interaction.' + ) + + for ans_group_index, answer_group in enumerate(self.answer_groups): + for rule_spec_index, rule_spec in enumerate( + answer_group.rule_specs + ): + # Rule should not be duplicate. + if rule_spec.to_dict() in rule_spec_till_now and strict: + raise utils.ValidationError( + f'The rule {rule_spec_index} of answer group ' + f'{ans_group_index} of ItemSelectionInput interaction ' + f'is already present.' + ) + rule_spec_till_now.append(rule_spec.to_dict()) + + # `Equals` should have between min and max number of selections. + if rule_spec.rule_type == 'Equals': + if ( + strict and + ( + len(rule_spec.inputs['x']) < min_value or + len(rule_spec.inputs['x']) > max_value + ) + ): + raise utils.ValidationError( + f'Selected choices of rule \'{rule_spec_index}\' ' + f'of answer group \'{ans_group_index}\' ' + f'either less than min_selection_value ' + f'or greater than max_selection_value ' + f'in ItemSelectionInput interaction.' + ) + + def _validate_drag_and_drop_input(self, strict: bool = False) -> None: + """Validates the DragAndDropInput interaction. + + Args: + strict: bool. If True, the exploration is assumed to be published. + + Raises: + ValidationError. Duplicate rules are present. + ValidationError. Multiple items at the same place when the setting + is turned off. + ValidationError. The 'IsEqualToOrderingWithOneItemAtIncorrect + Position' rule present when 'multiple items at same place' + setting turned off. + ValidationError. 
In 'HasElementXBeforeElementY' rule, 'X' value + is equal to 'Y' value. + ValidationError. The 'IsEqualToOrdering' rule have empty values. + ValidationError. The 'IsEqualToOrdering' rule comes after + 'HasElementXAtPositionY' where element 'X' is present at + position 'Y' in 'IsEqualToOrdering' rule. + ValidationError. Less than 2 items are present. + ValidationError. Answer choices are empty or duplicate. + """ + multi_item_value = ( + self.customization_args + ['allowMultipleItemsInSamePosition'].value) + ele_x_at_y_rules = [] + rule_spec_till_now: List[RuleSpecDict] = [] + equal_ordering_one_at_incorec_posn = [] + + # Here we use cast because we are narrowing down the type from + # various allowed cust. arg types to 'List[SubtitledHtml]', + # and here we are sure that the type is always going to be + # List[SubtitledHtml] because 'DragAndDrop' customization + # args always contains 'choices' key with List[SubtitledHtml] + # type of values. + choices = cast( + List[SubtitledHtml], + self.customization_args['choices'].value + ) + if len(choices) < 2: + raise utils.ValidationError( + 'There should be atleast 2 values inside DragAndDrop ' + 'interaction.' + ) + + self._validates_choices_should_be_unique_and_nonempty(choices) + + for ans_group_index, answer_group in enumerate(self.answer_groups): + for rule_spec_index, rule_spec in enumerate( + answer_group.rule_specs + ): + # Rule should not be duplicate. + if rule_spec.to_dict() in rule_spec_till_now and strict: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of answer group ' + f'\'{ans_group_index}\' of DragAndDropInput ' + f'interaction is already present.' 
+ ) + rule_spec_till_now.append(rule_spec.to_dict()) + + if ( + strict and + not multi_item_value and ( + rule_spec.rule_type == + 'IsEqualToOrderingWithOneItemAtIncorrectPosition') + ): + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' ' + f'of answer group \'{ans_group_index}\' ' + f'having rule type - IsEqualToOrderingWith' + f'OneItemAtIncorrectPosition should not ' + f'be there when the ' + f'multiple items in same position ' + f'setting is turned off ' + f'in DragAndDropSortInput interaction.' + ) + + # Multiple items cannot be in the same place iff the + # `allow multiple items at same place` setting is turned off. + if not multi_item_value and strict: + for ele in rule_spec.inputs['x']: + if len(ele) > 1: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of ' + f'answer group \'{ans_group_index}\' ' + f'have multiple items at same place ' + f'when multiple items in same ' + f'position settings is turned off ' + f'in DragAndDropSortInput interaction.' + ) + + if ( + rule_spec.rule_type == 'HasElementXBeforeElementY' and + rule_spec.inputs['x'] == rule_spec.inputs['y'] and + strict + ): + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\' of ' + f'answer group \'{ans_group_index}\', ' + f'the value 1 and value 2 cannot be ' + f'same when rule type is ' + f'HasElementXBeforeElementY ' + f'of DragAndDropSortInput interaction.' + ) + + if rule_spec.rule_type == 'HasElementXAtPositionY': + element = rule_spec.inputs['x'] + position = rule_spec.inputs['y'] + ele_x_at_y_rules.append( + {'element': element, 'position': position} + ) + + if ( + rule_spec.rule_type == + 'IsEqualToOrderingWithOneItemAtIncorrectPosition' + ): + equal_ordering_one_at_incorec_posn.append( + rule_spec.inputs['x'] + ) + + if rule_spec.rule_type == 'IsEqualToOrdering': + # `IsEqualToOrdering` rule should not have empty values. 
+ if len(rule_spec.inputs['x']) <= 0: + raise utils.ValidationError( + f'The rule \'{rule_spec_index}\'of ' + f'answer group \'{ans_group_index}\', ' + f'having rule type IsEqualToOrdering ' + f'should not have empty values.' + ) + if strict: + # `IsEqualToOrdering` rule should always come before + # `HasElementXAtPositionY` where element `X` is present + # at position `Y` in `IsEqualToOrdering` rule. + for ele in ele_x_at_y_rules: + ele_position = ele['position'] + ele_element = ele['element'] + + if ele_position > len(rule_spec.inputs['x']): + continue + + rule_choice = rule_spec.inputs['x'][ + ele_position - 1] + for choice in rule_choice: + if choice == ele_element: + raise utils.ValidationError( + f'Rule - {rule_spec_index} of ' + f'answer group {ans_group_index} ' + f'will never be match ' + f'because it is made redundant by the ' + f'HasElementXAtPositionY rule above.' + ) + # `IsEqualToOrdering` should always come before + # `IsEqualToOrderingWithOneItemAtIncorrectPosition` when + # they are off by one value. + item_to_layer_idx = {} + for layer_idx, layer in enumerate( + rule_spec.inputs['x'] + ): + for item in layer: + item_to_layer_idx[item] = layer_idx + + for ele in equal_ordering_one_at_incorec_posn: + wrong_positions = 0 + for layer_idx, layer in enumerate(ele): + for item in layer: + if layer_idx != item_to_layer_idx[item]: + wrong_positions += 1 + if wrong_positions <= 1: + raise utils.ValidationError( + f'Rule - {rule_spec_index} of answer ' + f'group {ans_group_index} will never ' + f'be match because it is made ' + f'redundant by the IsEqualToOrdering' + f'WithOneItemAtIncorrectPosition ' + f'rule above.' + ) + + def _validate_text_input(self, strict: bool = False) -> None: + """Validates the TextInput interaction. + + Args: + strict: bool. If True, the exploration is assumed to be published. + + Raises: + ValidationError. Text input height is not >= 1 and <= 10. + ValidationError. Duplicate rules are present. + ValidationError. 
The 'Contains' rule comes before another 'Contains' + rule, where 'Contains' rule string is a substring of other + rules string. + ValidationError. The 'Contains' rule comes before 'StartsWith' + rule, where 'Contains' rule string is a substring of other + rules string. + ValidationError. The 'Contains' rule comes before 'Equals' + rule, where 'Contains' rule string is a substring of other + rules string. + ValidationError. The 'StartsWith' rule comes before the 'Equals' + rule where the 'StartsWith' rule string is a prefix of other + rules string. + ValidationError. The 'StartsWith' rule comes before the another + 'StartsWith' rule where the 'StartsWith' rule string is + a prefix of other rules string. + """ + rule_spec_till_now: List[RuleSpecDict] = [] + seen_strings_contains: List[List[str]] = [] + seen_strings_startswith: List[List[str]] = [] + + # Here we use cast because we are narrowing down the type from + # various allowed cust. arg types to 'int', and here we are sure + # that the type is always going to be int because 'TextInput' + # customization args always contain 'rows' key with int type + # of values. + rows_value = cast(int, self.customization_args['rows'].value) + if rows_value < 1 or rows_value > 10: + raise utils.ValidationError( + 'Rows value in Text interaction should be between 1 and 10.' + ) + + for ans_group_idx, answer_group in enumerate(self.answer_groups): + for rule_spec_idx, rule_spec in enumerate(answer_group.rule_specs): + # Rule should not be duplicate. + if rule_spec.to_dict() in rule_spec_till_now and strict: + raise utils.ValidationError( + f'The rule \'{rule_spec_idx}\' of answer group ' + f'\'{ans_group_idx}\' of TextInput interaction ' + f'is already present.' 
+ ) + rule_spec_till_now.append(rule_spec.to_dict()) + + if rule_spec.rule_type == 'Contains': + if not strict: + continue + rule_values = rule_spec.inputs['x']['normalizedStrSet'] + # `Contains` should always come after another + # `Contains` rule where the first contains rule + # strings is a substring of the other contains + # rule strings. + for contain_rule_ele in seen_strings_contains: + for contain_rule_string in contain_rule_ele: + for rule_value in rule_values: + if contain_rule_string in rule_value: + raise utils.ValidationError( + f'Rule - \'{rule_spec_idx}\' of answer ' + f'group - \'{ans_group_idx}\' having ' + f'rule type \'{rule_spec.rule_type}\' ' + f'will never be matched because it ' + f'is made redundant by the above ' + f'\'contains\' rule.' + ) + + seen_strings_contains.append( + rule_spec.inputs['x']['normalizedStrSet']) + + if rule_spec.rule_type == 'StartsWith': + if not strict: + continue + rule_values = rule_spec.inputs['x']['normalizedStrSet'] + # `StartsWith` rule should always come after another + # `StartsWith` rule where the first starts-with string + # is the prefix of the other starts-with string. + for start_with_rule_ele in seen_strings_startswith: + for start_with_rule_string in start_with_rule_ele: + for rule_value in rule_values: + if rule_value.startswith( + start_with_rule_string + ): + raise utils.ValidationError( + f'Rule - \'{rule_spec_idx}\' of answer ' + f'group - \'{ans_group_idx}\' having ' + f'rule type \'{rule_spec.rule_type}\' ' + f'will never be matched because it ' + f'is made redundant by the above ' + f'\'StartsWith\' rule.' + ) + + # `Contains` should always come after `StartsWith` rule + # where the contains rule strings is a substring + # of the `StartsWith` rule string. 
+ for contain_rule_ele in seen_strings_contains: + for contain_rule_string in contain_rule_ele: + for rule_value in rule_values: + if contain_rule_string in rule_value: + raise utils.ValidationError( + f'Rule - \'{rule_spec_idx}\' of answer ' + f'group - \'{ans_group_idx}\' having ' + f'rule type \'{rule_spec.rule_type}\' ' + f'will never be matched because it ' + f'is made redundant by the above ' + f'\'contains\' rule.' + ) + + seen_strings_startswith.append(rule_values) + + if rule_spec.rule_type == 'Equals': + if not strict: + continue + rule_values = rule_spec.inputs['x']['normalizedStrSet'] + # `Contains` should always come after `Equals` rule + # where the contains rule strings is a substring + # of the `Equals` rule string. + for contain_rule_ele in seen_strings_contains: + for contain_rule_string in contain_rule_ele: + for rule_value in rule_values: + if contain_rule_string in rule_value: + raise utils.ValidationError( + f'Rule - \'{rule_spec_idx}\' of answer ' + f'group - \'{ans_group_idx}\' having ' + f'rule type \'{rule_spec.rule_type}\' ' + f'will never be matched because it ' + f'is made redundant by the above ' + f'\'contains\' rule.' + ) + + # `Startswith` should always come after the `Equals` + # rule where a `starts-with` string is a prefix of the + # `Equals` rule's string. + for start_with_rule_ele in seen_strings_startswith: + for start_with_rule_string in start_with_rule_ele: + for rule_value in rule_values: + if rule_value.startswith( + start_with_rule_string + ): + raise utils.ValidationError( + f'Rule - \'{rule_spec_idx}\' of answer ' + f'group - \'{ans_group_idx}\' having ' + f'rule type \'{rule_spec.rule_type}\' ' + f'will never be matched because it ' + f'is made redundant by the above ' + f'\'StartsWith\' rule.' 
+ ) + + def validate( + self, + exp_param_specs_dict: Dict[str, param_domain.ParamSpec], + *, + tagged_skill_misconception_id_required: bool = False, + strict: bool = False + ) -> None: + """Validates various properties of the InteractionInstance. + + Args: + exp_param_specs_dict: dict. A dict of specified parameters used in + the exploration. Keys are parameter names and values are + ParamSpec value objects with an object type property(obj_type). + Is used to validate AnswerGroup objects. + tagged_skill_misconception_id_required: bool. The 'tagged_skill_ + misconception_id' is required or not. + strict: bool. Tells if the validation is strict or not. + + Raises: + ValidationError. One or more attributes of the InteractionInstance + are invalid. + """ + if not isinstance(self.id, str): + raise utils.ValidationError( + 'Expected interaction id to be a string, received %s' % + self.id) + try: + interaction = interaction_registry.Registry.get_interaction_by_id( + self.id) + except KeyError as e: + raise utils.ValidationError( + 'Invalid interaction id: %s' % self.id) from e + + self._validate_customization_args() + + if not isinstance(self.answer_groups, list): + raise utils.ValidationError( + 'Expected answer groups to be a list, received %s.' 
+ % self.answer_groups) + if not self.is_terminal and self.default_outcome is None: + raise utils.ValidationError( + 'Non-terminal interactions must have a default outcome.') + if self.is_terminal and self.default_outcome is not None: + raise utils.ValidationError( + 'Terminal interactions must not have a default outcome.') + if self.is_terminal and self.answer_groups: + raise utils.ValidationError( + 'Terminal interactions must not have any answer groups.') + if self.is_linear and self.answer_groups: + raise utils.ValidationError( + 'Linear interactions must not have any answer groups.') + + for answer_group in self.answer_groups: + answer_group.validate( + interaction, exp_param_specs_dict, + tagged_skill_misconception_id_required=( + tagged_skill_misconception_id_required)) + if self.default_outcome is not None: + self.default_outcome.validate() + + if not isinstance(self.hints, list): + raise utils.ValidationError( + 'Expected hints to be a list, received %s' + % self.hints) + for hint in self.hints: + hint.validate() + + if self.solution: + self.solution.validate(self.id) + + # TODO(#16236): Find a way to encode these checks more declaratively. + # Conceptually the validation code should go in each interaction + # and as inside the interaction the code is very declarative we need + # to figure out a way to put these validations following the + # same format. + # TODO(#16490): Move the validations with strict mode together in every + # interaction. 
+ interaction_id_to_strict_validation_func = { + 'NumericInput': self._validate_numeric_input, + 'FractionInput': self._validate_fraction_input, + 'NumberWithUnits': self._validate_number_with_units_input, + 'MultipleChoiceInput': self._validate_multi_choice_input, + 'ItemSelectionInput': self._validate_item_selec_input, + 'DragAndDropSortInput': self._validate_drag_and_drop_input, + 'TextInput': self._validate_text_input + } + interaction_id_to_non_strict_validation_func = { + 'Continue': self._validate_continue_interaction, + 'EndExploration': self._validate_end_interaction + } + + if self.id in interaction_id_to_strict_validation_func: + interaction_id_to_strict_validation_func[self.id](strict) + + elif self.id in interaction_id_to_non_strict_validation_func: + interaction_id_to_non_strict_validation_func[self.id]() + + def _validate_customization_args(self) -> None: + """Validates the customization arguments keys and values using + customization_args_util.validate_customization_args_and_values(). + """ + # Because validate_customization_args_and_values() takes in + # customization argument values that are dictionaries, we first convert + # the InteractionCustomizationArg domain objects into dictionaries + # before passing it to the method. + + # First, do some basic validation. + if not isinstance(self.customization_args, dict): + raise utils.ValidationError( + 'Expected customization args to be a dict, received %s' + % self.customization_args) + + # customization_args_dict here indicates a dict that maps customization + # argument names to a customization argument dict, the dict + # representation of InteractionCustomizationArg. 
+ customization_args_dict = {} + if self.id: + for ca_name in self.customization_args: + try: + customization_args_dict[ca_name] = ( + self.customization_args[ + ca_name].to_customization_arg_dict() + ) + except AttributeError as e: + raise utils.ValidationError( + 'Expected customization arg value to be a ' + 'InteractionCustomizationArg domain object, ' + 'received %s' % self.customization_args[ca_name] + ) from e + + # Here, we are asserting that interaction_id is never going to be None, + # Because this is a private method and before calling this method we are + # already checking if interaction_id exists or not. + assert self.id is not None + interaction = interaction_registry.Registry.get_interaction_by_id( + self.id) + customization_args_util.validate_customization_args_and_values( + 'interaction', self.id, customization_args_dict, + interaction.customization_arg_specs) self.customization_args = ( InteractionInstance @@ -751,7 +2105,11 @@ def _validate_customization_args(self): ) @classmethod - def create_default_interaction(cls, default_dest_state_name): + def create_default_interaction( + cls, + default_dest_state_name: Optional[str], + content_id_for_default_outcome: str + ) -> InteractionInstance: """Create a default InteractionInstance domain object: - customization_args: empty dictionary; - answer_groups: empty list; @@ -760,7 +2118,10 @@ def create_default_interaction(cls, default_dest_state_name): - confirmed_unclassified_answers: empty list; Args: - default_dest_state_name: str. The default destination state. + default_dest_state_name: str|None. The default destination state, or + None if no default destination is provided. + content_id_for_default_outcome: str. The content id for the default + outcome. Returns: InteractionInstance. 
The corresponding InteractionInstance domain @@ -768,79 +2129,34 @@ def create_default_interaction(cls, default_dest_state_name): """ default_outcome = Outcome( default_dest_state_name, + None, SubtitledHtml.create_default_subtitled_html( - feconf.DEFAULT_OUTCOME_CONTENT_ID), False, {}, None, None) + content_id_for_default_outcome), False, [], None, None) return cls( cls._DEFAULT_INTERACTION_ID, {}, [], default_outcome, [], [], None) - def get_all_html_content_strings(self): - """Get all html content strings in the interaction. - - Returns: - list(str). The list of all html content strings in the interaction. - """ - html_list = [] - - for answer_group in self.answer_groups: - html_list += answer_group.get_all_html_content_strings(self.id) - - if self.default_outcome: - default_outcome_html = self.default_outcome.feedback.html - html_list += [default_outcome_html] - - for hint in self.hints: - hint_html = hint.hint_content.html - html_list += [hint_html] - - if self.id is None: - return html_list - - interaction = ( - interaction_registry.Registry.get_interaction_by_id( - self.id)) - - if self.solution and interaction.can_have_solution: - solution_html = self.solution.explanation.html - html_list += [solution_html] - html_field_types_to_rule_specs = ( - rules_registry.Registry.get_html_field_types_to_rule_specs()) - - if self.solution.correct_answer: - for html_type in html_field_types_to_rule_specs.keys(): - if html_type == interaction.answer_type: - if ( - html_type == - feconf.ANSWER_TYPE_LIST_OF_SETS_OF_HTML): - for value in self.solution.correct_answer: - html_list += value - elif html_type == feconf.ANSWER_TYPE_SET_OF_HTML: - for value in self.solution.correct_answer: - html_list += [value] - else: - raise Exception( - 'The solution does not have a valid ' - 'correct_answer type.') - - for ca_name in self.customization_args: - html_list += self.customization_args[ca_name].get_html() - - return html_list - @staticmethod - def 
convert_html_in_interaction(interaction_dict, conversion_fn): + def convert_html_in_interaction( + interaction_dict: InteractionInstanceDict, + ca_specs_dict: List[domain.CustomizationArgSpecsDict], + conversion_fn: Callable[[str], str] + ) -> InteractionInstanceDict: """Checks for HTML fields in the interaction and converts it according to the conversion function. Args: interaction_dict: dict. The interaction dict. + ca_specs_dict: list(dict). The customization argument spec dicts. conversion_fn: function. The function to be used for converting the HTML. Returns: dict. The converted interaction dict. """ - def wrapped_conversion_fn(value, schema_obj_type): + def wrapped_conversion_fn( + value: SubtitledHtml, schema_obj_type: str + ) -> SubtitledHtml: """Applies the conversion function to the SubtitledHtml values. Args: @@ -859,26 +2175,22 @@ def wrapped_conversion_fn(value, schema_obj_type): value.html = conversion_fn(value.html) return value - interaction_id = interaction_dict['id'] - # Convert the customization_args to a dictionary of customization arg # name to InteractionCustomizationArg, so that we can utilize # InteractionCustomizationArg helper functions. # Then, convert back to original dict format afterwards, at the end.
customization_args = ( - InteractionInstance - .convert_customization_args_dict_to_customization_args( - interaction_id, - interaction_dict['customization_args']) + InteractionCustomizationArg + .convert_cust_args_dict_to_cust_args_based_on_specs( + interaction_dict['customization_args'], + ca_specs_dict) ) - ca_specs = interaction_registry.Registry.get_interaction_by_id( - interaction_id).customization_arg_specs - for ca_spec in ca_specs: - ca_spec_name = ca_spec.name + for ca_spec in ca_specs_dict: + ca_spec_name = ca_spec['name'] customization_args[ca_spec_name].value = ( InteractionCustomizationArg.traverse_by_schema_and_convert( - ca_spec.schema, + ca_spec['schema'], customization_args[ca_spec_name].value, wrapped_conversion_fn ) @@ -897,7 +2209,10 @@ def wrapped_conversion_fn(value, schema_obj_type): @staticmethod def convert_customization_args_dict_to_customization_args( - interaction_id, customization_args_dict): + interaction_id: Optional[str], + customization_args_dict: CustomizationArgsDictType, + state_schema_version: int = feconf.CURRENT_STATE_SCHEMA_VERSION + ) -> Dict[str, InteractionCustomizationArg]: """Converts customization arguments dictionary to customization arguments. This is done by converting each customization argument to a InteractionCustomizationArg domain object. @@ -908,34 +2223,47 @@ def convert_customization_args_dict_to_customization_args( argument name to a customization argument dict, which is a dict of the single key 'value' to the value of the customization argument. + state_schema_version: int. The state schema version. Returns: dict. A dictionary of customization argument names to the InteractionCustomizationArg domain object's. 
""" - if interaction_id is None: + all_interaction_ids = ( + interaction_registry.Registry.get_all_interaction_ids() + ) + interaction_id_is_valid = interaction_id not in all_interaction_ids + if interaction_id_is_valid or interaction_id is None: return {} - ca_specs = interaction_registry.Registry.get_interaction_by_id( - interaction_id).customization_arg_specs - customization_args = { - spec.name: InteractionCustomizationArg.from_customization_arg_dict( - customization_args_dict[spec.name], - spec.schema - ) for spec in ca_specs - } + ca_specs_dict = ( + interaction_registry.Registry + .get_all_specs_for_state_schema_version( + state_schema_version, + can_fetch_latest_specs=True + )[interaction_id]['customization_arg_specs'] + ) - return customization_args + return ( + InteractionCustomizationArg + .convert_cust_args_dict_to_cust_args_based_on_specs( + customization_args_dict, ca_specs_dict)) -class InteractionCustomizationArg: +class InteractionCustomizationArg(translation_domain.BaseTranslatableObject): """Object representing an interaction's customization argument. Any SubtitledHtml or SubtitledUnicode values in the customization argument value are represented as their respective domain objects here, rather than a SubtitledHtml dict or SubtitledUnicode dict. """ - def __init__(self, value, schema): + def __init__( + self, + value: UnionOfCustomizationArgsDictValues, + schema: Dict[ + str, Union[SubtitledHtmlDict, SubtitledUnicodeDict, str] + ] + ) -> None: """Initializes a InteractionCustomizationArg domain object. Args: @@ -945,13 +2273,60 @@ def __init__(self, value, schema): self.value = value self.schema = schema - def to_customization_arg_dict(self): + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the interaction customization args. + + Returns: + translatable_contents_collection: TranslatableContentsCollection. 
+ An instance of TranslatableContentsCollection class. + """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + subtitled_htmls = self.get_subtitled_html() + for subtitled_html in subtitled_htmls: + translatable_contents_collection.add_translatable_field( + subtitled_html.content_id, + translation_domain.ContentType.CUSTOMIZATION_ARG, + translation_domain.TranslatableContentFormat.HTML, + subtitled_html.html, + kwargs['interaction_id']) + + subtitled_unicodes = self.get_subtitled_unicode() + for subtitled_unicode in subtitled_unicodes: + translatable_contents_collection.add_translatable_field( + subtitled_unicode.content_id, + translation_domain.ContentType.CUSTOMIZATION_ARG, + translation_domain.TranslatableContentFormat.UNICODE_STRING, + subtitled_unicode.unicode_str, + kwargs['interaction_id']) + return translatable_contents_collection + + def to_customization_arg_dict(self) -> Dict[ + str, UnionOfCustomizationArgsDictValues + ]: """Converts a InteractionCustomizationArgument domain object to a customization argument dictionary. This is done by traversing the customization argument schema, and converting SubtitledUnicode to unicode and SubtitledHtml to html where appropriate. """ - def convert_content_to_dict(ca_value, unused_schema_obj_type): + @overload + def convert_content_to_dict( + ca_value: SubtitledHtml, unused_schema_obj_type: str + ) -> SubtitledHtmlDict: ... + + @overload + def convert_content_to_dict( + ca_value: SubtitledUnicode, unused_schema_obj_type: str + ) -> SubtitledUnicodeDict: ... + + def convert_content_to_dict( + ca_value: Union[SubtitledHtml, SubtitledUnicode], + unused_schema_obj_type: str + ) -> Union[SubtitledHtmlDict, SubtitledUnicodeDict]: """Conversion function used to convert SubtitledHtml to SubtitledHtml dicts and SubtitledUnicode to SubtitledUnicode dicts. 
@@ -976,8 +2351,15 @@ def convert_content_to_dict(ca_value, unused_schema_obj_type): ) } + # Here we use type Any because argument 'ca_schema' can accept schema + # dictionaries that can contain values of types str, List, Dict and other + # types too. @classmethod - def from_customization_arg_dict(cls, ca_dict, ca_schema): + def from_customization_arg_dict( + cls, + ca_dict: Dict[str, UnionOfCustomizationArgsDictValues], + ca_schema: Dict[str, Any] + ) -> InteractionCustomizationArg: """Converts a customization argument dictionary to an InteractionCustomizationArgument domain object. This is done by traversing the customization argument schema, and converting @@ -993,7 +2375,21 @@ def from_customization_arg_dict(cls, ca_dict, ca_schema): InteractionCustomizationArg. The customization argument domain object. """ - def convert_content_to_domain_obj(ca_value, schema_obj_type): + @overload + def convert_content_to_domain_obj( + ca_value: Dict[str, str], + schema_obj_type: Literal['SubtitledUnicode'] + ) -> SubtitledUnicode: ... + + @overload + def convert_content_to_domain_obj( + ca_value: Dict[str, str], + schema_obj_type: Literal['SubtitledHtml'] + ) -> SubtitledHtml: ... + + def convert_content_to_domain_obj( + ca_value: Dict[str, str], schema_obj_type: str + ) -> Union[SubtitledHtml, SubtitledUnicode]: """Conversion function used to convert SubtitledHtml dicts to SubtitledHtml and SubtitledUnicode dicts to SubtitledUnicode. 
@@ -1010,12 +2406,15 @@ def convert_content_to_domain_obj(ca_value, schema_obj_type): schema_obj_type == schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE ): - return SubtitledUnicode( + class_obj: Union[ + SubtitledUnicode, SubtitledHtml + ] = SubtitledUnicode( ca_value['content_id'], ca_value['unicode_str']) if schema_obj_type == schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML: - return SubtitledHtml( + class_obj = SubtitledHtml( ca_value['content_id'], ca_value['html']) + return class_obj ca_value = InteractionCustomizationArg.traverse_by_schema_and_convert( ca_schema, @@ -1025,11 +2424,11 @@ def convert_content_to_domain_obj(ca_value, schema_obj_type): return cls(ca_value, ca_schema) - def get_subtitled_unicode(self): + def get_subtitled_unicode(self) -> List[SubtitledUnicode]: """Get all SubtitledUnicode(s) in the customization argument. Returns: - list(str). A list of SubtitledUnicode. + list(SubtitledUnicode). A list of SubtitledUnicode. """ return InteractionCustomizationArg.traverse_by_schema_and_get( self.schema, @@ -1038,11 +2437,11 @@ def get_subtitled_unicode(self): lambda x: x ) - def get_subtitled_html(self): + def get_subtitled_html(self) -> List[SubtitledHtml]: """Get all SubtitledHtml(s) in the customization argument. Returns: - list(str). A list of SubtitledHtml. + list(SubtitledHtml). A list of SubtitledHtml. """ return InteractionCustomizationArg.traverse_by_schema_and_get( self.schema, @@ -1051,7 +2450,7 @@ def get_subtitled_html(self): lambda x: x ) - def get_content_ids(self): + def get_content_ids(self) -> List[str]: """Get all content_ids from SubtitledHtml and SubtitledUnicode in the customization argument. @@ -1066,25 +2465,11 @@ def get_content_ids(self): lambda x: x.content_id ) - def get_html(self): - """Get all html from SubtitledHtml in the customization argument. - - Returns: - list(str). All html strings in the customization argument. 
- """ - - return InteractionCustomizationArg.traverse_by_schema_and_get( - self.schema, - self.value, - [schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML], - lambda x: x.html - ) - - def validate_subtitled_html(self): + def validate_subtitled_html(self) -> None: """Calls the validate method on all SubtitledHtml domain objects in the customization arguments. """ - def validate_html(subtitled_html): + def validate_html(subtitled_html: SubtitledHtml) -> None: """A dummy value extractor that calls the validate method on the passed SubtitledHtml domain object. """ @@ -1097,8 +2482,15 @@ def validate_html(subtitled_html): validate_html ) + # Here we use type Any because the argument `schema` can accept + # schema dicts and those schema dictionaries can have nested dict + # structure. @staticmethod - def traverse_by_schema_and_convert(schema, value, conversion_fn): + def traverse_by_schema_and_convert( + schema: Dict[str, Any], + value: _GenericCustomizationArgType, + conversion_fn: AcceptableConversionFnType + ) -> _GenericCustomizationArgType: """Helper function that recursively traverses an interaction customization argument spec to locate any SubtitledHtml or SubtitledUnicode objects, and applies a conversion function to the @@ -1128,9 +2520,15 @@ def traverse_by_schema_and_convert(schema, value, conversion_fn): schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE) if is_subtitled_html_spec or is_subtitled_unicode_spec: - value = conversion_fn(value, schema['obj_type']) + # Here we use MyPy ignore because here we are assigning + # Optional[str] type to generic type variable, and passing + # generic variable to conversion function. + value = conversion_fn(value, schema['obj_type']) # type: ignore[assignment, arg-type] elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST: - value = [ + assert isinstance(value, list) + # Here we use MyPy ignore because here we are assigning List type + # to generic type variable. 
+ value = [ # type: ignore[assignment] InteractionCustomizationArg.traverse_by_schema_and_convert( schema['items'], value_element, @@ -1138,6 +2536,7 @@ def traverse_by_schema_and_convert(schema, value, conversion_fn): ) for value_element in value ] elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT: + assert isinstance(value, dict) for property_spec in schema['properties']: name = property_spec['name'] value[name] = ( @@ -1149,9 +2548,18 @@ def traverse_by_schema_and_convert(schema, value, conversion_fn): return value + # TODO(#15982): Here we use type Any because `value` argument can accept + # values of customization arg and that values can be of type Dict[Dict[..]], + # str, int, bool and other types too, and for argument `schema` we used Any + # type because values in schema dictionary can be of type str, List, Dict + # and other types too. @staticmethod def traverse_by_schema_and_get( - schema, value, obj_types_to_search_for, value_extractor): + schema: Dict[str, Any], + value: Any, + obj_types_to_search_for: List[str], + value_extractor: Union[Callable[..., str], Callable[..., None]] + ) -> List[Any]: """Recursively traverses an interaction customization argument spec to locate values with schema obj_type in obj_types_to_search_for, and extracting the value using a value_extractor function. @@ -1204,60 +2612,70 @@ def traverse_by_schema_and_get( return result + @staticmethod + def convert_cust_args_dict_to_cust_args_based_on_specs( + ca_dict: CustomizationArgsDictType, + ca_specs_dict: List[domain.CustomizationArgSpecsDict] + ) -> Dict[str, InteractionCustomizationArg]: + """Converts customization arguments dictionary to customization + arguments. This is done by converting each customization argument to a + InteractionCustomizationArg domain object. -class Outcome: - """Value object representing an outcome of an interaction. An outcome - consists of a destination state, feedback to show the user, and any - parameter changes. 
- """ - - def to_dict(self): - """Returns a dict representing this Outcome domain object. + Args: + ca_dict: dict. A dictionary of customization + argument name to a customization argument dict, which is a dict + of the single key 'value' to the value of the customization + argument. + ca_specs_dict: dict. A dictionary of customization argument specs. Returns: - dict. A dict, mapping all fields of Outcome instance. + dict. A dictionary of customization argument names to the + InteractionCustomizationArg domain object's. """ return { - 'dest': self.dest, - 'feedback': self.feedback.to_dict(), - 'labelled_as_correct': self.labelled_as_correct, - 'param_changes': [ - param_change.to_dict() for param_change in self.param_changes], - 'refresher_exploration_id': self.refresher_exploration_id, - 'missing_prerequisite_skill_id': self.missing_prerequisite_skill_id + spec['name']: ( + InteractionCustomizationArg.from_customization_arg_dict( + ca_dict[spec['name']], + spec['schema'] + ) + ) for spec in ca_specs_dict } - @classmethod - def from_dict(cls, outcome_dict): - """Return a Outcome domain object from a dict. - Args: - outcome_dict: dict. The dict representation of Outcome object. +class OutcomeDict(TypedDict): + """Dictionary representing the Outcome object.""" + + dest: Optional[str] + dest_if_really_stuck: Optional[str] + feedback: SubtitledHtmlDict + labelled_as_correct: bool + param_changes: List[param_domain.ParamChangeDict] + refresher_exploration_id: Optional[str] + missing_prerequisite_skill_id: Optional[str] - Returns: - Outcome. The corresponding Outcome domain object. 
- """ - feedback = SubtitledHtml.from_dict(outcome_dict['feedback']) - feedback.validate() - return cls( - outcome_dict['dest'], - feedback, - outcome_dict['labelled_as_correct'], - [param_domain.ParamChange( - param_change['name'], param_change['generator_id'], - param_change['customization_args']) - for param_change in outcome_dict['param_changes']], - outcome_dict['refresher_exploration_id'], - outcome_dict['missing_prerequisite_skill_id'] - ) + +class Outcome(translation_domain.BaseTranslatableObject): + """Value object representing an outcome of an interaction. An outcome + consists of a destination state, feedback to show the user, and any + parameter changes. + """ def __init__( - self, dest, feedback, labelled_as_correct, param_changes, - refresher_exploration_id, missing_prerequisite_skill_id): + self, + dest: Optional[str], + dest_if_really_stuck: Optional[str], + feedback: SubtitledHtml, + labelled_as_correct: bool, + param_changes: List[param_domain.ParamChange], + refresher_exploration_id: Optional[str], + missing_prerequisite_skill_id: Optional[str] + ) -> None: """Initializes a Outcome domain object. Args: dest: str. The name of the destination state. + dest_if_really_stuck: str or None. The name of the optional state + to redirect the learner to strengthen their concepts. feedback: SubtitledHtml. Feedback to give to the user if this rule is triggered. labelled_as_correct: bool. Whether this outcome has been labelled @@ -1276,6 +2694,9 @@ def __init__( # Id of the destination state. # TODO(sll): Check that this state actually exists. self.dest = dest + # An optional destination state to redirect the learner to + # strengthen their concepts corresponding to a particular card. + self.dest_if_really_stuck = dest_if_really_stuck # Feedback to give the reader if this rule is triggered. self.feedback = feedback # Whether this outcome has been labelled by the creator as @@ -1292,7 +2713,78 @@ def __init__( # when the learner receives this outcome. 
self.missing_prerequisite_skill_id = missing_prerequisite_skill_id - def validate(self): + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the outcome. + + Returns: + translatable_contents_collection: TranslatableContentsCollection. + An instance of TranslatableContentsCollection class. + """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + translatable_contents_collection.add_translatable_field( + self.feedback.content_id, + translation_domain.ContentType.FEEDBACK, + translation_domain.TranslatableContentFormat.HTML, + self.feedback.html) + return translatable_contents_collection + + def to_dict(self) -> OutcomeDict: + """Returns a dict representing this Outcome domain object. + + Returns: + dict. A dict, mapping all fields of Outcome instance. + """ + return { + 'dest': self.dest, + 'dest_if_really_stuck': self.dest_if_really_stuck, + 'feedback': self.feedback.to_dict(), + 'labelled_as_correct': self.labelled_as_correct, + 'param_changes': [ + param_change.to_dict() for param_change in self.param_changes], + 'refresher_exploration_id': self.refresher_exploration_id, + 'missing_prerequisite_skill_id': self.missing_prerequisite_skill_id + } + + # TODO(#16467): Remove `validate` argument after validating all Question + # states by writing a migration and audit job. As the validation for + # outcome is common between Exploration and Question and the Question + # data is not yet migrated, we do not want to call the validations + # while we load the Question. + @classmethod + def from_dict( + cls, outcome_dict: OutcomeDict, validate: bool = True + ) -> Outcome: + """Return a Outcome domain object from a dict. + + Args: + outcome_dict: dict. The dict representation of Outcome object. + validate: bool. False, when the validations should not be called. + + Returns: + Outcome. 
The corresponding Outcome domain object. + """ + feedback = SubtitledHtml.from_dict(outcome_dict['feedback']) + if validate: + feedback.validate() + return cls( + outcome_dict['dest'], + outcome_dict['dest_if_really_stuck'], + feedback, + outcome_dict['labelled_as_correct'], + [param_domain.ParamChange( + param_change['name'], param_change['generator_id'], + param_change['customization_args']) + for param_change in outcome_dict['param_changes']], + outcome_dict['refresher_exploration_id'], + outcome_dict['missing_prerequisite_skill_id'] + ) + + def validate(self) -> None: """Validates various properties of the Outcome. Raises: @@ -1325,7 +2817,9 @@ def validate(self): 'received %s' % self.refresher_exploration_id) @staticmethod - def convert_html_in_outcome(outcome_dict, conversion_fn): + def convert_html_in_outcome( + outcome_dict: OutcomeDict, conversion_fn: Callable[[str], str] + ) -> OutcomeDict: """Checks for HTML fields in the outcome and converts it according to the conversion function. @@ -1342,10 +2836,19 @@ def convert_html_in_outcome(outcome_dict, conversion_fn): return outcome_dict +class VoiceoverDict(TypedDict): + """Dictionary representing the Voiceover object.""" + + filename: str + file_size_bytes: int + needs_update: bool + duration_secs: float + + class Voiceover: """Value object representing an voiceover.""" - def to_dict(self): + def to_dict(self) -> VoiceoverDict: """Returns a dict representing this Voiceover domain object. Returns: @@ -1359,7 +2862,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, voiceover_dict): + def from_dict(cls, voiceover_dict: VoiceoverDict) -> Voiceover: """Return a Voiceover domain object from a dict. 
Args: @@ -1375,7 +2878,13 @@ def from_dict(cls, voiceover_dict): voiceover_dict['needs_update'], voiceover_dict['duration_secs']) - def __init__(self, filename, file_size_bytes, needs_update, duration_secs): + def __init__( + self, + filename: str, + file_size_bytes: int, + needs_update: bool, + duration_secs: float + ) -> None: """Initializes a Voiceover domain object. Args: @@ -1398,7 +2907,7 @@ def __init__(self, filename, file_size_bytes, needs_update, duration_secs): # float. The duration in seconds for the voiceover recording. self.duration_secs = duration_secs - def validate(self): + def validate(self) -> None: """Validates properties of the Voiceover. Raises: @@ -1433,7 +2942,7 @@ def validate(self): raise utils.ValidationError( 'Expected needs_update to be a bool, received %s' % self.needs_update) - if not isinstance(self.duration_secs, float): + if not isinstance(self.duration_secs, (float, int)): raise utils.ValidationError( 'Expected duration_secs to be a float, received %s' % self.duration_secs) @@ -1444,409 +2953,10 @@ def validate(self): self.duration_secs) -class WrittenTranslation: - """Value object representing a written translation for a content. - - Here, "content" could mean a string or a list of strings. The latter arises, - for example, in the case where we are checking for equality of a learner's - answer against a given set of strings. In such cases, the number of strings - in the translation of the original object may not be the same as the number - of strings in the original object. 
- """ - - DATA_FORMAT_HTML = 'html' - DATA_FORMAT_UNICODE_STRING = 'unicode' - DATA_FORMAT_SET_OF_NORMALIZED_STRING = 'set_of_normalized_string' - DATA_FORMAT_SET_OF_UNICODE_STRING = 'set_of_unicode_string' - - DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE = { - DATA_FORMAT_HTML: 'TranslatableHtml', - DATA_FORMAT_UNICODE_STRING: 'TranslatableUnicodeString', - DATA_FORMAT_SET_OF_NORMALIZED_STRING: ( - 'TranslatableSetOfNormalizedString'), - DATA_FORMAT_SET_OF_UNICODE_STRING: 'TranslatableSetOfUnicodeString', - } - - @classmethod - def is_data_format_list(cls, data_format): - """Checks whether the content of translation with given format is of - a list type. - - Args: - data_format: str. The format of the translation. - - Returns: - bool. Whether the content of translation is a list. - """ - return data_format in ( - cls.DATA_FORMAT_SET_OF_NORMALIZED_STRING, - cls.DATA_FORMAT_SET_OF_UNICODE_STRING - ) - - def __init__(self, data_format, translation, needs_update): - """Initializes a WrittenTranslation domain object. - - Args: - data_format: str. One of the keys in - DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE. Indicates the - type of the field (html, unicode, etc.). - translation: str|list(str). A user-submitted string or list of - strings that matches the given data format. - needs_update: bool. Whether the translation is marked as needing - review. - """ - self.data_format = data_format - self.translation = translation - self.needs_update = needs_update - - def to_dict(self): - """Returns a dict representing this WrittenTranslation domain object. - - Returns: - dict. A dict, mapping all fields of WrittenTranslation instance. - """ - return { - 'data_format': self.data_format, - 'translation': self.translation, - 'needs_update': self.needs_update, - } - - @classmethod - def from_dict(cls, written_translation_dict): - """Return a WrittenTranslation domain object from a dict. - - Args: - written_translation_dict: dict. The dict representation of - WrittenTranslation object. 
- - Returns: - WrittenTranslation. The corresponding WrittenTranslation domain - object. - """ - return cls( - written_translation_dict['data_format'], - written_translation_dict['translation'], - written_translation_dict['needs_update']) - - def validate(self): - """Validates properties of the WrittenTranslation, normalizing the - translation if needed. - - Raises: - ValidationError. One or more attributes of the WrittenTranslation - are invalid. - """ - if self.data_format not in ( - self.DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE): - raise utils.ValidationError( - 'Invalid data_format: %s' % self.data_format) - - translatable_class_name = ( - self.DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE[self.data_format]) - translatable_obj_class = ( - translatable_object_registry.Registry.get_object_class( - translatable_class_name)) - self.translation = translatable_obj_class.normalize_value( - self.translation) - - if not isinstance(self.needs_update, bool): - raise utils.ValidationError( - 'Expected needs_update to be a bool, received %s' % - self.needs_update) - - -class WrittenTranslations: - """Value object representing a content translations which stores - translated contents of all state contents (like hints, feedback etc.) in - different languages linked through their content_id. - """ - - def __init__(self, translations_mapping): - """Initializes a WrittenTranslations domain object. - - Args: - translations_mapping: dict. A dict mapping the content Ids - to the dicts which is the map of abbreviated code of the - languages to WrittenTranslation objects. - """ - self.translations_mapping = translations_mapping - - def to_dict(self): - """Returns a dict representing this WrittenTranslations domain object. - - Returns: - dict. A dict, mapping all fields of WrittenTranslations instance. 
- """ - translations_mapping = {} - for (content_id, language_code_to_written_translation) in ( - self.translations_mapping.items()): - translations_mapping[content_id] = {} - for (language_code, written_translation) in ( - language_code_to_written_translation.items()): - translations_mapping[content_id][language_code] = ( - written_translation.to_dict()) - written_translations_dict = { - 'translations_mapping': translations_mapping - } - - return written_translations_dict - - @classmethod - def from_dict(cls, written_translations_dict): - """Return a WrittenTranslations domain object from a dict. - - Args: - written_translations_dict: dict. The dict representation of - WrittenTranslations object. - - Returns: - WrittenTranslations. The corresponding WrittenTranslations domain - object. - """ - translations_mapping = {} - for (content_id, language_code_to_written_translation) in ( - written_translations_dict['translations_mapping'].items()): - translations_mapping[content_id] = {} - for (language_code, written_translation) in ( - language_code_to_written_translation.items()): - translations_mapping[content_id][language_code] = ( - WrittenTranslation.from_dict(written_translation)) - - return cls(translations_mapping) - - def get_content_ids_that_are_correctly_translated(self, language_code): - """Returns a list of content ids in which a correct translation is - available in the given language. - - Args: - language_code: str. The abbreviated code of the language. - - Returns: - list(str). A list of content ids in which the translations are - available in the given language. 
- """ - correctly_translated_content_ids = [] - for content_id, translations in self.translations_mapping.items(): - if ( - language_code in translations and - not translations[language_code].needs_update - ): - correctly_translated_content_ids.append(content_id) - - return correctly_translated_content_ids - - def add_translation(self, content_id, language_code, html): - """Adds a translation for the given content id in a given language. - - Args: - content_id: str. The id of the content. - language_code: str. The language code of the translated html. - html: str. The translated html. - """ - written_translation = WrittenTranslation( - WrittenTranslation.DATA_FORMAT_HTML, html, False) - self.translations_mapping[content_id][language_code] = ( - written_translation) - - def mark_written_translation_as_needing_update( - self, content_id, language_code): - """Marks translation as needing update for the given content id and - language code. - - Args: - content_id: str. The id of the content. - language_code: str. The language code. - """ - self.translations_mapping[content_id][language_code].needs_update = ( - True - ) - - def mark_written_translations_as_needing_update(self, content_id): - """Marks translation as needing update for the given content id in all - languages. - - Args: - content_id: str. The id of the content. - """ - for (language_code, written_translation) in ( - self.translations_mapping[content_id].items()): - written_translation.needs_update = True - self.translations_mapping[content_id][language_code] = ( - written_translation) - - def validate(self, expected_content_id_list): - """Validates properties of the WrittenTranslations. - - Args: - expected_content_id_list: list(str). A list of content id which are - expected to be inside they WrittenTranslations. - - Raises: - ValidationError. One or more attributes of the WrittenTranslations - are invalid. 
- """ - if expected_content_id_list is not None: - if not set(self.translations_mapping.keys()) == ( - set(expected_content_id_list)): - raise utils.ValidationError( - 'Expected state written_translations to match the listed ' - 'content ids %s, found %s' % ( - expected_content_id_list, - list(self.translations_mapping.keys())) - ) - - for (content_id, language_code_to_written_translation) in ( - self.translations_mapping.items()): - if not isinstance(content_id, str): - raise utils.ValidationError( - 'Expected content_id to be a string, received %s' - % content_id) - if not isinstance(language_code_to_written_translation, dict): - raise utils.ValidationError( - 'Expected content_id value to be a dict, received %s' - % language_code_to_written_translation) - for (language_code, written_translation) in ( - language_code_to_written_translation.items()): - if not isinstance(language_code, str): - raise utils.ValidationError( - 'Expected language_code to be a string, received %s' - % language_code) - # Currently, we assume written translations are used by the - # voice-artist to voiceover the translated text so written - # translations can be in supported audio/voiceover languages. - allowed_language_codes = [language['id'] for language in ( - constants.SUPPORTED_AUDIO_LANGUAGES)] - if language_code not in allowed_language_codes: - raise utils.ValidationError( - 'Invalid language_code: %s' % language_code) - - written_translation.validate() - - def get_content_ids_for_text_translation(self): - """Returns a list of content_id available for text translation. +class RecordedVoiceoversDict(TypedDict): + """Dictionary representing the RecordedVoiceovers object.""" - Returns: - list(str). A list of content id available for text translation. - """ - return list(sorted(self.translations_mapping.keys())) - - def get_translated_content(self, content_id, language_code): - """Returns the translated content for the given content_id in the given - language. 
- - Args: - content_id: str. The ID of the content. - language_code: str. The language code for the translated content. - - Returns: - str. The translated content for a given content id in a language. - - Raises: - Exception. Translation doesn't exist in the given language. - Exception. The given content id doesn't exist. - """ - if content_id in self.translations_mapping: - if language_code in self.translations_mapping[content_id]: - return self.translations_mapping[ - content_id][language_code].translation - else: - raise Exception( - 'Translation for the given content_id %s does not exist in ' - '%s language code' % (content_id, language_code)) - else: - raise Exception('Invalid content_id: %s' % content_id) - - def add_content_id_for_translation(self, content_id): - """Adds a content id as a key for the translation into the - content_translation dict. - - Args: - content_id: str. The id representing a subtitled html. - - Raises: - Exception. The content id isn't a string. - """ - if not isinstance(content_id, str): - raise Exception( - 'Expected content_id to be a string, received %s' % content_id) - if content_id in self.translations_mapping: - raise Exception( - 'The content_id %s already exist.' % content_id) - else: - self.translations_mapping[content_id] = {} - - def delete_content_id_for_translation(self, content_id): - """Deletes a content id from the content_translation dict. - - Args: - content_id: str. The id representing a subtitled html. - - Raises: - Exception. The content id isn't a string. - """ - if not isinstance(content_id, str): - raise Exception( - 'Expected content_id to be a string, received %s' % content_id) - if content_id not in self.translations_mapping: - raise Exception( - 'The content_id %s does not exist.' % content_id) - else: - self.translations_mapping.pop(content_id, None) - - def get_all_html_content_strings(self): - """Gets all html content strings used in the WrittenTranslations. - - Returns: - list(str). 
The list of html content strings. - """ - html_string_list = [] - for translations in self.translations_mapping.values(): - for written_translation in translations.values(): - if (written_translation.data_format == - WrittenTranslation.DATA_FORMAT_HTML): - html_string_list.append(written_translation.translation) - return html_string_list - - @staticmethod - def convert_html_in_written_translations( - written_translations_dict, conversion_fn): - """Checks for HTML fields in the written translations and converts it - according to the conversion function. - - Args: - written_translations_dict: dict. The written translations dict. - conversion_fn: function. The function to be used for converting the - HTML. - - Returns: - dict. The converted written translations dict. - """ - for content_id, language_code_to_written_translation in ( - written_translations_dict['translations_mapping'].items()): - for language_code in ( - language_code_to_written_translation.keys()): - translation_dict = written_translations_dict[ - 'translations_mapping'][content_id][language_code] - if 'data_format' in translation_dict: - if (translation_dict['data_format'] == - WrittenTranslation.DATA_FORMAT_HTML): - written_translations_dict['translations_mapping'][ - content_id][language_code]['translation'] = ( - conversion_fn(written_translations_dict[ - 'translations_mapping'][content_id][ - language_code]['translation']) - ) - elif 'html' in translation_dict: - # TODO(#11950): Delete this once old schema migration - # functions are deleted. - # This "elif" branch is needed because, in states schema - # v33, this function is called but the dict is still in the - # old format (that doesn't have a "data_format" key). 
- written_translations_dict['translations_mapping'][ - content_id][language_code]['html'] = ( - conversion_fn(translation_dict['html'])) - - return written_translations_dict + voiceovers_mapping: Dict[str, Dict[str, VoiceoverDict]] class RecordedVoiceovers: @@ -1855,7 +2965,9 @@ class RecordedVoiceovers: through their content_id. """ - def __init__(self, voiceovers_mapping): + def __init__( + self, voiceovers_mapping: Dict[str, Dict[str, Voiceover]] + ) -> None: """Initializes a RecordedVoiceovers domain object. Args: @@ -1865,13 +2977,13 @@ def __init__(self, voiceovers_mapping): """ self.voiceovers_mapping = voiceovers_mapping - def to_dict(self): + def to_dict(self) -> RecordedVoiceoversDict: """Returns a dict representing this RecordedVoiceovers domain object. Returns: dict. A dict, mapping all fields of RecordedVoiceovers instance. """ - voiceovers_mapping = {} + voiceovers_mapping: Dict[str, Dict[str, VoiceoverDict]] = {} for (content_id, language_code_to_voiceover) in ( self.voiceovers_mapping.items()): voiceovers_mapping[content_id] = {} @@ -1879,14 +2991,16 @@ def to_dict(self): language_code_to_voiceover.items()): voiceovers_mapping[content_id][language_code] = ( voiceover.to_dict()) - recorded_voiceovers_dict = { + recorded_voiceovers_dict: RecordedVoiceoversDict = { 'voiceovers_mapping': voiceovers_mapping } return recorded_voiceovers_dict @classmethod - def from_dict(cls, recorded_voiceovers_dict): + def from_dict( + cls, recorded_voiceovers_dict: RecordedVoiceoversDict + ) -> RecordedVoiceovers: """Return a RecordedVoiceovers domain object from a dict. Args: @@ -1897,7 +3011,7 @@ def from_dict(cls, recorded_voiceovers_dict): RecordedVoiceovers. The corresponding RecordedVoiceovers domain object. 
""" - voiceovers_mapping = {} + voiceovers_mapping: Dict[str, Dict[str, Voiceover]] = {} for (content_id, language_code_to_voiceover) in ( recorded_voiceovers_dict['voiceovers_mapping'].items()): voiceovers_mapping[content_id] = {} @@ -1908,12 +3022,12 @@ def from_dict(cls, recorded_voiceovers_dict): return cls(voiceovers_mapping) - def validate(self, expected_content_id_list): + def validate(self, expected_content_id_list: Optional[List[str]]) -> None: """Validates properties of the RecordedVoiceovers. Args: - expected_content_id_list: list(str). A list of content id which are - expected to be inside the RecordedVoiceovers. + expected_content_id_list: list(str)|None. A list of content id which + are expected to be inside the RecordedVoiceovers. Raises: ValidationError. One or more attributes of the RecordedVoiceovers @@ -1953,7 +3067,7 @@ def validate(self, expected_content_id_list): voiceover.validate() - def get_content_ids_for_voiceovers(self): + def get_content_ids_for_voiceovers(self) -> List[str]: """Returns a list of content_id available for voiceover. Returns: @@ -1961,12 +3075,12 @@ def get_content_ids_for_voiceovers(self): """ return list(self.voiceovers_mapping.keys()) - def strip_all_existing_voiceovers(self): + def strip_all_existing_voiceovers(self) -> None: """Strips all existing voiceovers from the voiceovers_mapping.""" for content_id in self.voiceovers_mapping.keys(): self.voiceovers_mapping[content_id] = {} - def add_content_id_for_voiceover(self, content_id): + def add_content_id_for_voiceover(self, content_id: str) -> None: """Adds a content id as a key for the voiceover into the voiceovers_mapping dict. @@ -1987,7 +3101,7 @@ def add_content_id_for_voiceover(self, content_id): self.voiceovers_mapping[content_id] = {} - def delete_content_id_for_voiceover(self, content_id): + def delete_content_id_for_voiceover(self, content_id: str) -> None: """Deletes a content id from the voiceovers_mapping dict. 
Args: @@ -2004,14 +3118,25 @@ def delete_content_id_for_voiceover(self, content_id): if content_id not in self.voiceovers_mapping: raise Exception( 'The content_id %s does not exist.' % content_id) - else: - self.voiceovers_mapping.pop(content_id, None) + self.voiceovers_mapping.pop(content_id, None) + + +class RuleSpecDict(TypedDict): + """Dictionary representing the RuleSpec object.""" -class RuleSpec: + rule_type: str + inputs: Dict[str, AllowedRuleSpecInputTypes] + + +class RuleSpec(translation_domain.BaseTranslatableObject): """Value object representing a rule specification.""" - def __init__(self, rule_type, inputs): + def __init__( + self, + rule_type: str, + inputs: Mapping[str, AllowedRuleSpecInputTypes] + ) -> None: """Initializes a RuleSpec domain object. Args: @@ -2025,9 +3150,46 @@ def __init__(self, rule_type, inputs): enclosed in {{...}} braces. """ self.rule_type = rule_type + # Here, we are narrowing down the type from Mapping to Dict. Because + # Mapping is used just to accept the different types of allowed Dicts. + assert isinstance(inputs, dict) self.inputs = inputs - def to_dict(self): + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the rule spec. + + Returns: + translatable_contents_collection: TranslatableContentsCollection. + An instance of TranslatableContentsCollection class. 
+ """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + for input_value in self.inputs.values(): + if 'normalizedStrSet' in input_value: + translatable_contents_collection.add_translatable_field( + input_value['contentId'], + translation_domain.ContentType.RULE, + translation_domain.TranslatableContentFormat + .SET_OF_NORMALIZED_STRING, + input_value['normalizedStrSet'], + kwargs['interaction_id'], + self.rule_type) + if 'unicodeStrSet' in input_value: + translatable_contents_collection.add_translatable_field( + input_value['contentId'], + translation_domain.ContentType.RULE, + translation_domain.TranslatableContentFormat + .SET_OF_UNICODE_STRING, + input_value['unicodeStrSet'], + kwargs['interaction_id'], + self.rule_type) + return translatable_contents_collection + + def to_dict(self) -> RuleSpecDict: """Returns a dict representing this RuleSpec domain object. Returns: @@ -2039,7 +3201,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, rulespec_dict): + def from_dict(cls, rulespec_dict: RuleSpecDict) -> RuleSpec: """Return a RuleSpec domain object from a dict. Args: @@ -2053,7 +3215,11 @@ def from_dict(cls, rulespec_dict): rulespec_dict['inputs'] ) - def validate(self, rule_params_list, exp_param_specs_dict): + def validate( + self, + rule_params_list: List[Tuple[str, Type[objects.BaseObject]]], + exp_param_specs_dict: Dict[str, param_domain.ParamSpec] + ) -> None: """Validates a RuleSpec value object. It ensures the inputs dict does not refer to any non-existent parameters and that it contains values for all the parameters the rule expects. 
@@ -2124,7 +3290,12 @@ def validate(self, rule_params_list, exp_param_specs_dict): @staticmethod def convert_html_in_rule_spec( - rule_spec_dict, conversion_fn, html_field_types_to_rule_specs): + rule_spec_dict: RuleSpecDict, + conversion_fn: Callable[[str], str], + html_field_types_to_rule_specs: Dict[ + str, rules_registry.RuleSpecsExtensionDict + ] + ) -> RuleSpecDict: """Checks for HTML fields in a Rule Spec and converts it according to the conversion function. @@ -2140,6 +3311,11 @@ def convert_html_in_rule_spec( Returns: dict. The converted Rule Spec dict. + + Raises: + Exception. The Rule spec has an invalid format. + Exception. The Rule spec has no valid input variable + with HTML in it. """ # TODO(#9413): Find a way to include a reference to the interaction # type in the Draft change lists. @@ -2167,9 +3343,14 @@ def convert_html_in_rule_spec( rule_spec_dict['inputs'][input_variable]) if (html_type_format == feconf.HTML_RULE_VARIABLE_FORMAT_STRING): + input_value = ( + rule_spec_dict['inputs'][input_variable] + ) + # Ruling out the possibility of any other type for + # mypy type checking. + assert isinstance(input_value, str) rule_spec_dict['inputs'][input_variable] = ( - conversion_fn( - rule_spec_dict['inputs'][input_variable])) + conversion_fn(input_value)) elif (html_type_format == feconf.HTML_RULE_VARIABLE_FORMAT_SET): # Here we are checking the type of the @@ -2181,16 +3362,36 @@ def convert_html_in_rule_spec( for value_index, value in enumerate( rule_input_variable): if isinstance(value, str): - rule_spec_dict['inputs'][ + # Here we use cast because above assert + # conditions forces 'inputs' to be of + # type Dict[str, List[str]]. 
+ variable_format_set_input = cast( + Dict[str, List[str]], + rule_spec_dict['inputs'] + ) + variable_format_set_input[ input_variable][value_index] = ( conversion_fn(value)) elif (html_type_format == feconf.HTML_RULE_VARIABLE_FORMAT_LIST_OF_SETS): + input_variable_list = ( + rule_spec_dict['inputs'][input_variable] + ) + # Ruling out the possibility of any other type for + # mypy type checking. + assert isinstance(input_variable_list, list) for list_index, html_list in enumerate( - rule_spec_dict['inputs'][input_variable]): + input_variable_list): for rule_html_index, rule_html in enumerate( html_list): - rule_spec_dict['inputs'][input_variable][ + # Here we use cast because above assert + # conditions forces 'inputs' to be of + # type Dict[str, List[List[str]]]. + list_of_sets_inputs = cast( + Dict[str, List[List[str]]], + rule_spec_dict['inputs'] + ) + list_of_sets_inputs[input_variable][ list_index][rule_html_index] = ( conversion_fn(rule_html)) else: @@ -2205,10 +3406,21 @@ def convert_html_in_rule_spec( return rule_spec_dict +class SubtitledHtmlDict(TypedDict): + """Dictionary representing the SubtitledHtml object.""" + + content_id: str + html: str + + class SubtitledHtml: """Value object representing subtitled HTML.""" - def __init__(self, content_id, html): + def __init__( + self, + content_id: str, + html: str + ) -> None: """Initializes a SubtitledHtml domain object. Note that initializing the SubtitledHtml object does not clean the html. This is because we sometimes need to initialize SubtitledHtml and migrate the contained @@ -2229,7 +3441,7 @@ def __init__(self, content_id, html): self.content_id = content_id self.html = html - def to_dict(self): + def to_dict(self) -> SubtitledHtmlDict: """Returns a dict representing this SubtitledHtml domain object. 
Returns: @@ -2241,7 +3453,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, subtitled_html_dict): + def from_dict(cls, subtitled_html_dict: SubtitledHtmlDict) -> SubtitledHtml: """Return a SubtitledHtml domain object from a dict. Args: @@ -2254,7 +3466,7 @@ def from_dict(cls, subtitled_html_dict): return cls( subtitled_html_dict['content_id'], subtitled_html_dict['html']) - def validate(self): + def validate(self) -> None: """Validates properties of the SubtitledHtml, and cleans the html. Raises: @@ -2272,8 +3484,11 @@ def validate(self): self.html = html_cleaner.clean(self.html) + html_cleaner.validate_rte_tags(self.html) + html_cleaner.validate_tabs_and_collapsible_rte_tags(self.html) + @classmethod - def create_default_subtitled_html(cls, content_id): + def create_default_subtitled_html(cls, content_id: str) -> SubtitledHtml: """Create a default SubtitledHtml domain object. Args: @@ -2286,10 +3501,17 @@ def create_default_subtitled_html(cls, content_id): return cls(content_id, '') +class SubtitledUnicodeDict(TypedDict): + """Dictionary representing the SubtitledUnicode object.""" + + content_id: str + unicode_str: str + + class SubtitledUnicode: """Value object representing subtitled unicode.""" - def __init__(self, content_id, unicode_str): + def __init__(self, content_id: str, unicode_str: str) -> None: """Initializes a SubtitledUnicode domain object. Args: @@ -2301,7 +3523,7 @@ def __init__(self, content_id, unicode_str): self.unicode_str = unicode_str self.validate() - def to_dict(self): + def to_dict(self) -> SubtitledUnicodeDict: """Returns a dict representing this SubtitledUnicode domain object. Returns: @@ -2313,7 +3535,9 @@ def to_dict(self): } @classmethod - def from_dict(cls, subtitled_unicode_dict): + def from_dict( + cls, subtitled_unicode_dict: SubtitledUnicodeDict + ) -> SubtitledUnicode: """Return a SubtitledUnicode domain object from a dict. 
Args: @@ -2328,7 +3552,7 @@ def from_dict(cls, subtitled_unicode_dict): subtitled_unicode_dict['unicode_str'] ) - def validate(self): + def validate(self) -> None: """Validates properties of the SubtitledUnicode. Raises: @@ -2345,7 +3569,9 @@ def validate(self): 'Invalid content unicode: %s' % self.unicode_str) @classmethod - def create_default_subtitled_unicode(cls, content_id): + def create_default_subtitled_unicode( + cls, content_id: str + ) -> SubtitledUnicode: """Create a default SubtitledUnicode domain object. Args: @@ -2357,64 +3583,51 @@ def create_default_subtitled_unicode(cls, content_id): return cls(content_id, '') -class TranslatableItem: - """Value object representing item that can be translated.""" +DomainObjectCustomizationArgsConversionFnTypes = Union[ + Callable[[SubtitledHtml, str], SubtitledHtml], + Callable[[SubtitledHtml, str], SubtitledHtmlDict], + Callable[[SubtitledUnicode, str], SubtitledUnicodeDict], + Callable[[SubtitledHtml, str], List[str]] +] - DATA_FORMAT_HTML = 'html' - DATA_FORMAT_UNICODE_STRING = 'unicode' - DATA_FORMAT_SET_OF_NORMALIZED_STRING = 'set_of_normalized_string' - DATA_FORMAT_SET_OF_UNICODE_STRING = 'set_of_unicode_string' - CONTENT_TYPE_CONTENT = 'content' - CONTENT_TYPE_INTERACTION = 'interaction' - CONTENT_TYPE_RULE = 'rule' - CONTENT_TYPE_FEEDBACK = 'feedback' - CONTENT_TYPE_HINT = 'hint' - CONTENT_TYPE_SOLUTION = 'solution' +DictCustomizationArgsConversionFnTypes = Union[ + Callable[[Dict[str, str], Literal['SubtitledUnicode']], SubtitledUnicode], + Callable[[Dict[str, str], Literal['SubtitledHtml']], SubtitledHtml] +] - def __init__( - self, content, data_format, content_type, interaction_id=None, - rule_type=None): - """Initializes a TranslatableItem domain object. +AcceptableConversionFnType = Union[ + DomainObjectCustomizationArgsConversionFnTypes, + DictCustomizationArgsConversionFnTypes +] - Args: - content: str|list(str). The translatable content text. - data_format: str. 
The data format of the translatable content. - content_type: str. One of `Content`, `Interaction`, ‘Rule`, - `Feedback`, `Hint`, `Solution`. - interaction_id: str|None. Interaction ID, e.g. `TextInput`, if the - content corresponds to an InteractionInstance, else None. - rule_type: str|None. Rule type if content_type == `Rule`, e.g. - “Equals”, “IsSubsetOf”, “Contains” else None. - """ - self.content = content - self.data_format = data_format - self.content_type = content_type - self.interaction_id = interaction_id - self.rule_type = rule_type - def to_dict(self): - """Returns a dict representing this TranslatableItem domain object. +class StateDict(TypedDict): + """Dictionary representing the State object.""" - Returns: - dict. A dict, mapping all fields of TranslatableItem instance. - """ - return { - 'content': self.content, - 'data_format': self.data_format, - 'content_type': self.content_type, - 'interaction_id': self.interaction_id, - 'rule_type': self.rule_type - } + content: SubtitledHtmlDict + param_changes: List[param_domain.ParamChangeDict] + interaction: InteractionInstanceDict + recorded_voiceovers: RecordedVoiceoversDict + solicit_answer_details: bool + card_is_checkpoint: bool + linked_skill_id: Optional[str] + classifier_model_id: Optional[str] -class State: +class State(translation_domain.BaseTranslatableObject): """Domain object for a state.""" def __init__( - self, content, param_changes, interaction, recorded_voiceovers, - written_translations, solicit_answer_details, card_is_checkpoint, - next_content_id_index, linked_skill_id=None, - classifier_model_id=None): + self, + content: SubtitledHtml, + param_changes: List[param_domain.ParamChange], + interaction: InteractionInstance, + recorded_voiceovers: RecordedVoiceovers, + solicit_answer_details: bool, + card_is_checkpoint: bool, + linked_skill_id: Optional[str] = None, + classifier_model_id: Optional[str] = None + ) -> None: """Initializes a State domain object. 
Args: @@ -2426,15 +3639,11 @@ def __init__( associated with this state. recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the state contents and translations. - written_translations: WrittenTranslations. The written translations - for the state contents. solicit_answer_details: bool. Whether the creator wants to ask for answer details from the learner about why they picked a particular answer while playing the exploration. card_is_checkpoint: bool. If the card is marked as a checkpoint by the creator or not. - next_content_id_index: int. The next content_id index to use for - generation of new content_ids. linked_skill_id: str or None. The linked skill ID associated with this state. classifier_model_id: str or None. The classifier model ID @@ -2456,12 +3665,39 @@ def __init__( self.classifier_model_id = classifier_model_id self.recorded_voiceovers = recorded_voiceovers self.linked_skill_id = linked_skill_id - self.written_translations = written_translations self.solicit_answer_details = solicit_answer_details self.card_is_checkpoint = card_is_checkpoint - self.next_content_id_index = next_content_id_index - def validate(self, exp_param_specs_dict, allow_null_interaction): + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + """Get all translatable fields in the state. + + Returns: + translatable_contents_collection: TranslatableContentsCollection. + An instance of TranslatableContentsCollection class. 
+ """ + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + translatable_contents_collection.add_translatable_field( + self.content.content_id, + translation_domain.ContentType.CONTENT, + translation_domain.TranslatableContentFormat.HTML, + self.content.html) + translatable_contents_collection.add_fields_from_translatable_object( + self.interaction) + return translatable_contents_collection + + def validate( + self, + exp_param_specs_dict: Optional[Dict[str, param_domain.ParamSpec]], + allow_null_interaction: bool, + *, + tagged_skill_misconception_id_required: bool = False, + strict: bool = False + ) -> None: """Validates various properties of the State. Args: @@ -2472,11 +3708,18 @@ def validate(self, exp_param_specs_dict, allow_null_interaction): question. allow_null_interaction: bool. Whether this state's interaction is allowed to be unspecified. + tagged_skill_misconception_id_required: bool. The 'tagged_skill_ + misconception_id' is required or not. + strict: bool. Tells if the exploration is strict or not. Raises: ValidationError. One or more attributes of the State are invalid. 
""" self.content.validate() + if exp_param_specs_dict: + param_specs_dict = exp_param_specs_dict + else: + param_specs_dict = {} if not isinstance(self.param_changes, list): raise utils.ValidationError( @@ -2488,80 +3731,12 @@ def validate(self, exp_param_specs_dict, allow_null_interaction): if not allow_null_interaction and self.interaction.id is None: raise utils.ValidationError( 'This state does not have any interaction specified.') - elif self.interaction.id is not None: - self.interaction.validate(exp_param_specs_dict) - - content_id_list = [] - content_id_list.append(self.content.content_id) - for answer_group in self.interaction.answer_groups: - feedback_content_id = answer_group.outcome.feedback.content_id - if feedback_content_id in content_id_list: - raise utils.ValidationError( - 'Found a duplicate content id %s' % feedback_content_id) - content_id_list.append(feedback_content_id) - - for rule_spec in answer_group.rule_specs: - for param_name, value in rule_spec.inputs.items(): - param_type = ( - interaction_registry.Registry.get_interaction_by_id( - self.interaction.id - ).get_rule_param_type(rule_spec.rule_type, param_name)) - - if issubclass(param_type, objects.BaseTranslatableObject): - if value['contentId'] in content_id_list: - raise utils.ValidationError( - 'Found a duplicate content ' - 'id %s' % value['contentId']) - content_id_list.append(value['contentId']) - - if self.interaction.default_outcome: - default_outcome_content_id = ( - self.interaction.default_outcome.feedback.content_id) - if default_outcome_content_id in content_id_list: - raise utils.ValidationError( - 'Found a duplicate content id %s' - % default_outcome_content_id) - content_id_list.append(default_outcome_content_id) - for hint in self.interaction.hints: - hint_content_id = hint.hint_content.content_id - if hint_content_id in content_id_list: - raise utils.ValidationError( - 'Found a duplicate content id %s' % hint_content_id) - content_id_list.append(hint_content_id) - if 
self.interaction.solution: - solution_content_id = ( - self.interaction.solution.explanation.content_id) - if solution_content_id in content_id_list: - raise utils.ValidationError( - 'Found a duplicate content id %s' % solution_content_id) - content_id_list.append(solution_content_id) - if self.interaction.id is not None: - for ca_name in self.interaction.customization_args: - content_id_list.extend( - self.interaction.customization_args[ca_name] - .get_content_ids() - ) - - if len(set(content_id_list)) != len(content_id_list): - raise utils.ValidationError( - 'Expected all content_ids to be unique, ' - 'received %s' % content_id_list) - - for content_id in content_id_list: - content_id_suffix = content_id.split('_')[-1] - - # Possible values of content_id_suffix are a digit, or from - # a 'outcome' (from 'default_outcome'). If the content_id_suffix - # is not a digit, we disregard it here. - if ( - content_id_suffix.isdigit() and - int(content_id_suffix) > self.next_content_id_index - ): - raise utils.ValidationError( - 'Expected all content id indexes to be less than the "next ' - 'content id index", but received content id %s' % content_id - ) + self.interaction.validate( + param_specs_dict, + tagged_skill_misconception_id_required=( + tagged_skill_misconception_id_required), + strict=strict) if not isinstance(self.solicit_answer_details, bool): raise utils.ValidationError( @@ -2579,8 +3754,7 @@ def validate(self, exp_param_specs_dict, allow_null_interaction): 'Expected card_is_checkpoint to be a boolean, ' 'received %s' % self.card_is_checkpoint) - self.written_translations.validate(content_id_list) - self.recorded_voiceovers.validate(content_id_list) + self.recorded_voiceovers.validate(self.get_translatable_content_ids()) if self.linked_skill_id is not None: if not isinstance(self.linked_skill_id, str): @@ -2588,32 +3762,14 @@ def validate(self, exp_param_specs_dict, allow_null_interaction): 'Expected linked_skill_id to be a str, ' 'received %s.' 
% self.linked_skill_id) - def get_content_html(self, content_id): - """Returns the content belongs to a given content id of the object. - - Args: - content_id: str. The id of the content. - - Returns: - str. The html content corresponding to the given content id. - - Raises: - ValueError. The given content_id does not exist. - """ - content_id_to_translatable_item = self._get_all_translatable_content() - if content_id not in content_id_to_translatable_item: - raise ValueError('Content ID %s does not exist' % content_id) - - return content_id_to_translatable_item[content_id].content - - def is_rte_content_supported_on_android(self): + def is_rte_content_supported_on_android(self) -> bool: """Checks whether the RTE components used in the state are supported by Android. Returns: bool. Whether the RTE components in the state is valid. """ - def require_valid_component_names(html): + def require_valid_component_names(html: str) -> bool: """Checks if the provided html string contains only whitelisted RTE tags. @@ -2637,7 +3793,7 @@ def require_valid_component_names(html): return self.interaction.is_rte_content_supported_on_android( require_valid_component_names) - def get_training_data(self): + def get_training_data(self) -> List[TrainingDataDict]: """Retrieves training data from the State domain object. Returns: @@ -2646,7 +3802,7 @@ def get_training_data(self): group and the other maps 'answers' to the answer group's training data. """ - state_training_data_by_answer_group = [] + state_training_data_by_answer_group: List[TrainingDataDict] = [] for (answer_group_index, answer_group) in enumerate( self.interaction.answer_groups): if answer_group.training_data: @@ -2657,7 +3813,7 @@ def get_training_data(self): }) return state_training_data_by_answer_group - def can_undergo_classification(self): + def can_undergo_classification(self) -> bool: """Checks whether the answers for this state satisfy the preconditions for a ML model to be trained. 
@@ -2677,7 +3833,9 @@ def can_undergo_classification(self): return False @classmethod - def convert_state_dict_to_yaml(cls, state_dict, width): + def convert_state_dict_to_yaml( + cls, state_dict: StateDict, width: int + ) -> str: """Converts the given state dict to yaml format. Args: @@ -2689,7 +3847,7 @@ def convert_state_dict_to_yaml(cls, state_dict, width): str. The YAML version of the state_dict. Raises: - Exception. The state_dict does not represent a valid state. + Exception. The state dict does not represent a valid state. """ try: # Check if the state_dict can be converted to a State. @@ -2698,43 +3856,13 @@ def convert_state_dict_to_yaml(cls, state_dict, width): logging.exception('Bad state dict: %s' % str(state_dict)) raise e - return python_utils.yaml_from_dict(state.to_dict(), width=width) - - def get_translation_counts(self): - """Return a dict representing the number of translations available in a - languages in which there exists at least one translation in the state - object. - - Note: This method only counts the translations which are translatable as - per _get_all_translatable_content method. - - Returns: - dict(str, int). A dict with language code as a key and number of - translations available in that language as the value. - """ - translation_counts = collections.defaultdict(int) - translations_mapping = self.written_translations.translations_mapping - - for content_id in self._get_all_translatable_content(): - for language_code, translation in ( - translations_mapping[content_id].items()): - if not translation.needs_update: - translation_counts[language_code] += 1 - return translation_counts - - def get_translatable_content_count(self): - """Returns the number of content fields available for translation in - the object. + return utils.yaml_from_dict(state.to_dict(), width=width) - Returns: - int. The number of content fields available for translation in - the state. 
- """ - return len(self._get_all_translatable_content()) - - def _update_content_ids_in_assets(self, old_ids_list, new_ids_list): + def _update_content_ids_in_assets( + self, old_ids_list: List[str], new_ids_list: List[str] + ) -> None: """Adds or deletes content ids in assets i.e, other parts of state - object such as recorded_voiceovers and written_translations. + object such as recorded_voiceovers. Args: old_ids_list: list(str). A list of content ids present earlier @@ -2743,11 +3871,13 @@ def _update_content_ids_in_assets(self, old_ids_list, new_ids_list): new_ids_list: list(str). A list of content ids currently present within the substructure (like answer groups, hints etc.) of state. + + Raises: + Exception. The content to be deleted doesn't exist. + Exception. The content to be added already exists. """ content_ids_to_delete = set(old_ids_list) - set(new_ids_list) content_ids_to_add = set(new_ids_list) - set(old_ids_list) - content_ids_for_text_translations = ( - self.written_translations.get_content_ids_for_text_translation()) content_ids_for_voiceovers = ( self.recorded_voiceovers.get_content_ids_for_voiceovers()) for content_id in content_ids_to_delete: @@ -2755,90 +3885,32 @@ def _update_content_ids_in_assets(self, old_ids_list, new_ids_list): raise Exception( 'The content_id %s does not exist in recorded_voiceovers.' % content_id) - elif not content_id in content_ids_for_text_translations: - raise Exception( - 'The content_id %s does not exist in written_translations.' 
- % content_id) - else: - self.recorded_voiceovers.delete_content_id_for_voiceover( - content_id) - self.written_translations.delete_content_id_for_translation( - content_id) + + self.recorded_voiceovers.delete_content_id_for_voiceover(content_id) for content_id in content_ids_to_add: if content_id in content_ids_for_voiceovers: raise Exception( 'The content_id %s already exists in recorded_voiceovers' % content_id) - elif content_id in content_ids_for_text_translations: - raise Exception( - 'The content_id %s already exists in written_translations.' - % content_id) - else: - self.recorded_voiceovers.add_content_id_for_voiceover( - content_id) - self.written_translations.add_content_id_for_translation( - content_id) - - def add_translation(self, content_id, language_code, translation_html): - """Adds translation to a given content id in a specific language. - - Args: - content_id: str. The id of the content. - language_code: str. The language code. - translation_html: str. The translated html content. - """ - translation_html = html_cleaner.clean(translation_html) - self.written_translations.add_translation( - content_id, language_code, translation_html) - - def add_written_translation( - self, content_id, language_code, translation, data_format): - """Adds a translation for the given content id in a given language. - - Args: - content_id: str. The id of the content. - language_code: str. The language code of the translated html. - translation: str|list(str). The translated content. - data_format: str. The data format of the translated content. - """ - written_translation = WrittenTranslation( - data_format, translation, False) - self.written_translations.translations_mapping[content_id][ - language_code] = written_translation - - def mark_written_translation_as_needing_update( - self, content_id, language_code): - """Marks translation as needing update for the given content id and - language code. - - Args: - content_id: str. The id of the content. 
- language_code: str. The language code. - """ - self.written_translations.mark_written_translation_as_needing_update( - content_id, language_code) - - def mark_written_translations_as_needing_update(self, content_id): - """Marks translation as needing update for the given content id in all - languages. - Args: - content_id: str. The id of the content. - """ - self.written_translations.mark_written_translations_as_needing_update( - content_id) + self.recorded_voiceovers.add_content_id_for_voiceover(content_id) - def update_content(self, content): + def update_content(self, content: SubtitledHtml) -> None: """Update the content of this state. Args: content: SubtitledHtml. Representation of updated content. """ + old_content_id = self.content.content_id # TODO(sll): Must sanitize all content in RTE component attrs. self.content = content + self._update_content_ids_in_assets( + [old_content_id], [self.content.content_id]) - def update_param_changes(self, param_changes): + def update_param_changes( + self, param_changes: List[param_domain.ParamChange] + ) -> None: """Update the param_changes dict attribute. Args: @@ -2847,11 +3919,11 @@ def update_param_changes(self, param_changes): """ self.param_changes = param_changes - def update_interaction_id(self, interaction_id): + def update_interaction_id(self, interaction_id: Optional[str]) -> None: """Update the interaction id attribute. Args: - interaction_id: str. The new interaction id to set. + interaction_id: str|None. The new interaction id to set. """ if self.interaction.id: old_content_id_list = [ @@ -2878,28 +3950,39 @@ def update_interaction_id(self, interaction_id): self.interaction.id = interaction_id self.interaction.answer_groups = [] - def update_next_content_id_index(self, next_content_id_index): - """Update the interaction next content id index attribute. - - Args: - next_content_id_index: int. The new next content id index to set. 
- """ - self.next_content_id_index = next_content_id_index - - def update_linked_skill_id(self, linked_skill_id): + def update_linked_skill_id(self, linked_skill_id: Optional[str]) -> None: """Update the state linked skill id attribute. Args: - linked_skill_id: str. The linked skill id to state. + linked_skill_id: str|None. The linked skill id to state. """ self.linked_skill_id = linked_skill_id - def update_interaction_customization_args(self, customization_args_dict): + def update_interaction_customization_args( + self, + customization_args_mapping: Mapping[ + str, Mapping[str, UnionOfCustomizationArgsDictValues] + ] + ) -> None: """Update the customization_args of InteractionInstance domain object. Args: - customization_args_dict: dict. The new customization_args to set. + customization_args_mapping: dict. The new customization_args to set. + + Raises: + Exception. The customization arguments are not unique. """ + # Here we use cast because for argument 'customization_args_mapping' + # we have used Mapping type because we want to allow + # 'update_interaction_customization_args' method to accept different + # subtypes of customization_arg dictionaries, but the problem with + # Mapping is that the Mapping does not allow to update(or set) values + # because Mapping is a read-only type. To overcome this issue, we + # narrowed down the type from Mapping to Dict by using cast so that + # while updating or setting a new value MyPy will not throw any error. + customization_args_dict = cast( + CustomizationArgsDictType, customization_args_mapping + ) customization_args = ( InteractionInstance. 
convert_customization_args_dict_to_customization_args( @@ -2926,12 +4009,17 @@ def update_interaction_customization_args(self, customization_args_dict): self._update_content_ids_in_assets( old_content_id_list, new_content_id_list) - def update_interaction_answer_groups(self, answer_groups_list): + def update_interaction_answer_groups( + self, answer_groups_list: List[AnswerGroup] + ) -> None: """Update the list of AnswerGroup in InteractionInstance domain object. Args: answer_groups_list: list(AnswerGroup). List of AnswerGroup domain objects. + + Raises: + Exception. Type of AnswerGroup domain objects is not as expected. """ if not isinstance(answer_groups_list, list): raise Exception( @@ -2995,11 +4083,11 @@ def update_interaction_answer_groups(self, answer_groups_list): try: normalized_param = param_type.normalize(value) - except Exception: + except Exception as e: raise Exception( 'Value has the wrong type. It should be a %s. ' 'The value is %s' % - (param_type.__name__, value)) + (param_type.__name__, value)) from e rule_inputs[param_name] = normalized_param @@ -3012,7 +4100,9 @@ def update_interaction_answer_groups(self, answer_groups_list): self._update_content_ids_in_assets( old_content_id_list, new_content_id_list) - def update_interaction_default_outcome(self, default_outcome): + def update_interaction_default_outcome( + self, default_outcome: Optional[Outcome] + ) -> None: """Update the default_outcome of InteractionInstance domain object. Args: @@ -3035,7 +4125,8 @@ def update_interaction_default_outcome(self, default_outcome): old_content_id_list, new_content_id_list) def update_interaction_confirmed_unclassified_answers( - self, confirmed_unclassified_answers): + self, confirmed_unclassified_answers: List[AnswerGroup] + ) -> None: """Update the confirmed_unclassified_answers of IteractionInstance domain object. @@ -3045,7 +4136,7 @@ def update_interaction_confirmed_unclassified_answers( default outcome. Raises: - Exception. 
The 'confirmed_unclassified_answers' is not a list. + Exception. Given answers is not of type list. """ if not isinstance(confirmed_unclassified_answers, list): raise Exception( @@ -3054,7 +4145,7 @@ def update_interaction_confirmed_unclassified_answers( self.interaction.confirmed_unclassified_answers = ( confirmed_unclassified_answers) - def update_interaction_hints(self, hints_list): + def update_interaction_hints(self, hints_list: List[Hint]) -> None: """Update the list of hints. Args: @@ -3076,11 +4167,13 @@ def update_interaction_hints(self, hints_list): self._update_content_ids_in_assets( old_content_id_list, new_content_id_list) - def update_interaction_solution(self, solution): + def update_interaction_solution( + self, solution: Optional[Solution] + ) -> None: """Update the solution of interaction. Args: - solution: Solution. Object of class Solution. + solution: Solution|None. Object of class Solution. Raises: Exception. The 'solution' is not a domain object. @@ -3105,7 +4198,9 @@ def update_interaction_solution(self, solution): self._update_content_ids_in_assets( old_content_id_list, new_content_id_list) - def update_recorded_voiceovers(self, recorded_voiceovers): + def update_recorded_voiceovers( + self, recorded_voiceovers: RecordedVoiceovers + ) -> None: """Update the recorded_voiceovers of a state. Args: @@ -3114,21 +4209,17 @@ def update_recorded_voiceovers(self, recorded_voiceovers): """ self.recorded_voiceovers = recorded_voiceovers - def update_written_translations(self, written_translations): - """Update the written_translations of a state. - - Args: - written_translations: WrittenTranslations. The new - WrittenTranslations object for the state. - """ - self.written_translations = written_translations - - def update_solicit_answer_details(self, solicit_answer_details): + def update_solicit_answer_details( + self, solicit_answer_details: bool + ) -> None: """Update the solicit_answer_details of a state. Args: solicit_answer_details: bool. 
The new value of solicit_answer_details for the state. + + Raises: + Exception. The argument is not of type bool. """ if not isinstance(solicit_answer_details, bool): raise Exception( @@ -3136,12 +4227,15 @@ def update_solicit_answer_details(self, solicit_answer_details): % solicit_answer_details) self.solicit_answer_details = solicit_answer_details - def update_card_is_checkpoint(self, card_is_checkpoint): + def update_card_is_checkpoint(self, card_is_checkpoint: bool) -> None: """Update the card_is_checkpoint field of a state. Args: card_is_checkpoint: bool. The new value of card_is_checkpoint for the state. + + Raises: + Exception. The argument is not of type bool. """ if not isinstance(card_is_checkpoint, bool): raise Exception( @@ -3149,138 +4243,7 @@ def update_card_is_checkpoint(self, card_is_checkpoint): % card_is_checkpoint) self.card_is_checkpoint = card_is_checkpoint - def _get_all_translatable_content(self): - """Returns all content which can be translated into different languages. - - Returns: - dict(str, TranslatableItem). Returns a dict with key as content - id and TranslatableItem as value with the appropriate data - format. - """ - content_id_to_translatable_item = {} - - content_id_to_translatable_item[self.content.content_id] = ( - TranslatableItem( - self.content.html, - TranslatableItem.DATA_FORMAT_HTML, - TranslatableItem.CONTENT_TYPE_CONTENT)) - - # TODO(#6178): Remove empty html checks once we add a validation - # check that ensures each content in state should be non-empty html. 
- default_outcome = self.interaction.default_outcome - if default_outcome is not None and default_outcome.feedback.html != '': - content_id_to_translatable_item[ - default_outcome.feedback.content_id - ] = TranslatableItem( - default_outcome.feedback.html, - TranslatableItem.DATA_FORMAT_HTML, - TranslatableItem.CONTENT_TYPE_FEEDBACK) - - for answer_group in self.interaction.answer_groups: - if answer_group.outcome.feedback.html != '': - content_id_to_translatable_item[ - answer_group.outcome.feedback.content_id - ] = TranslatableItem( - answer_group.outcome.feedback.html, - TranslatableItem.DATA_FORMAT_HTML, - TranslatableItem.CONTENT_TYPE_FEEDBACK) - # As of Aug 2021, only TextInput and SetInput have translatable rule - # inputs. - if self.interaction.id not in ['TextInput', 'SetInput']: - continue - for rule_spec in answer_group.rule_specs: - for input_value in rule_spec.inputs.values(): - if 'normalizedStrSet' in input_value: - content_id_to_translatable_item[ - input_value['contentId'] - ] = TranslatableItem( - input_value['normalizedStrSet'], - TranslatableItem - .DATA_FORMAT_SET_OF_NORMALIZED_STRING, - TranslatableItem.CONTENT_TYPE_RULE, - self.interaction.id, - rule_spec.rule_type) - if 'unicodeStrSet' in input_value: - content_id_to_translatable_item[ - input_value['contentId'] - ] = TranslatableItem( - input_value['unicodeStrSet'], - TranslatableItem - .DATA_FORMAT_SET_OF_UNICODE_STRING, - TranslatableItem.CONTENT_TYPE_RULE, - self.interaction.id, - rule_spec.rule_type) - - for hint in self.interaction.hints: - if hint.hint_content.html != '': - content_id_to_translatable_item[ - hint.hint_content.content_id - ] = TranslatableItem( - hint.hint_content.html, - TranslatableItem.DATA_FORMAT_HTML, - TranslatableItem.CONTENT_TYPE_HINT) - - solution = self.interaction.solution - if solution is not None and solution.explanation.html != '': - content_id_to_translatable_item[ - solution.explanation.content_id - ] = TranslatableItem( - solution.explanation.html, - 
TranslatableItem.DATA_FORMAT_HTML, - TranslatableItem.CONTENT_TYPE_SOLUTION) - - for ca_dict in self.interaction.customization_args.values(): - subtitled_htmls = ca_dict.get_subtitled_html() - for subtitled_html in subtitled_htmls: - html_string = subtitled_html.html - # Make sure we don't include content that only consists of - # numbers. See issue #13055. - if html_string != '' and not html_string.isnumeric(): - content_id_to_translatable_item[ - subtitled_html.content_id - ] = TranslatableItem( - html_string, - TranslatableItem.DATA_FORMAT_HTML, - TranslatableItem.CONTENT_TYPE_INTERACTION, - self.interaction.id) - - subtitled_unicodes = ca_dict.get_subtitled_unicode() - for subtitled_unicode in subtitled_unicodes: - if subtitled_unicode.unicode_str != '': - content_id_to_translatable_item[ - subtitled_unicode.content_id - ] = TranslatableItem( - subtitled_unicode.unicode_str, - TranslatableItem.DATA_FORMAT_UNICODE_STRING, - TranslatableItem.CONTENT_TYPE_INTERACTION, - self.interaction.id) - - return content_id_to_translatable_item - - def get_content_id_mapping_needing_translations(self, language_code): - """Returns all text html which can be translated in the given language. - - Args: - language_code: str. The abbreviated code of the language. - - Returns: - dict(str, TranslatableItem). A dict with key as content id and - value as TranslatableItem containing the content and the data - format. - """ - content_id_to_translatable_item = self._get_all_translatable_content() - available_translation_content_ids = ( - self.written_translations - .get_content_ids_that_are_correctly_translated(language_code)) - for content_id in available_translation_content_ids: - content_id_to_translatable_item.pop(content_id, None) - - # TODO(#7571): Add functionality to return the list of - # translations which needs update. - - return content_id_to_translatable_item - - def to_dict(self): + def to_dict(self) -> StateDict: """Returns a dict representing this State domain object. 
Returns: @@ -3294,69 +4257,88 @@ def to_dict(self): 'classifier_model_id': self.classifier_model_id, 'linked_skill_id': self.linked_skill_id, 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), - 'written_translations': self.written_translations.to_dict(), 'solicit_answer_details': self.solicit_answer_details, - 'card_is_checkpoint': self.card_is_checkpoint, - 'next_content_id_index': self.next_content_id_index + 'card_is_checkpoint': self.card_is_checkpoint } + # TODO(#16467): Remove `validate` argument after validating all Question + # states by writing a migration and audit job. As the validation for + # states is common between Exploration and Question and the Question + # data is not yet migrated, we do not want to call the validations + # while we load the Question. @classmethod - def from_dict(cls, state_dict): + def from_dict(cls, state_dict: StateDict, validate: bool = True) -> State: """Return a State domain object from a dict. Args: state_dict: dict. The dict representation of State object. + validate: bool. False, when the validations should not be called. Returns: State. The corresponding State domain object. 
""" content = SubtitledHtml.from_dict(state_dict['content']) - content.validate() + if validate: + content.validate() return cls( content, [param_domain.ParamChange.from_dict(param) for param in state_dict['param_changes']], - InteractionInstance.from_dict(state_dict['interaction']), + InteractionInstance.from_dict( + state_dict['interaction'], validate=validate), RecordedVoiceovers.from_dict(state_dict['recorded_voiceovers']), - WrittenTranslations.from_dict(state_dict['written_translations']), state_dict['solicit_answer_details'], state_dict['card_is_checkpoint'], - state_dict['next_content_id_index'], state_dict['linked_skill_id'], state_dict['classifier_model_id']) @classmethod def create_default_state( - cls, default_dest_state_name, is_initial_state=False): + cls, + default_dest_state_name: Optional[str], + content_id_for_state_content: str, + content_id_for_default_outcome: str, + is_initial_state: bool = False + ) -> State: """Return a State domain object with default value. Args: - default_dest_state_name: str. The default destination state. + default_dest_state_name: str|None. The default destination state, or + None if no default destination state is defined. is_initial_state: bool. Whether this state represents the initial state of an exploration. + content_id_for_state_content: str. The content id for the content. + content_id_for_default_outcome: str. The content id for the default + outcome. Returns: State. The corresponding State domain object. 
""" content_html = ( feconf.DEFAULT_INIT_STATE_CONTENT_STR if is_initial_state else '') - content_id = feconf.DEFAULT_NEW_STATE_CONTENT_ID + + recorded_voiceovers = RecordedVoiceovers({}) + recorded_voiceovers.add_content_id_for_voiceover( + content_id_for_state_content) + recorded_voiceovers.add_content_id_for_voiceover( + content_id_for_default_outcome) + return cls( - SubtitledHtml(content_id, content_html), + SubtitledHtml(content_id_for_state_content, content_html), [], InteractionInstance.create_default_interaction( - default_dest_state_name), - RecordedVoiceovers.from_dict(copy.deepcopy( - feconf.DEFAULT_RECORDED_VOICEOVERS)), - WrittenTranslations.from_dict( - copy.deepcopy(feconf.DEFAULT_WRITTEN_TRANSLATIONS)), - False, is_initial_state, 0) + default_dest_state_name, content_id_for_default_outcome), + recorded_voiceovers, False, is_initial_state) @classmethod def convert_html_fields_in_state( - cls, state_dict, conversion_fn, - state_uses_old_interaction_cust_args_schema=False, - state_uses_old_rule_template_schema=False): + cls, + state_dict: StateDict, + conversion_fn: Callable[[str], str], + state_schema_version: int = feconf.CURRENT_STATE_SCHEMA_VERSION, + state_uses_old_interaction_cust_args_schema: bool = False, + state_uses_old_rule_template_schema: bool = False + ) -> StateDict: """Applies a conversion function on all the html strings in a state to migrate them to a desired state. @@ -3364,6 +4346,7 @@ def convert_html_fields_in_state( state_dict: dict. The dict representation of State object. conversion_fn: function. The conversion function to be applied on the states_dict. + state_schema_version: int. The state schema version. state_uses_old_interaction_cust_args_schema: bool. 
Whether the interaction customization arguments contain SubtitledHtml and SubtitledUnicode dicts (should be True if prior to state @@ -3378,7 +4361,7 @@ def convert_html_fields_in_state( """ state_dict['content']['html'] = ( conversion_fn(state_dict['content']['html'])) - if state_dict['interaction']['default_outcome']: + if state_dict['interaction']['default_outcome'] is not None: state_dict['interaction']['default_outcome'] = ( Outcome.convert_html_in_outcome( state_dict['interaction']['default_outcome'], @@ -3402,33 +4385,19 @@ def convert_html_fields_in_state( answer_group, conversion_fn, html_field_types_to_rule_specs) ) - if 'written_translations' in state_dict.keys(): - state_dict['written_translations'] = ( - WrittenTranslations. - convert_html_in_written_translations( - state_dict['written_translations'], conversion_fn)) - for hint_index, hint in enumerate(state_dict['interaction']['hints']): state_dict['interaction']['hints'][hint_index] = ( Hint.convert_html_in_hint(hint, conversion_fn)) interaction_id = state_dict['interaction']['id'] - if interaction_id is None: - return state_dict - - # TODO(#11950): Drop the following 'if' clause once all snapshots have - # been migrated. This is currently causing issues in migrating old - # snapshots to schema v34 because MathExpressionInput was still around - # at the time. It is conceptually OK to ignore customization args here - # because the MathExpressionInput has no customization arg fields. 
- if interaction_id == 'MathExpressionInput': - if state_dict['interaction']['solution']: - state_dict['interaction']['solution']['explanation']['html'] = ( - conversion_fn(state_dict['interaction']['solution'][ - 'explanation']['html'])) + all_interaction_ids = ( + interaction_registry.Registry.get_all_interaction_ids() + ) + interaction_id_is_valid = interaction_id not in all_interaction_ids + if interaction_id_is_valid or interaction_id is None: return state_dict - if state_dict['interaction']['solution']: + if state_dict['interaction']['solution'] is not None: if state_uses_old_rule_template_schema: interaction_spec = ( interaction_registry.Registry @@ -3469,31 +4438,466 @@ def convert_html_fields_in_state( if interaction_customization_arg_has_html: if 'choices' in ( - state_dict['interaction']['customization_args'].keys()): - state_dict['interaction']['customization_args'][ - 'choices']['value'] = ([ - conversion_fn(html) - for html in state_dict[ - 'interaction']['customization_args'][ - 'choices']['value'] - ]) + state_dict['interaction']['customization_args'].keys() + ): + # Here we use cast because the above 'if' condition + # forces every cust. args' 'choices' key to have type + # Dict[str, List[str]]. + html_choices_ca_dict = cast( + Dict[str, List[str]], + state_dict['interaction']['customization_args'][ + 'choices'] + ) + html_choices_ca_dict['value'] = ([ + conversion_fn(html) + for html in html_choices_ca_dict['value'] + ]) else: + ca_specs_dict = ( + interaction_registry.Registry + .get_all_specs_for_state_schema_version( + state_schema_version, + can_fetch_latest_specs=True + )[interaction_id]['customization_arg_specs'] + ) state_dict['interaction'] = ( InteractionInstance.convert_html_in_interaction( state_dict['interaction'], + ca_specs_dict, conversion_fn )) return state_dict - def get_all_html_content_strings(self): - """Get all html content strings in the state. 
+ def get_content_html(self, content_id: str) -> Union[str, List[str]]: + """Returns the content belongs to a given content id of the object. + + Args: + content_id: str. The id of the content. + + Returns: + str. The html content corresponding to the given content id. + + Raises: + ValueError. The given content_id does not exist. + """ + content_id_to_translatable_content = ( + self.get_translatable_contents_collection() + .content_id_to_translatable_content) + + if content_id not in content_id_to_translatable_content: + raise ValueError('Content ID %s does not exist' % content_id) + + return content_id_to_translatable_content[content_id].content_value + + @classmethod + def traverse_v54_state_dict_for_contents( + cls, + state_dict: StateDict + ) -> Iterator[Tuple[ + Union[SubtitledHtmlDict, Dict[str, Union[str, List[str]]]], + translation_domain.ContentType, + Optional[str] + ]]: + """This method iterates throughout the state dict and yields the value + for each field. The yielded value is used for generating and updating + the content-ids for the fields in the state in their respective methods. + + Args: + state_dict: StateDict. State object represented in the dict format. + + Yields: + (str|list(str), str). A tuple containing content and content-id. 
+ """ + yield ( + state_dict['content'], + translation_domain.ContentType.CONTENT, + None) + + interaction = state_dict['interaction'] + + default_outcome = interaction['default_outcome'] + if default_outcome is not None: + yield ( + default_outcome['feedback'], + translation_domain.ContentType.DEFAULT_OUTCOME, + None) + + answer_groups = interaction['answer_groups'] + for answer_group in answer_groups: + outcome = answer_group['outcome'] + yield ( + outcome['feedback'], + translation_domain.ContentType.FEEDBACK, + None) + + if interaction['id'] not in ['TextInput', 'SetInput']: + continue + + for rule_spec in answer_group['rule_specs']: + for input_name in sorted(rule_spec['inputs'].keys()): + input_value = rule_spec['inputs'][input_name] + if not isinstance(input_value, dict): + continue + if 'normalizedStrSet' in input_value: + yield ( + input_value, + translation_domain.ContentType.RULE, + 'input') + if 'unicodeStrSet' in input_value: + yield ( + input_value, + translation_domain.ContentType.RULE, + 'input') + + for hint in interaction['hints']: + yield ( + hint['hint_content'], translation_domain.ContentType.HINT, None) + + solution = interaction['solution'] + if solution is not None: + yield ( + solution['explanation'], + translation_domain.ContentType.SOLUTION, + None) + + interaction_id = interaction['id'] + customisation_args = interaction['customization_args'] + interaction_specs = ( + interaction_registry.Registry + .get_all_specs_for_state_schema_version( + feconf.CURRENT_STATE_SCHEMA_VERSION, + can_fetch_latest_specs=True + ) + ) + if interaction_id in interaction_specs: + ca_specs_dict = interaction_specs[interaction_id][ + 'customization_arg_specs'] + for spec in ca_specs_dict: + customisation_arg = customisation_args[spec['name']] + contents = ( + InteractionCustomizationArg.traverse_by_schema_and_get( + spec['schema'], customisation_arg['value'], [ + schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE, + schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML], + 
lambda x: x + ) + ) + for content in contents: + yield ( + content, + translation_domain.ContentType.CUSTOMIZATION_ARG, + spec['name'] + ) + + @classmethod + def update_old_content_id_to_new_content_id_in_v54_states( + cls, + states_dict: Dict[str, StateDict] + ) -> Tuple[Dict[str, StateDict], int]: + """Updates the old content-ids from the state fields like hints, + solution, etc with the newly generated content id. + + Args: + states_dict: list(dict(State)). List of dictionaries, where each + dict represents a state object. + + Returns: + states_dict: list(dict(State)). List of state dicts, with updated + content-ids. + """ + PossibleContentIdsType = Union[str, List[str], List[List[str]]] + + def _replace_content_id( + old_id: PossibleContentIdsType, + id_mapping: Dict[str, str] + ) -> str: + """Replace old Id with the new Id.""" + assert isinstance(old_id, str) + + # INVALID_CONTENT_ID doesn't corresponds to any existing content in + # the state. Such Ids cannot be replaced with any new id. 
+ if old_id == feconf.INVALID_CONTENT_ID: + return old_id + + return id_mapping[old_id] + + object_content_ids_replacers: Dict[ + str, + Callable[ + [PossibleContentIdsType, Dict[str, str]], PossibleContentIdsType + ] + ] = {} + + object_content_ids_replacers['TranslatableHtmlContentId'] = ( + _replace_content_id) + + object_content_ids_replacers['SetOfTranslatableHtmlContentIds'] = ( + lambda ids_set, id_mapping: [ + _replace_content_id(old_id, id_mapping) + for old_id in ids_set + ] + ) + object_content_ids_replacers[ + 'ListOfSetsOfTranslatableHtmlContentIds'] = ( + lambda items, id_mapping: [ + [_replace_content_id(old_id, id_mapping)for old_id in ids_set] + for ids_set in items + ] + ) + content_id_generator = translation_domain.ContentIdGenerator() + for state_name in sorted(states_dict.keys()): + state: StateDict = states_dict[state_name] + new_voiceovers_mapping: Dict[str, Dict[str, VoiceoverDict]] = {} + old_to_new_content_id: Dict[str, str] = {} + old_voiceovers_mapping = state['recorded_voiceovers'][ + 'voiceovers_mapping'] + + for content, content_type, extra_prefix in ( + cls.traverse_v54_state_dict_for_contents(state) + ): + new_content_id = content_id_generator.generate( + content_type, extra_prefix=extra_prefix) + content_id_key = 'content_id' + if content_type == translation_domain.ContentType.RULE: + content_id_key = 'contentId' + + # Here we use MyPy ignore because the content Id key for the + # contents in the rule inputs is contentId instead of + # content_id. + old_content_id = content[content_id_key] # type: ignore[misc] + # Here we use MyPy ignore because the content Id key for the + # contents in the rule inputs is contentId instead of + # content_id. 
+ content[content_id_key] = new_content_id # type: ignore[index] + + assert isinstance(old_content_id, str) + old_to_new_content_id[old_content_id] = new_content_id + + new_voiceovers_mapping[new_content_id] = old_voiceovers_mapping[ + old_content_id] + + state['recorded_voiceovers']['voiceovers_mapping'] = ( + new_voiceovers_mapping + ) + + interaction_specs = ( + interaction_registry.Registry + .get_all_specs_for_state_schema_version( + feconf.CURRENT_STATE_SCHEMA_VERSION, + can_fetch_latest_specs=True + ) + ) + interaction_id = state['interaction']['id'] + if interaction_id is None: + continue + + interaction = state['interaction'] + answer_groups = interaction['answer_groups'] + rule_descriptions = interaction_specs[interaction_id][ + 'rule_descriptions'] + answer_type = interaction_specs[interaction_id]['answer_type'] + + if interaction['solution']: + solution_dict = interaction['solution'] + assert solution_dict is not None + if answer_type in object_content_ids_replacers: + # Here we use cast because correct_answer can be of + # different types but the 'if' case above covers only for + # the PossibleContentIdsType. + correct_answer = cast( + PossibleContentIdsType, solution_dict['correct_answer']) + solution_dict['correct_answer'] = ( + object_content_ids_replacers[answer_type]( + correct_answer, old_to_new_content_id + ) + ) + + if not rule_descriptions: + continue + + rules_variables = { + name: re.findall(r'\{\{(.+?)\|(.+?)\}\}', description) + for name, description in rule_descriptions.items() + } + + for answer_group in answer_groups: + for rule_spec in answer_group['rule_specs']: + rule_inputs = rule_spec['inputs'] + rule_type = rule_spec['rule_type'] + for key, value_class in rules_variables[rule_type]: + if value_class not in object_content_ids_replacers: + continue + # Here we use cast because rule input can be a dict but + # the 'if' case above covers only for the + # PossibleContentIdsType. 
+ rule_input = cast( + PossibleContentIdsType, rule_inputs[key]) + rule_inputs[key] = object_content_ids_replacers[ + value_class](rule_input, old_to_new_content_id) + + return states_dict, content_id_generator.next_content_id_index + + @classmethod + def generate_old_content_id_to_new_content_id_in_v54_states( + cls, + states_dict: Dict[str, StateDict] + ) -> Tuple[Dict[str, Dict[str, str]], int]: + """Generates the new content-id for each state field based on + next_content_id_index variable. + + Args: + states_dict: list(dict(State)). List of dictionaries, where each + dict represents a state object. + + Returns: + (dict(str, dict(str, str)), str). A tuple with the first field as a + dict and the second field is the value of the next_content_id_index. + The first field is a dict with state name as a key and + old-content-id to new-content-id dict as a value. + """ + content_id_generator = translation_domain.ContentIdGenerator() + states_to_content_id = {} + + for state_name in sorted(states_dict.keys()): + old_id_to_new_id: Dict[str, str] = {} + + for content, content_type, extra_prefix in ( + cls.traverse_v54_state_dict_for_contents( + states_dict[state_name]) + ): + if content_type == translation_domain.ContentType.RULE: + # Here we use MyPy ignore because the content Id key for the + # contents in the rule inputs is contentId instead of + # content_id. + content_id = content['contentId'] # type: ignore[misc] + else: + content_id = content['content_id'] + + assert isinstance(content_id, str) + old_id_to_new_id[content_id] = content_id_generator.generate( + content_type, extra_prefix=extra_prefix) + + states_to_content_id[state_name] = old_id_to_new_id + + return ( + states_to_content_id, + content_id_generator.next_content_id_index + ) + + def has_content_id(self, content_id: str) -> bool: + """Returns whether a given content ID is available in the translatable + content. + + Args: + content_id: str. 
The content ID that needs to be checked for the + availability. + + Returns: + bool. A boolean that indicates the availability of the content ID + in the translatable content. + """ + + available_translate_content = ( + self.get_translatable_contents_collection() + .content_id_to_translatable_content) + return bool(content_id in available_translate_content) + + +class StateVersionHistory: + """Class to represent an element of the version history list of a state. + The version history list of a state is the list of exploration versions + in which the state has been edited. + + Attributes: + previously_edited_in_version: int. The version number of the + exploration in which the state was previously edited. + state_name_in_previous_version: str. The name of the state in the + previously edited version. It is useful in case of state renames. + committer_id: str. The id of the user who committed the changes in the + previously edited version. + """ + + def __init__( + self, + previously_edited_in_version: Optional[int], + state_name_in_previous_version: Optional[str], + committer_id: str + ) -> None: + """Initializes the StateVersionHistory domain object. + + Args: + previously_edited_in_version: int. The version number of the + exploration on which the state was previously edited. + state_name_in_previous_version: str. The name of the state in the + previously edited version. It is useful in case of state + renames. + committer_id: str. The id of the user who committed the changes in + the previously edited version. + """ + self.previously_edited_in_version = previously_edited_in_version + self.state_name_in_previous_version = state_name_in_previous_version + self.committer_id = committer_id + + def to_dict(self) -> StateVersionHistoryDict: + """Returns a dict representation of the StateVersionHistory domain + object. + + Returns: + dict. The dict representation of the StateVersionHistory domain + object. 
+ """ + return { + 'previously_edited_in_version': self.previously_edited_in_version, + 'state_name_in_previous_version': ( + self.state_name_in_previous_version), + 'committer_id': self.committer_id + } + + @classmethod + def from_dict( + cls, + state_version_history_dict: StateVersionHistoryDict + ) -> StateVersionHistory: + """Return a StateVersionHistory domain object from a dict. + + Args: + state_version_history_dict: dict. The dict representation of + StateVersionHistory object. Returns: - list(str). The list of all html content strings in the interaction. + StateVersionHistory. The corresponding StateVersionHistory domain + object. """ - html_list = ( - self.written_translations.get_all_html_content_strings() + - self.interaction.get_all_html_content_strings() + [ - self.content.html]) - return html_list + return cls( + state_version_history_dict['previously_edited_in_version'], + state_version_history_dict['state_name_in_previous_version'], + state_version_history_dict['committer_id'] + ) + + +# Note: This union type depends on several classes like SubtitledHtml, +# SubtitledHtmlDict, SubtitledUnicode and SubtitledUnicodeDict. So, it +# has to be defined after those classes are defined, otherwise backend +# tests will fail with 'module has no attribute' error. +UnionOfCustomizationArgsDictValues = Union[ + str, + int, + bool, + List[str], + List[SubtitledHtml], + List[SubtitledHtmlDict], + SubtitledHtmlDict, + SubtitledUnicode, + SubtitledUnicodeDict, + domain.ImageAndRegionDict, + domain.GraphDict +] + + +# Note: This Dict type depends on UnionOfCustomizationArgsDictValues so it +# has to be defined after UnionOfCustomizationArgsDictValues is defined, +# otherwise backend tests will fail with 'module has no attribute' error. 
+CustomizationArgsDictType = Dict[ + str, Dict[str, UnionOfCustomizationArgsDictValues] +] diff --git a/core/domain/state_domain_test.py b/core/domain/state_domain_test.py index fb799c135344..64d1651bdaa1 100644 --- a/core/domain/state_domain_test.py +++ b/core/domain/state_domain_test.py @@ -34,14 +34,32 @@ from core.domain import interaction_registry from core.domain import rules_registry from core.domain import state_domain -from core.domain import translatable_object_registry +from core.domain import translation_domain from core.tests import test_utils +from extensions.interactions import base + +from typing import Dict, List, Optional, Tuple, Type, Union class StateDomainUnitTests(test_utils.GenericTestBase): """Test methods operating on states.""" - def test_get_all_html_in_exploration_with_drag_and_drop_interaction(self): + def setUp(self) -> None: + super().setUp() + translation_dict = { + 'content_id_3': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + True + ) + } + self.dummy_entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict) + + def test_get_all_html_in_exploration_with_drag_and_drop_interaction( + self + ) -> None: """Test the method for extracting all the HTML from a state having DragAndDropSortInput interaction. """ @@ -49,35 +67,38 @@ def test_get_all_html_in_exploration_with_drag_and_drop_interaction(self): 'exp_id') exploration.add_states(['State1']) state = exploration.states['State1'] - state_content_dict = { - 'content_id': 'content', + state_content_dict: state_domain.SubtitledHtmlDict = { + 'content_id': 'content_0', 'html': '

    state content html

    ' } - state_customization_args_dict = { + choices_subtitled_dicts: List[state_domain.SubtitledHtmlDict] = [ + { + 'content_id': 'ca_choices_0', + 'html': '

    state customization arg html 1

    ' + }, { + 'content_id': 'ca_choices_1', + 'html': '

    state customization arg html 2

    ' + }, { + 'content_id': 'ca_choices_2', + 'html': '

    state customization arg html 3

    ' + }, { + 'content_id': 'ca_choices_3', + 'html': '

    state customization arg html 4

    ' + } + ] + state_customization_args_dict: ( + state_domain.CustomizationArgsDictType + ) = { 'choices': { - 'value': [ - { - 'content_id': 'ca_choices_0', - 'html': '

    state customization arg html 1

    ' - }, { - 'content_id': 'ca_choices_1', - 'html': '

    state customization arg html 2

    ' - }, { - 'content_id': 'ca_choices_2', - 'html': '

    state customization arg html 3

    ' - }, { - 'content_id': 'ca_choices_3', - 'html': '

    state customization arg html 4

    ' - } - ] + 'value': choices_subtitled_dicts }, 'allowMultipleItemsInSamePosition': { - 'value': False + 'value': True } } state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml( + 'Introduction', None, state_domain.SubtitledHtml( 'feedback_1', '

    State Feedback

    '), False, [], None, None), [ @@ -113,7 +134,7 @@ def test_get_all_html_in_exploration_with_drag_and_drop_interaction(self): [], None ) - state_solution_dict = { + state_solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': True, 'correct_answer': [ '

    state customization arg html 1

    ', @@ -122,163 +143,10 @@ def test_get_all_html_in_exploration_with_drag_and_drop_interaction(self): '

    state customization arg html 4

    ' ], 'explanation': { - 'content_id': 'solution', + 'content_id': 'solution_3', 'html': '

    This is solution for state1

    ' } } - state_written_translations_dict = { - 'translations_mapping': { - 'content': { - 'en': { - 'data_format': 'html', - 'translation': - '

    state written_translation content-en

    ', - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': - '

    state written_translation content-hi

    ', - 'needs_update': False - } - }, - 'ca_choices_0': { - 'hi': { - 'data_format': 'html', - 'translation': - ( - '

    state written_translation ca_choices_0-hi' - '

    ' - ), - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': - ( - '

    state written_translation ca_choices_0' - '-en

    ' - ), - 'needs_update': False - } - }, - 'ca_choices_1': { - 'hi': { - 'data_format': 'html', - 'translation': - ( - '

    state written_translation ca_choices_1-hi' - '

    ' - ), - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': - ( - '

    state written_translation ca_choices_1-en' - '

    ' - ), - 'needs_update': False - } - }, - 'ca_choices_2': { - 'hi': { - 'data_format': 'html', - 'translation': - ( - '

    state written_translation ca_choices_2-hi' - '

    ' - ), - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': - ( - '

    state written_translation ca_choices_2-en' - '

    ' - ), - 'needs_update': False - } - }, - 'ca_choices_3': { - 'hi': { - 'data_format': 'html', - 'translation': ( - '

    state written_translation ca_choices_3-hi' - '

    ' - ), - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': - ( - '

    state written_translation ca_choices_3-en' - '

    ' - ), - 'needs_update': False - } - }, - 'default_outcome': { - 'hi': { - 'data_format': 'html', - 'translation': - '

    state written_translation outcome-hi

    ', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': - '

    state written_translation outcome-en

    ', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': - '

    state written_translation feedback-hi

    ', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': - '

    state written_translation feedback-en

    ', - 'needs_update': False - } - }, - 'hint_1': { - 'hi': { - 'data_format': 'html', - 'translation': - '

    state written_translation hint_1-hi

    ', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': - '

    state written_translation hint_1-en

    ', - 'needs_update': False - } - }, - 'solution': { - 'hi': { - 'data_format': 'html', - 'translation': - '

    state written_translation solution-hi

    ', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': - '

    state written_translation solution-en

    ', - 'needs_update': False - } - } - } - } state_hint_list = [ state_domain.Hint( state_domain.SubtitledHtml( @@ -296,7 +164,7 @@ def test_get_all_html_in_exploration_with_drag_and_drop_interaction(self): ['

    state customization arg html 4

    '] ], 'explanation': { - 'content_id': 'solution', + 'content_id': 'solution_3', 'html': '

    This is solution for state1

    ' } } @@ -306,17 +174,15 @@ def test_get_all_html_in_exploration_with_drag_and_drop_interaction(self): state.update_interaction_id('DragAndDropSortInput') state.update_interaction_customization_args( state_customization_args_dict) - state.update_next_content_id_index(4) state.update_interaction_hints(state_hint_list) + # Ruling out the possibility of None for mypy type checking. + assert state.interaction.id is not None solution = state_domain.Solution.from_dict( state.interaction.id, state_solution_dict) state.update_interaction_solution(solution) state.update_interaction_answer_groups( [state_answer_group]) - state.update_written_translations( - state_domain.WrittenTranslations.from_dict( - state_written_translations_dict)) exp_services.save_new_exploration('owner_id', exploration) @@ -324,10 +190,15 @@ def test_get_all_html_in_exploration_with_drag_and_drop_interaction(self): rules_registry.Registry.get_html_field_types_to_rule_specs( state_schema_version=41)) - def mock_get_html_field_types_to_rule_specs(unused_cls): + def mock_get_html_field_types_to_rule_specs( + unused_cls: Type[state_domain.State] + ) -> Dict[str, rules_registry.RuleSpecsExtensionDict]: return mock_html_field_types_to_rule_specs_dict - def mock_get_interaction_by_id(cls, interaction_id): + def mock_get_interaction_by_id( + cls: Type[interaction_registry.Registry], + interaction_id: str + ) -> base.BaseInteraction: interaction = copy.deepcopy(cls._interactions[interaction_id]) # pylint: disable=protected-access interaction.answer_type = 'ListOfSetsOfHtmlStrings' return interaction @@ -340,30 +211,18 @@ def mock_get_interaction_by_id(cls, interaction_id): interaction_registry.Registry, 'get_interaction_by_id', classmethod(mock_get_interaction_by_id)) + html_list: List[str] = [] + + def _append_to_list(html_str: str) -> str: + html_list.append(html_str) + return html_str with rules_registry_swap, interaction_registry_swap: - html_list = state.get_all_html_content_strings() + 
state_domain.State.convert_html_fields_in_state( + state.to_dict(), _append_to_list) self.assertItemsEqual( html_list, [ - '

    state written_translation solution-hi

    ', - '

    state written_translation solution-en

    ', - '

    state written_translation content-hi

    ', - '

    state written_translation content-en

    ', - '

    state written_translation feedback-hi

    ', - '

    state written_translation feedback-en

    ', - '

    state written_translation hint_1-hi

    ', - '

    state written_translation hint_1-en

    ', - '

    state written_translation outcome-hi

    ', - '

    state written_translation outcome-en

    ', - '

    state written_translation ca_choices_0-hi

    ', - '

    state written_translation ca_choices_0-en

    ', - '

    state written_translation ca_choices_1-hi

    ', - '

    state written_translation ca_choices_1-en

    ', - '

    state written_translation ca_choices_2-hi

    ', - '

    state written_translation ca_choices_2-en

    ', - '

    state written_translation ca_choices_3-hi

    ', - '

    state written_translation ca_choices_3-en

    ', '

    State Feedback

    ', '

    IsEqualToOrdering rule_spec htmls

    ', '

    HasElementXAtPositionY rule_spec html

    ', @@ -379,13 +238,11 @@ def mock_get_interaction_by_id(cls, interaction_id): '

    state customization arg html 2

    ', '

    state customization arg html 3

    ', '

    state customization arg html 4

    ', - '

    state customization arg html 1

    ', - '

    state customization arg html 2

    ', - '

    state customization arg html 3

    ', - '

    state customization arg html 4

    ', '

    state content html

    ']) - def test_get_all_html_in_exploration_with_text_input_interaction(self): + def test_get_all_html_in_exploration_with_text_input_interaction( + self + ) -> None: """Test the method for extracting all the HTML from a state having TextInput interaction. """ @@ -395,13 +252,13 @@ def test_get_all_html_in_exploration_with_text_input_interaction(self): exploration.add_states(['State1']) state = exploration.states['State1'] - state_content_dict = { + state_content_dict: state_domain.SubtitledHtmlDict = { 'content_id': 'content', 'html': '

    state content html

    ' } state_answer_group = [state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback_1', '

    state outcome html

    '), False, [], None, None), [ @@ -416,7 +273,7 @@ def test_get_all_html_in_exploration_with_text_input_interaction(self): None )] state_default_outcome = state_domain.Outcome( - 'State1', state_domain.SubtitledHtml( + 'State1', None, state_domain.SubtitledHtml( 'default_outcome', '

    Default outcome for State1

    '), False, [], None, None ) @@ -432,7 +289,7 @@ def test_get_all_html_in_exploration_with_text_input_interaction(self): ) ), ] - state_solution_dict = { + state_solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': True, 'correct_answer': 'Answer1', 'explanation': { @@ -440,17 +297,17 @@ def test_get_all_html_in_exploration_with_text_input_interaction(self): 'html': '

    This is solution for state1

    ' } } - state_interaction_cust_args = { + state_interaction_cust_args: state_domain.CustomizationArgsDictType = { 'placeholder': { 'value': { 'content_id': 'ca_placeholder_0', 'unicode_str': '' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } - state.update_next_content_id_index(3) state.update_content( state_domain.SubtitledHtml.from_dict(state_content_dict)) state.update_interaction_id('TextInput') @@ -459,13 +316,23 @@ def test_get_all_html_in_exploration_with_text_input_interaction(self): state_answer_group) state.update_interaction_default_outcome(state_default_outcome) state.update_interaction_hints(state_hint_list) + # Ruling out the possibility of None for mypy type checking. + assert state.interaction.id is not None solution = state_domain.Solution.from_dict( state.interaction.id, state_solution_dict) state.update_interaction_solution(solution) exp_services.save_new_exploration('owner_id', exploration) - html_list = state.get_all_html_content_strings() - self.assertEqual( + html_list: List[str] = [] + + def _append_to_list(html_str: str) -> str: + html_list.append(html_str) + return html_str + + state_domain.State.convert_html_fields_in_state( + state.to_dict(), _append_to_list) + + self.assertItemsEqual( html_list, [ '

    state outcome html

    ', @@ -475,7 +342,9 @@ def test_get_all_html_in_exploration_with_text_input_interaction(self): '

    This is solution for state1

    ', '

    state content html

    ']) - def test_get_all_html_in_exploration_with_item_selection_interaction(self): + def test_get_all_html_in_exploration_with_item_selection_interaction( + self + ) -> None: """Test the method for extracting all the HTML from a state having ItemSelectionInput interaction. """ @@ -485,11 +354,28 @@ def test_get_all_html_in_exploration_with_item_selection_interaction(self): exploration.add_states(['State1']) state = exploration.states['State1'] - state_content_dict = { + state_content_dict: state_domain.SubtitledHtmlDict = { 'content_id': 'content', 'html': '

    state content html

    ' } - state_customization_args_dict = { + choices_subtitled_dicts: List[state_domain.SubtitledHtmlDict] = [ + { + 'content_id': 'ca_choices_0', + 'html': '

    init_state customization arg html 1

    ' + }, { + 'content_id': 'ca_choices_1', + 'html': '

    init_state customization arg html 2

    ' + }, { + 'content_id': 'ca_choices_2', + 'html': '

    init_state customization arg html 3

    ' + }, { + 'content_id': 'ca_choices_3', + 'html': '

    init_state customization arg html 4

    ' + }, + ] + state_customization_args_dict: Dict[ + str, Dict[str, Union[int, List[state_domain.SubtitledHtmlDict]]] + ] = { 'maxAllowableSelectionCount': { 'value': 1 }, @@ -497,26 +383,12 @@ def test_get_all_html_in_exploration_with_item_selection_interaction(self): 'value': 1 }, 'choices': { - 'value': [ - { - 'content_id': 'ca_choices_0', - 'html': '

    init_state customization arg html 1

    ' - }, { - 'content_id': 'ca_choices_1', - 'html': '

    init_state customization arg html 2

    ' - }, { - 'content_id': 'ca_choices_2', - 'html': '

    init_state customization arg html 3

    ' - }, { - 'content_id': 'ca_choices_3', - 'html': '

    init_state customization arg html 4

    ' - }, - ] + 'value': choices_subtitled_dicts } } state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback', '

    state outcome html

    '), False, [], None, None), [ @@ -545,7 +417,7 @@ def test_get_all_html_in_exploration_with_item_selection_interaction(self): [], None ) - state_solution_dict = { + state_solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': True, 'correct_answer': [ '

    state customization arg html 1

    ', @@ -572,9 +444,10 @@ def test_get_all_html_in_exploration_with_item_selection_interaction(self): state.update_interaction_answer_groups([state_answer_group]) state.update_interaction_customization_args( state_customization_args_dict) - state.update_next_content_id_index(4) state.update_interaction_hints(state_hint_list) + # Ruling out the possibility of None for mypy type checking. + assert state.interaction.id is not None solution = state_domain.Solution.from_dict( state.interaction.id, state_solution_dict) state.update_interaction_solution(solution) @@ -584,10 +457,15 @@ def test_get_all_html_in_exploration_with_item_selection_interaction(self): rules_registry.Registry.get_html_field_types_to_rule_specs( state_schema_version=41)) - def mock_get_html_field_types_to_rule_specs(unused_cls): + def mock_get_html_field_types_to_rule_specs( + unused_cls: Type[state_domain.State] + ) -> Dict[str, rules_registry.RuleSpecsExtensionDict]: return mock_html_field_types_to_rule_specs_dict - def mock_get_interaction_by_id(cls, interaction_id): + def mock_get_interaction_by_id( + cls: Type[interaction_registry.Registry], + interaction_id: str + ) -> base.BaseInteraction: interaction = copy.deepcopy(cls._interactions[interaction_id]) # pylint: disable=protected-access interaction.answer_type = 'SetOfHtmlString' interaction.can_have_solution = True @@ -601,10 +479,16 @@ def mock_get_interaction_by_id(cls, interaction_id): interaction_registry.Registry, 'get_interaction_by_id', classmethod(mock_get_interaction_by_id)) + html_list: List[str] = [] + + def _append_to_list(html_str: str) -> str: + html_list.append(html_str) + return html_str with rules_registry_swap, interaction_registry_swap: - html_list = state.get_all_html_content_strings() + state_domain.State.convert_html_fields_in_state( + state.to_dict(), _append_to_list) - self.assertEqual( + self.assertItemsEqual( html_list, [ '

    state outcome html

    ', @@ -614,17 +498,13 @@ def mock_get_interaction_by_id(cls, interaction_id): '

    DoesNotContainAtLeastOneOf rule_spec html

    ', '', '

    Hello, this is html1 for hint 1

    ', '

    This is solution for state1

    ', - '

    state customization arg html 1

    ', - '

    state customization arg html 2

    ', - '

    state customization arg html 3

    ', - '

    state customization arg html 4

    ', '

    init_state customization arg html 1

    ', '

    init_state customization arg html 2

    ', '

    init_state customization arg html 3

    ', '

    init_state customization arg html 4

    ', '

    state content html

    ']) - def test_rule_spec_with_invalid_html_format(self): + def test_rule_spec_with_invalid_html_format(self) -> None: """Test the method for extracting all the HTML from a state when the rule_spec has invalid html format. """ @@ -635,7 +515,7 @@ def test_rule_spec_with_invalid_html_format(self): state = exploration.states['State1'] state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback', '

    state outcome html

    '), False, [], None, None), [ @@ -676,19 +556,22 @@ def test_rule_spec_with_invalid_html_format(self): mock_html_field_types_to_rule_specs_dict.values()): html_type_dict['format'] = 'invalid format' - def mock_get_html_field_types_to_rule_specs(unused_cls): + def mock_get_html_field_types_to_rule_specs( + unused_cls: Type[state_domain.State] + ) -> Dict[str, rules_registry.RuleSpecsExtensionDict]: return mock_html_field_types_to_rule_specs_dict with self.swap( rules_registry.Registry, 'get_html_field_types_to_rule_specs', classmethod(mock_get_html_field_types_to_rule_specs) ): - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The rule spec does not belong to a valid format.'): - state.get_all_html_content_strings() + state_domain.State.convert_html_fields_in_state( + state.to_dict(), lambda x: x) - def test_update_customization_args_with_invalid_content_id(self): + def test_update_customization_args_with_invalid_content_id(self) -> None: """Test the method for updating interaction customization arguments when a content_id is invalid (set to None). """ @@ -697,7 +580,9 @@ def test_update_customization_args_with_invalid_content_id(self): 'exp_id') exploration.add_states(['State1']) state = exploration.states['State1'] - state_customization_args_dict = { + state_customization_args_dict: Dict[ + str, Dict[str, Union[List[Dict[str, Optional[str]]], int]] + ] = { 'maxAllowableSelectionCount': { 'value': 1 }, @@ -717,15 +602,18 @@ def test_update_customization_args_with_invalid_content_id(self): } } + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
state.update_interaction_id('ItemSelectionInput') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected content id to be a string, received None' ): state.update_interaction_customization_args( - state_customization_args_dict) + state_customization_args_dict) # type: ignore[arg-type] - def test_rule_spec_with_html_having_invalid_input_variable(self): + def test_rule_spec_with_html_having_invalid_input_variable(self) -> None: """Test the method for extracting all the HTML from a state when the rule_spec has html but the input variable is invalid. """ @@ -736,7 +624,7 @@ def test_rule_spec_with_html_having_invalid_input_variable(self): state = exploration.states['State1'] state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback', '

    state outcome html

    '), False, [], None, None), [ @@ -749,7 +637,26 @@ def test_rule_spec_with_html_having_invalid_input_variable(self): [], None ) - state_customization_args_dict = { + + choices_subtitled_dicts: List[state_domain.SubtitledHtmlDict] = [ + { + 'content_id': 'ca_choices_0', + 'html': '

    init_state customization arg html 1

    ' + }, { + 'content_id': 'ca_choices_1', + 'html': '

    init_state customization arg html 2

    ' + }, { + 'content_id': 'ca_choices_2', + 'html': '

    init_state customization arg html 3

    ' + }, { + 'content_id': 'ca_choices_3', + 'html': '

    init_state customization arg html 4

    ' + } + ] + + state_customization_args_dict: Dict[ + str, Dict[str, Union[List[state_domain.SubtitledHtmlDict], int]] + ] = { 'maxAllowableSelectionCount': { 'value': 1 }, @@ -757,21 +664,7 @@ def test_rule_spec_with_html_having_invalid_input_variable(self): 'value': 1 }, 'choices': { - 'value': [ - { - 'content_id': 'ca_choices_0', - 'html': '

    init_state customization arg html 1

    ' - }, { - 'content_id': 'ca_choices_1', - 'html': '

    init_state customization arg html 2

    ' - }, { - 'content_id': 'ca_choices_2', - 'html': '

    init_state customization arg html 3

    ' - }, { - 'content_id': 'ca_choices_3', - 'html': '

    init_state customization arg html 4

    ' - } - ] + 'value': choices_subtitled_dicts } } @@ -789,20 +682,23 @@ def test_rule_spec_with_html_having_invalid_input_variable(self): html_type_dict['ruleTypes']['Equals']['htmlInputVariables'] = ( ['y']) - def mock_get_html_field_types_to_rule_specs(unused_cls): + def mock_get_html_field_types_to_rule_specs( + unused_cls: Type[state_domain.State] + ) -> Dict[str, rules_registry.RuleSpecsExtensionDict]: return mock_html_field_types_to_rule_specs_dict with self.swap( rules_registry.Registry, 'get_html_field_types_to_rule_specs', classmethod(mock_get_html_field_types_to_rule_specs) ): - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Rule spec should have at least one valid input variable with ' 'Html in it.'): - state.get_all_html_content_strings() + state_domain.State.convert_html_fields_in_state( + state.to_dict(), lambda x: x) - def test_get_all_html_when_solution_has_invalid_answer_type(self): + def test_get_all_html_when_solution_has_invalid_answer_type(self) -> None: """Test the method for extracting all the HTML from a state when the interaction has a solution but the answer_type for the corrent_answer is invalid. @@ -811,27 +707,30 @@ def test_get_all_html_when_solution_has_invalid_answer_type(self): 'exp_id') exploration.add_states(['State1']) state = exploration.states['State1'] - state_content_dict = { + state_content_dict: state_domain.SubtitledHtmlDict = { 'content_id': 'content', 'html': '

    state content html

    ' } - state_customization_args_dict = { + choices_subtitled_dicts: List[state_domain.SubtitledHtmlDict] = [ + { + 'content_id': 'ca_choices_0', + 'html': '

    state customization arg html 1

    ' + }, { + 'content_id': 'ca_choices_1', + 'html': '

    state customization arg html 2

    ' + }, { + 'content_id': 'ca_choices_2', + 'html': '

    state customization arg html 3

    ' + }, { + 'content_id': 'ca_choices_3', + 'html': '

    state customization arg html 4

    ' + } + ] + state_customization_args_dict: Dict[ + str, Dict[str, Union[List[state_domain.SubtitledHtmlDict], bool]] + ] = { 'choices': { - 'value': [ - { - 'content_id': 'ca_choices_0', - 'html': '

    state customization arg html 1

    ' - }, { - 'content_id': 'ca_choices_1', - 'html': '

    state customization arg html 2

    ' - }, { - 'content_id': 'ca_choices_2', - 'html': '

    state customization arg html 3

    ' - }, { - 'content_id': 'ca_choices_3', - 'html': '

    state customization arg html 4

    ' - } - ] + 'value': choices_subtitled_dicts }, 'allowMultipleItemsInSamePosition': { 'value': False @@ -846,8 +745,7 @@ def test_get_all_html_when_solution_has_invalid_answer_type(self): ) ] - state_solution_dict = { - 'interaction_id': '', + state_solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': True, 'correct_answer': [ ['

    state customization arg html 1

    '], @@ -866,8 +764,9 @@ def test_get_all_html_when_solution_has_invalid_answer_type(self): state.update_interaction_id('DragAndDropSortInput') state.update_interaction_customization_args( state_customization_args_dict) - state.update_next_content_id_index(4) state.update_interaction_hints(state_hint_list) + # Ruling out the possibility of None for mypy type checking. + assert state.interaction.id is not None solution = state_domain.Solution.from_dict( state.interaction.id, state_solution_dict) state.update_interaction_solution(solution) @@ -882,19 +781,22 @@ def test_get_all_html_when_solution_has_invalid_answer_type(self): rules_registry.Registry.get_html_field_types_to_rule_specs( state_schema_version=41)) - def mock_get_html_field_types_to_rule_specs(unused_cls): + def mock_get_html_field_types_to_rule_specs( + unused_cls: Type[state_domain.State] + ) -> Dict[str, rules_registry.RuleSpecsExtensionDict]: return mock_html_field_types_to_rule_specs_dict with self.swap( rules_registry.Registry, 'get_html_field_types_to_rule_specs', classmethod(mock_get_html_field_types_to_rule_specs)): - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The solution does not have a valid ' 'correct_answer type.'): - state.get_all_html_content_strings() + state_domain.State.convert_html_fields_in_state( + state.to_dict(), lambda x: x) - def test_get_all_html_when_interaction_is_none(self): + def test_get_all_html_when_interaction_is_none(self) -> None: """Test the method for extracting all the HTML from a state when the state has no interaction. """ @@ -902,8 +804,8 @@ def test_get_all_html_when_interaction_is_none(self): 'exp_id') exploration.add_states(['State1']) state = exploration.states['State1'] - state_content_dict = { - 'content_id': 'content', + state_content_dict: state_domain.SubtitledHtmlDict = { + 'content_id': 'content_1', 'html': '

    state content html

    ' } @@ -912,19 +814,19 @@ def test_get_all_html_when_interaction_is_none(self): exp_services.save_new_exploration('owner_id', exploration) html_list = state.get_all_html_content_strings() - self.assertEqual(html_list, ['', '

    state content html

    ']) + self.assertItemsEqual(html_list, ['', '

    state content html

    ']) - def test_export_state_to_dict(self): + def test_export_state_to_dict(self) -> None: """Test exporting a state to a dict.""" exploration = exp_domain.Exploration.create_default_exploration( 'exp_id') exploration.add_states(['New state']) state_dict = exploration.states['New state'].to_dict() - expected_dict = { + expected_dict: state_domain.StateDict = { 'classifier_model_id': None, 'content': { - 'content_id': 'content', + 'content_id': 'content_2', 'html': '' }, 'interaction': { @@ -933,8 +835,9 @@ def test_export_state_to_dict(self): 'customization_args': {}, 'default_outcome': { 'dest': 'New state', + 'dest_if_really_stuck': None, 'feedback': { - 'content_id': 'default_outcome', + 'content_id': 'default_outcome_3', 'html': '' }, 'labelled_as_correct': False, @@ -947,32 +850,25 @@ def test_export_state_to_dict(self): 'solution': None, }, 'linked_skill_id': None, - 'next_content_id_index': 0, 'param_changes': [], 'recorded_voiceovers': { 'voiceovers_mapping': { - 'content': {}, - 'default_outcome': {} + 'content_2': {}, + 'default_outcome_3': {} } }, 'solicit_answer_details': False, - 'card_is_checkpoint': False, - 'written_translations': { - 'translations_mapping': { - 'content': {}, - 'default_outcome': {} - } - } + 'card_is_checkpoint': False } self.assertEqual(expected_dict, state_dict) - def test_can_undergo_classification(self): + def test_can_undergo_classification(self) -> None: """Test the can_undergo_classification() function.""" exploration_id = 'eid' test_exp_filepath = os.path.join( feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') yaml_content = utils.get_file_contents(test_exp_filepath) - assets_list = [] + assets_list: List[Tuple[str, bytes]] = [] exp_services.save_new_exploration_from_yaml_and_assets( feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id, assets_list) @@ -989,13 +885,13 @@ def test_can_undergo_classification(self): self.assertFalse( state_without_training_data.can_undergo_classification()) - def 
test_get_training_data(self): + def test_get_training_data(self) -> None: """Test retrieval of training data.""" exploration_id = 'eid' test_exp_filepath = os.path.join( feconf.SAMPLE_EXPLORATIONS_DIR, 'classifier_demo_exploration.yaml') yaml_content = utils.get_file_contents(test_exp_filepath) - assets_list = [] + assets_list: List[Tuple[str, bytes]] = [] exp_services.save_new_exploration_from_yaml_and_assets( feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id, assets_list) @@ -1012,7 +908,9 @@ def test_get_training_data(self): self.assertEqual(observed_training_data, expected_training_data) - def test_get_content_html_with_correct_state_name_returns_html(self): + def test_get_content_html_with_correct_state_name_returns_html( + self + ) -> None: exploration = exp_domain.Exploration.create_default_exploration('0') init_state = exploration.states[exploration.init_state_name] @@ -1033,12 +931,12 @@ def test_get_content_html_with_correct_state_name_returns_html(self): self.assertEqual( init_state.get_content_html('hint_1'), '

    Changed hint one

    ') - def test_rte_content_validation_for_android(self): + def test_rte_content_validation_for_android(self) -> None: exploration = exp_domain.Exploration.create_default_exploration('0') init_state = exploration.states[exploration.init_state_name] init_state.update_interaction_id('TextInput') - solution_dict = { + solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': False, 'correct_answer': 'helloworld!', 'explanation': { @@ -1051,12 +949,16 @@ def test_rte_content_validation_for_android(self): }, } + # Ruling out the possibility of None for mypy type checking. + assert init_state.interaction.id is not None solution = state_domain.Solution.from_dict( init_state.interaction.id, solution_dict ) init_state.update_interaction_solution(solution) self.assertFalse(init_state.is_rte_content_supported_on_android()) solution_dict['explanation']['html'] = '' + # Ruling out the possibility of None for mypy type checking. + assert init_state.interaction.id is not None init_state.update_interaction_solution(state_domain.Solution.from_dict( init_state.interaction.id, solution_dict)) self.assertTrue(init_state.is_rte_content_supported_on_android()) @@ -1080,7 +982,7 @@ def test_rte_content_validation_for_android(self): self.assertTrue(init_state.is_rte_content_supported_on_android()) default_outcome = state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml( + 'Introduction', None, state_domain.SubtitledHtml( 'default_outcome', ( '

    ') })) - self.assertTrue(init_state.is_rte_content_supported_on_android()) + self.assertFalse(init_state.is_rte_content_supported_on_android()) init_state.update_content( state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', + 'content_id': 'content_0', 'html': ( '

    None: """Checks that the provided interaction is supported on Android.""" init_state = _create_init_state_for_interaction_verification() init_state.update_interaction_id(interaction_id) @@ -1181,7 +1089,9 @@ def _verify_interaction_supports_android(self, interaction_id): init_state.interaction.is_supported_on_android_app()) _checked_interaction_ids.add(interaction_id) - def _verify_interaction_does_not_support_android(self, interaction_id): + def _verify_interaction_does_not_support_android( + self: StateDomainUnitTests, interaction_id: str + ) -> None: """Checks that the provided interaction is not supported on Android. """ @@ -1191,7 +1101,9 @@ def _verify_interaction_does_not_support_android(self, interaction_id): init_state.interaction.is_supported_on_android_app()) _checked_interaction_ids.add(interaction_id) - def _verify_all_interaction_ids_checked(self): + def _verify_all_interaction_ids_checked( + self: StateDomainUnitTests + ) -> None: """Verifies that all the interaction ids are checked.""" all_interaction_ids = set( interaction_registry.Registry.get_all_interaction_ids()) @@ -1224,7 +1136,7 @@ def _verify_all_interaction_ids_checked(self): _verify_all_interaction_ids_checked(self) - def test_get_content_html_with_invalid_content_id_raise_error(self): + def test_get_content_html_with_invalid_content_id_raise_error(self) -> None: exploration = exp_domain.Exploration.create_default_exploration('0') init_state = exploration.states[exploration.init_state_name] init_state.update_interaction_id('TextInput') @@ -1238,484 +1150,77 @@ def test_get_content_html_with_invalid_content_id_raise_error(self): self.assertEqual( init_state.get_content_html('hint_1'), '

    hint one

    ') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( ValueError, 'Content ID Invalid id does not exist'): init_state.get_content_html('Invalid id') - def test_get_content_id_mapping_needing_translations_with_existing_translations(self): # pylint: disable=line-too-long - exploration = exp_domain.Exploration.create_default_exploration('0') - init_state = exploration.states[exploration.init_state_name] - init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - init_state.update_interaction_id('TextInput') - default_outcome = state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml( - 'default_outcome', '

    The default outcome.

    '), - False, [], None, None + def test_state_operations(self) -> None: + """Test adding, updating and checking existence of states.""" + exploration = exp_domain.Exploration.create_default_exploration('eid') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index ) + self.assertNotIn('invalid_state_name', exploration.states) - init_state.update_interaction_default_outcome(default_outcome) + self.assertEqual(len(exploration.states), 1) - state_answer_group = state_domain.AnswerGroup( - state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( - 'feedback_1', '

    Feedback

    '), False, [], None, None), - [ - state_domain.RuleSpec( - 'Contains', - { - 'x': { - 'contentId': 'rule_input_Equals', - 'normalizedStrSet': ['Test'] - } - }) - ], - [], - None - ) + default_state_name = exploration.init_state_name + exploration.rename_state(default_state_name, 'Renamed state') + self.assertEqual(len(exploration.states), 1) + self.assertEqual(exploration.init_state_name, 'Renamed state') - init_state.update_interaction_answer_groups( - [state_answer_group]) - hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml('hint_1', '

    hint one

    ') - ) - ] - init_state.update_interaction_hints(hints_list) + # Add a new state. + exploration.add_states(['State 2']) + self.assertEqual(len(exploration.states), 2) - solution_dict = { - 'answer_is_exclusive': False, - 'correct_answer': 'helloworld!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    hello_world is a string

    ' - }, - } + # It is OK to rename a state to the same name. + exploration.rename_state('State 2', 'State 2') - solution = state_domain.Solution.from_dict( - init_state.interaction.id, solution_dict) - init_state.update_interaction_solution(solution) + # But it is not OK to add or rename a state using a name that already + # exists. + with self.assertRaisesRegex(ValueError, 'Duplicate state name'): + exploration.add_states(['State 2']) + with self.assertRaisesRegex(ValueError, 'Duplicate state name'): + exploration.rename_state('State 2', 'Renamed state') - written_translations_dict = { - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '

    hello!

    ', - 'needs_update': False - } - }, - 'hint_1': {}, - 'default_outcome': {}, - 'solution': {}, - 'feedback_1': {} - } - } - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) + # And it is OK to rename a state to 'END' (old terminal pseudostate). It + # is tested throughout this test because a lot of old behavior used to + # be specific to states named 'END'. These tests validate that is no + # longer the situation. + exploration.rename_state('State 2', 'END') - init_state.update_written_translations(written_translations) + # Should successfully be able to name it back. + exploration.rename_state('END', 'State 2') - content_id_mapping_needing_translations = ( - init_state.get_content_id_mapping_needing_translations('hi')) - self.assertEqual( - content_id_mapping_needing_translations[ - 'hint_1' - ].content, - '

    hint one

    ' - ) - self.assertEqual( - content_id_mapping_needing_translations[ - 'solution' - ].content, - '

    hello_world is a string

    ' - ) - self.assertEqual( - content_id_mapping_needing_translations[ - 'feedback_1' - ].content, - '

    Feedback

    ', - ) - self.assertEqual( - content_id_mapping_needing_translations[ - 'default_outcome' - ].content, - '

    The default outcome.

    ' - ) - - def test_get_content_id_mapping_needing_translations_with_interaction_translations(self): # pylint: disable=line-too-long - exploration = exp_domain.Exploration.create_default_exploration('0') - init_state = exploration.states[exploration.init_state_name] - init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - init_state.update_interaction_id('TextInput') - state_interaction_cust_args = { - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': 'Placeholder' - } - }, - 'rows': {'value': 1} - } - init_state.update_interaction_customization_args( - state_interaction_cust_args) - - default_outcome = state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml( - 'default_outcome', '

    The default outcome.

    '), - False, [], None, None - ) - - init_state.update_interaction_default_outcome(default_outcome) - state_answer_group = state_domain.AnswerGroup( - state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( - 'feedback_1', '

    Feedback

    '), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Contains', - { - 'x': { - 'contentId': 'rule_input_4', - 'normalizedStrSet': ['Input1', 'Input2'] - } - }) - ], - [], - None - ) - - init_state.update_interaction_answer_groups( - [state_answer_group]) - hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml('hint_1', '

    hint one

    ') - ) - ] - init_state.update_interaction_hints(hints_list) - - solution_dict = { - 'answer_is_exclusive': False, - 'correct_answer': 'helloworld!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    hello_world is a string

    ' - }, - } - - solution = state_domain.Solution.from_dict( - init_state.interaction.id, solution_dict) - init_state.update_interaction_solution(solution) - - written_translations_dict = { - 'translations_mapping': { - 'content': {}, - 'hint_1': {}, - 'default_outcome': {}, - 'solution': {}, - 'feedback_1': {}, - 'ca_placeholder_0': {}, - 'rule_input_4': {} - } - } - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - - init_state.update_written_translations(written_translations) - - content_id_mapping_needing_translations = ( - init_state.get_content_id_mapping_needing_translations('hi')) - self.assertEqual( - content_id_mapping_needing_translations[ - 'hint_1' - ].content, - '

    hint one

    ' - ) - self.assertEqual( - content_id_mapping_needing_translations[ - 'solution' - ].content, - '

    hello_world is a string

    ' - ) - self.assertEqual( - content_id_mapping_needing_translations[ - 'feedback_1' - ].content, - '

    Feedback

    ' - ) - self.assertEqual( - content_id_mapping_needing_translations[ - 'default_outcome' - ].content, - '

    The default outcome.

    ' - ) - self.assertEqual( - content_id_mapping_needing_translations[ - 'content' - ].content, - '

    This is content

    ', - ) - self.assertEqual( - content_id_mapping_needing_translations[ - 'ca_placeholder_0' - ].content, - 'Placeholder' - ) - rule_translatable_item = content_id_mapping_needing_translations[ - 'rule_input_4' - ] - self.assertEqual(rule_translatable_item.content, ['Input1', 'Input2']) - self.assertEqual(rule_translatable_item.interaction_id, 'TextInput') - self.assertEqual(rule_translatable_item.rule_type, 'Contains') - - def test_get_content_id_mapping_needing_translations_for_set_input_rule(self): # pylint: disable=line-too-long - exploration = exp_domain.Exploration.create_default_exploration('0') - init_state = exploration.states[exploration.init_state_name] - init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - init_state.update_interaction_id('SetInput') - - state_answer_group = state_domain.AnswerGroup( - state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( - 'feedback_1', '

    Feedback

    '), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Equals', - { - 'x': { - 'contentId': 'rule_input_4', - 'unicodeStrSet': ['Input1', 'Input2'] - } - }) - ], - [], - None - ) - init_state.update_interaction_answer_groups( - [state_answer_group]) - - written_translations_dict = { - 'translations_mapping': { - 'content': {}, - 'feedback_1': {}, - 'rule_input_4': {} - } - } - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - init_state.update_written_translations(written_translations) - - content_id_mapping_needing_translations = ( - init_state.get_content_id_mapping_needing_translations('hi')) - rule_translatable_item = content_id_mapping_needing_translations[ - 'rule_input_4' - ] - self.assertEqual(rule_translatable_item.content, ['Input1', 'Input2']) - self.assertEqual(rule_translatable_item.interaction_id, 'SetInput') - self.assertEqual(rule_translatable_item.rule_type, 'Equals') - - def test_get_content_id_mapping_needing_translations_does_not_return_numeric_content(self): # pylint: disable=line-too-long - exploration = exp_domain.Exploration.create_default_exploration('0') - init_state = exploration.states[exploration.init_state_name] - # Set the content. - init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - # Set the multiple choice interaction. - init_state.update_interaction_id('MultipleChoiceInput') - state_interaction_cust_args = { - 'showChoicesInShuffledOrder': { - 'value': True - }, - 'choices': { - 'value': [ - { - 'content_id': 'ca_choices_0', - 'html': '\u003cp\u003eoption 1\u003c/p\u003e' - }, - { - 'content_id': 'ca_choices_1', - 'html': '1,000' - }, - { - 'content_id': 'ca_choices_2', - 'html': '100' - } - ] - } - } - init_state.update_interaction_customization_args( - state_interaction_cust_args) - # Set the default outcome. - default_outcome = state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml( - 'default_outcome', '

    The default outcome.

    '), - False, [], None, None - ) - init_state.update_interaction_default_outcome(default_outcome) - # Set the translations. - written_translations_dict = { - 'translations_mapping': { - 'content': {}, - 'default_outcome': {}, - 'ca_choices_0': {}, - 'ca_choices_1': {}, - 'ca_choices_2': {} - } - } - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - init_state.update_written_translations(written_translations) - - # Choice 2 should not be returned as its value is numeric. - content_id_mapping_needing_translations = ( - init_state.get_content_id_mapping_needing_translations('hi')) - self.assertEqual( - content_id_mapping_needing_translations[ - 'content' - ].content, '

    This is content

    ') - self.assertEqual( - content_id_mapping_needing_translations[ - 'default_outcome' - ].content, '

    The default outcome.

    ') - self.assertEqual( - content_id_mapping_needing_translations[ - 'ca_choices_0' - ].content, '\u003cp\u003eoption 1\u003c/p\u003e') - self.assertEqual( - content_id_mapping_needing_translations[ - 'ca_choices_1' - ].content, '1,000') - self.assertFalse( - 'ca_choices_2' in content_id_mapping_needing_translations) - - def test_add_translation_works_correctly(self): - exploration = exp_domain.Exploration.create_default_exploration('0') - init_state = exploration.states[exploration.init_state_name] - init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - - self.assertEqual(init_state.get_translation_counts(), {}) - - init_state.add_translation('content', 'hi', '

    Translated text

    ') - - self.assertEqual(init_state.get_translation_counts(), {'hi': 1}) - - def test_get_translation_counts_returns_correct_value(self): - state = state_domain.State.create_default_state(None) - state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'content', - 'html': '

    This is content

    ' - })) - - self.set_interaction_for_state(state, 'TextInput') - - hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml('hint_1', '

    hint one

    '))] - state.update_interaction_hints(hints_list) - - solution_dict = { - 'answer_is_exclusive': False, - 'correct_answer': 'helloworld!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    hello_world is a string

    ' - }, - } - - solution = state_domain.Solution.from_dict( - state.interaction.id, solution_dict) - - state.update_interaction_solution(solution) - state.validate({}, True) - state.add_translation('hint_1', 'hi', 'Some translation') - state.add_translation('content', 'hi', 'Some translation') - - self.assertEqual(state.get_translation_counts(), {'hi': 2}) - - # Adding interaction placeholder translation won't be reflected in - # get_translation_counts method. - state.add_translation('ca_placeholder_0', 'hi', 'Some translation') - - self.assertEqual(state.get_translation_counts(), {'hi': 2}) - - def test_state_operations(self): - """Test adding, updating and checking existence of states.""" - exploration = exp_domain.Exploration.create_default_exploration('eid') - self.assertNotIn('invalid_state_name', exploration.states) - - self.assertEqual(len(exploration.states), 1) - - default_state_name = exploration.init_state_name - exploration.rename_state(default_state_name, 'Renamed state') - self.assertEqual(len(exploration.states), 1) - self.assertEqual(exploration.init_state_name, 'Renamed state') - - # Add a new state. - exploration.add_states(['State 2']) - self.assertEqual(len(exploration.states), 2) - - # It is OK to rename a state to the same name. - exploration.rename_state('State 2', 'State 2') - - # But it is not OK to add or rename a state using a name that already - # exists. - with self.assertRaisesRegexp(ValueError, 'Duplicate state name'): - exploration.add_states(['State 2']) - with self.assertRaisesRegexp(ValueError, 'Duplicate state name'): - exploration.rename_state('State 2', 'Renamed state') - - # And it is OK to rename a state to 'END' (old terminal pseudostate). It - # is tested throughout this test because a lot of old behavior used to - # be specific to states named 'END'. These tests validate that is no - # longer the situation. - exploration.rename_state('State 2', 'END') - - # Should successfully be able to name it back. 
- exploration.rename_state('END', 'State 2') - - # The exploration now has exactly two states. - self.assertNotIn(default_state_name, exploration.states) - self.assertIn('Renamed state', exploration.states) - self.assertIn('State 2', exploration.states) + # The exploration now has exactly two states. + self.assertNotIn(default_state_name, exploration.states) + self.assertIn('Renamed state', exploration.states) + self.assertIn('State 2', exploration.states) # Can successfully add 'END' state. exploration.add_states(['END']) # Should fail to rename like any other state. - with self.assertRaisesRegexp(ValueError, 'Duplicate state name'): + with self.assertRaisesRegex(ValueError, 'Duplicate state name'): exploration.rename_state('State 2', 'END') + default_outcome = exploration.states[ + 'Renamed state'].interaction.default_outcome + assert default_outcome is not None # Ensure the other states are connected to END. - exploration.states[ - 'Renamed state'].interaction.default_outcome.dest = 'State 2' - exploration.states['State 2'].interaction.default_outcome.dest = 'END' + default_outcome.dest = 'State 2' + + default_outcome = exploration.states[ + 'State 2'].interaction.default_outcome + assert default_outcome is not None + default_outcome.dest = 'END' # Ensure the other states have interactions. self.set_interaction_for_state( - exploration.states['Renamed state'], 'TextInput') + exploration.states['Renamed state'], 'TextInput', + content_id_generator) self.set_interaction_for_state( - exploration.states['State 2'], 'TextInput') + exploration.states['State 2'], 'TextInput', content_id_generator) # Other miscellaneous requirements for validation. exploration.title = 'Title' @@ -1724,7 +1229,7 @@ def test_state_operations(self): # The exploration should NOT be terminable even though it has a state # called 'END' and everything else is connected to it. 
- with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This state does not have any interaction specified.'): exploration.validate(strict=True) @@ -1734,7 +1239,8 @@ def test_state_operations(self): # default outcome or answer groups. exploration.rename_state('END', 'AnotherEnd') another_end_state = exploration.states['AnotherEnd'] - self.set_interaction_for_state(another_end_state, 'EndExploration') + self.set_interaction_for_state( + another_end_state, 'EndExploration', content_id_generator) another_end_state.update_interaction_default_outcome(None) exploration.validate(strict=True) @@ -1745,50 +1251,61 @@ def test_state_operations(self): exploration.delete_state('END') self.assertNotIn('END', exploration.states) - def test_update_solicit_answer_details(self): + def test_update_solicit_answer_details(self) -> None: """Test updating solicit_answer_details.""" - state = state_domain.State.create_default_state('state_1') + state = state_domain.State.create_default_state( + 'state_1', 'content_0', 'default_outcome_1') self.assertEqual(state.solicit_answer_details, False) state.update_solicit_answer_details(True) self.assertEqual(state.solicit_answer_details, True) - def test_update_solicit_answer_details_with_non_bool_fails(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_update_solicit_answer_details_with_non_bool_fails(self) -> None: """Test updating solicit_answer_details with non bool value.""" exploration = exp_domain.Exploration.create_default_exploration('eid') init_state = exploration.states[exploration.init_state_name] self.assertEqual(init_state.solicit_answer_details, False) - with self.assertRaisesRegexp(Exception, ( + with self.assertRaisesRegex(Exception, ( 'Expected solicit_answer_details to be a boolean, received')): - init_state.update_solicit_answer_details('abc') + init_state.update_solicit_answer_details('abc') # type: ignore[arg-type] init_state = exploration.states[exploration.init_state_name] self.assertEqual(init_state.solicit_answer_details, False) - def test_update_linked_skill_id(self): + def test_update_linked_skill_id(self) -> None: """Test updating linked_skill_id.""" - state = state_domain.State.create_default_state('state_1') + state = state_domain.State.create_default_state( + 'state_1', 'content_0', 'default_outcome_1') self.assertEqual(state.linked_skill_id, None) state.update_linked_skill_id('string_2') self.assertEqual(state.linked_skill_id, 'string_2') - def test_update_card_is_checkpoint(self): + def test_update_card_is_checkpoint(self) -> None: """Test update card_is_checkpoint.""" - state = state_domain.State.create_default_state('state_1') + state = state_domain.State.create_default_state( + 'state_1', 'content_0', 'default_outcome_1') self.assertEqual(state.card_is_checkpoint, False) state.update_card_is_checkpoint(True) self.assertEqual(state.card_is_checkpoint, True) - def test_update_card_is_checkpoint_with_non_bool_fails(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_update_card_is_checkpoint_with_non_bool_fails(self) -> None: """Test updating card_is_checkpoint with non bool value.""" exploration = exp_domain.Exploration.create_default_exploration('eid') init_state = exploration.states[exploration.init_state_name] self.assertEqual(init_state.card_is_checkpoint, True) - with self.assertRaisesRegexp(Exception, ( + with self.assertRaisesRegex(Exception, ( 'Expected card_is_checkpoint to be a boolean, received')): - init_state.update_card_is_checkpoint('abc') + init_state.update_card_is_checkpoint('abc') # type: ignore[arg-type] init_state = exploration.states[exploration.init_state_name] self.assertEqual(init_state.card_is_checkpoint, True) - def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self): + def test_convert_html_fields_in_state_with_drag_and_drop_interaction( + self + ) -> None: """Test the method for converting all the HTML in a state having DragAndDropSortInput interaction. """ @@ -1800,66 +1317,11 @@ def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self): '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &' 'amp;quot;svg_filename&quot;: &quot;&quot;}">') - written_translations_dict_with_old_math_schema = { - 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': html_with_old_math_schema, - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': html_with_old_math_schema, - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } - } - } - written_translations_dict_with_new_math_schema = { - 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': html_with_new_math_schema, - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - } - }, - 
'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': html_with_new_math_schema, - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } - } - } - answer_group_dict_with_old_math_schema = { + answer_group_dict_with_old_math_schema: state_domain.AnswerGroupDict = { 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1903,6 +1365,7 @@ def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self): answer_group_dict_with_new_math_schema = { 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '

    Feedback

    ' @@ -1943,12 +1406,26 @@ def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self): 'training_data': [], 'tagged_skill_misconception_id': None } - state_dict_with_old_math_schema = { + choices_subtitled_dicts: List[state_domain.SubtitledHtmlDict] = [ + { + 'content_id': 'ca_choices_0', + 'html': html_with_old_math_schema + }, { + 'content_id': 'ca_choices_1', + 'html': '

    2

    ' + }, { + 'content_id': 'ca_choices_2', + 'html': '

    3

    ' + }, { + 'content_id': 'ca_choices_3', + 'html': '

    4

    ' + } + ] + state_dict_with_old_math_schema: state_domain.StateDict = { 'content': { - 'content_id': 'content', 'html': 'Hello!' + 'content_id': 'content_0', 'html': 'Hello!' }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -1968,25 +1445,14 @@ def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self): ) }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False }, 'customization_args': { 'choices': { - 'value': [{ - 'content_id': 'ca_choices_0', - 'html': html_with_old_math_schema - }, { - 'content_id': 'ca_choices_1', - 'html': '

    2

    ' - }, { - 'content_id': 'ca_choices_2', - 'html': '

    3

    ' - }, { - 'content_id': 'ca_choices_3', - 'html': '

    4

    ' - }] + 'value': choices_subtitled_dicts }, 'allowMultipleItemsInSamePosition': {'value': True} }, @@ -2019,18 +1485,17 @@ def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self): 'html': '

    This is solution for state1

    ' } } - }, - 'written_translations': ( - written_translations_dict_with_old_math_schema) + 'recorded_voiceovers': { + 'voiceovers_mapping': {} + } } state_dict_with_new_math_schema = { 'content': { - 'content_id': 'content', 'html': 'Hello!' + 'content_id': 'content_0', 'html': 'Hello!' }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -2050,6 +1515,7 @@ def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self): ) }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False @@ -2101,11 +1567,12 @@ def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self): 'html': '

    This is solution for state1

    ' } } - }, - 'written_translations': ( - written_translations_dict_with_new_math_schema) + 'recorded_voiceovers': { + 'voiceovers_mapping': {} + } } + self.assertEqual( state_domain.State.convert_html_fields_in_state( state_dict_with_old_math_schema, @@ -2114,7 +1581,9 @@ def test_convert_html_fields_in_state_with_drag_and_drop_interaction(self): state_uses_old_rule_template_schema=True), state_dict_with_new_math_schema) - def test_convert_html_fields_in_state_with_item_selection_interaction(self): + def test_convert_html_fields_in_state_with_item_selection_interaction( + self + ) -> None: """Test the method for converting all the HTML in a state having ItemSelection interaction. """ @@ -2126,7 +1595,9 @@ def test_convert_html_fields_in_state_with_item_selection_interaction(self): '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &' 'amp;quot;svg_filename&quot;: &quot;&quot;}">') - answer_group_with_old_math_schema = [{ + answer_group_with_old_math_schema: List[ + state_domain.AnswerGroupDict + ] = [{ 'rule_specs': [{ 'rule_type': 'Equals', 'inputs': { @@ -2150,6 +1621,7 @@ def test_convert_html_fields_in_state_with_item_selection_interaction(self): }], 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback', 'html': html_with_old_math_schema @@ -2187,6 +1659,7 @@ def test_convert_html_fields_in_state_with_item_selection_interaction(self): }], 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback', 'html': html_with_new_math_schema @@ -2200,12 +1673,26 @@ def test_convert_html_fields_in_state_with_item_selection_interaction(self): 'tagged_skill_misconception_id': None }] - state_dict_with_old_math_schema = { + choices_subtitled_dicts: List[state_domain.SubtitledHtmlDict] = [ + { + 'content_id': 'ca_choices_0', + 'html': '

    init_state customization arg html 1

    ' + }, { + 'content_id': 'ca_choices_1', + 'html': html_with_old_math_schema + }, { + 'content_id': 'ca_choices_2', + 'html': '

    init_state customization arg html 3

    ' + }, { + 'content_id': 'ca_choices_3', + 'html': '

    init_state customization arg html 4

    ' + } + ] + state_dict_with_old_math_schema: state_domain.StateDict = { 'content': { - 'content_id': 'content', 'html': 'Hello!' + 'content_id': 'content_0', 'html': 'Hello!' }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -2238,6 +1725,7 @@ def test_convert_html_fields_in_state_with_item_selection_interaction(self): ) }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False @@ -2250,33 +1738,23 @@ def test_convert_html_fields_in_state_with_item_selection_interaction(self): 'value': 1 }, 'choices': { - 'value': [{ - 'content_id': 'ca_choices_0', - 'html': '

    init_state customization arg html 1

    ' - }, { - 'content_id': 'ca_choices_1', - 'html': html_with_old_math_schema - }, { - 'content_id': 'ca_choices_2', - 'html': '

    init_state customization arg html 3

    ' - }, { - 'content_id': 'ca_choices_3', - 'html': '

    init_state customization arg html 4

    ' - }] + 'value': choices_subtitled_dicts } }, 'confirmed_unclassified_answers': [], 'id': 'ItemSelectionInput', 'hints': [] + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': {} } } state_dict_with_new_math_schema = { 'content': { - 'content_id': 'content', 'html': 'Hello!' + 'content_id': 'content_0', 'html': 'Hello!' }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -2309,6 +1787,7 @@ def test_convert_html_fields_in_state_with_item_selection_interaction(self): ) }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False @@ -2339,6 +1818,9 @@ def test_convert_html_fields_in_state_with_item_selection_interaction(self): 'confirmed_unclassified_answers': [], 'id': 'ItemSelectionInput', 'hints': [] + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': {} } } interaction_registry.Registry.get_all_specs_for_state_schema_version( @@ -2352,7 +1834,9 @@ def test_convert_html_fields_in_state_with_item_selection_interaction(self): state_uses_old_rule_template_schema=True), state_dict_with_new_math_schema) - def test_convert_html_fields_in_state_with_text_input_interaction(self): + def test_convert_html_fields_in_state_with_text_input_interaction( + self + ) -> None: """Test the method for converting all the HTML in a state having TextInput interaction. 
""" @@ -2364,9 +1848,10 @@ def test_convert_html_fields_in_state_with_text_input_interaction(self): '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &' 'amp;quot;svg_filename&quot;: &quot;&quot;}">') - answer_group_with_old_math_schema = { + answer_group_with_old_math_schema: state_domain.AnswerGroupDict = { 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': html_with_old_math_schema @@ -2388,6 +1873,7 @@ def test_convert_html_fields_in_state_with_text_input_interaction(self): answer_group_with_new_math_schema = { 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': html_with_new_math_schema @@ -2407,12 +1893,11 @@ def test_convert_html_fields_in_state_with_text_input_interaction(self): 'tagged_skill_misconception_id': None } - state_dict_with_old_math_schema = { + state_dict_with_old_math_schema: state_domain.StateDict = { 'content': { - 'content_id': 'content', 'html': html_with_old_math_schema + 'content_id': 'content_0', 'html': html_with_old_math_schema }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -2434,6 +1919,7 @@ def test_convert_html_fields_in_state_with_text_input_interaction(self): 'html': html_with_old_math_schema }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False @@ -2447,181 +1933,13 @@ def test_convert_html_fields_in_state_with_text_input_interaction(self): 'content_id': 'ca_placeholder_0', 'unicode_str': '' } - } - }, - 'confirmed_unclassified_answers': [], - 'id': 'TextInput', - 'hints': [ - { - 'hint_content': { - 'content_id': 'hint_1', - 'html': html_with_old_math_schema - } - }, - { - 'hint_content': { - 'content_id': 'hint_2', - 'html': html_with_old_math_schema - } - }] - } - } - - 
state_dict_with_new_math_schema = { - 'content': { - 'content_id': 'content', 'html': html_with_new_math_schema - }, - 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, - 'solicit_answer_details': False, - 'card_is_checkpoint': False, - 'linked_skill_id': None, - 'classifier_model_id': None, - 'interaction': { - 'solution': { - 'answer_is_exclusive': True, - 'correct_answer': 'Answer1', - 'explanation': { - 'content_id': 'solution', - 'html': html_with_new_math_schema - } - }, - 'answer_groups': [answer_group_with_new_math_schema], - 'default_outcome': { - 'param_changes': [], - 'feedback': { - 'content_id': 'default_outcome', - 'html': html_with_new_math_schema - }, - 'dest': 'Introduction', - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False - }, - 'customization_args': { - 'rows': { - 'value': 1 }, - 'placeholder': { - 'value': { - 'content_id': 'ca_placeholder_0', - 'unicode_str': '' - } + 'catchMisspellings': { + 'value': False } }, 'confirmed_unclassified_answers': [], 'id': 'TextInput', - 'hints': [ - { - 'hint_content': { - 'content_id': 'hint_1', - 'html': html_with_new_math_schema - } - }, - { - 'hint_content': { - 'content_id': 'hint_2', - 'html': html_with_new_math_schema - } - }] - } - } - self.assertEqual( - state_domain.State.convert_html_fields_in_state( - state_dict_with_old_math_schema, - html_validation_service. - add_math_content_to_math_rte_components), - state_dict_with_new_math_schema) - - def test_convert_html_fields_in_state_with_math_expression_input(self): - """Test the method for converting all the HTML in a state having - MathExpressionInput interaction. - """ - html_with_old_math_schema = ( - '

    Value

    ') - html_with_new_math_schema = ( - '

    Value

    ') - answer_group_with_old_math_schema = { - 'outcome': { - 'dest': 'Introduction', - 'feedback': { - 'content_id': 'feedback_1', - 'html': html_with_old_math_schema - }, - 'labelled_as_correct': False, - 'param_changes': [], - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None - }, - 'rule_specs': [{ - 'inputs': { - 'x': 'Test' - }, - 'rule_type': 'Equals' - }], - 'training_data': [], - 'tagged_skill_misconception_id': None - } - answer_group_with_new_math_schema = { - 'outcome': { - 'dest': 'Introduction', - 'feedback': { - 'content_id': 'feedback_1', - 'html': html_with_new_math_schema - }, - 'labelled_as_correct': False, - 'param_changes': [], - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None - }, - 'rule_specs': [{ - 'inputs': { - 'x': 'Test' - }, - 'rule_type': 'Equals' - }], - 'training_data': [], - 'tagged_skill_misconception_id': None - } - - state_dict_with_old_math_schema = { - 'content': { - 'content_id': 'content', 'html': html_with_old_math_schema - }, - 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, - 'solicit_answer_details': False, - 'card_is_checkpoint': False, - 'linked_skill_id': None, - 'classifier_model_id': None, - 'interaction': { - 'solution': { - 'answer_is_exclusive': True, - 'correct_answer': '42', - 'explanation': { - 'content_id': 'solution', - 'html': html_with_old_math_schema - } - }, - 'answer_groups': [answer_group_with_old_math_schema], - 'default_outcome': { - 'param_changes': [], - 'feedback': { - 'content_id': 'default_outcome', - 'html': html_with_old_math_schema - }, - 'dest': 'Introduction', - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False - }, - 'customization_args': {}, - 'confirmed_unclassified_answers': [], - 'id': 'MathExpressionInput', 'hints': [ { 'hint_content': { @@ -2635,15 +1953,17 @@ def test_convert_html_fields_in_state_with_math_expression_input(self): 'html': 
html_with_old_math_schema } }] + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': {} } } state_dict_with_new_math_schema = { 'content': { - 'content_id': 'content', 'html': html_with_new_math_schema + 'content_id': 'content_0', 'html': html_with_new_math_schema }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -2651,287 +1971,41 @@ def test_convert_html_fields_in_state_with_math_expression_input(self): 'interaction': { 'solution': { 'answer_is_exclusive': True, - 'correct_answer': '42', + 'correct_answer': 'Answer1', 'explanation': { 'content_id': 'solution', 'html': html_with_new_math_schema } - }, - 'answer_groups': [answer_group_with_new_math_schema], - 'default_outcome': { - 'param_changes': [], - 'feedback': { - 'content_id': 'default_outcome', - 'html': html_with_new_math_schema - }, - 'dest': 'Introduction', - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False - }, - 'customization_args': {}, - 'confirmed_unclassified_answers': [], - 'id': 'MathExpressionInput', - 'hints': [ - { - 'hint_content': { - 'content_id': 'hint_1', - 'html': html_with_new_math_schema - } - }, - { - 'hint_content': { - 'content_id': 'hint_2', - 'html': html_with_new_math_schema - } - }] - } - } - self.assertEqual( - state_domain.State.convert_html_fields_in_state( - state_dict_with_old_math_schema, - html_validation_service. - add_math_content_to_math_rte_components), - state_dict_with_new_math_schema) - - def test_convert_html_fields_in_state_with_old_written_translations(self): - """Test the method for converting all the HTML in a state having - written_translations in the old format. This is needed for converting - older snapshots (prior to state schema version 35) properly. - - TODO(#11950): Remove this test once old schema migration functions are - deleted. - """ - html_with_old_math_schema = ( - '

    Value

    ') - html_with_new_math_schema = ( - '

    Value

    ') - written_translations_dict_with_old_math_schema_and_old_format = { - 'translations_mapping': { - 'content1': { - 'en': { - 'html': html_with_old_math_schema, - 'needs_update': True - }, - 'hi': { - 'html': 'Hey!', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'html': html_with_old_math_schema, - 'needs_update': False - }, - 'en': { - 'html': 'hello!', - 'needs_update': False - } - } - } - } - written_translations_dict_with_new_math_schema_and_old_format = { - 'translations_mapping': { - 'content1': { - 'en': { - 'html': html_with_new_math_schema, - 'needs_update': True - }, - 'hi': { - 'html': 'Hey!', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'html': html_with_new_math_schema, - 'needs_update': False - }, - 'en': { - 'html': 'hello!', - 'needs_update': False - } - } - } - } - - answer_group_dict_with_old_math_schema = { - 'outcome': { - 'dest': 'Introduction', - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'labelled_as_correct': False, - 'param_changes': [], - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None - }, - 'rule_specs': [{ - 'inputs': { - 'x': [[html_with_old_math_schema]] - }, - 'rule_type': 'IsEqualToOrdering' - }], - 'training_data': [], - 'tagged_skill_misconception_id': None - } - answer_group_dict_with_new_math_schema = { - 'outcome': { - 'dest': 'Introduction', - 'feedback': { - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - }, - 'labelled_as_correct': False, - 'param_changes': [], - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None - }, - 'rule_specs': [{ - 'inputs': { - 'x': [[html_with_new_math_schema]] - }, - 'rule_type': 'IsEqualToOrdering' - }], - 'training_data': [], - 'tagged_skill_misconception_id': None - } - state_dict_with_old_math_schema = { - 'content': { - 'content_id': 'content', 'html': 'Hello!' - }, - 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, - 'solicit_answer_details': False, - 'card_is_checkpoint': False, - 'linked_skill_id': None, - 'classifier_model_id': None, - 'interaction': { - 'answer_groups': [answer_group_dict_with_old_math_schema], - 'default_outcome': { - 'param_changes': [], - 'feedback': { - 'content_id': 'default_outcome', - 'html': ( - '

    ' - 'Hello this is test case to check ' - 'image tag inside p tag

    ' - ) - }, - 'dest': 'Introduction', - 'refresher_exploration_id': None, - 'missing_prerequisite_skill_id': None, - 'labelled_as_correct': False - }, - 'customization_args': { - 'choices': { - 'value': [{ - 'content_id': 'ca_choices_0', - 'html': html_with_old_math_schema - }, { - 'content_id': 'ca_choices_1', - 'html': '

    2

    ' - }, { - 'content_id': 'ca_choices_2', - 'html': '

    3

    ' - }, { - 'content_id': 'ca_choices_3', - 'html': '

    4

    ' - }] - }, - 'allowMultipleItemsInSamePosition': {'value': True} - }, - 'confirmed_unclassified_answers': [], - 'id': 'DragAndDropSortInput', - 'hints': [ - { - 'hint_content': { - 'content_id': 'hint_1', - 'html': html_with_old_math_schema - } - }, - { - 'hint_content': { - 'content_id': 'hint_2', - 'html': html_with_old_math_schema - } - } - ], - 'solution': { - 'answer_is_exclusive': True, - 'correct_answer': [ - [html_with_old_math_schema], - ['

    2

    '], - ['

    3

    '], - ['

    4

    '] - ], - 'explanation': { - 'content_id': 'solution', - 'html': '

    This is solution for state1

    ' - } - } - - }, - 'written_translations': ( - written_translations_dict_with_old_math_schema_and_old_format) - } - - state_dict_with_new_math_schema = { - 'content': { - 'content_id': 'content', 'html': 'Hello!' - }, - 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, - 'solicit_answer_details': False, - 'card_is_checkpoint': False, - 'linked_skill_id': None, - 'classifier_model_id': None, - 'interaction': { - 'answer_groups': [answer_group_dict_with_new_math_schema], + }, + 'answer_groups': [answer_group_with_new_math_schema], 'default_outcome': { 'param_changes': [], 'feedback': { 'content_id': 'default_outcome', - 'html': ( - '

    ' - 'Hello this is test case to check ' - 'image tag inside p tag

    ' - ) + 'html': html_with_new_math_schema }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False }, 'customization_args': { - 'choices': { - 'value': [{ - 'content_id': 'ca_choices_0', - 'html': html_with_new_math_schema - }, { - 'content_id': 'ca_choices_1', - 'html': '

    2

    ' - }, { - 'content_id': 'ca_choices_2', - 'html': '

    3

    ' - }, { - 'content_id': 'ca_choices_3', - 'html': '

    4

    ' - }] + 'rows': { + 'value': 1 }, - 'allowMultipleItemsInSamePosition': {'value': True} + 'placeholder': { + 'value': { + 'content_id': 'ca_placeholder_0', + 'unicode_str': '' + } + }, + 'catchMisspellings': { + 'value': False + } }, 'confirmed_unclassified_answers': [], - 'id': 'DragAndDropSortInput', + 'id': 'TextInput', 'hints': [ { 'hint_content': { @@ -2944,43 +2018,30 @@ def test_convert_html_fields_in_state_with_old_written_translations(self): 'content_id': 'hint_2', 'html': html_with_new_math_schema } - } - ], - 'solution': { - 'answer_is_exclusive': True, - 'correct_answer': [ - [html_with_new_math_schema], - ['

    2

    '], - ['

    3

    '], - ['

    4

    '] - ], - 'explanation': { - 'content_id': 'solution', - 'html': '

    This is solution for state1

    ' - } - } - + }] }, - 'written_translations': ( - written_translations_dict_with_new_math_schema_and_old_format) + 'recorded_voiceovers': { + 'voiceovers_mapping': {} + } } self.assertEqual( state_domain.State.convert_html_fields_in_state( state_dict_with_old_math_schema, html_validation_service. - add_math_content_to_math_rte_components, - state_uses_old_rule_template_schema=True), + add_math_content_to_math_rte_components), state_dict_with_new_math_schema) def test_convert_html_fields_in_state_having_rule_spec_with_invalid_format( - self): + self) -> None: """Test the method for converting the HTML in a state when the rule_spec has invalid html format. """ html_with_old_math_schema = ( '

    Value

    ') - answer_group_with_old_math_schema = [{ + answer_group_with_old_math_schema: List[ + state_domain.AnswerGroupDict + ] = [{ 'rule_specs': [{ 'rule_type': 'Equals', 'inputs': { @@ -2994,6 +2055,7 @@ def test_convert_html_fields_in_state_having_rule_spec_with_invalid_format( }], 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback', 'html': html_with_old_math_schema @@ -3007,12 +2069,11 @@ def test_convert_html_fields_in_state_having_rule_spec_with_invalid_format( 'tagged_skill_misconception_id': None }] - state_dict_with_old_math_schema = { + state_dict_with_old_math_schema: state_domain.StateDict = { 'content': { - 'content_id': 'content', 'html': 'Hello!' + 'content_id': 'content_0', 'html': 'Hello!' }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -3033,6 +2094,7 @@ def test_convert_html_fields_in_state_having_rule_spec_with_invalid_format( ) }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False @@ -3056,6 +2118,9 @@ def test_convert_html_fields_in_state_having_rule_spec_with_invalid_format( 'confirmed_unclassified_answers': [], 'id': 'ItemSelectionInput', 'hints': [] + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': {} } } @@ -3067,13 +2132,15 @@ def test_convert_html_fields_in_state_having_rule_spec_with_invalid_format( html_type_dict['format'] = 'invalid format' def mock_get_html_field_types_to_rule_specs( - unused_cls, state_schema_version=None): # pylint: disable=unused-argument + unused_cls: Type[state_domain.State], # pylint: disable=unused-argument + state_schema_version: Optional[int] = None # pylint: disable=unused-argument + ) -> Dict[str, rules_registry.RuleSpecsExtensionDict]: return mock_html_field_types_to_rule_specs_dict with self.swap( 
rules_registry.Registry, 'get_html_field_types_to_rule_specs', classmethod(mock_get_html_field_types_to_rule_specs)): - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The rule spec does not belong to a valid format.'): state_domain.State.convert_html_fields_in_state( @@ -3082,14 +2149,18 @@ def mock_get_html_field_types_to_rule_specs( add_math_content_to_math_rte_components, state_uses_old_rule_template_schema=True) - def test_convert_html_fields_in_rule_spec_with_invalid_input_variable(self): + def test_convert_html_fields_in_rule_spec_with_invalid_input_variable( + self + ) -> None: """Test the method for converting the HTML in a state when the rule_spec has invalid input variable. """ html_with_old_math_schema = ( '

    Value

    ') - answer_group_with_old_math_schema = [{ + answer_group_with_old_math_schema: List[ + state_domain.AnswerGroupDict + ] = [{ 'rule_specs': [{ 'rule_type': 'Equals', 'inputs': { @@ -3103,6 +2174,7 @@ def test_convert_html_fields_in_rule_spec_with_invalid_input_variable(self): }], 'outcome': { 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback', 'html': html_with_old_math_schema @@ -3116,12 +2188,11 @@ def test_convert_html_fields_in_rule_spec_with_invalid_input_variable(self): 'tagged_skill_misconception_id': None }] - state_dict_with_old_math_schema = { + state_dict_with_old_math_schema: state_domain.StateDict = { 'content': { - 'content_id': 'content', 'html': 'Hello!' + 'content_id': 'content_0', 'html': 'Hello!' }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -3142,6 +2213,7 @@ def test_convert_html_fields_in_rule_spec_with_invalid_input_variable(self): ) }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False @@ -3165,6 +2237,9 @@ def test_convert_html_fields_in_rule_spec_with_invalid_input_variable(self): 'confirmed_unclassified_answers': [], 'id': 'ItemSelectionInput', 'hints': [] + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': {} } } @@ -3177,14 +2252,16 @@ def test_convert_html_fields_in_rule_spec_with_invalid_input_variable(self): html_type_dict['ruleTypes']['Equals']['htmlInputVariables'] = ( ['y']) - def mock_get_html_field_types_to_rule_specs(unused_cls): + def mock_get_html_field_types_to_rule_specs( + unused_cls: Type[state_domain.State] + ) -> Dict[str, rules_registry.RuleSpecsExtensionDict]: return mock_html_field_types_to_rule_specs_dict with self.swap( rules_registry.Registry, 'get_html_field_types_to_rule_specs', classmethod(mock_get_html_field_types_to_rule_specs) ): 
- with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Rule spec should have at least one valid input variable with ' 'Html in it.'): @@ -3193,7 +2270,9 @@ def mock_get_html_field_types_to_rule_specs(unused_cls): html_validation_service. add_math_content_to_math_rte_components) - def test_convert_html_fields_in_rule_spec_with_invalid_correct_answer(self): + def test_convert_html_fields_in_rule_spec_with_invalid_correct_answer( + self + ) -> None: """Test the method for converting the HTML in a state when the interaction solution has invalid answer type. """ @@ -3201,26 +2280,26 @@ def test_convert_html_fields_in_rule_spec_with_invalid_correct_answer(self): '

    Value

    ') - state_dict_with_old_math_schema = { + old_solution_dict: state_domain.SolutionDict = { + 'answer_is_exclusive': True, + 'correct_answer': 'Answer1', + 'explanation': { + 'content_id': 'solution', + 'html': html_with_old_math_schema + } + } + + state_dict_with_old_math_schema: state_domain.StateDict = { 'content': { - 'content_id': 'content', 'html': html_with_old_math_schema + 'content_id': 'content_0', 'html': html_with_old_math_schema }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, 'classifier_model_id': None, 'interaction': { - 'solution': { - 'interaction_id': '', - 'answer_is_exclusive': True, - 'correct_answer': 'Answer1', - 'explanation': { - 'content_id': 'solution', - 'html': html_with_old_math_schema - } - }, + 'solution': old_solution_dict, 'answer_groups': [], 'default_outcome': { 'param_changes': [], @@ -3229,6 +2308,7 @@ def test_convert_html_fields_in_rule_spec_with_invalid_correct_answer(self): 'html': html_with_old_math_schema }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False @@ -3257,6 +2337,9 @@ def test_convert_html_fields_in_rule_spec_with_invalid_correct_answer(self): } } ] + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': {} } } @@ -3266,14 +2349,16 @@ def test_convert_html_fields_in_rule_spec_with_invalid_correct_answer(self): mock_html_field_types_to_rule_specs_dict['NormalizedString'] = ( mock_html_field_types_to_rule_specs_dict.pop('SetOfHtmlString')) - def mock_get_html_field_types_to_rule_specs(unused_cls): + def mock_get_html_field_types_to_rule_specs( + unused_cls: Type[state_domain.State] + ) -> Dict[str, rules_registry.RuleSpecsExtensionDict]: return mock_html_field_types_to_rule_specs_dict with self.swap( rules_registry.Registry, 'get_html_field_types_to_rule_specs', 
classmethod(mock_get_html_field_types_to_rule_specs) ): - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The solution does not have a valid ' 'correct_answer type.'): @@ -3282,7 +2367,9 @@ def mock_get_html_field_types_to_rule_specs(unused_cls): html_validation_service. add_math_content_to_math_rte_components) - def test_convert_html_fields_in_state_when_interaction_is_none(self): + def test_convert_html_fields_in_state_when_interaction_is_none( + self + ) -> None: """Test the method for converting all the HTML in a state having no interaction. """ @@ -3295,12 +2382,11 @@ def test_convert_html_fields_in_state_when_interaction_is_none(self): 'amp;quot;svg_filename&quot;: &quot;&quot;}">') - state_dict_with_old_math_schema = { + state_dict_with_old_math_schema: state_domain.StateDict = { 'content': { - 'content_id': 'content', 'html': html_with_old_math_schema + 'content_id': 'content_0', 'html': html_with_old_math_schema }, 'param_changes': [], - 'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -3315,11 +2401,12 @@ def test_convert_html_fields_in_state_when_interaction_is_none(self): 'html': html_with_old_math_schema }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False }, - 'customization_args': None, + 'customization_args': {}, 'confirmed_unclassified_answers': [], 'id': None, 'hints': [ @@ -3335,15 +2422,17 @@ def test_convert_html_fields_in_state_when_interaction_is_none(self): 'html': html_with_old_math_schema } }] + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': {} } } - state_dict_with_new_math_schema = { + state_dict_with_new_math_schema: state_domain.StateDict = { 'content': { - 'content_id': 'content', 'html': html_with_new_math_schema + 'content_id': 'content_0', 'html': html_with_new_math_schema }, 'param_changes': [], - 
'content_ids_to_audio_translations': {'content': {}}, 'solicit_answer_details': False, 'card_is_checkpoint': False, 'linked_skill_id': None, @@ -3358,11 +2447,12 @@ def test_convert_html_fields_in_state_when_interaction_is_none(self): 'html': html_with_new_math_schema }, 'dest': 'Introduction', + 'dest_if_really_stuck': None, 'refresher_exploration_id': None, 'missing_prerequisite_skill_id': None, 'labelled_as_correct': False }, - 'customization_args': None, + 'customization_args': {}, 'confirmed_unclassified_answers': [], 'id': None, 'hints': [ @@ -3378,9 +2468,12 @@ def test_convert_html_fields_in_state_when_interaction_is_none(self): 'html': html_with_new_math_schema } }] + }, + 'recorded_voiceovers': { + 'voiceovers_mapping': {} } } - solution_dict = { + solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': True, 'correct_answer': 'Answer1', 'explanation': { @@ -3396,156 +2489,135 @@ def test_convert_html_fields_in_state_when_interaction_is_none(self): state_dict_with_new_math_schema) # Assert that no action is performed on a solution dict when the # interaction ID is None. + # Here we use MyPy ignore because for testing purposes here we are + # not defining BaseInteractionDict's Key. self.assertEqual( state_domain.Solution.convert_html_in_solution( None, solution_dict, html_validation_service. add_math_content_to_math_rte_components, rules_registry.Registry.get_html_field_types_to_rule_specs(), - {} + {} # type: ignore[typeddict-item] ), solution_dict) - def test_subtitled_html_validation_with_invalid_html_type(self): + def test_subtitled_html_validation_with_invalid_html_type(self) -> None: """Test validation of subtitled HTML with invalid html type.""" subtitled_html = state_domain.SubtitledHtml( 'content_id', '

    some html

    ') subtitled_html.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid content HTML' ): with self.swap(subtitled_html, 'html', 20): subtitled_html.validate() - def test_subtitled_html_validation_with_invalid_content(self): + def test_subtitled_html_validation_with_invalid_content(self) -> None: """Test validation of subtitled HTML with invalid content.""" subtitled_html = state_domain.SubtitledHtml( 'content_id', '

    some html

    ') subtitled_html.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected content id to be a string, ' + 'received 20'): with self.swap(subtitled_html, 'content_id', 20): subtitled_html.validate() - def test_subtitled_unicode_validation_with_invalid_html_type(self): + def test_subtitled_unicode_validation_with_invalid_html_type(self) -> None: """Test validation of subtitled unicode with invalid unicode type.""" subtitled_unicode = state_domain.SubtitledUnicode( 'content_id', 'some string') subtitled_unicode.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid content unicode' ): with self.swap(subtitled_unicode, 'unicode_str', 20): subtitled_unicode.validate() - def test_subtitled_unicode_validation_with_invalid_content(self): + def test_subtitled_unicode_validation_with_invalid_content(self) -> None: """Test validation of subtitled unicode with invalid content.""" subtitled_unicode = state_domain.SubtitledUnicode( 'content_id', 'some html string') subtitled_unicode.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected content id to be a string, ' + 'received 20'): with self.swap(subtitled_unicode, 'content_id', 20): subtitled_unicode.validate() - def test_voiceover_validation(self): + def test_voiceover_validation(self) -> None: """Test validation of voiceover.""" audio_voiceover = state_domain.Voiceover('a.mp3', 20, True, 24.5) audio_voiceover.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected audio filename to be a string' - ): + ): with self.swap(audio_voiceover, 'filename', 20): audio_voiceover.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid audio filename' - ): + ): with self.swap(audio_voiceover, 'filename', '.invalidext'): audio_voiceover.validate() - with self.assertRaisesRegexp( + with 
self.assertRaisesRegex( utils.ValidationError, 'Invalid audio filename' - ): + ): with self.swap(audio_voiceover, 'filename', 'justanextension'): audio_voiceover.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid audio filename' - ): + ): with self.swap(audio_voiceover, 'filename', 'a.invalidext'): audio_voiceover.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected file size to be an int' - ): + ): with self.swap(audio_voiceover, 'file_size_bytes', 'abc'): audio_voiceover.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid file size' - ): + ): with self.swap(audio_voiceover, 'file_size_bytes', -3): audio_voiceover.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected needs_update to be a bool' - ): + ): with self.swap(audio_voiceover, 'needs_update', 'hello'): audio_voiceover.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected duration_secs to be a float' - ): + ): with self.swap(audio_voiceover, 'duration_secs', 'test'): audio_voiceover.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected duration_secs to be a float' - ): - with self.swap(audio_voiceover, 'duration_secs', 10): + ): + with self.swap(audio_voiceover, 'duration_secs', '10'): audio_voiceover.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected duration_secs to be positive number, ' 'or zero if not yet specified' - ): + ): with self.swap(audio_voiceover, 'duration_secs', -3.45): audio_voiceover.validate() - def test_written_translation_validation(self): - """Test validation of translation script.""" - written_translation = state_domain.WrittenTranslation( - 'html', 'Test.', True) - written_translation.validate() - - with 
self.assertRaisesRegexp( - AssertionError, 'Expected unicode HTML string, received 30'): - with self.swap(written_translation, 'translation', 30): - written_translation.validate() - - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected needs_update to be a bool' - ): - with self.swap(written_translation, 'needs_update', 20): - written_translation.validate() - - with self.assertRaisesRegexp( - utils.ValidationError, 'Invalid data_format' - ): - with self.swap(written_translation, 'data_format', 'int'): - written_translation.validate() - - with self.assertRaisesRegexp( - utils.ValidationError, 'Invalid data_format' - ): - with self.swap(written_translation, 'data_format', 2): - written_translation.validate() - - def test_hints_validation(self): + def test_hints_validation(self) -> None: """Test validation of state hints.""" exploration = exp_domain.Exploration.create_default_exploration('eid') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) exploration.objective = 'Objective' init_state = exploration.states[exploration.init_state_name] - self.set_interaction_for_state(init_state, 'TextInput') + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) + exploration.next_content_id_index = ( + content_id_generator.next_content_id_index) exploration.validate() hints_list = [ @@ -3555,7 +2627,7 @@ def test_hints_validation(self): ] init_state.update_interaction_hints(hints_list) - solution_dict = { + solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': False, 'correct_answer': 'helloworld!', 'explanation': { @@ -3564,6 +2636,8 @@ def test_hints_validation(self): }, } + # Ruling out the possibility of None for mypy type checking. 
+ assert init_state.interaction.id is not None solution = state_domain.Solution.from_dict( init_state.interaction.id, solution_dict ) @@ -3590,41 +2664,53 @@ def test_hints_validation(self): del hints_list[1] init_state.update_interaction_hints(hints_list) - init_state.update_next_content_id_index(4) self.assertEqual(len(init_state.interaction.hints), 2) exploration.validate() - def test_update_customization_args_with_non_unique_content_ids(self): + def test_update_customization_args_with_non_unique_content_ids( + self + ) -> None: """Test that update customization args throws an error when passed customization args with non-unique content ids. """ exploration = exp_domain.Exploration.create_default_exploration('eid') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) init_state = exploration.states[exploration.init_state_name] - self.set_interaction_for_state(init_state, 'MultipleChoiceInput') - with self.assertRaisesRegexp( + self.set_interaction_for_state( + init_state, 'MultipleChoiceInput', content_id_generator) + choices_subtitled_dicts: List[state_domain.SubtitledHtmlDict] = [ + { + 'content_id': 'non-unique-content-id', + 'html': '1' + }, { + 'content_id': 'non-unique-content-id', + 'html': '2' + } + ] + with self.assertRaisesRegex( Exception, 'All customization argument content_ids should be unique.' 
): init_state.update_interaction_customization_args({ 'choices': { - 'value': [{ - 'content_id': 'non-unique-content-id', - 'html': '1' - }, { - 'content_id': 'non-unique-content-id', - 'html': '2' - }] + 'value': choices_subtitled_dicts }, 'showChoicesInShuffledOrder': {'value': True} }) - def test_solution_validation(self): + def test_solution_validation(self) -> None: """Test validation of state solution.""" exploration = exp_domain.Exploration.create_default_exploration('eid') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) exploration.objective = 'Objective' init_state = exploration.states[exploration.init_state_name] - self.set_interaction_for_state(init_state, 'TextInput') + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) exploration.validate() # Solution should be set to None as default. @@ -3636,17 +2722,23 @@ def test_solution_validation(self): ) ] init_state.update_interaction_hints(hints_list) - solution_dict = { + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': False, - 'correct_answer': [0, 0], + 'correct_answer': [0, 0], # type: ignore[typeddict-item] 'explanation': { 'content_id': 'solution', 'html': '

    hello_world is a string

    ' } } + # Ruling out the possibility of None for mypy type checking. + assert init_state.interaction.id is not None # Object type of answer must match that of correct_answer. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( AssertionError, re.escape('Expected unicode string, received [0, 0]') ): @@ -3667,73 +2759,34 @@ def test_solution_validation(self): init_state.interaction.id, solution_dict)) exploration.validate() - def test_validate_state_unique_content_ids(self): - exploration = exp_domain.Exploration.create_default_exploration('eid') - init_state = exploration.states[exploration.init_state_name] - init_state.update_interaction_id('MultipleChoiceInput') - init_state.update_interaction_customization_args({ - 'choices': { - 'value': [{ - 'content_id': '', - 'html': 'one' - }] - }, - 'showChoicesInShuffledOrder': {'value': True} - }) - - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected all content_ids to be unique, received' - ): - with self.swap( - init_state.interaction.customization_args['choices'].value[0], - 'content_id', - 'content' - ): - exploration.validate() - - def test_validate_state_content_id_indexes(self): - exploration = exp_domain.Exploration.create_default_exploration('eid') - init_state = exploration.states[exploration.init_state_name] - init_state.update_interaction_id('MultipleChoiceInput') - init_state.update_interaction_customization_args({ - 'choices': { - 'value': [{ - 'content_id': 'ca_choices_10', - 'html': 'one' - }] - }, - 'showChoicesInShuffledOrder': {'value': True} - }) - init_state.update_next_content_id_index(9) - - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected all content id indexes to be less than the "next ' - 'content id index"' - ): - exploration.validate() - - def test_validate_state_solicit_answer_details(self): + def test_validate_state_solicit_answer_details(self) -> None: """Test validation of solicit_answer_details.""" exploration = 
exp_domain.Exploration.create_default_exploration('eid') + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) init_state = exploration.states[exploration.init_state_name] self.assertEqual(init_state.solicit_answer_details, False) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected solicit_answer_details to be ' + 'a boolean, received'): with self.swap(init_state, 'solicit_answer_details', 'abc'): exploration.validate() self.assertEqual(init_state.solicit_answer_details, False) - self.set_interaction_for_state(init_state, 'Continue') + self.set_interaction_for_state( + init_state, 'Continue', content_id_generator) self.assertEqual(init_state.interaction.id, 'Continue') exploration.validate() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The Continue interaction does not ' + 'support soliciting answer details from learners.'): with self.swap(init_state, 'solicit_answer_details', True): exploration.validate() - self.set_interaction_for_state(init_state, 'TextInput') + self.set_interaction_for_state( + init_state, 'TextInput', content_id_generator) + exploration.next_content_id_index = ( + content_id_generator.next_content_id_index + ) self.assertEqual(init_state.interaction.id, 'TextInput') self.assertEqual(init_state.solicit_answer_details, False) exploration.validate() @@ -3743,37 +2796,37 @@ def test_validate_state_solicit_answer_details(self): init_state = exploration.states[exploration.init_state_name] self.assertEqual(init_state.solicit_answer_details, True) - def test_validate_state_linked_skill_id(self): + def test_validate_state_linked_skill_id(self) -> None: """Test validation of linked_skill_id.""" exploration = exp_domain.Exploration.create_default_exploration('eid') init_state = exploration.states[exploration.init_state_name] self.assertEqual(init_state.linked_skill_id, None) - with self.assertRaisesRegexp( + with 
self.assertRaisesRegex( utils.ValidationError, 'Expected linked_skill_id to be ' + 'a str, received 12.'): with self.swap(init_state, 'linked_skill_id', 12): exploration.validate() self.assertEqual(init_state.linked_skill_id, None) - def test_validate_state_card_is_checkpoint(self): + def test_validate_state_card_is_checkpoint(self) -> None: """Test validation of card_is_checkpoint.""" exploration = exp_domain.Exploration.create_default_exploration('eid') init_state = exploration.states[exploration.init_state_name] self.assertEqual(init_state.card_is_checkpoint, True) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected card_is_checkpoint to be ' + 'a boolean, received'): with self.swap(init_state, 'card_is_checkpoint', 'abc'): exploration.validate() self.assertEqual(init_state.card_is_checkpoint, True) - def test_validate_solution_answer_is_exclusive(self): + def test_validate_solution_answer_is_exclusive(self) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') # Solution should be set to None as default. self.assertEqual(exploration.init_state.interaction.solution, None) - solution_dict = { + solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': False, 'correct_answer': 'hello_world!', 'explanation': { @@ -3786,151 +2839,19 @@ def test_validate_solution_answer_is_exclusive(self): state_domain.SubtitledHtml('hint_1', '') ) ] + # Ruling out the possibility of None for mypy type checking. + assert exploration.init_state.interaction.id is not None solution = state_domain.Solution.from_dict( - exploration.init_state.interaction.id, solution_dict) - exploration.init_state.update_interaction_hints(hints_list) - exploration.init_state.update_interaction_solution(solution) - exploration.validate() - - solution_dict = { - 'answer_is_exclusive': 1, - 'correct_answer': 'hello_world!', - 'explanation': { - 'content_id': 'solution', - 'html': '

    hello_world is a string

    ' - } - } - solution = state_domain.Solution.from_dict( - exploration.init_state.interaction.id, solution_dict) - exploration.init_state.update_interaction_solution(solution) - with self.assertRaisesRegexp( - Exception, 'Expected answer_is_exclusive to be bool, received 1'): - exploration.validate() - - def test_validate_non_list_param_changes(self): - exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - exploration.init_state.param_changes = 0 - - with self.assertRaisesRegexp( - Exception, 'Expected state param_changes to be a list, received 0'): - exploration.init_state.validate(None, True) - - def test_validate_duplicate_content_id_with_answer_group_feedback(self): - exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - state_answer_group = state_domain.AnswerGroup( - state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( - 'feedback_1', '

    Feedback

    '), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Contains', - { - 'x': { - 'contentId': 'rule_input_Contains', - 'normalizedStrSet': ['Test'] - } - }) - ], - [], - None - ) - - exploration.init_state.update_interaction_answer_groups( - [state_answer_group]) - exploration.init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'feedback_1', - 'html': '

    Feedback

    ' - })) - - with self.assertRaisesRegexp( - Exception, 'Found a duplicate content id feedback_1'): - exploration.init_state.validate(None, True) - - def test_validate_duplicate_content_id_with_answer_group_rules(self): - exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - state_answer_group = state_domain.AnswerGroup( - state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( - 'feedback_1', '

    Feedback

    '), - False, [], None, None), - [ - state_domain.RuleSpec( - 'Contains', - { - 'x': { - 'contentId': 'rule_input_Contains', - 'normalizedStrSet': ['Test'] - } - }), - state_domain.RuleSpec( - 'Contains', - { - 'x': { - 'contentId': 'rule_input_Contains', - 'normalizedStrSet': ['Test1'] - } - }) - ], - [], - None - ) - - exploration.init_state.update_interaction_answer_groups( - [state_answer_group]) - - with self.assertRaisesRegexp( - Exception, 'Found a duplicate content id rule_input_Contains'): - exploration.init_state.validate(None, True) - - def test_validate_duplicate_content_id_with_default_outcome(self): - exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - default_outcome = state_domain.Outcome( - 'Introduction', state_domain.SubtitledHtml('default_outcome', ''), - False, [], None, None - ) - exploration.init_state.update_interaction_default_outcome( - default_outcome - ) - exploration.init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'default_outcome', - 'html': '' - })) - - with self.assertRaisesRegexp( - Exception, 'Found a duplicate content id default_outcome'): - exploration.init_state.validate(None, True) - - def test_validate_duplicate_content_id_with_hints(self): - exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml('hint_1', '

    some html

    ') - ) - ] - - exploration.init_state.update_interaction_hints(hints_list) - exploration.init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'hint_1', - 'html': '' - })) - - with self.assertRaisesRegexp( - Exception, 'Found a duplicate content id hint_1'): - exploration.init_state.validate(None, True) - - def test_validate_duplicate_content_id_with_solution(self): - exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - subtitled_html = state_domain.SubtitledHtml('content_id', 'some html') - - hints_list = [state_domain.Hint(subtitled_html)] + exploration.init_state.interaction.id, solution_dict) + exploration.init_state.update_interaction_hints(hints_list) + exploration.init_state.update_interaction_solution(solution) + exploration.validate() - exploration.init_state.interaction.hints = hints_list + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. solution_dict = { - 'answer_is_exclusive': True, + 'answer_is_exclusive': 1, # type: ignore[typeddict-item] 'correct_answer': 'hello_world!', 'explanation': { 'content_id': 'solution', @@ -3940,76 +2861,46 @@ def test_validate_duplicate_content_id_with_solution(self): solution = state_domain.Solution.from_dict( exploration.init_state.interaction.id, solution_dict) exploration.init_state.update_interaction_solution(solution) - exploration.init_state.update_content( - state_domain.SubtitledHtml.from_dict({ - 'content_id': 'solution', - 'html': '' - })) + with self.assertRaisesRegex( + Exception, 'Expected answer_is_exclusive to be bool, received 1'): + exploration.validate() + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_non_list_param_changes(self) -> None: + exploration = self.save_new_valid_exploration('exp_id', 'owner_id') + exploration.init_state.param_changes = 0 # type: ignore[assignment] - with self.assertRaisesRegexp( - Exception, 'Found a duplicate content id solution'): - exploration.init_state.validate(None, True) + with self.assertRaisesRegex( + Exception, 'Expected state param_changes to be a list, received 0'): + exploration.init_state.validate({}, True) - def test_cannot_convert_state_dict_to_yaml_with_invalid_state_dict(self): + def test_cannot_convert_state_dict_to_yaml_with_invalid_state_dict( + self + ) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. with contextlib.ExitStack() as stack: captured_logs = stack.enter_context( self.capture_logging(min_level=logging.ERROR)) stack.enter_context( - self.assertRaisesRegexp( + self.assertRaisesRegex( Exception, 'string indices must be integers') ) exploration.init_state.convert_state_dict_to_yaml( - 'invalid_state_dict', 10) + 'invalid_state_dict', 10) # type: ignore[arg-type] self.assertEqual(len(captured_logs), 1) self.assertIn('Bad state dict: invalid_state_dict', captured_logs[0]) - def test_cannot_update_hints_with_content_id_not_in_written_translations( - self): - exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - old_hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml( - 'hint_1', '

    Hello, this is html1 for state2

    ') - ) - ] - new_hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml( - 'hint_2', '

    Hello, this is html2 for state2

    ') - ) - ] - - exploration.init_state.update_interaction_hints(old_hints_list) - - written_translations_dict = { - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Test!

    ', - 'needs_update': True - } - }, - 'default_outcome': {} - } - } - written_translations = ( - state_domain.WrittenTranslations.from_dict( - written_translations_dict)) - - exploration.init_state.update_written_translations(written_translations) - - with self.assertRaisesRegexp( - Exception, - 'The content_id hint_1 does not exist in written_translations'): - exploration.init_state.update_interaction_hints(new_hints_list) - def test_cannot_update_hints_with_content_id_not_in_recorded_voiceovers( - self): + self + ) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') old_hints_list = [ state_domain.Hint( @@ -4026,7 +2917,7 @@ def test_cannot_update_hints_with_content_id_not_in_recorded_voiceovers( exploration.init_state.update_interaction_hints(old_hints_list) - recorded_voiceovers_dict = { + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'content': { 'en': { @@ -4044,61 +2935,14 @@ def test_cannot_update_hints_with_content_id_not_in_recorded_voiceovers( exploration.init_state.update_recorded_voiceovers(recorded_voiceovers) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The content_id hint_1 does not exist in recorded_voiceovers'): exploration.init_state.update_interaction_hints(new_hints_list) - def test_cannot_update_hints_with_new_content_id_in_written_translations( - self): - exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - old_hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml( - 'hint_1', '

    Hello, this is html1 for state2

    ') - ) - ] - new_hints_list = [ - state_domain.Hint( - state_domain.SubtitledHtml( - 'hint_2', '

    Hello, this is html2 for state2

    ') - ) - ] - - exploration.init_state.update_interaction_hints(old_hints_list) - - written_translations_dict = { - 'translations_mapping': { - 'hint_2': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Test!

    ', - 'needs_update': True - } - }, - 'hint_1': { - 'hi': { - 'data_format': 'html', - 'translation': '

    Test1!

    ', - 'needs_update': True - } - }, - 'default_outcome': {} - } - } - written_translations = ( - state_domain.WrittenTranslations.from_dict( - written_translations_dict)) - - exploration.init_state.update_written_translations(written_translations) - - with self.assertRaisesRegexp( - Exception, - 'The content_id hint_2 already exists in written_translations'): - exploration.init_state.update_interaction_hints(new_hints_list) - def test_cannot_update_hints_with_new_content_id_in_recorded_voiceovers( - self): + self + ) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') old_hints_list = [ state_domain.Hint( @@ -4115,7 +2959,7 @@ def test_cannot_update_hints_with_new_content_id_in_recorded_voiceovers( exploration.init_state.update_interaction_hints(old_hints_list) - recorded_voiceovers_dict = { + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'hint_1': { 'en': { @@ -4141,12 +2985,14 @@ def test_cannot_update_hints_with_new_content_id_in_recorded_voiceovers( exploration.init_state.update_recorded_voiceovers(recorded_voiceovers) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The content_id hint_2 already exists in recorded_voiceovers'): exploration.init_state.update_interaction_hints(new_hints_list) - def test_cannot_update_interaction_solution_with_non_dict_solution(self): + def test_cannot_update_interaction_solution_with_non_dict_solution( + self + ) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') hints_list = [ state_domain.Hint( @@ -4154,7 +3000,7 @@ def test_cannot_update_interaction_solution_with_non_dict_solution(self): 'hint_1', '

    Hello, this is html1 for state2

    ') ) ] - solution_dict = { + solution_dict: state_domain.SolutionDict = { 'answer_is_exclusive': True, 'correct_answer': u'hello_world!', 'explanation': { @@ -4162,21 +3008,28 @@ def test_cannot_update_interaction_solution_with_non_dict_solution(self): 'html': u'

    hello_world is a string

    ' } } + # Ruling out the possibility of None for mypy type checking. + assert exploration.init_state.interaction.id is not None solution = state_domain.Solution.from_dict( exploration.init_state.interaction.id, solution_dict) exploration.init_state.update_interaction_hints(hints_list) exploration.init_state.update_interaction_solution(solution) + # Ruling out the possibility of None for mypy type checking. + assert exploration.init_state.interaction.solution is not None self.assertEqual( exploration.init_state.interaction.solution.to_dict(), solution_dict) - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + with self.assertRaisesRegex( Exception, 'Expected solution to be a Solution object,' 'received test string'): - exploration.init_state.update_interaction_solution('test string') + exploration.init_state.update_interaction_solution('test string') # type: ignore[arg-type] - def test_update_interaction_solution_with_no_solution(self): + def test_update_interaction_solution_with_no_solution(self) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') hints_list = [ state_domain.Hint( @@ -4191,28 +3044,37 @@ def test_update_interaction_solution_with_no_solution(self): self.assertIsNone(exploration.init_state.interaction.solution) - def test_cannot_update_interaction_hints_with_non_list_hints(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_cannot_update_interaction_hints_with_non_list_hints( + self + ) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected hints_list to be a list'): - exploration.init_state.update_interaction_hints({}) + exploration.init_state.update_interaction_hints({}) # type: ignore[arg-type] + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. def test_cannot_update_non_list_interaction_confirmed_unclassified_answers( - self): + self + ) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected confirmed_unclassified_answers to be a list'): ( exploration.init_state - .update_interaction_confirmed_unclassified_answers({})) + .update_interaction_confirmed_unclassified_answers({})) # type: ignore[arg-type] - def test_update_interaction_confirmed_unclassified_answers(self): + def test_update_interaction_confirmed_unclassified_answers(self) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback_1', '

    Feedback

    '), False, [], None, None), [ @@ -4240,74 +3102,93 @@ def test_update_interaction_confirmed_unclassified_answers(self): exploration.init_state.interaction.confirmed_unclassified_answers, [state_answer_group]) - def test_cannot_update_non_list_interaction_answer_groups(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_cannot_update_non_list_interaction_answer_groups(self) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected interaction_answer_groups to be a list'): exploration.init_state.update_interaction_answer_groups( - 'invalid_answer_groups') + 'invalid_answer_groups') # type: ignore[arg-type] - def test_cannot_update_answer_groups_with_non_dict_rule_inputs(self): + def test_cannot_update_answer_groups_with_non_dict_rule_inputs( + self + ) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback_1', '

    Feedback

    '), False, [], None, None), [ state_domain.RuleSpec( - 'Contains', [] + 'Contains', {} ) ], [], None ) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + state_answer_group.rule_specs[0].inputs = [] # type: ignore[assignment] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape('Expected rule_inputs to be a dict, received []') ): exploration.init_state.update_interaction_answer_groups( [state_answer_group]) - def test_cannot_update_answer_groups_with_non_list_rule_specs(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_cannot_update_answer_groups_with_non_list_rule_specs(self) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback_1', '

    Feedback

    '), False, [], None, None - ), {}, [], None + ), [], [], None ) - state_answer_group.rule_specs = {} + state_answer_group.rule_specs = {} # type: ignore[assignment] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected answer group rule specs to be a list'): exploration.init_state.update_interaction_answer_groups( [state_answer_group]) - def test_cannot_update_answer_groups_with_invalid_rule_input_value(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_cannot_update_answer_groups_with_invalid_rule_input_value( + self + ) -> None: exploration = self.save_new_valid_exploration('exp_id', 'owner_id') + test_inputs: Dict[str, Dict[str, Union[str, List[str]]]] = { + 'x': { + 'contentId': 'rule_input_Equals', + 'normalizedStrSet': [[]] # type: ignore[list-item] + } + } state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback_1', '

    Feedback

    '), False, [], None, None), [ state_domain.RuleSpec( 'Contains', - { - 'x': { - 'contentId': 'rule_input_Equals', - 'normalizedStrSet': [[]] - } - }) + test_inputs + ) ], [], None ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape( 'Value has the wrong type. It should be a TranslatableSetOf' @@ -4317,10 +3198,10 @@ def test_cannot_update_answer_groups_with_invalid_rule_input_value(self): exploration.init_state.update_interaction_answer_groups( [state_answer_group]) - def test_validate_rule_spec(self): - observed_log_messages = [] + def test_validate_rule_spec(self) -> None: + observed_log_messages: List[str] = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.error().""" observed_log_messages.append(msg % args) @@ -4329,7 +3210,7 @@ def _mock_logging_function(msg, *args): exploration = self.save_new_valid_exploration('exp_id', 'owner_id') state_answer_group = state_domain.AnswerGroup( state_domain.Outcome( - exploration.init_state_name, state_domain.SubtitledHtml( + exploration.init_state_name, None, state_domain.SubtitledHtml( 'feedback_1', '

    Feedback

    '), False, [], None, None), [ @@ -4348,7 +3229,7 @@ def _mock_logging_function(msg, *args): exploration.init_state.update_interaction_answer_groups( [state_answer_group]) - with logging_swap, self.assertRaisesRegexp(KeyError, '\'x\''): + with logging_swap, self.assertRaisesRegex(KeyError, '\'x\''): ( exploration.init_state.interaction.answer_groups[0] .rule_specs[0].validate([], {}) @@ -4366,9 +3247,12 @@ def _mock_logging_function(msg, *args): class InteractionCustomizationArgDomainTests(test_utils.GenericTestBase): """Test methods for InteractionCustomizationArg domain object.""" - def test_traverse_by_schema_and_convert(self): - html = [] - def extract_html(value, unused_schema_obj_type): + def test_traverse_by_schema_and_convert(self) -> None: + html: List[str] = [] + def extract_html( + value: state_domain.SubtitledHtml, + unused_schema_obj_type: str + ) -> List[str]: """Extracts html from SubtitledHtml values. Args: @@ -4405,7 +3289,7 @@ def extract_html(value, unused_schema_obj_type): self.assertEqual(html, ['

    testing

    ']) - def test_traverse_by_schema_and_get(self): + def test_traverse_by_schema_and_get(self) -> None: html = [] schema = { @@ -4436,8 +3320,8 @@ def test_traverse_by_schema_and_get(self): class SubtitledUnicodeDomainUnitTests(test_utils.GenericTestBase): """Test SubtitledUnicode domain object methods.""" - def test_from_and_to_dict(self): - subtitled_unicode_dict = { + def test_from_and_to_dict(self) -> None: + subtitled_unicode_dict: state_domain.SubtitledUnicodeDict = { 'content_id': 'id', 'unicode_str': '' } @@ -4445,7 +3329,7 @@ def test_from_and_to_dict(self): subtitled_unicode_dict) self.assertEqual(subtitled_unicode.to_dict(), subtitled_unicode_dict) - def test_create_default(self): + def test_create_default(self) -> None: subtitled_unicode = ( state_domain.SubtitledUnicode.create_default_subtitled_unicode( 'id') @@ -4456,360 +3340,11 @@ def test_create_default(self): }) -class WrittenTranslationsDomainUnitTests(test_utils.GenericTestBase): - """Test methods operating on written transcripts.""" - - def test_data_formats_are_correct_and_complete(self): - translatable_class_names_in_data_formats = sorted( - state_domain.WrittenTranslation. 
- DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE.values()) - self.assertEqual( - translatable_class_names_in_data_formats, - translatable_object_registry.Registry.get_all_class_names()) - - def test_from_and_to_dict_works_correctly(self): - written_translations_dict = { - 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': 'hello', - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - }, - 'fr': { - 'data_format': 'set_of_normalized_string', - 'translation': ['test1', 'test2'], - 'needs_update': False - }, - }, - 'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': 'Testing!', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - }, - 'fr': { - 'data_format': 'set_of_normalized_string', - 'translation': ['test1', 'test2'], - 'needs_update': False - } - } - } - } - - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - self.assertEqual( - written_translations.to_dict(), written_translations_dict) - - def test_get_content_ids_for_text_translation_return_correct_list_of_content_id(self): # pylint: disable=line-too-long - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': {} - }) - self.assertEqual( - written_translations.get_content_ids_for_text_translation(), []) - - written_translations.add_content_id_for_translation('feedback_1') - written_translations.add_content_id_for_translation('feedback_2') - self.assertItemsEqual( - written_translations.get_content_ids_for_text_translation(), [ - 'feedback_2', 'feedback_1']) - - def test_get_translated_content_in_non_existing_language_raise_error(self): - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': { - 'content': { - 'en': { - 'data_format': 'html', - 'translation': '

    In English.

    ', - 'needs_update': False - } - } - } - }) - translated_content = written_translations.get_translated_content( - 'content', 'en') - self.assertEqual(translated_content, '

    In English.

    ') - - with self.assertRaisesRegexp( - Exception, 'Translation for the given content_id content does not ' - 'exist in hi language code'): - written_translations.get_translated_content('content', 'hi') - - def test_get_translated_content_for_invalid_content_id_raise_error(self): - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': { - 'content': { - 'en': { - 'data_format': 'html', - 'translation': '

    In English.

    ', - 'needs_update': False - } - } - } - }) - translated_content = written_translations.get_translated_content( - 'content', 'en') - self.assertEqual(translated_content, '

    In English.

    ') - - with self.assertRaisesRegexp( - Exception, 'Invalid content_id: invalid_id'): - written_translations.get_translated_content('invalid_id', 'hi') - - def test_add_content_id_for_translations_adds_content_id(self): - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': {} - }) - - self.assertEqual( - len(written_translations.get_content_ids_for_text_translation()), 0) - - new_content_id = 'content_id' - written_translations.add_content_id_for_translation(new_content_id) - - self.assertEqual( - len(written_translations.get_content_ids_for_text_translation()), 1) - self.assertEqual( - written_translations.get_content_ids_for_text_translation(), - ['content_id']) - - def test_add_content_id_for_translation_with_invalid_content_id_raise_error( - self): - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': {} - }) - invalid_content_id = 123 - with self.assertRaisesRegexp( - Exception, 'Expected content_id to be a string, received 123'): - written_translations.add_content_id_for_translation( - invalid_content_id) - - def test_add_content_id_for_translation_with_existing_content_id_raise_error( # pylint: disable=line-too-long - self): - written_translations_dict = { - 'translations_mapping': { - 'feedback_1': { - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } - } - } - - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - existing_content_id = 'feedback_1' - with self.assertRaisesRegexp( - Exception, 'The content_id feedback_1 already exist.'): - written_translations.add_content_id_for_translation( - existing_content_id) - - def test_delete_content_id_for_translations_deletes_content_id(self): - old_written_translations_dict = { - 'translations_mapping': { - 'content': { - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } - } - } - - 
written_translations = state_domain.WrittenTranslations.from_dict( - old_written_translations_dict) - self.assertEqual( - len(written_translations.get_content_ids_for_text_translation()), 1) - - written_translations.delete_content_id_for_translation('content') - - self.assertEqual( - len(written_translations.get_content_ids_for_text_translation()), 0) - - def test_delete_content_id_for_translation_with_nonexisting_content_id_raise_error(self): # pylint: disable=line-too-long - written_translations_dict = { - 'translations_mapping': { - 'content': {} - } - } - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - nonexisting_content_id_to_delete = 'feedback_1' - with self.assertRaisesRegexp( - Exception, 'The content_id feedback_1 does not exist.'): - written_translations.delete_content_id_for_translation( - nonexisting_content_id_to_delete) - - def test_delete_content_id_for_translation_with_invalid_content_id_raise_error(self): # pylint: disable=line-too-long - written_translations = state_domain.WrittenTranslations.from_dict({ - 'translations_mapping': {} - }) - invalid_content_id_to_delete = 123 - with self.assertRaisesRegexp( - Exception, 'Expected content_id to be a string, '): - written_translations.delete_content_id_for_translation( - invalid_content_id_to_delete) - - def test_validation_with_invalid_content_id_raise_error(self): - written_translations_dict = { - 'translations_mapping': { - 123: {} - } - } - - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - - with self.assertRaisesRegexp( - Exception, 'Expected content_id to be a string, '): - written_translations.validate([123]) - - def test_validate_non_dict_language_code_to_written_translation(self): - written_translations = state_domain.WrittenTranslations({ - 'en': [] - }) - - with self.assertRaisesRegexp( - Exception, - re.escape('Expected content_id value to be a dict, received []')): - 
written_translations.validate(None) - - def test_validation_with_invalid_type_language_code_raise_error(self): - written_translations_dict = { - 'translations_mapping': { - 'content': { - 123: { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } - } - } - - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - - with self.assertRaisesRegexp( - Exception, 'Expected language_code to be a string, '): - written_translations.validate(['content']) - - def test_validation_with_unknown_language_code_raise_error(self): - written_translations_dict = { - 'translations_mapping': { - 'content': { - 'ed': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } - } - } - - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - - with self.assertRaisesRegexp(Exception, 'Invalid language_code: ed'): - written_translations.validate(['content']) - - def test_validation_with_invalid_content_id_list(self): - written_translations_dict = { - 'translations_mapping': { - 'content': { - 'en': { - 'data_format': 'html', - 'translation': '

    hello!

    ', - 'needs_update': False - } - } - } - } - - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - - with self.assertRaisesRegexp( - Exception, - re.escape( - 'Expected state written_translations to match the listed ' - 'content ids [\'invalid_content\']')): - written_translations.validate(['invalid_content']) - - def test_get_content_ids_that_are_correctly_translated(self): - written_translations_dict = { - 'translations_mapping': { - 'content': {}, - 'hint_1': {} - } - } - - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - - self.assertEqual( - written_translations.get_content_ids_that_are_correctly_translated( - 'hi'), []) - - def test_get_content_ids_that_are_correctly_translated_with_some_existing_translations(self): # pylint: disable=line-too-long - written_translations_dict = { - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '

    hello!

    ', - 'needs_update': False - } - }, - 'hint_1': {} - } - } - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - - self.assertEqual( - written_translations.get_content_ids_that_are_correctly_translated( - 'hi'), ['content']) - - def test_get_content_ids_that_are_correctly_translated_with_some_existing_translations_needs_update(self): # pylint: disable=line-too-long - written_translations_dict = { - 'translations_mapping': { - 'content': { - 'hi': { - 'data_format': 'html', - 'translation': '

    hello!

    ', - 'needs_update': True - } - }, - 'hint_1': {} - } - } - written_translations = state_domain.WrittenTranslations.from_dict( - written_translations_dict) - - self.assertEqual( - written_translations.get_content_ids_that_are_correctly_translated( - 'hi'), []) - - class RecordedVoiceoversDomainUnitTests(test_utils.GenericTestBase): """Test methods operating on recorded voiceovers.""" - def test_from_and_to_dict_wroks_correctly(self): - recorded_voiceovers_dict = { + def test_from_and_to_dict_wroks_correctly(self) -> None: + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'content1': { 'en': { @@ -4847,7 +3382,9 @@ def test_from_and_to_dict_wroks_correctly(self): self.assertEqual( recorded_voiceovers.to_dict(), recorded_voiceovers_dict) - def test_get_content_ids_for_voiceovers_return_correct_list_of_content_id(self): # pylint: disable=line-too-long + def test_get_content_ids_for_voiceovers_return_correct_list_of_content_id( + self + ) -> None: recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': {} }) @@ -4860,7 +3397,7 @@ def test_get_content_ids_for_voiceovers_return_correct_list_of_content_id(self): recorded_voiceovers.get_content_ids_for_voiceovers(), ['feedback_2', 'feedback_1']) - def test_add_content_id_for_voiceovers_adds_content_id(self): + def test_add_content_id_for_voiceovers_adds_content_id(self) -> None: recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': {} }) @@ -4877,20 +3414,25 @@ def test_add_content_id_for_voiceovers_adds_content_id(self): recorded_voiceovers.get_content_ids_for_voiceovers(), ['content_id']) + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
def test_add_content_id_for_voiceover_with_invalid_content_id_raise_error( - self): + self + ) -> None: recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': {} }) invalid_content_id = 123 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected content_id to be a string, received 123'): recorded_voiceovers.add_content_id_for_voiceover( - invalid_content_id) + invalid_content_id) # type: ignore[arg-type] def test_add_content_id_for_voiceover_with_existing_content_id_raise_error( # pylint: disable=line-too-long - self): - recorded_voiceovers_dict = { + self + ) -> None: + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'feedback_1': { 'en': { @@ -4906,13 +3448,13 @@ def test_add_content_id_for_voiceover_with_existing_content_id_raise_error( # py recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict( recorded_voiceovers_dict) existing_content_id = 'feedback_1' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The content_id feedback_1 already exist.'): recorded_voiceovers.add_content_id_for_voiceover( existing_content_id) - def test_delete_content_id_for_voiceovers_deletes_content_id(self): - old_recorded_voiceovers_dict = { + def test_delete_content_id_for_voiceovers_deletes_content_id(self) -> None: + old_recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'content': { 'en': { @@ -4935,8 +3477,10 @@ def test_delete_content_id_for_voiceovers_deletes_content_id(self): self.assertEqual( len(recorded_voiceovers.get_content_ids_for_voiceovers()), 0) - def test_delete_content_id_for_voiceover_with_nonexisting_content_id_raise_error(self): # pylint: disable=line-too-long - recorded_voiceovers_dict = { + def test_delete_content_id_for_voiceover_with_nonexisting_content_id_raise_error( # pylint: disable=line-too-long + self + ) -> None: + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = 
{ 'voiceovers_mapping': { 'content': {} } @@ -4944,50 +3488,69 @@ def test_delete_content_id_for_voiceover_with_nonexisting_content_id_raise_error recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict( recorded_voiceovers_dict) nonexisting_content_id_to_delete = 'feedback_1' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The content_id feedback_1 does not exist.'): recorded_voiceovers.delete_content_id_for_voiceover( nonexisting_content_id_to_delete) - def test_delete_content_id_for_voiceover_with_invalid_content_id_raise_error(self): # pylint: disable=line-too-long + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_delete_content_id_for_voiceover_with_invalid_content_id_raise_error( # pylint: disable=line-too-long + self + ) -> None: recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': {} }) invalid_content_id_to_delete = 123 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected content_id to be a string, '): recorded_voiceovers.delete_content_id_for_voiceover( - invalid_content_id_to_delete) + invalid_content_id_to_delete) # type: ignore[arg-type] - def test_validation_with_invalid_content_id_raise_error(self): - recorded_voiceovers_dict = { + def test_validation_with_invalid_content_id_raise_error(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { - 123: {} + 123: {} # type: ignore[dict-item] } } recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict( recorded_voiceovers_dict) - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + with self.assertRaisesRegex( Exception, 'Expected content_id to be a string, '): - recorded_voiceovers.validate([123]) + recorded_voiceovers.validate([123]) # type: ignore[list-item] - def test_validate_non_dict_language_code_to_voiceover(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_dict_language_code_to_voiceover(self) -> None: recorded_voiceovers = state_domain.RecordedVoiceovers({ - 'en': [] + 'en': [] # type: ignore[dict-item] }) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape('Expected content_id value to be a dict, received []')): recorded_voiceovers.validate(None) - def test_validation_with_invalid_type_language_code_raise_error(self): - recorded_voiceovers_dict = { + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validation_with_invalid_type_language_code_raise_error( + self + ) -> None: + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'content': { - 123: { + 123: { # type: ignore[dict-item] 'filename': 'xyz.mp3', 'file_size_bytes': 123, 'needs_update': False, @@ -5000,12 +3563,12 @@ def test_validation_with_invalid_type_language_code_raise_error(self): recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict( recorded_voiceovers_dict) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected language_code to be a string, '): recorded_voiceovers.validate(['content']) - def test_validation_with_unknown_language_code_raise_error(self): - recorded_voiceovers_dict = { + def test_validation_with_unknown_language_code_raise_error(self) -> None: + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'content': { 'ed': { @@ -5021,11 +3584,11 @@ def test_validation_with_unknown_language_code_raise_error(self): recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict( recorded_voiceovers_dict) - with self.assertRaisesRegexp(Exception, 'Invalid language_code: ed'): + with self.assertRaisesRegex(Exception, 'Invalid language_code: ed'): recorded_voiceovers.validate(['content']) - def test_validation_with_invalid_content_id_list(self): - recorded_voiceovers_dict = { + def test_validation_with_invalid_content_id_list(self) -> None: + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'content': { 'en': { @@ -5041,7 +3604,7 @@ def test_validation_with_invalid_content_id_list(self): recorded_voiceovers = state_domain.RecordedVoiceovers.from_dict( recorded_voiceovers_dict) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape( 'Expected state recorded_voiceovers to match the listed ' @@ -5051,27 +3614,30 @@ def test_validation_with_invalid_content_id_list(self): class 
VoiceoverDomainTests(test_utils.GenericTestBase): - def setUp(self): - super(VoiceoverDomainTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.voiceover = state_domain.Voiceover('filename.mp3', 10, False, 15.0) - def test_validate_non_str_filename(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_str_filename(self) -> None: self.voiceover.validate() - self.voiceover.filename = 0 - with self.assertRaisesRegexp( + self.voiceover.filename = 0 # type: ignore[assignment] + with self.assertRaisesRegex( Exception, 'Expected audio filename to be a string'): self.voiceover.validate() - def test_validate_filename(self): + def test_validate_filename(self) -> None: self.voiceover.validate() self.voiceover.filename = 'invalid_filename' - with self.assertRaisesRegexp(Exception, 'Invalid audio filename'): + with self.assertRaisesRegex(Exception, 'Invalid audio filename'): self.voiceover.validate() - def test_validate_audio_extension(self): + def test_validate_audio_extension(self) -> None: self.voiceover.validate() self.voiceover.filename = 'filename.png' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape( 'Invalid audio filename: it should have one of the following ' @@ -5079,44 +3645,91 @@ def test_validate_audio_extension(self): % list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys()))): self.voiceover.validate() - def test_validate_non_int_file_size_bytes(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_non_int_file_size_bytes(self) -> None: self.voiceover.validate() - self.voiceover.file_size_bytes = 'file_size_bytes' - with self.assertRaisesRegexp( + self.voiceover.file_size_bytes = 'file_size_bytes' # type: ignore[assignment] + with self.assertRaisesRegex( Exception, 'Expected file size to be an int'): self.voiceover.validate() - def test_validate_negative_file_size_bytes(self): + def test_validate_negative_file_size_bytes(self) -> None: self.voiceover.validate() self.voiceover.file_size_bytes = -1 - with self.assertRaisesRegexp(Exception, 'Invalid file size'): + with self.assertRaisesRegex(Exception, 'Invalid file size'): self.voiceover.validate() - def test_validate_non_bool_needs_update(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_bool_needs_update(self) -> None: self.voiceover.validate() - self.voiceover.needs_update = 'needs_update' - with self.assertRaisesRegexp( + self.voiceover.needs_update = 'needs_update' # type: ignore[assignment] + with self.assertRaisesRegex( Exception, 'Expected needs_update to be a bool'): self.voiceover.validate() - def test_validate_float_duration_secs(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_str_duration_secs(self) -> None: self.voiceover.validate() - self.voiceover.duration_secs = 'duration_secs' - with self.assertRaisesRegexp( + self.voiceover.duration_secs = 'duration_secs' # type: ignore[assignment] + with self.assertRaisesRegex( Exception, 'Expected duration_secs to be a float'): self.voiceover.validate() - def test_validate_int_duration_secs(self): + def test_validate_int_duration_secs(self) -> None: self.voiceover.validate() self.voiceover.duration_secs = 10 - with self.assertRaisesRegexp( - Exception, 'Expected duration_secs to be a float'): - self.voiceover.validate() + self.voiceover.validate() + self.assertEqual(self.voiceover.duration_secs, 10) + + def test_validate_float_duration_secs(self) -> None: + self.voiceover.validate() + self.voiceover.duration_secs = 10.5 + self.voiceover.validate() + self.assertEqual(self.voiceover.duration_secs, 10.5) - def test_validate_negative_duration_seconds(self): + def test_validate_negative_duration_seconds(self) -> None: self.voiceover.validate() self.voiceover.duration_secs = -1.45 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected duration_secs to be positive number, ' 'or zero if not yet specified'): self.voiceover.validate() + + +class StateVersionHistoryDomainUnitTests(test_utils.GenericTestBase): + + def test_state_version_history_gets_created(self) -> None: + expected_dict: state_domain.StateVersionHistoryDict = { + 'previously_edited_in_version': 1, + 'state_name_in_previous_version': 'state 1', + 'committer_id': 'user_1' + } + actual_dict = state_domain.StateVersionHistory( + 1, 'state 1', 'user_1').to_dict() + + self.assertEqual( + expected_dict, actual_dict) + + def test_state_version_history_gets_created_from_dict(self) -> None: + state_version_history_dict: state_domain.StateVersionHistoryDict = { + 'previously_edited_in_version': 1, + 'state_name_in_previous_version': 'state 1', + 'committer_id': 'user_1' + } + state_version_history = 
state_domain.StateVersionHistory.from_dict( + state_version_history_dict) + + self.assertEqual( + state_version_history.previously_edited_in_version, + state_version_history_dict['previously_edited_in_version']) + self.assertEqual( + state_version_history.state_name_in_previous_version, + state_version_history_dict['state_name_in_previous_version']) + self.assertEqual( + state_version_history.to_dict(), state_version_history_dict) diff --git a/core/domain/stats_domain.py b/core/domain/stats_domain.py index 4883529fc518..1e8c3bd045ff 100644 --- a/core/domain/stats_domain.py +++ b/core/domain/stats_domain.py @@ -26,11 +26,20 @@ from core import feconf from core import utils from core.constants import constants -from core.domain import action_registry from core.domain import customization_args_util from core.domain import exp_domain -from core.domain import interaction_registry -from core.domain import playthrough_issue_registry + +from typing import Any, Dict, Final, List, Literal, Optional, TypedDict, Union + +# TODO(#14537): Refactor this file and remove imports marked +# with 'invalid-import-from'. +from core.domain import action_registry # pylint: disable=invalid-import-from # isort:skip +from core.domain import interaction_registry # pylint: disable=invalid-import-from # isort:skip +from core.domain import playthrough_issue_registry # pylint: disable=invalid-import-from # isort:skip + +MYPY = False +if MYPY: # pragma: no cover + from core.domain import state_domain # These are special sentinel values attributed to answers migrated from the old # answer storage model. Those answers could not have session IDs or time spent @@ -41,31 +50,172 @@ # NOTE TO DEVELOPERS: All other state answer data model entities must not ever # store this session ID unless it was created by the 2017 answer migration job # (see #1205). Also, this string must never change. 
-MIGRATED_STATE_ANSWER_SESSION_ID_2017 = 'migrated_state_answer_session_id_2017' -MIGRATED_STATE_ANSWER_TIME_SPENT_IN_SEC = 0.0 +MIGRATED_STATE_ANSWER_SESSION_ID_2017: Final = ( + 'migrated_state_answer_session_id_2017') +MIGRATED_STATE_ANSWER_TIME_SPENT_IN_SEC: Final = 0.0 # These values dictate the types of calculation objects stored in # StateAnswersCalcOutput. -CALC_OUTPUT_TYPE_ANSWER_FREQUENCY_LIST = 'AnswerFrequencyList' -CALC_OUTPUT_TYPE_CATEGORIZED_ANSWER_FREQUENCY_LISTS = ( +CALC_OUTPUT_TYPE_ANSWER_FREQUENCY_LIST: Final = 'AnswerFrequencyList' +CALC_OUTPUT_TYPE_CATEGORIZED_ANSWER_FREQUENCY_LISTS: Final = ( 'CategorizedAnswerFrequencyLists') # The maximum size in bytes the learner_answer_info_list can take # in LearnerAnswerDetails. -MAX_LEARNER_ANSWER_INFO_LIST_BYTE_SIZE = 900000 +MAX_LEARNER_ANSWER_INFO_LIST_BYTE_SIZE: Final = 900000 # The maximum size in bytes the answer_details can take in # LearnerAnswerInfo. -MAX_ANSWER_DETAILS_BYTE_SIZE = 10000 +MAX_ANSWER_DETAILS_BYTE_SIZE: Final = 10000 + +IssuesCustomizationArgsDictType = Dict[ + str, Dict[str, Union[str, int, List[str]]] +] + + +class SubmittedAnswerDict(TypedDict): + """Dictionary representing the SubmittedAnswer object.""" + + answer: state_domain.AcceptableCorrectAnswerTypes + time_spent_in_sec: float + answer_group_index: int + rule_spec_index: int + classification_categorization: str + session_id: str + interaction_id: str + params: Dict[str, Union[str, int]] + rule_spec_str: Optional[str] + answer_str: Optional[str] + + +class StateAnswersDict(TypedDict): + """Dictionary representing the StateAnswers object.""" + + exploration_id: str + exploration_version: int + state_name: str + interaction_id: str + submitted_answer_list: List[SubmittedAnswerDict] + + +class ExplorationIssueDict(TypedDict): + """Dictionary representing the ExplorationIssue object.""" + + issue_type: str + issue_customization_args: IssuesCustomizationArgsDictType + playthrough_ids: List[str] + schema_version: int + 
is_valid: bool + + +class PlaythroughDict(TypedDict): + """Dictionary representing the PlayThrough object.""" + + exp_id: str + exp_version: int + issue_type: str + issue_customization_args: IssuesCustomizationArgsDictType + actions: List[LearnerActionDict] + + +class ExplorationIssuesDict(TypedDict): + """Dictionary representing the ExplorationIssues object.""" + + exp_id: str + exp_version: int + unresolved_issues: List[ExplorationIssueDict] + + +class LearnerAnswerDetailsDict(TypedDict): + """Dictionary representing the LearnerAnswerDetail object.""" + + state_reference: str + entity_type: str + interaction_id: str + learner_answer_info_list: List[LearnerAnswerInfoDict] + accumulated_answer_info_json_size_bytes: int + learner_answer_info_schema_version: int + + +class ExplorationStatsDict(TypedDict): + """Dictionary representing the ExplorationStats object.""" + + exp_id: str + exp_version: int + num_starts_v1: int + num_starts_v2: int + num_actual_starts_v1: int + num_actual_starts_v2: int + num_completions_v1: int + num_completions_v2: int + state_stats_mapping: Dict[str, Dict[str, int]] + + +class ExplorationStatsFrontendDict(TypedDict): + """Dictionary representing the ExplorationStats object + for use in frontend.""" + + exp_id: str + exp_version: int + num_starts: int + num_actual_starts: int + num_completions: int + state_stats_mapping: Dict[str, Dict[str, int]] + + +# In argument 'customization_args', we used Any type because it accepts the +# values of customization args and that values can be of type str, int, Dict, +# bool, List and other types too. So to make it generalize for every type of +# values, we used Any here. 
+class LearnerActionDict(TypedDict): + """Dictionary representing the LearnerAction object.""" + + action_type: str + action_customization_args: Dict[str, Dict[str, Union[str, int]]] + schema_version: int + + +class AnswerOccurrenceDict(TypedDict): + """Dictionary representing the AnswerOccurrence object.""" + + answer: state_domain.AcceptableCorrectAnswerTypes + frequency: int + + +class LearnerAnswerInfoDict(TypedDict): + """Dictionary representing LearnerAnswerInfo object.""" + + id: str + answer: Optional[Union[str, int, Dict[str, str], List[str]]] + answer_details: str + created_on: str + + +class AggregatedStatsDict(TypedDict): + """Dictionary representing aggregated_stats dict used to validate the + SessionStateStats domain object.""" + + num_starts: int + num_actual_starts: int + num_completions: int + state_stats_mapping: Dict[str, Dict[str, int]] class ExplorationStats: """Domain object representing analytics data for an exploration.""" def __init__( - self, exp_id, exp_version, num_starts_v1, num_starts_v2, - num_actual_starts_v1, num_actual_starts_v2, num_completions_v1, - num_completions_v2, state_stats_mapping): + self, + exp_id: str, + exp_version: int, + num_starts_v1: int, + num_starts_v2: int, + num_actual_starts_v1: int, + num_actual_starts_v2: int, + num_completions_v1: int, + num_completions_v2: int, + state_stats_mapping: Dict[str, StateStats] + ) -> None: """Constructs an ExplorationStats domain object. Args: @@ -95,7 +245,7 @@ def __init__( self.state_stats_mapping = state_stats_mapping @property - def num_starts(self): + def num_starts(self) -> int: """Returns the number of learners who started the exploration. Returns: @@ -104,7 +254,7 @@ def num_starts(self): return self.num_starts_v1 + self.num_starts_v2 @property - def num_actual_starts(self): + def num_actual_starts(self) -> int: """Returns the number of learners who actually attempted the exploration. 
These are the learners who have completed the initial state of the exploration and traversed to the next state. @@ -115,7 +265,7 @@ def num_actual_starts(self): return self.num_actual_starts_v1 + self.num_actual_starts_v2 @property - def num_completions(self): + def num_completions(self) -> int: """Returns the number of learners who completed the exploration. Returns: @@ -123,14 +273,14 @@ def num_completions(self): """ return self.num_completions_v1 + self.num_completions_v2 - def to_dict(self): + def to_dict(self) -> ExplorationStatsDict: """Returns a dict representation of the domain object.""" state_stats_mapping_dict = {} for state_name in self.state_stats_mapping: state_stats_mapping_dict[state_name] = self.state_stats_mapping[ state_name].to_dict() - exploration_stats_dict = { + exploration_stats_dict: ExplorationStatsDict = { 'exp_id': self.exp_id, 'exp_version': self.exp_version, 'num_starts_v1': self.num_starts_v1, @@ -143,7 +293,7 @@ def to_dict(self): } return exploration_stats_dict - def to_frontend_dict(self): + def to_frontend_dict(self) -> ExplorationStatsFrontendDict: """Returns a dict representation of the domain object for use in the frontend. """ @@ -152,7 +302,7 @@ def to_frontend_dict(self): state_stats_mapping_dict[state_name] = self.state_stats_mapping[ state_name].to_frontend_dict() - exploration_stats_dict = { + exploration_stats_dict: ExplorationStatsFrontendDict = { 'exp_id': self.exp_id, 'exp_version': self.exp_version, 'num_starts': self.num_starts, @@ -163,7 +313,12 @@ def to_frontend_dict(self): return exploration_stats_dict @classmethod - def create_default(cls, exp_id, exp_version, state_stats_mapping): + def create_default( + cls, + exp_id: str, + exp_version: int, + state_stats_mapping: Dict[str, StateStats] + ) -> ExplorationStats: """Creates a ExplorationStats domain object and sets all properties to 0. 
@@ -178,7 +333,7 @@ def create_default(cls, exp_id, exp_version, state_stats_mapping): """ return cls(exp_id, exp_version, 0, 0, 0, 0, 0, 0, state_stats_mapping) - def get_sum_of_first_hit_counts(self): + def get_sum_of_first_hit_counts(self) -> int: """Compute the sum of first hit counts for the exploration stats. Returns: @@ -190,10 +345,17 @@ def get_sum_of_first_hit_counts(self): sum_first_hits += state_stats.first_hit_count return sum_first_hits - def validate(self): + def validate(self) -> None: """Validates the ExplorationStats domain object.""" - exploration_stats_properties = [ + exploration_stats_properties: List[Literal[ + 'num_starts_v1', + 'num_starts_v2', + 'num_actual_starts_v1', + 'num_actual_starts_v2', + 'num_completions_v1', + 'num_completions_v2' + ]] = [ 'num_starts_v1', 'num_starts_v2', 'num_actual_starts_v1', @@ -212,7 +374,6 @@ def validate(self): self.exp_version)) exploration_stats_dict = self.to_dict() - for stat_property in exploration_stats_properties: if not isinstance(exploration_stats_dict[stat_property], int): raise utils.ValidationError( @@ -227,7 +388,7 @@ def validate(self): 'Expected state_stats_mapping to be a dict, received %s' % ( self.state_stats_mapping)) - def clone(self): + def clone(self) -> ExplorationStats: """Returns a clone of this instance.""" return ExplorationStats( self.exp_id, self.exp_version, self.num_starts_v1, @@ -247,11 +408,19 @@ class StateStats: """ def __init__( - self, total_answers_count_v1, total_answers_count_v2, - useful_feedback_count_v1, useful_feedback_count_v2, - total_hit_count_v1, total_hit_count_v2, first_hit_count_v1, - first_hit_count_v2, num_times_solution_viewed_v2, - num_completions_v1, num_completions_v2): + self, + total_answers_count_v1: int, + total_answers_count_v2: int, + useful_feedback_count_v1: int, + useful_feedback_count_v2: int, + total_hit_count_v1: int, + total_hit_count_v2: int, + first_hit_count_v1: int, + first_hit_count_v2: int, + num_times_solution_viewed_v2: int, + 
num_completions_v1: int, + num_completions_v2: int + ) -> None: """Constructs a StateStats domain object. Args: @@ -289,7 +458,7 @@ def __init__( self.num_completions_v2 = num_completions_v2 @property - def total_answers_count(self): + def total_answers_count(self) -> int: """Returns the total number of answers submitted to this state. Returns: @@ -298,7 +467,7 @@ def total_answers_count(self): return self.total_answers_count_v1 + self.total_answers_count_v2 @property - def useful_feedback_count(self): + def useful_feedback_count(self) -> int: """Returns the total number of answers that received useful feedback. Returns: @@ -307,7 +476,7 @@ def useful_feedback_count(self): return self.useful_feedback_count_v1 + self.useful_feedback_count_v2 @property - def total_hit_count(self): + def total_hit_count(self) -> int: """Returns the total number of times the state was entered. Returns: @@ -316,7 +485,7 @@ def total_hit_count(self): return self.total_hit_count_v1 + self.total_hit_count_v2 @property - def first_hit_count(self): + def first_hit_count(self) -> int: """Returns the number of times the state was entered for the first time. Returns: @@ -325,7 +494,7 @@ def first_hit_count(self): return self.first_hit_count_v1 + self.first_hit_count_v2 @property - def num_completions(self): + def num_completions(self) -> int: """Returns total number of times the state was completed. Returns: @@ -334,7 +503,7 @@ def num_completions(self): return self.num_completions_v1 + self.num_completions_v2 @property - def num_times_solution_viewed(self): + def num_times_solution_viewed(self) -> int: """Returns the number of times the solution button was triggered. 
Returns: @@ -344,18 +513,23 @@ def num_times_solution_viewed(self): return self.num_times_solution_viewed_v2 @classmethod - def create_default(cls): + def create_default(cls) -> StateStats: """Creates a StateStats domain object and sets all properties to 0.""" return cls(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) - def aggregate_from(self, other): + def aggregate_from( + self, other: Union[StateStats, SessionStateStats] + ) -> None: """Aggregates data from the other state stats into self. Args: other: StateStats | SessionStateStats. The other collection of stats to aggregate from. + + Raises: + TypeError. Given SessionStateStats can not be aggregated from. """ - if other.__class__ is self.__class__: + if isinstance(other, StateStats): self.total_answers_count_v1 += other.total_answers_count_v1 self.total_answers_count_v2 += other.total_answers_count_v2 self.useful_feedback_count_v1 += other.useful_feedback_count_v1 @@ -368,7 +542,7 @@ def aggregate_from(self, other): other.num_times_solution_viewed_v2) self.num_completions_v1 += other.num_completions_v1 self.num_completions_v2 += other.num_completions_v2 - elif other.__class__ is SessionStateStats: + elif isinstance(other, SessionStateStats): self.total_answers_count_v2 += other.total_answers_count self.useful_feedback_count_v2 += other.useful_feedback_count self.total_hit_count_v2 += other.total_hit_count @@ -379,7 +553,7 @@ def aggregate_from(self, other): raise TypeError( '%s can not be aggregated from' % (other.__class__.__name__,)) - def to_dict(self): + def to_dict(self) -> Dict[str, int]: """Returns a dict representation of the domain object.""" state_stats_dict = { 'total_answers_count_v1': self.total_answers_count_v1, @@ -397,7 +571,7 @@ def to_dict(self): } return state_stats_dict - def to_frontend_dict(self): + def to_frontend_dict(self) -> Dict[str, int]: """Returns a dict representation of the domain object for use in the frontend. 
""" @@ -411,7 +585,7 @@ def to_frontend_dict(self): } return state_stats_dict - def __repr__(self): + def __repr__(self) -> str: """Returns a detailed representation of self, distinguishing v1 values from v2 values. @@ -430,7 +604,7 @@ def __repr__(self): self.__class__.__name__, ', '.join('%s=%r' % (prop, getattr(self, prop)) for prop in props)) - def __str__(self): + def __str__(self) -> str: """Returns a simple representation of self, combining v1 and v2 values. Returns: @@ -448,7 +622,9 @@ def __str__(self): self.__class__.__name__, ', '.join('%s=%r' % (prop, getattr(self, prop)) for prop in props)) - def __eq__(self, other): + # NOTE: Here we use type Any because of: + # https://github.com/python/mypy/issues/363#issue-39383094 + def __eq__(self, other: Any) -> Any: """Implements == comparison between two StateStats instances, returning whether they both hold the same values. @@ -458,40 +634,41 @@ def __eq__(self, other): Returns: bool. Whether the two instances have the same values. 
""" - if other.__class__ is self.__class__: - return ( - self.total_answers_count_v1, - self.total_answers_count_v2, - self.useful_feedback_count_v1, - self.useful_feedback_count_v2, - self.total_hit_count_v1, - self.total_hit_count_v2, - self.first_hit_count_v1, - self.first_hit_count_v2, - self.num_times_solution_viewed_v2, - self.num_completions_v1, - self.num_completions_v2, - ) == ( - other.total_answers_count_v1, - other.total_answers_count_v2, - other.useful_feedback_count_v1, - other.useful_feedback_count_v2, - other.total_hit_count_v1, - other.total_hit_count_v2, - other.first_hit_count_v1, - other.first_hit_count_v2, - other.num_times_solution_viewed_v2, - other.num_completions_v1, - other.num_completions_v2, - ) - return NotImplemented # https://stackoverflow.com/a/44575926 - - def __hash__(self): + if not isinstance(other, StateStats): + # https://docs.python.org/3.7/library/constants.html + return NotImplemented + return ( + self.total_answers_count_v1, + self.total_answers_count_v2, + self.useful_feedback_count_v1, + self.useful_feedback_count_v2, + self.total_hit_count_v1, + self.total_hit_count_v2, + self.first_hit_count_v1, + self.first_hit_count_v2, + self.num_times_solution_viewed_v2, + self.num_completions_v1, + self.num_completions_v2, + ) == ( + other.total_answers_count_v1, + other.total_answers_count_v2, + other.useful_feedback_count_v1, + other.useful_feedback_count_v2, + other.total_hit_count_v1, + other.total_hit_count_v2, + other.first_hit_count_v1, + other.first_hit_count_v2, + other.num_times_solution_viewed_v2, + other.num_completions_v1, + other.num_completions_v2, + ) + + def __hash__(self) -> int: """Disallow hashing StateStats since they are mutable by design.""" raise TypeError('%s is unhashable' % self.__class__.__name__) @classmethod - def from_dict(cls, state_stats_dict): + def from_dict(cls, state_stats_dict: Dict[str, int]) -> StateStats: """Constructs a StateStats domain object from a dict.""" return cls( 
state_stats_dict['total_answers_count_v1'], @@ -506,7 +683,7 @@ def from_dict(cls, state_stats_dict): state_stats_dict['num_completions_v1'], state_stats_dict['num_completions_v2']) - def validate(self): + def validate(self) -> None: """Validates the StateStats domain object.""" state_stats_properties = [ @@ -534,7 +711,7 @@ def validate(self): raise utils.ValidationError( '%s cannot have negative values' % (stat_property)) - def clone(self): + def clone(self) -> StateStats: """Returns a clone of this instance.""" return StateStats( self.total_answers_count_v1, self.total_answers_count_v2, @@ -551,8 +728,14 @@ class SessionStateStats: """ def __init__( - self, total_answers_count, useful_feedback_count, total_hit_count, - first_hit_count, num_times_solution_viewed, num_completions): + self, + total_answers_count: int, + useful_feedback_count: int, + total_hit_count: int, + first_hit_count: int, + num_times_solution_viewed: int, + num_completions: int + ): """Constructs a SessionStateStats domain object. Args: @@ -574,7 +757,7 @@ def __init__( self.num_times_solution_viewed = num_times_solution_viewed self.num_completions = num_completions - def __repr__(self): + def __repr__(self) -> str: """Returns a detailed string representation of self.""" props = [ 'total_answers_count', @@ -588,7 +771,7 @@ def __repr__(self): self.__class__.__name__, ', '.join('%s=%r' % (prop, getattr(self, prop)) for prop in props)) - def to_dict(self): + def to_dict(self) -> Dict[str, int]: """Returns a dict representation of self.""" session_state_stats_dict = { 'total_answers_count': self.total_answers_count, @@ -600,7 +783,77 @@ def to_dict(self): } return session_state_stats_dict - def __eq__(self, other): + @staticmethod + def validate_aggregated_stats_dict( + aggregated_stats: AggregatedStatsDict + ) -> AggregatedStatsDict: + """Validates the SessionStateStats domain object. + + Args: + aggregated_stats: dict. The aggregated stats dict to validate. 
+ + Returns: + aggregated_stats: dict. The validated aggregated stats dict. + + Raises: + ValidationError. Whether the aggregated_stats dict is invalid. + """ + + exploration_stats_properties = [ + 'num_starts', + 'num_actual_starts', + 'num_completions' + ] + state_stats_properties = [ + 'total_answers_count', + 'useful_feedback_count', + 'total_hit_count', + 'first_hit_count', + 'num_times_solution_viewed', + 'num_completions' + ] + for exp_stats_property in exploration_stats_properties: + if exp_stats_property not in aggregated_stats: + raise utils.ValidationError( + '%s not in aggregated stats dict.' % (exp_stats_property)) + # Here we use MyPy ignore because MyPy does not recognize + # that keys represented by the variable exp_stats_property + # are string literals. + if not isinstance(aggregated_stats[exp_stats_property], int): # type: ignore[misc] + raise utils.ValidationError( + 'Expected %s to be an int, received %s' % ( + exp_stats_property, + # Here we use MyPy ignore because MyPy does not + # recognize that keys represented by the variable + # exp_stats_property are string literals. + aggregated_stats[exp_stats_property] # type: ignore[misc] + ) + ) + state_stats_mapping = aggregated_stats['state_stats_mapping'] + for state_name in state_stats_mapping: + for state_stats_property in state_stats_properties: + if state_stats_property not in state_stats_mapping[state_name]: + raise utils.ValidationError( + '%s not in state stats mapping of %s in aggregated ' + 'stats dict.' % (state_stats_property, state_name)) + if not isinstance( + state_stats_mapping[state_name][state_stats_property], + int + ): + state_stats = state_stats_mapping[state_name] + raise utils.ValidationError( + 'Expected %s to be an int, received %s' % ( + state_stats_property, + state_stats[state_stats_property] + ) + ) + # The aggregated_stats parameter does not represent any domain class, + # hence dict form of the data is returned from here. 
+ return aggregated_stats + + # NOTE: Here we use type Any because of: + # https://github.com/python/mypy/issues/363#issue-39383094 + def __eq__(self, other: Any) -> Any: """Implements == comparison between two SessionStateStats instances, returning whether they hold the same values. @@ -610,35 +863,38 @@ def __eq__(self, other): Returns: bool. Whether the two instances have the same values. """ - if other.__class__ is self.__class__: - return ( - self.total_answers_count, - self.useful_feedback_count, - self.total_hit_count, - self.first_hit_count, - self.num_times_solution_viewed, - self.num_completions, - ) == ( - other.total_answers_count, - other.useful_feedback_count, - other.total_hit_count, - other.first_hit_count, - other.num_times_solution_viewed, - other.num_completions, - ) - return NotImplemented # https://stackoverflow.com/a/44575926 - - def __hash__(self): + if not isinstance(other, SessionStateStats): + # https://docs.python.org/3.7/library/constants.html + return NotImplemented + return ( + self.total_answers_count, + self.useful_feedback_count, + self.total_hit_count, + self.first_hit_count, + self.num_times_solution_viewed, + self.num_completions, + ) == ( + other.total_answers_count, + other.useful_feedback_count, + other.total_hit_count, + other.first_hit_count, + other.num_times_solution_viewed, + other.num_completions, + ) + + def __hash__(self) -> int: """Disallow hashing SessionStateStats since it is mutable by design.""" raise TypeError('%s is unhashable' % self.__class__.__name__) @classmethod - def create_default(cls): + def create_default(cls) -> SessionStateStats: """Creates a SessionStateStats domain object with all values at 0.""" return cls(0, 0, 0, 0, 0, 0) @classmethod - def from_dict(cls, session_state_stats_dict): + def from_dict( + cls, session_state_stats_dict: Dict[str, int] + ) -> SessionStateStats: """Creates a SessionStateStats domain object from the given dict.""" return cls( 
session_state_stats_dict['total_answers_count'], @@ -654,7 +910,12 @@ class ExplorationIssues: exploration. """ - def __init__(self, exp_id, exp_version, unresolved_issues): + def __init__( + self, + exp_id: str, + exp_version: int, + unresolved_issues: List[ExplorationIssue] + ) -> None: """Constructs an ExplorationIssues domain object. Args: @@ -668,7 +929,7 @@ def __init__(self, exp_id, exp_version, unresolved_issues): self.unresolved_issues = unresolved_issues @classmethod - def create_default(cls, exp_id, exp_version): + def create_default(cls, exp_id: str, exp_version: int) -> ExplorationIssues: """Creates a default ExplorationIssues domain object. Args: @@ -680,7 +941,7 @@ def create_default(cls, exp_id, exp_version): """ return cls(exp_id, exp_version, []) - def to_dict(self): + def to_dict(self) -> ExplorationIssuesDict: """Returns a dict representation of the ExplorationIssues domain object. Returns: @@ -696,7 +957,9 @@ def to_dict(self): } @classmethod - def from_dict(cls, exp_issues_dict): + def from_dict( + cls, exp_issues_dict: ExplorationIssuesDict + ) -> ExplorationIssues: """Returns an ExplorationIssues object from a dict. Args: @@ -714,7 +977,7 @@ def from_dict(cls, exp_issues_dict): exp_issues_dict['exp_id'], exp_issues_dict['exp_version'], unresolved_issues) - def validate(self): + def validate(self) -> None: """Validates the ExplorationIssues domain object.""" if not isinstance(self.exp_id, str): raise utils.ValidationError( @@ -739,8 +1002,13 @@ class Playthrough: """Domain object representing a learner playthrough.""" def __init__( - self, exp_id, exp_version, issue_type, issue_customization_args, - actions): + self, + exp_id: str, + exp_version: int, + issue_type: str, + issue_customization_args: IssuesCustomizationArgsDictType, + actions: List[LearnerAction] + ): """Constructs a Playthrough domain object. 
Args: @@ -757,7 +1025,7 @@ def __init__( self.issue_customization_args = issue_customization_args self.actions = actions - def to_dict(self): + def to_dict(self) -> PlaythroughDict: """Returns a dict representation of the Playthrough domain object. Returns: @@ -773,7 +1041,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, playthrough_data): + def from_dict(cls, playthrough_data: PlaythroughDict) -> Playthrough: """Checks whether the playthrough dict has the correct keys and then returns a domain object instance. @@ -807,7 +1075,7 @@ def from_dict(cls, playthrough_data): playthrough.validate() return playthrough - def validate(self): + def validate(self) -> None: """Validates the Playthrough domain object.""" if not isinstance(self.exp_id, str): raise utils.ValidationError( @@ -833,9 +1101,9 @@ def validate(self): try: issue = playthrough_issue_registry.Registry.get_issue_by_type( self.issue_type) - except KeyError: + except KeyError as e: raise utils.ValidationError('Invalid issue type: %s' % ( - self.issue_type)) + self.issue_type)) from e customization_args_util.validate_customization_args_and_values( 'issue', self.issue_type, self.issue_customization_args, @@ -854,8 +1122,13 @@ class ExplorationIssue: """Domain object representing an exploration issue.""" def __init__( - self, issue_type, issue_customization_args, playthrough_ids, - schema_version, is_valid): + self, + issue_type: str, + issue_customization_args: IssuesCustomizationArgsDictType, + playthrough_ids: List[str], + schema_version: int, + is_valid: bool + ): """Constructs an ExplorationIssue domain object. 
Args: @@ -875,7 +1148,21 @@ def __init__( self.schema_version = schema_version self.is_valid = is_valid - def to_dict(self): + # NOTE: Here we use type Any because of: + # https://github.com/python/mypy/issues/363#issue-39383094 + def __eq__(self, other: Any) -> Any: + if not isinstance(other, ExplorationIssue): + # https://docs.python.org/3.7/library/constants.html + return NotImplemented + return ( + self.issue_type == other.issue_type and + self.issue_customization_args == other.issue_customization_args and + self.playthrough_ids == other.playthrough_ids and + self.schema_version == other.schema_version and + self.is_valid == other.is_valid + ) + + def to_dict(self) -> ExplorationIssueDict: """Returns a dict representation of the ExplorationIssue domain object. Returns: @@ -890,7 +1177,9 @@ def to_dict(self): } @classmethod - def from_dict(cls, exp_issue_dict): + def from_dict( + cls, exp_issue_dict: ExplorationIssueDict + ) -> ExplorationIssue: """Checks whether the exploration issue dict has the correct keys and then returns a domain object instance. @@ -921,7 +1210,9 @@ def from_dict(cls, exp_issue_dict): return exp_issue @classmethod - def update_exp_issue_from_model(cls, issue_dict): + def update_exp_issue_from_model( + cls, issue_dict: ExplorationIssueDict + ) -> None: """Converts the exploration issue blob given from current issue_schema_version to current issue_schema_version + 1. Note that the issue_dict being passed in is modified in-place. @@ -937,7 +1228,12 @@ def update_exp_issue_from_model(cls, issue_dict): issue_dict = conversion_fn(issue_dict) @classmethod - def _convert_issue_v1_dict_to_v2_dict(cls, issue_dict): + def _convert_issue_v1_dict_to_v2_dict( + cls, + issue_dict: Dict[ + str, Union[str, Dict[str, Dict[str, str]], List[str], int, bool] + ] + ) -> None: """Converts a v1 issue dict to a v2 issue dict. This function is now implemented only for testing purposes and must be rewritten when an actual schema migration from v1 to v2 takes place. 
@@ -946,7 +1242,7 @@ def _convert_issue_v1_dict_to_v2_dict(cls, issue_dict): 'The _convert_issue_v1_dict_to_v2_dict() method is missing from the' ' derived class. It should be implemented in the derived class.') - def validate(self): + def validate(self) -> None: """Validates the ExplorationIssue domain object.""" if not isinstance(self.issue_type, str): raise utils.ValidationError( @@ -961,9 +1257,9 @@ def validate(self): try: issue = playthrough_issue_registry.Registry.get_issue_by_type( self.issue_type) - except KeyError: + except KeyError as e: raise utils.ValidationError('Invalid issue type: %s' % ( - self.issue_type)) + self.issue_type)) from e customization_args_util.validate_customization_args_and_values( 'issue', self.issue_type, self.issue_customization_args, @@ -984,7 +1280,12 @@ def validate(self): class LearnerAction: """Domain object representing a learner action.""" - def __init__(self, action_type, action_customization_args, schema_version): + def __init__( + self, + action_type: str, + action_customization_args: Dict[str, Dict[str, Union[str, int]]], + schema_version: int + ): """Constructs a LearnerAction domain object. Args: @@ -999,7 +1300,7 @@ def __init__(self, action_type, action_customization_args, schema_version): self.action_customization_args = action_customization_args self.schema_version = schema_version - def to_dict(self): + def to_dict(self) -> LearnerActionDict: """Returns a dict representation of the LearnerAction domain object. Returns: @@ -1012,7 +1313,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, action_dict): + def from_dict(cls, action_dict: LearnerActionDict) -> LearnerAction: """Returns a LearnerAction object from a dict. 
Args: @@ -1028,7 +1329,9 @@ def from_dict(cls, action_dict): action_dict['schema_version']) @classmethod - def update_learner_action_from_model(cls, action_dict): + def update_learner_action_from_model( + cls, action_dict: LearnerActionDict + ) -> None: """Converts the learner action blob given from current action_schema_version to current action_schema_version + 1. Note that the action_dict being passed in is modified in-place. @@ -1044,7 +1347,9 @@ def update_learner_action_from_model(cls, action_dict): action_dict = conversion_fn(action_dict) @classmethod - def _convert_action_v1_dict_to_v2_dict(cls, action_dict): + def _convert_action_v1_dict_to_v2_dict( + cls, action_dict: LearnerActionDict + ) -> None: """Converts a v1 action dict to a v2 action dict. This function is now implemented only for testing purposes and must be rewritten when an actual schema migration from v1 to v2 takes place. @@ -1053,7 +1358,7 @@ def _convert_action_v1_dict_to_v2_dict(cls, action_dict): 'The _convert_action_v1_dict_to_v2_dict() method is missing from ' 'the derived class. 
It should be implemented in the derived class.') - def validate(self): + def validate(self) -> None: """Validates the LearnerAction domain object.""" if not isinstance(self.action_type, str): raise utils.ValidationError( @@ -1068,9 +1373,9 @@ def validate(self): try: action = action_registry.Registry.get_action_by_type( self.action_type) - except KeyError: + except KeyError as e: raise utils.ValidationError( - 'Invalid action type: %s' % self.action_type) + 'Invalid action type: %s' % self.action_type) from e customization_args_util.validate_customization_args_and_values( 'action', self.action_type, self.action_customization_args, action.customization_arg_specs) @@ -1083,15 +1388,20 @@ class StateAnswers: """Domain object containing answers submitted to an exploration state.""" def __init__( - self, exploration_id, exploration_version, state_name, - interaction_id, submitted_answer_list, - schema_version=feconf.CURRENT_STATE_ANSWERS_SCHEMA_VERSION): + self, + exploration_id: str, + exploration_version: int, + state_name: str, + interaction_id: str, + submitted_answer_list: List[SubmittedAnswer], + schema_version: int = feconf.CURRENT_STATE_ANSWERS_SCHEMA_VERSION + ) -> None: """Constructs a StateAnswers domain object. Args: exploration_id: str. The ID of the exploration corresponding to submitted answers. - exploration_version: str. The version of the exploration + exploration_version: int. The version of the exploration corresponding to submitted answers. state_name: str. The state to which the answers were submitted. interaction_id: str. The ID of the interaction which created the @@ -1099,7 +1409,7 @@ def __init__( submitted_answer_list: list. The list of SubmittedAnswer domain objects that were submitted to the exploration and version specified in this object. - schema_version: str. The schema version of this answers object. + schema_version: int. The schema version of this answers object. 
""" self.exploration_id = exploration_id self.exploration_version = exploration_version @@ -1108,14 +1418,14 @@ def __init__( self.submitted_answer_list = submitted_answer_list self.schema_version = schema_version - def get_submitted_answer_dict_list(self): + def get_submitted_answer_dict_list(self) -> List[SubmittedAnswerDict]: """Returns the submitted_answer_list stored within this object as a list of StateAnswer dicts. """ return [state_answer.to_dict() for state_answer in self.submitted_answer_list] - def validate(self): + def validate(self) -> None: """Validates StateAnswers domain object entity.""" if not isinstance(self.exploration_id, str): @@ -1173,10 +1483,18 @@ class SubmittedAnswer: # without warning or migration. def __init__( - self, answer, interaction_id, answer_group_index, - rule_spec_index, classification_categorization, params, - session_id, time_spent_in_sec, rule_spec_str=None, - answer_str=None): + self, + answer: state_domain.AcceptableCorrectAnswerTypes, + interaction_id: str, + answer_group_index: int, + rule_spec_index: int, + classification_categorization: str, + params: Dict[str, Union[str, int]], + session_id: str, + time_spent_in_sec: float, + rule_spec_str: Optional[str] = None, + answer_str: Optional[str] = None + ) -> None: self.answer = answer self.interaction_id = interaction_id self.answer_group_index = answer_group_index @@ -1188,13 +1506,13 @@ def __init__( self.rule_spec_str = rule_spec_str self.answer_str = answer_str - def to_dict(self): + def to_dict(self) -> SubmittedAnswerDict: """Returns the dict of submitted answer. Returns: dict. The submitted answer dict. 
""" - submitted_answer_dict = { + submitted_answer_dict: SubmittedAnswerDict = { 'answer': self.answer, 'interaction_id': self.interaction_id, 'answer_group_index': self.answer_group_index, @@ -1203,6 +1521,8 @@ def to_dict(self): 'params': self.params, 'session_id': self.session_id, 'time_spent_in_sec': self.time_spent_in_sec, + 'rule_spec_str': self.rule_spec_str, + 'answer_str': self.answer_str } if self.rule_spec_str is not None: submitted_answer_dict['rule_spec_str'] = self.rule_spec_str @@ -1211,7 +1531,9 @@ def to_dict(self): return submitted_answer_dict @classmethod - def from_dict(cls, submitted_answer_dict): + def from_dict( + cls, submitted_answer_dict: SubmittedAnswerDict + ) -> SubmittedAnswer: """Returns the domain object representing an answer submitted to a state. @@ -1230,7 +1552,7 @@ def from_dict(cls, submitted_answer_dict): rule_spec_str=submitted_answer_dict.get('rule_spec_str'), answer_str=submitted_answer_dict.get('answer_str')) - def validate(self): + def validate(self) -> None: """Validates this submitted answer object.""" # TODO(bhenning): Validate the normalized answer against future answer # objects after #956 is addressed. @@ -1317,12 +1639,15 @@ class AnswerOccurrence: of times. """ - def __init__(self, answer, frequency): + def __init__( + self, answer: state_domain.AcceptableCorrectAnswerTypes, + frequency: int + ) -> None: """Initialize domain object for answer occurrences.""" self.answer = answer self.frequency = frequency - def to_raw_type(self): + def to_raw_type(self) -> AnswerOccurrenceDict: """Returns a Python dict representing the specific answer. Returns: @@ -1338,7 +1663,9 @@ def to_raw_type(self): } @classmethod - def from_raw_type(cls, answer_occurrence_dict): + def from_raw_type( + cls, answer_occurrence_dict: AnswerOccurrenceDict + ) -> AnswerOccurrence: """Returns domain object that represents a specific answer that occurred some number of times. @@ -1363,23 +1690,25 @@ class AnswerCalculationOutput: calculation. 
""" - def __init__(self, calculation_output_type): + def __init__(self, calculation_output_type: str): self.calculation_output_type = calculation_output_type class AnswerFrequencyList(AnswerCalculationOutput): """Domain object that represents an output list of AnswerOccurrences.""" - def __init__(self, answer_occurrences=None): + def __init__( + self, answer_occurrences: Optional[List[AnswerOccurrence]] = None + ) -> None: """Initialize domain object for answer frequency list for a given list of AnswerOccurrence objects (default is empty list). """ - super(AnswerFrequencyList, self).__init__( + super().__init__( CALC_OUTPUT_TYPE_ANSWER_FREQUENCY_LIST) self.answer_occurrences = ( answer_occurrences if answer_occurrences else []) - def to_raw_type(self): + def to_raw_type(self) -> List[AnswerOccurrenceDict]: """Returns the answer occurrences list with each answer represented as a Python dict. @@ -1396,7 +1725,9 @@ def to_raw_type(self): for answer_occurrence in self.answer_occurrences] @classmethod - def from_raw_type(cls, answer_occurrence_list): + def from_raw_type( + cls, answer_occurrence_list: List[AnswerOccurrenceDict] + ) -> AnswerFrequencyList: """Creates a domain object that represents an output list of AnswerOccurrences. @@ -1421,17 +1752,22 @@ class CategorizedAnswerFrequencyLists(AnswerCalculationOutput): categories. """ - def __init__(self, categorized_answer_freq_lists=None): + def __init__( + self, + categorized_answer_freq_lists: Optional[ + Dict[str, AnswerFrequencyList] + ] = None + ) -> None: """Initialize domain object for categorized answer frequency lists for a given dict (default is empty). 
""" - super(CategorizedAnswerFrequencyLists, self).__init__( + super().__init__( CALC_OUTPUT_TYPE_CATEGORIZED_ANSWER_FREQUENCY_LISTS) self.categorized_answer_freq_lists = ( categorized_answer_freq_lists if categorized_answer_freq_lists else {}) - def to_raw_type(self): + def to_raw_type(self) -> Dict[str, List[AnswerOccurrenceDict]]: """Returns the categorized frequency Python dict. Returns: @@ -1450,7 +1786,9 @@ def to_raw_type(self): } @classmethod - def from_raw_type(cls, categorized_frequency_dict): + def from_raw_type( + cls, categorized_frequency_dict: Dict[str, List[AnswerOccurrenceDict]] + ) -> CategorizedAnswerFrequencyLists: """Returns the domain object for categorized answer frequency dict for a given dict. @@ -1481,14 +1819,22 @@ class StateAnswersCalcOutput: """ def __init__( - self, exploration_id, exploration_version, state_name, - interaction_id, calculation_id, calculation_output): + self, + exploration_id: str, + exploration_version: int, + state_name: str, + interaction_id: str, + calculation_id: str, + calculation_output: Union[ + AnswerFrequencyList, CategorizedAnswerFrequencyLists + ] + ) -> None: """Initialize domain object for state answers calculation output. Args: exploration_id: str. The ID of the exploration corresponding to the answer calculation output. - exploration_version: str. The version of the exploration + exploration_version: int. The version of the exploration corresponding to the answer calculation output. state_name: str. The name of the exploration state to which the aggregated answers were submitted. @@ -1505,7 +1851,7 @@ def __init__( self.interaction_id = interaction_id self.calculation_output = calculation_output - def validate(self): + def validate(self) -> None: """Validates StateAnswersCalcOutputModel domain object entity before it is commited to storage. 
""" @@ -1555,10 +1901,15 @@ class LearnerAnswerDetails: """ def __init__( - self, state_reference, entity_type, interaction_id, - learner_answer_info_list, accumulated_answer_info_json_size_bytes, - learner_answer_info_schema_version=( - feconf.CURRENT_LEARNER_ANSWER_INFO_SCHEMA_VERSION)): + self, + state_reference: str, + entity_type: str, + interaction_id: str, + learner_answer_info_list: List[LearnerAnswerInfo], + accumulated_answer_info_json_size_bytes: int, + learner_answer_info_schema_version: int = ( + feconf.CURRENT_LEARNER_ANSWER_INFO_SCHEMA_VERSION) + ) -> None: """Constructs a LearnerAnswerDetail domain object. Args: @@ -1590,7 +1941,7 @@ def __init__( self.learner_answer_info_schema_version = ( learner_answer_info_schema_version) - def to_dict(self): + def to_dict(self) -> LearnerAnswerDetailsDict: """Returns a dict representing LearnerAnswerDetails domain object. Returns: @@ -1611,7 +1962,10 @@ def to_dict(self): } @classmethod - def from_dict(cls, learner_answer_details_dict): + def from_dict( + cls, + learner_answer_details_dict: LearnerAnswerDetailsDict + ) -> LearnerAnswerDetails: """Return a LearnerAnswerDetails domain object from a dict. Args: @@ -1634,7 +1988,7 @@ def from_dict(cls, learner_answer_details_dict): learner_answer_details_dict['learner_answer_info_schema_version'] ) - def validate(self): + def validate(self) -> None: """Validates LearnerAnswerDetails domain object.""" if not isinstance(self.state_reference, str): @@ -1699,7 +2053,9 @@ def validate(self): 'Expected accumulated_answer_info_json_size_bytes to be an int ' 'received %s' % self.accumulated_answer_info_json_size_bytes) - def add_learner_answer_info(self, learner_answer_info): + def add_learner_answer_info( + self, learner_answer_info: LearnerAnswerInfo + ) -> None: """Adds new learner answer info in the learner_answer_info_list. 
Args: @@ -1716,7 +2072,7 @@ def add_learner_answer_info(self, learner_answer_info): self.accumulated_answer_info_json_size_bytes += ( learner_answer_info_dict_size) - def delete_learner_answer_info(self, learner_answer_info_id): + def delete_learner_answer_info(self, learner_answer_info_id: str) -> None: """Delete the learner answer info from the learner_answer_info_list. Args: @@ -1737,10 +2093,10 @@ def delete_learner_answer_info(self, learner_answer_info_id): learner_answer_info.get_learner_answer_info_dict_size()) if self.learner_answer_info_list == new_learner_answer_info_list: raise Exception('Learner answer info with the given id not found.') - else: - self.learner_answer_info_list = new_learner_answer_info_list - def update_state_reference(self, new_state_reference): + self.learner_answer_info_list = new_learner_answer_info_list + + def update_state_reference(self, new_state_reference: str) -> None: """Updates the state_reference of the LearnerAnswerDetails object. Args: @@ -1754,8 +2110,12 @@ class LearnerAnswerInfo: """Domain object containing the answer details submitted by the learner.""" def __init__( - self, learner_answer_info_id, answer, - answer_details, created_on): + self, + learner_answer_info_id: str, + answer: Optional[Union[str, int, Dict[str, str], List[str]]], + answer_details: str, + created_on: datetime.datetime + ) -> None: """Constructs a LearnerAnswerInfo domain object. Args: @@ -1775,13 +2135,13 @@ def __init__( self.answer_details = answer_details self.created_on = created_on - def to_dict(self): + def to_dict(self) -> LearnerAnswerInfoDict: """Returns the dict of learner answer info. Returns: dict. The learner_answer_info dict. 
""" - learner_answer_info_dict = { + learner_answer_info_dict: LearnerAnswerInfoDict = { 'id': self.id, 'answer': self.answer, 'answer_details': self.answer_details, @@ -1790,7 +2150,9 @@ def to_dict(self): return learner_answer_info_dict @classmethod - def from_dict(cls, learner_answer_info_dict): + def from_dict( + cls, learner_answer_info_dict: LearnerAnswerInfoDict + ) -> LearnerAnswerInfo: """Returns a dict representing LearnerAnswerInfo domain object. Returns: @@ -1806,7 +2168,7 @@ def from_dict(cls, learner_answer_info_dict): ) @classmethod - def get_new_learner_answer_info_id(cls): + def get_new_learner_answer_info_id(cls) -> str: """Generates the learner answer info domain object id. Returns: @@ -1814,11 +2176,11 @@ def get_new_learner_answer_info_id(cls): """ learner_answer_info_id = ( utils.base64_from_int( - utils.get_current_time_in_millisecs()) + + int(utils.get_current_time_in_millisecs())) + utils.base64_from_int(utils.get_random_int(127 * 127))) return learner_answer_info_id - def validate(self): + def validate(self) -> None: """Validates the LearnerAnswerInfo domain object.""" if not isinstance(self.id, str): raise utils.ValidationError( @@ -1849,7 +2211,7 @@ def validate(self): 'Expected created_on to be a datetime, received %s' % str(self.created_on)) - def get_learner_answer_info_dict_size(self): + def get_learner_answer_info_dict_size(self) -> int: """Returns a size overestimate (in bytes) of the given learner answer info dict. 
diff --git a/core/domain/stats_domain_test.py b/core/domain/stats_domain_test.py index 1bbf378c2df7..c308f62284c9 100644 --- a/core/domain/stats_domain_test.py +++ b/core/domain/stats_domain_test.py @@ -29,14 +29,20 @@ from core.platform import models from core.tests import test_utils -(stats_models,) = models.Registry.import_models([models.NAMES.statistics]) +from typing import Any, Dict + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import stats_models + +(stats_models,) = models.Registry.import_models([models.Names.STATISTICS]) class ExplorationStatsTests(test_utils.GenericTestBase): """Tests the ExplorationStats domain object.""" - def setUp(self): - super(ExplorationStatsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.state_stats_dict = { 'total_answers_count_v1': 0, @@ -52,7 +58,7 @@ def setUp(self): 'num_completions_v2': 2 } - self.exploration_stats_dict = { + self.exploration_stats_dict: stats_domain.ExplorationStatsDict = { 'exp_id': 'exp_id1', 'exp_version': 1, 'num_starts_v1': 0, @@ -70,7 +76,10 @@ def setUp(self): self.exploration_stats = self._get_exploration_stats_from_dict( self.exploration_stats_dict) - def _get_exploration_stats_from_dict(self, exploration_stats_dict): + def _get_exploration_stats_from_dict( + self, + exploration_stats_dict: stats_domain.ExplorationStatsDict + ) -> stats_domain.ExplorationStats: """Converts and returns the ExplorationStats object from the given exploration stats dict. 
""" @@ -89,7 +98,7 @@ def _get_exploration_stats_from_dict(self, exploration_stats_dict): exploration_stats_dict['num_completions_v2'], state_stats_mapping) - def test_create_default(self): + def test_create_default(self) -> None: exploration_stats = ( stats_domain.ExplorationStats.create_default('exp_id1', 1, {})) @@ -103,8 +112,8 @@ def test_create_default(self): self.assertEqual(exploration_stats.num_completions_v2, 0) self.assertEqual(exploration_stats.state_stats_mapping, {}) - def test_to_dict(self): - expected_exploration_stats_dict = { + def test_to_dict(self) -> None: + expected_exploration_stats_dict: stats_domain.ExplorationStatsDict = { 'exp_id': 'exp_id1', 'exp_version': 1, 'num_starts_v1': 0, @@ -123,45 +132,57 @@ def test_to_dict(self): expected_exploration_stats_dict, observed_exploration_stats.to_dict()) - def test_get_sum_of_first_hit_counts(self): + def test_get_sum_of_first_hit_counts(self) -> None: """Test the get_sum_of_first_hit_counts method.""" self.assertEqual( self.exploration_stats.get_sum_of_first_hit_counts(), 14) - def test_validate_for_exploration_stats_with_correct_data(self): + def test_validate_for_exploration_stats_with_correct_data(self) -> None: self.exploration_stats.validate() - def test_validate_with_int_exp_id(self): - self.exploration_stats.exp_id = 10 - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate() input type. 
+ def test_validate_with_int_exp_id(self) -> None: + self.exploration_stats.exp_id = 10 # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected exp_id to be a string')): self.exploration_stats.validate() - def test_validation_with_string_num_actual_starts(self): - self.exploration_stats.num_actual_starts_v2 = '0' - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate() input type. + def test_validation_with_string_num_actual_starts(self) -> None: + self.exploration_stats.num_actual_starts_v2 = '0' # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected num_actual_starts_v2 to be an int')): self.exploration_stats.validate() - def test_validation_with_list_state_stats_mapping(self): - self.exploration_stats.state_stats_mapping = [] - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate() input type. 
+ def test_validation_with_list_state_stats_mapping(self) -> None: + self.exploration_stats.state_stats_mapping = [] # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected state_stats_mapping to be a dict')): self.exploration_stats.validate() - def test_validation_with_negative_num_completions(self): + def test_validation_with_negative_num_completions(self) -> None: self.exploration_stats.num_completions_v2 = -5 - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( '%s cannot have negative values' % ('num_completions_v2'))): self.exploration_stats.validate() - def test_validate_exp_version(self): - self.exploration_stats.exp_version = 'invalid_exp_version' - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate(). + def test_validate_exp_version(self) -> None: + self.exploration_stats.exp_version = 'invalid_exp_version' # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected exp_version to be an int')): self.exploration_stats.validate() - def test_to_frontend_dict(self): + def test_to_frontend_dict(self) -> None: state_stats_dict = { 'total_answers_count_v1': 0, 'total_answers_count_v2': 10, @@ -175,7 +196,7 @@ def test_to_frontend_dict(self): 'num_completions_v1': 0, 'num_completions_v2': 2 } - exploration_stats_dict = { + exploration_stats_dict: stats_domain.ExplorationStatsDict = { 'exp_id': 'exp_id1', 'exp_version': 1, 'num_starts_v1': 0, @@ -215,7 +236,7 @@ def test_to_frontend_dict(self): self.assertEqual( exploration_stats.to_frontend_dict(), expected_frontend_dict) - def test_clone_instance(self): + def test_clone_instance(self) -> None: exploration_stats = (stats_domain.ExplorationStats.create_default( 'exp_id1', 1, {})) expected_clone_object = 
exploration_stats.clone() @@ -227,13 +248,13 @@ def test_clone_instance(self): class StateStatsTests(test_utils.GenericTestBase): """Tests the StateStats domain object.""" - def setUp(self): - super(StateStatsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.state_stats = stats_domain.StateStats( 0, 10, 0, 4, 0, 18, 0, 7, 2, 0, 2) - def test_from_dict(self): + def test_from_dict(self) -> None: state_stats_dict = { 'total_answers_count_v1': 0, 'total_answers_count_v2': 10, @@ -284,7 +305,7 @@ def test_from_dict(self): state_stats.num_completions_v2, expected_state_stats.num_completions_v2) - def test_repr(self): + def test_repr(self) -> None: state_stats = stats_domain.StateStats(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11) self.assertEqual( '%r' % (state_stats,), @@ -296,7 +317,7 @@ def test_repr(self): 'num_times_solution_viewed_v2=9, ' 'num_completions_v1=10, num_completions_v2=11)') - def test_str(self): + def test_str(self) -> None: state_stats = stats_domain.StateStats(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11) self.assertEqual( '%s' % (state_stats,), @@ -308,7 +329,7 @@ def test_str(self): 'num_times_solution_viewed=9, ' 'num_completions=21)') - def test_create_default(self): + def test_create_default(self) -> None: state_stats = stats_domain.StateStats.create_default() self.assertEqual(state_stats.total_answers_count_v1, 0) self.assertEqual(state_stats.total_answers_count_v2, 0) @@ -322,7 +343,7 @@ def test_create_default(self): self.assertEqual(state_stats.num_completions_v1, 0) self.assertEqual(state_stats.num_completions_v2, 0) - def test_equality(self): + def test_equality(self) -> None: state_stats_a = stats_domain.StateStats.create_default() state_stats_b = stats_domain.StateStats.create_default() state_stats_c = stats_domain.StateStats.create_default() @@ -343,7 +364,7 @@ def test_equality(self): self.assertEqual(state_stats_b, state_stats_c) self.assertEqual(state_stats_a, state_stats_c) - def test_equality_with_different_class(self): + def 
test_equality_with_different_class(self) -> None: class DifferentStats: """A different class.""" @@ -354,12 +375,12 @@ class DifferentStats: self.assertFalse(state_stats == different_stats) - def test_hash(self): + def test_hash(self) -> None: state_stats = stats_domain.StateStats.create_default() - with self.assertRaisesRegexp(TypeError, 'unhashable'): + with self.assertRaisesRegex(TypeError, 'unhashable'): unused_hash = hash(state_stats) - def test_aggregate_from_state_stats(self): + def test_aggregate_from_state_stats(self) -> None: state_stats = stats_domain.StateStats( 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100) other_state_stats = stats_domain.StateStats( @@ -372,7 +393,7 @@ def test_aggregate_from_state_stats(self): stats_domain.StateStats( 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111)) - def test_aggregate_from_session_state_stats(self): + def test_aggregate_from_session_state_stats(self) -> None: state_stats = stats_domain.StateStats( 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10) session_state_stats = stats_domain.SessionStateStats( @@ -385,7 +406,7 @@ def test_aggregate_from_session_state_stats(self): stats_domain.StateStats( 10, 11, 10, 12, 10, 13, 10, 14, 15, 10, 16)) - def test_aggregate_from_different_stats(self): + def test_aggregate_from_different_stats(self) -> None: class DifferentStats: """A different class.""" @@ -394,10 +415,13 @@ class DifferentStats: state_stats = stats_domain.StateStats.create_default() different_stats = DifferentStats() - with self.assertRaisesRegexp(TypeError, 'can not be aggregated from'): - state_stats.aggregate_from(different_stats) + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[arg-type] + # is used to test method aggregate_from() input type. 
+ with self.assertRaisesRegex(TypeError, 'can not be aggregated from'): + state_stats.aggregate_from(different_stats) # type: ignore[arg-type] - def test_to_dict(self): + def test_to_dict(self) -> None: state_stats_dict = { 'total_answers_count_v1': 0, 'total_answers_count_v2': 10, @@ -414,22 +438,29 @@ def test_to_dict(self): state_stats = stats_domain.StateStats(0, 10, 0, 4, 0, 18, 0, 7, 2, 0, 2) self.assertEqual(state_stats_dict, state_stats.to_dict()) - def test_validation_for_state_stats_with_correct_data(self): + def test_validation_for_state_stats_with_correct_data(self) -> None: self.state_stats.validate() - def test_validation_for_state_stats_with_string_total_answers_count(self): - self.state_stats.total_answers_count_v2 = '10' - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate() input type. + def test_validation_for_state_stats_with_string_total_answers_count( + self + ) -> None: + self.state_stats.total_answers_count_v2 = '10' # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected total_answers_count_v2 to be an int')): self.state_stats.validate() - def test_validation_for_state_stats_with_negative_total_answers_count(self): + def test_validation_for_state_stats_with_negative_total_answers_count( + self + ) -> None: self.state_stats.total_answers_count_v2 = -5 - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( '%s cannot have negative values' % ('total_answers_count_v2'))): self.state_stats.validate() - def test_to_frontend_dict(self): + def test_to_frontend_dict(self) -> None: state_stats_dict = { 'total_answers_count_v1': 0, 'total_answers_count_v2': 10, @@ -458,11 +489,16 @@ def test_to_frontend_dict(self): self.assertEqual( state_stats.to_frontend_dict(), 
expected_state_stats_dict) + def test_cloned_object_replicates_original_object(self) -> None: + state_stats = stats_domain.StateStats(0, 10, 0, 4, 0, 18, 0, 7, 2, 0, 2) + expected_state_stats = state_stats.clone() + self.assertEqual(state_stats.to_dict(), expected_state_stats.to_dict()) + class SessionStateStatsTests(test_utils.GenericTestBase): """Tests the SessionStateStats domain object.""" - def test_from_dict(self): + def test_from_dict(self) -> None: session_state_stats_dict = { 'total_answers_count': 10, 'useful_feedback_count': 4, @@ -493,7 +529,7 @@ def test_from_dict(self): session_state_stats.num_completions, expected_session_state_stats.num_completions) - def test_repr(self): + def test_repr(self) -> None: session_state_stats = stats_domain.SessionStateStats(1, 2, 3, 4, 5, 6) self.assertEqual( '%r' % (session_state_stats,), @@ -505,7 +541,7 @@ def test_repr(self): 'num_times_solution_viewed=5, ' 'num_completions=6)') - def test_create_default(self): + def test_create_default(self) -> None: session_state_stats = stats_domain.SessionStateStats.create_default() self.assertEqual(session_state_stats.total_answers_count, 0) self.assertEqual(session_state_stats.useful_feedback_count, 0) @@ -514,7 +550,7 @@ def test_create_default(self): self.assertEqual(session_state_stats.num_times_solution_viewed, 0) self.assertEqual(session_state_stats.num_completions, 0) - def test_equality(self): + def test_equality(self) -> None: session_state_stats_a = stats_domain.SessionStateStats.create_default() session_state_stats_b = stats_domain.SessionStateStats.create_default() session_state_stats_c = stats_domain.SessionStateStats.create_default() @@ -535,7 +571,7 @@ def test_equality(self): self.assertEqual(session_state_stats_b, session_state_stats_c) self.assertEqual(session_state_stats_a, session_state_stats_c) - def test_equality_with_different_class(self): + def test_equality_with_different_class(self) -> None: class DifferentStats: """A different class.""" @@ -546,12 
+582,12 @@ class DifferentStats: self.assertFalse(session_state_stats == different_stats) - def test_hash(self): + def test_hash(self) -> None: session_state_stats = stats_domain.SessionStateStats.create_default() - with self.assertRaisesRegexp(TypeError, 'unhashable'): + with self.assertRaisesRegex(TypeError, 'unhashable'): unused_hash = hash(session_state_stats) - def test_to_dict(self): + def test_to_dict(self) -> None: self.assertEqual( stats_domain.SessionStateStats(1, 2, 3, 4, 5, 6).to_dict(), { 'total_answers_count': 1, @@ -562,12 +598,145 @@ def test_to_dict(self): 'num_completions': 6 }) + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that num_starts must be in aggregated stats dict. + def test_aggregated_stats_validation_when_session_property_is_missing( + self + ) -> None: + sessions_state_stats: stats_domain.AggregatedStatsDict = { # type: ignore[typeddict-item] + 'num_actual_starts': 1, + 'num_completions': 1, + 'state_stats_mapping': { + 'Home': { + 'total_hit_count': 1, + 'first_hit_count': 1, + 'total_answers_count': 1, + 'useful_feedback_count': 1, + 'num_times_solution_viewed': 1, + 'num_completions': 1 + } + } + } + with self.assertRaisesRegex( + utils.ValidationError, + 'num_starts not in aggregated stats dict.' + ): + stats_domain.SessionStateStats.validate_aggregated_stats_dict( + sessions_state_stats) + + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that num_actual_starts must be an int. 
+ def test_aggregated_stats_validation_when_session_property_type_is_invalid( + self + ) -> None: + sessions_state_stats: stats_domain.AggregatedStatsDict = { + 'num_starts': 1, + 'num_actual_starts': 'invalid_type', # type: ignore[typeddict-item] + 'num_completions': 1, + 'state_stats_mapping': { + 'Home': { + 'total_hit_count': 1, + 'first_hit_count': 1, + 'total_answers_count': 1, + 'useful_feedback_count': 1, + 'num_times_solution_viewed': 1, + 'num_completions': 1 + } + } + } + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected num_actual_starts to be an int, received invalid_type' + ): + stats_domain.SessionStateStats.validate_aggregated_stats_dict( + sessions_state_stats) + + def test_aggregated_stats_validation_when_state_property_type_is_missing( + self + ) -> None: + sessions_state_stats: stats_domain.AggregatedStatsDict = { + 'num_starts': 1, + 'num_actual_starts': 1, + 'num_completions': 1, + 'state_stats_mapping': { + 'Home': { + 'total_hit_count': 1, + 'first_hit_count': 1, + 'useful_feedback_count': 1, + 'num_times_solution_viewed': 1, + 'num_completions': 1 + } + } + } + with self.assertRaisesRegex( + utils.ValidationError, + 'total_answers_count not in state stats mapping of Home in ' + 'aggregated stats dict.' + ): + stats_domain.SessionStateStats.validate_aggregated_stats_dict( + sessions_state_stats) + + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[dict-item] is used to + # test that first_hit_count must be an int. 
+ def test_aggregated_stats_validation_when_state_property_type_is_invalid( + self + ) -> None: + sessions_state_stats: stats_domain.AggregatedStatsDict = { + 'num_starts': 1, + 'num_actual_starts': 1, + 'num_completions': 1, + 'state_stats_mapping': { + 'Home': { + 'total_hit_count': 1, + 'first_hit_count': 'invalid_count', # type: ignore[dict-item] + 'total_answers_count': 1, + 'useful_feedback_count': 1, + 'num_times_solution_viewed': 1, + 'num_completions': 1 + } + } + } + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected first_hit_count to be an int, received invalid_count' + ): + stats_domain.SessionStateStats.validate_aggregated_stats_dict( + sessions_state_stats) + + def test_aggregated_stats_validation_when_fully_valid( + self + ) -> None: + sessions_state_stats: stats_domain.AggregatedStatsDict = { + 'num_starts': 1, + 'num_actual_starts': 1, + 'num_completions': 1, + 'state_stats_mapping': { + 'Home': { + 'total_hit_count': 1, + 'first_hit_count': 1, + 'total_answers_count': 1, + 'useful_feedback_count': 1, + 'num_times_solution_viewed': 1, + 'num_completions': 1 + } + } + } + self.assertEqual( + stats_domain.SessionStateStats.validate_aggregated_stats_dict( + sessions_state_stats + ), + sessions_state_stats + ) + class ExplorationIssuesTests(test_utils.GenericTestBase): """Tests the ExplorationIssues domain object.""" - def setUp(self): - super(ExplorationIssuesTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.exp_issues = stats_domain.ExplorationIssues( 'exp_id1', 1, [ @@ -586,13 +755,13 @@ def setUp(self): 'is_valid': True}) ]) - def test_create_default(self): + def test_create_default(self) -> None: exp_issues = stats_domain.ExplorationIssues.create_default('exp_id1', 1) self.assertEqual(exp_issues.exp_id, 'exp_id1') self.assertEqual(exp_issues.exp_version, 1) self.assertEqual(exp_issues.unresolved_issues, []) - def test_to_dict(self): + def test_to_dict(self) -> None: exp_issues_dict = self.exp_issues.to_dict() 
self.assertEqual(exp_issues_dict['exp_id'], 'exp_id1') @@ -613,8 +782,8 @@ def test_to_dict(self): 'is_valid': True }]) - def test_from_dict(self): - exp_issues_dict = { + def test_from_dict(self) -> None: + exp_issues_dict: stats_domain.ExplorationIssuesDict = { 'exp_id': 'exp_id1', 'exp_version': 1, 'unresolved_issues': [{ @@ -653,26 +822,35 @@ def test_from_dict(self): 'schema_version': 1, 'is_valid': True}) - def test_validate_for_exp_issues_with_correct_data(self): + def test_validate_for_exp_issues_with_correct_data(self) -> None: self.exp_issues.validate() - def test_validate_with_int_exp_id(self): - self.exp_issues.exp_id = 5 - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate() input type. + def test_validate_with_int_exp_id(self) -> None: + self.exp_issues.exp_id = 5 # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected exp_id to be a string, received %s' % (type(5)))): self.exp_issues.validate() - def test_validate_exp_version(self): - self.exp_issues.exp_version = 'invalid_version' + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test the validate() method input type. + def test_validate_exp_version(self) -> None: + self.exp_issues.exp_version = 'invalid_version' # type: ignore[assignment] - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected exp_version to be an int')): self.exp_issues.validate() - def test_validate_unresolved_issues(self): - self.exp_issues.unresolved_issues = 0 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. 
Here ignore[assignment] is used to + # test the validate() method input type. + def test_validate_unresolved_issues(self) -> None: + self.exp_issues.unresolved_issues = 0 # type: ignore[assignment] - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected unresolved_issues to be a list')): self.exp_issues.validate() @@ -680,12 +858,12 @@ def test_validate_unresolved_issues(self): class PlaythroughTests(test_utils.GenericTestBase): """Tests the Playthrough domain object.""" - def setUp(self): - super(PlaythroughTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.playthrough = self._get_valid_early_quit_playthrough() - def _get_valid_early_quit_playthrough(self): + def _get_valid_early_quit_playthrough(self) -> stats_domain.Playthrough: """Returns an early quit playthrough after validating it.""" playthrough = stats_domain.Playthrough( 'exp_id1', 1, 'EarlyQuit', { @@ -707,7 +885,7 @@ def _get_valid_early_quit_playthrough(self): playthrough.validate() return playthrough - def test_to_dict(self): + def test_to_dict(self) -> None: playthrough = stats_domain.Playthrough( 'exp_id1', 1, 'EarlyQuit', { 'state_name': { @@ -752,9 +930,9 @@ def test_to_dict(self): 'schema_version': 1 }]) - def test_from_dict(self): + def test_from_dict(self) -> None: """Test the from_dict() method.""" - playthrough_dict = { + playthrough_dict: stats_domain.PlaythroughDict = { 'exp_id': 'exp_id1', 'exp_version': 1, 'issue_type': 'EarlyQuit', @@ -803,33 +981,39 @@ def test_from_dict(self): 'schema_version': 1 }) - def test_from_dict_raises_exception_when_miss_exp_id(self): + def test_from_dict_raises_exception_when_miss_exp_id(self) -> None: """Test the from_dict() method.""" # Test that a playthrough dict without 'exp_id' key raises exception. - playthrough_dict = { + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. 
Here ignore[typeddict-item] + # is used to test that playthrough dict contains 'exp_id' key. + playthrough_dict: stats_domain.PlaythroughDict = { # type: ignore[typeddict-item] 'exp_version': 1, 'issue_type': 'EarlyQuit', 'issue_customization_args': {}, 'actions': [] } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'exp_id not in playthrough data dict.'): stats_domain.Playthrough.from_dict(playthrough_dict) - def test_validate_with_string_exp_version(self): - self.playthrough.exp_version = '1' - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate() input type. + def test_validate_with_string_exp_version(self) -> None: + self.playthrough.exp_version = '1' # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected exp_version to be an int, received %s' % (type('1')))): self.playthrough.validate() - def test_validate_with_invalid_issue_type(self): + def test_validate_with_invalid_issue_type(self) -> None: self.playthrough.issue_type = 'InvalidIssueType' - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( 'Invalid issue type: %s' % self.playthrough.issue_type)): self.playthrough.validate() - def test_validate_with_invalid_action_type(self): + def test_validate_with_invalid_action_type(self) -> None: self.playthrough.actions = [ stats_domain.LearnerAction.from_dict({ 'action_type': 'InvalidActionType', @@ -840,35 +1024,47 @@ def test_validate_with_invalid_action_type(self): } }, })] - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( 'Invalid action type: %s' % 'InvalidActionType')): self.playthrough.validate() - def test_validate_non_str_exp_id(self): - self.playthrough.exp_id = 0 + # TODO(#13528): Here we 
use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate(). + def test_validate_non_str_exp_id(self) -> None: + self.playthrough.exp_id = 0 # type: ignore[assignment] - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected exp_id to be a string')): self.playthrough.validate() - def test_validate_non_str_issue_type(self): - self.playthrough.issue_type = 0 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate(). + def test_validate_non_str_issue_type(self) -> None: + self.playthrough.issue_type = 0 # type: ignore[assignment] - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected issue_type to be a string')): self.playthrough.validate() - def test_validate_non_list_actions(self): - self.playthrough.actions = 0 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate(). + def test_validate_non_list_actions(self) -> None: + self.playthrough.actions = 0 # type: ignore[assignment] - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected actions to be a list')): self.playthrough.validate() - def test_validate_non_dict_issue_customization_args(self): - self.playthrough.issue_customization_args = 0 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate(). 
+ def test_validate_non_dict_issue_customization_args(self) -> None: + self.playthrough.issue_customization_args = 0 # type: ignore[assignment] - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected issue_customization_args to be a dict')): self.playthrough.validate() @@ -878,8 +1074,8 @@ class ExplorationIssueTests(test_utils.GenericTestBase): DUMMY_TIME_SPENT_IN_MSECS = 1000.0 - def setUp(self): - super(ExplorationIssueTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.exp_issue = stats_domain.ExplorationIssue( 'EarlyQuit', { @@ -887,7 +1083,28 @@ def setUp(self): 'time_spent_in_exp_in_msecs': {'value': 0} }, [], 1, True) - def _dummy_convert_issue_v1_dict_to_v2_dict(self, issue_dict): + def test_equality_with_different_class(self) -> None: + class DifferentIssue: + """A different class.""" + + pass + + exploration_issue = stats_domain.ExplorationIssue( + 'EarlyQuit', { + 'state_name': {'value': ''}, + 'time_spent_in_exp_in_msecs': {'value': 0} + }, [], 1, True) + different_issue = DifferentIssue() + + self.assertFalse(exploration_issue == different_issue) + + # TODO(#15995): Here we use type Any because currently customization + # args are typed according to the codebase implementation, which can + # be considered as loose type. But once the customization args are + # implemented properly. we can remove this Any type and this todo also. + def _dummy_convert_issue_v1_dict_to_v2_dict( + self, issue_dict: Dict[str, Any] + ) -> Dict[str, Any]: """A test implementation of schema conversion function. It sets all the "time spent" fields for EarlyQuit issues to DUMMY_TIME_SPENT_IN_MSECS. 
""" @@ -898,7 +1115,7 @@ def _dummy_convert_issue_v1_dict_to_v2_dict(self, issue_dict): return issue_dict - def test_to_dict(self): + def test_to_dict(self) -> None: exp_issue = stats_domain.ExplorationIssue( 'EarlyQuit', { @@ -927,8 +1144,10 @@ def test_to_dict(self): 'is_valid': True }) - def test_from_dict(self): - expected_customization_args = { + def test_from_dict(self) -> None: + expected_customization_args: ( + stats_domain.IssuesCustomizationArgsDictType + ) = { 'time_spent_in_exp_in_msecs': { 'value': 0 }, @@ -953,22 +1172,24 @@ def test_from_dict(self): 'is_valid': True }) - def test_from_dict_raises_exception(self): + def test_from_dict_raises_exception(self) -> None: """Test the from_dict() method.""" # Test that an exploration issue dict without 'issue_type' key raises # exception. - exp_issue_dict = { + # Here we use MyPy ignore because we want to silent the error that was + # generated by defining ExplorationIssueDict without 'issue_type' key. + exp_issue_dict: stats_domain.ExplorationIssueDict = { # type: ignore[typeddict-item] 'issue_customization_args': {}, 'playthrough_ids': [], 'schema_version': 1, 'is_valid': True } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'issue_type not in exploration issue dict.'): stats_domain.ExplorationIssue.from_dict(exp_issue_dict) - def test_update_exp_issue_from_model(self): + def test_update_exp_issue_from_model(self) -> None: """Test the migration of exploration issue domain objects.""" exp_issue = stats_domain.ExplorationIssue( 'EarlyQuit', @@ -986,6 +1207,8 @@ def test_update_exp_issue_from_model(self): exp_issues_model = stats_models.ExplorationIssuesModel.get_model( 'exp_id', 1) + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_issues_model is not None current_issue_schema_version_swap = self.swap( stats_models, 'CURRENT_ISSUE_SCHEMA_VERSION', 2) @@ -1018,6 +1241,8 @@ def test_update_exp_issue_from_model(self): exp_issues_model1 = stats_models.ExplorationIssuesModel.get_model( 'exp_id_1', 1) + # Ruling out the possibility of None for mypy type checking. + assert exp_issues_model1 is not None current_issue_schema_version_swap = self.swap( stats_models, 'CURRENT_ISSUE_SCHEMA_VERSION', 2) @@ -1034,7 +1259,9 @@ def test_update_exp_issue_from_model(self): exp_issue_from_model1.unresolved_issues[0].issue_type, 'MultipleIncorrectSubmissions') - def test_cannot_update_exp_issue_from_invalid_schema_version_model(self): + def test_cannot_update_exp_issue_from_invalid_schema_version_model( + self + ) -> None: exp_issue = stats_domain.ExplorationIssue('EarlyQuit', {}, [], 4, True) exp_issue_dict = exp_issue.to_dict() stats_models.ExplorationIssuesModel.create( @@ -1042,36 +1269,43 @@ def test_cannot_update_exp_issue_from_invalid_schema_version_model(self): exp_issues_model = stats_models.ExplorationIssuesModel.get_model( 'exp_id', 1) + # Ruling out the possibility of None for mypy type checking. + assert exp_issues_model is not None - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d and unversioned issue schemas at' ' present.' % stats_models.CURRENT_ISSUE_SCHEMA_VERSION): stats_services.get_exp_issues_from_model(exp_issues_model) - def test_cannot_update_exp_issue_with_no_schema_version(self): + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[arg-type] is used to + # test updating exp_issue with no schema_version. 
+ def test_cannot_update_exp_issue_with_no_schema_version(self) -> None: exp_issue = stats_domain.ExplorationIssue( - 'EarlyQuit', {}, [], None, True) + 'EarlyQuit', {}, [], None, True) # type: ignore[arg-type] exp_issue_dict = exp_issue.to_dict() stats_models.ExplorationIssuesModel.create( 'exp_id', 1, [exp_issue_dict]) exp_issues_model = stats_models.ExplorationIssuesModel.get_model( 'exp_id', 1) + # Ruling out the possibility of None for mypy type checking. + assert exp_issues_model is not None - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape( 'unsupported operand type(s) for +=: \'NoneType\' ' 'and \'int\'')): stats_services.get_exp_issues_from_model(exp_issues_model) - def test_actual_update_exp_issue_from_model_raises_error(self): + def test_actual_update_exp_issue_from_model_raises_error(self) -> None: exp_issue = stats_domain.ExplorationIssue('EarlyQuit', {}, [], 1, True) exp_issue_dict = exp_issue.to_dict() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The _convert_issue_v1_dict_to_v2_dict() method is missing ' @@ -1080,45 +1314,95 @@ def test_actual_update_exp_issue_from_model_raises_error(self): stats_domain.ExplorationIssue.update_exp_issue_from_model( exp_issue_dict) - def test_validate_for_exp_issues_with_correct_data(self): + def test_validate_for_exp_issues_with_correct_data(self) -> None: self.exp_issue.validate() - def test_validate_with_int_issue_type(self): - self.exp_issue.issue_type = 5 - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test the validate() method input type. 
+ def test_validate_with_int_issue_type(self) -> None: + self.exp_issue.issue_type = 5 # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected issue_type to be a string, received %s' % (type(5)))): self.exp_issue.validate() - def test_validate_with_string_schema_version(self): - self.exp_issue.schema_version = '1' - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test the validate() method input type. + def test_validate_with_string_schema_version(self) -> None: + self.exp_issue.schema_version = '1' # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected schema_version to be an int, received %s' % (type('1')))): self.exp_issue.validate() - def test_validate_issue_type(self): + def test_validate_issue_type(self) -> None: self.exp_issue.issue_type = 'invalid_issue_type' - with self.assertRaisesRegexp(utils.ValidationError, ( + with self.assertRaisesRegex(utils.ValidationError, ( 'Invalid issue type')): self.exp_issue.validate() - def test_validate_playthrough_ids(self): - self.exp_issue.playthrough_ids = 'invalid_playthrough_ids' - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test the validate() method input type. 
+ def test_validate_playthrough_ids(self) -> None: + self.exp_issue.playthrough_ids = 'invalid_playthrough_ids' # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected playthrough_ids to be a list')): self.exp_issue.validate() - def test_validate_playthrough_id_type(self): - self.exp_issue.playthrough_ids = [0, 1] - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we Remove this test after + # the backend is fully type-annotated. Here ignore[list-item] is used to + # test that playthrough_id is a string. + def test_validate_playthrough_id_type(self) -> None: + self.exp_issue.playthrough_ids = [0, 1] # type: ignore[list-item] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected each playthrough_id to be a string')): self.exp_issue.validate() + def test_comparison_between_exploration_issues_returns_correctly( + self) -> None: + expected_customization_args: ( + stats_domain.IssuesCustomizationArgsDictType + ) = { + 'time_spent_in_exp_in_msecs': { + 'value': 0 + }, + 'state_name': { + 'value': '' + } + } + exp_issue1 = stats_domain.ExplorationIssue( + 'EarlyQuit', + expected_customization_args, + [], + 1, + True + ) + exp_issue2 = stats_domain.ExplorationIssue( + 'EarlyQuit', + expected_customization_args, + [], + 2, + True + ) + exp_issue3 = stats_domain.ExplorationIssue( + 'EarlyQuit', + expected_customization_args, + [], + 1, + True + ) + + self.assertTrue(exp_issue1 == exp_issue3) + self.assertFalse(exp_issue2 == exp_issue3) + self.assertFalse(exp_issue1 == exp_issue2) + class LearnerActionTests(test_utils.GenericTestBase): """Tests the LearnerAction domain object.""" - def setUp(self): - super(LearnerActionTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.learner_action = stats_domain.LearnerAction( 'ExplorationStart', { @@ -1127,16 +1411,21 @@ def setUp(self): } }, 1) - def _dummy_convert_action_v1_dict_to_v2_dict(self, action_dict): + def 
_dummy_convert_action_v1_dict_to_v2_dict( + self, + action_dict: stats_domain.LearnerActionDict + ) -> stats_domain.LearnerActionDict: """A test implementation of schema conversion function.""" action_dict['schema_version'] = 2 if action_dict['action_type'] == 'ExplorationStart': action_dict['action_type'] = 'ExplorationStart1' - action_dict['action_customization_args']['new_key'] = 5 + action_dict['action_customization_args']['new_key'] = { + 'value': 5 + } return action_dict - def test_to_dict(self): + def test_to_dict(self) -> None: learner_action = stats_domain.LearnerAction( 'ExplorationStart', { @@ -1157,7 +1446,7 @@ def test_to_dict(self): 'schema_version': 1 }) - def test_update_learner_action_from_model(self): + def test_update_learner_action_from_model(self) -> None: """Test the migration of learner action domain objects.""" learner_action = stats_domain.LearnerAction('ExplorationStart', {}, 1) learner_action_dict = learner_action.to_dict() @@ -1188,7 +1477,8 @@ def test_update_learner_action_from_model(self): self.assertEqual( playthrough.actions[0].action_type, 'ExplorationStart1') self.assertEqual( - playthrough.actions[0].action_customization_args['new_key'], 5) + playthrough.actions[0].action_customization_args['new_key'], + {'value': 5}) # For other action types, no changes happen during migration. 
learner_action1 = stats_domain.LearnerAction('ExplorationQuit', {}, 1) @@ -1222,7 +1512,8 @@ def test_update_learner_action_from_model(self): playthrough1.actions[0].action_type, 'ExplorationQuit') def test_cannot_update_learner_action_from_invalid_schema_version_model( - self): + self + ) -> None: learner_action = stats_domain.LearnerAction('ExplorationStart', {}, 4) learner_action_dict = learner_action.to_dict() @@ -1238,7 +1529,7 @@ def test_cannot_update_learner_action_from_invalid_schema_version_model( playthrough_model = stats_models.PlaythroughModel.get(playthrough_id) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d and unversioned action schemas' ' at present.' % @@ -1246,9 +1537,12 @@ def test_cannot_update_learner_action_from_invalid_schema_version_model( stats_services.get_playthrough_from_model( playthrough_model) - def test_cannot_update_learner_action_with_no_schema_version(self): + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[arg-type] is used to test + # updating learner_action with no schema_version. 
+ def test_cannot_update_learner_action_with_no_schema_version(self) -> None: learner_action = stats_domain.LearnerAction( - 'ExplorationStart', {}, None) + 'ExplorationStart', {}, None) # type: ignore[arg-type] learner_action_dict = learner_action.to_dict() playthrough_id = stats_models.PlaythroughModel.create( @@ -1263,18 +1557,18 @@ def test_cannot_update_learner_action_with_no_schema_version(self): playthrough_model = stats_models.PlaythroughModel.get(playthrough_id) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape( 'unsupported operand type(s) for +=: \'NoneType\' ' 'and \'int\'')): stats_services.get_playthrough_from_model(playthrough_model) - def test_actual_update_learner_action_from_model_raises_error(self): + def test_actual_update_learner_action_from_model_raises_error(self) -> None: learner_action = stats_domain.LearnerAction('ExplorationStart', {}, 1) learner_action_dict = learner_action.to_dict() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The _convert_action_v1_dict_to_v2_dict() method is missing ' @@ -1283,18 +1577,24 @@ def test_actual_update_learner_action_from_model_raises_error(self): stats_domain.LearnerAction.update_learner_action_from_model( learner_action_dict) - def test_validate_for_learner_action_with_correct_data(self): + def test_validate_for_learner_action_with_correct_data(self) -> None: self.learner_action.validate() - def test_validate_with_int_action_type(self): - self.learner_action.action_type = 5 - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate() input type. 
+ def test_validate_with_int_action_type(self) -> None: + self.learner_action.action_type = 5 # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected action_type to be a string, received %s' % (type(5)))): self.learner_action.validate() - def test_validate_with_string_schema_version(self): - self.learner_action.schema_version = '1' - with self.assertRaisesRegexp(utils.ValidationError, ( + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method validate() input type. + def test_validate_with_string_schema_version(self) -> None: + self.learner_action.schema_version = '1' # type: ignore[assignment] + with self.assertRaisesRegex(utils.ValidationError, ( 'Expected schema_version to be an int, received %s' % (type('1')))): self.learner_action.validate() @@ -1302,7 +1602,9 @@ def test_validate_with_string_schema_version(self): class StateAnswersTests(test_utils.GenericTestBase): """Tests the StateAnswers domain object.""" - def test_can_retrieve_properly_constructed_submitted_answer_dict_list(self): + def test_can_retrieve_properly_constructed_submitted_answer_dict_list( + self + ) -> None: state_answers = stats_domain.StateAnswers( 'exp_id', 1, 'initial_state', 'TextInput', [ stats_domain.SubmittedAnswer( @@ -1346,49 +1648,69 @@ def test_can_retrieve_properly_constructed_submitted_answer_dict_list(self): class StateAnswersValidationTests(test_utils.GenericTestBase): """Tests the StateAnswers domain object for validation.""" - def setUp(self): - super(StateAnswersValidationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.state_answers = stats_domain.StateAnswers( 'exp_id', 1, 'initial_state', 'TextInput', []) # The canonical object should have no validation problems. 
self.state_answers.validate() - def test_exploration_id_must_be_string(self): - self.state_answers.exploration_id = 0 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test the type of exploration_id. + def test_exploration_id_must_be_string(self) -> None: + self.state_answers.exploration_id = 0 # type: ignore[assignment] self._assert_validation_error( self.state_answers, 'Expected exploration_id to be a string') - def test_state_name_must_be_string(self): - self.state_answers.state_name = ['state'] + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that state_name is a string. + def test_state_name_must_be_string(self) -> None: + self.state_answers.state_name = ['state'] # type: ignore[assignment] self._assert_validation_error( self.state_answers, 'Expected state_name to be a string') - def test_interaction_id_can_be_none(self): - self.state_answers.interaction_id = None + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test the type of interaction_id. + def test_interaction_id_can_be_none(self) -> None: + self.state_answers.interaction_id = None # type: ignore[assignment] self.state_answers.validate() - def test_interaction_id_must_otherwise_be_string(self): - self.state_answers.interaction_id = 10 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test the type of interaction_id. 
+ def test_interaction_id_must_otherwise_be_string(self) -> None: + self.state_answers.interaction_id = 10 # type: ignore[assignment] self._assert_validation_error( self.state_answers, 'Expected interaction_id to be a string') - def test_interaction_id_must_refer_to_existing_interaction(self): + def test_interaction_id_must_refer_to_existing_interaction(self) -> None: self.state_answers.interaction_id = 'FakeInteraction' self._assert_validation_error( self.state_answers, 'Unknown interaction_id: FakeInteraction') - def test_submitted_answer_list_must_be_list(self): - self.state_answers.submitted_answer_list = {} + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test the type of submitted_answer_list. + def test_submitted_answer_list_must_be_list(self) -> None: + self.state_answers.submitted_answer_list = {} # type: ignore[assignment] self._assert_validation_error( self.state_answers, 'Expected submitted_answer_list to be a list') - def test_schema_version_must_be_integer(self): - self.state_answers.schema_version = '1' + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test the type of schema_version. 
+ def test_schema_version_must_be_integer(self) -> None: + self.state_answers.schema_version = '1' # type: ignore[assignment] self._assert_validation_error( self.state_answers, 'Expected schema_version to be an integer') - def test_schema_version_must_be_between_one_and_current_version(self): + def test_schema_version_must_be_between_one_and_current_version( + self + ) -> None: self.state_answers.schema_version = 0 self._assert_validation_error( self.state_answers, 'schema_version < 1: 0') @@ -1406,7 +1728,7 @@ def test_schema_version_must_be_between_one_and_current_version(self): class SubmittedAnswerTests(test_utils.GenericTestBase): """Tests the SubmittedAnswer domain object.""" - def test_can_be_converted_to_from_full_dict(self): + def test_can_be_converted_to_from_full_dict(self) -> None: submitted_answer = stats_domain.SubmittedAnswer( 'Text', 'TextInput', 0, 1, exp_domain.EXPLICIT_CLASSIFICATION, {}, 'sess', 10.5, rule_spec_str='rule spec str', @@ -1417,7 +1739,7 @@ def test_can_be_converted_to_from_full_dict(self): self.assertEqual( cloned_submitted_answer.to_dict(), submitted_answer_dict) - def test_can_be_converted_to_full_dict(self): + def test_can_be_converted_to_full_dict(self) -> None: submitted_answer = stats_domain.SubmittedAnswer( 'Text', 'TextInput', 0, 1, exp_domain.EXPLICIT_CLASSIFICATION, {}, 'sess', 10.5, rule_spec_str='rule spec str', @@ -1435,7 +1757,7 @@ def test_can_be_converted_to_full_dict(self): 'answer_str': 'answer str' }) - def test_dict_may_not_include_rule_spec_str_or_answer_str(self): + def test_dict_may_not_include_rule_spec_str_or_answer_str(self) -> None: submitted_answer = stats_domain.SubmittedAnswer( 'Text', 'TextInput', 0, 1, exp_domain.EXPLICIT_CLASSIFICATION, {}, 'sess', 10.5) @@ -1447,12 +1769,17 @@ def test_dict_may_not_include_rule_spec_str_or_answer_str(self): 'classification_categorization': exp_domain.EXPLICIT_CLASSIFICATION, 'params': {}, 'session_id': 'sess', - 'time_spent_in_sec': 10.5 + 'time_spent_in_sec': 
10.5, + 'answer_str': None, + 'rule_spec_str': None }) - def test_requires_answer_to_be_created_from_dict(self): - with self.assertRaisesRegexp(KeyError, 'answer'): - stats_domain.SubmittedAnswer.from_dict({ + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that the 'answer' key is in the submitted answer dict. + def test_requires_answer_to_be_created_from_dict(self) -> None: + with self.assertRaisesRegex(KeyError, 'answer'): + stats_domain.SubmittedAnswer.from_dict({ # type: ignore[typeddict-item] 'interaction_id': 'TextInput', 'answer_group_index': 0, 'rule_spec_index': 1, @@ -1460,12 +1787,17 @@ def test_requires_answer_to_be_created_from_dict(self): exp_domain.EXPLICIT_CLASSIFICATION), 'params': {}, 'session_id': 'sess', - 'time_spent_in_sec': 10.5 + 'time_spent_in_sec': 10.5, + 'rule_spec_str': None, + 'answer_str': None }) - def test_requires_interaction_id_to_be_created_from_dict(self): - with self.assertRaisesRegexp(KeyError, 'interaction_id'): - stats_domain.SubmittedAnswer.from_dict({ + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that the 'interaction_id' key is in the submitted answer dict. 
+ def test_requires_interaction_id_to_be_created_from_dict(self) -> None: + with self.assertRaisesRegex(KeyError, 'interaction_id'): + stats_domain.SubmittedAnswer.from_dict({ # type: ignore[typeddict-item] 'answer': 'Text', 'answer_group_index': 0, 'rule_spec_index': 1, @@ -1473,12 +1805,17 @@ def test_requires_interaction_id_to_be_created_from_dict(self): exp_domain.EXPLICIT_CLASSIFICATION), 'params': {}, 'session_id': 'sess', - 'time_spent_in_sec': 10.5 + 'time_spent_in_sec': 10.5, + 'rule_spec_str': None, + 'answer_str': None }) - def test_requires_answer_group_index_to_be_created_from_dict(self): - with self.assertRaisesRegexp(KeyError, 'answer_group_index'): - stats_domain.SubmittedAnswer.from_dict({ + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that the 'answer_group_index' key is in the submitted answer dict. + def test_requires_answer_group_index_to_be_created_from_dict(self) -> None: + with self.assertRaisesRegex(KeyError, 'answer_group_index'): + stats_domain.SubmittedAnswer.from_dict({ # type: ignore[typeddict-item] 'answer': 'Text', 'interaction_id': 'TextInput', 'rule_spec_index': 1, @@ -1486,12 +1823,17 @@ def test_requires_answer_group_index_to_be_created_from_dict(self): exp_domain.EXPLICIT_CLASSIFICATION), 'params': {}, 'session_id': 'sess', - 'time_spent_in_sec': 10.5 + 'time_spent_in_sec': 10.5, + 'rule_spec_str': None, + 'answer_str': None }) - def test_requires_rule_spec_index_to_be_created_from_dict(self): - with self.assertRaisesRegexp(KeyError, 'rule_spec_index'): - stats_domain.SubmittedAnswer.from_dict({ + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that the 'rule_spec_index' key is in the submitted answer dict. 
+ def test_requires_rule_spec_index_to_be_created_from_dict(self) -> None: + with self.assertRaisesRegex(KeyError, 'rule_spec_index'): + stats_domain.SubmittedAnswer.from_dict({ # type: ignore[typeddict-item] 'answer': 'Text', 'interaction_id': 'TextInput', 'answer_group_index': 0, @@ -1499,24 +1841,37 @@ def test_requires_rule_spec_index_to_be_created_from_dict(self): exp_domain.EXPLICIT_CLASSIFICATION), 'params': {}, 'session_id': 'sess', - 'time_spent_in_sec': 10.5 + 'time_spent_in_sec': 10.5, + 'rule_spec_str': None, + 'answer_str': None }) - def test_requires_classification_categ_to_be_created_from_dict(self): - with self.assertRaisesRegexp(KeyError, 'classification_categorization'): - stats_domain.SubmittedAnswer.from_dict({ + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that the 'classification_categorization' key is in the submitted + # answer dict. + def test_requires_classification_categ_to_be_created_from_dict( + self + ) -> None: + with self.assertRaisesRegex(KeyError, 'classification_categorization'): + stats_domain.SubmittedAnswer.from_dict({ # type: ignore[typeddict-item] 'answer': 'Text', 'interaction_id': 'TextInput', 'answer_group_index': 0, 'rule_spec_index': 1, 'params': {}, 'session_id': 'sess', - 'time_spent_in_sec': 10.5 + 'time_spent_in_sec': 10.5, + 'rule_spec_str': None, + 'answer_str': None }) - def test_requires_params_to_be_created_from_dict(self): - with self.assertRaisesRegexp(KeyError, 'params'): - stats_domain.SubmittedAnswer.from_dict({ + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that the 'params' key is in the submitted answer dict. 
+ def test_requires_params_to_be_created_from_dict(self) -> None: + with self.assertRaisesRegex(KeyError, 'params'): + stats_domain.SubmittedAnswer.from_dict({ # type: ignore[typeddict-item] 'answer': 'Text', 'interaction_id': 'TextInput', 'answer_group_index': 0, @@ -1524,12 +1879,17 @@ def test_requires_params_to_be_created_from_dict(self): 'classification_categorization': ( exp_domain.EXPLICIT_CLASSIFICATION), 'session_id': 'sess', - 'time_spent_in_sec': 10.5 + 'time_spent_in_sec': 10.5, + 'rule_spec_str': None, + 'answer_str': None }) - def test_requires_session_id_to_be_created_from_dict(self): - with self.assertRaisesRegexp(KeyError, 'session_id'): - stats_domain.SubmittedAnswer.from_dict({ + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that the 'session_id' key is in the submitted answer dict. + def test_requires_session_id_to_be_created_from_dict(self) -> None: + with self.assertRaisesRegex(KeyError, 'session_id'): + stats_domain.SubmittedAnswer.from_dict({ # type: ignore[typeddict-item] 'answer': 'Text', 'interaction_id': 'TextInput', 'answer_group_index': 0, @@ -1537,12 +1897,17 @@ def test_requires_session_id_to_be_created_from_dict(self): 'classification_categorization': ( exp_domain.EXPLICIT_CLASSIFICATION), 'params': {}, - 'time_spent_in_sec': 10.5 + 'time_spent_in_sec': 10.5, + 'rule_spec_str': None, + 'answer_str': None }) - def test_requires_time_spent_in_sec_to_be_created_from_dict(self): - with self.assertRaisesRegexp(KeyError, 'time_spent_in_sec'): - stats_domain.SubmittedAnswer.from_dict({ + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[typeddict-item] is used + # to test that the 'time_spent_in_sec' key is in the submitted answer dict. 
+ def test_requires_time_spent_in_sec_to_be_created_from_dict(self) -> None: + with self.assertRaisesRegex(KeyError, 'time_spent_in_sec'): + stats_domain.SubmittedAnswer.from_dict({ # type: ignore[typeddict-item] 'answer': 'Text', 'interaction_id': 'TextInput', 'answer_group_index': 0, @@ -1551,9 +1916,11 @@ def test_requires_time_spent_in_sec_to_be_created_from_dict(self): exp_domain.EXPLICIT_CLASSIFICATION), 'params': {}, 'session_id': 'sess', + 'rule_spec_str': None, + 'answer_str': None }) - def test_can_be_created_from_full_dict(self): + def test_can_be_created_from_full_dict(self) -> None: submitted_answer = stats_domain.SubmittedAnswer.from_dict({ 'answer': 'Text', 'interaction_id': 'TextInput', @@ -1580,7 +1947,9 @@ def test_can_be_created_from_full_dict(self): self.assertEqual(submitted_answer.rule_spec_str, 'rule spec str') self.assertEqual(submitted_answer.answer_str, 'answer str') - def test_can_be_created_from_dict_missing_rule_spec_and_answer(self): + def test_can_be_created_from_dict_missing_rule_spec_and_answer( + self + ) -> None: submitted_answer = stats_domain.SubmittedAnswer.from_dict({ 'answer': 'Text', 'interaction_id': 'TextInput', @@ -1590,7 +1959,9 @@ def test_can_be_created_from_dict_missing_rule_spec_and_answer(self): exp_domain.EXPLICIT_CLASSIFICATION), 'params': {}, 'session_id': 'sess', - 'time_spent_in_sec': 10.5 + 'time_spent_in_sec': 10.5, + 'rule_spec_str': None, + 'answer_str': None }) self.assertEqual(submitted_answer.answer, 'Text') self.assertEqual(submitted_answer.interaction_id, 'TextInput') @@ -1609,8 +1980,8 @@ def test_can_be_created_from_dict_missing_rule_spec_and_answer(self): class SubmittedAnswerValidationTests(test_utils.GenericTestBase): """Tests the SubmittedAnswer domain object for validation.""" - def setUp(self): - super(SubmittedAnswerValidationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.submitted_answer = stats_domain.SubmittedAnswer( 'Text', 'TextInput', 0, 0, 
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id', 0.) @@ -1618,7 +1989,7 @@ def setUp(self): # The canonical object should have no validation problems. self.submitted_answer.validate() - def test_answer_may_be_none_only_for_linear_interaction(self): + def test_answer_may_be_none_only_for_linear_interaction(self) -> None: # It's valid for answer to be None if the interaction type is Continue. self.submitted_answer.answer = None self._assert_validation_error( @@ -1629,72 +2000,100 @@ def test_answer_may_be_none_only_for_linear_interaction(self): self.submitted_answer.interaction_id = 'Continue' self.submitted_answer.validate() - def test_time_spent_in_sec_must_not_be_none(self): - self.submitted_answer.time_spent_in_sec = None + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that time_spent_in_sec is not None. + def test_time_spent_in_sec_must_not_be_none(self) -> None: + self.submitted_answer.time_spent_in_sec = None # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'SubmittedAnswers must have a provided time_spent_in_sec') - def test_time_spent_in_sec_must_be_number(self): - self.submitted_answer.time_spent_in_sec = '0' + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that time_spent_in_sec is int. + def test_time_spent_in_sec_must_be_number(self) -> None: + self.submitted_answer.time_spent_in_sec = '0' # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'Expected time_spent_in_sec to be a number') - def test_time_spent_in_sec_must_be_positive(self): + def test_time_spent_in_sec_must_be_positive(self) -> None: self.submitted_answer.time_spent_in_sec = -1. 
self._assert_validation_error( self.submitted_answer, 'Expected time_spent_in_sec to be non-negative') - def test_session_id_must_not_be_none(self): - self.submitted_answer.session_id = None + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that session_id is not None. + def test_session_id_must_not_be_none(self) -> None: + self.submitted_answer.session_id = None # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'SubmittedAnswers must have a provided session_id') - def test_session_id_must_be_string(self): - self.submitted_answer.session_id = 90 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that session_id is a string. + def test_session_id_must_be_string(self) -> None: + self.submitted_answer.session_id = 90 # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'Expected session_id to be a string') - def test_params_must_be_dict(self): - self.submitted_answer.params = [] + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that params is a dict. + def test_params_must_be_dict(self) -> None: + self.submitted_answer.params = [] # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'Expected params to be a dict') - def test_answer_group_index_must_be_integer(self): - self.submitted_answer.answer_group_index = '0' + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that answer_group_index is int. 
+ def test_answer_group_index_must_be_integer(self) -> None: + self.submitted_answer.answer_group_index = '0' # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'Expected answer_group_index to be an integer') - def test_answer_group_index_must_be_positive(self): + def test_answer_group_index_must_be_positive(self) -> None: self.submitted_answer.answer_group_index = -1 self._assert_validation_error( self.submitted_answer, 'Expected answer_group_index to be non-negative') - def test_rule_spec_index_can_be_none(self): - self.submitted_answer.rule_spec_index = None + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test type of rule_spec_index. + def test_rule_spec_index_can_be_none(self) -> None: + self.submitted_answer.rule_spec_index = None # type: ignore[assignment] self.submitted_answer.validate() - def test_rule_spec_index_must_be_integer(self): - self.submitted_answer.rule_spec_index = '0' + def test_rule_spec_index_must_be_integer(self) -> None: + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[assignment] + # is used to test that rule_spec_index is int. + self.submitted_answer.rule_spec_index = '0' # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'Expected rule_spec_index to be an integer') - self.submitted_answer.rule_spec_index = '' + + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[assignment] + # is used to test that rule_spec_index is int. 
+ self.submitted_answer.rule_spec_index = '' # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'Expected rule_spec_index to be an integer') self.submitted_answer.rule_spec_index = 0 self.submitted_answer.validate() - def test_rule_spec_index_must_be_positive(self): + def test_rule_spec_index_must_be_positive(self) -> None: self.submitted_answer.rule_spec_index = -1 self._assert_validation_error( self.submitted_answer, 'Expected rule_spec_index to be non-negative') - def test_classification_categorization_must_be_valid_category(self): + def test_classification_categorization_must_be_valid_category(self) -> None: self.submitted_answer.classification_categorization = ( exp_domain.TRAINING_DATA_CLASSIFICATION) self.submitted_answer.validate() @@ -1712,8 +2111,11 @@ def test_classification_categorization_must_be_valid_category(self): self.submitted_answer, 'Expected valid classification_categorization') - def test_rule_spec_str_must_be_none_or_string(self): - self.submitted_answer.rule_spec_str = 10 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that rule_spec_str is None or str. + def test_rule_spec_str_must_be_none_or_string(self) -> None: + self.submitted_answer.rule_spec_str = 10 # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'Expected rule_spec_str to be either None or a string') @@ -1724,8 +2126,11 @@ def test_rule_spec_str_must_be_none_or_string(self): self.submitted_answer.rule_spec_str = None self.submitted_answer.validate() - def test_answer_str_must_be_none_or_string(self): - self.submitted_answer.answer_str = 10 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that answer_str is None or str. 
+ def test_answer_str_must_be_none_or_string(self) -> None: + self.submitted_answer.answer_str = 10 # type: ignore[assignment] self._assert_validation_error( self.submitted_answer, 'Expected answer_str to be either None or a string') @@ -1744,17 +2149,17 @@ class AnswerFrequencyListDomainTests(test_utils.GenericTestBase): ANSWER_B = stats_domain.AnswerOccurrence('answer b', 2) ANSWER_C = stats_domain.AnswerOccurrence('answer c', 1) - def test_has_correct_type(self): + def test_has_correct_type(self) -> None: answer_frequency_list = stats_domain.AnswerFrequencyList([]) self.assertEqual( answer_frequency_list.calculation_output_type, stats_domain.CALC_OUTPUT_TYPE_ANSWER_FREQUENCY_LIST) - def test_defaults_to_empty_list(self): + def test_defaults_to_empty_list(self) -> None: answer_frequency_list = stats_domain.AnswerFrequencyList() self.assertEqual(len(answer_frequency_list.answer_occurrences), 0) - def test_create_list_from_raw_object(self): + def test_create_list_from_raw_object(self) -> None: answer_frequency_list = ( stats_domain.AnswerFrequencyList.from_raw_type([{ 'answer': 'answer a', 'frequency': 3 @@ -1768,7 +2173,7 @@ def test_create_list_from_raw_object(self): self.assertEqual(answer_occurrences[1].answer, 'answer b') self.assertEqual(answer_occurrences[1].frequency, 2) - def test_convert_list_to_raw_object(self): + def test_convert_list_to_raw_object(self) -> None: answer_frequency_list = stats_domain.AnswerFrequencyList( [self.ANSWER_A, self.ANSWER_B]) self.assertEqual(answer_frequency_list.to_raw_type(), [{ @@ -1787,19 +2192,19 @@ class CategorizedAnswerFrequencyListsDomainTests(test_utils.GenericTestBase): ANSWER_B = stats_domain.AnswerOccurrence('answer b', 2) ANSWER_C = stats_domain.AnswerOccurrence('answer c', 1) - def test_has_correct_type(self): + def test_has_correct_type(self) -> None: answer_frequency_lists = ( stats_domain.CategorizedAnswerFrequencyLists({})) self.assertEqual( answer_frequency_lists.calculation_output_type, 
stats_domain.CALC_OUTPUT_TYPE_CATEGORIZED_ANSWER_FREQUENCY_LISTS) - def test_defaults_to_empty_dict(self): + def test_defaults_to_empty_dict(self) -> None: answer_frequency_lists = stats_domain.CategorizedAnswerFrequencyLists() self.assertEqual( len(answer_frequency_lists.categorized_answer_freq_lists), 0) - def test_create_list_from_raw_object(self): + def test_create_list_from_raw_object(self) -> None: answer_frequency_lists = ( stats_domain.CategorizedAnswerFrequencyLists.from_raw_type({ 'category a': [{'answer': 'answer a', 'frequency': 3}], @@ -1834,7 +2239,7 @@ def test_create_list_from_raw_object(self): self.assertEqual(category_b_answers[1].answer, 'answer c') self.assertEqual(category_b_answers[1].frequency, 1) - def test_convert_list_to_raw_object(self): + def test_convert_list_to_raw_object(self) -> None: answer_frequency_lists = stats_domain.CategorizedAnswerFrequencyLists({ 'category a': stats_domain.AnswerFrequencyList([self.ANSWER_A]), 'category b': stats_domain.AnswerFrequencyList( @@ -1858,8 +2263,8 @@ class StateAnswersCalcOutputValidationTests(test_utils.GenericTestBase): class MockCalculationOutputObjectWithUnknownType: pass - def setUp(self): - super(StateAnswersCalcOutputValidationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.state_answers_calc_output = stats_domain.StateAnswersCalcOutput( 'exp_id', 1, 'initial_state', 'TextInput', 'AnswerFrequencies', stats_domain.AnswerFrequencyList.from_raw_type([])) @@ -1867,32 +2272,46 @@ def setUp(self): # The canonical object should have no validation problems. self.state_answers_calc_output.validate() - def test_exploration_id_must_be_string(self): - self.state_answers_calc_output.exploration_id = 0 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that exploration_id is a string. 
+ def test_exploration_id_must_be_string(self) -> None: + self.state_answers_calc_output.exploration_id = 0 # type: ignore[assignment] self._assert_validation_error( self.state_answers_calc_output, 'Expected exploration_id to be a string') - def test_state_name_must_be_string(self): - self.state_answers_calc_output.state_name = ['state'] + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that sstate_name is a string. + def test_state_name_must_be_string(self) -> None: + self.state_answers_calc_output.state_name = ['state'] # type: ignore[assignment] self._assert_validation_error( self.state_answers_calc_output, 'Expected state_name to be a string') - def test_calculation_id_must_be_string(self): - self.state_answers_calc_output.calculation_id = ['calculation id'] + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that calculation_id is a string. + def test_calculation_id_must_be_string(self) -> None: + self.state_answers_calc_output.calculation_id = ['calculation id'] # type: ignore[assignment] self._assert_validation_error( self.state_answers_calc_output, 'Expected calculation_id to be a string') - def test_calculation_output_must_be_known_type(self): + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that calculation_output is a known type. 
+ def test_calculation_output_must_be_known_type(self) -> None: self.state_answers_calc_output.calculation_output = ( - self.MockCalculationOutputObjectWithUnknownType()) + self.MockCalculationOutputObjectWithUnknownType()) # type: ignore[assignment] self._assert_validation_error( self.state_answers_calc_output, 'Expected calculation output to be one of') - def test_calculation_output_must_be_less_than_one_million_bytes(self): + def test_calculation_output_must_be_less_than_one_million_bytes( + self + ) -> None: occurred_answer = stats_domain.AnswerOccurrence( 'This is not a long sentence.', 1) self.state_answers_calc_output.calculation_output = ( @@ -1905,8 +2324,8 @@ def test_calculation_output_must_be_less_than_one_million_bytes(self): class LearnerAnswerDetailsTests(test_utils.GenericTestBase): - def setUp(self): - super(LearnerAnswerDetailsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.learner_answer_details = stats_domain.LearnerAnswerDetails( 'exp_id:state_name', feconf.ENTITY_TYPE_EXPLORATION, 'TextInput', [stats_domain.LearnerAnswerInfo( @@ -1914,7 +2333,7 @@ def setUp(self): datetime.datetime(2019, 6, 19, 13, 59, 29, 153073))], 4000) self.learner_answer_details.validate() - def test_to_dict(self): + def test_to_dict(self) -> None: expected_learner_answer_details_dict = { 'state_reference': 'exp_id:state_name', 'entity_type': 'exploration', @@ -1931,8 +2350,8 @@ def test_to_dict(self): self.assertEqual( learner_answer_details_dict, expected_learner_answer_details_dict) - def test_from_dict(self): - learner_answer_details_dict = { + def test_from_dict(self) -> None: + learner_answer_details_dict: stats_domain.LearnerAnswerDetailsDict = { 'state_reference': 'exp_id:state_name', 'entity_type': 'exploration', 'interaction_id': 'TextInput', @@ -1969,7 +2388,7 @@ def test_from_dict(self): self.assertEqual( learner_answer_details.learner_answer_info_schema_version, 1) - def test_add_learner_answer_info(self): + def 
test_add_learner_answer_info(self) -> None: learner_answer_info = stats_domain.LearnerAnswerInfo( 'id_2', 'This answer', 'This details', datetime.datetime.strptime('27 Sep 2012', '%d %b %Y')) @@ -1980,7 +2399,7 @@ def test_add_learner_answer_info(self): self.assertEqual( len(self.learner_answer_details.learner_answer_info_list), 2) - def test_learner_answer_info_with_big_size_must_not_be_added(self): + def test_learner_answer_info_with_big_size_must_not_be_added(self) -> None: answer = 'This is answer abc' * 900 answer_details = 'This is answer details' * 400 created_on = datetime.datetime.strptime('27 Sep 2012', '%d %b %Y') @@ -2003,7 +2422,7 @@ def test_learner_answer_info_with_big_size_must_not_be_added(self): self.assertEqual( len(self.learner_answer_details.learner_answer_info_list), 36) - def test_delete_learner_answer_info(self): + def test_delete_learner_answer_info(self) -> None: self.assertEqual( len(self.learner_answer_details.learner_answer_info_list), 1) learner_answer_info = stats_domain.LearnerAnswerInfo( @@ -2019,13 +2438,13 @@ def test_delete_learner_answer_info(self): self.assertNotEqual( self.learner_answer_details.accumulated_answer_info_json_size_bytes, 0) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Learner answer info with the given id not found'): self.learner_answer_details.delete_learner_answer_info('id_3') self.assertEqual( len(self.learner_answer_details.learner_answer_info_list), 1) - def test_update_state_reference(self): + def test_update_state_reference(self) -> None: self.assertEqual( self.learner_answer_details.state_reference, 'exp_id:state_name') self.learner_answer_details.update_state_reference( @@ -2034,78 +2453,98 @@ def test_update_state_reference(self): self.learner_answer_details.state_reference, 'exp_id_1:state_name_1') - def test_state_reference_must_be_string(self): - self.learner_answer_details.state_reference = 0 + # TODO(#13528): Here we use MyPy ignore because we remove this test after 
+ # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that state_reference is str. + def test_state_reference_must_be_string(self) -> None: + self.learner_answer_details.state_reference = 0 # type: ignore[assignment] self._assert_validation_error( self.learner_answer_details, 'Expected state_reference to be a string') - def test_entity_type_must_be_string(self): - self.learner_answer_details.entity_type = 0 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that entity_type is str. + def test_entity_type_must_be_string(self) -> None: + self.learner_answer_details.entity_type = 0 # type: ignore[assignment] self._assert_validation_error( self.learner_answer_details, 'Expected entity_type to be a string') - def test_entity_type_must_be_valid(self,): + def test_entity_type_must_be_valid(self,) -> None: self.learner_answer_details.entity_type = 'topic' self._assert_validation_error( self.learner_answer_details, 'Invalid entity type received topic') - def test_state_reference_must_be_valid_for_exploration(self): + def test_state_reference_must_be_valid_for_exploration(self) -> None: self.learner_answer_details.state_reference = 'expidstatename' self._assert_validation_error( self.learner_answer_details, 'For entity type exploration, the state reference should') - def test_state_reference_must_be_valid_for_question(self): + def test_state_reference_must_be_valid_for_question(self) -> None: self.learner_answer_details.entity_type = 'question' self.learner_answer_details.state_reference = 'expid:statename' self._assert_validation_error( self.learner_answer_details, 'For entity type question, the state reference should') - def test_interaction_id_must_be_valid(self): + def test_interaction_id_must_be_valid(self) -> None: self.learner_answer_details.interaction_id = 'MyInteraction' self._assert_validation_error( 
self.learner_answer_details, 'Unknown interaction_id: MyInteraction') - def test_interaction_id_must_be_string(self): - self.learner_answer_details.interaction_id = 0 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that interaction_id is str. + def test_interaction_id_must_be_string(self) -> None: + self.learner_answer_details.interaction_id = 0 # type: ignore[assignment] self._assert_validation_error( self.learner_answer_details, 'Expected interaction_id to be a string') - def test_continue_interaction_cannot_solicit_answer_details(self): + def test_continue_interaction_cannot_solicit_answer_details(self) -> None: self.learner_answer_details.interaction_id = 'Continue' self._assert_validation_error( self.learner_answer_details, 'The Continue interaction does not support ' 'soliciting answer details') - def test_end_exploration_interaction_cannot_solicit_answer_details(self): + def test_end_exploration_interaction_cannot_solicit_answer_details( + self + ) -> None: self.learner_answer_details.interaction_id = 'EndExploration' self._assert_validation_error( self.learner_answer_details, 'The EndExploration interaction does not support ' 'soliciting answer details') - def test_learner_answer_info_must_be_list(self): - self.learner_answer_details.learner_answer_info_list = 'list' + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that learner_answer_info is a List. 
+ def test_learner_answer_info_must_be_list(self) -> None: + self.learner_answer_details.learner_answer_info_list = 'list' # type: ignore[assignment] self._assert_validation_error( self.learner_answer_details, 'Expected learner_answer_info_list to be a list') - def test_learner_answer_info_schema_version_must_be_int(self): - self.learner_answer_details.learner_answer_info_schema_version = 'v' + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that learner_answer_info_schema_version is int. + def test_learner_answer_info_schema_version_must_be_int(self) -> None: + self.learner_answer_details.learner_answer_info_schema_version = 'v' # type: ignore[assignment] self._assert_validation_error( self.learner_answer_details, 'Expected learner_answer_info_schema_version to be an int') - def test_accumulated_answer_info_json_size_bytes_must_be_int(self): + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that accumulated_answer_info_json_size_bytes is an int. 
+ def test_accumulated_answer_info_json_size_bytes_must_be_int(self) -> None: self.learner_answer_details.accumulated_answer_info_json_size_bytes = ( - 'size') + 'size') # type: ignore[assignment] self._assert_validation_error( self.learner_answer_details, 'Expected accumulated_answer_info_json_size_bytes to be an int') @@ -2113,14 +2552,14 @@ def test_accumulated_answer_info_json_size_bytes_must_be_int(self): class LearnerAnswerInfoTests(test_utils.GenericTestBase): - def setUp(self): - super(LearnerAnswerInfoTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.learner_answer_info = stats_domain.LearnerAnswerInfo( 'id_1', 'This is my answer', 'This is my answer details', datetime.datetime(2019, 6, 19, 13, 59, 29, 153073)) self.learner_answer_info.validate() - def test_to_dict(self): + def test_to_dict(self) -> None: expected_learner_answer_info_dict = { 'id': 'id_1', 'answer': 'This is my answer', @@ -2131,8 +2570,8 @@ def test_to_dict(self): expected_learner_answer_info_dict, self.learner_answer_info.to_dict()) - def test_from_dict(self): - learner_answer_info_dict = { + def test_from_dict(self) -> None: + learner_answer_info_dict: stats_domain.LearnerAnswerInfoDict = { 'id': 'id_1', 'answer': 'This is my answer', 'answer_details': 'This is my answer details', @@ -2148,8 +2587,8 @@ def test_from_dict(self): learner_answer_info.created_on, datetime.datetime(2019, 6, 19, 13, 59, 29, 153073)) - def test_from_dict_to_dict(self): - learner_answer_info_dict = { + def test_from_dict_to_dict(self) -> None: + learner_answer_info_dict: stats_domain.LearnerAnswerInfoDict = { 'id': 'id_1', 'answer': 'This is my answer', 'answer_details': 'This is my answer details', @@ -2167,67 +2606,79 @@ def test_from_dict_to_dict(self): self.assertEqual( learner_answer_info.to_dict(), learner_answer_info_dict) - def test_get_learner_answer_info_dict_size(self): + def test_get_learner_answer_info_dict_size(self) -> None: learner_answer_info_dict_size = ( 
self.learner_answer_info.get_learner_answer_info_dict_size()) self.assertNotEqual(learner_answer_info_dict_size, 0) self.assertTrue(learner_answer_info_dict_size > 0) - def test_get_new_learner_answer_info_id(self): + def test_get_new_learner_answer_info_id(self) -> None: learner_answer_info_id = ( stats_domain.LearnerAnswerInfo.get_new_learner_answer_info_id()) self.assertNotEqual(learner_answer_info_id, None) self.assertTrue(isinstance(learner_answer_info_id, str)) - def test_id_must_be_string(self): - self.learner_answer_info.id = 123 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test id type. + def test_id_must_be_string(self) -> None: + self.learner_answer_info.id = 123 # type: ignore[assignment] self._assert_validation_error( self.learner_answer_info, 'Expected id to be a string') - def test_answer_must_not_be_none(self): + def test_answer_must_not_be_none(self) -> None: self.learner_answer_info.answer = None self._assert_validation_error( self.learner_answer_info, 'The answer submitted by the learner cannot be empty') - def test_answer_must_not_be_empty_dict(self): + def test_answer_must_not_be_empty_dict(self) -> None: self.learner_answer_info.answer = {} self._assert_validation_error( self.learner_answer_info, 'The answer submitted cannot be an empty dict') - def test_answer_must_not_be_empty_string(self): + def test_answer_must_not_be_empty_string(self) -> None: self.learner_answer_info.answer = '' self._assert_validation_error( self.learner_answer_info, 'The answer submitted cannot be an empty string') - def test_answer_details_must_not_be_none(self): - self.learner_answer_info.answer_details = None + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test method that answer_details from learner_answer_info is not None. 
+ def test_answer_details_must_not_be_none(self) -> None: + self.learner_answer_info.answer_details = None # type: ignore[assignment] self._assert_validation_error( self.learner_answer_info, 'Expected answer_details to be a string') - def test_answer_details_must_be_string(self): - self.learner_answer_info.answer_details = 1 + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that answer_details is str. + def test_answer_details_must_be_string(self) -> None: + self.learner_answer_info.answer_details = 1 # type: ignore[assignment] self._assert_validation_error( self.learner_answer_info, 'Expected answer_details to be a string') - def test_answer_details_must_not_be_empty_string(self): + def test_answer_details_must_not_be_empty_string(self) -> None: self.learner_answer_info.answer_details = '' self._assert_validation_error( self.learner_answer_info, 'The answer details submitted cannot be an empty string') - def test_large_answer_details_must_not_be_stored(self): + def test_large_answer_details_must_not_be_stored(self) -> None: self.learner_answer_info.answer_details = 'abcdef' * 2000 self._assert_validation_error( self.learner_answer_info, 'The answer details size is to large to be stored') - def test_created_on_must_be_datetime_type(self): - self.learner_answer_info.created_on = '19 June 2019' + # TODO(#13528): Here we use MyPy ignore because we remove this test after + # the backend is fully type-annotated. Here ignore[assignment] is used to + # test that created_on is a datetime. 
+ def test_created_on_must_be_datetime_type(self) -> None: + self.learner_answer_info.created_on = '19 June 2019' # type: ignore[assignment] self._assert_validation_error( self.learner_answer_info, 'Expected created_on to be a datetime') diff --git a/core/domain/stats_services.py b/core/domain/stats_services.py index ac72757b0ff5..bd099cb7df30 100644 --- a/core/domain/stats_services.py +++ b/core/domain/stats_services.py @@ -21,15 +21,30 @@ import copy import datetime import itertools +import logging from core import feconf from core import utils +from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import question_services from core.domain import stats_domain from core.platform import models -(stats_models,) = models.Registry.import_models([models.NAMES.statistics]) +from typing import ( + Dict, List, Literal, Optional, Sequence, Union, cast, overload +) + +MYPY = False +if MYPY: # pragma: no cover + from core.domain import state_domain + from mypy_imports import base_models + from mypy_imports import stats_models + from mypy_imports import transaction_services + +(base_models, stats_models,) = models.Registry.import_models( + [models.Names.BASE_MODEL, models.Names.STATISTICS] +) transaction_services = models.Registry.import_transaction_services() # NOTE TO DEVELOPERS: The functions: @@ -41,7 +56,60 @@ # to that PR if you need to reinstate them. -def _migrate_to_latest_issue_schema(exp_issue_dict): +@overload +def get_playthrough_models_by_ids( + playthrough_ids: List[str], *, strict: Literal[True] +) -> List[stats_models.PlaythroughModel]: ... + + +@overload +def get_playthrough_models_by_ids( + playthrough_ids: List[str] +) -> List[Optional[stats_models.PlaythroughModel]]: ... + + +@overload +def get_playthrough_models_by_ids( + playthrough_ids: List[str], *, strict: Literal[False] +) -> List[Optional[stats_models.PlaythroughModel]]: ... 
+ + + def get_playthrough_models_by_ids( + playthrough_ids: List[str], strict: bool = False + ) -> Sequence[Optional[stats_models.PlaythroughModel]]: + """Returns a list of playthrough models matching the IDs provided. + + Args: + playthrough_ids: list(str). List of IDs to get playthrough models for. + strict: bool. Whether to fail noisily if no playthrough model + with a given ID exists in the datastore. + + Returns: + list(PlaythroughModel|None). The list of playthrough models + corresponding to given ids. If a PlaythroughModel does not exist, + the corresponding returned list element is None. + + Raises: + Exception. No PlaythroughModel exists for the given playthrough_id. + """ + + playthrough_models = ( + stats_models.PlaythroughModel.get_multi(playthrough_ids)) + + if strict: + for index, playthrough_model in enumerate(playthrough_models): + if playthrough_model is None: + raise Exception( + 'No PlaythroughModel exists for the playthrough_id: %s' + % playthrough_ids[index] + ) + + return playthrough_models + + +def _migrate_to_latest_issue_schema( + exp_issue_dict: stats_domain.ExplorationIssueDict +) -> None: """Holds the responsibility of performing a step-by-step sequential update of an exploration issue dict based on its schema version. If the current issue schema version changes (stats_models.CURRENT_ISSUE_SCHEMA_VERSION), a
If the current action schema version changes (stats_models.CURRENT_ACTION_SCHEMA_VERSION), a new @@ -101,7 +171,9 @@ def _migrate_to_latest_action_schema(learner_action_dict): action_schema_version += 1 -def get_exploration_stats(exp_id, exp_version): +def get_exploration_stats( + exp_id: str, exp_version: int +) -> stats_domain.ExplorationStats: """Retrieves the ExplorationStats domain instance. Args: @@ -121,7 +193,10 @@ def get_exploration_stats(exp_id, exp_version): @transaction_services.run_in_transaction_wrapper def _update_stats_transactional( - exp_id, exp_version, aggregated_stats): + exp_id: str, + exp_version: int, + aggregated_stats: stats_domain.AggregatedStatsDict +) -> None: """Updates ExplorationStatsModel according to the dict containing aggregated stats. The model GET and PUT must be done in a transaction to avoid loss of updates that come in rapid succession. @@ -131,18 +206,38 @@ def _update_stats_transactional( exp_version: int. Version of the exploration. aggregated_stats: dict. Dict representing an ExplorationStatsModel instance with stats aggregated in the frontend. + + Raises: + Exception. ExplorationStatsModel does not exist. """ + exploration = exp_fetchers.get_exploration_by_id(exp_id) + if exploration.version != exp_version: + logging.error( + 'Trying to update stats for version %s of exploration %s, but ' + 'the current version is %s.' 
% ( + exp_version, exp_id, exploration.version)) + return + exp_stats = get_exploration_stats_by_id(exp_id, exp_version) + if exp_stats is None: raise Exception( 'ExplorationStatsModel id="%s.%s" does not exist' % ( exp_id, exp_version)) + try: + stats_domain.SessionStateStats.validate_aggregated_stats_dict( + aggregated_stats) + except utils.ValidationError as e: + logging.exception('Aggregated stats validation failed: %s', e) + return + exp_stats.num_starts_v2 += aggregated_stats['num_starts'] exp_stats.num_completions_v2 += aggregated_stats['num_completions'] exp_stats.num_actual_starts_v2 += aggregated_stats['num_actual_starts'] - for state_name, stats in aggregated_stats['state_stats_mapping'].items(): + state_stats_mapping = aggregated_stats['state_stats_mapping'] + for state_name, stats in state_stats_mapping.items(): if state_name not in exp_stats.state_stats_mapping: # Some events in the past seems to have 'undefined' state names # passed from the frontend code. These are invalid and should be @@ -158,7 +253,11 @@ def _update_stats_transactional( save_stats_model(exp_stats) -def update_stats(exp_id, exp_version, aggregated_stats): +def update_stats( + exp_id: str, + exp_version: int, + aggregated_stats: stats_domain.AggregatedStatsDict +) -> None: """Updates ExplorationStatsModel according to the dict containing aggregated stats. @@ -172,7 +271,9 @@ def update_stats(exp_id, exp_version, aggregated_stats): exp_id, exp_version, aggregated_stats) -def get_stats_for_new_exploration(exp_id, exp_version, state_names): +def get_stats_for_new_exploration( + exp_id: str, exp_version: int, state_names: List[str] +) -> stats_domain.ExplorationStats: """Creates ExplorationStatsModel for the freshly created exploration and sets all initial values to zero. 
@@ -195,7 +296,12 @@ def get_stats_for_new_exploration(exp_id, exp_version, state_names): def get_stats_for_new_exp_version( - exp_id, exp_version, state_names, exp_versions_diff, revert_to_version): + exp_id: str, + exp_version: int, + state_names: List[str], + exp_versions_diff: Optional[exp_domain.ExplorationVersionsDiff], + revert_to_version: Optional[int] +) -> stats_domain.ExplorationStats: """Retrieves the ExplorationStatsModel for the old exp_version and makes any required changes to the structure of the model. Then, a new ExplorationStatsModel is created for the new exp_version. Note: This @@ -233,8 +339,12 @@ def get_stats_for_new_exp_version( def advance_version_of_exp_stats( - exp_version, exp_versions_diff, exp_stats, - reverted_exp_stats, revert_to_version): + exp_version: int, + exp_versions_diff: Optional[exp_domain.ExplorationVersionsDiff], + exp_stats: stats_domain.ExplorationStats, + reverted_exp_stats: Optional[stats_domain.ExplorationStats], + revert_to_version: Optional[int] +) -> stats_domain.ExplorationStats: """Makes required changes to the structure of ExplorationStatsModel of an old exp_version and a new ExplorationStatsModel is created for the new exp_version. Note: This function does not save the newly created model, it @@ -252,6 +362,10 @@ def advance_version_of_exp_stats( Returns: ExplorationStats. The newly created exploration stats object. + + Raises: + Exception. ExplorationVersionsDiff cannot be None when the change + is not a revert. """ # Handling reverts. @@ -273,9 +387,14 @@ def advance_version_of_exp_stats( new_state_name_stats_mapping = {} + if exp_versions_diff is None: + raise Exception( + 'ExplorationVersionsDiff cannot be None when the change is' + ' not a revert.' + ) # Handle unchanged states. 
unchanged_state_names = set(utils.compute_list_difference( - exp_stats.state_stats_mapping, + list(exp_stats.state_stats_mapping.keys()), exp_versions_diff.deleted_state_names + list(exp_versions_diff.new_to_old_state_names.values()))) for state_name in unchanged_state_names: @@ -301,7 +420,10 @@ def advance_version_of_exp_stats( def assign_playthrough_to_corresponding_issue( - playthrough, exp_issues, issue_schema_version): + playthrough: stats_domain.Playthrough, + exp_issues: stats_domain.ExplorationIssues, + issue_schema_version: int +) -> bool: """Stores the given playthrough as a new model into its corresponding exploration issue. When the corresponding exploration issue does not exist, a new one is created. @@ -328,7 +450,10 @@ def assign_playthrough_to_corresponding_issue( def _get_corresponding_exp_issue( - playthrough, exp_issues, issue_schema_version): + playthrough: stats_domain.Playthrough, + exp_issues: stats_domain.ExplorationIssues, + issue_schema_version: int +) -> stats_domain.ExplorationIssue: """Returns the unique exploration issue model expected to own the given playthrough. If it does not exist yet, then it will be created. @@ -360,7 +485,9 @@ def _get_corresponding_exp_issue( return issue -def create_exp_issues_for_new_exploration(exp_id, exp_version): +def create_exp_issues_for_new_exploration( + exp_id: str, exp_version: int +) -> None: """Creates the ExplorationIssuesModel instance for the exploration. 
Args: @@ -370,53 +497,104 @@ def create_exp_issues_for_new_exploration(exp_id, exp_version): stats_models.ExplorationIssuesModel.create(exp_id, exp_version, []) -def update_exp_issues_for_new_exp_version( - exploration, exp_versions_diff, revert_to_version): +def get_updated_exp_issues_models_for_new_exp_version( + exploration: exp_domain.Exploration, + exp_versions_diff: Optional[exp_domain.ExplorationVersionsDiff], + revert_to_version: Optional[int] +) -> List[base_models.BaseModel]: """Retrieves the ExplorationIssuesModel for the old exp_version and makes any required changes to the structure of the model. + Note: This method does not perform put operations on the models. The caller + of this method must do so. + Args: exploration: Exploration. Domain object for the exploration. exp_versions_diff: ExplorationVersionsDiff|None. The domain object for the exploration versions difference, None if it is a revert. revert_to_version: int|None. If the change is a revert, the version. Otherwise, None. + + Raises: + Exception. ExplorationVersionsDiff cannot be None when the change + is not a revert. + + Returns: + list(BaseModel). A list of model instances related to exploration + issues that were updated. 
""" - exp_issues = get_exp_issues(exploration.id, exploration.version - 1) + models_to_put: List[base_models.BaseModel] = [] + exp_issues = get_exp_issues( + exploration.id, exploration.version - 1, strict=False + ) if exp_issues is None: - create_exp_issues_for_new_exploration( - exploration.id, exploration.version - 1) - return + instance_id = stats_models.ExplorationIssuesModel.get_entity_id( + exploration.id, + exploration.version - 1 + ) + models_to_put.append( + stats_models.ExplorationIssuesModel( + id=instance_id, + exp_id=exploration.id, + exp_version=exploration.version, + unresolved_issues=[] + ) + ) + return models_to_put if revert_to_version: old_exp_issues = get_exp_issues(exploration.id, revert_to_version) exp_issues.unresolved_issues = old_exp_issues.unresolved_issues exp_issues.exp_version = exploration.version + 1 - create_exp_issues_model(exp_issues) - return + models_to_put.append( + get_exp_issues_model_from_domain_object(exp_issues) + ) + return models_to_put + + if exp_versions_diff is None: + raise Exception( + 'ExplorationVersionsDiff cannot be None when the change is' + ' not a revert.' 
+ ) deleted_state_names = exp_versions_diff.deleted_state_names old_to_new_state_names = exp_versions_diff.old_to_new_state_names playthrough_ids = list(itertools.chain.from_iterable( issue.playthrough_ids for issue in exp_issues.unresolved_issues)) - playthrough_models = ( - stats_models.PlaythroughModel.get_multi(playthrough_ids)) + playthrough_models = get_playthrough_models_by_ids( + playthrough_ids, strict=True + ) + updated_playthrough_models = [] for playthrough_model in playthrough_models: playthrough = get_playthrough_from_model(playthrough_model) if 'state_names' in playthrough.issue_customization_args: - state_names = ( - playthrough.issue_customization_args['state_names']['value']) + # Here we use cast because we need to narrow down the type from + # various allowed issue customization arg types to List[str] type, + # and here we are sure that the type is always going to be List[str] + # because above 'if' condition forces 'state_names' issue + # customization arg to have values of type List[str]. + state_names = cast( + List[str], + playthrough.issue_customization_args['state_names']['value'] + ) playthrough.issue_customization_args['state_names']['value'] = [ state_name if state_name not in old_to_new_state_names else old_to_new_state_names[state_name] for state_name in state_names ] if 'state_name' in playthrough.issue_customization_args: - state_name = ( - playthrough.issue_customization_args['state_name']['value']) + # Here we use cast because we need to narrow down the type from + # various allowed issue customization arg types to str type, and + # here we are sure that the type is always going to be str because + # above 'if' condition forces 'state_name' issue customization arg + # to have values of type str. 
+ state_name = cast( + str, + playthrough.issue_customization_args['state_name']['value'] + ) playthrough.issue_customization_args['state_name']['value'] = ( state_name if state_name not in old_to_new_state_names else old_to_new_state_names[state_name]) @@ -425,14 +603,28 @@ def update_exp_issues_for_new_exp_version( action_customization_args = action.action_customization_args if 'state_name' in action_customization_args: - state_name = action_customization_args['state_name']['value'] + # Here we use cast because we need to narrow down the type from + # various allowed action customization arg types to str type, + # and here we are sure that the type is always going to be str + # because above 'if' condition forces 'state_name' action + # customization arg to have values of type str. + state_name = cast( + str, action_customization_args['state_name']['value'] + ) action_customization_args['state_name']['value'] = ( state_name if state_name not in old_to_new_state_names else old_to_new_state_names[state_name]) if 'dest_state_name' in action_customization_args: - dest_state_name = ( - action_customization_args['dest_state_name']['value']) + # Here we use cast because we need to narrow down the type from + # various allowed action customization arg types to str type, + # and here we are sure that the type is always going to be str + # because above 'if' condition forces 'dest_state_name' action + # customization arg to have values of type str. 
+ dest_state_name = cast( + str, + action_customization_args['dest_state_name']['value'] + ) action_customization_args['dest_state_name']['value'] = ( dest_state_name if dest_state_name not in old_to_new_state_names else @@ -442,15 +634,21 @@ def update_exp_issues_for_new_exp_version( playthrough.issue_customization_args) playthrough_model.actions = [ action.to_dict() for action in playthrough.actions] + updated_playthrough_models.append(playthrough_model) - stats_models.PlaythroughModel.update_timestamps_multi(playthrough_models) - stats_models.PlaythroughModel.put_multi(playthrough_models) + models_to_put.extend(updated_playthrough_models) for exp_issue in exp_issues.unresolved_issues: if 'state_names' in exp_issue.issue_customization_args: - state_names = ( - exp_issue.issue_customization_args['state_names']['value']) - + # Here we use cast because we need to narrow down the type from + # various allowed issue customization arg types to List[str] type, + # and here we are sure that the type is always going to be List[str] + # because above 'if' condition forces 'state_names' issue + # customization arg to have values of type List[str]. + state_names = cast( + List[str], + exp_issue.issue_customization_args['state_names']['value'] + ) if any(name in deleted_state_names for name in state_names): exp_issue.is_valid = False @@ -461,9 +659,15 @@ def update_exp_issues_for_new_exp_version( ] if 'state_name' in exp_issue.issue_customization_args: - state_name = ( - exp_issue.issue_customization_args['state_name']['value']) - + # Here we use cast because we need to narrow down the type from + # various allowed issue customization arg types to str type, and + # here we are sure that the type is always going to be str because + # above 'if' condition forces 'state_name' issue customization arg + # to have values of type str. 
+ state_name = cast( + str, + exp_issue.issue_customization_args['state_name']['value'] + ) if state_name in deleted_state_names: exp_issue.is_valid = False @@ -472,29 +676,67 @@ def update_exp_issues_for_new_exp_version( old_to_new_state_names[state_name]) exp_issues.exp_version += 1 - create_exp_issues_model(exp_issues) + models_to_put.append(get_exp_issues_model_from_domain_object(exp_issues)) + return models_to_put + + +@overload +def get_exp_issues( + exp_id: str, exp_version: int +) -> stats_domain.ExplorationIssues: ... + + +@overload +def get_exp_issues( + exp_id: str, exp_version: int, *, strict: Literal[True] +) -> stats_domain.ExplorationIssues: ... -def get_exp_issues(exp_id, exp_version): +@overload +def get_exp_issues( + exp_id: str, exp_version: int, *, strict: Literal[False] +) -> Optional[stats_domain.ExplorationIssues]: ... + + +@overload +def get_exp_issues( + exp_id: str, exp_version: int, *, strict: bool = ... +) -> Optional[stats_domain.ExplorationIssues]: ... + + +def get_exp_issues( + exp_id: str, exp_version: int, strict: bool = True +) -> Optional[stats_domain.ExplorationIssues]: """Retrieves the ExplorationIssues domain object. Args: exp_id: str. ID of the exploration. exp_version: int. Version of the exploration. + strict: bool. Fails noisily if the model doesn't exist. Returns: ExplorationIssues|None. The domain object for exploration issues or None if the exp_id is invalid. + + Raises: + Exception. No ExplorationIssues model found for the given exp_id. 
""" - exp_issues = None exp_issues_model = stats_models.ExplorationIssuesModel.get_model( exp_id, exp_version) - if exp_issues_model is not None: - exp_issues = get_exp_issues_from_model(exp_issues_model) - return exp_issues + if exp_issues_model is None: + if not strict: + return None + raise Exception( + 'No ExplorationIssues model found for the given exp_id: %s' % + exp_id + ) + + return get_exp_issues_from_model(exp_issues_model) -def get_playthrough_by_id(playthrough_id): +def get_playthrough_by_id( + playthrough_id: str +) -> Optional[stats_domain.Playthrough]: """Retrieves the Playthrough domain object. Args: @@ -506,10 +748,15 @@ def get_playthrough_by_id(playthrough_id): """ playthrough_model = ( stats_models.PlaythroughModel.get(playthrough_id, strict=False)) - return playthrough_model and get_playthrough_from_model(playthrough_model) + if playthrough_model is None: + return None + return get_playthrough_from_model(playthrough_model) -def get_exploration_stats_by_id(exp_id, exp_version): + +def get_exploration_stats_by_id( + exp_id: str, exp_version: int +) -> Optional[stats_domain.ExplorationStats]: """Retrieves the ExplorationStats domain object. Args: @@ -517,7 +764,8 @@ def get_exploration_stats_by_id(exp_id, exp_version): exp_version: int. Version of the exploration. Returns: - ExplorationStats. The domain object for exploration statistics. + ExplorationStats|None. The domain object for exploration statistics, or + None if no ExplorationStatsModel exists for the given id. Raises: Exception. Entity for class ExplorationStatsModel with id not found. @@ -531,7 +779,9 @@ def get_exploration_stats_by_id(exp_id, exp_version): return exploration_stats -def get_multiple_exploration_stats_by_version(exp_id, version_numbers): +def get_multiple_exploration_stats_by_version( + exp_id: str, version_numbers: List[int] +) -> List[Optional[stats_domain.ExplorationStats]]: """Returns a list of ExplorationStats domain objects corresponding to the specified versions. 
@@ -554,7 +804,9 @@ def get_multiple_exploration_stats_by_version(exp_id, version_numbers): return exploration_stats -def get_exp_issues_from_model(exp_issues_model): +def get_exp_issues_from_model( + exp_issues_model: stats_models.ExplorationIssuesModel +) -> stats_domain.ExplorationIssues: """Gets an ExplorationIssues domain object from an ExplorationIssuesModel instance. @@ -576,7 +828,9 @@ def get_exp_issues_from_model(exp_issues_model): unresolved_issues) -def get_exploration_stats_from_model(exploration_stats_model): +def get_exploration_stats_from_model( + exploration_stats_model: stats_models.ExplorationStatsModel +) -> stats_domain.ExplorationStats: """Gets an ExplorationStats domain object from an ExplorationStatsModel instance. @@ -604,7 +858,9 @@ def get_exploration_stats_from_model(exploration_stats_model): new_state_stats_mapping) -def get_playthrough_from_model(playthrough_model): +def get_playthrough_from_model( + playthrough_model: stats_models.PlaythroughModel +) -> stats_domain.Playthrough: """Gets a PlaythroughModel domain object from a PlaythroughModel instance. Args: @@ -623,7 +879,26 @@ def get_playthrough_from_model(playthrough_model): playthrough_model.issue_customization_args, actions) -def create_stats_model(exploration_stats): +def get_state_stats_mapping( + exploration_stats: stats_domain.ExplorationStats +) -> Dict[str, Dict[str, int]]: + """Returns the state stats mapping of the given exploration stats. + + Args: + exploration_stats: ExplorationStats. Exploration statistics domain + object. + + Returns: + dict. The state stats mapping of the given exploration stats. 
+ """ + new_state_stats_mapping = { + state_name: exploration_stats.state_stats_mapping[state_name].to_dict() + for state_name in exploration_stats.state_stats_mapping + } + return new_state_stats_mapping + + +def create_stats_model(exploration_stats: stats_domain.ExplorationStats) -> str: """Creates an ExplorationStatsModel in datastore given an ExplorationStats domain object. @@ -634,10 +909,7 @@ def create_stats_model(exploration_stats): Returns: str. ID of the datastore instance for ExplorationStatsModel. """ - new_state_stats_mapping = { - state_name: exploration_stats.state_stats_mapping[state_name].to_dict() - for state_name in exploration_stats.state_stats_mapping - } + new_state_stats_mapping = get_state_stats_mapping(exploration_stats) instance_id = stats_models.ExplorationStatsModel.create( exploration_stats.exp_id, exploration_stats.exp_version, @@ -652,13 +924,18 @@ def create_stats_model(exploration_stats): return instance_id -def save_stats_model(exploration_stats): +def save_stats_model( + exploration_stats: stats_domain.ExplorationStats +) -> None: """Updates the ExplorationStatsModel datastore instance with the passed ExplorationStats domain object. Args: exploration_stats: ExplorationStats. The exploration statistics domain object. + + Raises: + Exception. No exploration stats model exists for the given exp_id. """ new_state_stats_mapping = { state_name: exploration_stats.state_stats_mapping[state_name].to_dict() @@ -668,6 +945,11 @@ def save_stats_model(exploration_stats): exploration_stats_model = stats_models.ExplorationStatsModel.get_model( exploration_stats.exp_id, exploration_stats.exp_version) + if exploration_stats_model is None: + raise Exception( + 'No exploration stats model exists for the given exp_id.' 
+ ) + exploration_stats_model.num_starts_v1 = exploration_stats.num_starts_v1 exploration_stats_model.num_starts_v2 = exploration_stats.num_starts_v2 exploration_stats_model.num_actual_starts_v1 = ( @@ -684,20 +966,33 @@ def save_stats_model(exploration_stats): exploration_stats_model.put() -def create_exp_issues_model(exp_issues): - """Creates a new ExplorationIssuesModel in the datastore. +def get_exp_issues_model_from_domain_object( + exp_issues: stats_domain.ExplorationIssues +) -> stats_models.ExplorationIssuesModel: + """Creates a new ExplorationIssuesModel instance. Args: exp_issues: ExplorationIssues. The exploration issues domain object. + + Returns: + ExplorationIssuesModel. The ExplorationIssuesModel. """ unresolved_issues_dicts = [ unresolved_issue.to_dict() for unresolved_issue in exp_issues.unresolved_issues] - stats_models.ExplorationIssuesModel.create( - exp_issues.exp_id, exp_issues.exp_version, unresolved_issues_dicts) + instance_id = stats_models.ExplorationIssuesModel.get_entity_id( + exp_issues.exp_id, + exp_issues.exp_version + ) + return stats_models.ExplorationIssuesModel( + id=instance_id, + exp_id=exp_issues.exp_id, + exp_version=exp_issues.exp_version, + unresolved_issues=unresolved_issues_dicts + ) -def save_exp_issues_model(exp_issues): +def save_exp_issues_model(exp_issues: stats_domain.ExplorationIssues) -> None: """Updates the ExplorationIssuesModel datastore instance with the passed ExplorationIssues domain object. @@ -706,11 +1001,15 @@ def save_exp_issues_model(exp_issues): """ @transaction_services.run_in_transaction_wrapper - def _save_exp_issues_model_transactional(): + def _save_exp_issues_model_transactional() -> None: """Implementation to be run in a transaction.""" exp_issues_model = stats_models.ExplorationIssuesModel.get_model( exp_issues.exp_id, exp_issues.exp_version) + if exp_issues_model is None: + raise Exception( + 'No ExplorationIssuesModel exists for the given exploration id.' 
+ ) exp_issues_model.exp_version = exp_issues.exp_version exp_issues_model.unresolved_issues = [ issue.to_dict() for issue in exp_issues.unresolved_issues] @@ -722,7 +1021,9 @@ def _save_exp_issues_model_transactional(): _save_exp_issues_model_transactional() -def get_exploration_stats_multi(exp_version_references): +def get_exploration_stats_multi( + exp_version_references: List[exp_domain.ExpVersionReference] +) -> List[stats_domain.ExplorationStats]: """Retrieves the exploration stats for the given explorations. Args: @@ -751,7 +1052,7 @@ def get_exploration_stats_multi(exp_version_references): return exploration_stats_list -def delete_playthroughs_multi(playthrough_ids): +def delete_playthroughs_multi(playthrough_ids: List[str]) -> None: """Deletes multiple playthrough instances. Args: @@ -759,10 +1060,15 @@ def delete_playthroughs_multi(playthrough_ids): """ @transaction_services.run_in_transaction_wrapper - def _delete_playthroughs_multi_transactional(): + def _delete_playthroughs_multi_transactional() -> None: """Implementation to be run in a transaction.""" - stats_models.PlaythroughModel.delete_multi( - stats_models.PlaythroughModel.get_multi(playthrough_ids)) + playthrough_models = get_playthrough_models_by_ids( + playthrough_ids, strict=True + ) + filtered_playthrough_models = [] + for playthrough_model in playthrough_models: + filtered_playthrough_models.append(playthrough_model) + stats_models.PlaythroughModel.delete_multi(filtered_playthrough_models) # Run in transaction to help prevent data-races between concurrent # operations that may update the playthroughs being deleted. 
@@ -770,8 +1076,12 @@ def _delete_playthroughs_multi_transactional(): def record_answer( - exploration_id, exploration_version, state_name, interaction_id, - submitted_answer): + exploration_id: str, + exploration_version: int, + state_name: str, + interaction_id: str, + submitted_answer: stats_domain.SubmittedAnswer +) -> None: """Record an answer by storing it to the corresponding StateAnswers entity. Args: @@ -787,8 +1097,12 @@ def record_answer( def record_answers( - exploration_id, exploration_version, state_name, interaction_id, - submitted_answer_list): + exploration_id: str, + exploration_version: int, + state_name: str, + interaction_id: str, + submitted_answer_list: List[stats_domain.SubmittedAnswer] +) -> None: """Optimally record a group of answers using an already loaded exploration. The submitted_answer_list is a list of SubmittedAnswer domain objects. @@ -812,7 +1126,11 @@ def record_answers( state_answers.get_submitted_answer_dict_list()) -def get_state_answers(exploration_id, exploration_version, state_name): +def get_state_answers( + exploration_id: str, + exploration_version: int, + state_name: str +) -> Optional[stats_domain.StateAnswers]: """Returns a StateAnswers object containing all answers associated with the specified exploration state, or None if no such answers have yet been submitted. @@ -844,7 +1162,11 @@ def get_state_answers(exploration_id, exploration_version, state_name): return None -def get_sample_answers(exploration_id, exploration_version, state_name): +def get_sample_answers( + exploration_id: str, + exploration_version: int, + state_name: str +) -> List[state_domain.AcceptableCorrectAnswerTypes]: """Fetches a list of sample answers that were submitted to the specified exploration state (at the given version of the exploration). 
@@ -872,7 +1194,7 @@ def get_sample_answers(exploration_id, exploration_version, state_name): for submitted_answer_dict in sample_answers] -def get_state_reference_for_exploration(exp_id, state_name): +def get_state_reference_for_exploration(exp_id: str, state_name: str) -> str: """Returns the generated state reference for the given exploration id and state name. @@ -893,7 +1215,7 @@ def get_state_reference_for_exploration(exp_id, state_name): .get_state_reference_for_exploration(exp_id, state_name)) -def get_state_reference_for_question(question_id): +def get_state_reference_for_question(question_id: str) -> str: """Returns the generated state reference for the given question id. Args: @@ -912,7 +1234,9 @@ def get_state_reference_for_question(question_id): .get_state_reference_for_question(question_id)) -def get_learner_answer_details_from_model(learner_answer_details_model): +def get_learner_answer_details_from_model( + learner_answer_details_model: stats_models.LearnerAnswerDetailsModel +) -> Optional[stats_domain.LearnerAnswerDetails]: """Returns a LearnerAnswerDetails domain object given a LearnerAnswerDetailsModel loaded from the datastore. @@ -935,7 +1259,9 @@ def get_learner_answer_details_from_model(learner_answer_details_model): learner_answer_details_model.accumulated_answer_info_json_size_bytes) -def get_learner_answer_details(entity_type, state_reference): +def get_learner_answer_details( + entity_type: str, state_reference: str +) -> Optional[stats_domain.LearnerAnswerDetails]: """Returns a LearnerAnswerDetails domain object, with given entity_type and state_name. This function checks in the datastore if the corresponding LearnerAnswerDetailsModel exists, if not then None is returned. @@ -949,8 +1275,8 @@ def get_learner_answer_details(entity_type, state_reference): 'question_id'. Returns: - LearnerAnswerDetails. The learner answer domain object or None if the - model does not exist. + Optional[LearnerAnswerDetails]. 
The learner answer domain object or + None if the model does not exist. """ learner_answer_details_model = ( stats_models.LearnerAnswerDetailsModel.get_model_instance( @@ -962,7 +1288,9 @@ def get_learner_answer_details(entity_type, state_reference): return None -def create_learner_answer_details_model_instance(learner_answer_details): +def create_learner_answer_details_model_instance( + learner_answer_details: stats_domain.LearnerAnswerDetails +) -> None: """Creates a new model instance from the given LearnerAnswerDetails domain object. @@ -980,7 +1308,10 @@ def create_learner_answer_details_model_instance(learner_answer_details): def save_learner_answer_details( - entity_type, state_reference, learner_answer_details): + entity_type: str, + state_reference: str, + learner_answer_details: stats_domain.LearnerAnswerDetails +) -> None: """Saves the LearnerAnswerDetails domain object in the datatstore, if the model instance with the given entity_type and state_reference is found and if the instance id of the model doesn't matches with the generated instance @@ -1022,7 +1353,12 @@ def save_learner_answer_details( def record_learner_answer_info( - entity_type, state_reference, interaction_id, answer, answer_details): + entity_type: str, + state_reference: str, + interaction_id: str, + answer: Union[str, int, Dict[str, str], List[str]], + answer_details: str +) -> None: """Records the new learner answer info received from the learner in the model and then saves it. @@ -1056,7 +1392,10 @@ def record_learner_answer_info( def delete_learner_answer_info( - entity_type, state_reference, learner_answer_info_id): + entity_type: str, + state_reference: str, + learner_answer_info_id: str +) -> None: """Deletes the learner answer info in the model, and then saves it. 
Args: @@ -1082,7 +1421,10 @@ def delete_learner_answer_info( def update_state_reference( - entity_type, old_state_reference, new_state_reference): + entity_type: str, + old_state_reference: str, + new_state_reference: str +) -> None: """Updates the state_reference field of the LearnerAnswerDetails model instance with the new_state_reference received and then saves the instance in the datastore. @@ -1107,7 +1449,8 @@ def update_state_reference( def delete_learner_answer_details_for_exploration_state( - exp_id, state_name): + exp_id: str, state_name: str +) -> None: """Deletes the LearnerAnswerDetailsModel corresponding to the given exploration ID and state name. @@ -1126,7 +1469,9 @@ def delete_learner_answer_details_for_exploration_state( learner_answer_details_model.delete() -def delete_learner_answer_details_for_question_state(question_id): +def delete_learner_answer_details_for_question_state( + question_id: str +) -> None: """Deletes the LearnerAnswerDetailsModel for the given question ID. 
Args: diff --git a/core/domain/stats_services_test.py b/core/domain/stats_services_test.py index 23ae51837655..dc413535d96a 100644 --- a/core/domain/stats_services_test.py +++ b/core/domain/stats_services_test.py @@ -21,7 +21,6 @@ import os from core import feconf -from core import python_utils from core import utils from core.domain import event_services from core.domain import exp_domain @@ -30,10 +29,19 @@ from core.domain import question_services from core.domain import stats_domain from core.domain import stats_services +from core.domain import translation_domain from core.platform import models from core.tests import test_utils -(stats_models,) = models.Registry.import_models([models.NAMES.statistics]) +from typing import Dict, Final, List, Optional, Tuple, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import stats_models + +(stats_models,) = models.Registry.import_models([models.Names.STATISTICS]) +datastore_services = models.Registry.import_datastore_services() class StatisticsServicesTests(test_utils.GenericTestBase): @@ -41,8 +49,8 @@ class StatisticsServicesTests(test_utils.GenericTestBase): module. 
""" - def setUp(self): - super(StatisticsServicesTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.exp_id = 'exp_id1' self.exp_version = 1 self.stats_model_id = ( @@ -52,8 +60,22 @@ def setUp(self): self.exp_id, self.exp_version, []) self.playthrough_id = stats_models.PlaythroughModel.create( 'exp_id1', 1, 'EarlyQuit', {}, []) - - def test_get_exploration_stats_with_new_exp_id(self): + self.save_new_valid_exploration( + self.exp_id, 'admin', title='Title 1', end_state_name='End', + correctness_feedback_enabled=True) + + def test_raises_error_if_playthrough_model_fetched_with_invalid_id_and_strict( # pylint: disable=line-too-long + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'No PlaythroughModel exists for the playthrough_id: invalid_id' + ): + stats_services.get_playthrough_models_by_ids( + ['invalid_id'], strict=True + ) + + def test_get_exploration_stats_with_new_exp_id(self) -> None: exploration_stats = stats_services.get_exploration_stats( 'new_exp_id', 1) @@ -61,10 +83,12 @@ def test_get_exploration_stats_with_new_exp_id(self): self.assertEqual(exploration_stats.exp_id, 'new_exp_id') self.assertEqual(exploration_stats.state_stats_mapping, {}) - def test_update_stats_method(self): + def test_update_stats_method(self) -> None: """Test the update_stats method.""" exploration_stats = stats_services.get_exploration_stats_by_id( 'exp_id1', 1) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None exploration_stats.state_stats_mapping = { 'Home': stats_domain.StateStats.create_default(), '🙂': stats_domain.StateStats.create_default(), @@ -73,7 +97,7 @@ def test_update_stats_method(self): # Pass in exploration start event to stats model created in setup # function. 
- aggregated_stats = { + aggregated_stats: stats_domain.AggregatedStatsDict = { 'num_starts': 1, 'num_actual_starts': 1, 'num_completions': 1, @@ -100,6 +124,8 @@ def test_update_stats_method(self): stats_services.update_stats('exp_id1', 1, aggregated_stats) exploration_stats = stats_services.get_exploration_stats_by_id( 'exp_id1', 1) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None self.assertEqual(exploration_stats.num_starts_v2, 1) self.assertEqual(exploration_stats.num_actual_starts_v2, 1) self.assertEqual(exploration_stats.num_completions_v2, 1) @@ -140,9 +166,39 @@ def test_update_stats_method(self): exploration_stats.state_stats_mapping[ '🙂'].num_times_solution_viewed_v2, 1) - def test_update_stats_throws_if_model_is_missing_entirely(self): + def test_update_stats_throws_if_exp_version_is_not_latest(self) -> None: + """Test the update_stats method.""" + aggregated_stats: stats_domain.AggregatedStatsDict = { + 'num_starts': 1, + 'num_actual_starts': 1, + 'num_completions': 1, + 'state_stats_mapping': { + 'Home': { + 'total_hit_count': 1, + 'first_hit_count': 1, + 'total_answers_count': 1, + 'useful_feedback_count': 1, + 'num_times_solution_viewed': 1, + 'num_completions': 1 + }, + } + } + + exploration_stats = stats_services.get_exploration_stats_by_id( + 'exp_id1', 2) + self.assertEqual(exploration_stats, None) + + stats_services.update_stats('exp_id1', 2, aggregated_stats) + + exploration_stats = stats_services.get_exploration_stats_by_id( + 'exp_id1', 2) + self.assertEqual(exploration_stats, None) + + def test_update_stats_throws_if_stats_model_is_missing_entirely( + self + ) -> None: """Test the update_stats method.""" - aggregated_stats = { + aggregated_stats: stats_domain.AggregatedStatsDict = { 'num_starts': 1, 'num_actual_starts': 1, 'num_completions': 1, @@ -157,20 +213,32 @@ def test_update_stats_throws_if_model_is_missing_entirely(self): }, } } + stats_model = 
stats_models.ExplorationStatsModel.get_model('exp_id1', 1) + # Ruling out the possibility of None for mypy type checking. + assert stats_model is not None + stats_model.delete() + exploration_stats = stats_services.get_exploration_stats_by_id( + 'exp_id1', 1) + self.assertEqual(exploration_stats, None) - with self.assertRaisesRegexp(Exception, 'id="nullid.1" does not exist'): - stats_services.update_stats('nullid', 1, aggregated_stats) + with self.assertRaisesRegex( + Exception, + 'ExplorationStatsModel id="exp_id1.1" does not exist' + ): + stats_services.update_stats('exp_id1', 1, aggregated_stats) - def test_update_stats_throws_if_model_is_missing_state_stats(self): + def test_update_stats_throws_if_model_is_missing_state_stats(self) -> None: """Test the update_stats method.""" exploration_stats = stats_services.get_exploration_stats_by_id( 'exp_id1', 1) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None exploration_stats.state_stats_mapping = { 'Home': stats_domain.StateStats.create_default() } stats_services.save_stats_model(exploration_stats) - aggregated_stats = { + aggregated_stats: stats_domain.AggregatedStatsDict = { 'num_starts': 1, 'num_actual_starts': 1, 'num_completions': 1, @@ -194,12 +262,56 @@ def test_update_stats_throws_if_model_is_missing_state_stats(self): } } - with self.assertRaisesRegexp(Exception, 'does not exist'): + with self.assertRaisesRegex(Exception, 'does not exist'): stats_services.update_stats('exp_id1', 1, aggregated_stats) - def test_update_stats_returns_if_state_name_is_undefined(self): + def test_update_stats_returns_if_state_name_is_undefined(self) -> None: """Tests that the update_stats returns if a state name is undefined.""" - aggregated_stats = { + # Here we use MyPy ignore because AggregatedStatsDict can only accept + # Dict[str, int] but for testing purpose here we are providing str for + # one of the value which causes MyPy to throw an error. 
 Thus to avoid + the error, we used ignore here. + aggregated_stats: stats_domain.AggregatedStatsDict = { + 'num_starts': '1', # type: ignore[typeddict-item] + 'num_actual_starts': 1, + 'num_completions': 1, + 'state_stats_mapping': { + 'undefined': { + 'total_hit_count': 1, + 'first_hit_count': 1, + 'total_answers_count': 1, + 'useful_feedback_count': 1, + 'num_times_solution_viewed': 1, + 'num_completions': 1 + }, + } + } + + exploration_stats = stats_services.get_exploration_stats_by_id( + 'exp_id1', 1) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None + self.assertEqual(exploration_stats.state_stats_mapping, { + 'End': stats_domain.StateStats.create_default(), + 'Introduction': stats_domain.StateStats.create_default() + }) + + stats_services.update_stats('exp_id1', 1, aggregated_stats) + + exploration_stats = stats_services.get_exploration_stats_by_id( + 'exp_id1', 1) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None + self.assertEqual(exploration_stats.state_stats_mapping, { + 'End': stats_domain.StateStats.create_default(), + 'Introduction': stats_domain.StateStats.create_default() + }) + + def test_update_stats_returns_if_aggregated_stats_type_is_invalid( + self + ) -> None: + """Tests that update_stats returns if the aggregated stats dict has an invalid type.""" + aggregated_stats: stats_domain.AggregatedStatsDict = { 'num_starts': 1, 'num_actual_starts': 1, 'num_completions': 1, @@ -217,25 +329,39 @@ def test_update_stats_returns_if_state_name_is_undefined(self): exploration_stats = stats_services.get_exploration_stats_by_id( 'exp_id1', 1) - self.assertEqual(exploration_stats.state_stats_mapping, {}) + # Ruling out the possibility of None for mypy type checking.
+ assert exploration_stats is not None + self.assertEqual(exploration_stats.state_stats_mapping, { + 'End': stats_domain.StateStats.create_default(), + 'Introduction': stats_domain.StateStats.create_default() + }) stats_services.update_stats('exp_id1', 1, aggregated_stats) exploration_stats = stats_services.get_exploration_stats_by_id( 'exp_id1', 1) - self.assertEqual(exploration_stats.state_stats_mapping, {}) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None + self.assertEqual(exploration_stats.state_stats_mapping, { + 'End': stats_domain.StateStats.create_default(), + 'Introduction': stats_domain.StateStats.create_default() + }) - def test_update_stats_throws_if_model_is_using_unicode_state_name(self): + def test_update_stats_throws_if_model_is_using_unicode_state_name( + self + ) -> None: """Test the update_stats method.""" exploration_stats = stats_services.get_exploration_stats_by_id( 'exp_id1', 1) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None exploration_stats.state_stats_mapping = { 'Home': stats_domain.StateStats.create_default(), # No stats for '🙂'. 
} stats_services.save_stats_model(exploration_stats) - aggregated_stats = { + aggregated_stats: stats_domain.AggregatedStatsDict = { 'num_starts': 1, 'num_actual_starts': 1, 'num_completions': 1, @@ -259,10 +385,10 @@ def test_update_stats_throws_if_model_is_using_unicode_state_name(self): } } - with self.assertRaisesRegexp(Exception, 'does not exist'): + with self.assertRaisesRegex(Exception, 'does not exist'): stats_services.update_stats('exp_id1', 1, aggregated_stats) - def test_calls_to_stats_methods(self): + def test_calls_to_stats_methods(self) -> None: """Test that calls are being made to the get_stats_for_new_exp_version and get_stats_for_new_exploration methods when an exploration is @@ -279,7 +405,7 @@ def test_calls_to_stats_methods(self): test_exp_filepath = os.path.join( feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') yaml_content = utils.get_file_contents(test_exp_filepath) - assets_list = [] + assets_list: List[Tuple[str, bytes]] = [] with self.swap( stats_services, 'get_stats_for_new_exploration', stats_for_new_exploration_log): @@ -293,9 +419,25 @@ def test_calls_to_stats_methods(self): self.assertEqual(stats_for_new_exp_version_log.times_called, 0) # Update exploration by adding a state. 
+ exploration = exp_fetchers.get_exploration_by_id(exp_id) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) change_list = [exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'New state' + 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })] with self.swap( stats_services, 'get_stats_for_new_exp_version', @@ -309,37 +451,55 @@ def test_calls_to_stats_methods(self): self.assertEqual(stats_for_new_exploration_log.times_called, 1) self.assertEqual(stats_for_new_exp_version_log.times_called, 1) - def test_get_stats_for_new_exploration(self): + def test_get_stats_for_new_exploration(self) -> None: """Test the get_stats_for_new_exploration method.""" # Create exploration object in datastore. 
exp_id = 'exp_id' test_exp_filepath = os.path.join( feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') yaml_content = utils.get_file_contents(test_exp_filepath) - assets_list = [] + assets_list: List[Tuple[str, bytes]] = [] exp_services.save_new_exploration_from_yaml_and_assets( feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id, assets_list) exploration = exp_fetchers.get_exploration_by_id(exp_id) - exploration_stats = stats_services.get_stats_for_new_exploration( - exploration.id, exploration.version, exploration.states) - stats_services.create_stats_model(exploration_stats) + exploration_stats_for_new_exploration = ( + stats_services.get_stats_for_new_exploration( + exploration.id, + exploration.version, + list(exploration.states.keys()) + ) + ) + stats_services.create_stats_model( + exploration_stats_for_new_exploration + ) - exploration_stats = stats_services.get_exploration_stats_by_id( - exploration.id, exploration.version) - self.assertEqual(exploration_stats.exp_id, exp_id) - self.assertEqual(exploration_stats.exp_version, 1) - self.assertEqual(exploration_stats.num_starts_v1, 0) - self.assertEqual(exploration_stats.num_starts_v2, 0) - self.assertEqual(exploration_stats.num_actual_starts_v1, 0) - self.assertEqual(exploration_stats.num_actual_starts_v2, 0) - self.assertEqual(exploration_stats.num_completions_v1, 0) - self.assertEqual(exploration_stats.num_completions_v2, 0) + newly_created_exploration_stats = ( + stats_services.get_exploration_stats_by_id( + exploration.id, exploration.version + ) + ) + # Ruling out the possibility of None for mypy type checking. 
+ assert newly_created_exploration_stats is not None + self.assertEqual(newly_created_exploration_stats.exp_id, exp_id) + self.assertEqual(newly_created_exploration_stats.exp_version, 1) + self.assertEqual(newly_created_exploration_stats.num_starts_v1, 0) + self.assertEqual(newly_created_exploration_stats.num_starts_v2, 0) + self.assertEqual( + newly_created_exploration_stats.num_actual_starts_v1, 0 + ) self.assertEqual( - list(exploration_stats.state_stats_mapping.keys()), ['Home', 'End']) + newly_created_exploration_stats.num_actual_starts_v2, 0 + ) + self.assertEqual(newly_created_exploration_stats.num_completions_v1, 0) + self.assertEqual(newly_created_exploration_stats.num_completions_v2, 0) + self.assertEqual( + list(newly_created_exploration_stats.state_stats_mapping.keys()), + ['Home', 'End'] + ) - def test_revert_exploration_creates_stats(self): + def test_revert_exploration_creates_stats(self) -> None: """Test that the revert_exploration method creates stats for the newest exploration version. """ @@ -348,7 +508,7 @@ def test_revert_exploration_creates_stats(self): test_exp_filepath = os.path.join( feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') yaml_content = utils.get_file_contents(test_exp_filepath) - assets_list = [] + assets_list: List[Tuple[str, bytes]] = [] exp_services.save_new_exploration_from_yaml_and_assets( feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id, assets_list) @@ -357,6 +517,8 @@ def test_revert_exploration_creates_stats(self): # Save stats for version 1. exploration_stats = stats_services.get_exploration_stats_by_id( exp_id, 1) + # Ruling out the possibility of None for mypy type checking. 
+ assert exploration_stats is not None exploration_stats.num_starts_v2 = 3 exploration_stats.num_actual_starts_v2 = 2 exploration_stats.num_completions_v2 = 1 @@ -367,6 +529,8 @@ def test_revert_exploration_creates_stats(self): 'committer_id_v2', exploration.id, [], 'Updated') exploration_stats = stats_services.get_exploration_stats_by_id( exp_id, 2) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None exploration_stats.num_starts_v2 = 4 exploration_stats.num_actual_starts_v2 = 3 exploration_stats.num_completions_v2 = 2 @@ -378,12 +542,12 @@ def test_revert_exploration_creates_stats(self): exploration_stats = stats_services.get_exploration_stats_by_id( exp_id, 3 ) - self.assertIsNotNone(exploration_stats) + assert exploration_stats is not None self.assertEqual(exploration_stats.num_starts_v2, 3) self.assertEqual(exploration_stats.num_actual_starts_v2, 2) self.assertEqual(exploration_stats.num_completions_v2, 1) - def test_get_stats_for_new_exp_creates_new_stats(self): + def test_get_stats_for_new_exp_creates_new_stats(self) -> None: new_stats = stats_services.get_stats_for_new_exp_version( 'exp_id', 1, [], None, None) @@ -391,18 +555,73 @@ def test_get_stats_for_new_exp_creates_new_stats(self): self.assertEqual(new_stats.exp_version, 1) self.assertEqual(new_stats.state_stats_mapping, {}) - def test_get_stats_for_new_exp_version(self): + def test_raises_error_when_both_exp_diff_and_revert_are_none(self) -> None: + # Create exploration object in datastore. + exp_id = 'exp_id' + test_exp_filepath = os.path.join( + feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') + yaml_content = utils.get_file_contents(test_exp_filepath) + assets_list: List[Tuple[str, bytes]] = [] + exp_services.save_new_exploration_from_yaml_and_assets( + feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id, + assets_list) + exploration = exp_fetchers.get_exploration_by_id(exp_id) + + # Test addition of states. 
+ exploration.add_states(['New state', 'New state 2']) + exploration.version += 1 + with self.assertRaisesRegex( + Exception, + 'ExplorationVersionsDiff cannot be None when the change' + ): + stats_services.get_stats_for_new_exp_version( + exploration.id, + exploration.version, + list(exploration.states.keys()), + None, + None + ) + + def test_raises_error_when_both_exp_diff_and_revert_are_none_while_updating_exp_issue( # pylint: disable=line-too-long + self + ) -> None: + # Create exploration object in datastore. + exp_id = 'exp_id' + test_exp_filepath = os.path.join( + feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') + yaml_content = utils.get_file_contents(test_exp_filepath) + assets_list: List[Tuple[str, bytes]] = [] + exp_services.save_new_exploration_from_yaml_and_assets( + feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id, + assets_list) + exploration = exp_fetchers.get_exploration_by_id(exp_id) + + # Test addition of states. + exploration.add_states(['New state', 'New state 2']) + exploration.version += 1 + with self.assertRaisesRegex( + Exception, + 'ExplorationVersionsDiff cannot be None when the change' + ): + stats_services.get_updated_exp_issues_models_for_new_exp_version( + exploration, None, None + ) + + def test_get_stats_for_new_exp_version(self) -> None: """Test the get_stats_for_new_exp_version method.""" # Create exploration object in datastore. exp_id = 'exp_id' test_exp_filepath = os.path.join( feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml') yaml_content = utils.get_file_contents(test_exp_filepath) - assets_list = [] + assets_list: List[Tuple[str, bytes]] = [] exp_services.save_new_exploration_from_yaml_and_assets( feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id, assets_list) exploration = exp_fetchers.get_exploration_by_id(exp_id) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) # Test addition of states. 
exploration.add_states(['New state', 'New state 2']) @@ -410,18 +629,51 @@ def test_get_stats_for_new_exp_version(self): change_list = [exp_domain.ExplorationChange({ 'cmd': 'add_state', 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index }), exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'New state 2' + 'state_name': 'New state 2', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })] exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - exp_versions_diff, None) + exploration.id, + exploration.version, + list(exploration.states.keys()), + exp_versions_diff, + None + ) stats_services.create_stats_model(exploration_stats) - exploration_stats = stats_services.get_exploration_stats_by_id( - exploration.id, exploration.version) + exploration_stats_with_none = ( + stats_services.get_exploration_stats_by_id( + exploration.id, exploration.version + ) + ) + # Ruling out the possibility of None for mypy type checking. 
+ assert exploration_stats_with_none is not None self.assertEqual(exploration_stats.exp_id, exp_id) self.assertEqual(exploration_stats.exp_version, 2) self.assertEqual(exploration_stats.num_actual_starts_v2, 0) @@ -446,15 +698,24 @@ def test_get_stats_for_new_exp_version(self): })] exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - exp_versions_diff, None) + exploration.id, + exploration.version, + list(exploration.states.keys()), + exp_versions_diff, + None + ) stats_services.create_stats_model(exploration_stats) - exploration_stats = stats_services.get_exploration_stats_by_id( - exploration.id, exploration.version) - self.assertEqual(exploration_stats.exp_version, 3) + exploration_stats_with_none = ( + stats_services.get_exploration_stats_by_id( + exploration.id, exploration.version + ) + ) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats_with_none is not None + self.assertEqual(exploration_stats_with_none.exp_version, 3) self.assertEqual( - set(exploration_stats.state_stats_mapping.keys()), set([ + set(exploration_stats_with_none.state_stats_mapping.keys()), set([ 'Home', 'End', 'Renamed state', 'New state'])) # Test deletion of states. 
@@ -466,15 +727,24 @@ def test_get_stats_for_new_exp_version(self): })] exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - exp_versions_diff, None) + exploration.id, + exploration.version, + list(exploration.states.keys()), + exp_versions_diff, + None + ) stats_services.create_stats_model(exploration_stats) - exploration_stats = stats_services.get_exploration_stats_by_id( - exploration.id, exploration.version) - self.assertEqual(exploration_stats.exp_version, 4) + exploration_stats_with_none = ( + stats_services.get_exploration_stats_by_id( + exploration.id, exploration.version + ) + ) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats_with_none is not None + self.assertEqual(exploration_stats_with_none.exp_version, 4) self.assertEqual( - set(exploration_stats.state_stats_mapping.keys()), + set(exploration_stats_with_none.state_stats_mapping.keys()), set(['Home', 'Renamed state', 'End'])) # Test addition, renaming and deletion of states. 
@@ -484,7 +754,19 @@ def test_get_stats_for_new_exp_version(self): exploration.version += 1 change_list = [exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'New state 2' + 'state_name': 'New state 2', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index }), exp_domain.ExplorationChange({ 'cmd': 'rename_state', 'old_state_name': 'New state 2', @@ -495,15 +777,24 @@ def test_get_stats_for_new_exp_version(self): })] exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - exp_versions_diff, None) + exploration.id, + exploration.version, + list(exploration.states.keys()), + exp_versions_diff, + None + ) stats_services.create_stats_model(exploration_stats) - exploration_stats = stats_services.get_exploration_stats_by_id( - exploration.id, exploration.version) - self.assertEqual(exploration_stats.exp_version, 5) + exploration_stats_with_none = ( + stats_services.get_exploration_stats_by_id( + exploration.id, exploration.version + ) + ) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats_with_none is not None + self.assertEqual(exploration_stats_with_none.exp_version, 5) self.assertEqual( - set(exploration_stats.state_stats_mapping.keys()), + set(exploration_stats_with_none.state_stats_mapping.keys()), set(['Home', 'End', 'Renamed state'])) # Test addition and multiple renames. 
@@ -514,7 +805,20 @@ def test_get_stats_for_new_exp_version(self): change_list = [exp_domain.ExplorationChange({ 'cmd': 'add_state', 'state_name': 'New state 2', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }), + exp_domain.ExplorationChange({ 'cmd': 'rename_state', 'old_state_name': 'New state 2', 'new_state_name': 'New state 3' @@ -525,21 +829,32 @@ def test_get_stats_for_new_exp_version(self): })] exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - exp_versions_diff, None) + exploration.id, + exploration.version, + list(exploration.states.keys()), + exp_versions_diff, + None + ) stats_services.create_stats_model(exploration_stats) - exploration_stats = stats_services.get_exploration_stats_by_id( - exploration.id, exploration.version) - self.assertEqual(exploration_stats.exp_version, 6) + exploration_stats_with_none = ( + stats_services.get_exploration_stats_by_id( + exploration.id, exploration.version + ) + ) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats_with_none is not None + self.assertEqual(exploration_stats_with_none.exp_version, 6) self.assertEqual( - set(exploration_stats.state_stats_mapping.keys()), + set(exploration_stats_with_none.state_stats_mapping.keys()), set(['Home', 'New state 4', 'Renamed state', 'End'])) # Set some values for the the stats in the ExplorationStatsModel # instance. 
exploration_stats_model = stats_models.ExplorationStatsModel.get_model( exploration.id, exploration.version) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats_model is not None exploration_stats_model.num_actual_starts_v2 = 5 exploration_stats_model.num_completions_v2 = 2 exploration_stats_model.state_stats_mapping['New state 4'][ @@ -564,73 +879,135 @@ def test_get_stats_for_new_exp_version(self): }), exp_domain.ExplorationChange({ 'cmd': 'add_state', 'state_name': 'New state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }), + exp_domain.ExplorationChange({ 'cmd': 'rename_state', 'old_state_name': 'New state', 'new_state_name': 'New state 4' })] exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - exp_versions_diff, None) + exploration.id, + exploration.version, + list(exploration.states.keys()), + exp_versions_diff, + None + ) stats_services.create_stats_model(exploration_stats) - exploration_stats = stats_services.get_exploration_stats_by_id( + exp_stats = stats_services.get_exploration_stats_by_id( exploration.id, exploration.version) - self.assertEqual(exploration_stats.exp_version, 7) + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_stats is not None + self.assertEqual(exp_stats.exp_version, 7) self.assertEqual( - set(exploration_stats.state_stats_mapping.keys()), + set(exp_stats.state_stats_mapping.keys()), set(['Home', 'New state 4', 'Renamed state', 'End'])) # Test the values of the stats carried over from the last version. - self.assertEqual(exploration_stats.num_actual_starts_v2, 5) - self.assertEqual(exploration_stats.num_completions_v2, 2) + self.assertEqual(exp_stats.num_actual_starts_v2, 5) + self.assertEqual(exp_stats.num_completions_v2, 2) self.assertEqual( - exploration_stats.state_stats_mapping['Home'].total_hit_count_v2, 8) + exp_stats.state_stats_mapping['Home'].total_hit_count_v2, 8) self.assertEqual( - exploration_stats.state_stats_mapping[ + exp_stats.state_stats_mapping[ 'Renamed state'].first_hit_count_v2, 2) self.assertEqual( - exploration_stats.state_stats_mapping[ + exp_stats.state_stats_mapping[ 'End'].useful_feedback_count_v2, 4) # State 'New state 4' has been deleted and recreated, so it should # now contain default values for stats instead of the values it # contained in the last version. self.assertEqual( - exploration_stats.state_stats_mapping[ + exp_stats.state_stats_mapping[ 'New state 4'].total_answers_count_v2, 0) # Test reverts. 
exploration.version += 1 - exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - None, 5) - stats_services.create_stats_model(exploration_stats) + exploration_stats_for_new_exp_version = ( + stats_services.get_stats_for_new_exp_version( + exploration.id, + exploration.version, + list(exploration.states.keys()), + None, + 5 + ) + ) + stats_services.create_stats_model( + exploration_stats_for_new_exp_version + ) - exploration_stats = stats_services.get_exploration_stats_by_id( - exploration.id, exploration.version) - self.assertEqual(exploration_stats.exp_version, 8) + newly_created_exploration_stats = ( + stats_services.get_exploration_stats_by_id( + exploration.id, exploration.version + ) + ) + # Ruling out the possibility of None for mypy type checking. + assert newly_created_exploration_stats is not None + self.assertEqual(newly_created_exploration_stats.exp_version, 8) self.assertEqual( - set(exploration_stats.state_stats_mapping.keys()), + set(newly_created_exploration_stats.state_stats_mapping.keys()), set(['Home', 'Renamed state', 'End'])) - self.assertEqual(exploration_stats.num_actual_starts_v2, 0) - self.assertEqual(exploration_stats.num_completions_v2, 0) + self.assertEqual( + newly_created_exploration_stats.num_actual_starts_v2, 0 + ) + self.assertEqual(newly_created_exploration_stats.num_completions_v2, 0) # Test state name swaps. 
exploration.add_states(['New state 5', 'New state 6']) exploration.version += 1 change_list = [exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'New state 5' + 'state_name': 'New state 5', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index }), exp_domain.ExplorationChange({ 'cmd': 'add_state', - 'state_name': 'New state 6' + 'state_name': 'New state 6', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index })] exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - exp_versions_diff, None) + exploration.id, + exploration.version, + list(exploration.states.keys()), + exp_versions_diff, + None + ) stats_services.create_stats_model(exploration_stats) exploration.rename_state('New state 5', 'New state 7') @@ -652,18 +1029,27 @@ def test_get_stats_for_new_exp_version(self): })] exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) exploration_stats = stats_services.get_stats_for_new_exp_version( - exploration.id, exploration.version, exploration.states, - exp_versions_diff, None) + exploration.id, + exploration.version, + list(exploration.states.keys()), + exp_versions_diff, + None + ) 
stats_services.create_stats_model(exploration_stats) - exploration_stats = stats_services.get_exploration_stats_by_id( - exploration.id, exploration.version) - self.assertEqual(exploration_stats.exp_version, 10) + exploration_stats_with_none = ( + stats_services.get_exploration_stats_by_id( + exploration.id, exploration.version + ) + ) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats_with_none is not None + self.assertEqual(exploration_stats_with_none.exp_version, 10) self.assertEqual( - set(exploration_stats.state_stats_mapping.keys()), + set(exploration_stats_with_none.state_stats_mapping.keys()), set(['End', 'Home', 'New state 6', 'New state 5', 'Renamed state'])) - def test_get_exploration_stats_from_model(self): + def test_get_exploration_stats_from_model(self) -> None: """Test the get_exploration_stats_from_model method.""" model = stats_models.ExplorationStatsModel.get(self.stats_model_id) exploration_stats = stats_services.get_exploration_stats_from_model( @@ -676,9 +1062,12 @@ def test_get_exploration_stats_from_model(self): self.assertEqual(exploration_stats.num_actual_starts_v2, 0) self.assertEqual(exploration_stats.num_completions_v1, 0) self.assertEqual(exploration_stats.num_completions_v2, 0) - self.assertEqual(exploration_stats.state_stats_mapping, {}) + self.assertEqual(exploration_stats.state_stats_mapping, { + 'End': stats_domain.StateStats.create_default(), + 'Introduction': stats_domain.StateStats.create_default() + }) - def test_get_playthrough_from_model(self): + def test_get_playthrough_from_model(self) -> None: """Test the get_playthrough_from_model method.""" model = stats_models.PlaythroughModel.get(self.playthrough_id) playthrough = stats_services.get_playthrough_from_model(model) @@ -688,10 +1077,12 @@ def test_get_playthrough_from_model(self): self.assertEqual(playthrough.issue_customization_args, {}) self.assertEqual(playthrough.actions, []) - def test_get_exploration_stats_by_id(self): + def 
test_get_exploration_stats_by_id(self) -> None: """Test the get_exploration_stats_by_id method.""" exploration_stats = stats_services.get_exploration_stats_by_id( self.exp_id, self.exp_version) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None self.assertEqual(exploration_stats.exp_id, 'exp_id1') self.assertEqual(exploration_stats.exp_version, 1) self.assertEqual(exploration_stats.num_starts_v1, 0) @@ -700,16 +1091,23 @@ def test_get_exploration_stats_by_id(self): self.assertEqual(exploration_stats.num_actual_starts_v2, 0) self.assertEqual(exploration_stats.num_completions_v1, 0) self.assertEqual(exploration_stats.num_completions_v2, 0) - self.assertEqual(exploration_stats.state_stats_mapping, {}) + self.assertEqual(exploration_stats.state_stats_mapping, { + 'End': stats_domain.StateStats.create_default(), + 'Introduction': stats_domain.StateStats.create_default() + }) - def test_create_stats_model(self): - """Test the create_stats_model method.""" + def test_create_stats_model(self) -> None: + """Test the create method.""" exploration_stats = stats_services.get_exploration_stats_by_id( self.exp_id, self.exp_version) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None exploration_stats.exp_version += 1 model_id = stats_services.create_stats_model(exploration_stats) exploration_stats = stats_services.get_exploration_stats_by_id( self.exp_id, self.exp_version + 1) + # Ruling out the possibility of None for mypy type checking. 
+ assert exploration_stats is not None self.assertEqual(exploration_stats.exp_id, 'exp_id1') self.assertEqual(exploration_stats.exp_version, 2) self.assertEqual(exploration_stats.num_starts_v1, 0) @@ -718,12 +1116,17 @@ def test_create_stats_model(self): self.assertEqual(exploration_stats.num_actual_starts_v2, 0) self.assertEqual(exploration_stats.num_completions_v1, 0) self.assertEqual(exploration_stats.num_completions_v2, 0) - self.assertEqual(exploration_stats.state_stats_mapping, {}) + self.assertEqual(exploration_stats.state_stats_mapping, { + 'End': stats_domain.StateStats.create_default(), + 'Introduction': stats_domain.StateStats.create_default() + }) # Test create method with different state_stats_mapping. exploration_stats.state_stats_mapping = { 'Home': stats_domain.StateStats.create_default() } + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None exploration_stats.exp_version += 1 model_id = stats_services.create_stats_model(exploration_stats) model = stats_models.ExplorationStatsModel.get(model_id) @@ -752,10 +1155,12 @@ def test_create_stats_model(self): } }) - def test_save_stats_model(self): + def test_save_stats_model(self) -> None: """Test the save_stats_model method.""" exploration_stats = stats_services.get_exploration_stats_by_id( self.exp_id, self.exp_version) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None exploration_stats.num_starts_v2 += 15 exploration_stats.num_actual_starts_v2 += 5 exploration_stats.num_completions_v2 += 2 @@ -763,11 +1168,13 @@ def test_save_stats_model(self): exploration_stats = stats_services.get_exploration_stats_by_id( self.exp_id, self.exp_version) + # Ruling out the possibility of None for mypy type checking. 
+ assert exploration_stats is not None self.assertEqual(exploration_stats.num_starts_v2, 15) self.assertEqual(exploration_stats.num_actual_starts_v2, 5) self.assertEqual(exploration_stats.num_completions_v2, 2) - def test_get_exploration_stats_multi(self): + def test_get_exploration_stats_multi(self) -> None: """Test the get_exploration_stats_multi method.""" stats_models.ExplorationStatsModel.create( 'exp_id2', 2, 10, 0, 0, 0, 0, 0, {}) @@ -784,13 +1191,14 @@ def test_get_exploration_stats_multi(self): self.assertEqual(exp_stats_list[1].exp_version, 2) def test_get_multiple_exploration_stats_by_version_with_invalid_exp_id( - self): + self + ) -> None: exp_stats = stats_services.get_multiple_exploration_stats_by_version( 'invalid_exp_id', [1]) self.assertEqual(exp_stats, [None]) - def test_get_exploration_stats_multi_with_invalid_exp_id(self): + def test_get_exploration_stats_multi_with_invalid_exp_id(self) -> None: exp_version_references = [ exp_domain.ExpVersionReference('exp_id_1', 1), exp_domain.ExpVersionReference('exp_id_2', 2)] @@ -810,7 +1218,7 @@ def test_get_exploration_stats_multi_with_invalid_exp_id(self): self.assertEqual(exp_stats_list[1].exp_id, 'exp_id_2') self.assertEqual(exp_stats_list[1].exp_version, 2) - def test_update_exp_issues_for_new_exp_version(self): + def test_get_updated_exp_issues_models_for_new_exp_version(self) -> None: self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) exp = self.save_new_valid_exploration('exp_id', admin_id) @@ -822,26 +1230,46 @@ def test_update_exp_issues_for_new_exp_version(self): '%s.%s' % ('exp_id', 1), strict=False)) exp.version += 1 - stats_services.update_exp_issues_for_new_exp_version( - exp, exp_domain.ExplorationVersionsDiff([]), None) + models_to_put = ( + stats_services.get_updated_exp_issues_models_for_new_exp_version( + exp, exp_domain.ExplorationVersionsDiff([]), None + ) + ) + 
datastore_services.update_timestamps_multi(models_to_put) + datastore_services.put_multi(models_to_put) exploration_issues_model = ( stats_models.ExplorationIssuesModel.get('%s.%s' % ('exp_id', 1))) self.assertEqual(exploration_issues_model.unresolved_issues, []) + def test_raises_error_while_saving_stats_with_invalid_id(self) -> None: + """Test the update_stats method.""" + exploration_stats = stats_services.get_exploration_stats_by_id( + self.exp_id, self.exp_version) + # Ruling out the possibility of None for mypy type checking. + assert exploration_stats is not None + exploration_stats.num_starts_v2 += 15 + exploration_stats.num_actual_starts_v2 += 5 + exploration_stats.num_completions_v2 += 2 + exploration_stats.exp_id = 'Invalid_id' + with self.assertRaisesRegex( + Exception, 'No exploration stats model exists' + ): + stats_services.save_stats_model(exploration_stats) + class ExplorationIssuesTests(test_utils.GenericTestBase): """Unit tests focused on services related to exploration issues.""" - def setUp(self): - super(ExplorationIssuesTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.exp = self.save_new_linear_exp_with_state_names_and_interactions( 'exp_id', self.owner_id, ['A', 'B'], ['TextInput', 'EndExploration']) - def _create_cst_playthrough(self, state_names): + def _create_cst_playthrough(self, state_names: List[str]) -> str: """Creates a Cyclic State Transitions playthrough and returns its id. Args: @@ -852,8 +1280,10 @@ def _create_cst_playthrough(self, state_names): Returns: str. The ID of the new playthrough. 
""" - issue_customization_args = {'state_names': {'value': state_names}} - actions = [{ + issue_customization_args: ( + stats_domain.IssuesCustomizationArgsDictType + ) = {'state_names': {'value': state_names}} + actions: List[stats_domain.LearnerActionDict] = [{ 'action_type': 'ExplorationStart', 'action_customization_args': { 'state_name': {'value': state_names[0]}, @@ -873,7 +1303,7 @@ def _create_cst_playthrough(self, state_names): }, 'schema_version': stats_models.CURRENT_ACTION_SCHEMA_VERSION, } - for state_name, dest_state_name in python_utils.ZIP( + for state_name, dest_state_name in zip( state_names[:-1], state_names[1:])) actions.append({ 'action_type': 'ExplorationQuit', @@ -887,7 +1317,7 @@ def _create_cst_playthrough(self, state_names): self.exp.id, self.exp.version, 'CyclicStateTransitions', issue_customization_args, actions) - def _create_eq_playthrough(self, state_name): + def _create_eq_playthrough(self, state_name: str) -> str: """Creates an Early Quit playthrough and returns its id. Args: @@ -896,11 +1326,13 @@ def _create_eq_playthrough(self, state_name): Returns: str. The ID of the new playthrough. """ - issue_customization_args = { + issue_customization_args: ( + stats_domain.IssuesCustomizationArgsDictType + ) = { 'state_name': {'value': state_name}, 'time_spent_in_exp_in_msecs': {'value': 200}, } - actions = [{ + actions: List[stats_domain.LearnerActionDict] = [{ 'action_type': 'ExplorationStart', 'action_customization_args': {'state_name': {'value': state_name}}, 'schema_version': stats_models.CURRENT_ACTION_SCHEMA_VERSION, @@ -917,7 +1349,10 @@ def _create_eq_playthrough(self, state_name): issue_customization_args, actions) def _create_mis_playthrough( - self, state_name, num_times_answered_incorrectly): + self, + state_name: str, + num_times_answered_incorrectly: int + ) -> str: """Creates a Multiple Incorrect Submissions playthrough and returns its id. @@ -929,13 +1364,15 @@ def _create_mis_playthrough( Returns: str. 
The ID of the new playthrough. """ - issue_customization_args = { + issue_customization_args: ( + stats_domain.IssuesCustomizationArgsDictType + ) = { 'state_name': {'value': state_name}, 'num_times_answered_incorrectly': { 'value': num_times_answered_incorrectly }, } - actions = [{ + actions: List[stats_domain.LearnerActionDict] = [{ 'action_type': 'ExplorationStart', 'action_customization_args': {'state_name': {'value': state_name}}, 'schema_version': stats_models.CURRENT_ACTION_SCHEMA_VERSION, @@ -966,7 +1403,9 @@ def _create_mis_playthrough( self.exp.id, self.exp.version, 'MultipleIncorrectSubmissions', issue_customization_args, actions) - def _create_cst_exp_issue(self, playthrough_ids, state_names): + def _create_cst_exp_issue( + self, playthrough_ids: List[str], state_names: List[str] + ) -> stats_domain.ExplorationIssue: """Returns a new Cyclic State Transitions issue domain object. Args: @@ -979,13 +1418,17 @@ def _create_cst_exp_issue(self, playthrough_ids, state_names): Returns: stats_domain.ExplorationIssue. The new issue. """ - issue_customization_args = {'state_names': {'value': state_names}} + issue_customization_args: Dict[ + str, Dict[str, Union[str, int, List[str]]] + ] = {'state_names': {'value': state_names}} is_valid = True return stats_domain.ExplorationIssue( 'CyclicStateTransitions', issue_customization_args, playthrough_ids, stats_models.CURRENT_ISSUE_SCHEMA_VERSION, is_valid) - def _create_eq_exp_issue(self, playthrough_ids, state_name): + def _create_eq_exp_issue( + self, playthrough_ids: List[str], state_name: str + ) -> stats_domain.ExplorationIssue: """Returns a new Early Quit issue domain object. Args: @@ -996,7 +1439,9 @@ def _create_eq_exp_issue(self, playthrough_ids, state_name): Returns: stats_domain.ExplorationIssue. The new issue. 
""" - issue_customization_args = { + issue_customization_args: Dict[ + str, Dict[str, Union[str, int, List[str]]] + ] = { 'state_name': {'value': state_name}, 'time_spent_in_exp_in_msecs': {'value': 200}, } @@ -1006,7 +1451,11 @@ def _create_eq_exp_issue(self, playthrough_ids, state_name): stats_models.CURRENT_ISSUE_SCHEMA_VERSION, is_valid) def _create_mis_exp_issue( - self, playthrough_ids, state_name, num_times_answered_incorrectly): + self, + playthrough_ids: List[str], + state_name: str, + num_times_answered_incorrectly: int + ) -> stats_domain.ExplorationIssue: """Returns a new Multiple Incorrect Submissions issue domain object. Args: @@ -1019,7 +1468,9 @@ def _create_mis_exp_issue( Returns: stats_domain.ExplorationIssue. The new issue. """ - issue_customization_args = { + issue_customization_args: Dict[ + str, Dict[str, Union[str, int, List[str]]] + ] = { 'state_name': {'value': state_name}, 'num_times_answered_incorrectly': { 'value': num_times_answered_incorrectly @@ -1031,25 +1482,29 @@ def _create_mis_exp_issue( playthrough_ids, stats_models.CURRENT_ISSUE_SCHEMA_VERSION, is_valid) - def test_create_exp_issues_model(self): + def test_create_exp_issues_model(self) -> None: exp_issues = stats_domain.ExplorationIssues( self.exp.id, stats_models.CURRENT_ISSUE_SCHEMA_VERSION, []) - stats_services.create_exp_issues_model(exp_issues) + stats_services.get_exp_issues_model_from_domain_object(exp_issues) - exp_issues = stats_models.ExplorationIssuesModel.get_model( + exp_issues_model = stats_models.ExplorationIssuesModel.get_model( self.exp.id, self.exp.version) - self.assertEqual(exp_issues.exp_id, self.exp.id) - self.assertEqual(exp_issues.exp_version, self.exp.version) - self.assertEqual(exp_issues.unresolved_issues, []) - - def test_get_exp_issues_creates_new_empty_exp_issues_when_missing(self): + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_issues_model is not None + self.assertEqual(exp_issues_model.exp_id, self.exp.id) + self.assertEqual(exp_issues_model.exp_version, self.exp.version) + self.assertEqual(exp_issues_model.unresolved_issues, []) + + def test_get_exp_issues_creates_new_empty_exp_issues_when_missing( + self + ) -> None: exp_issues = ( stats_services.get_exp_issues(self.exp.id, self.exp.version)) self.assertEqual(exp_issues.exp_id, self.exp.id) self.assertEqual(exp_issues.unresolved_issues, []) - def test_delete_playthroughs_multi(self): + def test_delete_playthroughs_multi(self) -> None: playthrough_ids = [ self._create_eq_playthrough('A'), self._create_cst_playthrough(['A', 'B', 'A']), @@ -1062,7 +1517,7 @@ def test_delete_playthroughs_multi(self): stats_models.PlaythroughModel.get_multi(playthrough_ids), [None, None, None]) - def test_save_exp_issues_model(self): + def test_save_exp_issues_model(self) -> None: eq_playthrough_ids = [self._create_eq_playthrough('A')] cst_playthrough_ids = [self._create_cst_playthrough(['A', 'B', 'A'])] mis_playthrough_ids = [self._create_mis_playthrough('A', 3)] @@ -1086,7 +1541,7 @@ def test_save_exp_issues_model(self): exp_issues.unresolved_issues[2].playthrough_ids, cst_playthrough_ids) - def test_cst_exp_issue_is_invalidated_when_state_is_deleted(self): + def test_cst_exp_issue_is_invalidated_when_state_is_deleted(self) -> None: stats_services.save_exp_issues_model( stats_domain.ExplorationIssues(self.exp.id, self.exp.version, [ self._create_cst_exp_issue( @@ -1094,17 +1549,22 @@ def test_cst_exp_issue_is_invalidated_when_state_is_deleted(self): ['A', 'B', 'A']) ])) - exp_services.update_exploration(self.owner_id, self.exp.id, [ - exp_domain.ExplorationChange( - {'cmd': 'delete_state', 'state_name': 'B'}) - ], 'change') + exp_services.update_exploration( + self.owner_id, + self.exp.id, + [ + exp_domain.ExplorationChange( + {'cmd': 'delete_state', 'state_name': 'B'}) + ], + 'change' + ) exp_issues = ( 
stats_services.get_exp_issues(self.exp.id, self.exp.version + 1)) self.assertEqual(len(exp_issues.unresolved_issues), 1) self.assertFalse(exp_issues.unresolved_issues[0].is_valid) - def test_cst_exp_issue_is_updated_when_state_is_renamed(self): + def test_cst_exp_issue_is_updated_when_state_is_renamed(self) -> None: stats_services.save_exp_issues_model( stats_domain.ExplorationIssues(self.exp.id, self.exp.version, [ self._create_cst_exp_issue( @@ -1112,13 +1572,18 @@ def test_cst_exp_issue_is_updated_when_state_is_renamed(self): ['A', 'B', 'A']) ])) - exp_services.update_exploration(self.owner_id, self.exp.id, [ - exp_domain.ExplorationChange({ - 'cmd': 'rename_state', - 'old_state_name': 'A', - 'new_state_name': 'Z', - }) - ], 'change') + exp_services.update_exploration( + self.owner_id, + self.exp.id, + [ + exp_domain.ExplorationChange({ + 'cmd': 'rename_state', + 'old_state_name': 'A', + 'new_state_name': 'Z', + }) + ], + 'change' + ) exp_issues = ( stats_services.get_exp_issues(self.exp.id, self.exp.version + 1)) @@ -1152,37 +1617,47 @@ def test_cst_exp_issue_is_updated_when_state_is_renamed(self): actions[3]['action_customization_args']['state_name']['value'], 'Z') - def test_eq_exp_issue_is_invalidated_when_state_is_deleted(self): + def test_eq_exp_issue_is_invalidated_when_state_is_deleted(self) -> None: stats_services.save_exp_issues_model( stats_domain.ExplorationIssues(self.exp.id, self.exp.version, [ self._create_eq_exp_issue( [self._create_eq_playthrough('B')], 'B') ])) - exp_services.update_exploration(self.owner_id, self.exp.id, [ - exp_domain.ExplorationChange( - {'cmd': 'delete_state', 'state_name': 'B'}) - ], 'change') + exp_services.update_exploration( + self.owner_id, + self.exp.id, + [ + exp_domain.ExplorationChange( + {'cmd': 'delete_state', 'state_name': 'B'}) + ], + 'change' + ) exp_issues = ( stats_services.get_exp_issues(self.exp.id, self.exp.version + 1)) self.assertEqual(len(exp_issues.unresolved_issues), 1) 
self.assertFalse(exp_issues.unresolved_issues[0].is_valid) - def test_eq_exp_issue_is_updated_when_state_is_renamed(self): + def test_eq_exp_issue_is_updated_when_state_is_renamed(self) -> None: stats_services.save_exp_issues_model( stats_domain.ExplorationIssues(self.exp.id, self.exp.version, [ self._create_eq_exp_issue( [self._create_eq_playthrough('A')], 'A') ])) - exp_services.update_exploration(self.owner_id, self.exp.id, [ - exp_domain.ExplorationChange({ - 'cmd': 'rename_state', - 'old_state_name': 'A', - 'new_state_name': 'Z', - }) - ], 'change') + exp_services.update_exploration( + self.owner_id, + self.exp.id, + [ + exp_domain.ExplorationChange({ + 'cmd': 'rename_state', + 'old_state_name': 'A', + 'new_state_name': 'Z', + }) + ], + 'change' + ) exp_issues = ( stats_services.get_exp_issues(self.exp.id, self.exp.version + 1)) @@ -1206,37 +1681,47 @@ def test_eq_exp_issue_is_updated_when_state_is_renamed(self): self.assertEqual( actions[1]['action_customization_args']['state_name']['value'], 'Z') - def test_mis_exp_issue_is_invalidated_when_state_is_deleted(self): + def test_mis_exp_issue_is_invalidated_when_state_is_deleted(self) -> None: stats_services.save_exp_issues_model( stats_domain.ExplorationIssues(self.exp.id, self.exp.version, [ self._create_mis_exp_issue( [self._create_mis_playthrough('B', 2)], 'B', 2) ])) - exp_services.update_exploration(self.owner_id, self.exp.id, [ - exp_domain.ExplorationChange( - {'cmd': 'delete_state', 'state_name': 'B'}), - ], 'Delete B') + exp_services.update_exploration( + self.owner_id, + self.exp.id, + [ + exp_domain.ExplorationChange( + {'cmd': 'delete_state', 'state_name': 'B'}), + ], + 'Delete B' + ) exp_issues = ( stats_services.get_exp_issues(self.exp.id, self.exp.version + 1)) self.assertEqual(len(exp_issues.unresolved_issues), 1) self.assertFalse(exp_issues.unresolved_issues[0].is_valid) - def test_mis_exp_issue_is_updated_when_state_is_renamed(self): + def 
test_mis_exp_issue_is_updated_when_state_is_renamed(self) -> None: stats_services.save_exp_issues_model( stats_domain.ExplorationIssues(self.exp.id, self.exp.version, [ self._create_mis_exp_issue( [self._create_mis_playthrough('A', 2)], 'A', 2) ])) - exp_services.update_exploration(self.owner_id, self.exp.id, [ - exp_domain.ExplorationChange({ - 'cmd': 'rename_state', - 'old_state_name': 'A', - 'new_state_name': 'Z', - }) - ], 'change') + exp_services.update_exploration( + self.owner_id, + self.exp.id, + [ + exp_domain.ExplorationChange({ + 'cmd': 'rename_state', + 'old_state_name': 'A', + 'new_state_name': 'Z', + }) + ], + 'change' + ) exp_issues = ( stats_services.get_exp_issues(self.exp.id, self.exp.version + 1)) @@ -1264,7 +1749,7 @@ def test_mis_exp_issue_is_updated_when_state_is_renamed(self): self.assertEqual( actions[3]['action_customization_args']['state_name']['value'], 'Z') - def test_revert_exploration_recovers_exp_issues(self): + def test_revert_exploration_recovers_exp_issues(self) -> None: stats_services.save_exp_issues_model( stats_domain.ExplorationIssues(self.exp.id, self.exp.version, [ self._create_eq_exp_issue( @@ -1276,10 +1761,15 @@ def test_revert_exploration_recovers_exp_issues(self): [self._create_mis_playthrough('B', 3)], 'B', 3), ])) - exp_services.update_exploration(self.owner_id, self.exp.id, [ - exp_domain.ExplorationChange( - {'cmd': 'delete_state', 'state_name': 'B'}), - ], 'commit') + exp_services.update_exploration( + self.owner_id, + self.exp.id, + [ + exp_domain.ExplorationChange( + {'cmd': 'delete_state', 'state_name': 'B'}), + ], + 'commit' + ) exp_issues = ( stats_services.get_exp_issues(self.exp.id, self.exp.version + 1)) @@ -1297,11 +1787,30 @@ def test_revert_exploration_recovers_exp_issues(self): self.assertTrue(exp_issues.unresolved_issues[1].is_valid) self.assertTrue(exp_issues.unresolved_issues[2].is_valid) + def test_raises_error_while_saving_exp_issues_model_with_invalid_exp_id( + self + ) -> None: + exp_issues = 
stats_domain.ExplorationIssues( + self.exp.id, + self.exp.version, + [ + self._create_cst_exp_issue( + [self._create_cst_playthrough(['A', 'B', 'A'])], + ['A', 'B', 'A'] + ) + ] + ) + exp_issues.exp_id = 'Invalid_id' + with self.assertRaisesRegex( + Exception, 'No ExplorationIssuesModel exists' + ): + stats_services.save_exp_issues_model(exp_issues) + class EventLogEntryTests(test_utils.GenericTestBase): """Test for the event log creation.""" - def test_create_events(self): + def test_create_events(self) -> None: """Basic test that makes sure there are no exceptions thrown.""" event_services.StartExplorationEventHandler.record( 'eid', 2, 'state', 'session', {}, feconf.PLAY_TYPE_NORMAL) @@ -1312,13 +1821,16 @@ def test_create_events(self): class AnswerEventTests(test_utils.GenericTestBase): """Test recording new answer operations through events.""" - SESSION_ID = 'SESSION_ID' - TIME_SPENT = 5.0 - PARAMS = {} + SESSION_ID: Final = 'SESSION_ID' + TIME_SPENT: Final = 5.0 + PARAMS: Dict[str, str] = {} - def test_record_answer(self): + def test_record_answer(self) -> None: self.save_new_default_exploration('eid', 'fake@user.com') exp = exp_fetchers.get_exploration_by_id('eid') + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) first_state_name = exp.init_state_name second_state_name = 'State 2' @@ -1341,20 +1853,35 @@ def test_record_answer(self): 'unicode_str': 'Enter here' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } - }), exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'state_name': first_state_name, - 'property_name': - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 'new_value': 1 }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': second_state_name, + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + 
content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_ADD_STATE, 'state_name': third_state_name, + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': second_state_name, @@ -1372,14 +1899,9 @@ def test_record_answer(self): 'unicode_str': 'Enter here' } }, - 'rows': {'value': 1} + 'rows': {'value': 1}, + 'catchMisspellings': {'value': False} } - }), exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'state_name': second_state_name, - 'property_name': - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 'new_value': 1 }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'state_name': third_state_name, @@ -1398,12 +1920,6 @@ def test_record_answer(self): } }, } - }), exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'state_name': third_state_name, - 'property_name': - exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX, - 'new_value': 2 })], 'Add new state') exp = exp_fetchers.get_exploration_by_id('eid') @@ -1458,58 +1974,74 @@ def test_record_answer(self): 'answer': 'answer1', 'time_spent_in_sec': 5.0, 'answer_group_index': 0, 'rule_spec_index': 0, 'classification_categorization': 'explicit', 'session_id': 'sid1', - 'interaction_id': 'TextInput', 'params': {} + 'interaction_id': 'TextInput', 'params': {}, 'answer_str': None, + 'rule_spec_str': None }, { 'answer': 'answer1', 'time_spent_in_sec': 5.0, 'answer_group_index': 0, 'rule_spec_index': 1, 
'classification_categorization': 'explicit', 'session_id': 'sid2', - 'interaction_id': 'TextInput', 'params': {} + 'interaction_id': 'TextInput', 'params': {}, 'answer_str': None, + 'rule_spec_str': None }, { 'answer': {'x': 1.0, 'y': 5.0}, 'time_spent_in_sec': 5.0, 'answer_group_index': 1, 'rule_spec_index': 0, 'classification_categorization': 'explicit', 'session_id': 'sid1', - 'interaction_id': 'TextInput', 'params': {} + 'interaction_id': 'TextInput', 'params': {}, 'answer_str': None, + 'rule_spec_str': None }, { 'answer': 10, 'time_spent_in_sec': 5.0, 'answer_group_index': 2, 'rule_spec_index': 0, 'classification_categorization': 'explicit', - 'session_id': 'sid1', 'interaction_id': 'TextInput', 'params': {} + 'session_id': 'sid1', 'interaction_id': 'TextInput', 'params': {}, + 'answer_str': None, 'rule_spec_str': None }, { 'answer': [{'a': 'some', 'b': 'text'}, {'a': 1.0, 'c': 2.0}], 'time_spent_in_sec': 5.0, 'answer_group_index': 3, 'rule_spec_index': 0, 'classification_categorization': 'explicit', - 'session_id': 'sid1', 'interaction_id': 'TextInput', 'params': {} + 'session_id': 'sid1', 'interaction_id': 'TextInput', 'params': {}, + 'answer_str': None, 'rule_spec_str': None }] expected_submitted_answer_list2 = [{ 'answer': [2, 4, 8], 'time_spent_in_sec': 5.0, 'answer_group_index': 2, 'rule_spec_index': 0, 'classification_categorization': 'explicit', 'session_id': 'sid3', - 'interaction_id': 'TextInput', 'params': {} + 'interaction_id': 'TextInput', 'params': {}, 'answer_str': None, + 'rule_spec_str': None }, { 'answer': self.UNICODE_TEST_STRING, 'time_spent_in_sec': 5.0, 'answer_group_index': 1, 'rule_spec_index': 1, 'classification_categorization': 'explicit', 'session_id': 'sid4', - 'interaction_id': 'TextInput', 'params': {} + 'interaction_id': 'TextInput', 'params': {}, 'answer_str': None, + 'rule_spec_str': None }] - expected_submitted_answer_list3 = [{ + expected_submitted_answer_list3: List[ + Dict[str, Union[str, Optional[int], Dict[str, str], 
float]] + ] = [{ 'answer': None, 'time_spent_in_sec': 5.0, 'answer_group_index': 1, 'rule_spec_index': 1, 'classification_categorization': 'explicit', - 'session_id': 'sid5', 'interaction_id': 'Continue', 'params': {} + 'session_id': 'sid5', 'interaction_id': 'Continue', 'params': {}, + 'answer_str': None, 'rule_spec_str': None }] state_answers = stats_services.get_state_answers( 'eid', exp_version, first_state_name) + # Ruling out the possibility of None for mypy type checking. + assert state_answers is not None self.assertEqual( state_answers.get_submitted_answer_dict_list(), expected_submitted_answer_list1) state_answers = stats_services.get_state_answers( 'eid', exp_version, second_state_name) + # Ruling out the possibility of None for mypy type checking. + assert state_answers is not None self.assertEqual( state_answers.get_submitted_answer_dict_list(), expected_submitted_answer_list2) state_answers = stats_services.get_state_answers( 'eid', exp_version, third_state_name) + # Ruling out the possibility of None for mypy type checking. 
+ assert state_answers is not None self.assertEqual( state_answers.get_submitted_answer_dict_list(), expected_submitted_answer_list3) @@ -1518,16 +2050,16 @@ def test_record_answer(self): class RecordAnswerTests(test_utils.GenericTestBase): """Tests for functionality related to recording and retrieving answers.""" - EXP_ID = 'exp_id0' + EXP_ID: Final = 'exp_id0' - def setUp(self): - super(RecordAnswerTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.exploration = self.save_new_valid_exploration( self.EXP_ID, self.owner_id, end_state_name='End') - def test_record_answer_without_retrieving_it_first(self): + def test_record_answer_without_retrieving_it_first(self) -> None: stats_services.record_answer( self.EXP_ID, self.exploration.version, self.exploration.init_state_name, 'TextInput', @@ -1539,6 +2071,8 @@ def test_record_answer_without_retrieving_it_first(self): state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. 
+ assert state_answers is not None self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{ 'answer': 'first answer', 'time_spent_in_sec': 1.0, @@ -1547,10 +2081,12 @@ def test_record_answer_without_retrieving_it_first(self): 'classification_categorization': 'explicit', 'session_id': 'a_session_id_val', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }]) - def test_record_and_retrieve_single_answer(self): + def test_record_and_retrieve_single_answer(self) -> None: state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) @@ -1567,6 +2103,8 @@ def test_record_and_retrieve_single_answer(self): state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. + assert state_answers is not None self.assertEqual(state_answers.exploration_id, 'exp_id0') self.assertEqual(state_answers.exploration_version, 1) self.assertEqual( @@ -1580,10 +2118,14 @@ def test_record_and_retrieve_single_answer(self): 'classification_categorization': 'explicit', 'session_id': 'a_session_id_val', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }]) - def test_record_and_retrieve_single_answer_with_preexisting_entry(self): + def test_record_and_retrieve_single_answer_with_preexisting_entry( + self + ) -> None: stats_services.record_answer( self.EXP_ID, self.exploration.version, self.exploration.init_state_name, 'TextInput', @@ -1595,6 +2137,8 @@ def test_record_and_retrieve_single_answer_with_preexisting_entry(self): state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. 
+ assert state_answers is not None self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{ 'answer': 'first answer', 'time_spent_in_sec': 1.0, @@ -1603,7 +2147,9 @@ def test_record_and_retrieve_single_answer_with_preexisting_entry(self): 'classification_categorization': 'explicit', 'session_id': 'a_session_id_val', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }]) stats_services.record_answer( @@ -1617,6 +2163,8 @@ def test_record_and_retrieve_single_answer_with_preexisting_entry(self): state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. + assert state_answers is not None self.assertEqual(state_answers.exploration_id, 'exp_id0') self.assertEqual(state_answers.exploration_version, 1) self.assertEqual( @@ -1630,7 +2178,9 @@ def test_record_and_retrieve_single_answer_with_preexisting_entry(self): 'classification_categorization': 'explicit', 'session_id': 'a_session_id_val', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }, { 'answer': 'some text', 'time_spent_in_sec': 10.0, @@ -1639,10 +2189,12 @@ def test_record_and_retrieve_single_answer_with_preexisting_entry(self): 'classification_categorization': 'explicit', 'session_id': 'a_session_id_val', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }]) - def test_record_many_answers(self): + def test_record_many_answers(self) -> None: state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) @@ -1668,6 +2220,8 @@ def test_record_many_answers(self): state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. 
+ assert state_answers is not None self.assertEqual(state_answers.exploration_id, 'exp_id0') self.assertEqual(state_answers.exploration_version, 1) self.assertEqual( @@ -1681,7 +2235,9 @@ def test_record_many_answers(self): 'classification_categorization': 'explicit', 'session_id': 'session_id_v', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }, { 'answer': 'answer ccc', 'time_spent_in_sec': 3.0, @@ -1690,7 +2246,9 @@ def test_record_many_answers(self): 'classification_categorization': 'explicit', 'session_id': 'session_id_v', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }, { 'answer': 'answer bbbbb', 'time_spent_in_sec': 7.5, @@ -1699,10 +2257,12 @@ def test_record_many_answers(self): 'classification_categorization': 'explicit', 'session_id': 'session_id_v', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }]) - def test_record_answers_exceeding_one_shard(self): + def test_record_answers_exceeding_one_shard(self) -> None: # Use a smaller max answer list size so less answers are needed to # exceed a shard. with self.swap( @@ -1737,6 +2297,8 @@ def test_record_answers_exceeding_one_shard(self): master_model = stats_models.StateAnswersModel.get_master_model( self.exploration.id, self.exploration.version, self.exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. + assert master_model is not None self.assertGreater(master_model.shard_count, 0) # The order of the answers returned depends on the size of the @@ -1744,6 +2306,8 @@ def test_record_answers_exceeding_one_shard(self): state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. 
+ assert state_answers is not None self.assertEqual(state_answers.exploration_id, 'exp_id0') self.assertEqual(state_answers.exploration_version, 1) self.assertEqual( @@ -1752,7 +2316,7 @@ def test_record_answers_exceeding_one_shard(self): self.assertEqual( len(state_answers.get_submitted_answer_dict_list()), 600) - def test_record_many_answers_with_preexisting_entry(self): + def test_record_many_answers_with_preexisting_entry(self) -> None: stats_services.record_answer( self.EXP_ID, self.exploration.version, self.exploration.init_state_name, 'TextInput', @@ -1764,6 +2328,8 @@ def test_record_many_answers_with_preexisting_entry(self): state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. + assert state_answers is not None self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{ 'answer': '1 answer', 'time_spent_in_sec': 1.0, @@ -1772,7 +2338,9 @@ def test_record_many_answers_with_preexisting_entry(self): 'classification_categorization': 'explicit', 'session_id': 'a_session_id_val', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }]) submitted_answer_list = [ @@ -1795,6 +2363,8 @@ def test_record_many_answers_with_preexisting_entry(self): state_answers = stats_services.get_state_answers( self.EXP_ID, self.exploration.version, self.exploration.init_state_name) + # Ruling out the possibility of None for mypy type checking. 
+ assert state_answers is not None self.assertEqual(state_answers.exploration_id, 'exp_id0') self.assertEqual(state_answers.exploration_version, 1) self.assertEqual( @@ -1808,7 +2378,9 @@ def test_record_many_answers_with_preexisting_entry(self): 'classification_categorization': 'explicit', 'session_id': 'a_session_id_val', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }, { 'answer': 'answer aaa', 'time_spent_in_sec': 10.0, @@ -1817,7 +2389,9 @@ def test_record_many_answers_with_preexisting_entry(self): 'classification_categorization': 'explicit', 'session_id': 'session_id_v', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }, { 'answer': 'answer ccccc', 'time_spent_in_sec': 3.0, @@ -1826,7 +2400,9 @@ def test_record_many_answers_with_preexisting_entry(self): 'classification_categorization': 'explicit', 'session_id': 'session_id_v', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }, { 'answer': 'answer bbbbbbb', 'time_spent_in_sec': 7.5, @@ -1835,23 +2411,25 @@ def test_record_many_answers_with_preexisting_entry(self): 'classification_categorization': 'explicit', 'session_id': 'session_id_v', 'interaction_id': 'TextInput', - 'params': {} + 'params': {}, + 'answer_str': None, + 'rule_spec_str': None }]) class SampleAnswerTests(test_utils.GenericTestBase): """Tests for functionality related to retrieving sample answers.""" - EXP_ID = 'exp_id0' + EXP_ID: Final = 'exp_id0' - def setUp(self): - super(SampleAnswerTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.exploration = self.save_new_valid_exploration( self.EXP_ID, self.owner_id, end_state_name='End') - def test_at_most_100_answers_returned_even_if_there_are_lots(self): + def 
test_at_most_100_answers_returned_even_if_there_are_lots(self) -> None: submitted_answer_list = [ stats_domain.SubmittedAnswer( 'answer a', 'TextInput', 0, 1, @@ -1874,7 +2452,9 @@ def test_at_most_100_answers_returned_even_if_there_are_lots(self): self.exploration.init_state_name) self.assertEqual(len(sample_answers), 100) - def test_exactly_100_answers_returned_if_main_shard_has_100_answers(self): + def test_exactly_100_answers_returned_if_main_shard_has_100_answers( + self + ) -> None: submitted_answer_list = [ stats_domain.SubmittedAnswer( 'answer a', 'TextInput', 0, 1, @@ -1891,7 +2471,7 @@ def test_exactly_100_answers_returned_if_main_shard_has_100_answers(self): self.exploration.init_state_name) self.assertEqual(sample_answers, ['answer a'] * 100) - def test_all_answers_returned_if_main_shard_has_few_answers(self): + def test_all_answers_returned_if_main_shard_has_few_answers(self) -> None: submitted_answer_list = [ stats_domain.SubmittedAnswer( 'answer a', 'TextInput', 0, 1, @@ -1911,7 +2491,7 @@ def test_all_answers_returned_if_main_shard_has_few_answers(self): self.exploration.init_state_name) self.assertEqual(sample_answers, ['answer a', 'answer bbbbb']) - def test_only_sample_answers_in_main_shard_returned(self): + def test_only_sample_answers_in_main_shard_returned(self) -> None: # Use a smaller max answer list size so fewer answers are needed to # exceed a shard. with self.swap( @@ -1940,7 +2520,7 @@ def test_only_sample_answers_in_main_shard_returned(self): model = stats_models.StateAnswersModel.get('%s:%s:%s:%s' % ( self.exploration.id, str(self.exploration.version), self.exploration.init_state_name, '0')) - self.assertEqual(model.shard_count, 1) + self.assertGreater(model.shard_count, 1) # Verify that the list of sample answers returned contains fewer than # 100 answers, although a total of 100 answers were submitted. 
@@ -1949,19 +2529,27 @@ def test_only_sample_answers_in_main_shard_returned(self): self.exploration.init_state_name) self.assertLess(len(sample_answers), 100) - def test_get_sample_answers_with_invalid_exp_id(self): + def test_get_sample_answers_with_invalid_exp_id(self) -> None: sample_answers = stats_services.get_sample_answers( 'invalid_exp_id', self.exploration.version, self.exploration.init_state_name) self.assertEqual(sample_answers, []) + def test_raises_error_while_fetching_exp_issues_with_invalid_id_and_strict( + self + ) -> None: + with self.assertRaisesRegex( + Exception, 'No ExplorationIssues model found' + ): + stats_services.get_exp_issues('Invalid_id', 0, strict=True) + class LearnerAnswerDetailsServicesTest(test_utils.GenericTestBase): """Test for services related to learner answer details.""" - def setUp(self): - super(LearnerAnswerDetailsServicesTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.exp_id = 'exp_id1' self.state_name = 'intro' self.question_id = 'q_id_1' @@ -1985,28 +2573,33 @@ def setUp(self): self.state_reference_question, self.interaction_id, [], feconf.CURRENT_LEARNER_ANSWER_INFO_SCHEMA_VERSION, 0)) - def test_get_state_reference_for_exp_raises_error_for_fake_exp_id(self): + def test_get_state_reference_for_exp_raises_error_for_fake_exp_id( + self + ) -> None: self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.get_user_id_from_email(self.OWNER_EMAIL) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Entity .* not found'): stats_services.get_state_reference_for_exploration( 'fake_exp', 'state_name') def test_get_state_reference_for_exp_raises_error_for_invalid_state_name( - self): + self + ) -> None: self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) exploration = self.save_new_default_exploration( self.exp_id, owner_id) self.assertEqual(list(exploration.states.keys()), ['Introduction']) - with self.assertRaisesRegexp( + with 
self.assertRaisesRegex( utils.InvalidInputException, 'No state with the given state name was found'): stats_services.get_state_reference_for_exploration( self.exp_id, 'state_name') - def test_get_state_reference_for_exp_for_valid_exp_id_and_state_name(self): + def test_get_state_reference_for_exp_for_valid_exp_id_and_state_name( + self + ) -> None: self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) exploration = self.save_new_default_exploration( @@ -2017,31 +2610,40 @@ def test_get_state_reference_for_exp_for_valid_exp_id_and_state_name(self): self.exp_id, 'Introduction')) self.assertEqual(state_reference, 'exp_id1:Introduction') - def test_get_state_reference_for_question_with_invalid_question_id(self): - with self.assertRaisesRegexp( + def test_get_state_reference_for_question_with_invalid_question_id( + self + ) -> None: + with self.assertRaisesRegex( utils.InvalidInputException, 'No question with the given question id exists'): stats_services.get_state_reference_for_question( 'fake_question_id') - def test_get_state_reference_for_question_with_valid_question_id(self): + def test_get_state_reference_for_question_with_valid_question_id( + self + ) -> None: self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) editor_id = self.get_user_id_from_email( self.EDITOR_EMAIL) question_id = question_services.get_new_question_id() + content_id_generator = translation_domain.ContentIdGenerator() question = self.save_new_question( question_id, editor_id, - self._create_valid_question_data('ABC'), ['skill_1']) + self._create_valid_question_data('ABC', content_id_generator), + ['skill_1'], + content_id_generator.next_content_id_index) self.assertNotEqual(question, None) state_reference = ( stats_services.get_state_reference_for_question(question_id)) self.assertEqual(state_reference, question_id) - def test_update_learner_answer_details(self): + def test_update_learner_answer_details(self) -> None: answer = 'This is my 
answer' answer_details = 'This is my answer details' learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) + # Ruling out the possibility of None for mypy type checking. + assert learner_answer_details is not None self.assertEqual( len(learner_answer_details.learner_answer_info_list), 0) stats_services.record_learner_answer_info( @@ -2049,6 +2651,8 @@ def test_update_learner_answer_details(self): self.interaction_id, answer, answer_details) learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) + # Ruling out the possibility of None for mypy type checking. + assert learner_answer_details is not None self.assertEqual( len(learner_answer_details.learner_answer_info_list), 1) @@ -2059,12 +2663,16 @@ def test_update_learner_answer_details(self): self.interaction_id, answer, answer_details) learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) + # Ruling out the possibility of None for mypy type checking. + assert learner_answer_details is not None self.assertEqual( len(learner_answer_details.learner_answer_info_list), 2) - def test_delete_learner_answer_info(self): + def test_delete_learner_answer_info(self) -> None: learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) + # Ruling out the possibility of None for mypy type checking. 
+ assert learner_answer_details is not None self.assertEqual( len(learner_answer_details.learner_answer_info_list), 0) answer = 'This is my answer' @@ -2074,6 +2682,8 @@ def test_delete_learner_answer_info(self): self.interaction_id, answer, answer_details) learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) + # Ruling out the possibility of None for mypy type checking. + assert learner_answer_details is not None self.assertEqual( len(learner_answer_details.learner_answer_info_list), 1) learner_answer_info_id = ( @@ -2083,20 +2693,25 @@ def test_delete_learner_answer_info(self): learner_answer_info_id) learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) + # Ruling out the possibility of None for mypy type checking. + assert learner_answer_details is not None self.assertEqual( len(learner_answer_details.learner_answer_info_list), 0) - def test_delete_learner_answer_info_with_invalid_input(self): - with self.assertRaisesRegexp( + def test_delete_learner_answer_info_with_invalid_input(self) -> None: + with self.assertRaisesRegex( utils.InvalidInputException, 'No learner answer details found with the given state reference'): stats_services.delete_learner_answer_info( feconf.ENTITY_TYPE_EXPLORATION, 'expID:stateName', 'id_1') def test_delete_learner_answer_info_with_unknown_learner_answer_info_id( - self): + self + ) -> None: learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) + # Ruling out the possibility of None for mypy type checking. 
+ assert learner_answer_details is not None self.assertEqual( len(learner_answer_details.learner_answer_info_list), 0) answer = 'This is my answer' @@ -2106,19 +2721,23 @@ def test_delete_learner_answer_info_with_unknown_learner_answer_info_id( self.interaction_id, answer, answer_details) learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) + # Ruling out the possibility of None for mypy type checking. + assert learner_answer_details is not None self.assertEqual( len(learner_answer_details.learner_answer_info_list), 1) learner_answer_info_id = 'id_1' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Learner answer info with the given id not found'): stats_services.delete_learner_answer_info( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration, learner_answer_info_id) - def test_update_state_reference(self): + def test_update_state_reference(self) -> None: new_state_reference = 'exp_id_2:state_name_2' learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) + # Ruling out the possibility of None for mypy type checking. + assert learner_answer_details is not None self.assertNotEqual( learner_answer_details.state_reference, new_state_reference) stats_services.update_state_reference( @@ -2126,10 +2745,12 @@ def test_update_state_reference(self): new_state_reference) learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, new_state_reference) + # Ruling out the possibility of None for mypy type checking. 
+ assert learner_answer_details is not None self.assertEqual( learner_answer_details.state_reference, new_state_reference) - def test_new_learner_answer_details_is_created(self): + def test_new_learner_answer_details_is_created(self) -> None: state_reference = 'exp_id_2:state_name_2' interaction_id = 'GraphInput' answer = 'Hello World' @@ -2142,6 +2763,8 @@ def test_new_learner_answer_details_is_created(self): interaction_id, answer, answer_details) learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, state_reference) + # Ruling out the possibility of None for mypy type checking. + assert learner_answer_details is not None self.assertNotEqual(learner_answer_details, None) self.assertEqual( learner_answer_details.state_reference, state_reference) @@ -2149,15 +2772,15 @@ def test_new_learner_answer_details_is_created(self): self.assertEqual( len(learner_answer_details.learner_answer_info_list), 1) - def test_update_with_invalid_input_raises_exception(self): - with self.assertRaisesRegexp( + def test_update_with_invalid_input_raises_exception(self) -> None: + with self.assertRaisesRegex( utils.InvalidInputException, 'No learner answer details found with the given state reference'): stats_services.update_state_reference( feconf.ENTITY_TYPE_EXPLORATION, 'expID:stateName', 'newexp:statename') - def test_delete_learner_answer_details_for_exploration_state(self): + def test_delete_learner_answer_details_for_exploration_state(self) -> None: learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) self.assertNotEqual(learner_answer_details, None) @@ -2167,7 +2790,7 @@ def test_delete_learner_answer_details_for_exploration_state(self): feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration) self.assertEqual(learner_answer_details, None) - def test_delete_learner_answer_details_for_question_state(self): + def 
test_delete_learner_answer_details_for_question_state(self) -> None: learner_answer_details = stats_services.get_learner_answer_details( feconf.ENTITY_TYPE_QUESTION, self.state_reference_question) self.assertNotEqual(learner_answer_details, None) diff --git a/core/domain/story_domain.py b/core/domain/story_domain.py index e008b5a6164c..ef5163eb39e2 100644 --- a/core/domain/story_domain.py +++ b/core/domain/story_domain.py @@ -17,6 +17,7 @@ from __future__ import annotations import copy +import datetime import functools import json import re @@ -26,58 +27,63 @@ from core import utils from core.constants import constants from core.domain import change_domain -from core.domain import fs_domain -from core.domain import fs_services -from core.domain import html_cleaner -from core.domain import html_validation_service + +from typing import Final, List, Literal, Optional, TypedDict, overload + +from core.domain import fs_services # pylint: disable=invalid-import-from # isort:skip +from core.domain import html_cleaner # pylint: disable=invalid-import-from # isort:skip +from core.domain import html_validation_service # pylint: disable=invalid-import-from # isort:skip + +# TODO(#14537): Refactor this file and remove imports marked +# with 'invalid-import-from'. # Do not modify the values of these constants. This is to preserve backwards # compatibility with previous change dicts. 
-STORY_PROPERTY_TITLE = 'title' -STORY_PROPERTY_THUMBNAIL_BG_COLOR = 'thumbnail_bg_color' -STORY_PROPERTY_THUMBNAIL_FILENAME = 'thumbnail_filename' -STORY_PROPERTY_DESCRIPTION = 'description' -STORY_PROPERTY_NOTES = 'notes' -STORY_PROPERTY_LANGUAGE_CODE = 'language_code' -STORY_PROPERTY_URL_FRAGMENT = 'url_fragment' -STORY_PROPERTY_META_TAG_CONTENT = 'meta_tag_content' - -STORY_NODE_PROPERTY_DESTINATION_NODE_IDS = 'destination_node_ids' -STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS = 'acquired_skill_ids' -STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS = 'prerequisite_skill_ids' -STORY_NODE_PROPERTY_OUTLINE = 'outline' -STORY_NODE_PROPERTY_TITLE = 'title' -STORY_NODE_PROPERTY_DESCRIPTION = 'description' -STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR = 'thumbnail_bg_color' -STORY_NODE_PROPERTY_THUMBNAIL_FILENAME = 'thumbnail_filename' -STORY_NODE_PROPERTY_EXPLORATION_ID = 'exploration_id' - - -INITIAL_NODE_ID = 'initial_node_id' -NODE = 'node' - -CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION = 'migrate_schema_to_latest_version' +STORY_PROPERTY_TITLE: Final = 'title' +STORY_PROPERTY_THUMBNAIL_BG_COLOR: Final = 'thumbnail_bg_color' +STORY_PROPERTY_THUMBNAIL_FILENAME: Final = 'thumbnail_filename' +STORY_PROPERTY_DESCRIPTION: Final = 'description' +STORY_PROPERTY_NOTES: Final = 'notes' +STORY_PROPERTY_LANGUAGE_CODE: Final = 'language_code' +STORY_PROPERTY_URL_FRAGMENT: Final = 'url_fragment' +STORY_PROPERTY_META_TAG_CONTENT: Final = 'meta_tag_content' + +STORY_NODE_PROPERTY_DESTINATION_NODE_IDS: Final = 'destination_node_ids' +STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS: Final = 'acquired_skill_ids' +STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS: Final = 'prerequisite_skill_ids' +STORY_NODE_PROPERTY_OUTLINE: Final = 'outline' +STORY_NODE_PROPERTY_TITLE: Final = 'title' +STORY_NODE_PROPERTY_DESCRIPTION: Final = 'description' +STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR: Final = 'thumbnail_bg_color' +STORY_NODE_PROPERTY_THUMBNAIL_FILENAME: Final = 'thumbnail_filename' +STORY_NODE_PROPERTY_EXPLORATION_ID: Final 
= 'exploration_id' + + +INITIAL_NODE_ID: Final = 'initial_node_id' +NODE: Final = 'node' + +CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION: Final = 'migrate_schema_to_latest_version' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. -CMD_UPDATE_STORY_PROPERTY = 'update_story_property' -CMD_UPDATE_STORY_NODE_PROPERTY = 'update_story_node_property' -CMD_UPDATE_STORY_CONTENTS_PROPERTY = 'update_story_contents_property' +CMD_UPDATE_STORY_PROPERTY: Final = 'update_story_property' +CMD_UPDATE_STORY_NODE_PROPERTY: Final = 'update_story_node_property' +CMD_UPDATE_STORY_CONTENTS_PROPERTY: Final = 'update_story_contents_property' # These take node_id as parameter. -CMD_ADD_STORY_NODE = 'add_story_node' -CMD_DELETE_STORY_NODE = 'delete_story_node' -CMD_UPDATE_STORY_NODE_OUTLINE_STATUS = 'update_story_node_outline_status' +CMD_ADD_STORY_NODE: Final = 'add_story_node' +CMD_DELETE_STORY_NODE: Final = 'delete_story_node' +CMD_UPDATE_STORY_NODE_OUTLINE_STATUS: Final = 'update_story_node_outline_status' # This takes additional 'title' parameters. -CMD_CREATE_NEW = 'create_new' +CMD_CREATE_NEW: Final = 'create_new' -CMD_CHANGE_ROLE = 'change_role' +CMD_CHANGE_ROLE: Final = 'change_role' -ROLE_MANAGER = 'manager' -ROLE_NONE = 'none' +ROLE_MANAGER: Final = 'manager' +ROLE_NONE: Final = 'none' # The prefix for all node ids of a story. -NODE_ID_PREFIX = 'node_' +NODE_ID_PREFIX: Final = 'node_' class StoryChange(change_domain.BaseChange): @@ -101,85 +107,322 @@ class StoryChange(change_domain.BaseChange): # The allowed list of story properties which can be used in # update_story_property command. 
- STORY_PROPERTIES = ( - STORY_PROPERTY_TITLE, STORY_PROPERTY_THUMBNAIL_BG_COLOR, + STORY_PROPERTIES: List[str] = [ + STORY_PROPERTY_TITLE, + STORY_PROPERTY_THUMBNAIL_BG_COLOR, STORY_PROPERTY_THUMBNAIL_FILENAME, - STORY_PROPERTY_DESCRIPTION, STORY_PROPERTY_NOTES, - STORY_PROPERTY_LANGUAGE_CODE, STORY_PROPERTY_URL_FRAGMENT, - STORY_PROPERTY_META_TAG_CONTENT) + STORY_PROPERTY_DESCRIPTION, + STORY_PROPERTY_NOTES, + STORY_PROPERTY_LANGUAGE_CODE, + STORY_PROPERTY_URL_FRAGMENT, + STORY_PROPERTY_META_TAG_CONTENT + ] # The allowed list of story node properties which can be used in # update_story_node_property command. - STORY_NODE_PROPERTIES = ( + STORY_NODE_PROPERTIES: List[str] = [ STORY_NODE_PROPERTY_DESTINATION_NODE_IDS, STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS, - STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS, STORY_NODE_PROPERTY_OUTLINE, - STORY_NODE_PROPERTY_EXPLORATION_ID, STORY_NODE_PROPERTY_TITLE, + STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS, + STORY_NODE_PROPERTY_OUTLINE, + STORY_NODE_PROPERTY_EXPLORATION_ID, + STORY_NODE_PROPERTY_TITLE, STORY_NODE_PROPERTY_DESCRIPTION, STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR, - STORY_NODE_PROPERTY_THUMBNAIL_FILENAME) + STORY_NODE_PROPERTY_THUMBNAIL_FILENAME + ] # The allowed list of story content properties which can be used in # update_story_contents_property command. 
- STORY_CONTENTS_PROPERTIES = (INITIAL_NODE_ID, NODE, ) + STORY_CONTENTS_PROPERTIES: List[str] = [INITIAL_NODE_ID, NODE] - ALLOWED_COMMANDS = [{ + ALLOWED_COMMANDS: List[feconf.ValidCmdDict] = [{ 'name': CMD_UPDATE_STORY_PROPERTY, 'required_attribute_names': ['property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': STORY_PROPERTIES} + 'allowed_values': {'property_name': STORY_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_STORY_NODE_PROPERTY, 'required_attribute_names': [ 'node_id', 'property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': STORY_NODE_PROPERTIES} + 'allowed_values': {'property_name': STORY_NODE_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_STORY_CONTENTS_PROPERTY, 'required_attribute_names': ['property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': STORY_CONTENTS_PROPERTIES} + 'allowed_values': {'property_name': STORY_CONTENTS_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_ADD_STORY_NODE, 'required_attribute_names': ['node_id', 'title'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_DELETE_STORY_NODE, 'required_attribute_names': ['node_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_STORY_NODE_OUTLINE_STATUS, 'required_attribute_names': ['node_id', 'old_value', 'new_value'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_CREATE_NEW, 'required_attribute_names': ['title'], 
'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION, 'required_attribute_names': ['from_version', 'to_version'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }] +class CreateNewStoryCmd(StoryChange): + """Class representing the StoryChange's + CMD_CREATE_NEW command. + """ + + title: str + + +class MigrateSchemaToLatestVersionCmd(StoryChange): + """Class representing the StoryChange's + CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION command. + """ + + from_version: str + to_version: str + + +class UpdateStoryNodeOutlineStatusCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_OUTLINE_STATUS command. + """ + + node_id: str + old_value: bool + new_value: bool + + +class DeleteStoryNodeCmd(StoryChange): + """Class representing the StoryChange's + CMD_DELETE_STORY_NODE command. + """ + + node_id: str + + +class AddStoryNodeCmd(StoryChange): + """Class representing the StoryChange's + CMD_ADD_STORY_NODE command. + """ + + node_id: str + title: str + + +class UpdateStoryContentsPropertyInitialNodeIdCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_CONTENTS_PROPERTY command with + INITIAL_NODE_ID as allowed value. + """ + + property_name: Literal['initial_node_id'] + new_value: str + old_value: str + + +class UpdateStoryContentsPropertyNodeCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_CONTENTS_PROPERTY command with + NODE as allowed value. + """ + + property_name: Literal['node'] + new_value: int + old_value: int + + +class UpdateStoryNodePropertyDestinationNodeIdsCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_PROPERTY command with + STORY_NODE_PROPERTY_DESTINATION_NODE_IDS as + allowed value. 
+ """ + + node_id: str + property_name: Literal['destination_node_ids'] + new_value: List[str] + old_value: List[str] + + +class UpdateStoryNodePropertyAcquiredSkillIdsCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_PROPERTY command with + STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS as + allowed value. + """ + + node_id: str + property_name: Literal['acquired_skill_ids'] + new_value: List[str] + old_value: List[str] + + +class UpdateStoryNodePropertyPrerequisiteSkillIdsCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_PROPERTY command with + STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS as + allowed value. + """ + + node_id: str + property_name: Literal['prerequisite_skill_ids'] + new_value: List[str] + old_value: List[str] + + +class UpdateStoryNodePropertyOutlineCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_PROPERTY command with + STORY_NODE_PROPERTY_OUTLINE as allowed value. + """ + + node_id: str + property_name: Literal['outline'] + new_value: str + old_value: str + + +class UpdateStoryNodePropertyExplorationIdCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_PROPERTY command with + STORY_NODE_PROPERTY_EXPLORATION_ID as allowed + value. + """ + + node_id: str + property_name: Literal['exploration_id'] + new_value: str + old_value: str + + +class UpdateStoryNodePropertyTitleCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_PROPERTY command with + STORY_NODE_PROPERTY_TITLE as allowed value. + """ + + node_id: str + property_name: Literal['title'] + new_value: str + old_value: str + + +class UpdateStoryNodePropertyDescriptionCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_PROPERTY command with + STORY_NODE_PROPERTY_DESCRIPTION as allowed value. 
+ """ + + node_id: str + property_name: Literal['description'] + new_value: str + old_value: str + + +class UpdateStoryNodePropertyThumbnailBGColorCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_PROPERTY command with + STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR as + allowed value. + """ + + node_id: str + property_name: Literal['thumbnail_bg_color'] + new_value: str + old_value: str + + +class UpdateStoryNodePropertyThumbnailFilenameCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_NODE_PROPERTY command with + STORY_NODE_PROPERTY_THUMBNAIL_FILENAME as + allowed value. + """ + + node_id: str + property_name: Literal['thumbnail_filename'] + new_value: str + old_value: str + + +class UpdateStoryPropertyCmd(StoryChange): + """Class representing the StoryChange's + CMD_UPDATE_STORY_PROPERTY command. + """ + + property_name: str + new_value: str + old_value: str + + +class StoryNodeDict(TypedDict): + """Dictionary representing the StoryNode object.""" + + id: str + title: str + description: str + thumbnail_filename: Optional[str] + thumbnail_bg_color: Optional[str] + thumbnail_size_in_bytes: Optional[int] + destination_node_ids: List[str] + acquired_skill_ids: List[str] + prerequisite_skill_ids: List[str] + outline: str + outline_is_finalized: bool + exploration_id: Optional[str] + + class StoryNode: """Domain object describing a node in the exploration graph of a story. 
""" def __init__( - self, node_id, title, description, thumbnail_filename, - thumbnail_bg_color, thumbnail_size_in_bytes, destination_node_ids, - acquired_skill_ids, prerequisite_skill_ids, - outline, outline_is_finalized, exploration_id): + self, + node_id: str, + title: str, + description: str, + thumbnail_filename: Optional[str], + thumbnail_bg_color: Optional[str], + thumbnail_size_in_bytes: Optional[int], + destination_node_ids: List[str], + acquired_skill_ids: List[str], + prerequisite_skill_ids: List[str], + outline: str, + outline_is_finalized: bool, + exploration_id: Optional[str] + ) -> None: """Initializes a StoryNode domain object. Args: @@ -222,7 +465,7 @@ def __init__( self.exploration_id = exploration_id @classmethod - def get_number_from_node_id(cls, node_id): + def get_number_from_node_id(cls, node_id: str) -> int: """Decodes the node_id to get the number at the end of the id. Args: @@ -234,7 +477,7 @@ def get_number_from_node_id(cls, node_id): return int(node_id.replace(NODE_ID_PREFIX, '')) @classmethod - def get_incremented_node_id(cls, node_id): + def get_incremented_node_id(cls, node_id: str) -> str: """Increments the next node id of the story. Args: @@ -248,7 +491,7 @@ def get_incremented_node_id(cls, node_id): return incremented_node_id @classmethod - def require_valid_node_id(cls, node_id): + def require_valid_node_id(cls, node_id: str) -> None: """Validates the node id for a StoryNode object. Args: @@ -264,7 +507,7 @@ def require_valid_node_id(cls, node_id): 'Invalid node_id: %s' % node_id) @classmethod - def require_valid_thumbnail_filename(cls, thumbnail_filename): + def require_valid_thumbnail_filename(cls, thumbnail_filename: str) -> None: """Checks whether the thumbnail filename of the node is a valid one. 
@@ -274,7 +517,7 @@ def require_valid_thumbnail_filename(cls, thumbnail_filename): utils.require_valid_thumbnail_filename(thumbnail_filename) @classmethod - def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): + def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color: str) -> bool: """Checks whether the thumbnail background color of the story node is a valid one. @@ -288,7 +531,7 @@ def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): return thumbnail_bg_color in constants.ALLOWED_THUMBNAIL_BG_COLORS[ 'chapter'] - def to_dict(self): + def to_dict(self) -> StoryNodeDict: """Returns a dict representing this StoryNode domain object. Returns: @@ -310,7 +553,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, node_dict): + def from_dict(cls, node_dict: StoryNodeDict) -> StoryNode: """Return a StoryNode domain object from a dict. Args: @@ -320,19 +563,24 @@ def from_dict(cls, node_dict): StoryNode. The corresponding StoryNode domain object. """ node = cls( - node_dict['id'], node_dict['title'], node_dict['description'], + node_dict['id'], + node_dict['title'], + node_dict['description'], node_dict['thumbnail_filename'], node_dict['thumbnail_bg_color'], node_dict['thumbnail_size_in_bytes'], node_dict['destination_node_ids'], node_dict['acquired_skill_ids'], - node_dict['prerequisite_skill_ids'], node_dict['outline'], - node_dict['outline_is_finalized'], node_dict['exploration_id']) + node_dict['prerequisite_skill_ids'], + node_dict['outline'], + node_dict['outline_is_finalized'], + node_dict['exploration_id'] + ) return node @classmethod - def create_default_story_node(cls, node_id, title): + def create_default_story_node(cls, node_id: str, title: str) -> StoryNode: """Returns a StoryNode domain object with default values. 
Args: @@ -347,7 +595,7 @@ def create_default_story_node(cls, node_id, title): node_id, title, '', None, None, None, [], [], [], '', False, None) - def validate(self): + def validate(self) -> None: """Validates various properties of the story node. Raises: @@ -359,7 +607,8 @@ def validate(self): raise utils.ValidationError( 'Expected exploration ID to be a string, received %s' % self.exploration_id) - self.require_valid_thumbnail_filename(self.thumbnail_filename) + if self.thumbnail_filename is not None: + self.require_valid_thumbnail_filename(self.thumbnail_filename) if self.thumbnail_bg_color is not None and not ( self.require_valid_thumbnail_bg_color(self.thumbnail_bg_color)): raise utils.ValidationError( @@ -465,16 +714,30 @@ def validate(self): 'The story node with ID %s points to itself.' % node_id) +class StoryContentsDict(TypedDict): + """Dictionary representing the StoryContents object.""" + + nodes: List[StoryNodeDict] + initial_node_id: Optional[str] + next_node_id: str + + class StoryContents: """Domain object representing the story_contents dict.""" - def __init__(self, story_nodes, initial_node_id, next_node_id): + def __init__( + self, + story_nodes: List[StoryNode], + initial_node_id: Optional[str], + next_node_id: str + ) -> None: """Constructs a StoryContents domain object. Args: story_nodes: list(StoryNode). The list of story nodes that are part of this story. - initial_node_id: str. The id of the starting node of the story. + initial_node_id: Optional[str]. The id of the starting node of the + story and None if there is only one node(or the starting node). next_node_id: str. The id for the next node to be added to the story. """ @@ -482,7 +745,7 @@ def __init__(self, story_nodes, initial_node_id, next_node_id): self.nodes = story_nodes self.next_node_id = next_node_id - def validate(self): + def validate(self) -> None: """Validates various properties of the story contents object. 
Raises: @@ -494,6 +757,8 @@ def validate(self): 'Expected nodes field to be a list, received %s' % self.nodes) if len(self.nodes) > 0: + # Ruling out the possibility of None for mypy type checking. + assert self.initial_node_id is not None StoryNode.require_valid_node_id(self.initial_node_id) StoryNode.require_valid_node_id(self.next_node_id) @@ -536,23 +801,56 @@ def validate(self): raise utils.ValidationError( 'Expected all chapter titles to be distinct.') - def get_node_index(self, node_id): + @overload + def get_node_index( + self, node_id: str, + ) -> int: ... + + @overload + def get_node_index( + self, node_id: str, *, strict: Literal[True] + ) -> int: ... + + @overload + def get_node_index( + self, node_id: str, *, strict: Literal[False] + ) -> Optional[int]: ... + + @overload + def get_node_index( + self, node_id: str, *, strict: bool = ... + ) -> Optional[int]: ... + + def get_node_index( + self, + node_id: str, + strict: bool = True + ) -> Optional[int]: """Returns the index of the story node with the given node id, or None if the node id is not in the story contents dict. Args: node_id: str. The id of the node. + strict: bool. Whether to fail noisily if no node with the given + node_id exists. Default is True. Returns: int or None. The index of the corresponding node, or None if there - is no such node. + is no such node and strict == False. + + Raises: + ValueError. If the node id is not in the story contents dict. """ + index: Optional[int] = None for ind, node in enumerate(self.nodes): if node.id == node_id: - return ind - return None + index = ind + if strict and index is None: + raise ValueError( + 'The node with id %s is not part of this story.' % node_id) + return index - def get_ordered_nodes(self): + def get_ordered_nodes(self) -> List[StoryNode]: """Returns a list of nodes ordered by how they would appear sequentially to a learner. @@ -562,16 +860,21 @@ def get_ordered_nodes(self): Returns: list(StoryNode). The ordered list of nodes. 
""" + if len(self.nodes) == 0: + return [] + # Ruling out the possibility of None for mypy type checking. + assert self.initial_node_id is not None initial_index = self.get_node_index(self.initial_node_id) current_node = self.nodes[initial_index] ordered_nodes_list = [current_node] while current_node.destination_node_ids: next_node_id = current_node.destination_node_ids[0] - current_node = self.nodes[self.get_node_index(next_node_id)] + next_index = self.get_node_index(next_node_id) + current_node = self.nodes[next_index] ordered_nodes_list.append(current_node) return ordered_nodes_list - def get_all_linked_exp_ids(self): + def get_all_linked_exp_ids(self) -> List[str]: """Returns a list of exploration id linked to each of the nodes of story content. @@ -584,12 +887,15 @@ def get_all_linked_exp_ids(self): exp_ids.append(node.exploration_id) return exp_ids - def get_node_with_corresponding_exp_id(self, exp_id): + def get_node_with_corresponding_exp_id(self, exp_id: str) -> StoryNode: """Returns the node object which corresponds to a given exploration ids. Returns: - StoryNode or None. The StoryNode object of the corresponding - exploration id if exist else None. + StoryNode. The StoryNode object of the corresponding exploration id + if exist. + + Raises: + Exception. Unable to find the exploration in any node. """ for node in self.nodes: if node.exploration_id == exp_id: @@ -598,7 +904,7 @@ def get_node_with_corresponding_exp_id(self, exp_id): raise Exception('Unable to find the exploration id in any node: %s' % ( exp_id)) - def to_dict(self): + def to_dict(self) -> StoryContentsDict: """Returns a dict representing this StoryContents domain object. Returns: @@ -613,7 +919,9 @@ def to_dict(self): } @classmethod - def from_dict(cls, story_contents_dict): + def from_dict( + cls, story_contents_dict: StoryContentsDict + ) -> StoryContents: """Return a StoryContents domain object from a dict. 
Args: @@ -627,22 +935,69 @@ def from_dict(cls, story_contents_dict): [ StoryNode.from_dict(story_node_dict) for story_node_dict in story_contents_dict['nodes'] - ], story_contents_dict['initial_node_id'], + ], + story_contents_dict['initial_node_id'], story_contents_dict['next_node_id'] ) return story_contents +class StoryDict(TypedDict): + """Dictionary representing the Story object.""" + + id: str + title: str + thumbnail_filename: Optional[str] + thumbnail_bg_color: Optional[str] + thumbnail_size_in_bytes: Optional[int] + description: str + notes: str + story_contents: StoryContentsDict + story_contents_schema_version: int + language_code: str + corresponding_topic_id: str + version: int + url_fragment: str + meta_tag_content: str + + +class SerializableStoryDict(StoryDict): + """Dictionary representing the serializable Story object.""" + + created_on: str + last_updated: str + + +class VersionedStoryContentsDict(TypedDict): + """Dictionary representing the versioned StoryContents object.""" + + schema_version: int + story_contents: StoryContentsDict + + class Story: """Domain object for an Oppia Story.""" def __init__( - self, story_id, title, thumbnail_filename, - thumbnail_bg_color, thumbnail_size_in_bytes, description, notes, - story_contents, story_contents_schema_version, language_code, - corresponding_topic_id, version, url_fragment, meta_tag_content, - created_on=None, last_updated=None): + self, + story_id: str, + title: str, + thumbnail_filename: Optional[str], + thumbnail_bg_color: Optional[str], + thumbnail_size_in_bytes: Optional[int], + description: str, + notes: str, + story_contents: StoryContents, + story_contents_schema_version: int, + language_code: str, + corresponding_topic_id: str, + version: int, + url_fragment: str, + meta_tag_content: str, + created_on: Optional[datetime.datetime] = None, + last_updated: Optional[datetime.datetime] = None + ) -> None: """Constructs a Story domain object. 
Args: @@ -691,7 +1046,32 @@ def __init__( self.meta_tag_content = meta_tag_content @classmethod - def require_valid_thumbnail_filename(cls, thumbnail_filename): + def require_valid_description(cls, description: str) -> None: + """Checks whether the description is a valid string. + + Args: + description: str. The description to be checked. + + Raises: + ValidationError. The description is not a valid string. + """ + if not isinstance(description, str): + raise utils.ValidationError( + 'Expected description to be a string, received %s' + % description) + if description == '': + raise utils.ValidationError( + 'Expected description field not to be empty') + + description_length_limit = ( + android_validation_constants.MAX_CHARS_IN_STORY_DESCRIPTION) + if len(description) > description_length_limit: + raise utils.ValidationError( + 'Expected description to be less than %d chars, received %s' + % (description_length_limit, len(description))) + + @classmethod + def require_valid_thumbnail_filename(cls, thumbnail_filename: str) -> None: """Checks whether the thumbnail filename of the story is a valid one. @@ -701,7 +1081,7 @@ def require_valid_thumbnail_filename(cls, thumbnail_filename): utils.require_valid_thumbnail_filename(thumbnail_filename) @classmethod - def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): + def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color: str) -> bool: """Checks whether the thumbnail background color of the story is a valid one. @@ -715,25 +1095,21 @@ def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): return thumbnail_bg_color in constants.ALLOWED_THUMBNAIL_BG_COLORS[ 'story'] - def validate(self): + def validate(self) -> None: """Validates various properties of the story object. Raises: ValidationError. One or more attributes of story are invalid. 
""" self.require_valid_title(self.title) - - if not isinstance(self.description, str): - raise utils.ValidationError( - 'Expected description to be a string, received %s' - % self.description) - - if self.url_fragment is not None: - utils.require_valid_url_fragment( - self.url_fragment, 'Story Url Fragment', - constants.MAX_CHARS_IN_STORY_URL_FRAGMENT) + self.require_valid_description(self.description) + assert self.url_fragment is not None + utils.require_valid_url_fragment( + self.url_fragment, 'Story Url Fragment', + constants.MAX_CHARS_IN_STORY_URL_FRAGMENT) utils.require_valid_meta_tag_content(self.meta_tag_content) - self.require_valid_thumbnail_filename(self.thumbnail_filename) + if self.thumbnail_filename is not None: + self.require_valid_thumbnail_filename(self.thumbnail_filename) if self.thumbnail_bg_color is not None and not ( self.require_valid_thumbnail_bg_color(self.thumbnail_bg_color)): raise utils.ValidationError( @@ -779,7 +1155,7 @@ def validate(self): self.story_contents.validate() @classmethod - def require_valid_story_id(cls, story_id): + def require_valid_story_id(cls, story_id: str) -> None: """Checks whether the story id is a valid one. Args: @@ -793,7 +1169,7 @@ def require_valid_story_id(cls, story_id): raise utils.ValidationError('Invalid story id.') @classmethod - def require_valid_title(cls, title): + def require_valid_title(cls, title: str) -> None: """Checks whether the story title is a valid one. Args: @@ -811,7 +1187,9 @@ def require_valid_title(cls, title): 'Story title should be less than %d chars, received %s' % (title_limit, title)) - def get_acquired_skill_ids_for_node_ids(self, node_ids): + def get_acquired_skill_ids_for_node_ids( + self, node_ids: List[str] + ) -> List[str]: """Returns the acquired skill ids of the nodes having the given node ids. 
@@ -831,7 +1209,9 @@ def get_acquired_skill_ids_for_node_ids(self, node_ids): acquired_skill_ids.append(skill_id) return acquired_skill_ids - def get_prerequisite_skill_ids_for_exp_id(self, exp_id): + def get_prerequisite_skill_ids_for_exp_id( + self, exp_id: str + ) -> Optional[List[str]]: """Returns the prerequisite skill ids of the node having the given exploration id. @@ -847,7 +1227,7 @@ def get_prerequisite_skill_ids_for_exp_id(self, exp_id): return node.prerequisite_skill_ids return None - def has_exploration(self, exp_id): + def has_exploration(self, exp_id: str) -> bool: """Checks whether an exploration is present in the story. Args: @@ -861,7 +1241,7 @@ def has_exploration(self, exp_id): return True return False - def to_dict(self): + def to_dict(self) -> StoryDict: """Returns a dict representing this Story domain object. Returns: @@ -885,7 +1265,7 @@ def to_dict(self): } @classmethod - def deserialize(cls, json_string): + def deserialize(cls, json_string: str) -> Story: """Returns a Story domain object decoded from a JSON string. Args: @@ -914,14 +1294,21 @@ def deserialize(cls, json_string): return story - def serialize(self): + def serialize(self) -> str: """Returns the object serialized as a JSON string. Returns: str. JSON-encoded str encoding all of the information composing the object. """ - story_dict = self.to_dict() + # Here we use MyPy ignore because to_dict() method returns a general + # dictionary representation of domain object (StoryDict) which does not + # contain properties like created_on and last_updated but MyPy expecting + # story_dict, a dictionary which contain all the properties of domain + # object. That's why we explicitly changing the type of story_dict, + # here which causes MyPy to throw an error. Thus, to silence the error, + # we added an ignore here. 
+ story_dict: SerializableStoryDict = self.to_dict() # type: ignore[assignment] # The only reason we add the version parameter separately is that our # yaml encoding/decoding of this object does not handle the version # parameter. @@ -944,8 +1331,12 @@ def serialize(self): @classmethod def from_dict( - cls, story_dict, story_version=0, - story_created_on=None, story_last_updated=None): + cls, + story_dict: StoryDict, + story_version: int = 0, + story_created_on: Optional[datetime.datetime] = None, + story_last_updated: Optional[datetime.datetime] = None + ) -> Story: """Returns a Story domain object from a dictionary. Args: @@ -961,23 +1352,35 @@ def from_dict( Story. The corresponding Story domain object. """ story = cls( - story_dict['id'], story_dict['title'], - story_dict['thumbnail_filename'], story_dict['thumbnail_bg_color'], + story_dict['id'], + story_dict['title'], + story_dict['thumbnail_filename'], + story_dict['thumbnail_bg_color'], story_dict['thumbnail_size_in_bytes'], - story_dict['description'], story_dict['notes'], + story_dict['description'], + story_dict['notes'], StoryContents.from_dict(story_dict['story_contents']), story_dict['story_contents_schema_version'], - story_dict['language_code'], story_dict['corresponding_topic_id'], - story_version, story_dict['url_fragment'], + story_dict['language_code'], + story_dict['corresponding_topic_id'], + story_version, + story_dict['url_fragment'], story_dict['meta_tag_content'], - story_created_on, story_last_updated) + story_created_on, + story_last_updated + ) return story @classmethod def create_default_story( - cls, story_id, title, description, corresponding_topic_id, - url_fragment): + cls, + story_id: str, + title: str, + description: str, + corresponding_topic_id: str, + url_fragment: str + ) -> Story: """Returns a story domain object with default values. This is for the frontend where a default blank story would be shown to the user when the story is created for the first time. 
@@ -1004,7 +1407,9 @@ def create_default_story( url_fragment, '') @classmethod - def _convert_story_contents_v1_dict_to_v2_dict(cls, story_contents_dict): + def _convert_story_contents_v1_dict_to_v2_dict( + cls, story_contents_dict: StoryContentsDict + ) -> StoryContentsDict: """Converts old Story Contents schema to the modern v2 schema. v2 schema introduces the thumbnail_filename and thumbnail_bg_color fields for Story Nodes. @@ -1022,7 +1427,9 @@ def _convert_story_contents_v1_dict_to_v2_dict(cls, story_contents_dict): return story_contents_dict @classmethod - def _convert_story_contents_v2_dict_to_v3_dict(cls, story_contents_dict): + def _convert_story_contents_v2_dict_to_v3_dict( + cls, story_contents_dict: StoryContentsDict + ) -> StoryContentsDict: """Converts v2 Story Contents schema to the v3 schema. v3 schema introduces the description field for Story Nodes. @@ -1038,7 +1445,9 @@ def _convert_story_contents_v2_dict_to_v3_dict(cls, story_contents_dict): return story_contents_dict @classmethod - def _convert_story_contents_v3_dict_to_v4_dict(cls, story_contents_dict): + def _convert_story_contents_v3_dict_to_v4_dict( + cls, story_contents_dict: StoryContentsDict + ) -> StoryContentsDict: """Converts v3 Story Contents schema to the v4 schema. v4 schema introduces the new schema for Math components. @@ -1052,12 +1461,17 @@ def _convert_story_contents_v3_dict_to_v4_dict(cls, story_contents_dict): for node in story_contents_dict['nodes']: node['outline'] = ( html_validation_service.add_math_content_to_math_rte_components( - node['outline'])) + node['outline'] + ) + ) return story_contents_dict @classmethod def _convert_story_contents_v4_dict_to_v5_dict( - cls, story_id, story_contents_dict): + cls, + story_id: str, + story_contents_dict: StoryContentsDict + ) -> StoryContentsDict: """Converts v4 Story Contents schema to the modern v5 schema. v5 schema introduces the thumbnail_size_in_bytes for Story Nodes. 
@@ -1069,9 +1483,7 @@ def _convert_story_contents_v4_dict_to_v5_dict( Returns: dict. The converted story_contents_dict. """ - file_system_class = fs_services.get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - feconf.ENTITY_TYPE_STORY, story_id)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_STORY, story_id) for index in range(len(story_contents_dict['nodes'])): filepath = '%s/%s' % ( constants.ASSET_TYPE_THUMBNAIL, @@ -1082,7 +1494,11 @@ def _convert_story_contents_v4_dict_to_v5_dict( @classmethod def update_story_contents_from_model( - cls, versioned_story_contents, current_version, story_id): + cls, + versioned_story_contents: VersionedStoryContentsDict, + current_version: int, + story_id: str + ) -> None: """Converts the story_contents blob contained in the given versioned_story_contents dict from current_version to current_version + 1. Note that the versioned_story_contents being @@ -1090,7 +1506,7 @@ def update_story_contents_from_model( Args: versioned_story_contents: dict. A dict with two keys: - - schema_version: str. The schema version for the + - schema_version: int. The schema version for the story_contents dict. - story_contents: dict. The dict comprising the story contents. @@ -1109,7 +1525,7 @@ def update_story_contents_from_model( versioned_story_contents['story_contents'] = conversion_fn( versioned_story_contents['story_contents']) - def update_title(self, title): + def update_title(self, title: str) -> None: """Updates the title of the story. Args: @@ -1117,16 +1533,19 @@ def update_title(self, title): """ self.title = title - def update_thumbnail_filename(self, new_thumbnail_filename): + def update_thumbnail_filename( + self, new_thumbnail_filename: Optional[str] + ) -> None: """Updates the thumbnail filename and file size of the story. Args: new_thumbnail_filename: str|None. The new thumbnail filename of the story. + + Raises: + Exception. The subtopic with the given id doesn't exist. 
""" - file_system_class = fs_services.get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - feconf.ENTITY_TYPE_STORY, self.id)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_STORY, self.id) filepath = '%s/%s' % ( constants.ASSET_TYPE_THUMBNAIL, new_thumbnail_filename) @@ -1138,7 +1557,9 @@ def update_thumbnail_filename(self, new_thumbnail_filename): 'The thumbnail %s for story with id %s does not exist' ' in the filesystem.' % (new_thumbnail_filename, self.id)) - def update_thumbnail_bg_color(self, thumbnail_bg_color): + def update_thumbnail_bg_color( + self, thumbnail_bg_color: Optional[str] + ) -> None: """Updates the thumbnail background color of the story. Args: @@ -1147,7 +1568,7 @@ def update_thumbnail_bg_color(self, thumbnail_bg_color): """ self.thumbnail_bg_color = thumbnail_bg_color - def update_description(self, description): + def update_description(self, description: str) -> None: """Updates the description of the story. Args: @@ -1155,7 +1576,7 @@ def update_description(self, description): """ self.description = description - def update_notes(self, notes): + def update_notes(self, notes: str) -> None: """Updates the notes of the story. Args: @@ -1163,7 +1584,7 @@ def update_notes(self, notes): """ self.notes = notes - def update_language_code(self, language_code): + def update_language_code(self, language_code: str) -> None: """Updates the language code of the story. Args: @@ -1171,7 +1592,7 @@ def update_language_code(self, language_code): """ self.language_code = language_code - def update_url_fragment(self, url_fragment): + def update_url_fragment(self, url_fragment: str) -> None: """Updates the url fragment of the story. Args: @@ -1179,7 +1600,7 @@ def update_url_fragment(self, url_fragment): """ self.url_fragment = url_fragment - def update_meta_tag_content(self, new_meta_tag_content): + def update_meta_tag_content(self, new_meta_tag_content: str) -> None: """Updates the meta tag content of the story. 
Args: @@ -1188,7 +1609,7 @@ def update_meta_tag_content(self, new_meta_tag_content): """ self.meta_tag_content = new_meta_tag_content - def add_node(self, desired_node_id, node_title): + def add_node(self, desired_node_id: str, node_title: str) -> None: """Adds a new default node with the id as story_contents.next_node_id. Args: @@ -1211,7 +1632,9 @@ def add_node(self, desired_node_id, node_title): if self.story_contents.initial_node_id is None: self.story_contents.initial_node_id = desired_node_id - def _check_exploration_id_already_present(self, exploration_id): + def _check_exploration_id_already_present( + self, exploration_id: str + ) -> bool: """Returns whether a node with the given exploration id is already present in story_contents. @@ -1227,19 +1650,17 @@ def _check_exploration_id_already_present(self, exploration_id): return True return False - def delete_node(self, node_id): + def delete_node(self, node_id: str) -> None: """Deletes a node with the given node_id. Args: node_id: str. The id of the node. Raises: - ValueError. The node is not part of the story. + ValueError. The node is the starting node for story, change the + starting node before deleting it. """ node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) if node_id == self.story_contents.initial_node_id: if len(self.story_contents.nodes) == 1: self.story_contents.initial_node_id = None @@ -1252,56 +1673,45 @@ def delete_node(self, node_id): node.destination_node_ids.remove(node_id) del self.story_contents.nodes[node_index] - def update_node_outline(self, node_id, new_outline): + def update_node_outline(self, node_id: str, new_outline: str) -> None: """Updates the outline field of a given node. Args: node_id: str. The id of the node. new_outline: str. The new outline of the given node. - - Raises: - ValueError. The node is not part of the story. 
""" node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].outline = new_outline - def update_node_title(self, node_id, new_title): + def update_node_title(self, node_id: str, new_title: str) -> None: """Updates the title field of a given node. Args: node_id: str. The id of the node. new_title: str. The new title of the given node. - - Raises: - ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].title = new_title - def update_node_description(self, node_id, new_description): + def update_node_description( + self, + node_id: str, + new_description: str + ) -> None: """Updates the description field of a given node. Args: node_id: str. The id of the node. new_description: str. The new description of the given node. - - Raises: - ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) - self.story_contents.nodes[node_index].description = new_description - def update_node_thumbnail_filename(self, node_id, new_thumbnail_filename): + def update_node_thumbnail_filename( + self, + node_id: str, + new_thumbnail_filename: Optional[str] + ) -> None: """Updates the thumbnail filename and file size field of a given node. Args: @@ -1310,15 +1720,10 @@ def update_node_thumbnail_filename(self, node_id, new_thumbnail_filename): given node. Raises: - ValueError. The node is not part of the story. + Exception. The node with the given id doesn't exist. 
""" node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) - file_system_class = fs_services.get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - feconf.ENTITY_TYPE_STORY, self.id)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_STORY, self.id) filepath = '%s/%s' % ( constants.ASSET_TYPE_THUMBNAIL, new_thumbnail_filename) @@ -1332,113 +1737,91 @@ def update_node_thumbnail_filename(self, node_id, new_thumbnail_filename): 'The thumbnail %s for story node with id %s does not exist' ' in the filesystem.' % (new_thumbnail_filename, self.id)) - def update_node_thumbnail_bg_color(self, node_id, new_thumbnail_bg_color): + def update_node_thumbnail_bg_color( + self, + node_id: str, + new_thumbnail_bg_color: Optional[str] + ) -> None: """Updates the thumbnail background color field of a given node. Args: node_id: str. The id of the node. new_thumbnail_bg_color: str|None. The new thumbnail background color of the given node. - - Raises: - ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].thumbnail_bg_color = ( new_thumbnail_bg_color) - def mark_node_outline_as_finalized(self, node_id): + def mark_node_outline_as_finalized(self, node_id: str) -> None: """Updates the outline_is_finalized field of the node with the given node_id as True. Args: node_id: str. The id of the node. - - Raises: - ValueError. The node is not part of the story. 
""" node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].outline_is_finalized = True - def mark_node_outline_as_unfinalized(self, node_id): + def mark_node_outline_as_unfinalized(self, node_id: str) -> None: """Updates the outline_is_finalized field of the node with the given node_id as False. Args: node_id: str. The id of the node. - - Raises: - ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].outline_is_finalized = False - def update_node_acquired_skill_ids(self, node_id, new_acquired_skill_ids): + def update_node_acquired_skill_ids( + self, + node_id: str, + new_acquired_skill_ids: List[str] + ) -> None: """Updates the acquired skill ids field of a given node. Args: node_id: str. The id of the node. new_acquired_skill_ids: list(str). The updated acquired skill id list. - - Raises: - ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].acquired_skill_ids = ( new_acquired_skill_ids) def update_node_prerequisite_skill_ids( - self, node_id, new_prerequisite_skill_ids): + self, + node_id: str, + new_prerequisite_skill_ids: List[str] + ) -> None: """Updates the prerequisite skill ids field of a given node. Args: node_id: str. The id of the node. new_prerequisite_skill_ids: list(str). The updated prerequisite skill id list. - - Raises: - ValueError. The node is not part of the story. 
""" node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].prerequisite_skill_ids = ( new_prerequisite_skill_ids) def update_node_destination_node_ids( - self, node_id, new_destination_node_ids): + self, + node_id: str, + new_destination_node_ids: List[str] + ) -> None: """Updates the destination_node_ids field of a given node. Args: node_id: str. The id of the node. new_destination_node_ids: list(str). The updated destination node id list. - - Raises: - ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].destination_node_ids = ( new_destination_node_ids) - def rearrange_node_in_story(self, from_index, to_index): + def rearrange_node_in_story(self, from_index: int, to_index: int) -> None: """Rearranges or moves a node in the story content. Args: @@ -1474,7 +1857,8 @@ def rearrange_node_in_story(self, from_index, to_index): story_content_nodes.insert(to_index, story_node_to_move) def update_node_exploration_id( - self, node_id, new_exploration_id): + self, node_id: str, new_exploration_id: str + ) -> None: """Updates the exploration id field of a given node. Args: @@ -1482,12 +1866,9 @@ def update_node_exploration_id( new_exploration_id: str. The updated exploration id for a node. Raises: - ValueError. The node is not part of the story. + ValueError. A node with given exploration id is already exists. """ node_index = self.story_contents.get_node_index(node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story.' 
% node_id) if ( self.story_contents.nodes[node_index].exploration_id == @@ -1503,30 +1884,54 @@ def update_node_exploration_id( self.story_contents.nodes[node_index].exploration_id = ( new_exploration_id) - def update_initial_node(self, new_initial_node_id): + def update_initial_node(self, new_initial_node_id: str) -> None: """Updates the starting node of the story. Args: new_initial_node_id: str. The new starting node id. - - Raises: - ValueError. The node is not part of the story. """ - node_index = self.story_contents.get_node_index(new_initial_node_id) - if node_index is None: - raise ValueError( - 'The node with id %s is not part of this story.' - % new_initial_node_id) + self.story_contents.get_node_index(new_initial_node_id) self.story_contents.initial_node_id = new_initial_node_id +class HumanReadableStorySummaryDict(TypedDict): + """Dictionary representing the human readable StorySummary object.""" + + id: str + title: str + description: str + node_titles: List[str] + thumbnail_bg_color: Optional[str] + thumbnail_filename: Optional[str] + url_fragment: str + + +class StorySummaryDict(HumanReadableStorySummaryDict): + """Dictionary representing the StorySummary object.""" + + language_code: str + version: int + story_model_created_on: float + story_model_last_updated: float + + class StorySummary: """Domain object for Story Summary.""" def __init__( - self, story_id, title, description, language_code, version, - node_titles, thumbnail_bg_color, thumbnail_filename, url_fragment, - story_model_created_on, story_model_last_updated): + self, + story_id: str, + title: str, + description: str, + language_code: str, + version: int, + node_titles: List[str], + thumbnail_bg_color: Optional[str], + thumbnail_filename: Optional[str], + url_fragment: str, + story_model_created_on: datetime.datetime, + story_model_last_updated: datetime.datetime + ) -> None: """Constructs a StorySummary domain object. 
Args: @@ -1557,17 +1962,17 @@ def __init__( self.story_model_created_on = story_model_created_on self.story_model_last_updated = story_model_last_updated - def validate(self): + def validate(self) -> None: """Validates various properties of the story summary object. Raises: ValidationError. One or more attributes of story summary are invalid. """ - if self.url_fragment is not None: - utils.require_valid_url_fragment( - self.url_fragment, 'Story Url Fragment', - constants.MAX_CHARS_IN_STORY_URL_FRAGMENT) + assert self.url_fragment is not None + utils.require_valid_url_fragment( + self.url_fragment, 'Story Url Fragment', + constants.MAX_CHARS_IN_STORY_URL_FRAGMENT) if not isinstance(self.title, str): raise utils.ValidationError( @@ -1592,7 +1997,8 @@ def validate(self): 'Expected each chapter title to be a string, received %s' % title) - utils.require_valid_thumbnail_filename(self.thumbnail_filename) + if self.thumbnail_filename is not None: + utils.require_valid_thumbnail_filename(self.thumbnail_filename) if ( self.thumbnail_bg_color is not None and not ( Story.require_valid_thumbnail_bg_color( @@ -1616,7 +2022,7 @@ def validate(self): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code) - def to_dict(self): + def to_dict(self) -> StorySummaryDict: """Returns a dictionary representation of this domain object. Returns: @@ -1638,7 +2044,7 @@ def to_dict(self): self.story_model_last_updated) } - def to_human_readable_dict(self): + def to_human_readable_dict(self) -> HumanReadableStorySummaryDict: """Returns a dictionary representation of this domain object. Returns: @@ -1653,3 +2059,26 @@ def to_human_readable_dict(self): 'thumbnail_filename': self.thumbnail_filename, 'url_fragment': self.url_fragment } + + +class LearnerGroupSyllabusStorySummaryDict(StorySummaryDict): + """Dictionary representation of a StorySummary object for learner + groups syllabus. 
+ """ + + story_is_published: bool + completed_node_titles: List[str] + all_node_dicts: List[StoryNodeDict] + topic_name: str + topic_url_fragment: str + classroom_url_fragment: Optional[str] + + +class StoryChapterProgressSummaryDict(TypedDict): + """Dictionary representation of a StoryChapterProgressSummary object for + learner groups syllabus. + """ + + exploration_id: str + visited_checkpoints_count: int + total_checkpoints_count: int diff --git a/core/domain/story_domain_test.py b/core/domain/story_domain_test.py index 38b95d7ad3ae..2a022629098c 100644 --- a/core/domain/story_domain_test.py +++ b/core/domain/story_domain_test.py @@ -20,30 +20,31 @@ import os from core import feconf -from core import python_utils from core import utils from core.constants import constants -from core.domain import fs_domain +from core.domain import fs_services from core.domain import story_domain from core.domain import story_fetchers from core.domain import story_services from core.tests import test_utils +from typing import Final + class StoryChangeTests(test_utils.GenericTestBase): - def test_story_change_object_with_missing_cmd(self): - with self.assertRaisesRegexp( + def test_story_change_object_with_missing_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Missing cmd key in change dict'): story_domain.StoryChange({'invalid': 'data'}) - def test_story_change_object_with_invalid_cmd(self): - with self.assertRaisesRegexp( + def test_story_change_object_with_invalid_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Command invalid is not allowed'): story_domain.StoryChange({'cmd': 'invalid'}) - def test_story_change_object_with_missing_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_story_change_object_with_missing_attribute_in_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following required attributes are missing: ' 'new_value, old_value')): @@ -52,8 +53,8 @@ def 
test_story_change_object_with_missing_attribute_in_cmd(self): 'property_name': 'title', }) - def test_story_change_object_with_extra_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_story_change_object_with_extra_attribute_in_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following extra attributes are present: invalid')): story_domain.StoryChange({ @@ -62,8 +63,8 @@ def test_story_change_object_with_extra_attribute_in_cmd(self): 'invalid': 'invalid' }) - def test_story_change_object_with_invalid_story_property(self): - with self.assertRaisesRegexp( + def test_story_change_object_with_invalid_story_property(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd update_story_property: ' 'invalid is not allowed')): @@ -74,8 +75,10 @@ def test_story_change_object_with_invalid_story_property(self): 'new_value': 'new_value', }) - def test_story_change_object_with_invalid_story_node_property(self): - with self.assertRaisesRegexp( + def test_story_change_object_with_invalid_story_node_property( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd update_story_node_property: ' 'invalid is not allowed')): @@ -87,8 +90,10 @@ def test_story_change_object_with_invalid_story_node_property(self): 'new_value': 'new_value', }) - def test_story_change_object_with_invalid_story_contents_property(self): - with self.assertRaisesRegexp( + def test_story_change_object_with_invalid_story_contents_property( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd update_story_contents_property:' ' invalid is not allowed')): @@ -99,7 +104,7 @@ def test_story_change_object_with_invalid_story_contents_property(self): 'new_value': 'new_value', }) - def test_story_change_object_with_add_story_node(self): + def test_story_change_object_with_add_story_node(self) -> None: story_change_object = 
story_domain.StoryChange({ 'cmd': 'add_story_node', 'node_id': 'node_id', @@ -110,7 +115,7 @@ def test_story_change_object_with_add_story_node(self): self.assertEqual(story_change_object.node_id, 'node_id') self.assertEqual(story_change_object.title, 'title') - def test_story_change_object_with_delete_story_node(self): + def test_story_change_object_with_delete_story_node(self) -> None: story_change_object = story_domain.StoryChange({ 'cmd': 'delete_story_node', 'node_id': 'node_id' @@ -119,7 +124,7 @@ def test_story_change_object_with_delete_story_node(self): self.assertEqual(story_change_object.cmd, 'delete_story_node') self.assertEqual(story_change_object.node_id, 'node_id') - def test_story_change_object_with_update_story_node_property(self): + def test_story_change_object_with_update_story_node_property(self) -> None: story_change_object = story_domain.StoryChange({ 'cmd': 'update_story_node_property', 'node_id': 'node_id', @@ -134,7 +139,7 @@ def test_story_change_object_with_update_story_node_property(self): self.assertEqual(story_change_object.new_value, 'new_value') self.assertEqual(story_change_object.old_value, 'old_value') - def test_story_change_object_with_update_story_property(self): + def test_story_change_object_with_update_story_property(self) -> None: story_change_object = story_domain.StoryChange({ 'cmd': 'update_story_property', 'property_name': 'title', @@ -147,7 +152,9 @@ def test_story_change_object_with_update_story_property(self): self.assertEqual(story_change_object.new_value, 'new_value') self.assertEqual(story_change_object.old_value, 'old_value') - def test_story_change_object_with_update_story_contents_property(self): + def test_story_change_object_with_update_story_contents_property( + self + ) -> None: story_change_object = story_domain.StoryChange({ 'cmd': 'update_story_contents_property', 'property_name': 'initial_node_id', @@ -161,7 +168,9 @@ def test_story_change_object_with_update_story_contents_property(self): 
self.assertEqual(story_change_object.new_value, 'new_value') self.assertEqual(story_change_object.old_value, 'old_value') - def test_story_change_object_with_update_story_node_outline_status(self): + def test_story_change_object_with_update_story_node_outline_status( + self + ) -> None: story_change_object = story_domain.StoryChange({ 'cmd': 'update_story_node_outline_status', 'node_id': 'node_id', @@ -175,7 +184,7 @@ def test_story_change_object_with_update_story_node_outline_status(self): self.assertEqual(story_change_object.old_value, 'old_value') self.assertEqual(story_change_object.new_value, 'new_value') - def test_story_change_object_with_create_new(self): + def test_story_change_object_with_create_new(self) -> None: story_change_object = story_domain.StoryChange({ 'cmd': 'create_new', 'title': 'title', @@ -185,7 +194,8 @@ def test_story_change_object_with_create_new(self): self.assertEqual(story_change_object.title, 'title') def test_story_change_object_with_migrate_schema_to_latest_version( - self): + self + ) -> None: story_change_object = story_domain.StoryChange({ 'cmd': 'migrate_schema_to_latest_version', 'from_version': 'from_version', @@ -197,7 +207,7 @@ def test_story_change_object_with_migrate_schema_to_latest_version( self.assertEqual(story_change_object.from_version, 'from_version') self.assertEqual(story_change_object.to_version, 'to_version') - def test_to_dict(self): + def test_to_dict(self) -> None: story_change_dict = { 'cmd': 'create_new', 'title': 'title' @@ -209,21 +219,21 @@ def test_to_dict(self): class StoryDomainUnitTests(test_utils.GenericTestBase): """Test the story domain object.""" - STORY_ID = 'story_id' - NODE_ID_1 = story_domain.NODE_ID_PREFIX + '1' - NODE_ID_2 = 'node_2' - SKILL_ID_1 = 'skill_id_1' - SKILL_ID_2 = 'skill_id_2' - EXP_ID = 'exp_id' - USER_ID = 'user' - USER_ID_1 = 'user1' - - def setUp(self): - super(StoryDomainUnitTests, self).setUp() - self.STORY_ID = story_services.get_new_story_id() + STORY_ID: Final = 
'story_id' + NODE_ID_1: Final = story_domain.NODE_ID_PREFIX + '1' + NODE_ID_2: Final = 'node_2' + SKILL_ID_1: Final = 'skill_id_1' + SKILL_ID_2: Final = 'skill_id_2' + EXP_ID: Final = 'exp_id' + USER_ID: Final = 'user' + USER_ID_1: Final = 'user1' + + def setUp(self) -> None: + super().setUp() + self.story_id = story_services.get_new_story_id() self.TOPIC_ID = utils.generate_random_string(12) self.story = self.save_new_story( - self.STORY_ID, self.USER_ID, self.TOPIC_ID, + self.story_id, self.USER_ID, self.TOPIC_ID, url_fragment='story-frag') self.story.add_node(self.NODE_ID_1, 'Node title') self.story.add_node(self.NODE_ID_2, 'Node title 2') @@ -232,18 +242,22 @@ def setUp(self): self.signup('user@example.com', 'user') self.signup('user1@example.com', 'user1') - def _assert_validation_error(self, expected_error_substring): + # Here we use MyPy ignore because the signature of this method + # doesn't match with TestBase._assert_validation_error(). + def _assert_validation_error(self, expected_error_substring: str) -> None: # type: ignore[override] """Checks that the story passes validation. Args: expected_error_substring: str. String that should be a substring of the expected error message. """ - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): self.story.validate() - def _assert_valid_story_id(self, expected_error_substring, story_id): + def _assert_valid_story_id( + self, expected_error_substring: str, story_id: str + ) -> None: """Checks that the story id is valid. Args: @@ -251,11 +265,11 @@ def _assert_valid_story_id(self, expected_error_substring, story_id): of the expected error message. story_id: str. The story ID to validate. 
""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): story_domain.Story.require_valid_story_id(story_id) - def test_serialize_and_deserialize_returns_unchanged_story(self): + def test_serialize_and_deserialize_returns_unchanged_story(self) -> None: """Checks that serializing and then deserializing a default story works as intended by leaving the story unchanged. """ @@ -266,29 +280,43 @@ def test_serialize_and_deserialize_returns_unchanged_story(self): story.to_dict(), story_domain.Story.deserialize(story.serialize()).to_dict()) - def test_valid_story_id(self): - self._assert_valid_story_id('Story id should be a string', 10) + def test_valid_story_id(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self._assert_valid_story_id('Story id should be a string', 10) # type: ignore[arg-type] self._assert_valid_story_id('Invalid story id', 'abc') + constants.STORY_ID_LENGTH = 3 + try: + story_domain.Story.require_valid_story_id('abc') + except utils.ValidationError: + self.fail( + 'require_valid_story_id() raised ExceptionType unexpectedly!') def _assert_valid_thumbnail_filename_for_story( - self, expected_error_substring, thumbnail_filename): + self, expected_error_substring: str, thumbnail_filename: str + ) -> None: """Checks that story passes validation for thumbnail filename.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): story_domain.Story.require_valid_thumbnail_filename( thumbnail_filename) def _assert_valid_thumbnail_filename_for_story_node( - self, expected_error_substring, thumbnail_filename): + self, expected_error_substring: str, thumbnail_filename: str + ) -> None: """Checks that story node passes validation for thumbnail filename.""" - with self.assertRaisesRegexp( 
+ with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): story_domain.StoryNode.require_valid_thumbnail_filename( thumbnail_filename) - def test_thumbnail_filename_validation_for_story(self): + def test_thumbnail_filename_validation_for_story(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. self._assert_valid_thumbnail_filename_for_story( - 'Expected thumbnail filename to be a string, received 10', 10) + 'Expected thumbnail filename to be a string, received 10', 10) # type: ignore[arg-type] self._assert_valid_thumbnail_filename_for_story( 'Thumbnail filename should not start with a dot.', '.name') self._assert_valid_thumbnail_filename_for_story( @@ -302,9 +330,12 @@ def test_thumbnail_filename_validation_for_story(self): self._assert_valid_thumbnail_filename_for_story( 'Expected a filename ending in svg, received name.jpg', 'name.jpg') - def test_thumbnail_filename_validation_for_story_node(self): + def test_thumbnail_filename_validation_for_story_node(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
self._assert_valid_thumbnail_filename_for_story_node( - 'Expected thumbnail filename to be a string, received 10', 10) + 'Expected thumbnail filename to be a string, received 10', 10) # type: ignore[arg-type] self._assert_valid_thumbnail_filename_for_story_node( 'Thumbnail filename should not start with a dot.', '.name') self._assert_valid_thumbnail_filename_for_story_node( @@ -318,7 +349,7 @@ def test_thumbnail_filename_validation_for_story_node(self): self._assert_valid_thumbnail_filename_for_story_node( 'Expected a filename ending in svg, received name.jpg', 'name.jpg') - def test_story_node_thumbnail_size_in_bytes_validation(self): + def test_story_node_thumbnail_size_in_bytes_validation(self) -> None: self.story.story_contents.nodes[0].thumbnail_filename = 'image.svg' self.story.story_contents.nodes[0].thumbnail_bg_color = ( constants.ALLOWED_THUMBNAIL_BG_COLORS['chapter'][0]) @@ -326,24 +357,22 @@ def test_story_node_thumbnail_size_in_bytes_validation(self): self._assert_validation_error( 'Story node thumbnail size in bytes cannot be zero.') - def test_story_node_update_thumbnail_filename(self): + def test_story_node_update_thumbnail_filename(self) -> None: # Test exception when thumbnail is not found on filesystem. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The thumbnail img.svg for story node with id %s does not exist' - ' in the filesystem.' % (self.STORY_ID)): + ' in the filesystem.' % (self.story_id)): self.story.update_node_thumbnail_filename( self.NODE_ID_1, 'img.svg') # Test successful update of thumbnail_filename when the thumbnail # is found in the filesystem. 
- with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_STORY, self.story.id)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_STORY, self.story.id) fs.commit( '%s/new_image.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, mimetype='image/svg+xml') @@ -359,16 +388,33 @@ def test_story_node_update_thumbnail_filename(self): self.story.story_contents.nodes[node_index].thumbnail_size_in_bytes, len(raw_image)) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, - 'The node with id invalid_id is not part of this story'): + 'The node with id invalid_id is not part of this story.'): self.story.update_node_thumbnail_filename( 'invalid_id', 'invalid_thumbnail.svg') - def test_to_human_readable_dict(self): - story_summary = story_fetchers.get_story_summary_by_id(self.STORY_ID) - expected_dict = { - 'id': self.STORY_ID, + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_story_description_validation(self) -> None: + self.story.description = 1 # type: ignore[assignment] + self._assert_validation_error( + 'Expected description to be a string, received 1') + + self.story.description = '' + self._assert_validation_error( + 'Expected description field not to be empty') + + self.story.description = 'a' * 1001 + self._assert_validation_error( + 'Expected description to be less than %d chars, received %s' % ( + 1000, 1001)) + + def test_to_human_readable_dict(self) -> None: + story_summary = story_fetchers.get_story_summary_by_id(self.story_id) + expected_dict: story_domain.HumanReadableStorySummaryDict = { + 'id': self.story_id, 'title': 'Title', 'description': 'Description', 'node_titles': [], @@ -379,7 +425,7 @@ def test_to_human_readable_dict(self): self.assertEqual(expected_dict, story_summary.to_human_readable_dict()) - def test_defaults(self): + def test_defaults(self) -> None: """Test the create_default_story and create_default_story_node method of class Story. 
""" @@ -387,7 +433,7 @@ def test_defaults(self): story = story_domain.Story.create_default_story( self.STORY_ID, 'Title', 'Description', topic_id, 'story-frag-default') - expected_story_dict = { + expected_story_dict: story_domain.StoryDict = { 'id': self.STORY_ID, 'title': 'Title', 'thumbnail_filename': None, @@ -410,7 +456,7 @@ def test_defaults(self): } self.assertEqual(story.to_dict(), expected_story_dict) - def test_get_acquired_skill_ids_for_node_ids(self): + def test_get_acquired_skill_ids_for_node_ids(self) -> None: self.story.story_contents.nodes[0].acquired_skill_ids = ['skill_1'] self.story.story_contents.nodes[1].acquired_skill_ids = ['skill_2'] self.assertEqual( @@ -419,7 +465,7 @@ def test_get_acquired_skill_ids_for_node_ids(self): ['skill_1', 'skill_2'] ) - def test_get_acquired_skill_ids_for_node_ids_empty(self): + def test_get_acquired_skill_ids_for_node_ids_empty(self) -> None: self.story.story_contents.nodes[0].acquired_skill_ids = [] self.story.story_contents.nodes[1].acquired_skill_ids = [] self.assertEqual( @@ -427,7 +473,7 @@ def test_get_acquired_skill_ids_for_node_ids_empty(self): [self.NODE_ID_1, self.NODE_ID_2]), [] ) - def test_get_acquired_skill_ids_for_node_ids_multi_skills(self): + def test_get_acquired_skill_ids_for_node_ids_multi_skills(self) -> None: # Test cases when there are multiple acquired skill ids linked to # one node. self.story.story_contents.nodes[0].acquired_skill_ids = [ @@ -440,7 +486,9 @@ def test_get_acquired_skill_ids_for_node_ids_multi_skills(self): ['skill_1', 'skill_2', 'skill_3'] ) - def test_get_acquired_skill_ids_for_node_ids_overlapping_skills(self): + def test_get_acquired_skill_ids_for_node_ids_overlapping_skills( + self + ) -> None: # Test cases when there are and multiple nodes have overlapping # skill ids. 
self.story.story_contents.nodes[0].acquired_skill_ids = [ @@ -453,7 +501,17 @@ def test_get_acquired_skill_ids_for_node_ids_overlapping_skills(self): ['skill_1', 'skill_2'] ) - def test_get_prerequisite_skill_ids(self): + def test_get_acquired_skill_ids_with_empty_node_ids_ids_is_empty_list( + self + ) -> None: + self.story.story_contents.nodes[0].acquired_skill_ids = [ + 'skill_1', 'skill_2'] + self.story.story_contents.nodes[1].acquired_skill_ids = [ + 'skill_1'] + self.assertEqual( + self.story.get_acquired_skill_ids_for_node_ids([]), []) + + def test_get_prerequisite_skill_ids(self) -> None: self.story.story_contents.nodes[0].prerequisite_skill_ids = ['skill_1'] self.story.story_contents.nodes[0].exploration_id = 'exp_id' self.assertEqual( @@ -462,30 +520,36 @@ def test_get_prerequisite_skill_ids(self): self.assertIsNone( self.story.get_prerequisite_skill_ids_for_exp_id('exp_id_2')) - def test_has_exploration_id(self): + def test_has_exploration_id(self) -> None: self.story.story_contents.nodes[0].exploration_id = 'exp_id' self.assertTrue(self.story.has_exploration('exp_id')) self.assertFalse(self.story.has_exploration('exp_id_2')) - def test_title_validation(self): - self.story.title = 1 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_title_validation(self) -> None: + self.story.title = 1 # type: ignore[assignment] self._assert_validation_error('Title should be a string') self.story.title = ( 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz') self._assert_validation_error( 'Story title should be less than 39 chars') - def test_thumbnail_filename_validation(self): - self.story.thumbnail_filename = [] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_thumbnail_filename_validation(self) -> None: + self.story.thumbnail_filename = [] # type: ignore[assignment] self._assert_validation_error( 'Expected thumbnail filename to be a string, received') - def test_thumbnail_bg_validation(self): + def test_thumbnail_bg_validation(self) -> None: self.story.thumbnail_bg_color = '#FFFFFF' self._assert_validation_error( 'Story thumbnail background color #FFFFFF is not supported.') - def test_thumbnail_filename_or_thumbnail_bg_color_is_none(self): + def test_thumbnail_filename_or_thumbnail_bg_color_is_none(self) -> None: self.story.thumbnail_bg_color = '#F8BF74' self.story.thumbnail_filename = None self._assert_validation_error( @@ -495,23 +559,21 @@ def test_thumbnail_filename_or_thumbnail_bg_color_is_none(self): self._assert_validation_error( 'Story thumbnail background color is not specified.') - def test_update_thumbnail_filename(self): + def test_update_thumbnail_filename(self) -> None: self.assertEqual(self.story.thumbnail_filename, None) # Test exception when thumbnail is not found on filesystem. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The thumbnail img.svg for story with id %s does not exist' - ' in the filesystem.' % (self.STORY_ID)): + ' in the filesystem.' % (self.story_id)): self.story.update_thumbnail_filename('img.svg') # Save the dummy image to the filesystem to be used as thumbnail. 
- with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_STORY, self.story.id)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_STORY, self.story.id) fs.commit( '%s/img.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, mimetype='image/svg+xml') @@ -521,26 +583,30 @@ def test_update_thumbnail_filename(self): self.assertEqual(self.story.thumbnail_filename, 'img.svg') self.assertEqual(self.story.thumbnail_size_in_bytes, len(raw_image)) - def test_description_validation(self): - self.story.description = 1 - self._assert_validation_error( - 'Expected description to be a string, received 1') - - def test_notes_validation(self): - self.story.notes = 1 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_notes_validation(self) -> None: + self.story.notes = 1 # type: ignore[assignment] self._assert_validation_error( 'Expected notes to be a string, received 1') - def test_language_code_validation(self): - self.story.language_code = 0 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_language_code_validation(self) -> None: + self.story.language_code = 0 # type: ignore[assignment] self._assert_validation_error( 'Expected language code to be a string, received 0') self.story.language_code = 'xz' self._assert_validation_error('Invalid language code') - def test_schema_version_validation(self): - self.story.story_contents_schema_version = 'schema_version' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_schema_version_validation(self) -> None: + self.story.story_contents_schema_version = 'schema_version' # type: ignore[assignment] self._assert_validation_error( 'Expected story contents schema version to be an integer, received ' 'schema_version') @@ -552,37 +618,62 @@ def test_schema_version_validation(self): self.story.story_contents_schema_version) ) - def test_corresponding_topic_id_validation(self): + def test_corresponding_topic_id_validation(self) -> None: # Generating valid topic id of type str. valid_topic_id = utils.generate_random_string(12) self.assertIsInstance(valid_topic_id, str) self.story.corresponding_topic_id = valid_topic_id self.story.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. # Setting invalid topic id type. 
invalid_topic_id = 123 - self.story.corresponding_topic_id = invalid_topic_id + self.story.corresponding_topic_id = invalid_topic_id # type: ignore[assignment] self._assert_validation_error( 'Expected corresponding_topic_id should be a string, received: %s' % (invalid_topic_id)) - def test_add_node_validation(self): - with self.assertRaisesRegexp( + def test_add_node_validation(self) -> None: + with self.assertRaisesRegex( Exception, 'The node id node_4 does not match the expected ' 'next node id for the story'): self.story.add_node('node_4', 'Title 4') - def test_get_number_from_node_id(self): + def test_delete_node_with_two_nodes_must_in_order(self) -> None: + self.assertEqual(len(self.story.story_contents.nodes), 2) + with self.assertRaisesRegex( + ValueError, + 'The node with id %s is the starting node for the story, ' + 'change the starting node before deleting it.' % self.NODE_ID_1 + ): + self.story.delete_node(self.NODE_ID_1) + self.story.delete_node(self.NODE_ID_2) + self.assertEqual(self.story.story_contents.nodes[0].id, 'node_1') + self.story.delete_node(self.NODE_ID_1) + self.assertIsNone(self.story.story_contents.initial_node_id) + + def test_get_number_from_node_id(self) -> None: self.assertEqual( story_domain.StoryNode.get_number_from_node_id('node_10'), 10) - def test_node_outline_finalized_validation(self): - self.story.story_contents.nodes[0].outline_is_finalized = 'abs' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_node_outline_finalized_validation(self) -> None: + self.story.story_contents.nodes[0].outline_is_finalized = 'abs' # type: ignore[assignment] self._assert_validation_error( 'Expected outline_is_finalized to be a boolean') + self.story.update_node_outline('node_1', 'new outline') + self.assertEqual( + self.story.story_contents.nodes[0].outline, 'new outline') - def test_node_title_validation(self): - self.story.story_contents.nodes[0].title = 1 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_node_title_validation(self) -> None: + self.story.story_contents.nodes[0].title = 1 # type: ignore[assignment] self._assert_validation_error( 'Expected title to be a string, received 1') @@ -591,8 +682,11 @@ def test_node_title_validation(self): self._assert_validation_error( 'Chapter title should be less than 36 chars') - def test_node_description_validation(self): - self.story.story_contents.nodes[0].description = 1 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_node_description_validation(self) -> None: + self.story.story_contents.nodes[0].description = 1 # type: ignore[assignment] self._assert_validation_error( 'Expected description to be a string, received 1') @@ -606,12 +700,21 @@ def test_node_description_validation(self): self._assert_validation_error( 'Chapter description should be less than 152 chars') - def test_node_thumbnail_bg_validation(self): + self.story.update_node_description('node_1', 'new description') + self.assertEqual( + self.story.story_contents.nodes[0].description, 'new description') + + self.story.update_node_title('node_1', 'new title') + self.assertEqual(self.story.story_contents.nodes[0].title, 'new title') + + def test_node_thumbnail_bg_validation(self) -> None: self.story.story_contents.nodes[0].thumbnail_bg_color = '#FFFFFF' self._assert_validation_error( 'Chapter thumbnail background color #FFFFFF is not supported.') - def test_node_thumbnail_filename_or_thumbnail_bg_color_is_none(self): + def test_node_thumbnail_filename_or_thumbnail_bg_color_is_none( + self + ) -> None: self.story.story_contents.nodes[0].thumbnail_bg_color = '#F8BF74' self.story.story_contents.nodes[0].thumbnail_filename = None self._assert_validation_error( @@ -621,20 +724,28 @@ def test_node_thumbnail_filename_or_thumbnail_bg_color_is_none(self): self._assert_validation_error( 'Chapter thumbnail background color is not specified.') - def test_nodes_validation(self): + def test_nodes_validation(self) -> None: self.story.story_contents.initial_node_id = 'node_10' self._assert_validation_error('Expected starting node to exist') self.story.story_contents.initial_node_id = 'node_id_1' self._assert_validation_error('Invalid node_id: node_id_1') self.story.story_contents.initial_node_id = 'node_abc' self._assert_validation_error('Invalid node_id: node_abc') + self.story.update_initial_node('node_1') self.story.story_contents.initial_node_id = 'node_1' - self.story.story_contents.nodes = {} + + # TODO(#13059): Here 
we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.story.story_contents.nodes = {} # type: ignore[assignment] self._assert_validation_error( 'Expected nodes field to be a list, received {}') - self.story.story_contents.nodes = ['node_1'] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.story.story_contents.nodes = ['node_1'] # type: ignore[list-item] self._assert_validation_error( 'Expected each node to be a StoryNode object, received node_1') @@ -664,31 +775,68 @@ def test_nodes_validation(self): 'skill_id', 'skill_id', 'skill_id_1'] self._assert_validation_error( 'Expected all acquired skills to be distinct.') - self.story.story_contents.nodes[0].acquired_skill_ids = [1] + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.story.story_contents.nodes[0].acquired_skill_ids = [1] # type: ignore[list-item] self._assert_validation_error( 'Expected each acquired skill id to be a string, received 1') - self.story.story_contents.nodes[0].acquired_skill_ids = 1 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.story.story_contents.nodes[0].acquired_skill_ids = 1 # type: ignore[assignment] self._assert_validation_error( 'Expected acquired skill ids to be a list, received 1') + self.story.story_contents.nodes[0].acquired_skill_ids = ['3'] + self.assertEqual( + self.story.story_contents.nodes[0].acquired_skill_ids, ['3']) + self.story.update_node_acquired_skill_ids('node_1', ['3', '4']) + self.assertEqual( + self.story.story_contents.nodes[0].acquired_skill_ids, ['3', '4']) self.story.story_contents.nodes[0].prerequisite_skill_ids = [ 'skill_id', 'skill_id', 'skill_id_1'] self._assert_validation_error( 'Expected all prerequisite skills to be distinct.') - self.story.story_contents.nodes[0].prerequisite_skill_ids = [1] + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.story.story_contents.nodes[0].prerequisite_skill_ids = [1] # type: ignore[list-item] self._assert_validation_error( 'Expected each prerequisite skill id to be a string, received 1') - self.story.story_contents.nodes[0].prerequisite_skill_ids = 1 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.story.story_contents.nodes[0].prerequisite_skill_ids = 1 # type: ignore[assignment] self._assert_validation_error( 'Expected prerequisite skill ids to be a list, received 1') self.story.story_contents.nodes[0].prerequisite_skill_ids = ['1'] - self.story.story_contents.nodes[0].thumbnail_filename = [] + self.story.update_node_prerequisite_skill_ids('node_1', ['1', '2']) + self.assertEqual( + self.story.story_contents.nodes[0].prerequisite_skill_ids, + ['1', '2']) + self.story.mark_node_outline_as_finalized('node_1') + self.assertTrue( + self.story.story_contents.nodes[0].outline_is_finalized) + self.story.mark_node_outline_as_unfinalized('node_1') + self.assertFalse( + self.story.story_contents.nodes[0].outline_is_finalized) + self.story.update_node_thumbnail_bg_color('node_1', 'Red') + self.assertEqual( + self.story.story_contents.nodes[0].thumbnail_bg_color, 'Red') + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.story.story_contents.nodes[0].thumbnail_filename = [] # type: ignore[assignment] self._assert_validation_error( 'Expected thumbnail filename to be a string, received') - def test_acquired_prerequisite_skill_intersection_validation(self): + def test_acquired_prerequisite_skill_intersection_validation(self) -> None: self.story.story_contents.nodes[0].prerequisite_skill_ids = [ 'skill_id', 'skill_id_1'] self.story.story_contents.nodes[0].acquired_skill_ids = [ @@ -697,9 +845,9 @@ def test_acquired_prerequisite_skill_intersection_validation(self): 'Expected prerequisite skill ids and acquired skill ids ' 'to be mutually exclusive.') - def test_get_ordered_nodes(self): + def test_get_ordered_nodes_when_nodes_exist(self) -> None: self.story.story_contents.next_node_id = 'node_4' - node_1 = { + node_1: story_domain.StoryNodeDict = { 'id': 'node_1', 'thumbnail_filename': 'image1.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -714,7 +862,7 @@ def test_get_ordered_nodes(self): 'outline_is_finalized': False, 'exploration_id': None } - node_2 = { + node_2: story_domain.StoryNodeDict = { 'id': 'node_2', 'thumbnail_filename': 'image2.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -729,7 +877,7 @@ def test_get_ordered_nodes(self): 'outline_is_finalized': False, 'exploration_id': None } - node_3 = { + node_3: story_domain.StoryNodeDict = { 'id': 'node_3', 'thumbnail_filename': 'image3.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -761,9 +909,14 @@ def test_get_ordered_nodes(self): self.assertEqual(calculated_list[1].id, expected_list[1].id) self.assertEqual(calculated_list[2].id, expected_list[2].id) - def test_get_all_linked_exp_ids(self): - self.story.story_contents.next_node_id = 'node_4' - node_1 = { + def test_get_ordered_nodes_when_no_nodes_exist(self) -> None: + self.story.story_contents.nodes = [] + calculated_list = self.story.story_contents.get_ordered_nodes() + self.assertEqual(calculated_list, 
[]) + + def test_get_all_linked_exp_ids(self) -> None: + self.story.story_contents.next_node_id = 'node_5' + node_1: story_domain.StoryNodeDict = { 'id': 'node_1', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -778,7 +931,7 @@ def test_get_all_linked_exp_ids(self): 'outline_is_finalized': False, 'exploration_id': 'exp_1' } - node_2 = { + node_2: story_domain.StoryNodeDict = { 'id': 'node_2', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -793,7 +946,7 @@ def test_get_all_linked_exp_ids(self): 'outline_is_finalized': False, 'exploration_id': 'exp_2' } - node_3 = { + node_3: story_domain.StoryNodeDict = { 'id': 'node_3', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -808,11 +961,34 @@ def test_get_all_linked_exp_ids(self): 'outline_is_finalized': False, 'exploration_id': 'exp_3' } + node_4: story_domain.StoryNodeDict = { + 'id': 'node_4', + 'thumbnail_filename': 'image.svg', + 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ + 'chapter'][0], + 'thumbnail_size_in_bytes': 21131, + 'title': 'Title 3', + 'description': 'Description 3', + 'destination_node_ids': [], + 'acquired_skill_ids': [], + 'prerequisite_skill_ids': [], + 'outline': '', + 'outline_is_finalized': False, + 'exploration_id': None + } self.story.story_contents.initial_node_id = 'node_2' self.story.story_contents.nodes = [ story_domain.StoryNode.from_dict(node_1), story_domain.StoryNode.from_dict(node_2), ] + self.assertEqual( + self.story.story_contents.get_all_linked_exp_ids(), + ['exp_1', 'exp_2']) + self.story.story_contents.nodes = [ + story_domain.StoryNode.from_dict(node_1), + story_domain.StoryNode.from_dict(node_2), + story_domain.StoryNode.from_dict(node_4), + ] self.assertEqual( self.story.story_contents.get_all_linked_exp_ids(), ['exp_1', 'exp_2']) @@ -821,10 +997,109 @@ def test_get_all_linked_exp_ids(self): self.assertEqual( 
self.story.story_contents.get_all_linked_exp_ids(), ['exp_1', 'exp_2', 'exp_3']) + with self.assertRaisesRegex( + ValueError, 'A node with exploration id exp_3 already exists.' + ): + self.story.update_node_exploration_id('node_4', 'exp_3') + self.story.update_node_exploration_id('node_3', 'exp_3') + self.assertEqual( + self.story.story_contents.get_all_linked_exp_ids(), + ['exp_1', 'exp_2', 'exp_3']) + self.story.update_node_exploration_id('node_3', 'exp_4') + self.assertEqual( + self.story.story_contents.get_all_linked_exp_ids(), + ['exp_1', 'exp_2', 'exp_4']) + + def test_update_story_contents_from_model_with_all_versions(self) -> None: + node_1: story_domain.StoryNodeDict = { + 'id': 'node_1', + 'thumbnail_filename': 'image.svg', + 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ + 'chapter'][0], + 'thumbnail_size_in_bytes': 21131, + 'title': 'Title 1', + 'description': 'Description 1', + 'destination_node_ids': ['node_3'], + 'acquired_skill_ids': [], + 'prerequisite_skill_ids': [], + 'outline': 'a', + 'outline_is_finalized': False, + 'exploration_id': 'exp_1' + } + story_contents_dict_1: story_domain.StoryContentsDict = { + 'nodes': [node_1], + 'initial_node_id': 'node_1', + 'next_node_id': 'node_4' + } + version_dict: story_domain.VersionedStoryContentsDict = { + 'schema_version': 0, + 'story_contents': story_contents_dict_1 + } + + self.assertEqual(version_dict['schema_version'], 0) + story_domain.Story.update_story_contents_from_model( + version_dict, 1, 'node_1') + self.assertEqual(version_dict['schema_version'], 2) + self.assertIsNone( + version_dict['story_contents']['nodes'][0]['thumbnail_filename']) + self.assertIsNone( + version_dict['story_contents']['nodes'][0]['thumbnail_bg_color']) + self.assertEqual( + version_dict['story_contents']['nodes'][0]['description'], + 'Description 1') + story_domain.Story.update_story_contents_from_model( + version_dict, 2, 'node_1') + self.assertEqual(version_dict['schema_version'], 3) + self.assertEqual( 
+ version_dict['story_contents']['nodes'][0]['description'], '') + story_domain.Story.update_story_contents_from_model( + version_dict, 3, 'node_1') + self.assertEqual(version_dict['schema_version'], 4) + self.assertEqual( + version_dict['story_contents']['nodes'][0][ + 'thumbnail_size_in_bytes'], 21131) + story_domain.Story.update_story_contents_from_model( + version_dict, 4, 'node_1') + self.assertEqual(version_dict['schema_version'], 5) + self.assertIsNone( + version_dict['story_contents']['nodes'][0][ + 'thumbnail_size_in_bytes']) + + def test_story_info_update(self) -> None: + topic_id = utils.generate_random_string(12) + story = story_domain.Story.create_default_story( + self.STORY_ID, 'Title', 'Description', topic_id, + 'story-frag-default') + self.assertEqual(story.title, 'Title') + story.update_title('Updated title') + self.assertEqual(story.title, 'Updated title') + + self.assertIsNone(story.thumbnail_bg_color) + story.update_thumbnail_bg_color('Updated thumbnail_bg_color') + self.assertEqual( + story.thumbnail_bg_color, 'Updated thumbnail_bg_color') + + self.assertEqual(story.description, 'Description') + story.update_description('Updated Description') + self.assertEqual(story.description, 'Updated Description') + + self.assertEqual(story.notes, '') + story.update_notes('Updated notes') + self.assertEqual(story.notes, 'Updated notes') - def test_get_node_with_corresponding_exp_id_with_valid_exp_id(self): + self.assertEqual(story.language_code, 'en') + story.update_language_code('Updated language_code') + self.assertEqual(story.language_code, 'Updated language_code') + + self.assertEqual(story.meta_tag_content, '') + story.update_meta_tag_content('Updated meta_tag_content') + self.assertEqual(story.meta_tag_content, 'Updated meta_tag_content') + + def test_get_node_with_corresponding_exp_id_with_valid_exp_id( + self + ) -> None: self.story.story_contents.next_node_id = 'node_4' - node_1 = { + node_1: story_domain.StoryNodeDict = { 'id': 'node_1', 
'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -850,9 +1125,11 @@ def test_get_node_with_corresponding_exp_id_with_valid_exp_id(self): self.assertEqual(node_with_exp_1.to_dict(), node_1) - def test_get_node_with_corresponding_exp_id_with_invalid_exp_id(self): + def test_get_node_with_corresponding_exp_id_with_invalid_exp_id( + self + ) -> None: self.story.story_contents.next_node_id = 'node_4' - node_1 = { + node_1: story_domain.StoryNodeDict = { 'id': 'node_1', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -871,16 +1148,16 @@ def test_get_node_with_corresponding_exp_id_with_invalid_exp_id(self): self.story.story_contents.nodes = [ story_domain.StoryNode.from_dict(node_1) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unable to find the exploration id in any node: invalid_id'): self.story.story_contents.get_node_with_corresponding_exp_id( 'invalid_id') - def test_all_nodes_visited(self): + def test_all_nodes_visited(self) -> None: self.story.story_contents.next_node_id = 'node_4' # Case 1: Disconnected graph. 
- node_1 = { + node_1: story_domain.StoryNodeDict = { 'id': 'node_1', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -895,7 +1172,7 @@ def test_all_nodes_visited(self): 'outline_is_finalized': False, 'exploration_id': None } - node_2 = { + node_2: story_domain.StoryNodeDict = { 'id': 'node_2', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -910,7 +1187,7 @@ def test_all_nodes_visited(self): 'outline_is_finalized': False, 'exploration_id': None } - node_3 = { + node_3: story_domain.StoryNodeDict = { 'id': 'node_3', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1085,7 +1362,7 @@ def test_all_nodes_visited(self): 'outline_is_finalized': False, 'exploration_id': None } - node_4 = { + node_4: story_domain.StoryNodeDict = { 'id': 'node_4', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1108,30 +1385,48 @@ def test_all_nodes_visited(self): ] self.story.validate() - def test_rearrange_node_in_story_fail_with_invalid_from_index_value(self): - with self.assertRaisesRegexp( + def test_rearrange_node_in_story_fail_with_invalid_from_index_value( + self + ) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + with self.assertRaisesRegex( Exception, 'Expected from_index value to be a number, ' 'received None'): - self.story.rearrange_node_in_story(None, 2) + self.story.rearrange_node_in_story(None, 2) # type: ignore[arg-type] - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex( Exception, 'Expected from_index value to be a number, ' 'received a'): - self.story.rearrange_node_in_story('a', 2) - - def test_rearrange_node_in_story_fail_with_invalid_to_index_value(self): - with self.assertRaisesRegexp( + self.story.rearrange_node_in_story('a', 2) # type: ignore[arg-type] + + def test_rearrange_node_in_story_fail_with_invalid_to_index_value( + self + ) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + with self.assertRaisesRegex( Exception, 'Expected to_index value to be a number, ' 'received None'): - self.story.rearrange_node_in_story(1, None) + self.story.rearrange_node_in_story(1, None) # type: ignore[arg-type] - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex( Exception, 'Expected to_index value to be a number, ' 'received a'): - self.story.rearrange_node_in_story(1, 'a') + self.story.rearrange_node_in_story(1, 'a') # type: ignore[arg-type] - def test_rearrange_canonical_story_fail_with_out_of_bound_indexes(self): - node_1 = { + def test_rearrange_canonical_story_fail_with_out_of_bound_indexes( + self + ) -> None: + node_1: story_domain.StoryNodeDict = { 'id': 'node_1', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1146,7 +1441,7 @@ def test_rearrange_canonical_story_fail_with_out_of_bound_indexes(self): 'outline_is_finalized': False, 'exploration_id': None } - node_2 = { + node_2: story_domain.StoryNodeDict = { 'id': 'node_2', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1165,35 +1460,37 @@ def test_rearrange_canonical_story_fail_with_out_of_bound_indexes(self): story_domain.StoryNode.from_dict(node_1), story_domain.StoryNode.from_dict(node_2) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected from_index value to be with-in bounds.'): self.story.rearrange_node_in_story(10, 0) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected from_index value to be with-in bounds.'): self.story.rearrange_node_in_story(-1, 0) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected to_index value to be with-in bounds.'): self.story.rearrange_node_in_story(0, 10) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected to_index value to be with-in bounds.'): self.story.rearrange_node_in_story(0, -1) - def test_update_url_fragment(self): + def test_update_url_fragment(self) -> None: self.assertEqual(self.story.url_fragment, 'story-frag') self.story.update_url_fragment('updated-title') self.assertEqual(self.story.url_fragment, 'updated-title') - def 
test_rearrange_node_in_story_fail_with_identical_index_values(self): - with self.assertRaisesRegexp( + def test_rearrange_node_in_story_fail_with_identical_index_values( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Expected from_index and to_index values to be ' 'different.'): self.story.rearrange_node_in_story(1, 1) - def test_rearrange_node_in_story(self): - node_1 = { + def test_rearrange_node_in_story(self) -> None: + node_1: story_domain.StoryNodeDict = { 'id': 'node_1', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1208,7 +1505,7 @@ def test_rearrange_node_in_story(self): 'outline_is_finalized': False, 'exploration_id': None } - node_2 = { + node_2: story_domain.StoryNodeDict = { 'id': 'node_2', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1223,7 +1520,7 @@ def test_rearrange_node_in_story(self): 'outline_is_finalized': False, 'exploration_id': None } - node_3 = { + node_3: story_domain.StoryNodeDict = { 'id': 'node_3', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1265,7 +1562,7 @@ def test_rearrange_node_in_story(self): self.assertEqual(nodes[1].id, 'node_2') self.assertEqual(nodes[2].id, 'node_3') - def test_story_contents_export_import(self): + def test_story_contents_export_import(self) -> None: """Test that to_dict and from_dict preserve all data within a story_contents object. 
""" @@ -1275,57 +1572,73 @@ def test_story_contents_export_import(self): [self.NODE_ID_2], [self.SKILL_ID_1], [self.SKILL_ID_2], 'Outline', False, self.EXP_ID) story_contents = story_domain.StoryContents( - [story_node], self.NODE_ID_1, 2) + [story_node], self.NODE_ID_1, '2') story_contents_dict = story_contents.to_dict() story_contents_from_dict = story_domain.StoryContents.from_dict( story_contents_dict) self.assertEqual( story_contents_from_dict.to_dict(), story_contents_dict) - def test_validate_non_str_exploration_id(self): - self.story.story_contents.nodes[0].exploration_id = 1 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_str_exploration_id(self) -> None: + self.story.story_contents.nodes[0].exploration_id = 1 # type: ignore[assignment] self._assert_validation_error( 'Expected exploration ID to be a string') - def test_validate_empty_exploration_id(self): + def test_validate_empty_exploration_id(self) -> None: self.story.story_contents.nodes[0].exploration_id = '' self._assert_validation_error( 'Expected exploration ID to not be an empty string') - def test_validate_non_str_outline(self): - self.story.story_contents.nodes[0].outline = 0 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_non_str_outline(self) -> None: + self.story.story_contents.nodes[0].outline = 0 # type: ignore[assignment] self._assert_validation_error( 'Expected outline to be a string') - def test_validate_non_list_destination_node_ids(self): - self.story.story_contents.nodes[0].destination_node_ids = 0 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_list_destination_node_ids(self) -> None: + self.story.story_contents.nodes[0].destination_node_ids = 0 # type: ignore[assignment] self._assert_validation_error( 'Expected destination node ids to be a list') - def test_validate_node_id(self): + def test_validate_node_id(self) -> None: self.story.story_contents.nodes[0].destination_node_ids = [ self.NODE_ID_1] self._assert_validation_error( 'The story node with ID %s points to itself.' % self.NODE_ID_1) - def test_validate_non_str_node_id(self): - self.story.story_contents.nodes[0].destination_node_ids = [0] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_non_str_node_id(self) -> None: + self.story.story_contents.nodes[0].destination_node_ids = [0] # type: ignore[list-item] self._assert_validation_error('Expected node ID to be a string') - def test_validate_out_of_bounds_node_id(self): + def test_validate_out_of_bounds_node_id(self) -> None: self.story.story_contents.nodes[0].id = 'node_3' self._assert_validation_error( 'The node with id node_3 is out of bounds.') - def test_get_node_index_with_invalid_node_id(self): + def test_get_node_index_with_invalid_node_id(self) -> None: self.assertIsNone( - self.story.story_contents.get_node_index('invalid_node_id')) + self.story.story_contents.get_node_index( + 'invalid_node_id', + strict=False + ) + ) - def test_validate_empty_title(self): + def test_validate_empty_title(self) -> None: self.story.title = '' self._assert_validation_error('Title field should not be empty') - def test_story_summary_creation(self): + def test_story_summary_creation(self) -> None: curr_time = datetime.datetime.utcnow() story_summary = story_domain.StorySummary( 'story_id', 'title', 'description', 'en', 1, ['Title 1'], '#F8BF74', @@ -1347,7 +1660,7 @@ def test_story_summary_creation(self): self.assertEqual(story_summary.to_dict(), expected_dict) - def test_story_export_import_returns_original_object(self): + def test_story_export_import_returns_original_object(self) -> None: """Checks that to_dict and from_dict preserves all the data within a Story during export and import. 
""" @@ -1362,8 +1675,8 @@ def test_story_export_import_returns_original_object(self): class StorySummaryTests(test_utils.GenericTestBase): - def setUp(self): - super(StorySummaryTests, self).setUp() + def setUp(self) -> None: + super().setUp() current_time = datetime.datetime.utcnow() time_in_millisecs = utils.get_time_in_millisecs(current_time) self.story_summary_dict = { @@ -1377,7 +1690,7 @@ def setUp(self): 'thumbnail_filename': 'image.svg', 'language_code': 'en', 'id': 'story_id', - 'url_fragment': 'story-summary-frag' + 'url_fragment': 'story-summary-frag', } self.story_summary = story_domain.StorySummary( @@ -1385,32 +1698,37 @@ def setUp(self): '#F8BF74', 'image.svg', 'story-summary-frag', current_time, current_time) - def test_story_summary_gets_created(self): + def test_story_summary_gets_created(self) -> None: self.assertEqual( self.story_summary.to_dict(), self.story_summary_dict) - def _assert_validation_error(self, expected_error_substring): + # Here we use MyPy ignore because the signature of this method + # doesn't match with TestBase._assert_validation_error(). + def _assert_validation_error(self, expected_error_substring: str) -> None: # type: ignore[override] """Checks that the story summary passes validation. Args: expected_error_substring: str. String that should be a substring of the expected error message. """ - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): self.story_summary.validate() - def test_thumbnail_filename_validation(self): - self.story_summary.thumbnail_filename = [] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_thumbnail_filename_validation(self) -> None: + self.story_summary.thumbnail_filename = [] # type: ignore[assignment] self._assert_validation_error( 'Expected thumbnail filename to be a string, received') - def test_thumbnail_bg_validation(self): + def test_thumbnail_bg_validation(self) -> None: self.story_summary.thumbnail_bg_color = '#FFFFFF' self._assert_validation_error( 'Story thumbnail background color #FFFFFF is not supported.') - def test_thumbnail_filename_or_thumbnail_bg_color_is_none(self): + def test_thumbnail_filename_or_thumbnail_bg_color_is_none(self) -> None: self.story_summary.thumbnail_bg_color = '#F8BF74' self.story_summary.thumbnail_filename = None self._assert_validation_error( @@ -1420,38 +1738,44 @@ def test_thumbnail_filename_or_thumbnail_bg_color_is_none(self): self._assert_validation_error( 'Story thumbnail background color is not specified.') - def test_validation_passes_with_valid_properties(self): + def test_validation_passes_with_valid_properties(self) -> None: self.story_summary.validate() - def test_validation_fails_with_invalid_title(self): - self.story_summary.title = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_title(self) -> None: + self.story_summary.title = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected title to be a string, received 0'): self.story_summary.validate() - def test_validation_fails_with_empty_title(self): + def test_validation_fails_with_empty_title(self) -> None: self.story_summary.title = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Title field should not be empty'): self.story_summary.validate() - def test_validation_fails_with_empty_url_fragment(self): + def test_validation_fails_with_empty_url_fragment(self) -> None: self.story_summary.url_fragment = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Story Url Fragment field should not be empty'): self.story_summary.validate() - def test_validation_fails_with_nonstring_url_fragment(self): - self.story_summary.url_fragment = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_fails_with_nonstring_url_fragment(self) -> None: + self.story_summary.url_fragment = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Story Url Fragment field must be a string. Received 0.'): self.story_summary.validate() - def test_validation_fails_with_lengthy_url_fragment(self): + def test_validation_fails_with_lengthy_url_fragment(self) -> None: self.story_summary.url_fragment = 'abcd' * 10 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Story Url Fragment field should not exceed %d characters, ' 'received %s.' 
% ( @@ -1459,44 +1783,56 @@ def test_validation_fails_with_lengthy_url_fragment(self): self.story_summary.url_fragment)): self.story_summary.validate() - def test_validation_fails_with_invalid_chars_in_url_fragment(self): + def test_validation_fails_with_invalid_chars_in_url_fragment(self) -> None: self.story_summary.url_fragment = 'Abc Def!' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Story Url Fragment field contains invalid characters. ' 'Only lowercase words separated by hyphens are allowed. ' 'Received Abc Def!.'): self.story_summary.validate() - def test_validation_fails_with_invalid_description(self): - self.story_summary.description = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_fails_with_invalid_description(self) -> None: + self.story_summary.description = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected description to be a string, received 0'): self.story_summary.validate() - def test_validation_fails_with_invalid_node_titles(self): - self.story_summary.node_titles = '10' - with self.assertRaisesRegexp( + def test_validation_fails_with_invalid_node_titles(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.story_summary.node_titles = '10' # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected node_titles to be a list, received \'10\''): self.story_summary.validate() - self.story_summary.node_titles = [5] - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.story_summary.node_titles = [5] # type: ignore[list-item] + with self.assertRaisesRegex( utils.ValidationError, 'Expected each chapter title to be a string, received 5'): self.story_summary.validate() - def test_validation_fails_with_invalid_language_code(self): - self.story_summary.language_code = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validation_fails_with_invalid_language_code(self) -> None: + self.story_summary.language_code = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected language code to be a string, received 0'): self.story_summary.validate() - def test_validation_fails_with_unallowed_language_code(self): + def test_validation_fails_with_unallowed_language_code(self) -> None: self.story_summary.language_code = 'invalid' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid language code: invalid'): self.story_summary.validate() diff --git a/core/domain/story_fetchers.py b/core/domain/story_fetchers.py index 6913a093f34f..6b00da9c2912 100644 --- a/core/domain/story_fetchers.py +++ b/core/domain/story_fetchers.py @@ -23,18 +23,32 @@ from __future__ import annotations import copy +import itertools from core import feconf from core.domain import caching_services +from core.domain import classroom_services +from core.domain import exp_fetchers from core.domain import story_domain +from core.domain import topic_fetchers +from core.domain import user_services from core.platform import models +from typing import Dict, List, Literal, Optional, Sequence, overload + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import story_models + from mypy_imports import user_models + (story_models, user_models) = models.Registry.import_models( - [models.NAMES.story, models.NAMES.user]) + [models.Names.STORY, models.Names.USER]) def _migrate_story_contents_to_latest_schema( - versioned_story_contents, story_id): + versioned_story_contents: story_domain.VersionedStoryContentsDict, + story_id: str +) -> None: """Holds the responsibility of performing a step-by-step, sequential update of the story structure based on the schema version of the input story dictionary. 
If the current story_contents schema changes, a new @@ -66,7 +80,9 @@ def _migrate_story_contents_to_latest_schema( story_contents_schema_version += 1 -def get_story_from_model(story_model): +def get_story_from_model( + story_model: story_models.StoryModel +) -> story_domain.Story: """Returns a story domain object given a story model loaded from the datastore. @@ -80,7 +96,7 @@ def get_story_from_model(story_model): """ # Ensure the original story model does not get altered. - versioned_story_contents = { + versioned_story_contents: story_domain.VersionedStoryContentsDict = { 'schema_version': story_model.story_contents_schema_version, 'story_contents': copy.deepcopy(story_model.story_contents) } @@ -104,7 +120,9 @@ def get_story_from_model(story_model): story_model.last_updated) -def get_story_summary_from_model(story_summary_model): +def get_story_summary_from_model( + story_summary_model: story_models.StorySummaryModel +) -> story_domain.StorySummary: """Returns a domain object for an Oppia story summary given a story summary model. @@ -130,7 +148,43 @@ def get_story_summary_from_model(story_summary_model): ) -def get_story_by_id(story_id, strict=True, version=None): +@overload +def get_story_by_id( + story_id: str, +) -> story_domain.Story: ... + + +@overload +def get_story_by_id( + story_id: str, + *, + version: Optional[int] = None +) -> story_domain.Story: ... + + +@overload +def get_story_by_id( + story_id: str, + *, + strict: Literal[True], + version: Optional[int] = None +) -> story_domain.Story: ... + + +@overload +def get_story_by_id( + story_id: str, + *, + strict: Literal[False], + version: Optional[int] = None +) -> Optional[story_domain.Story]: ... + + +def get_story_by_id( + story_id: str, + strict: bool = True, + version: Optional[int] = None +) -> Optional[story_domain.Story]: """Returns a domain object representing a story. 
Args: @@ -166,7 +220,9 @@ def get_story_by_id(story_id, strict=True, version=None): return None -def get_story_by_url_fragment(url_fragment): +def get_story_by_url_fragment( + url_fragment: str +) -> Optional[story_domain.Story]: """Returns a domain object representing a story. Args: @@ -184,7 +240,27 @@ def get_story_by_url_fragment(url_fragment): return story -def get_story_summary_by_id(story_id, strict=True): +@overload +def get_story_summary_by_id( + story_id: str +) -> story_domain.StorySummary: ... + + +@overload +def get_story_summary_by_id( + story_id: str, *, strict: Literal[True] +) -> story_domain.StorySummary: ... + + +@overload +def get_story_summary_by_id( + story_id: str, *, strict: Literal[False] +) -> Optional[story_domain.StorySummary]: ... + + +def get_story_summary_by_id( + story_id: str, strict: bool = True +) -> Optional[story_domain.StorySummary]: """Returns a domain object representing a story summary. Args: @@ -206,24 +282,59 @@ def get_story_summary_by_id(story_id, strict=True): return None -def get_stories_by_ids(story_ids): +@overload +def get_stories_by_ids( + story_ids: List[str], *, strict: Literal[True] +) -> List[story_domain.Story]: ... + + +@overload +def get_stories_by_ids( + story_ids: List[str] +) -> List[Optional[story_domain.Story]]: ... + + +@overload +def get_stories_by_ids( + story_ids: List[str], *, strict: Literal[False] +) -> List[Optional[story_domain.Story]]: ... + + +def get_stories_by_ids( + story_ids: List[str], strict: bool = False +) -> Sequence[Optional[story_domain.Story]]: """Returns a list of stories matching the IDs provided. Args: story_ids: list(str). List of IDs to get stories for. + strict: bool. Whether to fail noisily if no story model exists + with a given ID exists in the datastore. Returns: list(Story|None). The list of stories corresponding to given ids. If a Story does not exist, the corresponding returned list element is None. + + Raises: + Exception. 
No story model exists for the given story_id. """ all_story_models = story_models.StoryModel.get_multi(story_ids) - stories = [ - get_story_from_model(story_model) if story_model is not None else None - for story_model in all_story_models] + stories: List[Optional[story_domain.Story]] = [] + for index, story_model in enumerate(all_story_models): + if story_model is None: + if strict: + raise Exception( + 'No story model exists for the story_id: %s' + % story_ids[index] + ) + stories.append(story_model) + elif story_model is not None: + stories.append(get_story_from_model(story_model)) return stories -def get_story_summaries_by_ids(story_ids): +def get_story_summaries_by_ids( + story_ids: List[str] +) -> List[story_domain.StorySummary]: """Returns the StorySummary objects corresponding the given story ids. Args: @@ -243,7 +354,78 @@ def get_story_summaries_by_ids(story_ids): return story_summaries -def get_latest_completed_node_ids(user_id, story_id): +def get_learner_group_syllabus_story_summaries( + story_ids: List[str] +) -> List[story_domain.LearnerGroupSyllabusStorySummaryDict]: + """Returns the learner group syllabus story summary dicts + corresponding the given story ids. + + Args: + story_ids: list(str). The list of story ids for which the story + summaries are to be returned. + + Returns: + list(LearnerGroupSyllabusStorySummaryDict). The story summaries + corresponds to given story ids. + """ + # Validating if story exists before adding it to all stories list is only + # done for mypy type checks as all story ids are supposed to be valid as + # a part of validation done for learner group syllabus before calling + # this function. 
+ all_stories = [ + story for story in get_stories_by_ids(story_ids) if story + ] + + topic_ids = list( + {story.corresponding_topic_id for story in all_stories} + ) + topics = topic_fetchers.get_topics_by_ids(topic_ids) + topic_id_to_topic_map = {} + for topic in topics: + # Ruling out the possibility of None for mypy type checking. Topic is + # guaranteed to exist as a part of validation done for story ids since + # story is bound to be part of a topic. + assert topic is not None + topic_id_to_topic_map[topic.id] = topic + + story_summaries_dicts = [ + story_summary.to_dict() for story_summary in + get_story_summaries_by_ids(story_ids) + ] + + return [ + { + 'id': story.id, + 'title': story.title, + 'description': story.description, + 'language_code': story.language_code, + 'version': story.version, + 'node_titles': summary_dict['node_titles'], + 'thumbnail_filename': story.thumbnail_filename, + 'thumbnail_bg_color': story.thumbnail_bg_color, + 'url_fragment': story.url_fragment, + 'story_model_created_on': summary_dict['story_model_created_on'], + 'story_model_last_updated': + summary_dict['story_model_last_updated'], + 'story_is_published': True, + 'completed_node_titles': [], + 'all_node_dicts': [ + node.to_dict() for node in + story.story_contents.nodes + ], + 'topic_name': + topic_id_to_topic_map[story.corresponding_topic_id].name, + 'topic_url_fragment': + topic_id_to_topic_map[ + story.corresponding_topic_id].url_fragment, + 'classroom_url_fragment': None + } + for (story, summary_dict) in + zip(all_stories, story_summaries_dicts) + ] + + +def get_latest_completed_node_ids(user_id: str, story_id: str) -> List[str]: """Returns the ids of the completed nodes that come latest in the story. 
Args: @@ -262,7 +444,7 @@ def get_latest_completed_node_ids(user_id, story_id): return [] num_of_nodes = min(len(progress_model.completed_node_ids), 3) - story = get_story_by_id(story_id) + story = get_story_by_id(story_id, strict=True) ordered_node_ids = ( [node.id for node in story.story_contents.get_ordered_nodes()]) ordered_completed_node_ids = ( @@ -272,7 +454,9 @@ def get_latest_completed_node_ids(user_id, story_id): return ordered_completed_node_ids[-num_of_nodes:] -def get_completed_nodes_in_story(user_id, story_id): +def get_completed_nodes_in_story( + user_id: str, story_id: str +) -> List[story_domain.StoryNode]: """Returns nodes that are completed in a story Args: @@ -283,7 +467,7 @@ def get_completed_nodes_in_story(user_id, story_id): list(StoryNode). The list of the story nodes that the user has completed. """ - story = get_story_by_id(story_id) + story = get_story_by_id(story_id, strict=True) completed_nodes = [] completed_node_ids = get_completed_node_ids(user_id, story_id) @@ -294,20 +478,162 @@ def get_completed_nodes_in_story(user_id, story_id): return completed_nodes -def get_pending_and_all_nodes_in_story(user_id, story_id): - """Returns the nodes that are pending in a story +def get_user_progress_in_story_chapters( + user_id: str, story_ids: List[str] +) -> List[story_domain.StoryChapterProgressSummaryDict]: + """Returns the progress of multiple users in multiple chapters. Args: user_id: str. The user id of the user. + story_ids: list(str). The ids of the stories. + + Returns: + list(StoryChapterProgressSummaryDict). The list of the progress + summaries of the user corresponding to all stories chapters. 
+ """ + all_valid_story_nodes: List[story_domain.StoryNode] = [] + for story in get_stories_by_ids(story_ids): + if story is not None: + all_valid_story_nodes.extend(story.story_contents.nodes) + exp_ids = [ + node.exploration_id for node in all_valid_story_nodes + if node.exploration_id + ] + exp_id_to_exp_map = exp_fetchers.get_multiple_explorations_by_id(exp_ids) + user_id_exp_id_combinations = list(itertools.product([user_id], exp_ids)) + exp_user_data_models = ( + user_models.ExplorationUserDataModel.get_multi( + user_id_exp_id_combinations)) + + all_chapters_progress: List[ + story_domain.StoryChapterProgressSummaryDict] = [] + for i, user_id_exp_id_pair in enumerate(user_id_exp_id_combinations): + exp_id = user_id_exp_id_pair[1] + exploration = exp_id_to_exp_map[exp_id] + all_checkpoints = user_services.get_checkpoints_in_order( + exploration.init_state_name, + exploration.states) + model = exp_user_data_models[i] + visited_checkpoints = 0 + if model is not None: + most_recently_visited_checkpoint = ( + model.most_recently_reached_checkpoint_state_name) + if most_recently_visited_checkpoint is not None: + visited_checkpoints = all_checkpoints.index( + most_recently_visited_checkpoint) + 1 + all_chapters_progress.append({ + 'exploration_id': exp_id, + 'visited_checkpoints_count': visited_checkpoints, + 'total_checkpoints_count': len(all_checkpoints) + }) + + return all_chapters_progress + + +def get_multi_users_progress_in_stories( + user_ids: List[str], story_ids: List[str] +) -> Dict[str, List[story_domain.LearnerGroupSyllabusStorySummaryDict]]: + """Returns the progress of given users in all given stories. + + Args: + user_ids: list(str). The user ids of the users. + story_ids: list(str). The list of story ids. + + Returns: + Dict(str, list(StoryProgressDict)). Dictionary of user id and their + corresponding list of story progress dicts. 
+ """ + all_valid_stories = [ + story for story in get_stories_by_ids(story_ids) if story + ] + + # Filter unique topic ids from all valid stories. + topic_ids = list( + {story.corresponding_topic_id for story in all_valid_stories} + ) + topics = topic_fetchers.get_topics_by_ids(topic_ids, strict=True) + topic_id_to_topic_map = {} + for topic in topics: + topic_id_to_topic_map[topic.id] = topic + + story_id_to_story_map = {story.id: story for story in all_valid_stories} + valid_story_ids = [story.id for story in all_valid_stories] + all_story_summaries = get_story_summaries_by_ids(valid_story_ids) + story_id_to_summary_map = { + summary.id: summary for summary in all_story_summaries + } + + # All poosible combinations of user_id and story_id for which progress + # models are returned. + all_posssible_combinations = itertools.product(user_ids, valid_story_ids) + progress_models = user_models.StoryProgressModel.get_multi( + user_ids, valid_story_ids + ) + all_users_stories_progress: Dict[ + str, List[story_domain.LearnerGroupSyllabusStorySummaryDict] + ] = {user_id: [] for user_id in user_ids} + for i, (user_id, story_id) in enumerate(all_posssible_combinations): + progress_model = progress_models[i] + completed_node_ids = [] + if progress_model is not None: + completed_node_ids = progress_model.completed_node_ids + story = story_id_to_story_map[story_id] + completed_node_titles = [ + node.title for node in story.story_contents.nodes + if node.id in completed_node_ids + ] + topic = topic_id_to_topic_map[story.corresponding_topic_id] + summary_dict = story_id_to_summary_map[story_id].to_dict() + all_users_stories_progress[user_id].append({ + 'id': summary_dict['id'], + 'title': summary_dict['title'], + 'description': summary_dict['description'], + 'language_code': summary_dict['language_code'], + 'version': summary_dict['version'], + 'node_titles': summary_dict['node_titles'], + 'thumbnail_filename': summary_dict['thumbnail_filename'], + 'thumbnail_bg_color': 
summary_dict['thumbnail_bg_color'], + 'url_fragment': summary_dict['url_fragment'], + 'story_model_created_on': + summary_dict['story_model_created_on'], + 'story_model_last_updated': + summary_dict['story_model_last_updated'], + 'story_is_published': True, + 'completed_node_titles': completed_node_titles, + 'all_node_dicts': [ + node.to_dict() for node in + story.story_contents.nodes + ], + 'topic_name': topic.name, + 'topic_url_fragment': topic.url_fragment, + 'classroom_url_fragment': + classroom_services.get_classroom_url_fragment_for_topic_id( + topic.id), + }) + + return all_users_stories_progress + + +def get_pending_and_all_nodes_in_story( + user_id: Optional[str], story_id: str +) -> Dict[str, List[story_domain.StoryNode]]: + """Returns the nodes that are pending in a story + + Args: + user_id: Optional[str]. The user id of the user, or None if + the user is not logged in. story_id: str. The id of the story. Returns: - list(StoryNode). The list of story nodes, pending for the user. + Dict[str, List[story_domain.StoryNode]]. The list of story nodes, + pending for the user. """ - story = get_story_by_id(story_id) + story = get_story_by_id(story_id, strict=True) pending_nodes = [] - completed_node_ids = get_completed_node_ids(user_id, story_id) + completed_node_ids = ( + get_completed_node_ids(user_id, story_id) if user_id else [] + ) for node in story.story_contents.nodes: if node.id not in completed_node_ids: pending_nodes.append(node) @@ -318,7 +644,7 @@ def get_pending_and_all_nodes_in_story(user_id, story_id): } -def get_completed_node_ids(user_id, story_id): +def get_completed_node_ids(user_id: str, story_id: str) -> List[str]: """Returns the ids of the nodes completed in the story. 
Args: @@ -331,10 +657,19 @@ def get_completed_node_ids(user_id, story_id): progress_model = user_models.StoryProgressModel.get( user_id, story_id, strict=False) - return progress_model.completed_node_ids if progress_model else [] + # TODO(#15621): The explicit declaration of type for ndb properties should + # be removed. Currently, these ndb properties are annotated with Any return + # type. Once we have proper return type we can remove this. + if progress_model: + completed_node_ids: List[str] = progress_model.completed_node_ids + return completed_node_ids + else: + return [] -def get_node_index_by_story_id_and_node_id(story_id, node_id): +def get_node_index_by_story_id_and_node_id( + story_id: str, node_id: str +) -> int: """Returns the index of the story node with the given story id and node id. @@ -347,14 +682,10 @@ def get_node_index_by_story_id_and_node_id(story_id, node_id): Raises: Exception. The given story does not exist. - Exception. The given node does not exist in the story. """ story = get_story_by_id(story_id, strict=False) if story is None: raise Exception('Story with id %s does not exist.' % story_id) node_index = story.story_contents.get_node_index(node_id) - if node_index is None: - raise Exception( - 'Story node with id %s does not exist in this story.' 
% node_id) return node_index diff --git a/core/domain/story_fetchers_test.py b/core/domain/story_fetchers_test.py index 5cc82d245e02..973dab51b7a0 100644 --- a/core/domain/story_fetchers_test.py +++ b/core/domain/story_fetchers_test.py @@ -16,6 +16,7 @@ from __future__ import annotations +from core import feconf from core.domain import story_domain from core.domain import story_fetchers from core.domain import story_services @@ -25,31 +26,36 @@ from core.platform import models from core.tests import test_utils -(story_models, user_models) = models.Registry.import_models( - [models.NAMES.story, models.NAMES.user]) +from typing import Final + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import story_models + +(story_models,) = models.Registry.import_models([models.Names.STORY]) class StoryFetchersUnitTests(test_utils.GenericTestBase): """Test the story fetchers module.""" - STORY_ID = None - NODE_ID_1 = story_domain.NODE_ID_PREFIX + '1' - NODE_ID_2 = 'node_2' - USER_ID = 'user' - story = None + NODE_ID_1: Final = story_domain.NODE_ID_PREFIX + '1' + NODE_ID_2: Final = story_domain.NODE_ID_PREFIX + '2' + USER_ID: Final = 'user' - def setUp(self): - super(StoryFetchersUnitTests, self).setUp() - self.STORY_ID = story_services.get_new_story_id() + def setUp(self) -> None: + super().setUp() + self.story_id = story_services.get_new_story_id() self.TOPIC_ID = topic_fetchers.get_new_topic_id() self.save_new_topic( self.TOPIC_ID, self.USER_ID, name='Topic', description='A new topic', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=0) - self.save_new_story(self.STORY_ID, self.USER_ID, self.TOPIC_ID) + self.save_new_story( + self.story_id, self.USER_ID, self.TOPIC_ID, url_fragment='story-one' + ) topic_services.add_canonical_story( - self.USER_ID, self.TOPIC_ID, self.STORY_ID) + self.USER_ID, self.TOPIC_ID, self.story_id) changelist = [ story_domain.StoryChange({ 'cmd': story_domain.CMD_ADD_STORY_NODE, 
@@ -58,9 +64,9 @@ def setUp(self): }) ] story_services.update_story( - self.USER_ID, self.STORY_ID, changelist, + self.USER_ID, self.story_id, changelist, 'Added node.') - self.story = story_fetchers.get_story_by_id(self.STORY_ID) + self.story = story_fetchers.get_story_by_id(self.story_id) self.signup('a@example.com', 'A') self.signup('b@example.com', 'B') self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -78,68 +84,275 @@ def setUp(self): self.user_admin = user_services.get_user_actions_info( self.user_id_admin) - def test_get_story_from_model(self): - story_model = story_models.StoryModel.get(self.STORY_ID) + def test_get_story_from_model(self) -> None: + schema_version = feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION - 1 + story_model = story_models.StoryModel.get(self.story_id) + story_model.story_contents_schema_version = schema_version story = story_fetchers.get_story_from_model(story_model) - self.assertEqual(story.to_dict(), self.story.to_dict()) - def test_get_story_summary_from_model(self): - story_summary_model = story_models.StorySummaryModel.get(self.STORY_ID) + def test_get_story_summary_from_model(self) -> None: + story_summary_model = story_models.StorySummaryModel.get(self.story_id) story_summary = story_fetchers.get_story_summary_from_model( story_summary_model) - self.assertEqual(story_summary.id, self.STORY_ID) + self.assertEqual(story_summary.id, self.story_id) self.assertEqual(story_summary.title, 'Title') self.assertEqual(story_summary.description, 'Description') self.assertEqual(story_summary.node_titles, ['Title 1']) self.assertEqual(story_summary.thumbnail_bg_color, None) self.assertEqual(story_summary.thumbnail_filename, None) - def test_get_story_by_id_with_valid_ids_returns_correct_dict(self): + def test_get_story_summaries_by_id(self) -> None: + story_summaries = story_fetchers.get_story_summaries_by_ids( + [self.story_id, 'someID']) + + self.assertEqual(len(story_summaries), 1) + 
self.assertEqual(story_summaries[0].id, self.story_id) + self.assertEqual(story_summaries[0].title, 'Title') + self.assertEqual(story_summaries[0].description, 'Description') + self.assertEqual(story_summaries[0].language_code, 'en') + self.assertEqual(story_summaries[0].node_titles, ['Title 1']) + self.assertEqual(story_summaries[0].thumbnail_filename, None) + self.assertEqual(story_summaries[0].thumbnail_bg_color, None) + self.assertEqual(story_summaries[0].version, 2) + + def test_get_latest_completed_node_ids(self) -> None: + self.assertEqual(story_fetchers.get_latest_completed_node_ids( + self.USER_ID, self.story_id), []) + story_services.record_completed_node_in_story_context( + self.USER_ID, self.story_id, self.NODE_ID_1) + self.assertEqual( + story_fetchers.get_latest_completed_node_ids( + self.USER_ID, self.story_id), + [self.NODE_ID_1]) + + def test_migrate_story_contents(self) -> None: + story_id = self.story_id + story_model = story_models.StoryModel.get(story_id) + versioned_story_contents: story_domain.VersionedStoryContentsDict = { + 'schema_version': story_model.story_contents_schema_version, + 'story_contents': story_model.story_contents + } + # Disable protected function for test. + story_fetchers._migrate_story_contents_to_latest_schema( # pylint: disable=protected-access + versioned_story_contents, story_id) + versioned_story_contents[ + 'schema_version' + ] = feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION - 1 + story_fetchers._migrate_story_contents_to_latest_schema( # pylint: disable=protected-access + versioned_story_contents, story_id + ) + versioned_story_contents['schema_version'] = 6 + with self.assertRaisesRegex( + Exception, + 'Sorry, we can only process v1-v%d story schemas at ' + 'present.' 
% feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION + ): + story_fetchers._migrate_story_contents_to_latest_schema( # pylint: disable=protected-access + versioned_story_contents, story_id + ) + + def test_get_story_by_url_fragment(self) -> None: + story = story_fetchers.get_story_by_url_fragment( + url_fragment='story-one') + # Ruling out the possibility of None for mypy type checking. + assert story is not None + self.assertEqual(story.id, self.story_id) + self.assertEqual(story.url_fragment, 'story-one') + story = story_fetchers.get_story_by_url_fragment( + url_fragment='fake-story') + self.assertEqual(story, None) + + def test_get_story_by_id_with_valid_ids_returns_correct_dict(self) -> None: expected_story = self.story.to_dict() - story = story_fetchers.get_story_by_id(self.STORY_ID) + story = story_fetchers.get_story_by_id(self.story_id) self.assertEqual(story.to_dict(), expected_story) - def test_get_stories_by_ids(self): + def test_get_stories_by_ids(self) -> None: expected_story = self.story.to_dict() - stories = story_fetchers.get_stories_by_ids([self.STORY_ID]) + stories = story_fetchers.get_stories_by_ids([self.story_id]) + # Ruling out the possibility of None for mypy type checking. 
+ assert stories[0] is not None self.assertEqual(len(stories), 1) self.assertEqual(stories[0].to_dict(), expected_story) - def test_get_stories_by_ids_for_non_existing_story_returns_none(self): + def test_raises_error_if_stories_fetched_with_invalid_id_and_strict( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'No story model exists for the story_id: invalid_id' + ): + story_fetchers.get_stories_by_ids(['invalid_id'], strict=True) + + def test_get_stories_by_ids_for_non_existing_story_returns_none( + self + ) -> None: non_exiting_story_id = 'invalid_id' expected_story = self.story.to_dict() stories = story_fetchers.get_stories_by_ids( - [self.STORY_ID, non_exiting_story_id]) + [self.story_id, non_exiting_story_id]) + # Ruling out the possibility of None for mypy type checking. + assert stories[0] is not None self.assertEqual(len(stories), 2) self.assertEqual(stories[0].to_dict(), expected_story) self.assertEqual(stories[1], None) - def test_get_story_summary_by_id(self): - story_summary = story_fetchers.get_story_summary_by_id(self.STORY_ID) + def test_get_multi_users_progress_in_stories(self) -> None: + all_users_stories_progress = ( + story_fetchers.get_multi_users_progress_in_stories( + [self.USER_ID], [self.story_id, 'invalid_story_id'] + ) + ) + all_stories = story_fetchers.get_stories_by_ids( + [self.story_id, 'invalid_story_id']) + + # Should return None for invalid story ID. 
+ self.assertIsNone(all_stories[1]) + + user_stories_progress = all_users_stories_progress[self.USER_ID] + + self.assertEqual(len(user_stories_progress), 1) + assert all_stories[0] is not None + self.assertEqual(user_stories_progress[0]['id'], self.story_id) + self.assertEqual(user_stories_progress[0]['completed_node_titles'], []) + self.assertEqual( + len(user_stories_progress[0]['all_node_dicts']), + len(all_stories[0].story_contents.nodes) + ) + self.assertEqual(user_stories_progress[0]['topic_name'], 'Topic') + + story_services.record_completed_node_in_story_context( + self.USER_ID, self.story_id, self.NODE_ID_1) + + all_users_stories_progress = ( + story_fetchers.get_multi_users_progress_in_stories( + [self.USER_ID], [self.story_id, 'invalid_story_id'] + ) + ) + user_stories_progress = all_users_stories_progress[self.USER_ID] - self.assertEqual(story_summary.id, self.STORY_ID) + self.assertEqual(len(user_stories_progress), 1) + # Ruling out the possibility of None for mypy type checking. 
+ assert user_stories_progress[0] is not None + self.assertEqual(user_stories_progress[0]['id'], self.story_id) + self.assertEqual( + user_stories_progress[0]['completed_node_titles'], ['Title 1']) + self.assertEqual(user_stories_progress[0]['topic_name'], 'Topic') + + def test_get_story_summary_by_id(self) -> None: + story_summary = story_fetchers.get_story_summary_by_id(self.story_id) + self.assertEqual(story_summary.id, self.story_id) self.assertEqual(story_summary.title, 'Title') self.assertEqual(story_summary.description, 'Description') self.assertEqual(story_summary.node_titles, ['Title 1']) self.assertEqual(story_summary.thumbnail_bg_color, None) self.assertEqual(story_summary.thumbnail_filename, None) + with self.swap_to_always_return( + story_models.StorySummaryModel, + 'get' + ): + story_summary = story_fetchers.get_story_summary_by_id('fakeID') + self.assertEqual(story_summary, None) + + def test_get_completed_node_id(self) -> None: + self.assertEqual( + story_fetchers.get_completed_node_ids('randomID', 'someID'), + [] + ) + story_services.record_completed_node_in_story_context( + self.USER_ID, self.story_id, self.NODE_ID_1) + story_services.record_completed_node_in_story_context( + self.USER_ID, self.story_id, self.NODE_ID_2) + self.assertEqual( + story_fetchers.get_completed_node_ids(self.USER_ID, self.story_id), + [self.NODE_ID_1, self.NODE_ID_2] + ) + + def test_get_pending_and_all_nodes_in_story(self) -> None: + result = story_fetchers.get_pending_and_all_nodes_in_story( + self.USER_ID, self.story_id + ) + pending_nodes = result['pending_nodes'] + self.assertEqual(len(pending_nodes), 1) + self.assertEqual(pending_nodes[0].description, '') + self.assertEqual(pending_nodes[0].title, 'Title 1') + self.assertEqual(pending_nodes[0].id, self.NODE_ID_1) + self.assertEqual(pending_nodes[0].exploration_id, None) + + def test_get_completed_nodes_in_story(self) -> None: + story = story_fetchers.get_story_by_id(self.story_id) + 
story_services.record_completed_node_in_story_context( + self.USER_ID, self.story_id, self.NODE_ID_1) + story_services.record_completed_node_in_story_context( + self.USER_ID, self.story_id, self.NODE_ID_2) + for ind, completed_node in enumerate( + story_fetchers.get_completed_nodes_in_story( + self.USER_ID, self.story_id)): + self.assertEqual( + completed_node.to_dict(), + story.story_contents.nodes[ind].to_dict() + ) - def test_get_node_index_by_story_id_and_node_id(self): + def test_get_node_index_by_story_id_and_node_id(self) -> None: # Tests correct node index should be returned when story and node exist. node_index = story_fetchers.get_node_index_by_story_id_and_node_id( - self.STORY_ID, self.NODE_ID_1) + self.story_id, self.NODE_ID_1) self.assertEqual(node_index, 0) # Tests error should be raised if story or node doesn't exist. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, - 'Story node with id node_5 does not exist in this story.'): + 'The node with id node_5 is not part of this story.'): story_fetchers.get_node_index_by_story_id_and_node_id( - self.STORY_ID, 'node_5') + self.story_id, 'node_5') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Story with id story_id_2 does not exist.'): story_fetchers.get_node_index_by_story_id_and_node_id( 'story_id_2', self.NODE_ID_1) + + def test_get_learner_group_syllabus_story_summaries(self) -> None: + story_summaries = ( + story_fetchers.get_learner_group_syllabus_story_summaries( + [self.story_id])) + + self.assertEqual(len(story_summaries), 1) + self.assertEqual(story_summaries[0]['id'], self.story_id) + self.assertEqual(story_summaries[0]['title'], 'Title') + self.assertEqual(story_summaries[0]['description'], 'Description') + self.assertEqual(story_summaries[0]['node_titles'], ['Title 1']) + self.assertEqual(story_summaries[0]['thumbnail_bg_color'], None) + self.assertEqual(story_summaries[0]['thumbnail_filename'], None) + 
self.assertEqual(story_summaries[0]['topic_name'], 'Topic') + + def test_get_user_progress_in_story_chapters(self) -> None: + exp_id_1 = 'expid1' + self.save_new_valid_exploration(exp_id_1, self.USER_ID) + + learner_id = 'learner1' + change_list = [ + story_domain.StoryChange({ + 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, + 'property_name': ( + story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID), + 'node_id': story_domain.NODE_ID_PREFIX + '1', + 'old_value': None, + 'new_value': exp_id_1 + }) + ] + story_services.update_story( + self.USER_ID, self.story_id, change_list, + 'Added node.') + + user_services.update_learner_checkpoint_progress( + learner_id, exp_id_1, 'Introduction', 1) + + user_progress = story_fetchers.get_user_progress_in_story_chapters( + learner_id, [self.story_id]) + + self.assertEqual(len(user_progress), 1) + self.assertEqual(user_progress[0]['exploration_id'], exp_id_1) + self.assertEqual(user_progress[0]['visited_checkpoints_count'], 1) + self.assertEqual(user_progress[0]['total_checkpoints_count'], 1) diff --git a/core/domain/story_services.py b/core/domain/story_services.py index bedadaac59c2..46dfcfe1f6b6 100644 --- a/core/domain/story_services.py +++ b/core/domain/story_services.py @@ -31,6 +31,7 @@ from core.domain import caching_services from core.domain import exp_fetchers from core.domain import exp_services +from core.domain import learner_group_services from core.domain import opportunity_services from core.domain import rights_manager from core.domain import story_domain @@ -39,12 +40,19 @@ from core.domain import topic_fetchers from core.platform import models +from typing import List, Sequence, Tuple, cast + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + from mypy_imports import story_models + from mypy_imports import user_models + (exp_models, story_models, user_models,) = models.Registry.import_models( - [models.NAMES.exploration, models.NAMES.story, models.NAMES.user]) 
-transaction_services = models.Registry.import_transaction_services() + [models.Names.EXPLORATION, models.Names.STORY, models.Names.USER]) -def get_new_story_id(): +def get_new_story_id() -> str: """Returns a new story id. Returns: @@ -53,7 +61,12 @@ def get_new_story_id(): return story_models.StoryModel.get_new_id('') -def _create_story(committer_id, story, commit_message, commit_cmds): +def _create_story( + committer_id: str, + story: story_domain.Story, + commit_message: str, + commit_cmds: List[story_domain.StoryChange] +) -> None: """Creates a new story. Args: @@ -85,7 +98,7 @@ def _create_story(committer_id, story, commit_message, commit_cmds): create_story_summary(story.id) -def save_new_story(committer_id, story): +def save_new_story(committer_id: str, story: story_domain.Story) -> None: """Saves a new story. Args: @@ -102,7 +115,9 @@ def save_new_story(committer_id, story): # Repository SAVE and DELETE methods. -def apply_change_list(story_id, change_list): +def apply_change_list( + story_id: str, change_list: List[story_domain.StoryChange] +) -> Tuple[story_domain.Story, List[str], List[str]]: """Applies a changelist to a story and returns the result. Args: @@ -114,6 +129,9 @@ def apply_change_list(story_id, change_list): Story, list(str), list(str). The resulting story domain object, the exploration IDs removed from story and the exploration IDs added to the story. + + Raises: + Exception. The elements in change list are not of domain object type. """ story = story_fetchers.get_story_by_id(story_id) exp_ids_in_old_story = story.story_contents.get_all_linked_exp_ids() @@ -122,82 +140,222 @@ def apply_change_list(story_id, change_list): if not isinstance(change, story_domain.StoryChange): raise Exception('Expected change to be of type StoryChange') if change.cmd == story_domain.CMD_ADD_STORY_NODE: - story.add_node(change.node_id, change.title) + # Here we use cast because we are narrowing down the type from + # StoryChange to a specific change command. 
+ add_story_node_cmd = cast( + story_domain.AddStoryNodeCmd, + change + ) + story.add_node( + add_story_node_cmd.node_id, + add_story_node_cmd.title + ) elif change.cmd == story_domain.CMD_DELETE_STORY_NODE: - story.delete_node(change.node_id) + # Here we use cast because we are narrowing down the type from + # StoryChange to a specific change command. + delete_story_node_cmd = cast( + story_domain.DeleteStoryNodeCmd, + change + ) + story.delete_node(delete_story_node_cmd.node_id) elif (change.cmd == story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS): - if change.new_value: - story.mark_node_outline_as_finalized(change.node_id) + # Here we use cast because we are narrowing down the type from + # StoryChange to a specific change command. + update_story_node_outline_status = cast( + story_domain.UpdateStoryNodeOutlineStatusCmd, + change + ) + if update_story_node_outline_status.new_value: + story.mark_node_outline_as_finalized( + update_story_node_outline_status.node_id + ) else: - story.mark_node_outline_as_unfinalized(change.node_id) + story.mark_node_outline_as_unfinalized( + update_story_node_outline_status.node_id + ) elif change.cmd == story_domain.CMD_UPDATE_STORY_NODE_PROPERTY: if (change.property_name == story_domain.STORY_NODE_PROPERTY_OUTLINE): - story.update_node_outline(change.node_id, change.new_value) + # Here we use cast because this 'if' condition forces + # change to have type UpdateStoryNodePropertyOutlineCmd. + update_node_outline_cmd = cast( + story_domain.UpdateStoryNodePropertyOutlineCmd, + change + ) + story.update_node_outline( + update_node_outline_cmd.node_id, + update_node_outline_cmd.new_value + ) elif (change.property_name == story_domain.STORY_NODE_PROPERTY_TITLE): - story.update_node_title(change.node_id, change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type UpdateStoryNodePropertyTitleCmd. 
+ update_node_title_cmd = cast( + story_domain.UpdateStoryNodePropertyTitleCmd, + change + ) + story.update_node_title( + update_node_title_cmd.node_id, + update_node_title_cmd.new_value + ) elif (change.property_name == story_domain.STORY_NODE_PROPERTY_DESCRIPTION): + # Here we use cast because this 'elif' condition forces + # change to have type UpdateStoryNodePropertyDescriptionCmd. + update_node_description_cmd = cast( + story_domain.UpdateStoryNodePropertyDescriptionCmd, + change + ) story.update_node_description( - change.node_id, change.new_value) + update_node_description_cmd.node_id, + update_node_description_cmd.new_value + ) elif (change.property_name == story_domain.STORY_NODE_PROPERTY_THUMBNAIL_FILENAME): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateStoryNodePropertyThumbnailFilenameCmd. + update_node_thumbnail_filename_cmd = cast( + story_domain.UpdateStoryNodePropertyThumbnailFilenameCmd, + change + ) story.update_node_thumbnail_filename( - change.node_id, change.new_value) + update_node_thumbnail_filename_cmd.node_id, + update_node_thumbnail_filename_cmd.new_value + ) elif (change.property_name == story_domain.STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateStoryNodePropertyThumbnailBGColorCmd. + update_node_thumbnail_bg_color = cast( + story_domain.UpdateStoryNodePropertyThumbnailBGColorCmd, + change + ) story.update_node_thumbnail_bg_color( - change.node_id, change.new_value) + update_node_thumbnail_bg_color.node_id, + update_node_thumbnail_bg_color.new_value + ) elif (change.property_name == story_domain.STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateStoryNodePropertyAcquiredSkillIdsCmd. 
+ update_node_acquired_skill_ids_cmd = cast( + story_domain.UpdateStoryNodePropertyAcquiredSkillIdsCmd, + change + ) story.update_node_acquired_skill_ids( - change.node_id, change.new_value) + update_node_acquired_skill_ids_cmd.node_id, + update_node_acquired_skill_ids_cmd.new_value + ) elif (change.property_name == story_domain.STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateStoryNodePropertyPrerequisiteSkillIdsCmd. + update_prerequisite_skill_ids_cmd = cast( + story_domain.UpdateStoryNodePropertyPrerequisiteSkillIdsCmd, # pylint: disable=line-too-long + change + ) story.update_node_prerequisite_skill_ids( - change.node_id, change.new_value) + update_prerequisite_skill_ids_cmd.node_id, + update_prerequisite_skill_ids_cmd.new_value + ) elif (change.property_name == story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateStoryNodePropertyDestinationNodeIdsCmd. + update_node_destination_node_ids_cmd = cast( + story_domain.UpdateStoryNodePropertyDestinationNodeIdsCmd, # pylint: disable=line-too-long + change + ) story.update_node_destination_node_ids( - change.node_id, change.new_value) + update_node_destination_node_ids_cmd.node_id, + update_node_destination_node_ids_cmd.new_value + ) elif (change.property_name == story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateStoryNodePropertyExplorationIdCmd. 
+ update_node_exploration_id_cmd = cast( + story_domain.UpdateStoryNodePropertyExplorationIdCmd, + change + ) story.update_node_exploration_id( - change.node_id, change.new_value) + update_node_exploration_id_cmd.node_id, + update_node_exploration_id_cmd.new_value + ) elif change.cmd == story_domain.CMD_UPDATE_STORY_PROPERTY: - if (change.property_name == + # Here we use cast because we are narrowing down the type from + # StoryChange to a specific change command. + update_story_property_cmd = cast( + story_domain.UpdateStoryPropertyCmd, + change + ) + if (update_story_property_cmd.property_name == story_domain.STORY_PROPERTY_TITLE): - story.update_title(change.new_value) - elif (change.property_name == + story.update_title(update_story_property_cmd.new_value) + elif (update_story_property_cmd.property_name == story_domain.STORY_PROPERTY_THUMBNAIL_FILENAME): - story.update_thumbnail_filename(change.new_value) - elif (change.property_name == + story.update_thumbnail_filename( + update_story_property_cmd.new_value + ) + elif (update_story_property_cmd.property_name == story_domain.STORY_PROPERTY_THUMBNAIL_BG_COLOR): - story.update_thumbnail_bg_color(change.new_value) - elif (change.property_name == + story.update_thumbnail_bg_color( + update_story_property_cmd.new_value + ) + elif (update_story_property_cmd.property_name == story_domain.STORY_PROPERTY_DESCRIPTION): - story.update_description(change.new_value) - elif (change.property_name == + story.update_description( + update_story_property_cmd.new_value + ) + elif (update_story_property_cmd.property_name == story_domain.STORY_PROPERTY_NOTES): - story.update_notes(change.new_value) - elif (change.property_name == + story.update_notes( + update_story_property_cmd.new_value + ) + elif (update_story_property_cmd.property_name == story_domain.STORY_PROPERTY_LANGUAGE_CODE): - story.update_language_code(change.new_value) - elif (change.property_name == + story.update_language_code( + update_story_property_cmd.new_value + 
) + elif (update_story_property_cmd.property_name == story_domain.STORY_PROPERTY_URL_FRAGMENT): - story.update_url_fragment(change.new_value) - elif (change.property_name == + story.update_url_fragment( + update_story_property_cmd.new_value + ) + elif (update_story_property_cmd.property_name == story_domain.STORY_PROPERTY_META_TAG_CONTENT): - story.update_meta_tag_content(change.new_value) + story.update_meta_tag_content( + update_story_property_cmd.new_value + ) elif change.cmd == story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY: if (change.property_name == story_domain.INITIAL_NODE_ID): - story.update_initial_node(change.new_value) + # Here we use cast because this 'if' + # condition forces change to have type + # UpdateStoryContentsPropertyInitialNodeIdCmd. + update_initial_node_id_cmd = cast( + story_domain.UpdateStoryContentsPropertyInitialNodeIdCmd, + change + ) + story.update_initial_node( + update_initial_node_id_cmd.new_value + ) if change.property_name == story_domain.NODE: + # Here we use cast because this 'elif' condition forces + # change to have type UpdateStoryContentsPropertyNodeCmd. + update_node_cmd = cast( + story_domain.UpdateStoryContentsPropertyNodeCmd, + change + ) story.rearrange_node_in_story( - change.old_value, change.new_value) + update_node_cmd.old_value, update_node_cmd.new_value) elif ( change.cmd == story_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION): @@ -223,7 +381,7 @@ def apply_change_list(story_id, change_list): raise e -def does_story_exist_with_url_fragment(url_fragment): +def does_story_exist_with_url_fragment(url_fragment: str) -> bool: """Checks if the url fragment for the story exists. Args: @@ -237,17 +395,21 @@ def does_story_exist_with_url_fragment(url_fragment): def validate_prerequisite_skills_in_story_contents( - corresponding_topic_id, story_contents): + skill_ids_in_corresponding_topic: List[str], + story_contents: story_domain.StoryContents +) -> None: """Validates the prerequisites skills in the story contents. 
Args: - corresponding_topic_id: str. The corresponding topic id of the story. + skill_ids_in_corresponding_topic: list(str). List of skill IDs in + the corresponding topic of the story. story_contents: StoryContents. The story contents. Raises: ValidationError. Expected prerequisite skills to have been acquired in previous nodes. ValidationError. Expected story to not contain loops. + Exception. Initial node id should not be none. """ if len(story_contents.nodes) == 0: return @@ -256,6 +418,8 @@ def validate_prerequisite_skills_in_story_contents( # structure. nodes_queue = [] is_node_visited = [False] * len(story_contents.nodes) + # Ruling out the possibility of None for mypy type checking. + assert story_contents.initial_node_id is not None starting_node_index = story_contents.get_node_index( story_contents.initial_node_id) nodes_queue.append(story_contents.nodes[starting_node_index].id) @@ -290,12 +454,9 @@ def validate_prerequisite_skills_in_story_contents( raise utils.ValidationError( 'Loops are not allowed in stories.') destination_node = story_contents.nodes[node_index] - skill_ids_present_in_topic = ( - topic_fetchers.get_topic_by_id( - corresponding_topic_id).get_all_skill_ids()) # Include only skill ids relevant to the topic for validation. topic_relevant_skill_ids = list( - set(skill_ids_present_in_topic).intersection( + set(skill_ids_in_corresponding_topic).intersection( set(destination_node.prerequisite_skill_ids))) if not ( set( @@ -312,7 +473,9 @@ def validate_prerequisite_skills_in_story_contents( nodes_queue.append(node_id) -def validate_explorations_for_story(exp_ids, strict): +def validate_explorations_for_story( + exp_ids: List[str], strict: bool +) -> List[str]: """Validates the explorations in the given story and checks whether they are compatible with the mobile app and ready for publishing. @@ -334,6 +497,7 @@ def validate_explorations_for_story(exp_ids, strict): explorations before adding them to a story. ValidationError. 
All explorations in a story should be of the same category. + Exception. Exploration validation failed for given exploration IDs. """ validation_error_messages = [] @@ -369,6 +533,17 @@ def validate_explorations_for_story(exp_ids, strict): raise utils.ValidationError(error_string) validation_error_messages.append(error_string) + if exps_dict: + for exp_id, exp in exps_dict.items(): + if exp.category not in constants.ALL_CATEGORIES: + error_string = ( + 'All explorations in a story should be of a ' + 'default category. The exploration with ID %s has' + ' an invalid category %s.' % (exp_id, exp.category)) + if strict: + raise utils.ValidationError(error_string) + validation_error_messages.append(error_string) + if exps_dict: for exp_id in exp_ids: if exp_id in exps_dict: @@ -391,13 +566,48 @@ def validate_explorations_for_story(exp_ids, strict): logging.exception( 'Exploration validation failed for exploration with ID: ' '%s. Error: %s' % (exp_id, e)) - raise Exception(e) + raise Exception(e) from e return validation_error_messages +def populate_story_model_fields( + story_model: story_models.StoryModel, story: story_domain.Story +) -> story_models.StoryModel: + """Populate story model with the data from story object. + + Args: + story_model: StoryModel. The model to populate. + story: Story. The story domain object which should be used to + populate the model. + + Returns: + StoryModel. Populated model. 
+ """ + story_model.description = story.description + story_model.title = story.title + story_model.thumbnail_bg_color = story.thumbnail_bg_color + story_model.thumbnail_filename = story.thumbnail_filename + story_model.thumbnail_size_in_bytes = story.thumbnail_size_in_bytes + story_model.notes = story.notes + story_model.language_code = story.language_code + story_model.story_contents_schema_version = ( + story.story_contents_schema_version) + story_model.story_contents = story.story_contents.to_dict() + story_model.corresponding_topic_id = story.corresponding_topic_id + story_model.version = story.version + story_model.url_fragment = story.url_fragment + story_model.meta_tag_content = story.meta_tag_content + return story_model + + def _save_story( - committer_id, story, commit_message, change_list, story_is_published): + committer_id: str, + story: story_domain.Story, + commit_message: str, + change_list: List[story_domain.StoryChange], + story_is_published: bool +) -> None: """Validates a story and commits it to persistent storage. If successful, increments the version number of the incoming story domain object by 1. @@ -421,8 +631,10 @@ def _save_story( 'save story %s: %s' % (story.id, change_list)) story.validate() + corresponding_topic = ( + topic_fetchers.get_topic_by_id(story.corresponding_topic_id)) validate_prerequisite_skills_in_story_contents( - story.corresponding_topic_id, story.story_contents) + corresponding_topic.get_all_skill_ids(), story.story_contents) if story_is_published: exp_ids = [] @@ -445,26 +657,14 @@ def _save_story( 'Unexpected error: trying to update version %s of story ' 'from version %s. Please reload the page and try again.' % (story_model.version, story.version)) - elif story.version < story_model.version: + + if story.version < story_model.version: raise Exception( 'Trying to update version %s of story from version %s, ' 'which is too old. Please reload the page and try again.' 
% (story_model.version, story.version)) - story_model.description = story.description - story_model.title = story.title - story_model.thumbnail_bg_color = story.thumbnail_bg_color - story_model.thumbnail_filename = story.thumbnail_filename - story_model.thumbnail_size_in_bytes = story.thumbnail_size_in_bytes - story_model.notes = story.notes - story_model.language_code = story.language_code - story_model.story_contents_schema_version = ( - story.story_contents_schema_version) - story_model.story_contents = story.story_contents.to_dict() - story_model.corresponding_topic_id = story.corresponding_topic_id - story_model.version = story.version - story_model.url_fragment = story.url_fragment - story_model.meta_tag_content = story.meta_tag_content + story_model = populate_story_model_fields(story_model, story) change_dicts = [change.to_dict() for change in change_list] story_model.commit(committer_id, commit_message, change_dicts) caching_services.delete_multi( @@ -472,7 +672,7 @@ def _save_story( story.version += 1 -def is_story_published_and_present_in_topic(story): +def is_story_published_and_present_in_topic(story: story_domain.Story) -> bool: """Returns whether a story is published. Raises an exception if the story is not present in the corresponding topic's story references. @@ -481,6 +681,10 @@ def is_story_published_and_present_in_topic(story): Returns: bool. Whether the supplied story is published. + + Raises: + ValidationError. The story does not belong to any valid topic. + Exception. The story does not belong to the expected topic. """ topic = topic_fetchers.get_topic_by_id( story.corresponding_topic_id, strict=False) @@ -506,7 +710,11 @@ def is_story_published_and_present_in_topic(story): def update_story( - committer_id, story_id, change_list, commit_message): + committer_id: str, + story_id: str, + change_list: List[story_domain.StoryChange], + commit_message: str +) -> None: """Updates a story. Commits changes. 
# NOTE: This function should not be called on its own. Access it @@ -518,11 +726,13 @@ def update_story( story_id: str. The story id. change_list: list(StoryChange). These changes are applied in sequence to produce the resulting story. - commit_message: str or None. A description of changes made to the + commit_message: str. A description of changes made to the story. Raises: + ValueError. Expected a commit message but received None. ValidationError. Exploration is already linked to a different story. + ValidationError. Story url fragment is not unique across the site. """ if not commit_message: raise ValueError('Expected a commit message but received none.') @@ -531,11 +741,11 @@ def update_story( new_story, exp_ids_removed_from_story, exp_ids_added_to_story = ( apply_change_list(story_id, change_list)) story_is_published = is_story_published_and_present_in_topic(new_story) - exploration_context_models_to_be_deleted = ( + exploration_context_models_to_be_deleted_with_none = ( exp_models.ExplorationContextModel.get_multi( exp_ids_removed_from_story)) exploration_context_models_to_be_deleted = [ - model for model in exploration_context_models_to_be_deleted + model for model in exploration_context_models_to_be_deleted_with_none if model is not None] exploration_context_models_collisions_list = ( exp_models.ExplorationContextModel.get_multi( @@ -573,7 +783,9 @@ def update_story( exp_models.ExplorationContextModel.put_multi(new_exploration_context_models) -def delete_story(committer_id, story_id, force_deletion=False): +def delete_story( + committer_id: str, story_id: str, force_deletion: bool = False +) -> None: """Deletes the story with the given story_id. 
Args: @@ -600,13 +812,15 @@ def delete_story(committer_id, story_id, force_deletion=False): suggestion_services.auto_reject_translation_suggestions_for_exp_ids( exp_ids) - exploration_context_models = ( + exploration_context_models: Sequence[ + exp_models.ExplorationContextModel + ] = ( exp_models.ExplorationContextModel.get_all().filter( exp_models.ExplorationContextModel.story_id == story_id ).fetch() ) exp_models.ExplorationContextModel.delete_multi( - exploration_context_models + list(exploration_context_models) ) # This must come after the story is retrieved. Otherwise the memcache @@ -622,8 +836,11 @@ def delete_story(committer_id, story_id, force_deletion=False): opportunity_services.delete_exp_opportunities_corresponding_to_story( story_id) + # Delete references of the story from all related learner groups. + learner_group_services.remove_story_reference_from_learner_groups(story_id) + -def delete_story_summary(story_id): +def delete_story_summary(story_id: str) -> None: """Delete a story summary model. Args: @@ -634,7 +851,9 @@ def delete_story_summary(story_id): story_models.StorySummaryModel.get(story_id).delete() -def compute_summary_of_story(story): +def compute_summary_of_story( + story: story_domain.Story +) -> story_domain.StorySummary: """Create a StorySummary domain object for a given Story domain object and return it. @@ -643,9 +862,17 @@ def compute_summary_of_story(story): Returns: StorySummary. The computed summary for the given story. + + Raises: + Exception. No data available for when the story was last_updated on. """ story_model_node_titles = [ node.title for node in story.story_contents.nodes] + + if story.created_on is None or story.last_updated is None: + raise Exception( + 'No data available for when the story was last_updated on.' 
+ ) story_summary = story_domain.StorySummary( story.id, story.title, story.description, story.language_code, story.version, story_model_node_titles, story.thumbnail_bg_color, @@ -656,7 +883,7 @@ def compute_summary_of_story(story): return story_summary -def create_story_summary(story_id): +def create_story_summary(story_id: str) -> None: """Creates and stores a summary of the given story. Args: @@ -667,13 +894,19 @@ def create_story_summary(story_id): save_story_summary(story_summary) -def save_story_summary(story_summary): - """Save a story summary domain object as a StorySummaryModel - entity in the datastore. +def populate_story_summary_model_fields( + story_summary_model: story_models.StorySummaryModel, + story_summary: story_domain.StorySummary +) -> story_models.StorySummaryModel: + """Populate story summary model with the data from story summary object. Args: - story_summary: StorySummary. The story summary object to be saved in the - datastore. + story_summary_model: StorySummaryModel. The model to populate. + story_summary: StorySummary. The story summary domain object which + should be used to populate the model. + + Returns: + StorySummaryModel. Populated model. 
""" story_summary_dict = { 'title': story_summary.title, @@ -689,21 +922,36 @@ def save_story_summary(story_summary): 'story_model_created_on': ( story_summary.story_model_created_on) } - - story_summary_model = ( - story_models.StorySummaryModel.get_by_id(story_summary.id)) if story_summary_model is not None: story_summary_model.populate(**story_summary_dict) - story_summary_model.update_timestamps() - story_summary_model.put() else: story_summary_dict['id'] = story_summary.id - model = story_models.StorySummaryModel(**story_summary_dict) - model.update_timestamps() - model.put() + story_summary_model = story_models.StorySummaryModel( + **story_summary_dict) + + return story_summary_model + + +def save_story_summary(story_summary: story_domain.StorySummary) -> None: + """Save a story summary domain object as a StorySummaryModel + entity in the datastore. + + Args: + story_summary: StorySummary. The story summary object to be saved in the + datastore. + """ + existing_skill_summary_model = ( + story_models.StorySummaryModel.get_by_id(story_summary.id)) + story_summary_model = populate_story_summary_model_fields( + existing_skill_summary_model, story_summary + ) + story_summary_model.update_timestamps() + story_summary_model.put() -def record_completed_node_in_story_context(user_id, story_id, node_id): +def record_completed_node_in_story_context( + user_id: str, story_id: str, node_id: str +) -> None: """Records a node by a given user in a given story context as having been played. 
diff --git a/core/domain/story_services_test.py b/core/domain/story_services_test.py index 9d0a273b852c..d0c4597f64d1 100644 --- a/core/domain/story_services_test.py +++ b/core/domain/story_services_test.py @@ -20,12 +20,11 @@ import os from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.domain import exp_domain from core.domain import exp_services -from core.domain import fs_domain +from core.domain import fs_services from core.domain import param_domain from core.domain import story_domain from core.domain import story_fetchers @@ -36,22 +35,27 @@ from core.platform import models from core.tests import test_utils +from typing import Final, List, Optional + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import story_models + from mypy_imports import user_models + (story_models, user_models) = models.Registry.import_models( - [models.NAMES.story, models.NAMES.user]) + [models.Names.STORY, models.Names.USER]) class StoryServicesUnitTests(test_utils.GenericTestBase): """Test the story services module.""" - STORY_ID = None - EXP_ID = 'exp_id' - NODE_ID_1 = story_domain.NODE_ID_PREFIX + '1' - NODE_ID_2 = 'node_2' - USER_ID = 'user' - story = None + EXP_ID: Final = 'exp_id' + NODE_ID_1: Final = story_domain.NODE_ID_PREFIX + '1' + NODE_ID_2: Final = 'node_2' + USER_ID: Final = 'user' - def setUp(self): - super(StoryServicesUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup('a@example.com', 'A') self.signup('b@example.com', 'B') self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) @@ -62,7 +66,7 @@ def setUp(self): self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) self.STORY_ID = story_services.get_new_story_id() self.TOPIC_ID = topic_fetchers.get_new_topic_id() - self.save_new_topic( + self.topic = self.save_new_topic( self.TOPIC_ID, self.USER_ID, name='Topic', abbreviated_name='topic-one', url_fragment='topic-one', 
description='A new topic', @@ -102,8 +106,9 @@ def setUp(self): self.user_b = user_services.get_user_actions_info(self.user_id_b) self.user_admin = user_services.get_user_actions_info( self.user_id_admin) + self.OLD_VALUE: List[str] = [] - def test_compute_summary(self): + def test_compute_summary(self) -> None: story_summary = story_services.compute_summary_of_story(self.story) self.assertEqual(story_summary.id, self.STORY_ID) @@ -113,21 +118,34 @@ def test_compute_summary(self): self.assertEqual(story_summary.thumbnail_bg_color, None) self.assertEqual(story_summary.thumbnail_filename, None) - def test_get_new_story_id(self): + def test_raises_error_when_the_story_provided_with_no_created_on_data( + self + ) -> None: + self.story.created_on = None + + with self.assertRaisesRegex( + Exception, + 'No data available for when the story was last_updated' + ): + story_services.compute_summary_of_story(self.story) + + def test_get_new_story_id(self) -> None: new_story_id = story_services.get_new_story_id() self.assertEqual(len(new_story_id), 12) self.assertEqual(story_models.StoryModel.get_by_id(new_story_id), None) - def test_commit_log_entry(self): + def test_commit_log_entry(self) -> None: story_commit_log_entry = ( story_models.StoryCommitLogEntryModel.get_commit(self.STORY_ID, 1) ) + # Ruling out the possibility of None for mypy type checking. + assert story_commit_log_entry is not None self.assertEqual(story_commit_log_entry.commit_type, 'create') self.assertEqual(story_commit_log_entry.story_id, self.STORY_ID) self.assertEqual(story_commit_log_entry.user_id, self.USER_ID) - def test_update_story_properties(self): + def test_update_story_properties(self) -> None: changelist = [ story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY, @@ -164,13 +182,11 @@ def test_update_story_properties(self): ] # Save a dummy image on filesystem, to be used as thumbnail. 
- with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_STORY, self.STORY_ID)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_STORY, self.STORY_ID) fs.commit( '%s/image.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, mimetype='image/svg+xml') @@ -197,7 +213,25 @@ def test_update_story_properties(self): constants.ALLOWED_THUMBNAIL_BG_COLORS['story'][0]) self.assertEqual(story_summary.thumbnail_filename, 'image.svg') - def test_update_story_node_properties(self): + def test_update_published_story(self) -> None: + change_list = [ + story_domain.StoryChange({ + 'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY, + 'property_name': story_domain.STORY_PROPERTY_TITLE, + 'old_value': 'Title', + 'new_value': 'New Title' + }) + ] + topic_services.publish_story( + self.TOPIC_ID, self.STORY_ID, self.user_id_admin + ) + story_services.update_story( + self.USER_ID, self.STORY_ID, change_list, + 'Changed title') + updated_story = story_fetchers.get_story_by_id(self.STORY_ID) + self.assertEqual(updated_story.title, 'New Title') + + def test_update_story_node_properties(self) -> None: changelist = [ story_domain.StoryChange({ 'cmd': story_domain.CMD_ADD_STORY_NODE, @@ -217,7 +251,7 @@ def test_update_story_node_properties(self): 'property_name': ( story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS), 'node_id': self.NODE_ID_2, - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': [self.NODE_ID_1] }), story_domain.StoryChange({ @@ -253,13 +287,11 @@ def test_update_story_node_properties(self): ] # Save a dummy image on filesystem, to be used as thumbnail. 
- with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_STORY, self.STORY_ID)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_STORY, self.STORY_ID) fs.commit( '%s/image.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, mimetype='image/svg+xml') @@ -332,9 +364,9 @@ def test_update_story_node_properties(self): self.assertEqual( story.story_contents.nodes[0].outline_is_finalized, False) - def test_prerequisite_skills_validation(self): + def test_prerequisite_skills_validation(self) -> None: self.story.story_contents.next_node_id = 'node_4' - node_1 = { + node_1: story_domain.StoryNodeDict = { 'id': 'node_1', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -349,7 +381,7 @@ def test_prerequisite_skills_validation(self): 'outline_is_finalized': False, 'exploration_id': None } - node_2 = { + node_2: story_domain.StoryNodeDict = { 'id': 'node_2', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -364,7 +396,7 @@ def test_prerequisite_skills_validation(self): 'outline_is_finalized': False, 'exploration_id': None } - node_3 = { + node_3: story_domain.StoryNodeDict = { 'id': 'node_3', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -389,14 +421,14 @@ def test_prerequisite_skills_validation(self): expected_error_string = ( 'The skills with ids skill_4 were specified as prerequisites for ' 'Chapter Title 3, but were not taught in any chapter before it') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_string): story_services.validate_prerequisite_skills_in_story_contents( - self.story.corresponding_topic_id, self.story.story_contents) + self.topic.get_all_skill_ids(), 
self.story.story_contents) - def test_story_with_loop(self): + def test_story_with_loop(self) -> None: self.story.story_contents.next_node_id = 'node_4' - node_1 = { + node_1: story_domain.StoryNodeDict = { 'id': 'node_1', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -411,7 +443,7 @@ def test_story_with_loop(self): 'outline_is_finalized': False, 'exploration_id': None } - node_2 = { + node_2: story_domain.StoryNodeDict = { 'id': 'node_2', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -426,7 +458,7 @@ def test_story_with_loop(self): 'outline_is_finalized': False, 'exploration_id': None } - node_3 = { + node_3: story_domain.StoryNodeDict = { 'id': 'node_3', 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -447,12 +479,12 @@ def test_story_with_loop(self): story_domain.StoryNode.from_dict(node_3) ] expected_error_string = 'Loops are not allowed in stories.' 
- with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_string): story_services.validate_prerequisite_skills_in_story_contents( - self.story.corresponding_topic_id, self.story.story_contents) + self.topic.get_all_skill_ids(), self.story.story_contents) - def test_does_story_exist_with_url_fragment(self): + def test_does_story_exist_with_url_fragment(self) -> None: story_id_1 = story_services.get_new_story_id() story_id_2 = story_services.get_new_story_id() self.save_new_story( @@ -470,7 +502,9 @@ def test_does_story_exist_with_url_fragment(self): self.assertFalse( story_services.does_story_exist_with_url_fragment('story-three')) - def test_update_story_with_invalid_corresponding_topic_id_value(self): + def test_update_story_with_invalid_corresponding_topic_id_value( + self + ) -> None: topic_id = topic_fetchers.get_new_topic_id() story_id = story_services.get_new_story_id() self.save_new_story(story_id, self.USER_ID, topic_id) @@ -482,14 +516,14 @@ def test_update_story_with_invalid_corresponding_topic_id_value(self): }) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, ( 'Expected story to only belong to a valid topic, but ' 'found no topic with ID: %s' % topic_id)): story_services.update_story( self.USER_ID, story_id, changelist, 'Added node.') - def test_update_story_which_not_corresponding_topic_id(self): + def test_update_story_which_not_corresponding_topic_id(self) -> None: topic_id = topic_fetchers.get_new_topic_id() story_id = story_services.get_new_story_id() self.save_new_topic( @@ -508,7 +542,7 @@ def test_update_story_which_not_corresponding_topic_id(self): }) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, ( 'Expected story to belong to the topic %s, but it is ' 'neither a part of the canonical stories or the ' @@ -516,7 +550,7 @@ def test_update_story_which_not_corresponding_topic_id(self): story_services.update_story( self.USER_ID, story_id, changelist, 
'Added node.') - def test_update_story_schema(self): + def test_update_story_schema(self) -> None: topic_id = topic_fetchers.get_new_topic_id() story_id = story_services.get_new_story_id() self.save_new_topic( @@ -544,12 +578,12 @@ def test_update_story_schema(self): # Check version is updated. self.assertEqual(new_story_dict['version'], 2) - # Delete version and check that the two dicts are the same. - del orig_story_dict['version'] - del new_story_dict['version'] + # Instead of deleting the version key, we are making them equal to + # check if the other contents of the two dicts are the same or not. + orig_story_dict['version'] = new_story_dict['version'] self.assertEqual(orig_story_dict, new_story_dict) - def test_delete_story(self): + def test_delete_story(self) -> None: story_services.delete_story(self.USER_ID, self.STORY_ID) self.assertEqual(story_fetchers.get_story_by_id( self.STORY_ID, strict=False), None) @@ -557,18 +591,20 @@ def test_delete_story(self): story_fetchers.get_story_summary_by_id( self.STORY_ID, strict=False), None) - def test_cannot_get_story_from_model_with_invalid_schema_version(self): + def test_cannot_get_story_from_model_with_invalid_schema_version( + self + ) -> None: story_model = story_models.StoryModel.get(self.STORY_ID) story_model.story_contents_schema_version = 0 story_model.commit(self.USER_ID, 'change schema version', []) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d story schemas at ' 'present.' 
% feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION): story_fetchers.get_story_from_model(story_model) - def test_get_story_summaries_by_ids(self): + def test_get_story_summaries_by_ids(self) -> None: story_summaries = story_fetchers.get_story_summaries_by_ids( [self.STORY_ID]) @@ -582,20 +618,23 @@ def test_get_story_summaries_by_ids(self): self.assertEqual(story_summaries[0].thumbnail_bg_color, None) self.assertEqual(story_summaries[0].version, 2) - def test_cannot_update_story_with_non_story_change_changelist(self): + def test_cannot_update_story_with_non_story_change_changelist(self) -> None: observed_log_messages = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.error().""" observed_log_messages.append(msg % args) logging_swap = self.swap(logging, 'error', _mock_logging_function) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'Expected change to be of type StoryChange') + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. 
with logging_swap, assert_raises_regexp_context_manager: story_services.update_story( - self.USER_ID, self.STORY_ID, [{}], 'Updated story node.') + self.USER_ID, self.STORY_ID, [{}], 'Updated story node.') # type: ignore[list-item] self.assertEqual( observed_log_messages, @@ -605,7 +644,7 @@ def _mock_logging_function(msg, *args): ] ) - def test_update_story_node_outline(self): + def test_update_story_node_outline(self) -> None: story = story_fetchers.get_story_by_id(self.STORY_ID) self.assertEqual(story.story_contents.nodes[0].outline, '') @@ -625,7 +664,9 @@ def test_update_story_node_outline(self): self.assertEqual(story.story_contents.nodes[0].outline, 'new_outline') - def test_cannot_update_story_node_outline_with_invalid_node_id(self): + def test_cannot_update_story_node_outline_with_invalid_node_id( + self + ) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': ( @@ -635,14 +676,14 @@ def test_cannot_update_story_node_outline_with_invalid_node_id(self): 'new_value': 'new_outline' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story outline.') - def test_cannot_update_story_with_no_commit_message(self): + def test_cannot_update_story_with_no_commit_message(self) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': ( @@ -652,13 +693,16 @@ def test_cannot_update_story_with_no_commit_message(self): 'new_value': 'New description.' })] - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex( Exception, 'Expected a commit message but received none.'): story_services.update_story( - self.USER_ID, self.STORY_ID, change_list, None) + self.USER_ID, self.STORY_ID, change_list, None) # type: ignore[arg-type] - def test_update_story_acquired_skill_ids(self): + def test_update_story_acquired_skill_ids(self) -> None: story = story_fetchers.get_story_by_id(self.STORY_ID) self.assertEqual(story.story_contents.nodes[0].acquired_skill_ids, []) @@ -667,7 +711,7 @@ def test_update_story_acquired_skill_ids(self): 'property_name': ( story_domain.STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS), 'node_id': 'node_1', - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': ['skill_id'] })] @@ -680,7 +724,7 @@ def test_update_story_acquired_skill_ids(self): self.assertEqual( story.story_contents.nodes[0].acquired_skill_ids, ['skill_id']) - def test_exploration_context_model_is_modified_correctly(self): + def test_exploration_context_model_is_modified_correctly(self) -> None: changelist = [ story_domain.StoryChange({ 'cmd': story_domain.CMD_ADD_STORY_NODE, @@ -692,7 +736,7 @@ def test_exploration_context_model_is_modified_correctly(self): 'property_name': ( story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS), 'node_id': self.NODE_ID_1, - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': [self.NODE_ID_2] }) ] @@ -799,7 +843,7 @@ def test_exploration_context_model_is_modified_correctly(self): 'property_name': ( story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS), 'node_id': self.NODE_ID_1, - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': ['node_3'] }), story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, @@ -850,14 +894,14 @@ def test_exploration_context_model_is_modified_correctly(self): 'property_name': ( story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS), 'node_id': self.NODE_ID_1, - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': ['node_2'] }), story_domain.StoryChange({ 
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': ( story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS), 'node_id': self.NODE_ID_2, - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': ['node_3'] }), story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, @@ -929,7 +973,7 @@ def test_exploration_context_model_is_modified_correctly(self): self.assertIsNone( exp_services.get_story_id_linked_to_exploration('2')) - def test_exploration_story_link_collision(self): + def test_exploration_story_link_collision(self) -> None: self.save_new_story('story_id_2', self.USER_ID, self.TOPIC_ID) topic_services.add_canonical_story( self.USER_ID, self.TOPIC_ID, 'story_id_2') @@ -963,7 +1007,7 @@ def test_exploration_story_link_collision(self): 'new_value': '0' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The exploration with ID 0 is already linked to story ' 'with ID %s' % self.STORY_ID): @@ -971,24 +1015,26 @@ def test_exploration_story_link_collision(self): self.USER_ID, 'story_id_2', change_list, 'Added chapter.') - def test_cannot_update_story_acquired_skill_ids_with_invalid_node_id(self): + def test_cannot_update_story_acquired_skill_ids_with_invalid_node_id( + self + ) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': ( story_domain.STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS), 'node_id': 'invalid_node', - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': ['skill_id'] })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story acquired_skill_ids.') - def test_update_story_notes(self): + def test_update_story_notes(self) -> None: story = story_fetchers.get_story_by_id(self.STORY_ID) self.assertEqual(story.notes, 'Notes') @@ -1006,7 +1052,7 @@ def 
test_update_story_notes(self): self.assertEqual(story.notes, 'New notes') - def test_update_story_language_code(self): + def test_update_story_language_code(self) -> None: story = story_fetchers.get_story_by_id(self.STORY_ID) self.assertEqual(story.language_code, 'en') @@ -1025,7 +1071,7 @@ def test_update_story_language_code(self): self.assertEqual(story.language_code, 'bn') - def test_update_story_url_fragment(self): + def test_update_story_url_fragment(self) -> None: story = story_fetchers.get_story_by_id(self.STORY_ID) self.assertEqual(story.url_fragment, 'title') @@ -1044,7 +1090,7 @@ def test_update_story_url_fragment(self): self.assertEqual(story.url_fragment, 'updated-title') - def test_cannot_update_story_if_url_fragment_already_exists(self): + def test_cannot_update_story_if_url_fragment_already_exists(self) -> None: topic_id = topic_fetchers.get_new_topic_id() story_id = story_services.get_new_story_id() self.save_new_story( @@ -1057,20 +1103,20 @@ def test_cannot_update_story_if_url_fragment_already_exists(self): 'new_value': 'original' })] exception_message = 'Story Url Fragment is not unique across the site.' 
- with self.assertRaisesRegexp(Exception, exception_message): + with self.assertRaisesRegex(Exception, exception_message): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story url_fragment.') - def test_cannot_update_story_with_no_change_list(self): - with self.assertRaisesRegexp( + def test_cannot_update_story_with_no_change_list(self) -> None: + with self.assertRaisesRegex( Exception, 'Unexpected error: received an invalid change list when trying to ' 'save story'): story_services.update_story( self.USER_ID, self.STORY_ID, [], 'Commit message') - def test_cannot_update_story_with_invalid_exploration_id(self): + def test_cannot_update_story_with_invalid_exploration_id(self) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) change_list = [story_domain.StoryChange({ @@ -1082,19 +1128,21 @@ def test_cannot_update_story_with_invalid_exploration_id(self): 'new_value': 'invalid_exp_id' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected story to only reference valid explorations'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_validate_exploration_throws_an_exception(self): + def test_validate_exploration_throws_an_exception(self) -> None: observed_log_messages = [] - def _mock_logging_function(msg): + def _mock_logging_function(msg: str) -> None: """Mocks logging.exception().""" observed_log_messages.append(msg) - def _mock_validate_function(_exploration, _strict): + def _mock_validate_function( + _exploration: exp_domain.Exploration, _strict: bool + ) -> None: """Mocks logging.exception().""" raise Exception('Error in exploration') @@ -1105,10 +1153,10 @@ def _mock_validate_function(_exploration, _strict): with logging_swap, validate_fn_swap: self.save_new_valid_exploration( 'exp_id_1', self.user_id_a, title='title', - category='Category 1', correctness_feedback_enabled=True) + category='Algebra', 
correctness_feedback_enabled=True) self.publish_exploration(self.user_id_a, 'exp_id_1') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Error in exploration'): story_services.validate_explorations_for_story( ['exp_id_1'], False) @@ -1117,11 +1165,11 @@ def _mock_validate_function(_exploration, _strict): 'Exploration validation failed for exploration with ' 'ID: exp_id_1. Error: Error in exploration']) - def test_validate_exploration_returning_error_messages(self): + def test_validate_exploration_returning_error_messages(self) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) self.save_new_valid_exploration( - 'exp_id_1', self.user_id_a, title='title', category='Category 1', + 'exp_id_1', self.user_id_a, title='title', category='Algebra', correctness_feedback_enabled=True) validation_error_messages = ( story_services.validate_explorations_for_story( @@ -1135,11 +1183,11 @@ def test_validate_exploration_returning_error_messages(self): ) self.assertEqual(validation_error_messages, [message_1, message_2]) - def test_cannot_update_story_with_private_exploration_id(self): + def test_cannot_update_story_with_private_exploration_id(self) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) self.save_new_valid_exploration( - 'exp_id_1', self.user_id_a, title='title', category='Category 1', + 'exp_id_1', self.user_id_a, title='title', category='Algebra', correctness_feedback_enabled=True) change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, @@ -1150,12 +1198,12 @@ def test_cannot_update_story_with_private_exploration_id(self): 'new_value': 'exp_id_1' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Exploration with ID exp_id_1 is not public'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_cannot_update_story_with_blank_exp_id(self): + def 
test_cannot_update_story_with_blank_exp_id(self) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) @@ -1168,22 +1216,24 @@ def test_cannot_update_story_with_blank_exp_id(self): 'new_value': None })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Story node with id node_1 does not contain an ' 'exploration id.'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_cannot_update_story_with_exps_with_different_categories(self): + def test_cannot_update_story_with_exps_with_different_categories( + self + ) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) self.save_new_valid_exploration( - 'exp_id_1', self.user_id_a, title='title', category='Category 1', + 'exp_id_1', self.user_id_a, title='title', category='Algebra', correctness_feedback_enabled=True) self.publish_exploration(self.user_id_a, 'exp_id_1') self.save_new_valid_exploration( - 'exp_id_2', self.user_id_a, title='title', category='Category 2', + 'exp_id_2', self.user_id_a, title='title', category='Reading', correctness_feedback_enabled=True) self.publish_exploration(self.user_id_a, 'exp_id_2') @@ -1206,7 +1256,7 @@ def test_cannot_update_story_with_exps_with_different_categories(self): 'property_name': ( story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS), 'node_id': 'node_1', - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': ['node_2'] }), story_domain.StoryChange({ @@ -1228,17 +1278,55 @@ def test_cannot_update_story_with_exps_with_different_categories(self): 'All explorations in a story should be of the same category. 
' 'The explorations with ID exp_id_2 and exp_id_1 have different ' 'categories.']) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'All explorations in a story should be of the ' 'same category'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_cannot_update_story_with_exps_with_other_languages(self): + def test_cannot_update_story_with_exps_with_invalid_categories( + self + ) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) self.save_new_valid_exploration( 'exp_id_1', self.user_id_a, title='title', category='Category 1', + correctness_feedback_enabled=True) + self.publish_exploration(self.user_id_a, 'exp_id_1') + + change_list = [ + story_domain.StoryChange({ + 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, + 'property_name': ( + story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID), + 'node_id': self.NODE_ID_1, + 'old_value': None, + 'new_value': 'exp_id_1' + }) + ] + + validation_error_messages = ( + story_services.validate_explorations_for_story( + ['exp_id_1'], False)) + + self.assertEqual( + validation_error_messages, [ + 'All explorations in a story should be of a default category. ' + 'The exploration with ID exp_id_1 has an invalid ' + 'category Category 1.', 'Expected all explorations in a story ' + 'to be of a default category. Invalid exploration: exp_id_1']) + with self.assertRaisesRegex( + Exception, 'All explorations in a story should be of a ' + 'default category. 
The exploration with ID exp_id_1 ' + 'has an invalid category Category 1.'): + story_services.update_story( + self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') + + def test_cannot_update_story_with_exps_with_other_languages(self) -> None: + topic_services.publish_story( + self.TOPIC_ID, self.STORY_ID, self.user_id_admin) + self.save_new_valid_exploration( + 'exp_id_1', self.user_id_a, title='title', category='Algebra', language_code='es', correctness_feedback_enabled=True) self.publish_exploration(self.user_id_a, 'exp_id_1') @@ -1257,18 +1345,23 @@ def test_cannot_update_story_with_exps_with_other_languages(self): story_services.validate_explorations_for_story(['exp_id_1'], False)) self.assertEqual( validation_error_messages, [ - 'Invalid language es found for exploration with ID exp_id_1.']) - with self.assertRaisesRegexp( + 'Invalid language es found for exploration with ID exp_id_1.' + ' This language is not supported for explorations in a story' + ' on the mobile app.']) + with self.assertRaisesRegex( Exception, 'Invalid language es found for exploration with ' - 'ID exp_id_1'): + 'ID exp_id_1. 
This language is not supported for explorations ' + 'in a story on the mobile app.'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_cannot_update_story_with_exps_without_correctness_feedback(self): + def test_cannot_update_story_with_exps_without_correctness_feedback( + self + ) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) self.save_new_valid_exploration( - 'exp_id_1', self.user_id_a, title='title', category='Category 1', + 'exp_id_1', self.user_id_a, title='title', category='Algebra', language_code='en') self.publish_exploration(self.user_id_a, 'exp_id_1') @@ -1287,19 +1380,22 @@ def test_cannot_update_story_with_exps_without_correctness_feedback(self): story_services.validate_explorations_for_story(['exp_id_1'], False)) self.assertEqual( validation_error_messages, [ - 'Expected all explorations to have correctness feedback ' - 'enabled. Invalid exploration: exp_id_1']) - with self.assertRaisesRegexp( - Exception, 'Expected all explorations to have correctness feedback ' - 'enabled. Invalid exploration: exp_id_1'): + 'Expected all explorations in a story to ' + 'have correctness feedback enabled. Invalid ' + 'exploration: exp_id_1']) + with self.assertRaisesRegex( + Exception, 'Expected all explorations in a story to ' + 'have correctness feedback enabled. 
Invalid exploration: exp_id_1'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_cannot_update_story_with_exps_with_invalid_interactions(self): + def test_cannot_update_story_with_exps_with_invalid_interactions( + self + ) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) self.save_new_valid_exploration( - 'exp_id_1', self.user_id_a, title='title', category='Category 1', + 'exp_id_1', self.user_id_a, title='title', category='Algebra', interaction_id='GraphInput', correctness_feedback_enabled=True) self.publish_exploration(self.user_id_a, 'exp_id_1') @@ -1319,18 +1415,20 @@ def test_cannot_update_story_with_exps_with_invalid_interactions(self): self.assertEqual( validation_error_messages, [ 'Invalid interaction GraphInput in exploration with ID: ' - 'exp_id_1.']) - with self.assertRaisesRegexp( + 'exp_id_1. This interaction is not supported for explorations ' + 'in a story on the mobile app.']) + with self.assertRaisesRegex( Exception, 'Invalid interaction GraphInput in exploration with ' - 'ID: exp_id_1'): + 'ID: exp_id_1. 
This interaction is not supported for explorations ' + 'in a story on the mobile app.'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_cannot_update_story_with_exps_with_recommended_exps(self): + def test_cannot_update_story_with_exps_with_recommended_exps(self) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) self.save_new_valid_exploration( - 'exp_id_1', self.user_id_a, title='title', category='Category 1', + 'exp_id_1', self.user_id_a, title='title', category='Algebra', interaction_id='TextInput', end_state_name='End', correctness_feedback_enabled=True) self.publish_exploration(self.user_id_a, 'exp_id_1') @@ -1363,19 +1461,25 @@ def test_cannot_update_story_with_exps_with_recommended_exps(self): story_services.validate_explorations_for_story(['exp_id_1'], False)) self.assertEqual( validation_error_messages, [ - 'Exploration with ID: exp_id_1 contains exploration ' - 'recommendations in its EndExploration interaction.']) - with self.assertRaisesRegexp( - Exception, 'Exploration with ID: exp_id_1 contains exploration ' - 'recommendations in its EndExploration interaction.'): + 'Explorations in a story are not expected to contain ' + 'exploration recommendations. Exploration with ID: exp_id_1 ' + 'contains exploration recommendations in its EndExploration ' + 'interaction.']) + with self.assertRaisesRegex( + Exception, 'Explorations in a story are not expected to contain ' + 'exploration recommendations. 
Exploration with ID: exp_id_1 ' + 'contains exploration recommendations in its EndExploration ' + 'interaction.'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_cannot_update_story_with_exps_with_invalid_rte_content(self): + def test_cannot_update_story_with_exps_with_invalid_rte_content( + self + ) -> None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) self.save_new_valid_exploration( - 'exp_id_1', self.user_id_a, title='title', category='Category 1', + 'exp_id_1', self.user_id_a, title='title', category='Algebra', end_state_name='End', correctness_feedback_enabled=True) self.publish_exploration(self.user_id_a, 'exp_id_1') exp_services.update_exploration( @@ -1384,7 +1488,7 @@ def test_cannot_update_story_with_exps_with_invalid_rte_content(self): 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'Introduction', 'new_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': ( ' None: topic_services.publish_story( self.TOPIC_ID, self.STORY_ID, self.user_id_admin) self.save_new_valid_exploration( - 'exp_id_1', self.user_id_a, title='title', category='Category 1', + 'exp_id_1', self.user_id_a, title='title', category='Algebra', correctness_feedback_enabled=True) exp_services.update_exploration( self.user_id_a, 'exp_id_1', [exp_domain.ExplorationChange({ @@ -1451,16 +1557,16 @@ def test_cannot_update_story_with_exps_with_parameter_values(self): story_services.validate_explorations_for_story(['exp_id_1'], False)) self.assertEqual( validation_error_messages, [ - 'Expected no exploration to have parameter values in' - ' it. Invalid exploration: exp_id_1']) - with self.assertRaisesRegexp( - Exception, 'Expected no exploration to have parameter values in' - ' it. Invalid exploration: exp_id_1'): + 'Expected no exploration in a story to have parameter ' + 'values in it. 
Invalid exploration: exp_id_1']) + with self.assertRaisesRegex( + Exception, 'Expected no exploration in a story to have parameter ' + 'values in it. Invalid exploration: exp_id_1'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') self.save_new_valid_exploration( - 'exp_id_2', self.user_id_a, title='title 2', category='Category 1', + 'exp_id_2', self.user_id_a, title='title 2', category='Algebra', interaction_id='GraphInput', correctness_feedback_enabled=True) exp_services.update_exploration( self.user_id_a, 'exp_id_2', [exp_domain.ExplorationChange({ @@ -1475,7 +1581,12 @@ def test_cannot_update_story_with_exps_with_parameter_values(self): 'property_name': exp_domain.STATE_PROPERTY_PARAM_CHANGES, 'state_name': feconf.DEFAULT_INIT_STATE_NAME, 'new_value': [ - param_domain.ParamChange('param1', 'Copier', {}).to_dict()] + # Here we use MyPy ignore because the expected type for 3rd + # argument of paramChange is CustomizationArgsDict, but for + # testing purposes here we are providing an empty dict which + # causes MyPy to throw an incompatible argument type error. + # Thus to avoid error, we used ignore. + param_domain.ParamChange('param1', 'Copier', {}).to_dict()] # type: ignore[arg-type] })], '') self.publish_exploration(self.user_id_a, 'exp_id_2') @@ -1491,13 +1602,13 @@ def test_cannot_update_story_with_exps_with_parameter_values(self): }) ] - with self.assertRaisesRegexp( - Exception, 'Expected no exploration to have parameter values in' - ' it. Invalid exploration: exp_id_2'): + with self.assertRaisesRegex( + Exception, 'Expected no exploration in a story to have parameter ' + 'values in it. 
Invalid exploration: exp_id_2'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_cannot_update_story_with_mismatch_of_story_versions(self): + def test_cannot_update_story_with_mismatch_of_story_versions(self) -> None: self.save_new_default_exploration( 'exp_id', self.user_id_a, title='title') self.publish_exploration(self.user_id_a, 'exp_id') @@ -1515,7 +1626,7 @@ def test_cannot_update_story_with_mismatch_of_story_versions(self): story_model.version = 0 story_model.commit(self.user_id_a, 'Changed version', []) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unexpected error: trying to update version 1 of story ' 'from version 2. Please reload the page and try again.'): @@ -1526,14 +1637,14 @@ def test_cannot_update_story_with_mismatch_of_story_versions(self): story_model.version = 10 story_model.commit(self.user_id_a, 'Changed version', []) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Trying to update version 11 of story from version 2, ' 'which is too old. 
Please reload the page and try again.'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node.') - def test_get_story_by_version(self): + def test_get_story_by_version(self) -> None: topic_id = topic_fetchers.get_new_topic_id() story_id = story_services.get_new_story_id() self.save_new_topic( @@ -1564,7 +1675,7 @@ def test_get_story_by_version(self): self.assertEqual(story_v1.language_code, 'en') self.assertEqual(story_v2.language_code, 'bn') - def test_cannot_update_initial_node_with_invalid_node_id(self): + def test_cannot_update_initial_node_with_invalid_node_id(self) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY, 'property_name': story_domain.INITIAL_NODE_ID, @@ -1572,14 +1683,14 @@ def test_cannot_update_initial_node_with_invalid_node_id(self): 'new_value': 'new_initial_node_id' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id new_initial_node_id is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story initial_node_id.') - def test_rearrange_node_in_story(self): + def test_rearrange_node_in_story(self) -> None: changelist = [ story_domain.StoryChange({ 'cmd': story_domain.CMD_ADD_STORY_NODE, @@ -1605,7 +1716,9 @@ def test_rearrange_node_in_story(self): self.assertEqual(story.story_contents.nodes[0].id, self.NODE_ID_2) self.assertEqual(story.story_contents.nodes[1].id, self.NODE_ID_1) - def test_cannot_update_node_exploration_id_with_invalid_node_id(self): + def test_cannot_update_node_exploration_id_with_invalid_node_id( + self + ) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID, @@ -1614,7 +1727,7 @@ def test_cannot_update_node_exploration_id_with_invalid_node_id(self): 'new_value': 'exp_id' })] - with self.assertRaisesRegexp( + with 
self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( @@ -1622,7 +1735,8 @@ def test_cannot_update_node_exploration_id_with_invalid_node_id(self): 'Updated story node_exploration_id.') def test_cannot_update_node_exploration_id_with_existing_exploration_id( - self): + self + ) -> None: self.save_new_default_exploration( 'exp_id', self.user_id_a, title='title') self.publish_exploration(self.user_id_a, 'exp_id') @@ -1649,7 +1763,7 @@ def test_cannot_update_node_exploration_id_with_existing_exploration_id( 'property_name': ( story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS), 'node_id': self.NODE_ID_1, - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': [self.NODE_ID_2] }), story_domain.StoryChange({ @@ -1662,24 +1776,27 @@ def test_cannot_update_node_exploration_id_with_existing_exploration_id( }) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'A node with exploration id exp_id already exists.'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story node_exploration_id.') - def test_cannot_update_destination_node_ids_with_invalid_node_id(self): + def test_cannot_update_destination_node_ids_with_invalid_node_id( + self + ) -> None: + new_value: List[str] = [] change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': ( story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS), 'node_id': 'invalid_node', - 'old_value': [], - 'new_value': [] + 'old_value': self.OLD_VALUE, + 'new_value': new_value })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( @@ -1687,24 +1804,28 @@ def test_cannot_update_destination_node_ids_with_invalid_node_id(self): 'Updated story new_destination_node_ids.') def test_cannot_update_new_prerequisite_skill_ids_with_invalid_node_id( - self): + 
self + ) -> None: + new_value: List[str] = [] change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': ( story_domain.STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS), 'node_id': 'invalid_node', - 'old_value': [], - 'new_value': [] + 'old_value': self.OLD_VALUE, + 'new_value': new_value })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Updated story new_prerequisite_skill_ids.') - def test_cannot_mark_node_outline_as_unfinalized_with_invalid_node_id(self): + def test_cannot_mark_node_outline_as_unfinalized_with_invalid_node_id( + self + ) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS, 'node_id': 'invalid_node', @@ -1712,14 +1833,16 @@ def test_cannot_mark_node_outline_as_unfinalized_with_invalid_node_id(self): 'new_value': '' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Mark node outline as unfinalized.') - def test_cannot_mark_node_outline_as_finalized_with_invalid_node_id(self): + def test_cannot_mark_node_outline_as_finalized_with_invalid_node_id( + self + ) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS, 'node_id': 'invalid_node', @@ -1727,14 +1850,14 @@ def test_cannot_mark_node_outline_as_finalized_with_invalid_node_id(self): 'new_value': 'new_value' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Mark node outline as finalized.') - def test_cannot_update_node_title_with_invalid_node_id(self): + def 
test_cannot_update_node_title_with_invalid_node_id(self) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': story_domain.STORY_NODE_PROPERTY_TITLE, @@ -1743,13 +1866,13 @@ def test_cannot_update_node_title_with_invalid_node_id(self): 'new_value': 'new_title' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Update node title.') - def test_cannot_update_node_description_with_invalid_node_id(self): + def test_cannot_update_node_description_with_invalid_node_id(self) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': story_domain.STORY_NODE_PROPERTY_DESCRIPTION, @@ -1758,14 +1881,16 @@ def test_cannot_update_node_description_with_invalid_node_id(self): 'new_value': 'new_description' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Update node description.') - def test_cannot_update_node_thumbnail_filename_with_invalid_node_id(self): + def test_cannot_update_node_thumbnail_filename_with_invalid_node_id( + self + ) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': ( @@ -1775,14 +1900,16 @@ def test_cannot_update_node_thumbnail_filename_with_invalid_node_id(self): 'new_value': 'new_image.svg' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Update node thumbnail filename.') - def test_cannot_update_node_thumbnail_bg_color_with_invalid_node_id(self): + def 
test_cannot_update_node_thumbnail_bg_color_with_invalid_node_id( + self + ) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, 'property_name': ( @@ -1792,26 +1919,26 @@ def test_cannot_update_node_thumbnail_bg_color_with_invalid_node_id(self): 'new_value': '#F8BF74' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Update node thumbnail bg color.') - def test_cannot_delete_node_with_invalid_node_id(self): + def test_cannot_delete_node_with_invalid_node_id(self) -> None: change_list = [story_domain.StoryChange({ 'cmd': story_domain.CMD_DELETE_STORY_NODE, 'node_id': 'invalid_node' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id invalid_node is not part of this story'): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Delete node.') - def test_cannot_delete_starting_node_of_story(self): + def test_cannot_delete_starting_node_of_story(self) -> None: changelist = [ story_domain.StoryChange({ 'cmd': story_domain.CMD_ADD_STORY_NODE, @@ -1823,7 +1950,7 @@ def test_cannot_delete_starting_node_of_story(self): 'property_name': ( story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS), 'node_id': self.NODE_ID_2, - 'old_value': [], + 'old_value': self.OLD_VALUE, 'new_value': [self.NODE_ID_1] }), story_domain.StoryChange({ @@ -1848,14 +1975,14 @@ def test_cannot_delete_starting_node_of_story(self): 'node_id': self.NODE_ID_2 })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The node with id %s is the starting node for the story, ' 'change the starting node before deleting it.' 
% self.NODE_ID_2): story_services.update_story( self.USER_ID, self.STORY_ID, change_list, 'Delete node.') - def test_delete_initial_node(self): + def test_delete_initial_node(self) -> None: story = story_fetchers.get_story_by_id(self.STORY_ID) self.assertEqual( @@ -1879,20 +2006,24 @@ class StoryProgressUnitTests(test_utils.GenericTestBase): which are completed in the context of the story. """ - def _get_progress_model(self, user_id, STORY_ID): + def _get_progress_model( + self, user_id: str, STORY_ID: str + ) -> Optional[user_models.StoryProgressModel]: """Returns the StoryProgressModel corresponding to the story id and user id. """ return user_models.StoryProgressModel.get( user_id, STORY_ID, strict=False) - def _record_completion(self, user_id, STORY_ID, node_id): + def _record_completion( + self, user_id: str, STORY_ID: str, node_id: str + ) -> None: """Records the completion of a node in the context of a story.""" story_services.record_completed_node_in_story_context( user_id, STORY_ID, node_id) - def setUp(self): - super(StoryProgressUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.STORY_1_ID = 'story_id' self.STORY_ID_1 = 'story_id_1' @@ -1914,7 +2045,7 @@ def setUp(self): story = story_domain.Story.create_default_story( self.STORY_1_ID, 'Title', 'Description', self.TOPIC_ID, 'title') - self.node_1 = { + self.node_1: story_domain.StoryNodeDict = { 'id': self.NODE_ID_1, 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1929,7 +2060,7 @@ def setUp(self): 'outline_is_finalized': False, 'exploration_id': None } - self.node_2 = { + self.node_2: story_domain.StoryNodeDict = { 'id': self.NODE_ID_2, 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1944,7 +2075,7 @@ def setUp(self): 'outline_is_finalized': False, 'exploration_id': None } - self.node_3 = { + self.node_3: story_domain.StoryNodeDict = { 'id': self.NODE_ID_3, 
'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1959,7 +2090,7 @@ def setUp(self): 'outline_is_finalized': False, 'exploration_id': None } - self.node_4 = { + self.node_4: story_domain.StoryNodeDict = { 'id': self.NODE_ID_4, 'thumbnail_filename': 'image.svg', 'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[ @@ -1987,7 +2118,7 @@ def setUp(self): topic_services.add_canonical_story( self.USER_ID, self.TOPIC_ID, story.id) - def test_get_completed_node_ids(self): + def test_get_completed_node_ids(self) -> None: # There should be no exception if the user or story do not exist; # it should also return an empty list in both of these situations. self.assertEqual(story_fetchers.get_completed_node_ids( @@ -2014,7 +2145,7 @@ def test_get_completed_node_ids(self): self.owner_id, self.STORY_1_ID), [self.NODE_ID_1, self.NODE_ID_2, self.NODE_ID_3]) - def test_get_latest_completed_node_ids(self): + def test_get_latest_completed_node_ids(self) -> None: self.assertIsNone( self._get_progress_model(self.owner_id, self.STORY_1_ID)) self.assertEqual(story_fetchers.get_latest_completed_node_ids( @@ -2033,7 +2164,9 @@ def test_get_latest_completed_node_ids(self): self.owner_id, self.STORY_1_ID), [self.NODE_ID_2, self.NODE_ID_3, self.NODE_ID_4]) - def test_get_latest_completed_node_ids_different_completion_order(self): + def test_get_latest_completed_node_ids_different_completion_order( + self + ) -> None: self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_4) self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_3) self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1) @@ -2044,7 +2177,7 @@ def test_get_latest_completed_node_ids_different_completion_order(self): self.owner_id, self.STORY_1_ID), [self.NODE_ID_2, self.NODE_ID_3, self.NODE_ID_4]) - def test_get_latest_completed_node_ids_multiple_completions(self): + def test_get_latest_completed_node_ids_multiple_completions(self) 
-> None: self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1) self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_2) self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_2) @@ -2056,7 +2189,7 @@ def test_get_latest_completed_node_ids_multiple_completions(self): self.owner_id, self.STORY_1_ID), [self.NODE_ID_2, self.NODE_ID_3, self.NODE_ID_4]) - def test_get_completed_nodes_in_story(self): + def test_get_completed_nodes_in_story(self) -> None: self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1) self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_2) @@ -2066,7 +2199,7 @@ def test_get_completed_nodes_in_story(self): self.assertEqual( completed_node.to_dict(), self.nodes[ind].to_dict()) - def test_get_pending_and_all_nodes_in_story(self): + def test_get_pending_and_all_nodes_in_story(self) -> None: self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1) # The starting index is 1 because the first story node is completed, @@ -2077,7 +2210,7 @@ def test_get_pending_and_all_nodes_in_story(self): self.assertEqual( pending_node.to_dict(), self.nodes[index].to_dict()) - def test_record_completed_node_in_story_context(self): + def test_record_completed_node_in_story_context(self) -> None: # Ensure that node completed within the context of a story are # recorded correctly. This test actually validates both # test_get_completed_node_ids and @@ -2095,7 +2228,8 @@ def test_record_completed_node_in_story_context(self): completion_model = self._get_progress_model( self.owner_id, self.STORY_1_ID) - self.assertIsNotNone(completion_model) + # Ruling out the possibility of None for mypy type checking. 
+ assert completion_model is not None self.assertEqual( completion_model.completed_node_ids, [ self.NODE_ID_1]) @@ -2106,6 +2240,8 @@ def test_record_completed_node_in_story_context(self): self.owner_id, self.STORY_1_ID, self.NODE_ID_1) completion_model = self._get_progress_model( self.owner_id, self.STORY_1_ID) + # Ruling out the possibility of None for mypy type checking. + assert completion_model is not None self.assertEqual( completion_model.completed_node_ids, [ self.NODE_ID_1]) @@ -2121,6 +2257,8 @@ def test_record_completed_node_in_story_context(self): self.owner_id, self.STORY_ID_1, self.NODE_ID_2) completion_model = self._get_progress_model( self.owner_id, self.STORY_1_ID) + # Ruling out the possibility of None for mypy type checking. + assert completion_model is not None self.assertEqual( completion_model.completed_node_ids, [ self.NODE_ID_1]) @@ -2132,6 +2270,8 @@ def test_record_completed_node_in_story_context(self): self.owner_id, self.STORY_1_ID, self.NODE_ID_3) completion_model = self._get_progress_model( self.owner_id, self.STORY_1_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert completion_model is not None self.assertEqual( completion_model.completed_node_ids, [ self.NODE_ID_1, self.NODE_ID_2, self.NODE_ID_3]) @@ -2139,7 +2279,7 @@ def test_record_completed_node_in_story_context(self): class StoryContentsMigrationTests(test_utils.GenericTestBase): - def test_migrate_story_contents_to_latest_schema(self): + def test_migrate_story_contents_to_latest_schema(self) -> None: story_id = story_services.get_new_story_id() topic_id = topic_fetchers.get_new_topic_id() user_id = 'user_id' diff --git a/core/domain/subscription_services.py b/core/domain/subscription_services.py index d80514795cf9..508574bbf326 100644 --- a/core/domain/subscription_services.py +++ b/core/domain/subscription_services.py @@ -20,12 +20,16 @@ from core.platform import models -(user_models,) = models.Registry.import_models([ - models.NAMES.user -]) +from typing import List +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models -def subscribe_to_thread(user_id, feedback_thread_id): +(user_models,) = models.Registry.import_models([models.Names.USER]) + + +def subscribe_to_thread(user_id: str, feedback_thread_id: str) -> None: """Subscribes a user to a feedback thread. WARNING: Callers of this function should ensure that the user_id and @@ -38,7 +42,7 @@ def subscribe_to_thread(user_id, feedback_thread_id): subscribe_to_threads(user_id, [feedback_thread_id]) -def subscribe_to_threads(user_id, feedback_thread_ids): +def subscribe_to_threads(user_id: str, feedback_thread_ids: List[str]) -> None: """Subscribes a user to feedback threads. WARNING: Callers of this function should ensure that the user_id and @@ -68,7 +72,7 @@ def subscribe_to_threads(user_id, feedback_thread_ids): subscriptions_model.put() -def subscribe_to_exploration(user_id, exploration_id): +def subscribe_to_exploration(user_id: str, exploration_id: str) -> None: """Subscribes a user to an exploration (and, therefore, indirectly to all feedback threads for that exploration). 
@@ -90,7 +94,7 @@ def subscribe_to_exploration(user_id, exploration_id): subscriptions_model.put() -def subscribe_to_creator(user_id, creator_id): +def subscribe_to_creator(user_id: str, creator_id: str) -> None: """Subscribes a user (learner) to a creator. WARNING: Callers of this function should ensure that the user_id and @@ -99,6 +103,10 @@ def subscribe_to_creator(user_id, creator_id): Args: user_id: str. The user ID of the new subscriber. creator_id: str. The user ID of the creator. + + Raises: + Exception. The user ID of the new subscriber is same as the + user ID of the creator. """ if user_id == creator_id: raise Exception('User %s is not allowed to self subscribe.' % user_id) @@ -124,7 +132,7 @@ def subscribe_to_creator(user_id, creator_id): subscriptions_model_user.put() -def unsubscribe_from_creator(user_id, creator_id): +def unsubscribe_from_creator(user_id: str, creator_id: str) -> None: """Unsubscribe a user from a creator. WARNING: Callers of this function should ensure that the user_id and @@ -135,9 +143,9 @@ def unsubscribe_from_creator(user_id, creator_id): creator_id: str. The user ID of the creator. """ subscribers_model_creator = user_models.UserSubscribersModel.get( - creator_id, strict=False) + creator_id) subscriptions_model_user = user_models.UserSubscriptionsModel.get( - user_id, strict=False) + user_id) if user_id in subscribers_model_creator.subscriber_ids: subscribers_model_creator.subscriber_ids.remove(user_id) @@ -148,7 +156,7 @@ def unsubscribe_from_creator(user_id, creator_id): subscriptions_model_user.put() -def get_all_threads_subscribed_to(user_id): +def get_all_threads_subscribed_to(user_id: str) -> List[str]: """Returns a list with ids of all the feedback and suggestion threads to which the user is subscribed. 
@@ -163,13 +171,19 @@ def get_all_threads_subscribed_to(user_id): """ subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) - - return ( - subscriptions_model.general_feedback_thread_ids - if subscriptions_model else []) - - -def get_all_creators_subscribed_to(user_id): + # TODO(#15621): The explicit declaration of type for ndb properties should + # be removed. Currently, these ndb properties are annotated with Any return + # type. Once we have proper return type we can remove this. + if subscriptions_model: + feedback_thread_ids: List[str] = ( + subscriptions_model.general_feedback_thread_ids + ) + return feedback_thread_ids + else: + return [] + + +def get_all_creators_subscribed_to(user_id: str) -> List[str]: """Returns a list with ids of all the creators to which this learner has subscribed. @@ -184,12 +198,17 @@ def get_all_creators_subscribed_to(user_id): """ subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) - return ( - subscriptions_model.creator_ids - if subscriptions_model else []) + # TODO(#15621): The explicit declaration of type for ndb properties should + # be removed. Currently, these ndb properties are annotated with Any return + # type. Once we have proper return type we can remove this. + if subscriptions_model: + creator_ids: List[str] = subscriptions_model.creator_ids + return creator_ids + else: + return [] -def get_all_subscribers_of_creator(user_id): +def get_all_subscribers_of_creator(user_id: str) -> List[str]: """Returns a list with ids of all users who have subscribed to this creator. @@ -203,12 +222,17 @@ def get_all_subscribers_of_creator(user_id): """ subscribers_model = user_models.UserSubscribersModel.get( user_id, strict=False) - return ( - subscribers_model.subscriber_ids - if subscribers_model else []) + # TODO(#15621): The explicit declaration of type for ndb properties should + # be removed. Currently, these ndb properties are annotated with Any return + # type. 
Once we have proper return type we can remove this. + if subscribers_model: + subscriber_ids: List[str] = subscribers_model.subscriber_ids + return subscriber_ids + else: + return [] -def get_exploration_ids_subscribed_to(user_id): +def get_exploration_ids_subscribed_to(user_id: str) -> List[str]: """Returns a list with ids of all explorations that the given user subscribes to. @@ -223,12 +247,17 @@ def get_exploration_ids_subscribed_to(user_id): """ subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) - return ( - subscriptions_model.exploration_ids - if subscriptions_model else []) + # TODO(#15621): The explicit declaration of type for ndb properties should + # be removed. Currently, these ndb properties are annotated with Any return + # type. Once we have proper return type we can remove this. + if subscriptions_model: + exploration_ids: List[str] = subscriptions_model.exploration_ids + return exploration_ids + else: + return [] -def subscribe_to_collection(user_id, collection_id): +def subscribe_to_collection(user_id: str, collection_id: str) -> None: """Subscribes a user to a collection. WARNING: Callers of this function should ensure that the user_id and @@ -249,7 +278,7 @@ def subscribe_to_collection(user_id, collection_id): subscriptions_model.put() -def get_collection_ids_subscribed_to(user_id): +def get_collection_ids_subscribed_to(user_id: str) -> List[str]: """Returns a list with ids of all collections that the given user subscribes to. @@ -264,6 +293,11 @@ def get_collection_ids_subscribed_to(user_id): """ subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) - return ( - subscriptions_model.collection_ids - if subscriptions_model else []) + # TODO(#15621): The explicit declaration of type for ndb properties should + # be removed. Currently, these ndb properties are annotated with Any return + # type. Once we have proper return type we can remove this. 
+ if subscriptions_model: + collection_ids: List[str] = subscriptions_model.collection_ids + return collection_ids + else: + return [] diff --git a/core/domain/subscription_services_test.py b/core/domain/subscription_services_test.py index 044e3cfaedcd..74dff6a33a91 100644 --- a/core/domain/subscription_services_test.py +++ b/core/domain/subscription_services_test.py @@ -31,26 +31,32 @@ from core.platform import models from core.tests import test_utils -(user_models,) = models.Registry.import_models([models.NAMES.user]) +from typing import Final, List -COLLECTION_ID = 'col_id' -COLLECTION_ID_2 = 'col_id_2' -EXP_ID = 'exp_id' -EXP_ID_2 = 'exp_id_2' -FEEDBACK_THREAD_ID = 'fthread_id' -FEEDBACK_THREAD_ID_2 = 'fthread_id_2' -USER_ID = 'user_id' -USER_ID_2 = 'user_id_2' +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) + +COLLECTION_ID: Final = 'col_id' +COLLECTION_ID_2: Final = 'col_id_2' +EXP_ID: Final = 'exp_id' +EXP_ID_2: Final = 'exp_id_2' +FEEDBACK_THREAD_ID: Final = 'fthread_id' +FEEDBACK_THREAD_ID_2: Final = 'fthread_id_2' +USER_ID: Final = 'user_id' +USER_ID_2: Final = 'user_id_2' class SubscriptionsTest(test_utils.GenericTestBase): """Tests for subscription management.""" - OWNER_2_EMAIL = 'owner2@example.com' - OWNER2_USERNAME = 'owner2' + OWNER_2_EMAIL: Final = 'owner2@example.com' + OWNER2_USERNAME: Final = 'owner2' - def setUp(self): - super(SubscriptionsTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) @@ -63,7 +69,7 @@ def setUp(self): self.owner = user_services.get_user_actions_info(self.owner_id) - def _get_thread_ids_subscribed_to(self, user_id): + def _get_thread_ids_subscribed_to(self, user_id: str) -> List[str]: """Returns the feedback thread ids to which the user corresponding 
to the given user id is subscribed to. @@ -71,16 +77,23 @@ def _get_thread_ids_subscribed_to(self, user_id): user_id: str. The user id. Returns: - tuple(str). The tuple containing all the feedback thread ids to + List(str). The list containing all the feedback thread ids to which the user is subscribed to. """ subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) - return ( - subscriptions_model.general_feedback_thread_ids - if subscriptions_model else []) - - def _get_exploration_ids_subscribed_to(self, user_id): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if subscriptions_model: + feedback_thread_ids: List[str] = ( + subscriptions_model.general_feedback_thread_ids + ) + return feedback_thread_ids + else: + return [] + + def _get_exploration_ids_subscribed_to(self, user_id: str) -> List[str]: """Returns all the exploration ids of the explorations to which the user has subscribed to. @@ -88,16 +101,21 @@ def _get_exploration_ids_subscribed_to(self, user_id): user_id: str. The user id. Returns: - tuple(str). The tuple containing all the exploration ids of the + List(str). The list containing all the exploration ids of the explorations to which the user has subscribed to. """ subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) - return ( - subscriptions_model.exploration_ids - if subscriptions_model else []) - - def _get_collection_ids_subscribed_to(self, user_id): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. 
+ if subscriptions_model: + exploration_ids: List[str] = subscriptions_model.exploration_ids + return exploration_ids + else: + return [] + + def _get_collection_ids_subscribed_to(self, user_id: str) -> List[str]: """Returns all the collection ids of the collections to which the user has subscribed to. @@ -105,16 +123,21 @@ def _get_collection_ids_subscribed_to(self, user_id): user_id: str. The user id. Returns: - tuple(str). The tuple containing all the collection ids of the + List(str). The list containing all the collection ids of the collections to which the user has subscribed to. """ subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) - return ( - subscriptions_model.collection_ids - if subscriptions_model else []) - - def test_subscribe_to_feedback_thread(self): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. 
+ if subscriptions_model: + collection_ids: List[str] = subscriptions_model.collection_ids + return collection_ids + else: + return [] + + def test_subscribe_to_feedback_thread(self) -> None: self.assertEqual(self._get_thread_ids_subscribed_to(USER_ID), []) subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID) @@ -132,7 +155,7 @@ def test_subscribe_to_feedback_thread(self): self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID, FEEDBACK_THREAD_ID_2]) - def test_subscribe_to_exploration(self): + def test_subscribe_to_exploration(self) -> None: self.assertEqual(self._get_exploration_ids_subscribed_to(USER_ID), []) subscription_services.subscribe_to_exploration(USER_ID, EXP_ID) @@ -149,7 +172,7 @@ def test_subscribe_to_exploration(self): self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID, EXP_ID_2]) - def test_get_exploration_ids_subscribed_to(self): + def test_get_exploration_ids_subscribed_to(self) -> None: self.assertEqual( subscription_services.get_exploration_ids_subscribed_to( USER_ID), []) @@ -164,7 +187,7 @@ def test_get_exploration_ids_subscribed_to(self): subscription_services.get_exploration_ids_subscribed_to(USER_ID), [EXP_ID, EXP_ID_2]) - def test_get_all_threads_subscribed_to(self): + def test_get_all_threads_subscribed_to(self) -> None: self.assertEqual( subscription_services.get_all_threads_subscribed_to( USER_ID), []) @@ -179,7 +202,9 @@ def test_get_all_threads_subscribed_to(self): subscription_services.get_all_threads_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID, FEEDBACK_THREAD_ID_2]) - def test_thread_and_exp_subscriptions_are_tracked_individually(self): + def test_thread_and_exp_subscriptions_are_tracked_individually( + self + ) -> None: self.assertEqual(self._get_thread_ids_subscribed_to(USER_ID), []) subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID) @@ -189,7 +214,7 @@ def test_thread_and_exp_subscriptions_are_tracked_individually(self): self.assertEqual( 
self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID]) - def test_posting_to_feedback_thread_results_in_subscription(self): + def test_posting_to_feedback_thread_results_in_subscription(self) -> None: # The viewer posts a message to the thread. message_text = 'text' feedback_services.create_thread( @@ -216,7 +241,7 @@ def test_posting_to_feedback_thread_results_in_subscription(self): self.assertEqual( self._get_thread_ids_subscribed_to(self.editor_id), [thread_id]) - def test_creating_exploration_results_in_subscription(self): + def test_creating_exploration_results_in_subscription(self) -> None: self.assertEqual( self._get_exploration_ids_subscribed_to(USER_ID), []) exp_services.save_new_exploration( @@ -224,7 +249,9 @@ def test_creating_exploration_results_in_subscription(self): self.assertEqual( self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID]) - def test_adding_new_exploration_owner_or_editor_role_results_in_subscription(self): # pylint: disable=line-too-long + def test_adding_new_exploration_owner_or_editor_role_results_in_subscription( # pylint: disable=line-too-long + self + ) -> None: exploration = exp_domain.Exploration.create_default_exploration(EXP_ID) exp_services.save_new_exploration(self.owner_id, exploration) @@ -242,7 +269,9 @@ def test_adding_new_exploration_owner_or_editor_role_results_in_subscription(sel self.assertEqual( self._get_exploration_ids_subscribed_to(self.editor_id), [EXP_ID]) - def test_adding_new_exploration_viewer_role_does_not_result_in_subscription(self): # pylint: disable=line-too-long + def test_adding_new_exploration_viewer_role_does_not_result_in_subscription( + self + ) -> None: exploration = exp_domain.Exploration.create_default_exploration(EXP_ID) exp_services.save_new_exploration(self.owner_id, exploration) @@ -253,7 +282,7 @@ def test_adding_new_exploration_viewer_role_does_not_result_in_subscription(self self.assertEqual( self._get_exploration_ids_subscribed_to(self.viewer_id), []) - def 
test_deleting_exploration_does_not_delete_subscription(self): + def test_deleting_exploration_does_not_delete_subscription(self) -> None: exploration = exp_domain.Exploration.create_default_exploration(EXP_ID) exp_services.save_new_exploration(self.owner_id, exploration) self.assertEqual( @@ -263,7 +292,7 @@ def test_deleting_exploration_does_not_delete_subscription(self): self.assertEqual( self._get_exploration_ids_subscribed_to(self.owner_id), [EXP_ID]) - def test_subscribe_to_collection(self): + def test_subscribe_to_collection(self) -> None: self.assertEqual(self._get_collection_ids_subscribed_to(USER_ID), []) subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID) @@ -280,7 +309,7 @@ def test_subscribe_to_collection(self): self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID, COLLECTION_ID_2]) - def test_get_collection_ids_subscribed_to(self): + def test_get_collection_ids_subscribed_to(self) -> None: self.assertEqual( subscription_services.get_collection_ids_subscribed_to( USER_ID), []) @@ -295,7 +324,7 @@ def test_get_collection_ids_subscribed_to(self): subscription_services.get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID, COLLECTION_ID_2]) - def test_creating_collection_results_in_subscription(self): + def test_creating_collection_results_in_subscription(self) -> None: self.assertEqual( self._get_collection_ids_subscribed_to(USER_ID), []) self.save_new_default_collection(COLLECTION_ID, USER_ID) @@ -303,7 +332,8 @@ def test_creating_collection_results_in_subscription(self): self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID]) def test_adding_new_collection_owner_or_editor_role_results_in_subscription( - self): + self + ) -> None: self.save_new_default_collection(COLLECTION_ID, self.owner_id) self.assertEqual( @@ -325,7 +355,8 @@ def test_adding_new_collection_owner_or_editor_role_results_in_subscription( [COLLECTION_ID]) def test_adding_new_collection_viewer_role_does_not_result_in_subscription( - self): + self 
+ ) -> None: self.save_new_default_collection(COLLECTION_ID, self.owner_id) self.assertEqual( @@ -336,7 +367,7 @@ def test_adding_new_collection_viewer_role_does_not_result_in_subscription( self.assertEqual( self._get_collection_ids_subscribed_to(self.viewer_id), []) - def test_deleting_collection_does_not_delete_subscription(self): + def test_deleting_collection_does_not_delete_subscription(self) -> None: self.save_new_default_collection(COLLECTION_ID, self.owner_id) self.assertEqual( self._get_collection_ids_subscribed_to(self.owner_id), @@ -349,7 +380,8 @@ def test_deleting_collection_does_not_delete_subscription(self): [COLLECTION_ID]) def test_adding_exploration_to_collection_does_not_create_subscription( - self): + self + ) -> None: self.save_new_default_collection(COLLECTION_ID, self.owner_id) # The author is subscribed to the collection but to no explorations. @@ -382,18 +414,18 @@ def test_adding_exploration_to_collection_does_not_create_subscription( class UserSubscriptionsTest(test_utils.GenericTestBase): """Tests for subscription management.""" - OWNER_2_EMAIL = 'owner2@example.com' - OWNER2_USERNAME = 'owner2' + OWNER_2_EMAIL: Final = 'owner2@example.com' + OWNER2_USERNAME: Final = 'owner2' - def setUp(self): - super(UserSubscriptionsTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.OWNER_2_EMAIL, self.OWNER2_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.owner_2_id = self.get_user_id_from_email(self.OWNER_2_EMAIL) - def _get_all_subscribers_of_creator(self, user_id): + def _get_all_subscribers_of_creator(self, user_id: str) -> List[str]: """Returns all the ids of the subscribers that have subscribed to the creator. @@ -401,37 +433,47 @@ def _get_all_subscribers_of_creator(self, user_id): user_id: str. The user id. Returns: - tuple(str). The tuple containing all the ids of the subscribers that + List(str). 
The list containing all the ids of the subscribers that have subscribed to the creator. """ subscribers_model = user_models.UserSubscribersModel.get( user_id, strict=False) - return ( - subscribers_model.subscriber_ids - if subscribers_model else []) - - def _get_all_creators_subscribed_to(self, user_id): + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if subscribers_model: + subscriber_ids: List[str] = subscribers_model.subscriber_ids + return subscriber_ids + else: + return [] + + def _get_all_creators_subscribed_to(self, user_id: str) -> List[str]: """Returns the ids of the creators the given user has subscribed to. Args: user_id: str. The user id. Returns: - tuple(str). The tuple containing all the creator ids the given user + List(str). The list containing all the creator ids the given user has subscribed to. """ subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) - return ( - subscriptions_model.creator_ids - if subscriptions_model else []) - - def test_exception_is_raised_when_user_self_subscribes(self): - with self.assertRaisesRegexp( + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + if subscriptions_model: + creator_ids: List[str] = subscriptions_model.creator_ids + return creator_ids + else: + return [] + + def test_exception_is_raised_when_user_self_subscribes(self) -> None: + with self.assertRaisesRegex( Exception, 'User %s is not allowed to self subscribe.' 
% USER_ID): subscription_services.subscribe_to_creator(USER_ID, USER_ID) - def test_subscribe_to_creator(self): + def test_subscribe_to_creator(self) -> None: self.assertEqual(self._get_all_subscribers_of_creator( self.owner_id), []) @@ -461,7 +503,7 @@ def test_subscribe_to_creator(self): self._get_all_creators_subscribed_to( USER_ID_2), [self.owner_id]) - def test_unsubscribe_from_creator(self): + def test_unsubscribe_from_creator(self) -> None: self.assertEqual(self._get_all_subscribers_of_creator( self.owner_id), []) @@ -501,7 +543,7 @@ def test_unsubscribe_from_creator(self): self._get_all_creators_subscribed_to(USER_ID_2), []) - def test_get_all_subscribers_of_creator(self): + def test_get_all_subscribers_of_creator(self) -> None: self.assertEqual( subscription_services.get_all_subscribers_of_creator( self.owner_id), []) @@ -516,7 +558,7 @@ def test_get_all_subscribers_of_creator(self): subscription_services.get_all_subscribers_of_creator(self.owner_id), [USER_ID, USER_ID_2]) - def test_get_all_creators_subscribed_to(self): + def test_get_all_creators_subscribed_to(self) -> None: self.assertEqual( subscription_services.get_all_creators_subscribed_to( USER_ID), []) diff --git a/core/domain/subtopic_page_domain.py b/core/domain/subtopic_page_domain.py index 6c2f1febeb0b..be2cc7bbdf14 100644 --- a/core/domain/subtopic_page_domain.py +++ b/core/domain/subtopic_page_domain.py @@ -22,20 +22,26 @@ from core import utils from core.constants import constants from core.domain import change_domain -from core.domain import html_validation_service from core.domain import state_domain -from core.platform import models +from core.domain import translation_domain -(topic_models,) = models.Registry.import_models([models.NAMES.topic]) +from typing import Callable, Final, List, Literal, Optional, TypedDict, Union -SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' -SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' 
-SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' +from core.domain import html_validation_service # pylint: disable=invalid-import-from # isort:skip -CMD_CREATE_NEW = 'create_new' +# TODO(#14537): Refactor this file and remove imports marked +# with 'invalid-import-from'. + +SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML: Final = 'page_contents_html' +SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO: Final = 'page_contents_audio' +SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS: Final = ( + 'page_written_translations' +) + +CMD_CREATE_NEW: Final = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. -CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' +CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY: Final = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): @@ -49,31 +55,120 @@ class SubtopicPageChange(change_domain.BaseChange): # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. 
- SUBTOPIC_PAGE_PROPERTIES = ( + SUBTOPIC_PAGE_PROPERTIES: List[str] = [ SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, - SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) + SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS + ] - ALLOWED_COMMANDS = [{ + ALLOWED_COMMANDS: List[feconf.ValidCmdDict] = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} + 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES}, + 'deprecated_values': {} }] +AllowedUpdateSubtopicPagePropertyCmdTypes = Union[ + state_domain.SubtitledHtmlDict, + state_domain.RecordedVoiceoversDict, + translation_domain.WrittenTranslationsDict +] + + +class CreateNewSubtopicPageCmd(SubtopicPageChange): + """Class representing the SubtopicPageChange's + CMD_CREATE_NEW command. + """ + + topic_id: str + subtopic_id: int + + +class UpdateSubtopicPagePropertyCmd(SubtopicPageChange): + """Class representing the SubtopicPageChange's + CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY command. + """ + + subtopic_id: int + property_name: str + new_value: AllowedUpdateSubtopicPagePropertyCmdTypes + old_value: AllowedUpdateSubtopicPagePropertyCmdTypes + + +class UpdateSubtopicPagePropertyPageContentsHtmlCmd(SubtopicPageChange): + """Class representing the SubtopicPageChange's + CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY command with + SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML as + allowed value. 
+ """ + + subtopic_id: int + property_name: Literal['page_contents_html'] + new_value: state_domain.SubtitledHtmlDict + old_value: state_domain.SubtitledHtmlDict + + +class UpdateSubtopicPagePropertyPageContentsAudioCmd(SubtopicPageChange): + """Class representing the SubtopicPageChange's + CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY command with + SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO as + allowed value. + """ + + subtopic_id: int + property_name: Literal['page_contents_audio'] + new_value: state_domain.RecordedVoiceoversDict + old_value: state_domain.RecordedVoiceoversDict + + +class UpdateSubtopicPagePropertyPageWrittenTranslationsCmd(SubtopicPageChange): + """Class representing the SubtopicPageChange's + CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY command with + SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS + as allowed value. + """ + + subtopic_id: int + property_name: Literal['page_written_translations'] + new_value: translation_domain.WrittenTranslationsDict + old_value: translation_domain.WrittenTranslationsDict + + +class SubtopicPageContentsDict(TypedDict): + """Dictionary representing the SubtopicPageContents object.""" + + subtitled_html: state_domain.SubtitledHtmlDict + recorded_voiceovers: state_domain.RecordedVoiceoversDict + written_translations: translation_domain.WrittenTranslationsDict + + +class VersionedSubtopicPageContentsDict(TypedDict): + """Dictionary representing the versioned SubtopicPageContents object.""" + + schema_version: int + page_contents: SubtopicPageContentsDict + + class SubtopicPageContents: """Domain object for the contents on a subtopic page.""" def __init__( - self, subtitled_html, recorded_voiceovers, written_translations): + self, + subtitled_html: state_domain.SubtitledHtml, + recorded_voiceovers: state_domain.RecordedVoiceovers, + written_translations: translation_domain.WrittenTranslations + ) -> None: """Constructs a SubtopicPageContents domain object. 
Args: @@ -89,17 +184,17 @@ def __init__( self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations - def validate(self): + def validate(self) -> None: """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. """ self.subtitled_html.validate() - content_ids = set([self.subtitled_html.content_id]) + content_ids = [self.subtitled_html.content_id] self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod - def create_default_subtopic_page_contents(cls): + def create_default_subtopic_page_contents(cls) -> SubtopicPageContents: """Creates a default subtopic page contents object. Returns: @@ -111,10 +206,10 @@ def create_default_subtopic_page_contents(cls): content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), - state_domain.WrittenTranslations.from_dict( + translation_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) - def to_dict(self): + def to_dict(self) -> SubtopicPageContentsDict: """Returns a dict representing this SubtopicPageContents domain object. Returns: @@ -127,7 +222,9 @@ def to_dict(self): } @classmethod - def from_dict(cls, page_contents_dict): + def from_dict( + cls, page_contents_dict: SubtopicPageContentsDict + ) -> SubtopicPageContents: """Creates a subtopic page contents object from a dictionary. 
Args: @@ -144,16 +241,33 @@ def from_dict(cls, page_contents_dict): page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), - state_domain.WrittenTranslations.from_dict(page_contents_dict[ + translation_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) +class SubtopicPageDict(TypedDict): + """Dictionary representing the SubtopicPage object.""" + + id: str + topic_id: str + page_contents: SubtopicPageContentsDict + page_contents_schema_version: int + language_code: str + version: int + + class SubtopicPage: """Domain object for a Subtopic page.""" def __init__( - self, subtopic_page_id, topic_id, page_contents, - page_contents_schema_version, language_code, version): + self, + subtopic_page_id: str, + topic_id: str, + page_contents: SubtopicPageContents, + page_contents_schema_version: int, + language_code: str, + version: int + ) -> None: """Constructs a SubtopicPage domain object. Args: @@ -174,7 +288,7 @@ def __init__( self.language_code = language_code self.version = version - def to_dict(self): + def to_dict(self) -> SubtopicPageDict: """Returns a dict representing this SubtopicPage domain object. Returns: @@ -190,7 +304,7 @@ def to_dict(self): } @classmethod - def get_subtopic_page_id(cls, topic_id, subtopic_id): + def get_subtopic_page_id(cls, topic_id: str, subtopic_id: int) -> str: """Returns the subtopic page id from the topic_id and subtopic_id. Args: @@ -203,11 +317,13 @@ def get_subtopic_page_id(cls, topic_id, subtopic_id): return '%s-%s' % (topic_id, subtopic_id) @classmethod - def create_default_subtopic_page(cls, subtopic_id, topic_id): + def create_default_subtopic_page( + cls, subtopic_id: int, topic_id: str + ) -> SubtopicPage: """Creates a SubtopicPage object with default values. Args: - subtopic_id: str. ID of the subtopic. + subtopic_id: int. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. 
@@ -224,7 +340,10 @@ def create_default_subtopic_page(cls, subtopic_id, topic_id): @classmethod def convert_html_fields_in_subtopic_page_contents( - cls, subtopic_page_contents_dict, conversion_fn): + cls, + subtopic_page_contents_dict: SubtopicPageContentsDict, + conversion_fn: Callable[[str], str] + ) -> SubtopicPageContentsDict: """Applies a conversion function on all the html strings in subtopic page contents to migrate them to a desired state. @@ -237,18 +356,15 @@ def convert_html_fields_in_subtopic_page_contents( Returns: dict. The converted subtopic_page_contents_dict. """ - subtopic_page_contents_dict['written_translations'] = ( - state_domain.WrittenTranslations. - convert_html_in_written_translations( - subtopic_page_contents_dict['written_translations'], - conversion_fn)) subtopic_page_contents_dict['subtitled_html']['html'] = ( conversion_fn( subtopic_page_contents_dict['subtitled_html']['html'])) return subtopic_page_contents_dict @classmethod - def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): + def _convert_page_contents_v1_dict_to_v2_dict( + cls, page_contents_dict: SubtopicPageContentsDict + ) -> SubtopicPageContentsDict: """Converts v1 SubtopicPage Contents schema to the v2 schema. v2 schema introduces the new schema for Math components. @@ -264,7 +380,9 @@ def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): html_validation_service.add_math_content_to_math_rte_components) @classmethod - def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): + def _convert_page_contents_v2_dict_to_v3_dict( + cls, page_contents_dict: SubtopicPageContentsDict + ) -> SubtopicPageContentsDict: """Converts v2 SubtopicPage Contents schema to the v3 schema. v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. 
@@ -281,7 +399,9 @@ def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod - def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): + def _convert_page_contents_v3_dict_to_v4_dict( + cls, page_contents_dict: SubtopicPageContentsDict + ) -> SubtopicPageContentsDict: """Converts v3 SubtopicPage Contents schema to the v4 schema. v4 schema fixes HTML encoding issues. @@ -298,7 +418,10 @@ def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): @classmethod def update_page_contents_from_model( - cls, versioned_page_contents, current_version): + cls, + versioned_page_contents: VersionedSubtopicPageContentsDict, + current_version: int + ) -> None: """Converts the page_contents blob contained in the given versioned_page_contents dict from current_version to current_version + 1. Note that the versioned_page_contents being @@ -320,7 +443,7 @@ def update_page_contents_from_model( versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) - def get_subtopic_id_from_subtopic_page_id(self): + def get_subtopic_id_from_subtopic_page_id(self) -> int: """Returns the id from the subtopic page id of the object. Returns: @@ -328,7 +451,9 @@ def get_subtopic_id_from_subtopic_page_id(self): """ return int(self.id[len(self.topic_id) + 1:]) - def update_page_contents_html(self, new_page_contents_html): + def update_page_contents_html( + self, new_page_contents_html: state_domain.SubtitledHtml + ) -> None: """The new value for the html data field. Args: @@ -337,7 +462,9 @@ def update_page_contents_html(self, new_page_contents_html): """ self.page_contents.subtitled_html = new_page_contents_html - def update_page_contents_audio(self, new_page_contents_audio): + def update_page_contents_audio( + self, new_page_contents_audio: state_domain.RecordedVoiceovers + ) -> None: """The new value for the recorded_voiceovers data field. 
Args: @@ -347,7 +474,10 @@ def update_page_contents_audio(self, new_page_contents_audio): self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( - self, new_page_written_translations_dict): + self, + new_page_written_translations_dict: ( + translation_domain.WrittenTranslationsDict) + ) -> None: """The new value for the written_translations data field. Args: @@ -355,10 +485,10 @@ def update_page_contents_written_translations( the subtopic page. """ self.page_contents.written_translations = ( - state_domain.WrittenTranslations.from_dict( + translation_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) - def validate(self): + def validate(self) -> None: """Validates various properties of the SubtopicPage object. Raises: @@ -399,3 +529,78 @@ def validate(self): ): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code) + + +class SubtopicPageSummaryDict(TypedDict): + """Dictionary representation of SubtopicPageSummary domain object.""" + + subtopic_id: int + subtopic_title: str + parent_topic_id: str + parent_topic_name: str + thumbnail_filename: Optional[str] + thumbnail_bg_color: Optional[str] + subtopic_mastery: Optional[float] + parent_topic_url_fragment: Optional[str] + classroom_url_fragment: Optional[str] + + +class SubtopicPageSummary: + """Domain object for Subtopic Page Summary.""" + + def __init__( + self, + subtopic_id: int, + subtopic_title: str, + parent_topic_id: str, + parent_topic_name: str, + thumbnail_filename: Optional[str], + thumbnail_bg_color: Optional[str], + subtopic_mastery: Optional[float], + parent_topic_url_fragment: Optional[str], + classroom_url_fragment: Optional[str] + ): + """Initialize a SubtopicPageSummary object. + + Args: + subtopic_id: str. The id of the subtopic. + subtopic_title: str. The title of the subtopic. + parent_topic_id: str. The id of the parent topic. + parent_topic_name: str. The name of the parent topic. 
+ thumbnail_filename: str. The filename of the thumbnail image. + thumbnail_bg_color: str. The background color of the thumbnail + image. + subtopic_mastery: float. The mastery score of a user in the + subtopic. + parent_topic_url_fragment: str. The url fragment of the parent + topic. + classroom_url_fragment: str. The url fragment of the classroom + to which the parent topic belongs. + """ + self.subtopic_id = subtopic_id + self.subtopic_title = subtopic_title + self.parent_topic_id = parent_topic_id + self.parent_topic_name = parent_topic_name + self.thumbnail_filename = thumbnail_filename + self.thumbnail_bg_color = thumbnail_bg_color + self.subtopic_mastery = subtopic_mastery + self.parent_topic_url_fragment = parent_topic_url_fragment + self.classroom_url_fragment = classroom_url_fragment + + def to_dict(self) -> SubtopicPageSummaryDict: + """Returns a dict representing this SubtopicPageSummary domain object. + + Returns: + dict. A dict, mapping all fields of SubtopicPageSummary instance. 
+ """ + return { + 'subtopic_id': self.subtopic_id, + 'subtopic_title': self.subtopic_title, + 'parent_topic_id': self.parent_topic_id, + 'parent_topic_name': self.parent_topic_name, + 'thumbnail_filename': self.thumbnail_filename, + 'thumbnail_bg_color': self.thumbnail_bg_color, + 'subtopic_mastery': self.subtopic_mastery, + 'parent_topic_url_fragment': self.parent_topic_url_fragment, + 'classroom_url_fragment': self.classroom_url_fragment + } diff --git a/core/domain/subtopic_page_domain_test.py b/core/domain/subtopic_page_domain_test.py index 816cca2edb1a..a2fbbbbd2a18 100644 --- a/core/domain/subtopic_page_domain_test.py +++ b/core/domain/subtopic_page_domain_test.py @@ -23,22 +23,23 @@ from core.constants import constants from core.domain import state_domain from core.domain import subtopic_page_domain +from core.domain import translation_domain from core.tests import test_utils class SubtopicPageDomainUnitTests(test_utils.GenericTestBase): """Tests for subtopic page domain objects.""" - topic_id = 'topic_id' - subtopic_id = 1 + topic_id: str = 'topic_id' + subtopic_id: int = 1 - def setUp(self): - super(SubtopicPageDomainUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( self.subtopic_id, self.topic_id)) - def test_to_dict(self): + def test_to_dict(self) -> None: expected_subtopic_page_dict = { 'id': 'topic_id-1', 'topic_id': 'topic_id', @@ -66,7 +67,7 @@ def test_to_dict(self): self.assertEqual( self.subtopic_page.to_dict(), expected_subtopic_page_dict) - def test_create_default_subtopic_page(self): + def test_create_default_subtopic_page(self) -> None: """Tests the create_default_topic() function.""" subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( @@ -98,34 +99,46 @@ def test_create_default_subtopic_page(self): } self.assertEqual(subtopic_page.to_dict(), expected_subtopic_page_dict) - def 
test_get_subtopic_page_id(self): + def test_get_subtopic_page_id(self) -> None: self.assertEqual( subtopic_page_domain.SubtopicPage.get_subtopic_page_id('abc', 1), 'abc-1') - def test_get_subtopic_id_from_subtopic_page_id(self): + def test_get_subtopic_id_from_subtopic_page_id(self) -> None: self.assertEqual( self.subtopic_page.get_subtopic_id_from_subtopic_page_id(), 1) - def _assert_validation_error(self, expected_error_substring): + def _assert_subtopic_validation_error( + self, expected_error_substring: str + ) -> None: """Checks that the topic passes strict validation.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): self.subtopic_page.validate() - def test_subtopic_topic_id_validation(self): - self.subtopic_page.topic_id = 1 - self._assert_validation_error('Expected topic_id to be a string') - - def test_language_code_validation(self): - self.subtopic_page.language_code = 0 - self._assert_validation_error('Expected language code to be a string') + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_subtopic_topic_id_validation(self) -> None: + self.subtopic_page.topic_id = 1 # type: ignore[assignment] + self._assert_subtopic_validation_error( + 'Expected topic_id to be a string' + ) + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_language_code_validation(self) -> None: + self.subtopic_page.language_code = 0 # type: ignore[assignment] + self._assert_subtopic_validation_error( + 'Expected language code to be a string' + ) self.subtopic_page.language_code = 'xz' - self._assert_validation_error('Invalid language code') + self._assert_subtopic_validation_error('Invalid language code') - def test_update_audio(self): - recorded_voiceovers_dict = { + def test_update_audio(self) -> None: + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'content': { 'en': { @@ -137,7 +150,7 @@ def test_update_audio(self): } } } - expected_subtopic_page_dict = { + expected_subtopic_page_dict: subtopic_page_domain.SubtopicPageDict = { 'id': 'topic_id-1', 'topic_id': 'topic_id', 'page_contents': { @@ -163,7 +176,7 @@ def test_update_audio(self): self.assertEqual( self.subtopic_page.to_dict(), expected_subtopic_page_dict) - def test_update_html(self): + def test_update_html(self) -> None: expected_subtopic_page_dict = { 'id': 'topic_id-1', 'topic_id': 'topic_id', @@ -196,8 +209,10 @@ def test_update_html(self): self.assertEqual( self.subtopic_page.to_dict(), expected_subtopic_page_dict) - def test_update_written_translations(self): - written_translations_dict = { + def test_update_written_translations(self) -> None: + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { 'translations_mapping': { 'content': { 'en': { @@ -208,7 +223,7 @@ def test_update_written_translations(self): } } } - expected_subtopic_page_dict = { + expected_subtopic_page_dict: subtopic_page_domain.SubtopicPageDict = { 'id': 'topic_id-1', 'topic_id': 'topic_id', 'page_contents': { @@ -234,7 +249,7 @@ def test_update_written_translations(self): self.assertEqual( self.subtopic_page.to_dict(), expected_subtopic_page_dict) - def test_create_subtopic_page_change(self): + def test_create_subtopic_page_change(self) -> None: subtopic_page_change_object = 
subtopic_page_domain.SubtopicPageChange({ 'cmd': subtopic_page_domain.CMD_CREATE_NEW, 'topic_id': self.topic_id, @@ -248,22 +263,28 @@ def test_create_subtopic_page_change(self): 'subtopic_id': 'subtopic_id' }) - def test_validate_version_number(self): - self.subtopic_page.version = 'invalid_version' - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_version_number(self) -> None: + self.subtopic_page.version = 'invalid_version' # type: ignore[assignment] + with self.assertRaisesRegex( Exception, 'Expected version number to be an int'): self.subtopic_page.validate() - def test_validate_page_contents_schema_version_type(self): - self.subtopic_page.page_contents_schema_version = 'invalid_version' - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_page_contents_schema_version_type(self) -> None: + self.subtopic_page.page_contents_schema_version = 'invalid_version' # type: ignore[assignment] + with self.assertRaisesRegex( Exception, 'Expected page contents schema version to be an integer'): self.subtopic_page.validate() - def test_validate_page_contents_schema_version(self): + def test_validate_page_contents_schema_version(self) -> None: self.subtopic_page.page_contents_schema_version = 0 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected page contents schema version to be %s' % feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION): @@ -271,13 +292,13 @@ def test_validate_page_contents_schema_version(self): class SubtopicPageContentsDomainUnitTests(test_utils.GenericTestBase): - def setUp(self): - super(SubtopicPageContentsDomainUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.subtopic_page_contents = ( subtopic_page_domain.SubtopicPageContents .create_default_subtopic_page_contents()) - def test_create_default_subtopic_page(self): + def test_create_default_subtopic_page(self) -> None: subtopic_page_contents = ( subtopic_page_domain.SubtopicPageContents .create_default_subtopic_page_contents()) @@ -301,8 +322,10 @@ def test_create_default_subtopic_page(self): subtopic_page_contents.to_dict(), expected_subtopic_page_contents_dict) - def test_to_and_from_dict(self): - subtopic_page_contents_dict = { + def test_to_and_from_dict(self) -> None: + subtopic_page_contents_dict: ( + subtopic_page_domain.SubtopicPageContentsDict + ) = { 'subtitled_html': { 'html': '

    test

    ', 'content_id': 'content' @@ -340,18 +363,20 @@ def test_to_and_from_dict(self): class SubtopicPageChangeTests(test_utils.GenericTestBase): - def test_subtopic_page_change_object_with_missing_cmd(self): - with self.assertRaisesRegexp( + def test_subtopic_page_change_object_with_missing_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Missing cmd key in change dict'): subtopic_page_domain.SubtopicPageChange({'invalid': 'data'}) - def test_subtopic_page_change_object_with_invalid_cmd(self): - with self.assertRaisesRegexp( + def test_subtopic_page_change_object_with_invalid_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Command invalid is not allowed'): subtopic_page_domain.SubtopicPageChange({'cmd': 'invalid'}) - def test_subtopic_page_change_object_with_missing_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_subtopic_page_change_object_with_missing_attribute_in_cmd( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following required attributes are missing: ' 'new_value, old_value')): @@ -360,8 +385,10 @@ def test_subtopic_page_change_object_with_missing_attribute_in_cmd(self): 'property_name': '

    page_contents_html

    ', }) - def test_subtopic_page_change_object_with_extra_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_subtopic_page_change_object_with_extra_attribute_in_cmd( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following extra attributes are present: invalid')): subtopic_page_domain.SubtopicPageChange({ @@ -372,8 +399,9 @@ def test_subtopic_page_change_object_with_extra_attribute_in_cmd(self): }) def test_subtopic_page_change_object_with_invalid_subtopic_page_property( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd update_subtopic_page_property: ' 'invalid is not allowed')): @@ -386,7 +414,8 @@ def test_subtopic_page_change_object_with_invalid_subtopic_page_property( }) def test_subtopic_page_change_object_with_update_subtopic_page_property( - self): + self + ) -> None: subtopic_page_change_object = subtopic_page_domain.SubtopicPageChange({ 'cmd': 'update_subtopic_page_property', 'subtopic_id': 'subtopic_id', @@ -403,7 +432,7 @@ def test_subtopic_page_change_object_with_update_subtopic_page_property( self.assertEqual(subtopic_page_change_object.new_value, 'new_value') self.assertEqual(subtopic_page_change_object.old_value, 'old_value') - def test_subtopic_page_change_object_with_create_new(self): + def test_subtopic_page_change_object_with_create_new(self) -> None: subtopic_page_change_object = ( subtopic_page_domain.SubtopicPageChange({ 'cmd': 'create_new', @@ -415,7 +444,7 @@ def test_subtopic_page_change_object_with_create_new(self): self.assertEqual(subtopic_page_change_object.topic_id, 'topic_id') self.assertEqual(subtopic_page_change_object.subtopic_id, 'subtopic_id') - def test_to_dict(self): + def test_to_dict(self) -> None: subtopic_page_change_dict = { 'cmd': 'create_new', 'topic_id': 'topic_id', @@ -425,3 +454,41 @@ def test_to_dict(self): subtopic_page_change_dict) self.assertEqual( 
subtopic_page_change_object.to_dict(), subtopic_page_change_dict) + + +class SubtopicPageSummaryTests(test_utils.GenericTestBase): + + SUBTOPIC_ID = 1 + SUBTOPIC_TITLE = 'subtopic_title' + TOPIC_ID = 'topic_id' + TOPIC_TITLE = 'topic_title' + SUBTOPIC_MASTERY = 0.5 + + def setUp(self) -> None: + super().setUp() + + self.subtopic_page_summary = subtopic_page_domain.SubtopicPageSummary( + self.SUBTOPIC_ID, self.SUBTOPIC_TITLE, self.TOPIC_ID, + self.TOPIC_TITLE, 'thumbnail_filename', 'red', + self.SUBTOPIC_MASTERY, 'topic-url', 'classroom-url' + ) + + def test_to_dict(self) -> None: + subtopic_page_summary_dict = self.subtopic_page_summary.to_dict() + + self.assertEqual( + subtopic_page_summary_dict['subtopic_id'], self.SUBTOPIC_ID + ) + self.assertEqual( + subtopic_page_summary_dict['subtopic_title'], self.SUBTOPIC_TITLE + ) + self.assertEqual( + subtopic_page_summary_dict['parent_topic_id'], self.TOPIC_ID + ) + self.assertEqual( + subtopic_page_summary_dict['parent_topic_name'], self.TOPIC_TITLE + ) + self.assertEqual( + subtopic_page_summary_dict['subtopic_mastery'], + self.SUBTOPIC_MASTERY + ) diff --git a/core/domain/subtopic_page_services.py b/core/domain/subtopic_page_services.py index 9cf77e585c83..4844e057f153 100644 --- a/core/domain/subtopic_page_services.py +++ b/core/domain/subtopic_page_services.py @@ -21,14 +21,28 @@ import copy from core import feconf +from core.domain import change_domain +from core.domain import classroom_services +from core.domain import learner_group_services +from core.domain import skill_services from core.domain import subtopic_page_domain +from core.domain import topic_fetchers from core.platform import models -(subtopic_models,) = models.Registry.import_models([models.NAMES.subtopic]) -datastore_services = models.Registry.import_datastore_services() +from typing import Dict, List, Literal, Optional, Sequence, overload +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import subtopic_models -def 
_migrate_page_contents_to_latest_schema(versioned_page_contents): +(subtopic_models,) = models.Registry.import_models([models.Names.SUBTOPIC]) + + +def _migrate_page_contents_to_latest_schema( + versioned_page_contents: ( + subtopic_page_domain.VersionedSubtopicPageContentsDict + ) +) -> None: """Holds the responsibility of performing a step-by-step, sequential update of the page contents structure based on the schema version of the input page contents dictionary. If the current page_contents schema changes, a @@ -58,7 +72,9 @@ def _migrate_page_contents_to_latest_schema(versioned_page_contents): page_contents_schema_version += 1 -def get_subtopic_page_from_model(subtopic_page_model): +def get_subtopic_page_from_model( + subtopic_page_model: subtopic_models.SubtopicPageModel +) -> subtopic_page_domain.SubtopicPage: """Returns a domain object for an SubtopicPage given a subtopic page model. Args: @@ -68,7 +84,9 @@ def get_subtopic_page_from_model(subtopic_page_model): Returns: SubtopicPage. The domain object corresponding to the given model object. """ - versioned_page_contents = { + versioned_page_contents: ( + subtopic_page_domain.VersionedSubtopicPageContentsDict + ) = { 'schema_version': subtopic_page_model.page_contents_schema_version, 'page_contents': copy.deepcopy(subtopic_page_model.page_contents) } @@ -86,7 +104,44 @@ def get_subtopic_page_from_model(subtopic_page_model): ) -def get_subtopic_page_by_id(topic_id, subtopic_id, strict=True): +@overload +def get_subtopic_page_by_id( + topic_id: str, subtopic_id: int +) -> subtopic_page_domain.SubtopicPage: ... + + +@overload +def get_subtopic_page_by_id( + topic_id: str, + subtopic_id: int, + *, + strict: Literal[True] +) -> subtopic_page_domain.SubtopicPage: ... + + +@overload +def get_subtopic_page_by_id( + topic_id: str, + subtopic_id: int, + *, + strict: Literal[False] +) -> Optional[subtopic_page_domain.SubtopicPage]: ... 
+ + +@overload +def get_subtopic_page_by_id( + topic_id: str, + subtopic_id: int, + *, + strict: bool = ... +) -> Optional[subtopic_page_domain.SubtopicPage]: ... + + +def get_subtopic_page_by_id( + topic_id: str, + subtopic_id: int, + strict: bool = True +) -> Optional[subtopic_page_domain.SubtopicPage]: """Returns a domain object representing a subtopic page. Args: @@ -110,7 +165,10 @@ def get_subtopic_page_by_id(topic_id, subtopic_id, strict=True): return None -def get_subtopic_pages_with_ids(topic_id, subtopic_ids): +def get_subtopic_pages_with_ids( + topic_id: str, + subtopic_ids: List[int] +) -> List[Optional[subtopic_page_domain.SubtopicPage]]: """Returns a list of domain objects with given ids. Args: @@ -128,7 +186,7 @@ def get_subtopic_pages_with_ids(topic_id, subtopic_ids): topic_id, subtopic_id)) subtopic_page_models = subtopic_models.SubtopicPageModel.get_multi( subtopic_page_ids) - subtopic_pages = [] + subtopic_pages: List[Optional[subtopic_page_domain.SubtopicPage]] = [] for subtopic_page_model in subtopic_page_models: if subtopic_page_model is None: subtopic_pages.append(subtopic_page_model) @@ -138,7 +196,35 @@ def get_subtopic_pages_with_ids(topic_id, subtopic_ids): return subtopic_pages -def get_subtopic_page_contents_by_id(topic_id, subtopic_id, strict=True): +@overload +def get_subtopic_page_contents_by_id( + topic_id: str, subtopic_id: int +) -> subtopic_page_domain.SubtopicPageContents: ... + + +@overload +def get_subtopic_page_contents_by_id( + topic_id: str, + subtopic_id: int, + *, + strict: Literal[True] +) -> subtopic_page_domain.SubtopicPageContents: ... + + +@overload +def get_subtopic_page_contents_by_id( + topic_id: str, + subtopic_id: int, + *, + strict: Literal[False] +) -> Optional[subtopic_page_domain.SubtopicPageContents]: ... 
+ + +def get_subtopic_page_contents_by_id( + topic_id: str, + subtopic_id: int, + strict: bool = True +) -> Optional[subtopic_page_domain.SubtopicPageContents]: """Returns the page contents of a subtopic Args: @@ -160,7 +246,11 @@ def get_subtopic_page_contents_by_id(topic_id, subtopic_id, strict=True): def save_subtopic_page( - committer_id, subtopic_page, commit_message, change_list): + committer_id: str, + subtopic_page: subtopic_page_domain.SubtopicPage, + commit_message: Optional[str], + change_list: Sequence[change_domain.BaseChange] +) -> None: """Validates a subtopic page and commits it to persistent storage. If successful, increments the version number of the incoming subtopic page domain object by 1. @@ -169,7 +259,8 @@ def save_subtopic_page( committer_id: str. ID of the given committer. subtopic_page: SubtopicPage. The subtopic page domain object to be saved. - commit_message: str. The commit message. + commit_message: str|None. The commit description message, for + unpublished topics, it may be equal to None. change_list: list(SubtopicPageChange). List of changes applied to a subtopic page. @@ -195,7 +286,8 @@ def save_subtopic_page( 'Unexpected error: trying to update version %s of topic ' 'from version %s. Please reload the page and try again.' % (subtopic_page_model.version, subtopic_page.version)) - elif subtopic_page.version < subtopic_page_model.version: + + if subtopic_page.version < subtopic_page_model.version: raise Exception( 'Trying to update version %s of topic from version %s, ' 'which is too old. Please reload the page and try again.' @@ -212,7 +304,11 @@ def save_subtopic_page( def delete_subtopic_page( - committer_id, topic_id, subtopic_id, force_deletion=False): + committer_id: str, + topic_id: str, + subtopic_id: int, + force_deletion: bool = False +) -> None: """Delete a topic summary model. 
Args: @@ -230,3 +326,143 @@ def delete_subtopic_page( subtopic_models.SubtopicPageModel.get(subtopic_page_id).delete( committer_id, feconf.COMMIT_MESSAGE_SUBTOPIC_PAGE_DELETED, force_deletion=force_deletion) + learner_group_services.remove_subtopic_page_reference_from_learner_groups( + topic_id, subtopic_id) + + +def get_topic_ids_from_subtopic_page_ids( + subtopic_page_ids: List[str] +) -> List[str]: + """Returns the topic ids corresponding to the given set of subtopic page + ids. + + Args: + subtopic_page_ids: list(str). The ids of the subtopic pages. + + Returns: + list(str). The topic ids corresponding to the given subtopic page ids. + The returned list of topic ids is deduplicated and ordered + alphabetically. + """ + return sorted(list({ + subtopic_page_id.split(':')[0] for subtopic_page_id in + subtopic_page_ids + })) + + +def get_multi_users_subtopic_pages_progress( + user_ids: List[str], + subtopic_page_ids: List[str] +) -> Dict[str, List[subtopic_page_domain.SubtopicPageSummaryDict]]: + """Returns the progress of the given user on the given subtopic pages. + + Args: + user_ids: list(str). The ids of the users. + subtopic_page_ids: list(str). The ids of the subtopic pages. + + Returns: + dict(str, list(SubtopicPageSummaryDict)). User IDs as keys and Subtopic + Page Summary domain object dictionaries containing details of the + subtopic page and users mastery in it as values. 
+ """ + + topic_ids = get_topic_ids_from_subtopic_page_ids(subtopic_page_ids) + topics = topic_fetchers.get_topics_by_ids(topic_ids, strict=True) + + all_skill_ids_lists = [ + topic.get_all_skill_ids() for topic in topics if topic + ] + all_skill_ids = list( + { + skill_id for skill_list in all_skill_ids_lists + for skill_id in skill_list + } + ) + + all_users_skill_mastery_dicts = ( + skill_services.get_multi_users_skills_mastery( + user_ids, all_skill_ids + ) + ) + + all_users_subtopic_prog_summaries: Dict[ + str, List[subtopic_page_domain.SubtopicPageSummaryDict] + ] = {user_id: [] for user_id in user_ids} + for topic in topics: + for subtopic in topic.subtopics: + subtopic_page_id = '{}:{}'.format(topic.id, subtopic.id) + if subtopic_page_id not in subtopic_page_ids: + continue + for user_id, skills_mastery_dict in ( + all_users_skill_mastery_dicts.items() + ): + skill_mastery_dict = { + skill_id: mastery + for skill_id, mastery in skills_mastery_dict.items() + if mastery is not None and ( + skill_id in subtopic.skill_ids + ) + } + subtopic_mastery: Optional[float] = None + + # Subtopic mastery is average of skill masteries. 
+ if skill_mastery_dict: + subtopic_mastery = ( + sum(skill_mastery_dict.values()) / + len(skill_mastery_dict) + ) + + all_users_subtopic_prog_summaries[user_id].append({ + 'subtopic_id': subtopic.id, + 'subtopic_title': subtopic.title, + 'parent_topic_id': topic.id, + 'parent_topic_name': topic.name, + 'thumbnail_filename': subtopic.thumbnail_filename, + 'thumbnail_bg_color': subtopic.thumbnail_bg_color, + 'subtopic_mastery': subtopic_mastery, + 'parent_topic_url_fragment': topic.url_fragment, + 'classroom_url_fragment': ( + classroom_services + .get_classroom_url_fragment_for_topic_id( + topic.id)) + }) + + return all_users_subtopic_prog_summaries + + +def get_learner_group_syllabus_subtopic_page_summaries( + subtopic_page_ids: List[str] +) -> List[subtopic_page_domain.SubtopicPageSummaryDict]: + """Returns summary dicts corresponding to the given subtopic page ids. + + Args: + subtopic_page_ids: list(str). The ids of the subtopic pages. + + Returns: + list(SubtopicPageSummaryDict). The summary dicts corresponding to the + given subtopic page ids. 
+ """ + topic_ids = get_topic_ids_from_subtopic_page_ids(subtopic_page_ids) + topics = topic_fetchers.get_topics_by_ids(topic_ids, strict=True) + + all_learner_group_subtopic_page_summaries: List[ + subtopic_page_domain.SubtopicPageSummaryDict + ] = [] + for topic in topics: + for subtopic in topic.subtopics: + subtopic_page_id = '{}:{}'.format(topic.id, subtopic.id) + if subtopic_page_id not in subtopic_page_ids: + continue + all_learner_group_subtopic_page_summaries.append({ + 'subtopic_id': subtopic.id, + 'subtopic_title': subtopic.title, + 'parent_topic_id': topic.id, + 'parent_topic_name': topic.name, + 'thumbnail_filename': subtopic.thumbnail_filename, + 'thumbnail_bg_color': subtopic.thumbnail_bg_color, + 'subtopic_mastery': None, + 'parent_topic_url_fragment': topic.url_fragment, + 'classroom_url_fragment': None + }) + + return all_learner_group_subtopic_page_summaries diff --git a/core/domain/subtopic_page_services_test.py b/core/domain/subtopic_page_services_test.py index 476b461b4d57..deda69c4f4d0 100644 --- a/core/domain/subtopic_page_services_test.py +++ b/core/domain/subtopic_page_services_test.py @@ -21,16 +21,25 @@ import re from core import feconf +from core.constants import constants +from core.domain import skill_services from core.domain import state_domain from core.domain import subtopic_page_domain from core.domain import subtopic_page_services from core.domain import topic_domain from core.domain import topic_fetchers +from core.domain import topic_services +from core.domain import translation_domain from core.platform import models from core.tests import test_utils +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import subtopic_models + (base_models, subtopic_models) = models.Registry.import_models([ - models.NAMES.base_model, models.NAMES.subtopic]) + models.Names.BASE_MODEL, models.Names.SUBTOPIC]) class SubtopicPageServicesUnitTests(test_utils.GenericTestBase): @@ -44,9 +53,16 @@ 
class SubtopicPageServicesUnitTests(test_utils.GenericTestBase): skill_id_1 = 'skill_1' skill_id_2 = 'skill_2' - def setUp(self): - super(SubtopicPageServicesUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() + self.signup( + self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.admin_id = self.get_user_id_from_email( + self.CURRICULUM_ADMIN_EMAIL) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + self.TOPIC_ID = topic_fetchers.get_new_topic_id() + self.subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( self.subtopic_id, self.TOPIC_ID)) @@ -55,21 +71,44 @@ def setUp(self): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'sample-fragment' })] ) self.subtopic_page_id = ( subtopic_page_domain.SubtopicPage.get_subtopic_page_id( self.TOPIC_ID, 1)) - def test_get_subtopic_page_from_model(self): + self.TOPIC_ID_1 = topic_fetchers.get_new_topic_id() + # Set up topic and subtopic. + topic = topic_domain.Topic.create_default_topic( + self.TOPIC_ID_1, 'Place Values', 'abbrev', 'description', 'fragm') + topic.thumbnail_filename = 'thumbnail.svg' + topic.thumbnail_bg_color = '#C6DCDA' + topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Naming Numbers', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-url'), + topic_domain.Subtopic( + 2, 'Subtopic Name', ['skill_id_2'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'other-subtopic-url')] + topic.next_subtopic_id = 3 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] + topic_services.save_new_topic(self.admin_id, topic) + + # Publish the topic and its stories. 
+ topic_services.publish_topic(self.TOPIC_ID_1, self.admin_id) + + def test_get_subtopic_page_from_model(self) -> None: subtopic_page_model = subtopic_models.SubtopicPageModel.get( self.subtopic_page_id) subtopic_page = subtopic_page_services.get_subtopic_page_from_model( subtopic_page_model) self.assertEqual(subtopic_page.to_dict(), self.subtopic_page.to_dict()) - def test_get_subtopic_page_by_id(self): + def test_get_subtopic_page_by_id(self) -> None: subtopic_page_1 = subtopic_page_services.get_subtopic_page_by_id( self.TOPIC_ID, self.subtopic_id) self.assertEqual( @@ -80,10 +119,12 @@ def test_get_subtopic_page_by_id(self): 'topic_id', 1, strict=False) self.assertEqual(subtopic_page_2, None) - def test_get_subtopic_pages_with_ids(self): + def test_get_subtopic_pages_with_ids(self) -> None: subtopic_ids = [self.subtopic_id] subtopic_pages = subtopic_page_services.get_subtopic_pages_with_ids( self.TOPIC_ID, subtopic_ids) + # Ruling out the possibility of None for mypy type checking. + assert subtopic_pages[0] is not None self.assertEqual( subtopic_pages[0].to_dict(), self.subtopic_page.to_dict()) subtopic_ids = [2] @@ -94,6 +135,8 @@ def test_get_subtopic_pages_with_ids(self): subtopic_pages = subtopic_page_services.get_subtopic_pages_with_ids( self.TOPIC_ID, subtopic_ids) expected_subtopic_pages = [self.subtopic_page.to_dict(), None] + # Ruling out the possibility of None for mypy type checking. 
+ assert subtopic_pages[0] is not None self.assertEqual( [subtopic_pages[0].to_dict(), subtopic_pages[1]], expected_subtopic_pages) @@ -106,10 +149,10 @@ def test_get_subtopic_pages_with_ids(self): self.TOPIC_ID, subtopic_ids) self.assertEqual(subtopic_pages, [None, None]) - def test_get_subtopic_page_contents_by_id(self): + def test_get_subtopic_page_contents_by_id(self) -> None: self.subtopic_page = subtopic_page_services.get_subtopic_page_by_id( self.TOPIC_ID, 1) - recorded_voiceovers = { + recorded_voiceovers: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { 'content': { 'en': { @@ -153,12 +196,12 @@ def test_get_subtopic_page_contents_by_id(self): self.TOPIC_ID, 1)) self.assertEqual( subtopic_page_contents.to_dict(), expected_page_contents_dict) - subtopic_page_contents = ( + subtopic_page_content = ( subtopic_page_services.get_subtopic_page_contents_by_id( self.TOPIC_ID, 2, strict=False)) - self.assertEqual(subtopic_page_contents, None) + self.assertEqual(subtopic_page_content, None) - def test_save_subtopic_page(self): + def test_save_subtopic_page(self) -> None: subtopic_page_1 = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, 'topic_id_1')) @@ -167,9 +210,10 @@ def test_save_subtopic_page(self): [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'sample-fragment-one' })]) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unexpected error: received an invalid change list *'): subtopic_page_services.save_subtopic_page( self.user_id, subtopic_page_1, 'Added subtopic', []) @@ -180,50 +224,54 @@ def test_save_subtopic_page(self): subtopic_page_id_1) subtopic_page_1.version = 2 subtopic_page_model_1.version = 3 - with self.assertRaisesRegexp(Exception, 'Trying to update version *'): + with self.assertRaisesRegex(Exception, 'Trying to update version *'): subtopic_page_services.save_subtopic_page( self.user_id, 
subtopic_page_1, 'Added subtopic', [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'fragment' })]) subtopic_page_1.version = 3 subtopic_page_model_1.version = 2 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unexpected error: trying to update version *'): subtopic_page_services.save_subtopic_page( self.user_id, subtopic_page_1, 'Added subtopic', [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, - 'title': 'Sample' + 'title': 'Sample', + 'url_fragment': 'sample-frag' })]) - def test_commit_log_entry(self): + def test_commit_log_entry(self) -> None: subtopic_page_commit_log_entry = ( subtopic_models.SubtopicPageCommitLogEntryModel.get_commit( self.subtopic_page_id, 1) ) + # Ruling out the possibility of None for mypy type checking. + assert subtopic_page_commit_log_entry is not None self.assertEqual(subtopic_page_commit_log_entry.commit_type, 'create') self.assertEqual( subtopic_page_commit_log_entry.subtopic_page_id, self.subtopic_page_id) self.assertEqual(subtopic_page_commit_log_entry.user_id, self.user_id) - def test_delete_subtopic_page(self): + def test_delete_subtopic_page(self) -> None: subtopic_page_id = ( subtopic_page_domain.SubtopicPage.get_subtopic_page_id( self.TOPIC_ID, 1)) subtopic_page_services.delete_subtopic_page( self.user_id, self.TOPIC_ID, 1) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( base_models.BaseModel.EntityNotFoundError, re.escape( 'Entity for class SubtopicPageModel with id %s not found' % ( subtopic_page_id))): subtopic_models.SubtopicPageModel.get(subtopic_page_id) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( base_models.BaseModel.EntityNotFoundError, re.escape( 'Entity for class SubtopicPageModel with id %s not found' % ( @@ -231,71 +279,24 @@ def test_delete_subtopic_page(self): subtopic_page_services.delete_subtopic_page( self.user_id, 
self.TOPIC_ID, 1) - def test_migrate_page_contents_from_v1_to_v2_schema(self): + def test_migrate_page_contents_from_v1_to_v2_schema(self) -> None: current_schema_version_swap = self.swap( feconf, 'CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION', 2) html_content = ( '

    Value

    ') + 'mp;quot;+,-,-,+&quot;" svg_filename-with-value="&a' + 'mp;quot;abc.svg&quot;">
    ') expected_html_content = ( '

    Value

    ') - written_translations_dict = { + 'amp;quot;svg_filename&quot;: &quot;abc.svg&quot;}">' + '') + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': html_content, - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': 'Testing!', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } - } - } - written_translations_dict_math = { - 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': expected_html_content, - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': 'Testing!', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } + 'content1': {}, + 'feedback_1': {} } } recorded_voiceovers = { @@ -312,17 +313,17 @@ def test_migrate_page_contents_from_v1_to_v2_schema(self): } page_contents_dict = { 'subtitled_html': { - 'content_id': 'content', 'html': html_content + 'content_id': 'content_0', 'html': html_content }, 'recorded_voiceovers': recorded_voiceovers, 'written_translations': written_translations_dict } expected_page_contents_dict = { 'subtitled_html': { - 'content_id': 'content', 'html': expected_html_content + 'content_id': 'content_0', 'html': expected_html_content }, 'recorded_voiceovers': recorded_voiceovers, - 'written_translations': written_translations_dict_math + 'written_translations': written_translations_dict } subtopic_page_id = subtopic_models.SubtopicPageModel.get_new_id('') @@ -343,7 +344,7 @@ def test_migrate_page_contents_from_v1_to_v2_schema(self): self.assertEqual( 
subtopic_page.page_contents.to_dict(), expected_page_contents_dict) - def test_migrate_page_contents_from_v2_to_v3_schema(self): + def test_migrate_page_contents_from_v2_to_v3_schema(self) -> None: current_schema_version_swap = self.swap( feconf, 'CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION', 3) html_content = ( @@ -357,60 +358,12 @@ def test_migrate_page_contents_from_v2_to_v3_schema(self): ' caption-with-value="&quot;&quot;" ' 'filepath-with-value=\'\"img1.svg\"\'>' '
    ') - written_translations_dict = { + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': html_content, - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': 'Testing!', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } - } - } - written_translations_dict_math = { - 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': expected_html_content, - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': 'Testing!', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } + 'content1': {}, + 'feedback_1': {} } } recorded_voiceovers = { @@ -427,17 +380,17 @@ def test_migrate_page_contents_from_v2_to_v3_schema(self): } page_contents_dict = { 'subtitled_html': { - 'content_id': 'content', 'html': html_content + 'content_id': 'content_0', 'html': html_content }, 'recorded_voiceovers': recorded_voiceovers, 'written_translations': written_translations_dict } expected_page_contents_dict = { 'subtitled_html': { - 'content_id': 'content', 'html': expected_html_content + 'content_id': 'content_0', 'html': expected_html_content }, 'recorded_voiceovers': recorded_voiceovers, - 'written_translations': written_translations_dict_math + 'written_translations': written_translations_dict } subtopic_page_id = subtopic_models.SubtopicPageModel.get_new_id('') @@ -458,7 +411,7 @@ def test_migrate_page_contents_from_v2_to_v3_schema(self): self.assertEqual( subtopic_page.page_contents.to_dict(), expected_page_contents_dict) - def 
test_migrate_page_contents_from_v3_to_v4_schema(self): + def test_migrate_page_contents_from_v3_to_v4_schema(self) -> None: current_schema_version_swap = self.swap( feconf, 'CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION', 4) expected_html_content = ( @@ -467,60 +420,12 @@ def test_migrate_page_contents_from_v3_to_v4_schema(self): html_content = ( '

    1 × 3 😕 😊

    ' ) - written_translations_dict = { + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': html_content, - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': 'Testing!', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } - } - } - written_translations_dict_math = { - 'translations_mapping': { - 'content1': { - 'en': { - 'data_format': 'html', - 'translation': expected_html_content, - 'needs_update': True - }, - 'hi': { - 'data_format': 'html', - 'translation': 'Hey!', - 'needs_update': False - } - }, - 'feedback_1': { - 'hi': { - 'data_format': 'html', - 'translation': 'Testing!', - 'needs_update': False - }, - 'en': { - 'data_format': 'html', - 'translation': 'hello!', - 'needs_update': False - } - } + 'content1': {}, + 'feedback_1': {} } } recorded_voiceovers = { @@ -537,17 +442,17 @@ def test_migrate_page_contents_from_v3_to_v4_schema(self): } page_contents_dict = { 'subtitled_html': { - 'content_id': 'content', 'html': html_content + 'content_id': 'content_0', 'html': html_content }, 'recorded_voiceovers': recorded_voiceovers, 'written_translations': written_translations_dict } expected_page_contents_dict = { 'subtitled_html': { - 'content_id': 'content', 'html': expected_html_content + 'content_id': 'content_0', 'html': expected_html_content }, 'recorded_voiceovers': recorded_voiceovers, - 'written_translations': written_translations_dict_math + 'written_translations': written_translations_dict } subtopic_page_id = subtopic_models.SubtopicPageModel.get_new_id('') @@ -569,10 +474,11 @@ def test_migrate_page_contents_from_v3_to_v4_schema(self): subtopic_page.page_contents.to_dict(), expected_page_contents_dict) def 
test_cannot_migrate_page_contents_to_latest_schema_with_invalid_version( - self): + self + ) -> None: current_schema_version_swap = self.swap( feconf, 'CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION', 2) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v2 page schemas at present.') @@ -585,3 +491,69 @@ def test_cannot_migrate_page_contents_to_latest_schema_with_invalid_version( assert_raises_regexp_context_manager): subtopic_page_services.get_subtopic_page_from_model( subtopic_page_model) + + def test_get_topic_ids_from_subtopic_page_ids(self) -> None: + topic_ids = ( + subtopic_page_services.get_topic_ids_from_subtopic_page_ids( + ['topic1:subtopic1', 'topic2:subtopic2', 'topic1:subtopic3'] + ) + ) + + self.assertEqual(topic_ids, ['topic1', 'topic2']) + + def test_get_multi_users_subtopic_pages_progress(self) -> None: + degree_of_mastery = 0.5 + learner_id_1 = 'learner_1' + learner_id_2 = 'learner_2' + + # Add some subtopic progress for the learner. 
+ skill_services.create_user_skill_mastery( + learner_id_1, 'skill_id_1', degree_of_mastery + ) + + subtopic_page_id = '{}:{}'.format(self.TOPIC_ID_1, 1) + progress = ( + subtopic_page_services.get_multi_users_subtopic_pages_progress( + [learner_id_1, learner_id_2], [subtopic_page_id] + ) + ) + + learner_1_progress = progress[learner_id_1] + learner_2_progress = progress[learner_id_2] + + self.assertEqual(len(learner_1_progress), 1) + self.assertEqual(len(learner_2_progress), 1) + self.assertEqual(learner_1_progress[0]['subtopic_id'], 1) + self.assertEqual( + learner_1_progress[0]['subtopic_title'], 'Naming Numbers' + ) + self.assertEqual( + learner_1_progress[0]['parent_topic_id'], self.TOPIC_ID_1 + ) + self.assertEqual( + learner_1_progress[0]['parent_topic_name'], 'Place Values' + ) + self.assertEqual( + learner_1_progress[0]['subtopic_mastery'], degree_of_mastery + ) + self.assertIsNone(learner_2_progress[0]['subtopic_mastery']) + + def test_get_learner_group_syllabus_subtopic_page_summaries(self) -> None: + subtopic_page_id = '{}:{}'.format(self.TOPIC_ID_1, 1) + expected_summaries = [{ + 'subtopic_id': 1, + 'subtopic_title': 'Naming Numbers', + 'parent_topic_id': self.TOPIC_ID_1, + 'parent_topic_name': 'Place Values', + 'thumbnail_filename': 'image.svg', + 'thumbnail_bg_color': + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], + 'subtopic_mastery': None, + 'parent_topic_url_fragment': 'abbrev', + 'classroom_url_fragment': None + }] + summaries = ( + subtopic_page_services + .get_learner_group_syllabus_subtopic_page_summaries( + [subtopic_page_id])) + self.assertEqual(summaries, expected_summaries) diff --git a/core/domain/suggestion_registry.py b/core/domain/suggestion_registry.py index e4b0af41440e..88a13b7fbef3 100644 --- a/core/domain/suggestion_registry.py +++ b/core/domain/suggestion_registry.py @@ -19,25 +19,58 @@ from __future__ import annotations import copy +import datetime from core import feconf from core import utils from core.constants 
import constants +from core.domain import change_domain from core.domain import config_domain from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import exp_services from core.domain import fs_services from core.domain import html_cleaner +from core.domain import opportunity_services from core.domain import question_domain from core.domain import question_services from core.domain import skill_domain from core.domain import skill_fetchers from core.domain import state_domain +from core.domain import translation_domain +from core.domain import translation_services from core.domain import user_services from core.platform import models +from extensions import domain -(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion]) +from typing import ( + Any, Callable, Dict, List, Mapping, Optional, Set, Type, TypedDict, Union, + cast +) + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import suggestion_models + +(suggestion_models,) = models.Registry.import_models([models.Names.SUGGESTION]) + + +class BaseSuggestionDict(TypedDict): + """Dictionary representing the BaseSuggestion object.""" + + suggestion_id: str + suggestion_type: str + target_type: str + target_id: str + target_version_at_submission: int + status: str + author_name: str + final_reviewer_id: Optional[str] + change: Dict[str, change_domain.AcceptableChangeDictTypes] + score_category: str + language_code: str + last_updated: float + edited_by_reviewer: bool class BaseSuggestion: @@ -66,12 +99,33 @@ class BaseSuggestion: reviewer. 
""" - def __init__(self, status, final_reviewer_id): + # Here, we explicitly defined all the attributes that are used in + # BaseSuggestion because in `to_dict`, `get_score_type` and other + # methods too we are accessing these attributes but due to the lack + # of definition in main implementation the types of these attributes + # are not available which causes MyPy to throw undefined attribute + # error for all attributes that are used in BaseSuggestion. Thus to + # provide type-info to MyPy about these attributes, we defined them + # as class variables. + suggestion_id: str + suggestion_type: str + target_type: str + target_id: str + target_version_at_submission: int + author_id: str + change: change_domain.BaseChange + score_category: str + last_updated: datetime.datetime + language_code: str + edited_by_reviewer: bool + image_context: str + + def __init__(self, status: str, final_reviewer_id: Optional[str]) -> None: """Initializes a Suggestion object.""" self.status = status self.final_reviewer_id = final_reviewer_id - def to_dict(self): + def to_dict(self) -> BaseSuggestionDict: """Returns a dict representation of a suggestion object. Returns: @@ -93,7 +147,7 @@ def to_dict(self): 'edited_by_reviewer': self.edited_by_reviewer } - def get_score_type(self): + def get_score_type(self) -> str: """Returns the first part of the score category. The first part refers to the the type of scoring. The value of this part will be among suggestion_models.SCORE_TYPE_CHOICES. @@ -104,7 +158,7 @@ def get_score_type(self): return self.score_category.split( suggestion_models.SCORE_CATEGORY_DELIMITER)[0] - def get_author_name(self): + def get_author_name(self) -> str: """Returns the author's username. Returns: @@ -112,7 +166,7 @@ def get_author_name(self): """ return user_services.get_username(self.author_id) - def get_score_sub_type(self): + def get_score_sub_type(self) -> str: """Returns the second part of the score category. 
The second part refers to the specific area where the author needs to be scored. This can be the category of the exploration, the language of the suggestion, or the @@ -124,19 +178,19 @@ def get_score_sub_type(self): return self.score_category.split( suggestion_models.SCORE_CATEGORY_DELIMITER)[1] - def set_suggestion_status_to_accepted(self): + def set_suggestion_status_to_accepted(self) -> None: """Sets the status of the suggestion to accepted.""" self.status = suggestion_models.STATUS_ACCEPTED - def set_suggestion_status_to_in_review(self): + def set_suggestion_status_to_in_review(self) -> None: """Sets the status of the suggestion to in review.""" self.status = suggestion_models.STATUS_IN_REVIEW - def set_suggestion_status_to_rejected(self): + def set_suggestion_status_to_rejected(self) -> None: """Sets the status of the suggestion to rejected.""" self.status = suggestion_models.STATUS_REJECTED - def set_final_reviewer_id(self, reviewer_id): + def set_final_reviewer_id(self, reviewer_id: str) -> None: """Sets the final reviewer id of the suggestion to be reviewer_id. Args: @@ -144,7 +198,7 @@ def set_final_reviewer_id(self, reviewer_id): """ self.final_reviewer_id = reviewer_id - def validate(self): + def validate(self) -> None: """Validates the BaseSuggestion object. Each subclass must implement this function. @@ -234,22 +288,14 @@ def validate(self): 'Expected the first part of score_category to be among allowed' ' choices, received %s' % self.get_score_type()) - def accept(self): + def accept(self, commit_msg: str) -> None: """Accepts the suggestion. Each subclass must implement this function. """ raise NotImplementedError( 'Subclasses of BaseSuggestion should implement accept.') - def get_change_list_for_accepting_suggestion(self): - """Before accepting the suggestion, a change_list needs to be generated - from the change. Each subclass must implement this function. 
- """ - raise NotImplementedError( - 'Subclasses of BaseSuggestion should implement ' - 'get_change_list_for_accepting_suggestion.') - - def pre_accept_validate(self): + def pre_accept_validate(self) -> None: """Performs referential validation. This function needs to be called before accepting the suggestion. """ @@ -257,13 +303,19 @@ def pre_accept_validate(self): 'Subclasses of BaseSuggestion should implement ' 'pre_accept_validate.') - def populate_old_value_of_change(self): + def populate_old_value_of_change(self) -> None: """Populates the old_value field of the change.""" raise NotImplementedError( 'Subclasses of BaseSuggestion should implement ' 'populate_old_value_of_change.') - def pre_update_validate(self, change): + # TODO(#16047): Here we use type Any because the method pre_update_validate + # is used inside sub-classes with different argument types, which according + # to MyPy violates the 'Liskov substitution principle' and throws an error + # in every sub-class where this pre_update_validate method is used. So, to + # avoid the error in every sub-class, we have used Any type here but once + # this BaseSuggestion class is refactored, we can remove type Any from here. + def pre_update_validate(self, change: Any) -> None: """Performs the pre update validation. This function needs to be called before updating the suggestion. """ @@ -271,13 +323,13 @@ def pre_update_validate(self, change): 'Subclasses of BaseSuggestion should implement ' 'pre_update_validate.') - def get_all_html_content_strings(self): + def get_all_html_content_strings(self) -> List[str]: """Gets all html content strings used in this suggestion.""" raise NotImplementedError( 'Subclasses of BaseSuggestion should implement ' 'get_all_html_content_strings.') - def get_target_entity_html_strings(self): + def get_target_entity_html_strings(self) -> List[str]: """Gets all html content strings from target entity used in the suggestion. 
""" @@ -285,7 +337,7 @@ def get_target_entity_html_strings(self): 'Subclasses of BaseSuggestion should implement ' 'get_target_entity_html_strings.') - def get_new_image_filenames_added_in_suggestion(self): + def get_new_image_filenames_added_in_suggestion(self) -> List[str]: """Returns the list of newly added image filenames in the suggestion. Returns: @@ -305,7 +357,7 @@ def get_new_image_filenames_added_in_suggestion(self): return new_image_filenames - def _copy_new_images_to_target_entity_storage(self): + def _copy_new_images_to_target_entity_storage(self) -> None: """Copy newly added images in suggestion to the target entity storage. """ @@ -314,7 +366,9 @@ def _copy_new_images_to_target_entity_storage(self): self.image_context, self.target_id, self.target_type, self.target_id, new_image_filenames) - def convert_html_in_suggestion_change(self, conversion_fn): + def convert_html_in_suggestion_change( + self, conversion_fn: Callable[[str], str] + ) -> None: """Checks for HTML fields in a suggestion change and converts it according to the conversion function. """ @@ -323,7 +377,7 @@ def convert_html_in_suggestion_change(self, conversion_fn): 'convert_html_in_suggestion_change.') @property - def is_handled(self): + def is_handled(self) -> bool: """Returns if the suggestion has either been accepted or rejected. 
Returns: @@ -338,14 +392,23 @@ class SuggestionEditStateContent(BaseSuggestion): """ def __init__( - self, suggestion_id, target_id, target_version_at_submission, - status, author_id, final_reviewer_id, - change, score_category, language_code, edited_by_reviewer, - last_updated=None): + self, + suggestion_id: str, + target_id: str, + target_version_at_submission: int, + status: str, + author_id: str, + final_reviewer_id: Optional[str], + change: Mapping[str, change_domain.AcceptableChangeDictTypes], + score_category: str, + language_code: Optional[str], + edited_by_reviewer: bool, + last_updated: Optional[datetime.datetime] = None + ) -> None: """Initializes an object of type SuggestionEditStateContent corresponding to the SUGGESTION_TYPE_EDIT_STATE_CONTENT choice. """ - super(SuggestionEditStateContent, self).__init__( + super().__init__( status, final_reviewer_id) self.suggestion_id = suggestion_id self.suggestion_type = ( @@ -354,23 +417,41 @@ def __init__( self.target_id = target_id self.target_version_at_submission = target_version_at_submission self.author_id = author_id - self.change = exp_domain.ExplorationChange(change) + self.change: exp_domain.EditExpStatePropertyContentCmd = ( + exp_domain.EditExpStatePropertyContentCmd(change) + ) self.score_category = score_category - self.language_code = language_code - self.last_updated = last_updated + # Here we use MyPy ignore because in BaseSuggestion, language_code + # is defined with only string type but here language_code is of + # Optional[str] type because language_code can accept None values as + # well. So, due to this conflict in types MyPy throws an `Incompatible + # types in assignment` error. Thus to avoid the error, we used ignore. 
+ self.language_code = language_code # type: ignore[assignment] + # TODO(#16048): Here we use MyPy ignore because in BaseSuggestion, + # last_updated is defined with only datetime type but here + # last_updated is of Optional[datetime] type because while creating + # 'SuggestionEditStateContent' through create_suggestion() method, we + # are not providing 'last_updated' and just using None default value. + # So, once this suggestion_services.create_suggestion() method is + # fixed, we can remove both todo and MyPy ignore from here. + self.last_updated = last_updated # type: ignore[assignment] self.edited_by_reviewer = edited_by_reviewer - # Currently, we don't allow adding images in the "edit state content" - # suggestion, so the image_context is None. - self.image_context = None - - def validate(self): + # Here we use MyPy ignore because in BaseSuggestion, image_context + # is defined as string type attribute but currently, we don't + # allow adding images in the "edit state content" suggestion, + # so the image_context is None here and due to None MyPy throws + # an `Incompatible types in assignment` error. Thus to avoid the + # error, we used ignore here. + self.image_context = None # type: ignore[assignment] + + def validate(self) -> None: """Validates a suggestion object of type SuggestionEditStateContent. Raises: ValidationError. One or more attributes of the SuggestionEditStateContent object are invalid. """ - super(SuggestionEditStateContent, self).validate() + super().validate() if not isinstance(self.change, exp_domain.ExplorationChange): raise utils.ValidationError( @@ -403,7 +484,7 @@ def validate(self): 'Expected language_code to be None, received %s' % ( self.language_code)) - def pre_accept_validate(self): + def pre_accept_validate(self) -> None: """Performs referential validation. This function needs to be called before accepting the suggestion. 
""" @@ -414,8 +495,10 @@ def pre_accept_validate(self): 'Expected %s to be a valid state name' % self.change.state_name) - def get_change_list_for_accepting_suggestion(self): - """Gets a complete change for the suggestion. + def _get_change_list_for_accepting_edit_state_content_suggestion( + self + ) -> List[exp_domain.ExplorationChange]: + """Gets a complete change for the SuggestionEditStateContent. Returns: list(ExplorationChange). The change_list corresponding to the @@ -431,7 +514,7 @@ def get_change_list_for_accepting_suggestion(self): return [change] - def populate_old_value_of_change(self): + def populate_old_value_of_change(self) -> None: """Populates old value of the change.""" exploration = exp_fetchers.get_exploration_by_id(self.target_id) if self.change.state_name not in exploration.states: @@ -444,18 +527,25 @@ def populate_old_value_of_change(self): self.change.old_value = old_content - def accept(self, commit_message): + def accept(self, commit_message: str) -> None: """Accepts the suggestion. Args: commit_message: str. The commit message. """ - change_list = self.get_change_list_for_accepting_suggestion() + change_list = ( + self._get_change_list_for_accepting_edit_state_content_suggestion() + ) + # Before calling this accept method we are already checking if user + # with 'final_reviewer_id' exists or not. + assert self.final_reviewer_id is not None exp_services.update_exploration( self.final_reviewer_id, self.target_id, change_list, - commit_message, is_suggestion=True) + commit_message) - def pre_update_validate(self, change): + def pre_update_validate( + self, change: exp_domain.EditExpStatePropertyContentCmd + ) -> None: """Performs the pre update validation. This function needs to be called before updating the suggestion. 
@@ -469,19 +559,19 @@ def pre_update_validate(self, change): raise utils.ValidationError( 'The new change cmd must be equal to %s' % self.change.cmd) - elif self.change.property_name != change.property_name: + if self.change.property_name != change.property_name: raise utils.ValidationError( 'The new change property_name must be equal to %s' % self.change.property_name) - elif self.change.state_name != change.state_name: + if self.change.state_name != change.state_name: raise utils.ValidationError( 'The new change state_name must be equal to %s' % self.change.state_name) - elif self.change.new_value['html'] == change.new_value['html']: + if self.change.new_value['html'] == change.new_value['html']: raise utils.ValidationError( 'The new html must not match the old html') - def get_all_html_content_strings(self): + def get_all_html_content_strings(self) -> List[str]: """Gets all html content strings used in this suggestion. Returns: @@ -492,7 +582,7 @@ def get_all_html_content_strings(self): html_string_list.append(self.change.old_value['html']) return html_string_list - def get_target_entity_html_strings(self): + def get_target_entity_html_strings(self) -> List[str]: """Gets all html content strings from target entity used in the suggestion. @@ -505,7 +595,9 @@ def get_target_entity_html_strings(self): return [] - def convert_html_in_suggestion_change(self, conversion_fn): + def convert_html_in_suggestion_change( + self, conversion_fn: Callable[[str], str] + ) -> None: """Checks for HTML fields in a suggestion change and converts it according to the conversion function. 
@@ -526,14 +618,23 @@ class SuggestionTranslateContent(BaseSuggestion): """ def __init__( - self, suggestion_id, target_id, target_version_at_submission, - status, author_id, final_reviewer_id, - change, score_category, language_code, edited_by_reviewer, - last_updated=None): + self, + suggestion_id: str, + target_id: str, + target_version_at_submission: int, + status: str, + author_id: str, + final_reviewer_id: Optional[str], + change: Mapping[str, change_domain.AcceptableChangeDictTypes], + score_category: str, + language_code: str, + edited_by_reviewer: bool, + last_updated: Optional[datetime.datetime] = None + ) -> None: """Initializes an object of type SuggestionTranslateContent corresponding to the SUGGESTION_TYPE_TRANSLATE_CONTENT choice. """ - super(SuggestionTranslateContent, self).__init__( + super().__init__( status, final_reviewer_id) self.suggestion_id = suggestion_id self.suggestion_type = ( @@ -542,21 +643,30 @@ def __init__( self.target_id = target_id self.target_version_at_submission = target_version_at_submission self.author_id = author_id - self.change = exp_domain.ExplorationChange(change) + self.change: exp_domain.AddWrittenTranslationCmd = ( + exp_domain.AddWrittenTranslationCmd(change) + ) self.score_category = score_category self.language_code = language_code - self.last_updated = last_updated + # TODO(#16048): Here we use MyPy ignore because in BaseSuggestion, + # last_updated is defined with only datetime type but here + # last_updated is of Optional[datetime] type because while creating + # 'SuggestionTranslateContent' through create_suggestion() method, we + # are not providing 'last_updated' and just using None default value. + # So, once this suggestion_services.create_suggestion() method is + # fixed, we can remove both todo and MyPy ignore from here. 
+ self.last_updated = last_updated # type: ignore[assignment] self.edited_by_reviewer = edited_by_reviewer self.image_context = feconf.IMAGE_CONTEXT_EXPLORATION_SUGGESTIONS - def validate(self): + def validate(self) -> None: """Validates a suggestion object of type SuggestionTranslateContent. Raises: ValidationError. One or more attributes of the SuggestionTranslateContent object are invalid. """ - super(SuggestionTranslateContent, self).validate() + super().validate() if not isinstance(self.change, exp_domain.ExplorationChange): raise utils.ValidationError( @@ -592,6 +702,9 @@ def validate(self): raise utils.ValidationError( 'Invalid language_code: %s' % self.change.language_code) + if isinstance(self.change.translation_html, str): + html_cleaner.validate_rte_tags(self.change.translation_html) + if self.language_code is None: raise utils.ValidationError('language_code cannot be None') @@ -600,7 +713,7 @@ def validate(self): 'Expected language_code to be %s, received %s' % ( self.change.language_code, self.language_code)) - def pre_update_validate(self, change): + def pre_update_validate(self, change: exp_domain.ExplorationChange) -> None: """Performs the pre update validation. This function needs to be called before updating the suggestion. 
@@ -614,20 +727,20 @@ def pre_update_validate(self, change): raise utils.ValidationError( 'The new change cmd must be equal to %s' % self.change.cmd) - elif self.change.state_name != change.state_name: + if self.change.state_name != change.state_name: raise utils.ValidationError( 'The new change state_name must be equal to %s' % self.change.state_name) - elif self.change.content_html != change.content_html: + if self.change.content_html != change.content_html: raise utils.ValidationError( 'The new change content_html must be equal to %s' % self.change.content_html) - elif self.change.language_code != change.language_code: + if self.change.language_code != change.language_code: raise utils.ValidationError( 'The language code must be equal to %s' % self.change.language_code) - def pre_accept_validate(self): + def pre_accept_validate(self) -> None: """Performs referential validation. This function needs to be called before accepting the suggestion. """ @@ -637,30 +750,44 @@ def pre_accept_validate(self): raise utils.ValidationError( 'Expected %s to be a valid state name' % self.change.state_name) - def accept(self, commit_message): - """Accepts the suggestion. + def accept(self, unused_commit_message: str) -> None: + """Accepts the suggestion.""" + translated_content = translation_domain.TranslatedContent( + self.change.translation_html, + translation_domain.TranslatableContentFormat( + self.change.data_format), + needs_update=False + ) + + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, + self.target_id, + self.target_version_at_submission, + self.language_code, + self.change.content_id, + translated_content) + + ( + opportunity_services. + update_translation_opportunity_with_accepted_suggestion( + self.target_id, self.language_code) + ) - Args: - commit_message: str. The commit message. - """ # If the translation is for a set of strings, we don't want to process # the HTML strings for images. 
+ # Before calling this accept method we are already checking if user + # with 'final_reviewer_id' exists or not. + assert self.final_reviewer_id is not None if ( - hasattr(self.change, 'data_format') and - state_domain.WrittenTranslation.is_data_format_list( - self.change.data_format) + hasattr(self.change, 'data_format') and + translation_domain.TranslatableContentFormat.is_data_format_list( + self.change.data_format) ): - exp_services.update_exploration( - self.final_reviewer_id, self.target_id, [self.change], - commit_message, is_suggestion=True) return self._copy_new_images_to_target_entity_storage() - exp_services.update_exploration( - self.final_reviewer_id, self.target_id, [self.change], - commit_message, is_suggestion=True) - def get_all_html_content_strings(self): + def get_all_html_content_strings(self) -> List[str]: """Gets all html content strings used in this suggestion. Returns: @@ -677,7 +804,7 @@ def get_all_html_content_strings(self): content_strings.append(self.change.content_html) return content_strings - def get_target_entity_html_strings(self): + def get_target_entity_html_strings(self) -> List[str]: """Gets all html content strings from target entity used in the suggestion. @@ -687,7 +814,9 @@ def get_target_entity_html_strings(self): """ return [self.change.content_html] - def convert_html_in_suggestion_change(self, conversion_fn): + def convert_html_in_suggestion_change( + self, conversion_fn: Callable[[str], str] + ) -> None: """Checks for HTML fields in a suggestion change and converts it according to the conversion function. 
@@ -727,29 +856,47 @@ class SuggestionAddQuestion(BaseSuggestion): """ def __init__( - self, suggestion_id, target_id, target_version_at_submission, - status, author_id, final_reviewer_id, - change, score_category, language_code, edited_by_reviewer, - last_updated=None): + self, + suggestion_id: str, + target_id: str, + target_version_at_submission: int, + status: str, + author_id: str, + final_reviewer_id: Optional[str], + change: Mapping[str, change_domain.AcceptableChangeDictTypes], + score_category: str, + language_code: str, + edited_by_reviewer: bool, + last_updated: Optional[datetime.datetime] = None + ) -> None: """Initializes an object of type SuggestionAddQuestion corresponding to the SUGGESTION_TYPE_ADD_QUESTION choice. """ - super(SuggestionAddQuestion, self).__init__(status, final_reviewer_id) + super().__init__(status, final_reviewer_id) self.suggestion_id = suggestion_id self.suggestion_type = feconf.SUGGESTION_TYPE_ADD_QUESTION self.target_type = feconf.ENTITY_TYPE_SKILL self.target_id = target_id self.target_version_at_submission = target_version_at_submission self.author_id = author_id - self.change = question_domain.QuestionSuggestionChange(change) + self.change: question_domain.CreateNewFullySpecifiedQuestionSuggestionCmd = ( # pylint: disable=line-too-long + question_domain.CreateNewFullySpecifiedQuestionSuggestionCmd(change) + ) self.score_category = score_category self.language_code = language_code - self.last_updated = last_updated + # TODO(#16048): Here we use MyPy ignore because in BaseSuggestion, + # last_updated is defined with only datetime type but here + # last_updated is of Optional[datetime] type because while creating + # 'SuggestionAddQuestion' through create_suggestion() method, we + # are not providing 'last_updated' and just using None default value. + # So, once this suggestion_services.create_suggestion() method is + # fixed, we can remove both todo and MyPy ignore from here. 
+ self.last_updated = last_updated # type: ignore[assignment] self.image_context = feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS self._update_change_to_latest_state_schema_version() self.edited_by_reviewer = edited_by_reviewer - def _update_change_to_latest_state_schema_version(self): + def _update_change_to_latest_state_schema_version(self) -> None: """Holds the responsibility of performing a step-by-step, sequential update of the state structure inside the change_cmd based on the schema version of the current state dictionary. @@ -758,12 +905,15 @@ def _update_change_to_latest_state_schema_version(self): Exception. The state_schema_version of suggestion cannot be processed. """ - state_schema_version = self.change.question_dict[ + question_dict: question_domain.QuestionDict = self.change.question_dict + + state_schema_version = question_dict[ 'question_state_data_schema_version'] - versioned_question_state = { + versioned_question_state: question_domain.VersionedQuestionStateDict = { + 'state_schema_version': state_schema_version, 'state': copy.deepcopy( - self.change.question_dict['question_state_data']) + question_dict['question_state_data']) } if not (25 <= state_schema_version @@ -783,14 +933,14 @@ def _update_change_to_latest_state_schema_version(self): self.change.question_dict['question_state_data_schema_version'] = ( state_schema_version) - def validate(self): + def validate(self) -> None: """Validates a suggestion object of type SuggestionAddQuestion. Raises: ValidationError. One or more attributes of the SuggestionAddQuestion object are invalid. 
""" - super(SuggestionAddQuestion, self).validate() + super().validate() if self.get_score_type() != suggestion_models.SCORE_TYPE_QUESTION: raise utils.ValidationError( @@ -817,16 +967,18 @@ def validate(self): raise utils.ValidationError( 'Expected change to contain question_dict') + question_dict: question_domain.QuestionDict = self.change.question_dict + if self.language_code != constants.DEFAULT_LANGUAGE_CODE: raise utils.ValidationError( 'Expected language_code to be %s, received %s' % ( constants.DEFAULT_LANGUAGE_CODE, self.language_code)) - if self.language_code != self.change.question_dict['language_code']: + if self.language_code != question_dict['language_code']: raise utils.ValidationError( 'Expected question language_code(%s) to be same as suggestion ' 'language_code(%s)' % ( - self.change.question_dict['language_code'], + question_dict['language_code'], self.language_code)) if not self.change.skill_difficulty: @@ -840,16 +992,27 @@ def validate(self): 'Expected change skill_difficulty to be one of %s, found %s ' % (skill_difficulties, self._get_skill_difficulty())) + # Here we use MyPy ignore because here we are building Question + # domain object only for validation purpose, so 'question_id' is + # provided as None which causes MyPy to throw 'invalid argument + # type' error. Thus, to avoid the error, we used ignore here. question = question_domain.Question( - None, state_domain.State.from_dict( - self.change.question_dict['question_state_data']), + None, # type: ignore[arg-type] + state_domain.State.from_dict( + self.change.question_dict['question_state_data'] + ), self.change.question_dict['question_state_data_schema_version'], - self.change.question_dict['language_code'], None, + self.change.question_dict['language_code'], + # Here we use MyPy ignore because here we are building Question + # domain object only for validation purpose, so 'version' is + # provided as None which causes MyPy to throw 'invalid argument + # type' error. 
Thus, to avoid the error, we use ignore here. + None, # type: ignore[arg-type] self.change.question_dict['linked_skill_ids'], - self.change.question_dict['inapplicable_skill_misconception_ids']) - question.partial_validate() + self.change.question_dict['inapplicable_skill_misconception_ids'], + self.change.question_dict['next_content_id_index']) question_state_data_schema_version = ( - self.change.question_dict['question_state_data_schema_version']) + question_dict['question_state_data_schema_version']) if question_state_data_schema_version != ( feconf.CURRENT_STATE_SCHEMA_VERSION): raise utils.ValidationError( @@ -857,8 +1020,9 @@ def validate(self): '%s' % ( feconf.CURRENT_STATE_SCHEMA_VERSION, question_state_data_schema_version)) + question.partial_validate() - def pre_accept_validate(self): + def pre_accept_validate(self) -> None: """Performs referential validation. This function needs to be called before accepting the suggestion. """ @@ -873,10 +1037,7 @@ def pre_accept_validate(self): raise utils.ValidationError( 'The skill with the given id doesn\'t exist.') - def get_change_list_for_accepting_suggestion(self): - pass - - def accept(self, unused_commit_message): + def accept(self, unused_commit_message: str) -> None: """Accepts the suggestion. Args: @@ -884,7 +1045,7 @@ def accept(self, unused_commit_message): consistency with the existing suggestions. As a default commit message is used in the add_question function, the arg is unused. """ - question_dict = self.change.question_dict + question_dict: question_domain.QuestionDict = self.change.question_dict question_dict['version'] = 1 question_dict['id'] = ( question_services.get_new_question_id()) @@ -895,6 +1056,25 @@ def accept(self, unused_commit_message): # Images need to be stored in the storage path corresponding to the # question. new_image_filenames = self.get_new_image_filenames_added_in_suggestion() + + # Image for interaction with Image Region is not included as an html + # string. 
This image is included in the imagePath in customization args. + # Other interactions such as Item Selection, Multiple Choice, Drag and + # Drop Sort have ck editor that includes the images of the interactions + # so that references for those images are included as html strings. + if question.question_state_data.interaction.id == 'ImageClickInput': + # Here we use cast because we are narrowing down the type from + # various types of cust. arg values to ImageAndRegionDict, and + # here we are sure that the type is always going to be + # ImageAndRegionDict because imageAndRegions customization arg + # object always contain values of type ImageAndRegionDict. + customization_arg_image_dict = cast( + domain.ImageAndRegionDict, + question.question_state_data.interaction.customization_args[ + 'imageAndRegions'].value + ) + new_image_filenames.append( + customization_arg_image_dict['imagePath']) fs_services.copy_images( self.image_context, self.target_id, feconf.ENTITY_TYPE_QUESTION, question_dict['id'], new_image_filenames) @@ -910,11 +1090,17 @@ def accept(self, unused_commit_message): self.author_id, question_dict['id'], self.change.skill_id, self._get_skill_difficulty()) - def populate_old_value_of_change(self): + def populate_old_value_of_change(self) -> None: """Populates old value of the change.""" pass - def pre_update_validate(self, change): + def pre_update_validate( + self, + change: Union[ + question_domain.CreateNewFullySpecifiedQuestionSuggestionCmd, + question_domain.CreateNewFullySpecifiedQuestionCmd + ] + ) -> None: """Performs the pre update validation. This functions need to be called before updating the suggestion. 
@@ -939,29 +1125,32 @@ def pre_update_validate(self, change): 'At least one of the new skill_difficulty or question_dict ' 'should be changed.') - def _get_skill_difficulty(self): + def _get_skill_difficulty(self) -> float: """Returns the suggestion's skill difficulty.""" return self.change.skill_difficulty - def get_all_html_content_strings(self): + def get_all_html_content_strings(self) -> List[str]: """Gets all html content strings used in this suggestion. Returns: list(str). The list of html content strings. """ + question_dict: question_domain.QuestionDict = self.change.question_dict state_object = ( state_domain.State.from_dict( - self.change.question_dict['question_state_data'])) + question_dict['question_state_data'])) html_string_list = state_object.get_all_html_content_strings() return html_string_list - def get_target_entity_html_strings(self): + def get_target_entity_html_strings(self) -> List[str]: """Gets all html content strings from target entity used in the suggestion. """ return [] - def convert_html_in_suggestion_change(self, conversion_fn): + def convert_html_in_suggestion_change( + self, conversion_fn: Callable[[str], str] + ) -> None: """Checks for HTML fields in the suggestion change and converts it according to the conversion function. @@ -969,224 +1158,29 @@ def convert_html_in_suggestion_change(self, conversion_fn): conversion_fn: function. The function to be used for converting the HTML. 
""" - self.change.question_dict['question_state_data'] = ( + question_dict: question_domain.QuestionDict = self.change.question_dict + question_dict['question_state_data'] = ( state_domain.State.convert_html_fields_in_state( - self.change.question_dict['question_state_data'], + question_dict['question_state_data'], conversion_fn, state_uses_old_interaction_cust_args_schema=( - self.change.question_dict[ + question_dict[ 'question_state_data_schema_version'] < 38), state_uses_old_rule_template_schema=( - self.change.question_dict[ + question_dict[ 'question_state_data_schema_version'] < 45) ) ) -class BaseVoiceoverApplication: - """Base class for a voiceover application.""" - - def __init__(self): - """Initializes a GeneralVoiceoverApplication object.""" - raise NotImplementedError( - 'Subclasses of BaseVoiceoverApplication should implement __init__.') - - def to_dict(self): - """Returns a dict representation of a voiceover application object. - - Returns: - dict. A dict representation of a voiceover application object. - """ - return { - 'voiceover_application_id': self.voiceover_application_id, - 'target_type': self.target_type, - 'target_id': self.target_id, - 'status': self.status, - 'author_name': self.get_author_name(), - 'final_reviewer_name': ( - None if self.final_reviewer_id is None else ( - self.get_final_reviewer_name())), - 'language_code': self.language_code, - 'content': self.content, - 'filename': self.filename, - 'rejection_message': self.rejection_message - } - - def get_author_name(self): - """Returns the author's username. - - Returns: - str. The username of the author of the voiceover application. - """ - return user_services.get_username(self.author_id) - - def get_final_reviewer_name(self): - """Returns the reviewer's username. - - Returns: - str. The username of the reviewer of the voiceover application. 
- """ - return user_services.get_username(self.final_reviewer_id) - - def validate(self): - """Validates the BaseVoiceoverApplication object. - - Raises: - ValidationError. One or more attributes of the - BaseVoiceoverApplication object are invalid. - """ - - if self.target_type not in feconf.SUGGESTION_TARGET_TYPE_CHOICES: - raise utils.ValidationError( - 'Expected target_type to be among allowed choices, ' - 'received %s' % self.target_type) - - if not isinstance(self.target_id, str): - raise utils.ValidationError( - 'Expected target_id to be a string, received %s' % type( - self.target_id)) - - if self.status not in suggestion_models.STATUS_CHOICES: - raise utils.ValidationError( - 'Expected status to be among allowed choices, ' - 'received %s' % self.status) - - if not isinstance(self.author_id, str): - raise utils.ValidationError( - 'Expected author_id to be a string, received %s' % type( - self.author_id)) - if self.status == suggestion_models.STATUS_IN_REVIEW: - if self.final_reviewer_id is not None: - raise utils.ValidationError( - 'Expected final_reviewer_id to be None as the ' - 'voiceover application is not yet handled.') - else: - if not isinstance(self.final_reviewer_id, str): - raise utils.ValidationError( - 'Expected final_reviewer_id to be a string, received %s' % ( - type(self.final_reviewer_id))) - if self.status == suggestion_models.STATUS_REJECTED: - if not isinstance(self.rejection_message, str): - raise utils.ValidationError( - 'Expected rejection_message to be a string for a ' - 'rejected application, received %s' % type( - self.final_reviewer_id)) - if self.status == suggestion_models.STATUS_ACCEPTED: - if self.rejection_message is not None: - raise utils.ValidationError( - 'Expected rejection_message to be None for the ' - 'accepted voiceover application, received %s' % ( - self.rejection_message)) - - if not isinstance(self.language_code, str): - raise utils.ValidationError( - 'Expected language_code to be a string, received %s' % - 
self.language_code) - if not utils.is_supported_audio_language_code(self.language_code): - raise utils.ValidationError( - 'Invalid language_code: %s' % self.language_code) - - if not isinstance(self.filename, str): - raise utils.ValidationError( - 'Expected filename to be a string, received %s' % type( - self.filename)) - - if not isinstance(self.content, str): - raise utils.ValidationError( - 'Expected content to be a string, received %s' % type( - self.content)) - - def accept(self): - """Accepts the voiceover application. Each subclass must implement this - function. - """ - raise NotImplementedError( - 'Subclasses of BaseVoiceoverApplication should implement accept.') - - def reject(self): - """Rejects the voiceover application. Each subclass must implement this - function. - """ - raise NotImplementedError( - 'Subclasses of BaseVoiceoverApplication should implement reject.') - - @property - def is_handled(self): - """Returns true if the voiceover application has either been accepted or - rejected. - - Returns: - bool. Whether the voiceover application has been handled or not. - """ - return self.status != suggestion_models.STATUS_IN_REVIEW - - -class ExplorationVoiceoverApplication(BaseVoiceoverApplication): - """Domain object for a voiceover application for exploration.""" - - def __init__( # pylint: disable=super-init-not-called - self, voiceover_application_id, target_id, status, author_id, - final_reviewer_id, language_code, filename, content, - rejection_message): - """Initializes a ExplorationVoiceoverApplication domain object. - - Args: - voiceover_application_id: str. The ID of the voiceover application. - target_id: str. The ID of the target entity. - status: str. The status of the voiceover application. - author_id: str. The ID of the user who submitted the voiceover - application. - final_reviewer_id: str|None. The ID of the reviewer who has - accepted/rejected the voiceover application. - language_code: str. 
The language code for the voiceover application. - filename: str. The filename of the voiceover audio. - content: str. The html content which is voiceover in the - application. - rejection_message: str. The plain text message submitted by the - reviewer while rejecting the application. - """ - self.voiceover_application_id = voiceover_application_id - self.target_type = feconf.ENTITY_TYPE_EXPLORATION - self.target_id = target_id - self.status = status - self.author_id = author_id - self.final_reviewer_id = final_reviewer_id - self.language_code = language_code - self.filename = filename - self.content = content - self.rejection_message = rejection_message - - def accept(self, reviewer_id): - """Accepts the voiceover application and updates the final_reviewer_id. - - Args: - reviewer_id: str. The user ID of the reviewer. - """ - self.final_reviewer_id = reviewer_id - self.status = suggestion_models.STATUS_ACCEPTED - self.validate() - - def reject(self, reviewer_id, rejection_message): - """Rejects the voiceover application, updates the final_reviewer_id and - adds rejection message. - - Args: - reviewer_id: str. The user ID of the reviewer. - rejection_message: str. The rejection message submitted by the - reviewer. 
- """ - self.status = suggestion_models.STATUS_REJECTED - self.final_reviewer_id = reviewer_id - self.rejection_message = rejection_message - self.validate() - - -VOICEOVER_APPLICATION_TARGET_TYPE_TO_DOMAIN_CLASSES = { - feconf.ENTITY_TYPE_EXPLORATION: ( - ExplorationVoiceoverApplication) -} - -SUGGESTION_TYPES_TO_DOMAIN_CLASSES = { +SUGGESTION_TYPES_TO_DOMAIN_CLASSES: Dict[ + str, + Union[ + Type[SuggestionEditStateContent], + Type[SuggestionTranslateContent], + Type[SuggestionAddQuestion] + ] +] = { feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT: ( SuggestionEditStateContent), feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: ( @@ -1216,9 +1210,12 @@ class CommunityContributionStats: """ def __init__( - self, translation_reviewer_counts_by_lang_code, - translation_suggestion_counts_by_lang_code, - question_reviewer_count, question_suggestion_count): + self, + translation_reviewer_counts_by_lang_code: Dict[str, int], + translation_suggestion_counts_by_lang_code: Dict[str, int], + question_reviewer_count: int, + question_suggestion_count: int + ) -> None: self.translation_reviewer_counts_by_lang_code = ( translation_reviewer_counts_by_lang_code ) @@ -1228,7 +1225,7 @@ def __init__( self.question_reviewer_count = question_reviewer_count self.question_suggestion_count = question_suggestion_count - def validate(self): + def validate(self) -> None: """Validates the CommunityContributionStats object. Raises: @@ -1249,7 +1246,7 @@ def validate(self): 'an integer for %s language code, received: %s.' % ( language_code, reviewer_count) ) - elif reviewer_count < 0: + if reviewer_count < 0: raise utils.ValidationError( 'Expected the translation reviewer count to be ' 'non-negative for %s language code, received: %s.' % ( @@ -1270,7 +1267,7 @@ def validate(self): 'an integer for %s language code, received: %s.' 
% ( language_code, suggestion_count) ) - elif suggestion_count < 0: + if suggestion_count < 0: raise utils.ValidationError( 'Expected the translation suggestion count to be ' 'non-negative for %s language code, received: %s.' % ( @@ -1300,7 +1297,8 @@ def validate(self): ) def set_translation_reviewer_count_for_language_code( - self, language_code, count): + self, language_code: str, count: int + ) -> None: """Sets the translation reviewer count to be count, for the given language code. @@ -1313,7 +1311,8 @@ def set_translation_reviewer_count_for_language_code( self.translation_reviewer_counts_by_lang_code[language_code] = count def set_translation_suggestion_count_for_language_code( - self, language_code, count): + self, language_code: str, count: int + ) -> None: """Sets the translation suggestion count to be count, for the language code given. @@ -1324,7 +1323,9 @@ def set_translation_suggestion_count_for_language_code( """ self.translation_suggestion_counts_by_lang_code[language_code] = count - def are_translation_reviewers_needed_for_lang_code(self, lang_code): + def are_translation_reviewers_needed_for_lang_code( + self, lang_code: str + ) -> bool: """Returns whether or not more reviewers are needed to review translation suggestions in the given language code. Translation suggestions in a given language need more reviewers if the number of @@ -1350,12 +1351,12 @@ def are_translation_reviewers_needed_for_lang_code(self, lang_code): self.translation_reviewer_counts_by_lang_code[lang_code]) number_of_suggestions = ( self.translation_suggestion_counts_by_lang_code[lang_code]) - return ( + return bool( number_of_suggestions > ( config_domain.MAX_NUMBER_OF_SUGGESTIONS_PER_REVIEWER.value * ( number_of_reviewers))) - def get_translation_language_codes_that_need_reviewers(self): + def get_translation_language_codes_that_need_reviewers(self) -> Set[str]: """Returns the language codes where more reviewers are needed to review translations in those language codes. 
Translation suggestions in a given language need more reviewers if the number of translation @@ -1374,7 +1375,7 @@ def get_translation_language_codes_that_need_reviewers(self): language_codes_that_need_reviewers.add(language_code) return language_codes_that_need_reviewers - def are_question_reviewers_needed(self): + def are_question_reviewers_needed(self) -> bool: """Returns whether or not more reviewers are needed to review question suggestions. Question suggestions need more reviewers if the number of question suggestions divided by the number of question reviewers is @@ -1390,22 +1391,63 @@ def are_question_reviewers_needed(self): if self.question_reviewer_count == 0: return True - return ( + return bool( self.question_suggestion_count > ( config_domain.MAX_NUMBER_OF_SUGGESTIONS_PER_REVIEWER.value * ( self.question_reviewer_count))) +class TranslationContributionStatsDict(TypedDict): + """Dictionary representing the TranslationContributionStats object.""" + + language_code: Optional[str] + contributor_user_id: Optional[str] + topic_id: Optional[str] + submitted_translations_count: int + submitted_translation_word_count: int + accepted_translations_count: int + accepted_translations_without_reviewer_edits_count: int + accepted_translation_word_count: int + rejected_translations_count: int + rejected_translation_word_count: int + contribution_dates: Set[datetime.date] + + +class TranslationContributionStatsFrontendDict(TypedDict): + """Dictionary representing the TranslationContributionStats + object for frontend. 
+ """ + + language_code: Optional[str] + topic_id: Optional[str] + submitted_translations_count: int + submitted_translation_word_count: int + accepted_translations_count: int + accepted_translations_without_reviewer_edits_count: int + accepted_translation_word_count: int + rejected_translations_count: int + rejected_translation_word_count: int + first_contribution_date: str + last_contribution_date: str + + class TranslationContributionStats: """Domain object for the TranslationContributionStatsModel.""" def __init__( - self, language_code, contributor_user_id, topic_id, - submitted_translations_count, submitted_translation_word_count, - accepted_translations_count, - accepted_translations_without_reviewer_edits_count, - accepted_translation_word_count, rejected_translations_count, - rejected_translation_word_count, contribution_dates): + self, + language_code: Optional[str], + contributor_user_id: Optional[str], + topic_id: Optional[str], + submitted_translations_count: int, + submitted_translation_word_count: int, + accepted_translations_count: int, + accepted_translations_without_reviewer_edits_count: int, + accepted_translation_word_count: int, + rejected_translations_count: int, + rejected_translation_word_count: int, + contribution_dates: Set[datetime.date] + ) -> None: self.language_code = language_code self.contributor_user_id = contributor_user_id self.topic_id = topic_id @@ -1422,7 +1464,10 @@ def __init__( @classmethod def create_default( - cls, language_code=None, contributor_user_id=None, topic_id=None + cls, + language_code: Optional[str] = None, + contributor_user_id: Optional[str] = None, + topic_id: Optional[str] = None ) -> TranslationContributionStats: """Create default translation contribution stats. @@ -1443,7 +1488,7 @@ def create_default( 0, 0, 0, 0, 0, 0, 0, set() ) - def to_dict(self): + def to_dict(self) -> TranslationContributionStatsDict: """Returns a dict representation of a TranslationContributionStats domain object. 
@@ -1469,9 +1514,469 @@ def to_dict(self): 'contribution_dates': self.contribution_dates } + # TODO(#16051): TranslationContributionStats to use first_contribution_date + # and last_contribution_date. + def to_frontend_dict(self) -> TranslationContributionStatsFrontendDict: + """Returns a dict representation of a TranslationContributionStats + domain object for frontend. + + Returns: + dict. A dict representation of a TranslationContributionStats + domain object for frontend. + """ + sorted_contribution_dates = sorted(self.contribution_dates) + return { + 'language_code': self.language_code, + 'topic_id': self.topic_id, + 'submitted_translations_count': self.submitted_translations_count, + 'submitted_translation_word_count': ( + self.submitted_translation_word_count), + 'accepted_translations_count': self.accepted_translations_count, + 'accepted_translations_without_reviewer_edits_count': ( + self.accepted_translations_without_reviewer_edits_count), + 'accepted_translation_word_count': ( + self.accepted_translation_word_count), + 'rejected_translations_count': self.rejected_translations_count, + 'rejected_translation_word_count': ( + self.rejected_translation_word_count), + 'first_contribution_date': ( + sorted_contribution_dates[0].strftime('%b %Y')), + 'last_contribution_date': ( + sorted_contribution_dates[-1].strftime('%b %Y')) + } + + +class TranslationReviewStatsDict(TypedDict): + """Dictionary representing the TranslationReviewStats object.""" + + language_code: str + contributor_user_id: str + topic_id: str + reviewed_translations_count: int + reviewed_translation_word_count: int + accepted_translations_count: int + accepted_translation_word_count: int + accepted_translations_with_reviewer_edits_count: int + first_contribution_date: datetime.date + last_contribution_date: datetime.date + + +class TranslationReviewStatsFrontendDict(TypedDict): + """Dictionary representing the TranslationReviewStats + object for frontend. 
+ """ + + language_code: str + topic_id: str + reviewed_translations_count: int + reviewed_translation_word_count: int + accepted_translations_count: int + accepted_translation_word_count: int + accepted_translations_with_reviewer_edits_count: int + first_contribution_date: str + last_contribution_date: str + + +class TranslationReviewStats: + """Domain object for the TranslationReviewStatsModel.""" + + def __init__( + self, + language_code: str, + contributor_user_id: str, + topic_id: str, + reviewed_translations_count: int, + reviewed_translation_word_count: int, + accepted_translations_count: int, + accepted_translation_word_count: int, + accepted_translations_with_reviewer_edits_count: int, + first_contribution_date: datetime.date, + last_contribution_date: datetime.date + ) -> None: + self.language_code = language_code + self.contributor_user_id = contributor_user_id + self.topic_id = topic_id + self.reviewed_translations_count = reviewed_translations_count + self.reviewed_translation_word_count = reviewed_translation_word_count + self.accepted_translations_count = accepted_translations_count + self.accepted_translation_word_count = accepted_translation_word_count + self.accepted_translations_with_reviewer_edits_count = ( + accepted_translations_with_reviewer_edits_count + ) + self.first_contribution_date = first_contribution_date + self.last_contribution_date = last_contribution_date + + def to_dict(self) -> TranslationReviewStatsDict: + """Returns a dict representation of a TranslationReviewStats + domain object. + + Returns: + dict. A dict representation of a TranslationReviewStats + domain object. 
+ """ + return { + 'language_code': self.language_code, + 'contributor_user_id': self.contributor_user_id, + 'topic_id': self.topic_id, + 'reviewed_translations_count': self.reviewed_translations_count, + 'reviewed_translation_word_count': ( + self.reviewed_translation_word_count), + 'accepted_translations_count': self.accepted_translations_count, + 'accepted_translation_word_count': ( + self.accepted_translation_word_count), + 'accepted_translations_with_reviewer_edits_count': ( + self.accepted_translations_with_reviewer_edits_count), + 'first_contribution_date': self.first_contribution_date, + 'last_contribution_date': self.last_contribution_date, + } + + def to_frontend_dict(self) -> TranslationReviewStatsFrontendDict: + """Returns a dict representation of a TranslationReviewStats + domain object for frontend. + + Returns: + dict. A dict representation of a TranslationReviewStats + domain object for frontend. + """ + return { + 'language_code': self.language_code, + 'topic_id': self.topic_id, + 'reviewed_translations_count': self.reviewed_translations_count, + 'reviewed_translation_word_count': ( + self.reviewed_translation_word_count), + 'accepted_translations_count': self.accepted_translations_count, + 'accepted_translation_word_count': ( + self.accepted_translation_word_count), + 'accepted_translations_with_reviewer_edits_count': ( + self.accepted_translations_with_reviewer_edits_count), + 'first_contribution_date': ( + self.first_contribution_date.strftime('%b %Y')), + 'last_contribution_date': ( + self.last_contribution_date.strftime('%b %Y')) + } + + +class QuestionContributionStatsDict(TypedDict): + """Dictionary representing the QuestionContributionStats object.""" + + contributor_user_id: str + topic_id: str + submitted_questions_count: int + accepted_questions_count: int + accepted_questions_without_reviewer_edits_count: int + first_contribution_date: datetime.date + last_contribution_date: datetime.date + + +class 
QuestionContributionStatsFrontendDict(TypedDict): + """Dictionary representing the QuestionContributionStats + object for frontend. + """ + + topic_id: str + submitted_questions_count: int + accepted_questions_count: int + accepted_questions_without_reviewer_edits_count: int + first_contribution_date: str + last_contribution_date: str + + +class QuestionContributionStats: + """Domain object for the QuestionContributionStatsModel.""" + + def __init__( + self, + contributor_user_id: str, + topic_id: str, + submitted_questions_count: int, + accepted_questions_count: int, + accepted_questions_without_reviewer_edits_count: int, + first_contribution_date: datetime.date, + last_contribution_date: datetime.date + ) -> None: + self.contributor_user_id = contributor_user_id + self.topic_id = topic_id + self.submitted_questions_count = submitted_questions_count + self.accepted_questions_count = accepted_questions_count + self.accepted_questions_without_reviewer_edits_count = ( + accepted_questions_without_reviewer_edits_count + ) + self.first_contribution_date = first_contribution_date + self.last_contribution_date = last_contribution_date + + def to_dict(self) -> QuestionContributionStatsDict: + """Returns a dict representation of a QuestionContributionStats + domain object. + + Returns: + dict. A dict representation of a QuestionContributionStats + domain object. 
+ """ + return { + 'contributor_user_id': self.contributor_user_id, + 'topic_id': self.topic_id, + 'submitted_questions_count': self.submitted_questions_count, + 'accepted_questions_count': ( + self.accepted_questions_count), + 'accepted_questions_without_reviewer_edits_count': ( + self.accepted_questions_without_reviewer_edits_count), + 'first_contribution_date': ( + self.first_contribution_date), + 'last_contribution_date': self.last_contribution_date + } + + def to_frontend_dict(self) -> QuestionContributionStatsFrontendDict: + """Returns a dict representation of a QuestionContributionStats + domain object for frontend. + + Returns: + dict. A dict representation of a QuestionContributionStats + domain object for frontend. + """ + return { + 'topic_id': self.topic_id, + 'submitted_questions_count': self.submitted_questions_count, + 'accepted_questions_count': ( + self.accepted_questions_count), + 'accepted_questions_without_reviewer_edits_count': ( + self.accepted_questions_without_reviewer_edits_count), + 'first_contribution_date': ( + self.first_contribution_date.strftime('%b %Y')), + 'last_contribution_date': ( + self.last_contribution_date.strftime('%b %Y')) + } + + +class QuestionReviewStatsDict(TypedDict): + """Dictionary representing the QuestionReviewStats object.""" + + contributor_user_id: str + topic_id: str + reviewed_questions_count: int + accepted_questions_count: int + accepted_questions_with_reviewer_edits_count: int + first_contribution_date: datetime.date + last_contribution_date: datetime.date + + +class QuestionReviewStatsFrontendDict(TypedDict): + """Dictionary representing the QuestionReviewStats + object for frontend. 
+ """ + + topic_id: str + reviewed_questions_count: int + accepted_questions_count: int + accepted_questions_with_reviewer_edits_count: int + first_contribution_date: str + last_contribution_date: str + + +class QuestionReviewStats: + """Domain object for the QuestionReviewStatsModel.""" + + def __init__( + self, + contributor_user_id: str, + topic_id: str, + reviewed_questions_count: int, + accepted_questions_count: int, + accepted_questions_with_reviewer_edits_count: int, + first_contribution_date: datetime.date, + last_contribution_date: datetime.date + ) -> None: + self.contributor_user_id = contributor_user_id + self.topic_id = topic_id + self.reviewed_questions_count = reviewed_questions_count + self.accepted_questions_count = accepted_questions_count + self.accepted_questions_with_reviewer_edits_count = ( + accepted_questions_with_reviewer_edits_count + ) + self.first_contribution_date = first_contribution_date + self.last_contribution_date = last_contribution_date + + def to_dict(self) -> QuestionReviewStatsDict: + """Returns a dict representation of a QuestionContributionStats + domain object. + + Returns: + dict. A dict representation of a QuestionContributionStats + domain object. + """ + return { + 'contributor_user_id': self.contributor_user_id, + 'topic_id': self.topic_id, + 'reviewed_questions_count': self.reviewed_questions_count, + 'accepted_questions_count': ( + self.accepted_questions_count), + 'accepted_questions_with_reviewer_edits_count': ( + self.accepted_questions_with_reviewer_edits_count), + 'first_contribution_date': ( + self.first_contribution_date), + 'last_contribution_date': self.last_contribution_date + } + + def to_frontend_dict(self) -> QuestionReviewStatsFrontendDict: + """Returns a dict representation of a QuestionContributionStats + domain object for frontend. + + Returns: + dict. A dict representation of a QuestionContributionStats + domain object for frontend. 
+ """ + return { + 'topic_id': self.topic_id, + 'reviewed_questions_count': self.reviewed_questions_count, + 'accepted_questions_count': ( + self.accepted_questions_count), + 'accepted_questions_with_reviewer_edits_count': ( + self.accepted_questions_with_reviewer_edits_count), + 'first_contribution_date': ( + self.first_contribution_date.strftime('%b %Y')), + 'last_contribution_date': ( + self.last_contribution_date.strftime('%b %Y')) + } + + +class ContributorCertificateInfoDict(TypedDict): + """Dictionary representing the ContributorCertificateInfo object.""" + + from_date: str + to_date: str + team_lead: str + contribution_hours: str + language: Optional[str] + + +class ContributorCertificateInfo: + """Encapsulates key information that is used to generate contributor + certificate. + """ + + def __init__( + self, + from_date: str, + to_date: str, + team_lead: str, + contribution_hours: str, + language: Optional[str] + ) -> None: + self.from_date = from_date + self.to_date = to_date + self.team_lead = team_lead + self.contribution_hours = contribution_hours + self.language = language + + def to_dict(self) -> ContributorCertificateInfoDict: + """Returns a dict representation of a ContributorCertificateInfo + domain object. + + Returns: + dict. A dict representation of a ContributorCertificateInfo + domain object. + """ + return { + 'from_date': self.from_date, + 'to_date': self.to_date, + 'team_lead': self.team_lead, + 'contribution_hours': self.contribution_hours, + 'language': self.language + } + + +class ContributorMilestoneEmailInfo: + """Encapsulates key information that is used to create the email content for + notifying contributors about milestones they achieved. + + Attributes: + contributor_user_id: str. The ID of the contributor. + language_code: str|None. The language code of the suggestion. + contribution_type: str. The type of the contribution i.e. + translation or question. + contribution_sub_type: str. The sub type of the contribution + i.e. 
submissions/acceptances/reviews/edits. + rank_name: str. The name of the rank that the contributor achieved. + """ + + def __init__( + self, + contributor_user_id: str, + contribution_type: str, + contribution_subtype: str, + language_code: Optional[str], + rank_name: str + ) -> None: + self.contributor_user_id = contributor_user_id + self.contribution_type = contribution_type + self.contribution_subtype = contribution_subtype + self.language_code = language_code + self.rank_name = rank_name + + +class ContributorStatsSummaryDict(TypedDict): + """Dictionary representing the ContributorStatsSummary object.""" + + contributor_user_id: str + translation_contribution_stats: List[TranslationContributionStatsDict] + question_contribution_stats: List[QuestionContributionStatsDict] + translation_review_stats: List[TranslationReviewStatsDict] + question_review_stats: List[QuestionReviewStatsDict] + + +class ContributorStatsSummary: + """Encapsulates key information that is used to send to the frontend + regarding contributor stats. + + Attributes: + contributor_user_id: str. The ID of the contributor. + translation_contribution_stats: list(TranslationContributionStats). A + list of TranslationContributionStats corresponding to the user. + question_contribution_stats: list(QuestionContributionStats). A list of + QuestionContributionStats corresponding to the user. + translation_review_stats: list(TranslationReviewStats). A list of + TranslationReviewStats corresponding to the user. + question_review_stats: list(QuestionReviewStats). A list of + QuestionReviewStats corresponding to the user. 
+ """ + + def __init__( + self, + contributor_user_id: str, + translation_contribution_stats: List[TranslationContributionStats], + question_contribution_stats: List[QuestionContributionStats], + translation_review_stats: List[TranslationReviewStats], + question_review_stats: List[QuestionReviewStats] + ) -> None: + self.contributor_user_id = contributor_user_id + self.translation_contribution_stats = translation_contribution_stats + self.question_contribution_stats = question_contribution_stats + self.translation_review_stats = translation_review_stats + self.question_review_stats = question_review_stats + + def to_dict(self) -> ContributorStatsSummaryDict: + """Returns a dict representation of a ContributorStatsSummary + domain object. + + Returns: + dict. A dict representation of a ContributorStatsSummary + domain object. + """ + return { + 'contributor_user_id': self.contributor_user_id, + 'translation_contribution_stats': [ + stats.to_dict() for stats in ( + self.translation_contribution_stats)], + 'question_contribution_stats': [ + stats.to_dict() for stats in self.question_contribution_stats], + 'translation_review_stats': [ + stats.to_dict() for stats in self.translation_review_stats], + 'question_review_stats': [ + stats.to_dict() for stats in self.question_review_stats] + } + class ReviewableSuggestionEmailInfo: - """Stores key information that is used to create the email content for + """Encapsulates key information that is used to create the email content for notifying admins and reviewers that there are suggestions that need to be reviewed. 
@@ -1486,8 +1991,12 @@ class ReviewableSuggestionEmailInfo: """ def __init__( - self, suggestion_type, language_code, suggestion_content, - submission_datetime): + self, + suggestion_type: str, + language_code: str, + suggestion_content: str, + submission_datetime: datetime.datetime + ) -> None: self.suggestion_type = suggestion_type self.language_code = language_code self.suggestion_content = suggestion_content diff --git a/core/domain/suggestion_registry_test.py b/core/domain/suggestion_registry_test.py index d2cf116fe9be..7d19955c561b 100644 --- a/core/domain/suggestion_registry_test.py +++ b/core/domain/suggestion_registry_test.py @@ -20,92 +20,103 @@ import os from core import feconf -from core import python_utils from core import utils +from core.constants import constants +from core.domain import change_domain from core.domain import config_services from core.domain import exp_domain -from core.domain import exp_fetchers from core.domain import exp_services from core.domain import fs_services from core.domain import html_validation_service from core.domain import question_domain +from core.domain import question_services from core.domain import skill_services from core.domain import state_domain from core.domain import suggestion_registry from core.domain import suggestion_services +from core.domain import translation_domain +from core.domain import translation_fetchers from core.platform import models from core.tests import test_utils +from extensions import domain -(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion]) +from typing import Dict, Final, List, Optional, TypedDict, Union, cast + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import opportunity_models + from mypy_imports import suggestion_models + +( + suggestion_models, opportunity_models +) = models.Registry.import_models([ + models.Names.SUGGESTION, models.Names.OPPORTUNITY +]) + +ChangeType = Dict[ + str, Union[str, float, Dict[str, Union[str, int, 
state_domain.StateDict]]] +] class MockInvalidSuggestion(suggestion_registry.BaseSuggestion): - def __init__(self): # pylint: disable=super-init-not-called + def __init__(self) -> None: # pylint: disable=super-init-not-called pass class BaseSuggestionUnitTests(test_utils.GenericTestBase): """Tests for the BaseSuggestion class.""" - def setUp(self): - super(BaseSuggestionUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.base_suggestion = MockInvalidSuggestion() - def test_base_class_accept_raises_error(self): - with self.assertRaisesRegexp( + def test_base_class_accept_raises_error(self) -> None: + with self.assertRaisesRegex( NotImplementedError, 'Subclasses of BaseSuggestion should implement accept.'): - self.base_suggestion.accept() - - def test_base_class_get_change_list_for_accepting_suggestion_raises_error( - self): - with self.assertRaisesRegexp( - NotImplementedError, - 'Subclasses of BaseSuggestion should implement ' - 'get_change_list_for_accepting_suggestion.'): - self.base_suggestion.get_change_list_for_accepting_suggestion() + self.base_suggestion.accept('test_message') - def test_base_class_pre_accept_validate_raises_error(self): - with self.assertRaisesRegexp( + def test_base_class_pre_accept_validate_raises_error(self) -> None: + with self.assertRaisesRegex( NotImplementedError, 'Subclasses of BaseSuggestion should implement' ' pre_accept_validate.'): self.base_suggestion.pre_accept_validate() - def test_base_class_populate_old_value_of_change_raises_error(self): - with self.assertRaisesRegexp( + def test_base_class_populate_old_value_of_change_raises_error(self) -> None: + with self.assertRaisesRegex( NotImplementedError, 'Subclasses of BaseSuggestion should implement' ' populate_old_value_of_change.'): self.base_suggestion.populate_old_value_of_change() - def test_base_class_pre_update_validate_raises_error(self): - with self.assertRaisesRegexp( + def test_base_class_pre_update_validate_raises_error(self) -> None: + with 
self.assertRaisesRegex( NotImplementedError, 'Subclasses of BaseSuggestion should implement' ' pre_update_validate.'): self.base_suggestion.pre_update_validate({}) - def test_base_class_get_all_html_content_strings(self): - with self.assertRaisesRegexp( + def test_base_class_get_all_html_content_strings(self) -> None: + with self.assertRaisesRegex( NotImplementedError, 'Subclasses of BaseSuggestion should implement' ' get_all_html_content_strings.'): self.base_suggestion.get_all_html_content_strings() - def test_base_class_get_target_entity_html_strings(self): - with self.assertRaisesRegexp( + def test_base_class_get_target_entity_html_strings(self) -> None: + with self.assertRaisesRegex( NotImplementedError, 'Subclasses of BaseSuggestion should implement' ' get_target_entity_html_strings.'): self.base_suggestion.get_target_entity_html_strings() - def test_base_class_convert_html_in_suggestion_change(self): - def conversion_fn(): + def test_base_class_convert_html_in_suggestion_change(self) -> None: + def conversion_fn(_: str) -> str: """Temporary function.""" - pass - with self.assertRaisesRegexp( + return 'abcd' + with self.assertRaisesRegex( NotImplementedError, 'Subclasses of BaseSuggestion should implement' ' convert_html_in_suggestion_change.'): @@ -113,22 +124,40 @@ def conversion_fn(): conversion_fn) +class SuggestionEditStateContentDict(TypedDict): + """Dictionary representing the SuggestionEditStateContent object.""" + + suggestion_id: str + suggestion_type: str + target_type: str + target_id: str + target_version_at_submission: int + status: str + author_name: str + final_reviewer_id: Optional[str] + change: Dict[str, change_domain.AcceptableChangeDictTypes] + score_category: str + language_code: Optional[str] + last_updated: float + edited_by_reviewer: bool + + class SuggestionEditStateContentUnitTests(test_utils.GenericTestBase): """Tests for the SuggestionEditStateContent class.""" - AUTHOR_EMAIL = 'author@example.com' - REVIEWER_EMAIL = 
'reviewer@example.com' - ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com' - fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) + AUTHOR_EMAIL: Final = 'author@example.com' + REVIEWER_EMAIL: Final = 'reviewer@example.com' + ASSIGNED_REVIEWER_EMAIL: Final = 'assigned_reviewer@example.com' + fake_date: datetime.datetime = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) - def setUp(self): - super(SuggestionEditStateContentUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, 'author') self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.signup(self.REVIEWER_EMAIL, 'reviewer') self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL) - self.suggestion_dict = { + self.suggestion_dict: SuggestionEditStateContentDict = { 'suggestion_id': 'exploration.exp1.thread1', 'suggestion_type': ( feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT), @@ -151,7 +180,7 @@ def setUp(self): 'edited_by_reviewer': False } - def test_create_suggestion_edit_state_content(self): + def test_create_suggestion_edit_state_content(self) -> None: expected_suggestion_dict = self.suggestion_dict observed_suggestion = suggestion_registry.SuggestionEditStateContent( @@ -166,7 +195,7 @@ def test_create_suggestion_edit_state_content(self): self.assertDictEqual( observed_suggestion.to_dict(), expected_suggestion_dict) - def test_validate_suggestion_edit_state_content(self): + def test_validate_suggestion_edit_state_content(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( @@ -180,7 +209,7 @@ def test_validate_suggestion_edit_state_content(self): suggestion.validate() - def test_get_score_part_helper_methods(self): + def test_get_score_part_helper_methods(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( @@ -195,7 +224,7 @@ def test_get_score_part_helper_methods(self): 
self.assertEqual(suggestion.get_score_type(), 'content') self.assertEqual(suggestion.get_score_sub_type(), 'Algebra') - def test_validate_suggestion_type(self): + def test_validate_suggestion_type(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -209,13 +238,13 @@ def test_validate_suggestion_type(self): suggestion.validate() suggestion.suggestion_type = 'invalid_suggestion_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected suggestion_type to be among allowed choices' ): suggestion.validate() - def test_validate_target_type(self): + def test_validate_target_type(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -229,13 +258,13 @@ def test_validate_target_type(self): suggestion.validate() suggestion.target_type = 'invalid_target_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected target_type to be among allowed choices' ): suggestion.validate() - def test_validate_target_id(self): + def test_validate_target_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -248,13 +277,16 @@ def test_validate_target_id(self): suggestion.validate() - suggestion.target_id = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.target_id = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected target_id to be a string' ): suggestion.validate() - def test_validate_target_version_at_submission(self): + def test_validate_target_version_at_submission(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -267,14 +299,17 @@ def test_validate_target_version_at_submission(self): suggestion.validate() - suggestion.target_version_at_submission = 'invalid_version' - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + suggestion.target_version_at_submission = 'invalid_version' # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected target_version_at_submission to be an int' ): suggestion.validate() - def test_validate_status(self): + def test_validate_status(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -288,12 +323,12 @@ def test_validate_status(self): suggestion.validate() suggestion.status = 'invalid_status' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected status to be among allowed choices' ): suggestion.validate() - def test_validate_author_id(self): + def test_validate_author_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -306,13 +341,16 @@ def test_validate_author_id(self): suggestion.validate() - suggestion.author_id = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # 
codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + suggestion.author_id = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected author_id to be a string' ): suggestion.validate() - def test_validate_author_id_format(self): + def test_validate_author_id_format(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -329,13 +367,13 @@ def test_validate_author_id_format(self): suggestion.validate() suggestion.author_id = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected author_id to be in a valid user ID format' ): suggestion.validate() - def test_validate_final_reviewer_id(self): + def test_validate_final_reviewer_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -348,13 +386,16 @@ def test_validate_final_reviewer_id(self): suggestion.validate() - suggestion.final_reviewer_id = 1 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.final_reviewer_id = 1 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected final_reviewer_id to be a string' ): suggestion.validate() - def test_validate_final_reviewer_id_format(self): + def test_validate_final_reviewer_id_format(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -371,13 +412,13 @@ def test_validate_final_reviewer_id_format(self): suggestion.validate() suggestion.final_reviewer_id = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected final_reviewer_id to be in a valid user ID format' ): suggestion.validate() - def test_validate_score_category(self): + def test_validate_score_category(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -390,13 +431,16 @@ def test_validate_score_category(self): suggestion.validate() - suggestion.score_category = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.score_category = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected score_category to be a string' ): suggestion.validate() - def test_validate_score_category_format(self): + def test_validate_score_category_format(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -410,7 +454,7 @@ def test_validate_score_category_format(self): suggestion.validate() suggestion.score_category = 'score.score_type.score_sub_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected score_category to be of the form' ' score_type.score_sub_type' @@ -418,14 +462,14 @@ def test_validate_score_category_format(self): suggestion.validate() suggestion.score_category = 'invalid_score_category' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected score_category to be of the form' ' score_type.score_sub_type' ): suggestion.validate() - def test_validate_score_type(self): + def test_validate_score_type(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -439,14 +483,14 @@ def test_validate_score_type(self): suggestion.validate() suggestion.score_category = 'invalid_score_type.score_sub_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the first part of score_category to be among allowed' ' choices' ): suggestion.validate() - def test_validate_change(self): + def test_validate_change(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -459,13 +503,16 @@ def test_validate_change(self): suggestion.validate() - suggestion.change = {} - with 
self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + suggestion.change = {} # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected change to be an ExplorationChange' ): suggestion.validate() - def test_validate_score_type_content(self): + def test_validate_score_type_content(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -479,13 +526,13 @@ def test_validate_score_type_content(self): suggestion.validate() suggestion.score_category = 'question.score_sub_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the first part of score_category to be content' ): suggestion.validate() - def test_validate_change_cmd(self): + def test_validate_change_cmd(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -498,12 +545,12 @@ def test_validate_change_cmd(self): suggestion.validate() suggestion.change.cmd = 'invalid_cmd' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected cmd to be edit_state_property' ): suggestion.validate() - def test_validate_change_property_name(self): + def test_validate_change_property_name(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -516,14 +563,19 @@ def test_validate_change_property_name(self): suggestion.validate() - suggestion.change.property_name = 'invalid_property' - with self.assertRaisesRegexp( + # Here we use MyPy ignore because 'property_name' can only accept + # 'content' string literal 
but here we are providing 'invalid_property' + # which causes MyPy to throw an error. Thus to avoid the error, we used + # ignore here. + suggestion.change.property_name = 'invalid_property' # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected property_name to be content' ): suggestion.validate() def test_validate_language_code_fails_when_language_codes_do_not_match( - self): + self + ) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -537,13 +589,13 @@ def test_validate_language_code_fails_when_language_codes_do_not_match( suggestion.language_code = 'wrong_language_code' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected language_code to be None, received wrong_language_code' ): suggestion.validate() - def test_pre_accept_validate_state_name(self): + def test_pre_accept_validate_state_name(self) -> None: self.save_new_default_exploration('exp1', self.author_id) expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( @@ -555,25 +607,18 @@ def test_pre_accept_validate_state_name(self): expected_suggestion_dict['score_category'], expected_suggestion_dict['language_code'], False, self.fake_date) - exp_services.update_exploration( - self.author_id, 'exp1', [ - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'State A', - }) - ], 'Added state') - suggestion.change.state_name = 'State A' + suggestion.change.state_name = 'Introduction' suggestion.pre_accept_validate() suggestion.change.state_name = 'invalid_state_name' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected invalid_state_name to be a valid state name' ): suggestion.pre_accept_validate() - def test_populate_old_value_of_change_with_invalid_state(self): + def 
test_populate_old_value_of_change_with_invalid_state(self) -> None: self.save_new_default_exploration('exp1', self.author_id) expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( @@ -593,7 +638,7 @@ def test_populate_old_value_of_change_with_invalid_state(self): self.assertIsNone(suggestion.change.old_value) - def test_pre_update_validate_change_cmd(self): + def test_pre_update_validate_change_cmd(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -611,14 +656,16 @@ def test_pre_update_validate_change_cmd(self): 'new_value': 'new suggestion content', 'old_value': None } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The following extra attributes are present: new_value, ' 'old_value, property_name' ): - suggestion.pre_update_validate(exp_domain.ExplorationChange(change)) + suggestion.pre_update_validate( + exp_domain.EditExpStatePropertyContentCmd(change) + ) - def test_pre_update_validate_change_property_name(self): + def test_pre_update_validate_change_property_name(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -636,13 +683,15 @@ def test_pre_update_validate_change_property_name(self): 'new_value': 'new suggestion content', 'old_value': None } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The new change property_name must be equal to content' ): - suggestion.pre_update_validate(exp_domain.ExplorationChange(change)) + suggestion.pre_update_validate( + exp_domain.EditExpStatePropertyContentCmd(change) + ) - def test_pre_update_validate_change_state_name(self): + def test_pre_update_validate_change_state_name(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = 
suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -660,13 +709,15 @@ def test_pre_update_validate_change_state_name(self): 'new_value': 'new suggestion content', 'old_value': None } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The new change state_name must be equal to state_1' ): - suggestion.pre_update_validate(exp_domain.ExplorationChange(change)) + suggestion.pre_update_validate( + exp_domain.EditExpStatePropertyContentCmd(change) + ) - def test_pre_update_validate_change_new_value(self): + def test_pre_update_validate_change_new_value(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -681,19 +732,23 @@ def test_pre_update_validate_change_new_value(self): suggestion.change.new_value = new_content - change = { + change: Dict[ + str, Union[Optional[str], state_domain.SubtitledHtmlDict] + ] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': suggestion.change.state_name, 'new_value': new_content, 'old_value': None } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The new html must not match the old html' ): - suggestion.pre_update_validate(exp_domain.ExplorationChange(change)) + suggestion.pre_update_validate( + exp_domain.EditExpStatePropertyContentCmd(change) + ) - def test_pre_update_validate_non_equal_change_cmd(self): + def test_pre_update_validate_non_equal_change_cmd(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionEditStateContent( expected_suggestion_dict['suggestion_id'], @@ -704,18 +759,20 @@ def test_pre_update_validate_non_equal_change_cmd(self): expected_suggestion_dict['score_category'], expected_suggestion_dict['language_code'], False, self.fake_date) - with self.assertRaisesRegexp( + with 
self.assertRaisesRegex( utils.ValidationError, 'The new change cmd must be equal to edit_state_property' ): - suggestion.pre_update_validate(exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, - 'property_name': 'title', - 'new_value': 'Exploration 1 Albert title' - })) + suggestion.pre_update_validate( + exp_domain.EditExpStatePropertyContentCmd({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'Exploration 1 Albert title' + }) + ) - def test_get_all_html_content_strings(self): - change_dict = { + def test_get_all_html_content_strings(self) -> None: + change_dict: Dict[str, Union[Optional[str], Dict[str, str]]] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'state_1', @@ -738,7 +795,7 @@ def test_get_all_html_content_strings(self): expected_outcome_list = [u'new suggestion content'] self.assertEqual(expected_outcome_list, actual_outcome_list) - def test_convert_html_in_suggestion_change(self): + def test_convert_html_in_suggestion_change(self) -> None: html_content = ( '

    Value

    ') @@ -748,7 +805,7 @@ def test_convert_html_in_suggestion_change(self): 'amp;quot;svg_filename&quot;: &quot;&quot;}">') - change = { + change: Dict[str, Union[str, Dict[str, str]]] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'Introduction', @@ -773,11 +830,15 @@ def test_convert_html_in_suggestion_change(self): suggestion.convert_html_in_suggestion_change( html_validation_service. add_math_content_to_math_rte_components) + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(suggestion.change.old_value, dict) self.assertEqual( suggestion.change.old_value['html'], expected_html_content) - def test_get_target_entity_html_strings_returns_expected_strings(self): - change_dict = { + def test_get_target_entity_html_strings_returns_expected_strings( + self + ) -> None: + change_dict: Dict[str, Union[str, Dict[str, str]]] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'state_1', @@ -803,8 +864,8 @@ def test_get_target_entity_html_strings_returns_expected_strings(self): expected_outcome_list = [u'Old content.'] self.assertEqual(expected_outcome_list, actual_outcome_list) - def test_get_target_entity_html_with_none_old_value(self): - change_dict = { + def test_get_target_entity_html_with_none_old_value(self) -> None: + change_dict: Dict[str, Union[Optional[str], Dict[str, str]]] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'state_1', @@ -830,19 +891,19 @@ def test_get_target_entity_html_with_none_old_value(self): class SuggestionTranslateContentUnitTests(test_utils.GenericTestBase): """Tests for the SuggestionEditStateContent class.""" - AUTHOR_EMAIL = 'author@example.com' - REVIEWER_EMAIL = 'reviewer@example.com' - ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com' - fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) + 
AUTHOR_EMAIL: Final = 'author@example.com' + REVIEWER_EMAIL: Final = 'reviewer@example.com' + ASSIGNED_REVIEWER_EMAIL: Final = 'assigned_reviewer@example.com' + fake_date: datetime.datetime = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) - def setUp(self): - super(SuggestionTranslateContentUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, 'author') self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.signup(self.REVIEWER_EMAIL, 'reviewer') self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL) - self.suggestion_dict = { + self.suggestion_dict: suggestion_registry.BaseSuggestionDict = { 'suggestion_id': 'exploration.exp1.thread1', 'suggestion_type': ( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT), @@ -867,7 +928,23 @@ def setUp(self): 'edited_by_reviewer': False } - def test_pre_update_validate_fails_for_invalid_change_cmd(self): + opportunity_models.ExplorationOpportunitySummaryModel( + id='exp1', + topic_id='Topic1', + topic_name='New Topic', + story_id='Story1', + story_title='New Story', + chapter_title='New chapter', + content_count=10, + translation_counts={}, + incomplete_translation_language_codes=[ + language['id'] + for language in constants.SUPPORTED_AUDIO_LANGUAGES + ], + language_codes_needing_voice_artists=['en'] + ).put() + + def test_pre_update_validate_fails_for_invalid_change_cmd(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -876,20 +953,21 @@ def test_pre_update_validate_fails_for_invalid_change_cmd(self): expected_suggestion_dict['status'], self.author_id, self.reviewer_id, expected_suggestion_dict['change'], expected_suggestion_dict['score_category'], - expected_suggestion_dict['language_code'], self.fake_date) + expected_suggestion_dict['language_code'], False, + self.fake_date) change = { - 'cmd': exp_domain.CMD_ADD_STATE, + 'cmd': 
exp_domain.CMD_DELETE_STATE, 'state_name': 'Introduction' } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The new change cmd must be equal to %s' % ( exp_domain.CMD_ADD_WRITTEN_TRANSLATION) ): suggestion.pre_update_validate(exp_domain.ExplorationChange(change)) - def test_pre_update_validate_change_state_name(self): + def test_pre_update_validate_change_state_name(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -898,7 +976,8 @@ def test_pre_update_validate_change_state_name(self): expected_suggestion_dict['status'], self.author_id, self.reviewer_id, expected_suggestion_dict['change'], expected_suggestion_dict['score_category'], - expected_suggestion_dict['language_code'], self.fake_date) + expected_suggestion_dict['language_code'], False, + self.fake_date) change = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': 'State 1', @@ -908,13 +987,13 @@ def test_pre_update_validate_change_state_name(self): 'translation_html': '

    This is the updated translated html.

    ', 'data_format': 'html' } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The new change state_name must be equal to Introduction' ): suggestion.pre_update_validate(exp_domain.ExplorationChange(change)) - def test_pre_update_validate_change_language_code(self): + def test_pre_update_validate_change_language_code(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -923,7 +1002,8 @@ def test_pre_update_validate_change_language_code(self): expected_suggestion_dict['status'], self.author_id, self.reviewer_id, expected_suggestion_dict['change'], expected_suggestion_dict['score_category'], - expected_suggestion_dict['language_code'], self.fake_date) + expected_suggestion_dict['language_code'], False, + self.fake_date) change = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': 'Introduction', @@ -933,13 +1013,13 @@ def test_pre_update_validate_change_language_code(self): 'translation_html': '

    This is the updated translated html.

    ', 'data_format': 'html' } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The language code must be equal to hi' ): suggestion.pre_update_validate(exp_domain.ExplorationChange(change)) - def test_pre_update_validate_change_content_html(self): + def test_pre_update_validate_change_content_html(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -948,7 +1028,8 @@ def test_pre_update_validate_change_content_html(self): expected_suggestion_dict['status'], self.author_id, self.reviewer_id, expected_suggestion_dict['change'], expected_suggestion_dict['score_category'], - expected_suggestion_dict['language_code'], self.fake_date) + expected_suggestion_dict['language_code'], False, + self.fake_date) change = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': 'Introduction', @@ -958,7 +1039,7 @@ def test_pre_update_validate_change_content_html(self): 'translation_html': '

    This is the updated translated html.

    ', 'data_format': 'html' } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The new change content_html must be equal to

    This is a ' + 'content.

    ' @@ -966,7 +1047,7 @@ def test_pre_update_validate_change_content_html(self): suggestion.pre_update_validate( exp_domain.ExplorationChange(change)) - def test_create_suggestion_add_translation(self): + def test_create_suggestion_add_translation(self) -> None: expected_suggestion_dict = self.suggestion_dict observed_suggestion = suggestion_registry.SuggestionTranslateContent( @@ -981,7 +1062,7 @@ def test_create_suggestion_add_translation(self): self.assertDictEqual( observed_suggestion.to_dict(), expected_suggestion_dict) - def test_validate_suggestion_add_translation(self): + def test_validate_suggestion_add_translation(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( @@ -995,7 +1076,7 @@ def test_validate_suggestion_add_translation(self): suggestion.validate() - def test_get_score_part_helper_methods(self): + def test_get_score_part_helper_methods(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( @@ -1010,7 +1091,7 @@ def test_get_score_part_helper_methods(self): self.assertEqual(suggestion.get_score_type(), 'translation') self.assertEqual(suggestion.get_score_sub_type(), 'Algebra') - def test_validate_suggestion_type(self): + def test_validate_suggestion_type(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1024,13 +1105,13 @@ def test_validate_suggestion_type(self): suggestion.validate() suggestion.suggestion_type = 'invalid_suggestion_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected suggestion_type to be among allowed choices' ): suggestion.validate() - def test_validate_target_type(self): + def test_validate_target_type(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = 
suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1044,13 +1125,13 @@ def test_validate_target_type(self): suggestion.validate() suggestion.target_type = 'invalid_target_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected target_type to be among allowed choices' ): suggestion.validate() - def test_validate_target_id(self): + def test_validate_target_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1063,13 +1144,16 @@ def test_validate_target_id(self): suggestion.validate() - suggestion.target_id = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + suggestion.target_id = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected target_id to be a string' ): suggestion.validate() - def test_validate_target_version_at_submission(self): + def test_validate_target_version_at_submission(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1082,14 +1166,17 @@ def test_validate_target_version_at_submission(self): suggestion.validate() - suggestion.target_version_at_submission = 'invalid_version' - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.target_version_at_submission = 'invalid_version' # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected target_version_at_submission to be an int' ): suggestion.validate() - def test_validate_status(self): + def test_validate_status(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1103,12 +1190,12 @@ def test_validate_status(self): suggestion.validate() suggestion.status = 'invalid_status' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected status to be among allowed choices' ): suggestion.validate() - def test_validate_author_id(self): + def test_validate_author_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1121,13 +1208,16 @@ def test_validate_author_id(self): suggestion.validate() - suggestion.author_id = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + suggestion.author_id = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected author_id to be a string' ): suggestion.validate() - def test_validate_author_id_format(self): + def test_validate_author_id_format(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1141,13 +1231,13 @@ def test_validate_author_id_format(self): suggestion.validate() suggestion.author_id = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected author_id to be in a valid user ID format.' 
): suggestion.validate() - def test_validate_final_reviewer_id(self): + def test_validate_final_reviewer_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1160,13 +1250,16 @@ def test_validate_final_reviewer_id(self): suggestion.validate() - suggestion.final_reviewer_id = 1 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + suggestion.final_reviewer_id = 1 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected final_reviewer_id to be a string' ): suggestion.validate() - def test_validate_final_reviewer_id_format(self): + def test_validate_final_reviewer_id_format(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1180,13 +1273,13 @@ def test_validate_final_reviewer_id_format(self): suggestion.validate() suggestion.final_reviewer_id = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected final_reviewer_id to be in a valid user ID format' ): suggestion.validate() - def test_validate_score_category(self): + def test_validate_score_category(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1199,13 +1292,16 @@ def test_validate_score_category(self): suggestion.validate() - suggestion.score_category = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.score_category = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected score_category to be a string' ): suggestion.validate() - def test_validate_score_category_format(self): + def test_validate_score_category_format(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1219,7 +1315,7 @@ def test_validate_score_category_format(self): suggestion.validate() suggestion.score_category = 'score.score_type.score_sub_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected score_category to be of the form' ' score_type.score_sub_type' @@ -1227,14 +1323,14 @@ def test_validate_score_category_format(self): suggestion.validate() suggestion.score_category = 'invalid_score_category' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected score_category to be of the form' ' score_type.score_sub_type' ): suggestion.validate() - def test_validate_score_type(self): + def test_validate_score_type(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1248,14 +1344,14 @@ def test_validate_score_type(self): suggestion.validate() suggestion.score_category = 'invalid_score_type.score_sub_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the first part of score_category to be among allowed' ' choices' ): suggestion.validate() - def test_validate_change(self): + def test_validate_change(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1268,13 +1364,16 @@ def test_validate_change(self): suggestion.validate() - suggestion.change = {} - with 
self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + suggestion.change = {} # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected change to be an ExplorationChange' ): suggestion.validate() - def test_validate_score_type_translation(self): + def test_validate_score_type_translation(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1288,13 +1387,13 @@ def test_validate_score_type_translation(self): suggestion.validate() suggestion.score_category = 'question.score_sub_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the first part of score_category to be translation' ): suggestion.validate() - def test_validate_change_cmd(self): + def test_validate_change_cmd(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1308,13 +1407,36 @@ def test_validate_change_cmd(self): suggestion.validate() suggestion.change.cmd = 'invalid_cmd' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected cmd to be add_written_translation' ): suggestion.validate() + def test_validate_translation_html_rte_tags(self) -> None: + expected_suggestion_dict = self.suggestion_dict + suggestion = suggestion_registry.SuggestionTranslateContent( + expected_suggestion_dict['suggestion_id'], + expected_suggestion_dict['target_id'], + expected_suggestion_dict['target_version_at_submission'], + expected_suggestion_dict['status'], self.author_id, + self.reviewer_id, expected_suggestion_dict['change'], + expected_suggestion_dict['score_category'], + 
expected_suggestion_dict['language_code'], False, self.fake_date) + + suggestion.validate() + + suggestion.change.translation_html = ( + '') + + with self.assertRaisesRegex( + utils.ValidationError, + 'Image tag does not have \'alt-with-value\' attribute.' + ): + suggestion.validate() + def test_validate_language_code_fails_when_language_codes_do_not_match( - self): + self + ) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1331,7 +1453,7 @@ def test_validate_language_code_fails_when_language_codes_do_not_match( suggestion.language_code = 'wrong_language_code' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected language_code to be %s, ' 'received wrong_language_code' % expected_language_code @@ -1339,7 +1461,8 @@ def test_validate_language_code_fails_when_language_codes_do_not_match( suggestion.validate() def test_validate_language_code_fails_when_language_code_is_set_to_none( - self): + self + ) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1351,14 +1474,19 @@ def test_validate_language_code_fails_when_language_code_is_set_to_none( expected_suggestion_dict['language_code'], False, self.fake_date) suggestion.validate() - suggestion.language_code = None + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.language_code = None # type: ignore[assignment] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'language_code cannot be None' ): suggestion.validate() - def test_validate_change_with_invalid_language_code_fails_validation(self): + def test_validate_change_with_invalid_language_code_fails_validation( + self + ) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( expected_suggestion_dict['suggestion_id'], @@ -1372,12 +1500,12 @@ def test_validate_change_with_invalid_language_code_fails_validation(self): suggestion.validate() suggestion.change.language_code = 'invalid_code' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid language_code: invalid_code' ): suggestion.validate() - def test_pre_accept_validate_state_name(self): + def test_pre_accept_validate_state_name(self) -> None: self.save_new_default_exploration('exp1', self.author_id) expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( @@ -1391,10 +1519,6 @@ def test_pre_accept_validate_state_name(self): exp_services.update_exploration( self.author_id, 'exp1', [ - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'State A', - }), exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, @@ -1402,24 +1526,28 @@ def test_pre_accept_validate_state_name(self): 'content_id': 'content', 'html': '

    This is a content.

    ' }, - 'state_name': 'State A', + 'state_name': 'Introduction', }) ], 'Added state') - suggestion.change.state_name = 'State A' + + suggestion.change.state_name = 'Introduction' suggestion.pre_accept_validate() suggestion.change.state_name = 'invalid_state_name' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected invalid_state_name to be a valid state name' ): suggestion.pre_accept_validate() - def test_accept_suggestion_adds_translation_in_exploration(self): - self.save_new_default_exploration('exp1', self.author_id) - exploration = exp_fetchers.get_exploration_by_id('exp1') - self.assertEqual(exploration.get_translation_counts(), {}) + def test_accept_suggestion_adds_translation_in_exploration(self) -> None: + exp = self.save_new_default_exploration('exp1', self.author_id) + translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, + exp.id, exp.version)) + self.assertEqual(len(translations), 0) suggestion = suggestion_registry.SuggestionTranslateContent( self.suggestion_dict['suggestion_id'], self.suggestion_dict['target_id'], @@ -1432,15 +1560,23 @@ def test_accept_suggestion_adds_translation_in_exploration(self): suggestion.accept( 'Accepted suggestion by translator: Add translation change.') - exploration = exp_fetchers.get_exploration_by_id('exp1') - self.assertEqual(exploration.get_translation_counts(), { - 'hi': 1 - }) - - def test_accept_suggestion_with_set_of_string_adds_translation(self): - self.save_new_default_exploration('exp1', self.author_id) - exploration = exp_fetchers.get_exploration_by_id('exp1') - self.assertEqual(exploration.get_translation_counts(), {}) + translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, + exp.id, exp.version)) + self.assertEqual(len(translations), 1) + self.assertEqual(translations[0].language_code, 'hi') + 
self.assertEqual(translations[0].get_translation_count(), 1) + + def test_accept_suggestion_with_set_of_string_adds_translation( + self + ) -> None: + exp = self.save_new_default_exploration('exp1', self.author_id) + translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, + exp.id, exp.version)) + self.assertEqual(len(translations), 0) suggestion = suggestion_registry.SuggestionTranslateContent( self.suggestion_dict['suggestion_id'], self.suggestion_dict['target_id'], @@ -1462,16 +1598,24 @@ def test_accept_suggestion_with_set_of_string_adds_translation(self): suggestion.accept( 'Accepted suggestion by translator: Add translation change.') - exploration = exp_fetchers.get_exploration_by_id('exp1') - self.assertEqual(exploration.get_translation_counts(), { - 'hi': 1 - }) - - def test_accept_suggestion_with_psedonymous_author_adds_translation(self): - self.save_new_default_exploration('exp1', self.author_id) - - exploration = exp_fetchers.get_exploration_by_id('exp1') - self.assertEqual(exploration.get_translation_counts(), {}) + translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, exp.id, exp.version + ) + ) + self.assertEqual(len(translations), 1) + self.assertEqual(translations[0].language_code, 'hi') + self.assertEqual(translations[0].get_translation_count(), 1) + + def test_accept_suggestion_with_psedonymous_author_adds_translation( + self + ) -> None: + exp = self.save_new_default_exploration('exp1', self.author_id) + translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, exp.id, exp.version) + ) + self.assertEqual(len(translations), 0) expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionTranslateContent( @@ -1486,13 +1630,16 @@ def test_accept_suggestion_with_psedonymous_author_adds_translation(self): 
suggestion.accept( 'Accepted suggestion by translator: Add translation change.') - exploration = exp_fetchers.get_exploration_by_id('exp1') - - self.assertEqual(exploration.get_translation_counts(), { - 'hi': 1 - }) + translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, exp.id, exp.version + ) + ) + self.assertEqual(len(translations), 1) + self.assertEqual(translations[0].language_code, 'hi') + self.assertEqual(translations[0].get_translation_count(), 1) - def test_get_all_html_content_strings(self): + def test_get_all_html_content_strings(self) -> None: suggestion = suggestion_registry.SuggestionTranslateContent( self.suggestion_dict['suggestion_id'], self.suggestion_dict['target_id'], @@ -1508,7 +1655,7 @@ def test_get_all_html_content_strings(self): u'

    This is translated html.

    ', u'

    This is a content.

    '] self.assertEqual(expected_outcome_list, actual_outcome_list) - def test_get_all_html_content_strings_for_content_lists(self): + def test_get_all_html_content_strings_for_content_lists(self) -> None: suggestion = suggestion_registry.SuggestionTranslateContent( self.suggestion_dict['suggestion_id'], self.suggestion_dict['target_id'], @@ -1533,7 +1680,9 @@ def test_get_all_html_content_strings_for_content_lists(self): 'translated text1', 'translated text2', 'text1', 'text2'] self.assertEqual(expected_outcome_list, actual_outcome_list) - def test_get_target_entity_html_strings_returns_expected_strings(self): + def test_get_target_entity_html_strings_returns_expected_strings( + self + ) -> None: suggestion = suggestion_registry.SuggestionTranslateContent( self.suggestion_dict['suggestion_id'], self.suggestion_dict['target_id'], @@ -1547,7 +1696,7 @@ def test_get_target_entity_html_strings_returns_expected_strings(self): expected_outcome_list = [self.suggestion_dict['change']['content_html']] self.assertEqual(expected_outcome_list, actual_outcome_list) - def test_convert_html_in_suggestion_change(self): + def test_convert_html_in_suggestion_change(self) -> None: html_content = ( '

    Value

    ') @@ -1579,22 +1728,33 @@ def test_convert_html_in_suggestion_change(self): suggestion.change.content_html, expected_html_content) +TestChangeDictType = Dict[ + str, + Union[ + str, + float, + Dict[str, Union[state_domain.StateDict, int, str, List[str]]] + ] +] + + class SuggestionAddQuestionTest(test_utils.GenericTestBase): """Tests for the SuggestionAddQuestion class.""" - AUTHOR_EMAIL = 'author@example.com' - REVIEWER_EMAIL = 'reviewer@example.com' - ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com' - fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) + AUTHOR_EMAIL: Final = 'author@example.com' + REVIEWER_EMAIL: Final = 'reviewer@example.com' + ASSIGNED_REVIEWER_EMAIL: Final = 'assigned_reviewer@example.com' + fake_date: datetime.datetime = datetime.datetime(2016, 4, 10, 0, 0, 0, 0) - def setUp(self): - super(SuggestionAddQuestionTest, self).setUp() + def setUp(self) -> None: + super().setUp() + content_id_generator = translation_domain.ContentIdGenerator() self.signup(self.AUTHOR_EMAIL, 'author') self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.signup(self.REVIEWER_EMAIL, 'reviewer') self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL) - self.suggestion_dict = { + self.suggestion_dict: suggestion_registry.BaseSuggestionDict = { 'suggestion_id': 'skill1.thread1', 'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION, 'target_type': feconf.ENTITY_TYPE_SKILL, @@ -1607,12 +1767,14 @@ def setUp(self): 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + 
content_id_generator.next_content_id_index) }, 'skill_id': 'skill_1', 'skill_difficulty': 0.3, @@ -1623,7 +1785,7 @@ def setUp(self): 'edited_by_reviewer': False } - def test_create_suggestion_add_question(self): + def test_create_suggestion_add_question(self) -> None: expected_suggestion_dict = self.suggestion_dict observed_suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1638,7 +1800,7 @@ def test_create_suggestion_add_question(self): self.assertDictEqual( observed_suggestion.to_dict(), expected_suggestion_dict) - def test_validate_suggestion_edit_state_content(self): + def test_validate_suggestion_edit_state_content(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1652,7 +1814,7 @@ def test_validate_suggestion_edit_state_content(self): suggestion.validate() - def test_get_score_part_helper_methods(self): + def test_get_score_part_helper_methods(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1667,7 +1829,7 @@ def test_get_score_part_helper_methods(self): self.assertEqual(suggestion.get_score_type(), 'question') self.assertEqual(suggestion.get_score_sub_type(), 'topic_1') - def test_validate_score_type(self): + def test_validate_score_type(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1683,13 +1845,13 @@ def test_validate_score_type(self): suggestion.score_category = 'content.score_sub_type' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the first part of score_category to be "question"' ): suggestion.validate() - def test_validate_change_type(self): + def test_validate_change_type(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1703,15 +1865,18 @@ def test_validate_change_type(self): suggestion.validate() - 
suggestion.change = 'invalid_change' + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + suggestion.change = 'invalid_change' # type: ignore[assignment] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected change to be an instance of QuestionSuggestionChange' ): suggestion.validate() - def test_validate_change_cmd(self): + def test_validate_change_cmd(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1727,12 +1892,12 @@ def test_validate_change_cmd(self): suggestion.change.cmd = None - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected change to contain cmd' ): suggestion.validate() - def test_validate_change_cmd_type(self): + def test_validate_change_cmd_type(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1748,13 +1913,13 @@ def test_validate_change_cmd_type(self): suggestion.change.cmd = 'invalid_cmd' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected cmd to be create_new_fully_specified_question' ): suggestion.validate() - def test_validate_change_question_dict(self): + def test_validate_change_question_dict(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1768,14 +1933,17 @@ def test_validate_change_question_dict(self): suggestion.validate() - suggestion.change.question_dict = None + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.change.question_dict = None # type: ignore[assignment] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected change to contain question_dict' ): suggestion.validate() - def test_validate_change_question_state_data_schema_version(self): + def test_validate_change_question_state_data_schema_version(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1793,18 +1961,21 @@ def test_validate_change_question_state_data_schema_version(self): # directly since pylint produces unsupported-assignment-operation # error. The detailed analysis for the same can be checked # in this issue: https://github.com/oppia/oppia/issues/7008. - question_dict = suggestion.change.question_dict + assert isinstance(suggestion.change.question_dict, dict) + question_dict: question_domain.QuestionDict = ( + suggestion.change.question_dict + ) question_dict['question_state_data_schema_version'] = 0 suggestion.change.question_dict = question_dict - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected question state schema version to be %s, ' 'received 0' % feconf.CURRENT_STATE_SCHEMA_VERSION ): suggestion.validate() - def test_validate_change_skill_difficulty_none(self): + def test_validate_change_skill_difficulty_none(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( expected_suggestion_dict['suggestion_id'], @@ -1816,14 +1987,17 @@ def test_validate_change_skill_difficulty_none(self): expected_suggestion_dict['language_code'], False, self.fake_date) suggestion.validate() - suggestion.change.skill_difficulty = None + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.change.skill_difficulty = None # type: ignore[assignment] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected change to contain skill_difficulty' ): suggestion.validate() - def test_validate_change_skill_difficulty_invalid_value(self): + def test_validate_change_skill_difficulty_invalid_value(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( expected_suggestion_dict['suggestion_id'], @@ -1837,13 +2011,13 @@ def test_validate_change_skill_difficulty_invalid_value(self): suggestion.change.skill_difficulty = 0.4 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected change skill_difficulty to be one of ' ): suggestion.validate() - def test_pre_accept_validate_change_skill_id(self): + def test_pre_accept_validate_change_skill_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1861,14 +2035,17 @@ def test_pre_accept_validate_change_skill_id(self): suggestion.pre_accept_validate() - suggestion.change.skill_id = None + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.change.skill_id = None # type: ignore[assignment] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected change to contain skill_id' ): suggestion.pre_accept_validate() - def test_pre_accept_validate_change_invalid_skill_id(self): + def test_pre_accept_validate_change_invalid_skill_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1888,26 +2065,12 @@ def test_pre_accept_validate_change_invalid_skill_id(self): suggestion.change.skill_id = skill_services.get_new_skill_id() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The skill with the given id doesn\'t exist.' ): suggestion.pre_accept_validate() - def test_get_change_list_for_accepting_suggestion(self): - expected_suggestion_dict = self.suggestion_dict - - suggestion = suggestion_registry.SuggestionAddQuestion( - expected_suggestion_dict['suggestion_id'], - expected_suggestion_dict['target_id'], - expected_suggestion_dict['target_version_at_submission'], - expected_suggestion_dict['status'], self.author_id, - self.reviewer_id, expected_suggestion_dict['change'], - expected_suggestion_dict['score_category'], - expected_suggestion_dict['language_code'], False, self.fake_date) - - self.assertIsNone(suggestion.get_change_list_for_accepting_suggestion()) - - def test_populate_old_value_of_change(self): + def test_populate_old_value_of_change(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1919,9 +2082,13 @@ def test_populate_old_value_of_change(self): expected_suggestion_dict['score_category'], expected_suggestion_dict['language_code'], False, self.fake_date) - self.assertIsNone(suggestion.populate_old_value_of_change()) + # Here we use MyPy ignore because method `populate_old_value_of_change` + # does not return any value but for testing purpose we are still + # comparing it's 
return value with None which causes MyPy to throw + # error. Thus to avoid the error, we used ignore here. + self.assertIsNone(suggestion.populate_old_value_of_change()) # type: ignore[func-returns-value] - def test_cannot_accept_suggestion_with_invalid_skill_id(self): + def test_cannot_accept_suggestion_with_invalid_skill_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1935,13 +2102,13 @@ def test_cannot_accept_suggestion_with_invalid_skill_id(self): suggestion.change.skill_id = skill_services.get_new_skill_id() - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The skill with the given id doesn\'t exist.' ): suggestion.accept('commit message') - def test_pre_update_validate_change_cmd(self): + def test_pre_update_validate_change_cmd(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1959,15 +2126,18 @@ def test_pre_update_validate_change_cmd(self): 'new_value': 'bn', 'old_value': 'en' } - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex( utils.ValidationError, 'The new change cmd must be equal to ' 'create_new_fully_specified_question' ): suggestion.pre_update_validate( - question_domain.QuestionChange(change)) + question_domain.QuestionChange(change)) # type: ignore[arg-type] - def test_pre_update_validate_change_skill_id(self): + def test_pre_update_validate_change_skill_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( @@ -1979,34 +2149,43 @@ def test_pre_update_validate_change_skill_id(self): expected_suggestion_dict['score_category'], expected_suggestion_dict['language_code'], False, self.fake_date) - change = { + content_id_generator = translation_domain.ContentIdGenerator() + change: ChangeType = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( - feconf.CURRENT_STATE_SCHEMA_VERSION) + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': 'skill_2' } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The new change skill_id must be equal to skill_1' ): suggestion.pre_update_validate( - question_domain.QuestionChange(change)) + question_domain.CreateNewFullySpecifiedQuestionCmd( + change + ) + ) - def test_pre_update_validate_complains_if_nothing_changed(self): - change = { + def test_pre_update_validate_complains_if_nothing_changed(self) -> None: + content_id_generator = translation_domain.ContentIdGenerator() + change: ChangeType = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', 
content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( - feconf.CURRENT_STATE_SCHEMA_VERSION) + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': 'skill_1', 'skill_difficulty': 0.3 @@ -2016,38 +2195,48 @@ def test_pre_update_validate_complains_if_nothing_changed(self): 'exploration.exp1.thread1', 'exp1', 1, suggestion_models.STATUS_ACCEPTED, self.author_id, self.reviewer_id, change, - 'question.topic_1', 'en', self.fake_date) + 'question.topic_1', 'en', False, self.fake_date) - new_change = { + content_id_generator = translation_domain.ContentIdGenerator() + new_change: ChangeType = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( - feconf.CURRENT_STATE_SCHEMA_VERSION) + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': 'skill_1', 'skill_difficulty': 0.3 } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'At least one of the new skill_difficulty or question_dict ' 'should be changed.'): suggestion.pre_update_validate( - question_domain.QuestionSuggestionChange(new_change)) + question_domain.CreateNewFullySpecifiedQuestionSuggestionCmd( + new_change + ) + ) def test_pre_update_validate_accepts_a_change_in_skill_difficulty_only( - self): - change = { + self + ) -> None: + content_id_generator = translation_domain.ContentIdGenerator() + change: ChangeType = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 
'question_state_data_schema_version': ( - feconf.CURRENT_STATE_SCHEMA_VERSION) + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': 'skill_1', 'skill_difficulty': 0.3 @@ -2057,34 +2246,51 @@ def test_pre_update_validate_accepts_a_change_in_skill_difficulty_only( 'exploration.exp1.thread1', 'exp1', 1, suggestion_models.STATUS_ACCEPTED, self.author_id, self.reviewer_id, change, - 'question.topic_1', 'en', self.fake_date) + 'question.topic_1', 'en', False, self.fake_date) - new_change = { + content_id_generator = translation_domain.ContentIdGenerator() + new_change: ChangeType = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( - feconf.CURRENT_STATE_SCHEMA_VERSION) + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': 'skill_1', 'skill_difficulty': 0.6 } + # Here we use MyPy ignore because method `pre_update_validate` does not + # return any value but for testing purpose we are still comparing it's + # return value with None which causes MyPy to throw error. Thus to avoid + # the error, we used ignore here. 
self.assertEqual( - suggestion.pre_update_validate( - question_domain.QuestionSuggestionChange(new_change)), None) + suggestion.pre_update_validate( # type: ignore[func-returns-value] + question_domain.CreateNewFullySpecifiedQuestionSuggestionCmd( + new_change + ) + ), + None + ) - def test_pre_update_validate_accepts_a_change_in_state_data_only(self): - change = { + def test_pre_update_validate_accepts_a_change_in_state_data_only( + self + ) -> None: + content_id_generator = translation_domain.ContentIdGenerator() + change: ChangeType = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( - feconf.CURRENT_STATE_SCHEMA_VERSION) + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': 'skill_1', 'skill_difficulty': 0.3 @@ -2094,26 +2300,38 @@ def test_pre_update_validate_accepts_a_change_in_state_data_only(self): 'exploration.exp1.thread1', 'exp1', 1, suggestion_models.STATUS_ACCEPTED, self.author_id, self.reviewer_id, change, - 'question.topic_1', 'en', self.fake_date) + 'question.topic_1', 'en', False, self.fake_date) - new_change = { + content_id_generator = translation_domain.ContentIdGenerator() + new_change: ChangeType = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'hi', 'question_state_data_schema_version': ( - feconf.CURRENT_STATE_SCHEMA_VERSION) + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': 'skill_1', 'skill_difficulty': 0.3 } + # Here we use MyPy ignore because method 
`pre_update_validate` does not + # return any value but for testing purpose we are still comparing it's + # return value with None which causes MyPy to throw error. Thus to avoid + # the error, we used ignore here. self.assertEqual( - suggestion.pre_update_validate( - question_domain.QuestionSuggestionChange(new_change)), None) + suggestion.pre_update_validate( # type: ignore[func-returns-value] + question_domain.CreateNewFullySpecifiedQuestionSuggestionCmd( + new_change + ) + ), + None + ) - def test_validate_author_id(self): + def test_validate_author_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( expected_suggestion_dict['suggestion_id'], @@ -2126,12 +2344,15 @@ def test_validate_author_id(self): suggestion.validate() - suggestion.author_id = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.author_id = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected author_id to be a string'): suggestion.validate() - def test_validate_author_id_format(self): + def test_validate_author_id_format(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( expected_suggestion_dict['suggestion_id'], @@ -2145,12 +2366,12 @@ def test_validate_author_id_format(self): suggestion.validate() suggestion.author_id = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected author_id to be in a valid user ID format.'): suggestion.validate() - def test_validate_final_reviewer_id(self): + def test_validate_final_reviewer_id(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( expected_suggestion_dict['suggestion_id'], @@ -2163,12 +2384,15 @@ def test_validate_final_reviewer_id(self): suggestion.validate() - suggestion.final_reviewer_id = 1 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ suggestion.final_reviewer_id = 1 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected final_reviewer_id to be a string'): suggestion.validate() - def test_validate_final_reviewer_id_format(self): + def test_validate_final_reviewer_id_format(self) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( expected_suggestion_dict['suggestion_id'], @@ -2182,13 +2406,14 @@ def test_validate_final_reviewer_id_format(self): suggestion.validate() suggestion.final_reviewer_id = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected final_reviewer_id to be in a valid user ID format'): suggestion.validate() def test_validate_language_code_fails_when_language_codes_do_not_match( - self): + self + ) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( expected_suggestion_dict['suggestion_id'], @@ -2198,13 +2423,22 @@ def test_validate_language_code_fails_when_language_codes_do_not_match( self.reviewer_id, expected_suggestion_dict['change'], expected_suggestion_dict['score_category'], expected_suggestion_dict['language_code'], False, self.fake_date) + # Here we use cast because the value of `question_dict` key is a + # Union of all allowed change dict types. So, to narrow down the type + # to QuestionDict, we used assert here. + assert isinstance( + expected_suggestion_dict['change']['question_dict'], dict + ) expected_question_dict = ( - expected_suggestion_dict['change']['question_dict'] + cast( + question_domain.QuestionDict, + expected_suggestion_dict['change']['question_dict'] + ) ) suggestion.validate() expected_question_dict['language_code'] = 'wrong_language_code' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected question language_code.wrong_language_code. to be same ' 'as suggestion language_code.en.' 
@@ -2212,7 +2446,8 @@ def test_validate_language_code_fails_when_language_codes_do_not_match( suggestion.validate() def test_validate_language_code_fails_when_language_code_is_set_to_none( - self): + self + ) -> None: expected_suggestion_dict = self.suggestion_dict suggestion = suggestion_registry.SuggestionAddQuestion( expected_suggestion_dict['suggestion_id'], @@ -2224,14 +2459,17 @@ def test_validate_language_code_fails_when_language_code_is_set_to_none( expected_suggestion_dict['language_code'], False, self.fake_date) suggestion.validate() - suggestion.language_code = None + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + suggestion.language_code = None # type: ignore[assignment] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected language_code to be en, received None'): suggestion.validate() - def test_get_all_html_conztent_strings(self): + def test_get_all_html_content_strings(self) -> None: suggestion = suggestion_registry.SuggestionAddQuestion( self.suggestion_dict['suggestion_id'], self.suggestion_dict['target_id'], @@ -2239,14 +2477,14 @@ def test_get_all_html_conztent_strings(self): self.suggestion_dict['status'], self.author_id, self.reviewer_id, self.suggestion_dict['change'], self.suggestion_dict['score_category'], - self.suggestion_dict['language_code'], self.fake_date) + self.suggestion_dict['language_code'], False, self.fake_date) actual_outcome_list = suggestion.get_all_html_content_strings() expected_outcome_list = [ - u'', u'

    This is a hint.

    ', u'

    This is a solution.

    ', u''] + u'', u'', u'

    This is a hint.

    ', u'

    This is a solution.

    '] self.assertEqual(expected_outcome_list, actual_outcome_list) - def test_convert_html_in_suggestion_change(self): + def test_convert_html_in_suggestion_change(self) -> None: html_content = ( '

    Value

    ') @@ -2258,6 +2496,7 @@ def test_convert_html_in_suggestion_change(self): answer_group = { 'outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_1', 'html': '' @@ -2291,15 +2530,6 @@ def test_convert_html_in_suggestion_change(self): 'solution': {} } }, - 'written_translations': { - 'translations_mapping': { - 'content_1': {}, - 'feedback_1': {}, - 'feedback_2': {}, - 'hint_1': {}, - 'solution': {} - } - }, 'interaction': { 'answer_groups': [answer_group], 'confirmed_unclassified_answers': [], @@ -2316,6 +2546,7 @@ def test_convert_html_in_suggestion_change(self): }, 'default_outcome': { 'dest': None, + 'dest_if_really_stuck': None, 'feedback': { 'content_id': 'feedback_2', 'html': 'Correct Answer' @@ -2346,7 +2577,7 @@ def test_convert_html_in_suggestion_change(self): 'classifier_model_id': None } - suggestion_dict = { + suggestion_dict: suggestion_registry.BaseSuggestionDict = { 'suggestion_id': 'skill1.thread1', 'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION, 'target_type': feconf.ENTITY_TYPE_SKILL, @@ -2370,7 +2601,8 @@ def test_convert_html_in_suggestion_change(self): }, 'score_category': 'question.skill1', 'language_code': 'en', - 'last_updated': utils.get_time_in_millisecs(self.fake_date) + 'last_updated': utils.get_time_in_millisecs(self.fake_date), + 'edited_by_reviewer': False } suggestion = suggestion_registry.SuggestionAddQuestion( suggestion_dict['suggestion_id'], suggestion_dict['target_id'], @@ -2380,20 +2612,26 @@ def test_convert_html_in_suggestion_change(self): suggestion_dict['language_code'], False, self.fake_date) suggestion.convert_html_in_suggestion_change( html_validation_service.add_math_content_to_math_rte_components) + # Ruling out the possibility of any other type for mypy type checking. 
+ assert isinstance(suggestion.change.question_dict, dict) + question_dict: question_domain.QuestionDict = ( + suggestion.change.question_dict + ) self.assertEqual( - suggestion.change.question_dict['question_state_data']['content'][ + question_dict['question_state_data']['content'][ 'html'], expected_html_content) - def test_accept_suggestion_with_images(self): + def test_accept_suggestion_with_images(self) -> None: html_content = ( '

    Value

    ' '') + content_id_generator = translation_domain.ContentIdGenerator() question_state_dict = self._create_valid_question_data( - 'default_state').to_dict() + 'default_state', content_id_generator).to_dict() question_state_dict['content']['html'] = html_content - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() @@ -2403,7 +2641,7 @@ def test_accept_suggestion_with_images(self): raw_image, 'image', False) self.save_new_skill('skill1', self.author_id, description='description') - suggestion_dict = { + suggestion_dict: suggestion_registry.BaseSuggestionDict = { 'suggestion_id': 'skill1.thread1', 'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION, 'target_type': feconf.ENTITY_TYPE_SKILL, @@ -2420,6 +2658,154 @@ def test_accept_suggestion_with_images(self): 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], + 'inapplicable_skill_misconception_ids': [], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) + }, + 'skill_id': 'skill1', + 'skill_difficulty': 0.3, + }, + 'score_category': 'question.skill1', + 'language_code': 'en', + 'last_updated': utils.get_time_in_millisecs(self.fake_date), + 'edited_by_reviewer': False + } + suggestion = suggestion_registry.SuggestionAddQuestion( + suggestion_dict['suggestion_id'], suggestion_dict['target_id'], + suggestion_dict['target_version_at_submission'], + suggestion_dict['status'], self.author_id, self.reviewer_id, + suggestion_dict['change'], suggestion_dict['score_category'], + suggestion_dict['language_code'], False, self.fake_date) + suggestion.accept('commit_message') + + def test_accept_suggestion_with_image_region_interactions(self) -> None: + with utils.open_file( + os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', + encoding=None) as f: + original_image_content = f.read() + 
fs_services.save_original_and_compressed_versions_of_image( + 'image.png', 'question_suggestions', 'skill1', + original_image_content, 'image', True) + + image_and_region_ca_dict: domain.ImageAndRegionDict = { + 'imagePath': 'image.png', + 'labeledRegions': [ + { + 'label': 'Region1', + 'region': { + 'regionType': 'Rectangle', + 'area': [ + [ + 0.2644628099173554, + 0.21807065217391305 + ], + [ + 0.9201101928374655, + 0.8847373188405797 + ] + ] + } + } + ] + } + + # Here, the expected type for `solution` key is SolutionDict but + # for testing purposes here we are providing None which causes + # MyPy to throw `Incompatible types` error. Thus to avoid the + # error, we used ignore here. + question_state_dict: state_domain.StateDict = { + 'content': { + 'html': '

    Text

    ', + 'content_id': 'content_0' + }, + 'classifier_model_id': None, + 'linked_skill_id': None, + 'interaction': { + 'answer_groups': [ + { + 'rule_specs': [ + { + 'rule_type': 'IsInRegion', + 'inputs': {'x': 'Region1'} + } + ], + 'outcome': { + 'dest': None, + 'dest_if_really_stuck': None, + 'feedback': { + 'html': '

    assas

    ', + 'content_id': 'feedback_2' + }, + 'labelled_as_correct': True, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'training_data': [], + 'tagged_skill_misconception_id': None + } + ], + 'confirmed_unclassified_answers': [], + 'customization_args': { + 'imageAndRegions': { + 'value': image_and_region_ca_dict + }, + 'highlightRegionsOnHover': { + 'value': False + } + }, + 'default_outcome': { + 'dest': None, + 'dest_if_really_stuck': None, + 'feedback': { + 'html': '

    wer

    ', + 'content_id': 'default_outcome_1' + }, + 'labelled_as_correct': False, + 'param_changes': [], + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None + }, + 'hints': [ + { + 'hint_content': { + 'html': '

    assaas

    ', + 'content_id': 'hint_3' + } + } + ], + 'id': 'ImageClickInput', 'solution': None + }, + 'param_changes': [], + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'content_0': {}, + 'default_outcome_1': {}, + 'feedback_2': {}, + 'hint_3': {} + } + }, + 'solicit_answer_details': False, + 'card_is_checkpoint': False, + } + suggestion_dict: suggestion_registry.BaseSuggestionDict = { + 'suggestion_id': 'skill1.thread1', + 'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION, + 'target_type': feconf.ENTITY_TYPE_SKILL, + 'target_id': 'skill1', + 'target_version_at_submission': 1, + 'status': suggestion_models.STATUS_ACCEPTED, + 'author_name': 'author', + 'final_reviewer_id': self.reviewer_id, + 'change': { + 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, + 'question_dict': { + 'question_state_data': question_state_dict, + 'language_code': 'en', + 'question_state_data_schema_version': ( + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'linked_skill_ids': ['skill1'], + 'next_content_id_index': 4, 'inapplicable_skill_misconception_ids': [] }, 'skill_id': 'skill1', @@ -2427,21 +2813,34 @@ def test_accept_suggestion_with_images(self): }, 'score_category': 'question.skill1', 'language_code': 'en', - 'last_updated': utils.get_time_in_millisecs(self.fake_date) + 'last_updated': utils.get_time_in_millisecs(self.fake_date), + 'edited_by_reviewer': False } + self.save_new_skill( + 'skill1', self.author_id, description='description') suggestion = suggestion_registry.SuggestionAddQuestion( suggestion_dict['suggestion_id'], suggestion_dict['target_id'], suggestion_dict['target_version_at_submission'], suggestion_dict['status'], self.author_id, self.reviewer_id, suggestion_dict['change'], suggestion_dict['score_category'], suggestion_dict['language_code'], False, self.fake_date) + suggestion.accept('commit_message') - def test_contructor_updates_state_shema_in_change_cmd(self): + question = question_services.get_questions_by_skill_ids( + 1, ['skill1'], False)[0] + 
destination_fs = fs_services.GcsFileSystem( + feconf.ENTITY_TYPE_QUESTION, question.id) + self.assertTrue(destination_fs.isfile('image/%s' % 'image.png')) + self.assertEqual( + suggestion.status, + suggestion_models.STATUS_ACCEPTED) + + def test_contructor_updates_state_shema_in_change_cmd(self) -> None: score_category = ( suggestion_models.SCORE_TYPE_QUESTION + suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id') - change = { + change: TestChangeDictType = { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), @@ -2455,23 +2854,29 @@ def test_contructor_updates_state_shema_in_change_cmd(self): 'skill_id': 'skill_id', 'skill_difficulty': 0.3 } + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(change['question_dict'], dict) self.assertEqual( change['question_dict']['question_state_data_schema_version'], 27) suggestion = suggestion_registry.SuggestionAddQuestion( 'suggestionId', 'target_id', 1, suggestion_models.STATUS_IN_REVIEW, - self.author_id, None, change, score_category, 'en', False, - self.fake_date) + self.author_id, 'test_reviewer', change, score_category, 'en', + False, self.fake_date) + # Ruling out the possibility of any other type for mypy type checking. 
+ assert isinstance(suggestion.change.question_dict, dict) self.assertEqual( suggestion.change.question_dict[ 'question_state_data_schema_version'], feconf.CURRENT_STATE_SCHEMA_VERSION) - def test_contructor_raise_exception_for_invalid_state_shema_version(self): + def test_contructor_raise_exception_for_invalid_state_shema_version( + self + ) -> None: score_category = ( suggestion_models.SCORE_TYPE_QUESTION + suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id') - change = { + change: TestChangeDictType = { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), @@ -2485,280 +2890,44 @@ def test_contructor_raise_exception_for_invalid_state_shema_version(self): 'skill_id': 'skill_id', 'skill_difficulty': 0.3 } + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(change['question_dict'], dict) self.assertEqual( change['question_dict']['question_state_data_schema_version'], 23) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected state schema version to be in between 25' ): suggestion_registry.SuggestionAddQuestion( 'suggestionId', 'target_id', 1, - suggestion_models.STATUS_IN_REVIEW, self.author_id, None, - change, score_category, 'en', False, self.fake_date) - - -class MockInvalidVoiceoverApplication( - suggestion_registry.BaseVoiceoverApplication): - - def __init__(self): # pylint: disable=super-init-not-called - pass - - -class BaseVoiceoverApplicationUnitTests(test_utils.GenericTestBase): - """Tests for the BaseVoiceoverApplication class.""" - - def setUp(self): - super(BaseVoiceoverApplicationUnitTests, self).setUp() - self.base_voiceover_application = MockInvalidVoiceoverApplication() - - def test_base_class_init_raises_error(self): - with self.assertRaisesRegexp( - NotImplementedError, - 'Subclasses of BaseVoiceoverApplication should implement ' - '__init__.'): - suggestion_registry.BaseVoiceoverApplication() - - def test_base_class_accept_raises_error(self): - 
with self.assertRaisesRegexp( - NotImplementedError, - 'Subclasses of BaseVoiceoverApplication should implement accept.'): - self.base_voiceover_application.accept() - - def test_base_class_reject_raises_error(self): - with self.assertRaisesRegexp( - NotImplementedError, - 'Subclasses of BaseVoiceoverApplication should implement reject.'): - self.base_voiceover_application.reject() - - -class ExplorationVoiceoverApplicationUnitTest(test_utils.GenericTestBase): - """Tests for the ExplorationVoiceoverApplication class.""" - - def setUp(self): - super(ExplorationVoiceoverApplicationUnitTest, self).setUp() - self.signup('author@example.com', 'author') - self.author_id = self.get_user_id_from_email('author@example.com') - - self.signup('reviewer@example.com', 'reviewer') - self.reviewer_id = self.get_user_id_from_email('reviewer@example.com') - - self.voiceover_application = ( - suggestion_registry.ExplorationVoiceoverApplication( - 'application_id', 'exp_id', suggestion_models.STATUS_IN_REVIEW, - self.author_id, None, 'en', 'audio_file.mp3', '

    Content

    ', - None)) - - def test_validation_with_invalid_target_type_raise_exception(self): - self.voiceover_application.validate() - - self.voiceover_application.target_type = 'invalid_target' - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected target_type to be among allowed choices, ' - 'received invalid_target' - ): - self.voiceover_application.validate() - - def test_validation_with_invalid_target_id_raise_exception(self): - self.voiceover_application.validate() - - self.voiceover_application.target_id = 123 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected target_id to be a string' - ): - self.voiceover_application.validate() - - def test_validation_with_invalid_status_raise_exception(self): - self.voiceover_application.validate() - - self.voiceover_application.status = 'invalid_status' - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected status to be among allowed choices, ' - 'received invalid_status' - ): - self.voiceover_application.validate() - - def test_validation_with_invalid_author_id_raise_exception(self): - self.voiceover_application.validate() - - self.voiceover_application.author_id = 123 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected author_id to be a string' - ): - self.voiceover_application.validate() - - def test_validation_with_invalid_final_reviewer_id_raise_exception(self): - self.assertEqual( - self.voiceover_application.status, - suggestion_models.STATUS_IN_REVIEW) - self.assertEqual(self.voiceover_application.final_reviewer_id, None) - self.voiceover_application.validate() - - self.voiceover_application.final_reviewer_id = 123 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected final_reviewer_id to be None as the ' - 'voiceover application is not yet handled.' 
- ): - self.voiceover_application.validate() - - def test_validation_for_handled_application_with_invalid_final_review(self): - self.assertEqual( - self.voiceover_application.status, - suggestion_models.STATUS_IN_REVIEW) - self.assertEqual(self.voiceover_application.final_reviewer_id, None) - self.voiceover_application.validate() - - self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected final_reviewer_id to be a string' - ): - self.voiceover_application.validate() - - def test_validation_for_rejected_application_with_no_message(self): - self.assertEqual( - self.voiceover_application.status, - suggestion_models.STATUS_IN_REVIEW) - self.assertEqual(self.voiceover_application.rejection_message, None) - self.voiceover_application.validate() - - self.voiceover_application.final_reviewer_id = 'reviewer_id' - self.voiceover_application.status = suggestion_models.STATUS_REJECTED - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected rejection_message to be a string for a ' - 'rejected application' - ): - self.voiceover_application.validate() - - def test_validation_for_accepted_application_with_message(self): - self.assertEqual( - self.voiceover_application.status, - suggestion_models.STATUS_IN_REVIEW) - self.assertEqual(self.voiceover_application.rejection_message, None) - self.voiceover_application.validate() - - self.voiceover_application.final_reviewer_id = 'reviewer_id' - self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED - self.voiceover_application.rejection_message = 'Invalid message' - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected rejection_message to be None for the accepted ' - 'voiceover application, received Invalid message' - ): - self.voiceover_application.validate() - - def test_validation_with_invalid_language_code_type_raise_exception(self): - self.assertEqual(self.voiceover_application.language_code, 'en') - 
self.voiceover_application.validate() - - self.voiceover_application.language_code = 1 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected language_code to be a string' - ): - self.voiceover_application.validate() - - def test_validation_with_invalid_language_code_raise_exception(self): - self.assertEqual(self.voiceover_application.language_code, 'en') - self.voiceover_application.validate() - - self.voiceover_application.language_code = 'invalid language' - with self.assertRaisesRegexp( - utils.ValidationError, 'Invalid language_code: invalid language' - ): - self.voiceover_application.validate() - - def test_validation_with_invalid_filename_type_raise_exception(self): - self.assertEqual(self.voiceover_application.filename, 'audio_file.mp3') - self.voiceover_application.validate() - - self.voiceover_application.filename = 1 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected filename to be a string' - ): - self.voiceover_application.validate() - - def test_validation_with_invalid_content_type_raise_exception(self): - self.assertEqual(self.voiceover_application.content, '

    Content

    ') - self.voiceover_application.validate() - - self.voiceover_application.content = 1 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected content to be a string' - ): - self.voiceover_application.validate() - - def test_to_dict_returns_correct_dict(self): - self.voiceover_application.accept(self.reviewer_id) - expected_dict = { - 'voiceover_application_id': 'application_id', - 'target_type': 'exploration', - 'target_id': 'exp_id', - 'status': 'accepted', - 'author_name': 'author', - 'final_reviewer_name': 'reviewer', - 'language_code': 'en', - 'content': '

    Content

    ', - 'filename': 'audio_file.mp3', - 'rejection_message': None - } - self.assertEqual( - self.voiceover_application.to_dict(), expected_dict) - - def test_is_handled_property_returns_correct_value(self): - self.assertFalse(self.voiceover_application.is_handled) - - self.voiceover_application.accept(self.reviewer_id) - - self.assertTrue(self.voiceover_application.is_handled) - - def test_accept_voiceover_application(self): - self.assertEqual(self.voiceover_application.final_reviewer_id, None) - self.assertEqual(self.voiceover_application.status, 'review') - - self.voiceover_application.accept(self.reviewer_id) - - self.assertEqual( - self.voiceover_application.final_reviewer_id, self.reviewer_id) - self.assertEqual(self.voiceover_application.status, 'accepted') - - def test_reject_voiceover_application(self): - self.assertEqual(self.voiceover_application.final_reviewer_id, None) - self.assertEqual(self.voiceover_application.status, 'review') - - self.voiceover_application.reject(self.reviewer_id, 'rejection message') - - self.assertEqual( - self.voiceover_application.final_reviewer_id, self.reviewer_id) - self.assertEqual(self.voiceover_application.status, 'rejected') - self.assertEqual( - self.voiceover_application.rejection_message, 'rejection message') + suggestion_models.STATUS_IN_REVIEW, self.author_id, + 'test_reviewer', change, score_category, 'en', False, + self.fake_date) class CommunityContributionStatsUnitTests(test_utils.GenericTestBase): """Tests for the CommunityContributionStats class.""" - translation_reviewer_counts_by_lang_code = { + translation_reviewer_counts_by_lang_code: Dict[str, int] = { 'hi': 0, 'en': 1 } - translation_suggestion_counts_by_lang_code = { + translation_suggestion_counts_by_lang_code: Dict[str, int] = { 'fr': 6, 'en': 5 } - question_reviewer_count = 1 - question_suggestion_count = 4 + question_reviewer_count: int = 1 + question_suggestion_count: int = 4 - negative_count = -1 - non_integer_count = 'non_integer_count' - 
sample_language_code = 'en' - invalid_language_code = 'invalid' + negative_count: int = -1 + non_integer_count: str = 'non_integer_count' + sample_language_code: str = 'en' + invalid_language_code: str = 'invalid' - def _assert_community_contribution_stats_is_in_default_state(self): + def _assert_community_contribution_stats_is_in_default_state(self) -> None: """Checks if the community contribution stats is in its default state. """ @@ -2781,7 +2950,9 @@ def _assert_community_contribution_stats_is_in_default_state(self): self.assertEqual( community_contribution_stats.question_suggestion_count, 0) - def test_initial_object_with_valid_arguments_has_correct_properties(self): + def test_initial_object_with_valid_arguments_has_correct_properties( + self + ) -> None: community_contribution_stats = ( suggestion_registry.CommunityContributionStats( self.translation_reviewer_counts_by_lang_code, @@ -2815,7 +2986,8 @@ def test_initial_object_with_valid_arguments_has_correct_properties(self): ) def test_set_translation_reviewer_count_for_lang_code_updates_empty_dict( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -2836,7 +3008,8 @@ def test_set_translation_reviewer_count_for_lang_code_updates_empty_dict( ) def test_set_translation_reviewer_count_for_lang_code_updates_count_value( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -2861,7 +3034,8 @@ def test_set_translation_reviewer_count_for_lang_code_updates_count_value( ) def test_set_translation_reviewer_count_for_lang_code_adds_new_lang_key( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -2885,7 +3059,8 @@ def test_set_translation_reviewer_count_for_lang_code_adds_new_lang_key( ) def test_set_translation_suggestion_count_for_lang_code_updates_empty_dict( - self): + self + ) -> None: 
community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -2905,7 +3080,8 @@ def test_set_translation_suggestion_count_for_lang_code_updates_empty_dict( ) def test_set_translation_suggestion_count_for_lang_code_updates_count_value( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -2930,7 +3106,8 @@ def test_set_translation_suggestion_count_for_lang_code_updates_count_value( ) def test_set_translation_suggestion_count_for_lang_code_adds_new_lang_key( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -2954,7 +3131,8 @@ def test_set_translation_suggestion_count_for_lang_code_adds_new_lang_key( ) def test_get_translation_language_codes_that_need_reviewers_for_one_lang( - self): + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.set_translation_suggestion_count_for_language_code( self.sample_language_code, 1) @@ -2967,7 +3145,8 @@ def test_get_translation_language_codes_that_need_reviewers_for_one_lang( language_codes_that_need_reviewers, {self.sample_language_code}) def test_get_translation_language_codes_that_need_reviewers_for_multi_lang( - self): + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.set_translation_suggestion_count_for_language_code('hi', 1) stats.set_translation_suggestion_count_for_language_code('fr', 1) @@ -2980,7 +3159,8 @@ def test_get_translation_language_codes_that_need_reviewers_for_multi_lang( language_codes_that_need_reviewers, {'hi', 'fr'}) def test_get_translation_language_codes_that_need_reviewers_for_no_lang( - self): + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() language_codes_that_need_reviewers = ( @@ -2991,7 +3171,8 @@ def test_get_translation_language_codes_that_need_reviewers_for_no_lang( language_codes_that_need_reviewers, set()) def 
test_translation_reviewers_are_needed_if_suggestions_but_no_reviewers( - self): + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.set_translation_suggestion_count_for_language_code( self.sample_language_code, 1) @@ -3000,7 +3181,9 @@ def test_translation_reviewers_are_needed_if_suggestions_but_no_reviewers( stats.are_translation_reviewers_needed_for_lang_code( self.sample_language_code)) - def test_translation_reviewers_are_needed_if_num_suggestions_past_max(self): + def test_translation_reviewers_are_needed_if_num_suggestions_past_max( + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.set_translation_suggestion_count_for_language_code( self.sample_language_code, 2) @@ -3015,7 +3198,9 @@ def test_translation_reviewers_are_needed_if_num_suggestions_past_max(self): self.assertTrue(reviewers_are_needed) - def test_translation_reviewers_not_needed_if_num_suggestions_eqs_max(self): + def test_translation_reviewers_not_needed_if_num_suggestions_eqs_max( + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.set_translation_suggestion_count_for_language_code( self.sample_language_code, 2) @@ -3030,7 +3215,9 @@ def test_translation_reviewers_not_needed_if_num_suggestions_eqs_max(self): self.assertFalse(reviewers_are_needed) - def test_translation_reviewers_not_needed_if_num_suggestions_less_max(self): + def test_translation_reviewers_not_needed_if_num_suggestions_less_max( + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.set_translation_suggestion_count_for_language_code( self.sample_language_code, 1) @@ -3046,7 +3233,8 @@ def test_translation_reviewers_not_needed_if_num_suggestions_less_max(self): self.assertFalse(reviewers_are_needed) def test_translation_reviewers_not_needed_if_reviewers_and_no_sugestions( - self): + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() 
stats.set_translation_reviewer_count_for_language_code( self.sample_language_code, 1) @@ -3056,7 +3244,8 @@ def test_translation_reviewers_not_needed_if_reviewers_and_no_sugestions( self.sample_language_code)) def test_translation_reviewers_not_needed_if_no_reviewers_no_sugestions( - self): + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() self._assert_community_contribution_stats_is_in_default_state() @@ -3065,13 +3254,16 @@ def test_translation_reviewers_not_needed_if_no_reviewers_no_sugestions( self.sample_language_code)) def test_question_reviewers_are_needed_if_suggestions_zero_reviewers( - self): + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.question_suggestion_count = 1 self.assertTrue(stats.are_question_reviewers_needed()) - def test_question_reviewers_are_needed_if_num_suggestions_past_max(self): + def test_question_reviewers_are_needed_if_num_suggestions_past_max( + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.question_suggestion_count = 2 stats.question_reviewer_count = 1 @@ -3082,7 +3274,9 @@ def test_question_reviewers_are_needed_if_num_suggestions_past_max(self): self.assertTrue(reviewers_are_needed) - def test_question_reviewers_not_needed_if_num_suggestions_eqs_max(self): + def test_question_reviewers_not_needed_if_num_suggestions_eqs_max( + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.question_suggestion_count = 2 stats.question_reviewer_count = 2 @@ -3093,7 +3287,9 @@ def test_question_reviewers_not_needed_if_num_suggestions_eqs_max(self): self.assertFalse(reviewers_are_needed) - def test_question_reviewers_not_needed_if_num_suggestions_less_max(self): + def test_question_reviewers_not_needed_if_num_suggestions_less_max( + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() stats.question_suggestion_count = 1 stats.question_reviewer_count = 2 @@ -3105,14 
+3301,16 @@ def test_question_reviewers_not_needed_if_num_suggestions_less_max(self): self.assertFalse(reviewers_are_needed) def test_question_reviewers_not_needed_if_no_reviewers_no_sugestions( - self): + self + ) -> None: stats = suggestion_services.get_community_contribution_stats() self._assert_community_contribution_stats_is_in_default_state() self.assertFalse(stats.are_question_reviewers_needed()) def test_validate_translation_reviewer_counts_fails_for_negative_counts( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -3122,7 +3320,7 @@ def test_validate_translation_reviewer_counts_fails_for_negative_counts( self.sample_language_code, self.negative_count) ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the translation reviewer count to be non-negative for ' '%s language code, received: %s.' % ( @@ -3131,7 +3329,8 @@ def test_validate_translation_reviewer_counts_fails_for_negative_counts( community_contribution_stats.validate() def test_validate_translation_suggestion_counts_fails_for_negative_counts( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -3141,7 +3340,7 @@ def test_validate_translation_suggestion_counts_fails_for_negative_counts( self.sample_language_code, self.negative_count) ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the translation suggestion count to be non-negative for ' '%s language code, received: %s.' 
% ( @@ -3149,7 +3348,9 @@ def test_validate_translation_suggestion_counts_fails_for_negative_counts( ): community_contribution_stats.validate() - def test_validate_question_reviewer_count_fails_for_negative_count(self): + def test_validate_question_reviewer_count_fails_for_negative_count( + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -3157,7 +3358,7 @@ def test_validate_question_reviewer_count_fails_for_negative_count(self): self.negative_count ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the question reviewer count to be non-negative, ' 'received: %s.' % ( @@ -3165,7 +3366,9 @@ def test_validate_question_reviewer_count_fails_for_negative_count(self): ): community_contribution_stats.validate() - def test_validate_question_suggestion_count_fails_for_negative_count(self): + def test_validate_question_suggestion_count_fails_for_negative_count( + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -3173,7 +3376,7 @@ def test_validate_question_suggestion_count_fails_for_negative_count(self): self.negative_count ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the question suggestion count to be non-negative, ' 'received: %s.' % ( @@ -3181,18 +3384,22 @@ def test_validate_question_suggestion_count_fails_for_negative_count(self): ): community_contribution_stats.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
def test_validate_translation_reviewer_counts_fails_for_non_integer_counts( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) ( community_contribution_stats .set_translation_reviewer_count_for_language_code( - self.sample_language_code, self.non_integer_count) + self.sample_language_code, self.non_integer_count) # type: ignore[arg-type] ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the translation reviewer count to be an integer for ' '%s language code, received: %s.' % ( @@ -3200,18 +3407,22 @@ def test_validate_translation_reviewer_counts_fails_for_non_integer_counts( ): community_contribution_stats.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. def test_validate_translation_suggestion_counts_fails_for_non_integer_count( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) ( community_contribution_stats .set_translation_suggestion_count_for_language_code( - self.sample_language_code, self.non_integer_count) + self.sample_language_code, self.non_integer_count) # type: ignore[arg-type] ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the translation suggestion count to be an integer for ' '%s language code, received: %s.' % ( @@ -3219,16 +3430,20 @@ def test_validate_translation_suggestion_counts_fails_for_non_integer_count( ): community_contribution_stats.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
def test_validate_question_reviewer_count_fails_for_non_integer_count( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) community_contribution_stats.question_reviewer_count = ( - self.non_integer_count + self.non_integer_count # type: ignore[assignment] ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the question reviewer count to be an integer, ' 'received: %s.' % ( @@ -3236,16 +3451,20 @@ def test_validate_question_reviewer_count_fails_for_non_integer_count( ): community_contribution_stats.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. def test_validate_question_suggestion_count_fails_for_non_integer_count( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) community_contribution_stats.question_suggestion_count = ( - self.non_integer_count + self.non_integer_count # type: ignore[assignment] ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected the question suggestion count to be an integer, ' 'received: %s.' % ( @@ -3254,7 +3473,8 @@ def test_validate_question_suggestion_count_fails_for_non_integer_count( community_contribution_stats.validate() def test_validate_translation_reviewer_counts_fails_for_invalid_lang_code( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -3264,7 +3484,7 @@ def test_validate_translation_reviewer_counts_fails_for_invalid_lang_code( self.invalid_language_code, 1) ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid language code for the translation reviewer counts: ' '%s.' 
% self.invalid_language_code @@ -3272,7 +3492,8 @@ def test_validate_translation_reviewer_counts_fails_for_invalid_lang_code( community_contribution_stats.validate() def test_validate_translation_suggestion_counts_fails_for_invalid_lang_code( - self): + self + ) -> None: community_contribution_stats = ( suggestion_services.get_community_contribution_stats() ) @@ -3282,7 +3503,7 @@ def test_validate_translation_suggestion_counts_fails_for_invalid_lang_code( self.invalid_language_code, 1) ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid language code for the translation suggestion counts: ' '%s.' % self.invalid_language_code @@ -3293,12 +3514,14 @@ def test_validate_translation_suggestion_counts_fails_for_invalid_lang_code( class ReviewableSuggestionEmailInfoUnitTests(test_utils.GenericTestBase): """Tests for the ReviewableSuggestionEmailInfo class.""" - suggestion_type = feconf.SUGGESTION_TYPE_ADD_QUESTION - language_code = 'en' - suggestion_content = 'sample question' - submission_datetime = datetime.datetime.utcnow() + suggestion_type: str = feconf.SUGGESTION_TYPE_ADD_QUESTION + language_code: str = 'en' + suggestion_content: str = 'sample question' + submission_datetime: datetime.datetime = datetime.datetime.utcnow() - def test_initial_object_with_valid_arguments_has_correct_properties(self): + def test_initial_object_with_valid_arguments_has_correct_properties( + self + ) -> None: reviewable_suggestion_email_info = ( suggestion_registry.ReviewableSuggestionEmailInfo( self.suggestion_type, self.language_code, @@ -3318,3 +3541,307 @@ def test_initial_object_with_valid_arguments_has_correct_properties(self): self.assertEqual( reviewable_suggestion_email_info.submission_datetime, self.submission_datetime) + + +class TranslationReviewStatsUnitTests(test_utils.GenericTestBase): + """Tests for the TranslationReviewStats class.""" + + LANGUAGE_CODE: Final = 'es' + CONTRIBUTOR_USER_ID: Final = 
'uid_01234567890123456789012345678912' + TOPIC_ID: Final = 'topic_id' + REVIEWED_TRANSLATIONS_COUNT: Final = 2 + REVIEWED_TRANSLATION_WORD_COUNT: Final = 100 + ACCEPTED_TRANSLATIONS_COUNT: Final = 1 + ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT: Final = 0 + ACCEPTED_TRANSLATION_WORD_COUNT: Final = 50 + FIRST_CONTRIBUTION_DATE: Final = datetime.date.fromtimestamp(1616173836) + LAST_CONTRIBUTION_DATE: Final = datetime.date.fromtimestamp(1616173836) + + def test_create_translation_review_stats(self) -> None: + expected_stats_dict = { + 'language_code': self.LANGUAGE_CODE, + 'contributor_user_id': self.CONTRIBUTOR_USER_ID, + 'topic_id': self.TOPIC_ID, + 'reviewed_translations_count': self.REVIEWED_TRANSLATIONS_COUNT, + 'reviewed_translation_word_count': ( + self.REVIEWED_TRANSLATION_WORD_COUNT), + 'accepted_translations_count': self.ACCEPTED_TRANSLATIONS_COUNT, + 'accepted_translation_word_count': ( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + 'accepted_translations_with_reviewer_edits_count': ( + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + 'first_contribution_date': self.FIRST_CONTRIBUTION_DATE, + 'last_contribution_date': self.LAST_CONTRIBUTION_DATE, + } + + actual_stats = suggestion_registry.TranslationReviewStats( + self.LANGUAGE_CODE, self.CONTRIBUTOR_USER_ID, + self.TOPIC_ID, self.REVIEWED_TRANSLATIONS_COUNT, + self.REVIEWED_TRANSLATION_WORD_COUNT, + self.ACCEPTED_TRANSLATIONS_COUNT, + self.ACCEPTED_TRANSLATION_WORD_COUNT, + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT, + self.FIRST_CONTRIBUTION_DATE, self.LAST_CONTRIBUTION_DATE + ) + + self.assertDictEqual( + actual_stats.to_dict(), expected_stats_dict) + + +class QuestionContributionStatsUnitTests(test_utils.GenericTestBase): + """Tests for the QuestionContributionStats class.""" + + CONTRIBUTOR_USER_ID: Final = 'uid_01234567890123456789012345678912' + TOPIC_ID: Final = 'topic_id' + SUBMITTED_QUESTION_COUNT: Final = 2 + ACCEPTED_QUESTIONS_COUNT: Final = 1 + 
ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT: Final = 0 + FIRST_CONTRIBUTION_DATE: Final = datetime.date.fromtimestamp(1616173836) + LAST_CONTRIBUTION_DATE: Final = datetime.date.fromtimestamp(1616173836) + + def test_create_question_contribution_stats(self) -> None: + expected_stats_dict = { + 'contributor_user_id': self.CONTRIBUTOR_USER_ID, + 'topic_id': self.TOPIC_ID, + 'submitted_questions_count': ( + self.SUBMITTED_QUESTION_COUNT), + 'accepted_questions_count': ( + self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_without_reviewer_edits_count': ( + self + .ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE) + } + + actual_stats = suggestion_registry.QuestionContributionStats( + self.CONTRIBUTOR_USER_ID, self.TOPIC_ID, + self.SUBMITTED_QUESTION_COUNT, self.ACCEPTED_QUESTIONS_COUNT, + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT, + self.FIRST_CONTRIBUTION_DATE, self.LAST_CONTRIBUTION_DATE + ) + + self.assertDictEqual( + actual_stats.to_dict(), expected_stats_dict) + + +class QuestionReviewStatsUnitTests(test_utils.GenericTestBase): + """Tests for the QuestionReviewStats class.""" + + CONTRIBUTOR_USER_ID: Final = 'uid_01234567890123456789012345678912' + TOPIC_ID: Final = 'topic_id' + REVIEWED_QUESTIONS_COUNT: Final = 2 + ACCEPTED_QUESTIONS_COUNT: Final = 1 + ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT: Final = 0 + FIRST_CONTRIBUTION_DATE: Final = datetime.date.fromtimestamp(1616173836) + LAST_CONTRIBUTION_DATE: Final = datetime.date.fromtimestamp(1616173836) + + def test_create_question_review_stats(self) -> None: + expected_stats_dict = { + 'contributor_user_id': self.CONTRIBUTOR_USER_ID, + 'topic_id': self.TOPIC_ID, + 'reviewed_questions_count': self.REVIEWED_QUESTIONS_COUNT, + 'accepted_questions_count': ( + self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_with_reviewer_edits_count': ( + 
self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE), + 'last_contribution_date': self.LAST_CONTRIBUTION_DATE + } + + actual_stats = suggestion_registry.QuestionReviewStats( + self.CONTRIBUTOR_USER_ID, self.TOPIC_ID, + self.REVIEWED_QUESTIONS_COUNT, + self.ACCEPTED_QUESTIONS_COUNT, + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT, + self.FIRST_CONTRIBUTION_DATE, self.LAST_CONTRIBUTION_DATE + ) + + self.assertDictEqual( + actual_stats.to_dict(), expected_stats_dict) + + +class ContributorMilestoneEmailInfoUnitTests(test_utils.GenericTestBase): + """Tests for the ContributorMilestoneEmailInfo class.""" + + CONTRIBUTOR_USER_ID: Final = 'uid_01234567890123456789012345678912' + CONTRIBUTION_TYPE: Final = 'translation' + CONTRIBUTION_SUBTYPE: Final = 'submission' + LANGUAGE_CODE: Final = 'es' + RANK_NAME: Final = 'Initial Contributor' + + def test_create_contribution_milestone_email_info(self) -> None: + actual_info = suggestion_registry.ContributorMilestoneEmailInfo( + self.CONTRIBUTOR_USER_ID, self.CONTRIBUTION_TYPE, + self.CONTRIBUTION_SUBTYPE, self.LANGUAGE_CODE, + self.RANK_NAME + ) + + self.assertEqual( + actual_info.contributor_user_id, self.CONTRIBUTOR_USER_ID + ) + self.assertEqual( + actual_info.contribution_type, self.CONTRIBUTION_TYPE + ) + self.assertEqual( + actual_info.contribution_subtype, self.CONTRIBUTION_SUBTYPE + ) + self.assertEqual( + actual_info.language_code, self.LANGUAGE_CODE + ) + self.assertEqual( + actual_info.rank_name, self.RANK_NAME + ) + + +class ContributorStatsSummaryUnitTests(test_utils.GenericTestBase): + """Tests for the ContributorStatsSummary class.""" + + LANGUAGE_CODE: Final = 'es' + CONTRIBUTOR_USER_ID: Final = 'user_01' + TOPIC_ID: Final = 'topic_id' + SUBMITTED_TRANSLATIONS_COUNT: Final = 2 + SUBMITTED_TRANSLATION_WORD_COUNT: Final = 100 + REJECTED_TRANSLATIONS_COUNT: Final = 0 + REJECTED_TRANSLATION_WORD_COUNT: Final = 0 + # Timestamp dates in sec since epoch 
for Mar 19 2021 UTC. + CONTRIBUTION_DATES: Final = { + datetime.date.fromtimestamp(1616173836), + datetime.date.fromtimestamp(1616173837) + } + REVIEWED_TRANSLATIONS_COUNT: Final = 2 + REVIEWED_TRANSLATION_WORD_COUNT: Final = 100 + ACCEPTED_TRANSLATIONS_COUNT: Final = 1 + ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT: Final = 0 + ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT: Final = 0 + ACCEPTED_TRANSLATION_WORD_COUNT: Final = 50 + SUBMITTED_QUESTION_COUNT: Final = 2 + ACCEPTED_QUESTIONS_COUNT: Final = 1 + ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT: Final = 0 + REVIEWED_QUESTIONS_COUNT: Final = 2 + ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT: Final = 0 + FIRST_CONTRIBUTION_DATE: Final = datetime.date.fromtimestamp(1616173836) + LAST_CONTRIBUTION_DATE: Final = datetime.date.fromtimestamp(1616173836) + + def test_create_contribution_stats_summary(self) -> None: + expected_translation_contribution_stats = { + 'language_code': self.LANGUAGE_CODE, + 'contributor_user_id': self.CONTRIBUTOR_USER_ID, + 'topic_id': self.TOPIC_ID, + 'submitted_translations_count': self.SUBMITTED_TRANSLATIONS_COUNT, + 'submitted_translation_word_count': ( + self.SUBMITTED_TRANSLATION_WORD_COUNT), + 'accepted_translations_count': self.ACCEPTED_TRANSLATIONS_COUNT, + 'accepted_translations_without_reviewer_edits_count': ( + self.ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT), + 'accepted_translation_word_count': ( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + 'rejected_translations_count': self.REJECTED_TRANSLATIONS_COUNT, + 'rejected_translation_word_count': ( + self.REJECTED_TRANSLATION_WORD_COUNT), + 'contribution_dates': self.CONTRIBUTION_DATES + } + expected_translation_review_stats = { + 'language_code': self.LANGUAGE_CODE, + 'contributor_user_id': self.CONTRIBUTOR_USER_ID, + 'topic_id': self.TOPIC_ID, + 'reviewed_translations_count': self.REVIEWED_TRANSLATIONS_COUNT, + 'reviewed_translation_word_count': ( + self.REVIEWED_TRANSLATION_WORD_COUNT), + 
'accepted_translations_count': self.ACCEPTED_TRANSLATIONS_COUNT, + 'accepted_translation_word_count': ( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + 'accepted_translations_with_reviewer_edits_count': ( + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + 'first_contribution_date': self.FIRST_CONTRIBUTION_DATE, + 'last_contribution_date': self.LAST_CONTRIBUTION_DATE, + } + expected_question_contribution_stats = { + 'contributor_user_id': self.CONTRIBUTOR_USER_ID, + 'topic_id': self.TOPIC_ID, + 'submitted_questions_count': ( + self.SUBMITTED_QUESTION_COUNT), + 'accepted_questions_count': ( + self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_without_reviewer_edits_count': ( + self + .ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE) + } + expected_question_review_stats = { + 'contributor_user_id': self.CONTRIBUTOR_USER_ID, + 'topic_id': self.TOPIC_ID, + 'reviewed_questions_count': self.REVIEWED_QUESTIONS_COUNT, + 'accepted_questions_count': ( + self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_with_reviewer_edits_count': ( + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE), + 'last_contribution_date': self.LAST_CONTRIBUTION_DATE + } + expected_contribution_summary = { + 'contributor_user_id': self.CONTRIBUTOR_USER_ID, + 'translation_contribution_stats': [ + expected_translation_contribution_stats], + 'question_contribution_stats': [ + expected_question_contribution_stats], + 'translation_review_stats': [expected_translation_review_stats], + 'question_review_stats': [expected_question_review_stats] + } + translation_contribution_stats = ( + suggestion_registry).TranslationContributionStats( + self.LANGUAGE_CODE, + self.CONTRIBUTOR_USER_ID, + self.TOPIC_ID, + self.SUBMITTED_TRANSLATIONS_COUNT, + self.SUBMITTED_TRANSLATION_WORD_COUNT, + self.ACCEPTED_TRANSLATIONS_COUNT, + 
( + self + .ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT + ), + self.ACCEPTED_TRANSLATION_WORD_COUNT, + self.REJECTED_TRANSLATIONS_COUNT, + self.REJECTED_TRANSLATION_WORD_COUNT, + self.CONTRIBUTION_DATES + ) + translation_review_stats = suggestion_registry.TranslationReviewStats( + self.LANGUAGE_CODE, self.CONTRIBUTOR_USER_ID, + self.TOPIC_ID, self.REVIEWED_TRANSLATIONS_COUNT, + self.REVIEWED_TRANSLATION_WORD_COUNT, + self.ACCEPTED_TRANSLATIONS_COUNT, + self.ACCEPTED_TRANSLATION_WORD_COUNT, + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT, + self.FIRST_CONTRIBUTION_DATE, self.LAST_CONTRIBUTION_DATE + ) + question_contribution_stats = ( + suggestion_registry).QuestionContributionStats( + self.CONTRIBUTOR_USER_ID, self.TOPIC_ID, + self.SUBMITTED_QUESTION_COUNT, self.ACCEPTED_QUESTIONS_COUNT, + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT, + self.FIRST_CONTRIBUTION_DATE, self.LAST_CONTRIBUTION_DATE + ) + question_review_stats = suggestion_registry.QuestionReviewStats( + self.CONTRIBUTOR_USER_ID, self.TOPIC_ID, + self.REVIEWED_QUESTIONS_COUNT, + self.ACCEPTED_QUESTIONS_COUNT, + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT, + self.FIRST_CONTRIBUTION_DATE, self.LAST_CONTRIBUTION_DATE + ) + + contribution_summary = suggestion_registry.ContributorStatsSummary( + self.CONTRIBUTOR_USER_ID, + [translation_contribution_stats], [question_contribution_stats], + [translation_review_stats], [question_review_stats] + ) + + self.assertDictEqual( + contribution_summary.to_dict(), expected_contribution_summary + ) diff --git a/core/domain/suggestion_services.py b/core/domain/suggestion_services.py index e86547e1fafb..c9c2b4fe3469 100644 --- a/core/domain/suggestion_services.py +++ b/core/domain/suggestion_services.py @@ -18,6 +18,7 @@ from __future__ import annotations +import datetime import heapq import logging import re @@ -25,27 +26,64 @@ from core import feconf from core.constants import constants from core.domain import email_manager +from core.domain 
import exp_domain from core.domain import exp_fetchers from core.domain import feedback_services from core.domain import html_cleaner from core.domain import html_validation_service +from core.domain import opportunity_services from core.domain import question_domain +from core.domain import skill_services +from core.domain import state_domain from core.domain import suggestion_registry +from core.domain import taskqueue_services from core.domain import user_domain from core.domain import user_services from core.platform import models +from typing import ( + Callable, Dict, Final, List, Literal, Mapping, Match, Optional, + Sequence, Set, Tuple, Union, cast, overload +) + +MYPY = False +if MYPY: # pragma: no cover + # Here, change domain is imported only for type checking. + from core.domain import change_domain + from mypy_imports import feedback_models + from mypy_imports import suggestion_models + from mypy_imports import transaction_services + from mypy_imports import user_models + + AllowedSuggestionClasses = Union[ + suggestion_registry.SuggestionEditStateContent, + suggestion_registry.SuggestionTranslateContent, + suggestion_registry.SuggestionAddQuestion + ] + (feedback_models, suggestion_models, user_models) = ( - models.Registry.import_models( - [models.NAMES.feedback, models.NAMES.suggestion, models.NAMES.user])) + models.Registry.import_models([ + models.Names.FEEDBACK, models.Names.SUGGESTION, models.Names.USER + ]) +) + transaction_services = models.Registry.import_transaction_services() -DEFAULT_SUGGESTION_THREAD_SUBJECT = 'Suggestion from a user' -DEFAULT_SUGGESTION_THREAD_INITIAL_MESSAGE = '' +DEFAULT_SUGGESTION_THREAD_SUBJECT: Final = 'Suggestion from a user' +DEFAULT_SUGGESTION_THREAD_INITIAL_MESSAGE: Final = '' # The maximum number of suggestions to recommend to a reviewer to review in an # email. 
-MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_REVIEWER = 5 +MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_REVIEWER: Final = 5 + +SUGGESTION_TRANSLATE_CONTENT_HTML: Callable[ + [suggestion_registry.SuggestionTranslateContent], str +] = lambda suggestion: suggestion.change.translation_html + +SUGGESTION_ADD_QUESTION_HTML: Callable[ + [suggestion_registry.SuggestionAddQuestion], str +] = lambda suggestion: suggestion.change.question_dict[ + 'question_state_data']['content']['html'] # A dictionary that maps the suggestion type to a lambda function, which is # used to retrieve the html content that corresponds to the suggestion's @@ -54,18 +92,69 @@ # suggestion opportunities. For instance, for translation suggestions the # emphasized text is the translation. Similarly, for question suggestions the # emphasized text is the question being asked. -SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS = { - feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: ( - lambda suggestion: suggestion.change.translation_html), - feconf.SUGGESTION_TYPE_ADD_QUESTION: ( - lambda suggestion: suggestion.change.question_dict[ - 'question_state_data']['content']['html']) +SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS: Dict[str, Callable[..., str]] = { + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: SUGGESTION_TRANSLATE_CONTENT_HTML, + feconf.SUGGESTION_TYPE_ADD_QUESTION: SUGGESTION_ADD_QUESTION_HTML } +@overload +def create_suggestion( + suggestion_type: Literal['add_question'], + target_type: str, + target_id: str, + target_version_at_submission: int, + author_id: str, + change: Mapping[str, change_domain.AcceptableChangeDictTypes], + description: Optional[str] +) -> suggestion_registry.SuggestionAddQuestion: ... 
+ + +@overload +def create_suggestion( + suggestion_type: Literal['translate_content'], + target_type: str, + target_id: str, + target_version_at_submission: int, + author_id: str, + change: Mapping[str, change_domain.AcceptableChangeDictTypes], + description: Optional[str] +) -> suggestion_registry.SuggestionTranslateContent: ... + + +@overload def create_suggestion( - suggestion_type, target_type, target_id, target_version_at_submission, - author_id, change, description): + suggestion_type: Literal['edit_exploration_state_content'], + target_type: str, + target_id: str, + target_version_at_submission: int, + author_id: str, + change: Mapping[str, change_domain.AcceptableChangeDictTypes], + description: Optional[str] +) -> suggestion_registry.SuggestionEditStateContent: ... + + +@overload +def create_suggestion( + suggestion_type: str, + target_type: str, + target_id: str, + target_version_at_submission: int, + author_id: str, + change: Mapping[str, change_domain.AcceptableChangeDictTypes], + description: Optional[str] +) -> suggestion_registry.BaseSuggestion: ... + + +def create_suggestion( + suggestion_type: str, + target_type: str, + target_id: str, + target_version_at_submission: int, + author_id: str, + change: Mapping[str, change_domain.AcceptableChangeDictTypes], + description: Optional[str] +) -> suggestion_registry.BaseSuggestion: """Creates a new SuggestionModel and the corresponding FeedbackThread. Args: @@ -78,10 +167,14 @@ def create_suggestion( entity at the time of creation of the suggestion. author_id: str. The ID of the user who submitted the suggestion. change: dict. The details of the suggestion. - description: str. The description of the changes provided by the author. + description: str|None. The description of the changes provided by the + author or None, if no description is provided. Returns: Suggestion. The newly created suggestion domain object. + + Raises: + Exception. Invalid suggestion type. 
""" if description is None: description = DEFAULT_SUGGESTION_THREAD_SUBJECT @@ -100,37 +193,59 @@ def create_suggestion( # Suggestions of this type do not have an associated language code, # since they are not queryable by language. language_code = None + suggestion: AllowedSuggestionClasses = ( + suggestion_registry.SuggestionEditStateContent( + thread_id, target_id, target_version_at_submission, status, + author_id, None, change, score_category, language_code, False + ) + ) elif suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: score_category = ( suggestion_models.SCORE_TYPE_TRANSLATION + suggestion_models.SCORE_CATEGORY_DELIMITER + exploration.category) # The language code of the translation, used for querying purposes. + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(change['language_code'], str) language_code = change['language_code'] + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(change['state_name'], str) + assert isinstance(change['content_id'], str) content_html = exploration.get_content_html( change['state_name'], change['content_id']) if content_html != change['content_html']: raise Exception( 'The Exploration content has changed since this translation ' 'was submitted.') + suggestion = suggestion_registry.SuggestionTranslateContent( + thread_id, target_id, target_version_at_submission, status, + author_id, None, change, score_category, language_code, False + ) elif suggestion_type == feconf.SUGGESTION_TYPE_ADD_QUESTION: score_category = ( suggestion_models.SCORE_TYPE_QUESTION + suggestion_models.SCORE_CATEGORY_DELIMITER + target_id) - change['question_dict']['language_code'] = ( + # Ruling out the possibility of any other type for mypy type checking. 
+ assert isinstance(change['question_dict'], dict) + # Here we use cast because we are narrowing down the type from + # various Dict types that are present in AcceptableChangeDictTypes + # to QuestionDict type. + question_dict = cast( + question_domain.QuestionDict, + change['question_dict'] + ) + question_dict['language_code'] = ( constants.DEFAULT_LANGUAGE_CODE) - change['question_dict']['question_state_data_schema_version'] = ( + question_dict['question_state_data_schema_version'] = ( feconf.CURRENT_STATE_SCHEMA_VERSION) # The language code of the question, used for querying purposes. - language_code = constants.DEFAULT_LANGUAGE_CODE + add_question_language_code = constants.DEFAULT_LANGUAGE_CODE + suggestion = suggestion_registry.SuggestionAddQuestion( + thread_id, target_id, target_version_at_submission, status, + author_id, None, change, score_category, add_question_language_code, + False + ) else: raise Exception('Invalid suggestion type %s' % suggestion_type) - - suggestion_domain_class = ( - suggestion_registry.SUGGESTION_TYPES_TO_DOMAIN_CLASSES[ - suggestion_type]) - suggestion = suggestion_domain_class( - thread_id, target_id, target_version_at_submission, status, author_id, - None, change, score_category, language_code, False) suggestion.validate() suggestion_models.GeneralSuggestionModel.create( @@ -145,7 +260,9 @@ def create_suggestion( return get_suggestion_by_id(thread_id) -def get_suggestion_from_model(suggestion_model): +def get_suggestion_from_model( + suggestion_model: suggestion_models.GeneralSuggestionModel +) -> suggestion_registry.BaseSuggestion: """Converts the given SuggestionModel to a Suggestion domain object Args: @@ -167,22 +284,259 @@ def get_suggestion_from_model(suggestion_model): suggestion_model.edited_by_reviewer, suggestion_model.last_updated) -def get_suggestion_by_id(suggestion_id): +@overload +def get_suggestion_by_id( + suggestion_id: str +) -> suggestion_registry.BaseSuggestion: ... 
+ + +@overload +def get_suggestion_by_id( + suggestion_id: str, *, strict: Literal[True] +) -> suggestion_registry.BaseSuggestion: ... + + +@overload +def get_suggestion_by_id( + suggestion_id: str, *, strict: Literal[False] +) -> Optional[suggestion_registry.BaseSuggestion]: ... + + +def get_suggestion_by_id( + suggestion_id: str, strict: bool = True +) -> Optional[suggestion_registry.BaseSuggestion]: """Finds a suggestion by the suggestion ID. Args: suggestion_id: str. The ID of the suggestion. + strict: bool. Whether to fail noisily if no suggestion with a given id + exists. Returns: Suggestion|None. The corresponding suggestion, or None if no suggestion is found. + + Raises: + Exception. The suggestion model does not exists for the given id. """ model = suggestion_models.GeneralSuggestionModel.get_by_id(suggestion_id) + if strict and model is None: + raise Exception( + 'No suggestion model exists for the corresponding suggestion id: %s' + % suggestion_id + ) + return get_suggestion_from_model(model) if model else None -def get_suggestions_by_ids(suggestion_ids): +@overload +def get_translation_contribution_stats_models( + stats_ids: List[str], *, strict: Literal[True] +) -> List[suggestion_models.TranslationContributionStatsModel]: ... + + +@overload +def get_translation_contribution_stats_models( + stats_ids: List[str] +) -> List[suggestion_models.TranslationContributionStatsModel]: ... + + +@overload +def get_translation_contribution_stats_models( + stats_ids: List[str], *, strict: Literal[False] +) -> List[Optional[suggestion_models.TranslationContributionStatsModel]]: ... + + +def get_translation_contribution_stats_models( + stats_ids: List[str], strict: bool = True +) -> Sequence[Optional[suggestion_models.TranslationContributionStatsModel]]: + """Finds translation contribution stats by the IDs. + + Args: + stats_ids: list(str). The IDs of the stats. + strict: bool. Whether to fail noisily if no stat with given ids exists. 
+ + Returns: + list(TranslationContributionStatsModel|None). The corresponding + translation contribution stats for the given IDs. + + Raises: + Exception. The stats models do not exist for the given IDs. + """ + stats_models = ( + suggestion_models.TranslationContributionStatsModel.get_multi( + list(stats_ids))) + + if not strict: + return stats_models + + for index, model in enumerate(stats_models): + if model is None: + raise Exception( + 'The stats models do not exist for the stats_id %s.' % ( + stats_ids[index]) + ) + + return stats_models + + +@overload +def get_translation_review_stats_models( + stats_ids: List[str], *, strict: Literal[True] +) -> List[suggestion_models.TranslationReviewStatsModel]: ... + + +@overload +def get_translation_review_stats_models( + stats_ids: List[str] +) -> List[suggestion_models.TranslationReviewStatsModel]: ... + + +@overload +def get_translation_review_stats_models( + stats_ids: List[str], *, strict: Literal[False] +) -> List[Optional[suggestion_models.TranslationReviewStatsModel]]: ... + + +def get_translation_review_stats_models( + stats_ids: List[str], strict: bool = True +) -> Sequence[Optional[suggestion_models.TranslationReviewStatsModel]]: + """Finds translation review stats by the IDs. + + Args: + stats_ids: list(str). The IDs of the stats. + strict: bool. Whether to fail noisily if no stat with given ids exists. + + Returns: + list(TranslationReviewStatsModel|None). The corresponding translation + review stats for the given IDs. + + Raises: + Exception. The stats models do not exist for the given IDs. + """ + stats_models = ( + suggestion_models.TranslationReviewStatsModel.get_multi( + list(stats_ids))) + + if not strict: + return stats_models + + for index, model in enumerate(stats_models): + if model is None: + raise Exception( + 'The stats models do not exist for the stats_id %s.' 
% ( + stats_ids[index]) + ) + + return stats_models + + +@overload +def get_question_contribution_stats_models( + stats_ids: List[str], *, strict: Literal[True] +) -> List[suggestion_models.QuestionContributionStatsModel]: ... + + +@overload +def get_question_contribution_stats_models( + stats_ids: List[str] +) -> List[suggestion_models.QuestionContributionStatsModel]: ... + + +@overload +def get_question_contribution_stats_models( + stats_ids: List[str], *, strict: Literal[False] +) -> List[Optional[suggestion_models.QuestionContributionStatsModel]]: ... + + +def get_question_contribution_stats_models( + stats_ids: List[str], strict: bool = True +) -> Sequence[Optional[suggestion_models.QuestionContributionStatsModel]]: + """Finds question contribution stats by the IDs. + + Args: + stats_ids: list(str). The IDs of the stats. + strict: bool. Whether to fail noisily if no stat with given ids exists. + + Returns: + list(QuestionContributionStatsModel|None). The corresponding question + contribution stats for the given IDs. + + Raises: + Exception. The stats models do not exist for the given IDs. + """ + stats_models = ( + suggestion_models.QuestionContributionStatsModel.get_multi( + list(stats_ids))) + + if not strict: + return stats_models + + for index, model in enumerate(stats_models): + if model is None: + raise Exception( + 'The stats models do not exist for the stats_id %s.' % ( + stats_ids[index]) + ) + + return stats_models + + +@overload +def get_question_review_stats_models( + stats_ids: List[str], *, strict: Literal[True] +) -> List[suggestion_models.QuestionReviewStatsModel]: ... + + +@overload +def get_question_review_stats_models( + stats_ids: List[str] +) -> List[suggestion_models.QuestionReviewStatsModel]: ... + + +@overload +def get_question_review_stats_models( + stats_ids: List[str], *, strict: Literal[False] +) -> List[Optional[suggestion_models.QuestionReviewStatsModel]]: ... 
+ + +def get_question_review_stats_models( + stats_ids: List[str], strict: bool = True +) -> Sequence[Optional[suggestion_models.QuestionReviewStatsModel]]: + """Finds question review stats by the IDs. + + Args: + stats_ids: list(str). The IDs of the stats. + strict: bool. Whether to fail noisily if no stat with given ids exists. + + Returns: + list(QuestionReviewStatsModel|None). The corresponding question review + stats for the given IDs. + + Raises: + Exception. The stats models do not exist for the given IDs. + """ + stats_models = ( + suggestion_models.QuestionReviewStatsModel.get_multi( + list(stats_ids))) + + if not strict: + return stats_models + + for index, model in enumerate(stats_models): + if model is None: + raise Exception( + 'The stats models do not exist for the stats_id %s.' % ( + stats_ids[index]) + ) + + return stats_models + + +def get_suggestions_by_ids( + suggestion_ids: List[str] +) -> List[Optional[suggestion_registry.BaseSuggestion]]: """Finds suggestions using the given suggestion IDs. Args: @@ -203,7 +557,9 @@ def get_suggestions_by_ids(suggestion_ids): ] -def query_suggestions(query_fields_and_values): +def query_suggestions( + query_fields_and_values: List[Tuple[str, str]] +) -> List[suggestion_registry.BaseSuggestion]: """Queries for suggestions. Args: @@ -222,7 +578,9 @@ def query_suggestions(query_fields_and_values): ] -def get_translation_suggestion_ids_with_exp_ids(exp_ids): +def get_translation_suggestion_ids_with_exp_ids( + exp_ids: List[str] +) -> List[str]: """Gets the ids of the translation suggestions corresponding to explorations with the given exploration ids. @@ -244,7 +602,7 @@ def get_translation_suggestion_ids_with_exp_ids(exp_ids): ) -def get_all_stale_suggestion_ids(): +def get_all_stale_suggestion_ids() -> List[str]: """Gets a list of the suggestion ids corresponding to suggestions that have not had any activity on them for THRESHOLD_TIME_BEFORE_ACCEPT time. 
@@ -258,7 +616,9 @@ def get_all_stale_suggestion_ids(): ) -def _update_suggestion(suggestion): +def _update_suggestion( + suggestion: suggestion_registry.BaseSuggestion +) -> None: """Updates the given suggestion. Args: @@ -267,7 +627,10 @@ def _update_suggestion(suggestion): _update_suggestions([suggestion]) -def _update_suggestions(suggestions, update_last_updated_time=True): +def _update_suggestions( + suggestions: List[suggestion_registry.BaseSuggestion], + update_last_updated_time: bool = True +) -> None: """Updates the given suggestions. Args: @@ -281,12 +644,18 @@ def _update_suggestions(suggestions, update_last_updated_time=True): suggestion.validate() suggestion_ids.append(suggestion.suggestion_id) - suggestion_models_to_update = ( + suggestion_models_to_update_with_none = ( suggestion_models.GeneralSuggestionModel.get_multi(suggestion_ids) ) + suggestion_models_to_update = [] - for index, suggestion_model in enumerate(suggestion_models_to_update): + for index, suggestion_model in enumerate( + suggestion_models_to_update_with_none + ): + # Ruling out the possibility of None for mypy type checking. + assert suggestion_model is not None suggestion = suggestions[index] + suggestion_models_to_update.append(suggestion_model) suggestion_model.status = suggestion.status suggestion_model.final_reviewer_id = suggestion.final_reviewer_id suggestion_model.change_cmd = suggestion.change.to_dict() @@ -301,7 +670,9 @@ def _update_suggestions(suggestions, update_last_updated_time=True): suggestion_models_to_update) -def get_commit_message_for_suggestion(author_username, commit_message): +def get_commit_message_for_suggestion( + author_username: str, commit_message: str +) -> str: """Returns a modified commit message for an accepted suggestion. 
Args: @@ -319,7 +690,11 @@ def get_commit_message_for_suggestion(author_username, commit_message): def accept_suggestion( - suggestion_id, reviewer_id, commit_message, review_message): + suggestion_id: str, + reviewer_id: str, + commit_message: str, + review_message: str +) -> None: """Accepts the suggestion with the given suggestion_id after validating it. Args: @@ -337,7 +712,7 @@ def accept_suggestion( if not commit_message or not commit_message.strip(): raise Exception('Commit message cannot be empty.') - suggestion = get_suggestion_by_id(suggestion_id) + suggestion = get_suggestion_by_id(suggestion_id, strict=False) if suggestion is None: raise Exception( @@ -414,7 +789,9 @@ def accept_suggestion( _update_user_proficiency(user_proficiency) -def reject_suggestion(suggestion_id, reviewer_id, review_message): +def reject_suggestion( + suggestion_id: str, reviewer_id: str, review_message: str +) -> None: """Rejects the suggestion with the given suggestion_id. Args: @@ -430,7 +807,9 @@ def reject_suggestion(suggestion_id, reviewer_id, review_message): reject_suggestions([suggestion_id], reviewer_id, review_message) -def reject_suggestions(suggestion_ids, reviewer_id, review_message): +def reject_suggestions( + suggestion_ids: List[str], reviewer_id: str, review_message: str +) -> None: """Rejects the suggestions with the given suggestion_ids. Args: @@ -442,14 +821,16 @@ def reject_suggestions(suggestion_ids, reviewer_id, review_message): Raises: Exception. One or more of the suggestions has already been handled. """ - suggestions = get_suggestions_by_ids(suggestion_ids) + suggestions_with_none = get_suggestions_by_ids(suggestion_ids) + suggestions = [] - for index, suggestion in enumerate(suggestions): + for index, suggestion in enumerate(suggestions_with_none): if suggestion is None: raise Exception( 'You cannot reject the suggestion with id %s because it does ' 'not exist.' 
% (suggestion_ids[index]) ) + suggestions.append(suggestion) if suggestion.is_handled: raise Exception( 'The suggestion with id %s has already been accepted/' @@ -475,7 +856,7 @@ def reject_suggestions(suggestion_ids, reviewer_id, review_message): ) -def auto_reject_question_suggestions_for_skill_id(skill_id): +def auto_reject_question_suggestions_for_skill_id(skill_id: str) -> None: """Rejects all SuggestionAddQuestions with target ID matching the supplied skill ID. Reviewer ID is set to SUGGESTION_BOT_USER_ID. @@ -492,13 +873,19 @@ def auto_reject_question_suggestions_for_skill_id(skill_id): ] ) - suggestion_ids = [suggestion.suggestion_id for suggestion in suggestions] + suggestion_ids: List[str] = [] + for suggestion in suggestions: + # Narrowing down the type from BaseSuggestion to SuggestionAddQuestion. + assert isinstance( + suggestion, suggestion_registry.SuggestionAddQuestion + ) + suggestion_ids.append(suggestion.suggestion_id) reject_suggestions( suggestion_ids, feconf.SUGGESTION_BOT_USER_ID, suggestion_models.DELETED_SKILL_REJECT_MESSAGE) -def auto_reject_translation_suggestions_for_exp_ids(exp_ids): +def auto_reject_translation_suggestions_for_exp_ids(exp_ids: List[str]) -> None: """Rejects all translation suggestions with target IDs matching the supplied exploration IDs. These suggestions are being rejected because their corresponding exploration was removed from a story or the story was @@ -516,7 +903,11 @@ def auto_reject_translation_suggestions_for_exp_ids(exp_ids): def resubmit_rejected_suggestion( - suggestion_id, summary_message, author_id, change): + suggestion_id: str, + summary_message: str, + author_id: str, + change: change_domain.BaseChange +) -> None: """Resubmit a rejected suggestion with the given suggestion_id. Args: @@ -524,7 +915,7 @@ def resubmit_rejected_suggestion( summary_message: str. The message provided by the author to summarize new suggestion. author_id: str. The ID of the author creating the suggestion. 
- change: ExplorationChange. The new change to apply to the suggestion. + change: BaseChange. The new change to apply to the suggestion. Raises: Exception. The summary message is empty. @@ -559,7 +950,9 @@ def resubmit_rejected_suggestion( None, summary_message) -def get_all_suggestions_that_can_be_reviewed_by_user(user_id): +def get_all_suggestions_that_can_be_reviewed_by_user( + user_id: str +) -> List[suggestion_registry.BaseSuggestion]: """Returns a list of suggestions which need to be reviewed, in categories where the user has crossed the minimum score to review. @@ -585,41 +978,133 @@ def get_all_suggestions_that_can_be_reviewed_by_user(user_id): ]) -def get_reviewable_suggestions(user_id, suggestion_type): - """Returns a list of suggestions of given suggestion_type which the user - can review. +def get_reviewable_translation_suggestions_by_offset( + user_id: str, + opportunity_summary_exp_ids: Optional[List[str]], + limit: Optional[int], + offset: int, + sort_key: Optional[str], + language: Optional[str] = None +) -> Tuple[List[suggestion_registry.SuggestionTranslateContent], int]: + """Returns a list of translation suggestions matching the + passed opportunity IDs which the user can review. Args: user_id: str. The ID of the user. - suggestion_type: str. The type of the suggestion. + opportunity_summary_exp_ids: list(str) or None. + The list of exploration IDs for which suggestions + are fetched. If the list is empty, no suggestions are + fetched. If the value is None, all reviewable + suggestions are fetched. If the list consists of some + valid number of ids, suggestions corresponding to the + IDs are fetched. + limit: int|None. The maximum number of results to return. If None, + all available results are returned. + sort_key: str|None. The key to sort the suggestions by. + offset: int. The number of results to skip from the beginning of all + results matching the query. + language: str. ISO 639-1 language code for which to filter. 
If it is + None, all available languages will be returned. Returns: - list(Suggestion). A list of suggestions which the given user is allowed - to review. + Tuple of (results, next_offset). Where: + results: list(Suggestion). A list of translation suggestions + which the supplied user is permitted to review. + next_offset: int. The input offset + the number of results returned + by the current query. """ - all_suggestions = [] - if suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: - contribution_rights = user_services.get_user_contribution_rights( - user_id) - language_codes = ( - contribution_rights.can_review_translation_for_language_codes) - all_suggestions = ([ - get_suggestion_from_model(s) for s in ( - suggestion_models.GeneralSuggestionModel - .get_in_review_translation_suggestions( - user_id, language_codes)) - ]) - elif suggestion_type == feconf.SUGGESTION_TYPE_ADD_QUESTION: - all_suggestions = ([ - get_suggestion_from_model(s) for s in ( - suggestion_models.GeneralSuggestionModel - .get_in_review_question_suggestions(user_id)) - ]) + contribution_rights = user_services.get_user_contribution_rights( + user_id) + language_codes = ( + contribution_rights.can_review_translation_for_language_codes) + + # No language means all languages. + if language is not None: + language_codes = [language] if language in language_codes else [] + + # The user cannot review any translations, so return early. 
+ if len(language_codes) == 0: + return [], offset + + in_review_translation_suggestions: Sequence[ + suggestion_models.GeneralSuggestionModel + ] = [] + next_offset = offset + if opportunity_summary_exp_ids is None: + in_review_translation_suggestions, next_offset = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_offset( + limit, + offset, + user_id, + sort_key, + language_codes)) + elif len(opportunity_summary_exp_ids) > 0: + in_review_translation_suggestions, next_offset = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_with_exp_ids_by_offset( + limit, + offset, + user_id, + sort_key, + language_codes, + opportunity_summary_exp_ids)) + + translation_suggestions = [] + for suggestion_model in in_review_translation_suggestions: + suggestion = get_suggestion_from_model(suggestion_model) + # Here, we are narrowing down the type from BaseSuggestion to + # SuggestionTranslateContent. + assert isinstance( + suggestion, suggestion_registry.SuggestionTranslateContent + ) + translation_suggestions.append(suggestion) - return all_suggestions + return translation_suggestions, next_offset -def get_question_suggestions_waiting_longest_for_review(): +def get_reviewable_question_suggestions_by_offset( + user_id: str, + limit: int, + offset: int, + sort_key: Optional[str] +) -> Tuple[List[suggestion_registry.SuggestionAddQuestion], int]: + """Returns a list of question suggestions which the user + can review. + + Args: + user_id: str. The ID of the user. + limit: int. The maximum number of results to return. + offset: int. The number of results to skip from the beginning of all + results matching the query. + sort_key: str|None. The key to sort the suggestions by. + + Returns: + Tuple of (results, next_offset). Where: + results: list(Suggestion). A list of question suggestions which + the given user is allowed to review. + next_offset: int. 
The input offset + the number of results returned + by the current query. + """ + suggestions, next_offset = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_question_suggestions_by_offset( + limit, offset, user_id, sort_key)) + + question_suggestions = [] + for suggestion_model in suggestions: + suggestion = get_suggestion_from_model(suggestion_model) + # Here, we are narrowing down the type from BaseSuggestion to + # SuggestionAddQuestion. + assert isinstance(suggestion, suggestion_registry.SuggestionAddQuestion) + question_suggestions.append(suggestion) + + return question_suggestions, next_offset + + +def get_question_suggestions_waiting_longest_for_review() -> List[ + suggestion_registry.SuggestionAddQuestion +]: """Returns MAX_QUESTION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS number of question suggestions, sorted in descending order by review wait time. @@ -627,15 +1112,24 @@ def get_question_suggestions_waiting_longest_for_review(): list(Suggestion). A list of question suggestions, sorted in descending order based on how long the suggestions have been waiting for review. """ - return [ - get_suggestion_from_model(suggestion_model) for suggestion_model in ( - suggestion_models.GeneralSuggestionModel + question_suggestion_models = ( + suggestion_models.GeneralSuggestionModel .get_question_suggestions_waiting_longest_for_review() - ) - ] + ) + + question_suggestion = [] + for suggestion_model in question_suggestion_models: + suggestion = get_suggestion_from_model(suggestion_model) + # Here, we are narrowing down the type from BaseSuggestion to + # SuggestionAddQuestion. 
+ assert isinstance(suggestion, suggestion_registry.SuggestionAddQuestion) + question_suggestion.append(suggestion) + return question_suggestion -def get_translation_suggestions_waiting_longest_for_review(language_code): +def get_translation_suggestions_waiting_longest_for_review( + language_code: str +) -> List[suggestion_registry.SuggestionTranslateContent]: """Returns MAX_TRANSLATION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS number of translation suggestions in the specified language code, sorted in descending order by review wait time. @@ -649,16 +1143,28 @@ def get_translation_suggestions_waiting_longest_for_review(language_code): descending order based on how long the suggestions have been waiting for review. """ - return [ - get_suggestion_from_model(suggestion_model) for suggestion_model in ( - suggestion_models.GeneralSuggestionModel + translation_suggestion_models = ( + suggestion_models.GeneralSuggestionModel .get_translation_suggestions_waiting_longest_for_review( language_code) + ) + + translation_suggestions = [] + for suggestion_model in translation_suggestion_models: + suggestion = get_suggestion_from_model(suggestion_model) + # Here, we are narrowing down the type from BaseSuggestion + # to SuggestionTranslateContent. + assert isinstance( + suggestion, suggestion_registry.SuggestionTranslateContent ) - ] + translation_suggestions.append(suggestion) + + return translation_suggestions -def get_translation_suggestions_in_review_by_exploration(exp_id, language_code): +def get_translation_suggestions_in_review_by_exploration( + exp_id: str, language_code: str +) -> List[suggestion_registry.BaseSuggestion]: """Returns translation suggestions in review by exploration ID. 
Args: @@ -674,13 +1180,98 @@ def get_translation_suggestions_in_review_by_exploration(exp_id, language_code): .get_translation_suggestions_in_review_with_exp_id( exp_id, language_code) ) + return [ + get_suggestion_from_model(model) + for model in suggestion_models_in_review + ] + + +def get_translation_suggestions_in_review_by_exp_ids( + exp_ids: List[str], language_code: str +) -> List[Optional[suggestion_registry.BaseSuggestion]]: + """Returns translation suggestions in review by exploration ID and language + code. + + Args: + exp_ids: list(str). Exploration IDs matching the target ID of the + translation suggestions. + language_code: str. The ISO 639-1 language code of the translation + suggestions. + + Returns: + list(Suggestion). A list of translation suggestions in review with + target_id in exp_ids and language_code == language_code, or None if + suggestion model does not exists. + """ + suggestion_models_in_review = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_exp_ids( + exp_ids, language_code) + ) return [ get_suggestion_from_model(model) if model else None for model in suggestion_models_in_review ] -def _get_plain_text_from_html_content_string(html_content_string): +def get_suggestions_with_translatable_explorations( + suggestions: Sequence[suggestion_registry.SuggestionTranslateContent] +) -> Sequence[suggestion_registry.SuggestionTranslateContent]: + """Filters the supplied suggestions for those suggestions that have + translatable exploration content. That is, the following are true: + - The suggestion's change content corresponds to an existing exploration + content card. + - The suggestion's corresponding exploration allows edits. + + Args: + suggestions: list(Suggestion). List of translation suggestions to + filter. + + Returns: + list(Suggestion). List of filtered translation suggestions. 
+ """ + + def _has_translatable_exploration( + suggestion: suggestion_registry.SuggestionTranslateContent, + suggestion_exp_id_to_exp: Dict[str, exp_domain.Exploration] + ) -> bool: + """Returns whether the supplied suggestion corresponds to a translatable + exploration content card. + + Args: + suggestion: Suggestion. Translation suggestion domain object to + check. + suggestion_exp_id_to_exp: dict(str, Exploration). Dictionary mapping + suggestion target exploration IDs to their corresponding + Exploration domain objects. + + Returns: + bool. Whether the supplied suggestion corresponds to a translatable + exploration content card. + """ + exploration = suggestion_exp_id_to_exp[suggestion.target_id] + content_id_exists = False + + # Checks whether the suggestion's change content still exists in the + # corresponding exploration. + # For more details, see https://github.com/oppia/oppia/issues/14339. + if suggestion.change.state_name in exploration.states: + content_id_exists = exploration.states[ + suggestion.change.state_name].has_content_id( + suggestion.change.content_id) + return content_id_exists and exploration.edits_allowed + + suggestion_exp_ids = { + suggestion.target_id for suggestion in suggestions} + suggestion_exp_id_to_exp = exp_fetchers.get_multiple_explorations_by_id( + list(suggestion_exp_ids)) + return list(filter( + lambda suggestion: _has_translatable_exploration( + suggestion, suggestion_exp_id_to_exp), + suggestions)) + + +def _get_plain_text_from_html_content_string(html_content_string: str) -> str: """Retrieves the plain text from the given html content string. RTE element occurrences in the html are replaced by their corresponding rte component name, capitalized in square brackets. @@ -696,7 +1287,7 @@ def _get_plain_text_from_html_content_string(html_content_string): str. The plain text string from the given html content string. 
""" - def _replace_rte_tag(rte_tag): + def _replace_rte_tag(rte_tag: Match[str]) -> str: """Replaces all of the tags with their corresponding rte component name in square brackets. @@ -714,13 +1305,18 @@ def _replace_rte_tag(rte_tag): # component is more than one word. rte_tag_name = re.search( r'oppia-noninteractive-(\w|-)+', rte_tag_string) + # Here, rte_tag_name is always going to exists because the string + # that was passed in this function is always going to contain + # `` substring. So, to just rule out the + # possibility of None for mypy type checking. we used assertion here. + assert rte_tag_name is not None # Retrieve the matched string from the MatchObject. rte_tag_name_string = rte_tag_name.group(0) # Get the name of the rte component. - rte_component_name_string = rte_tag_name_string.split('-')[2:] + rte_component_name_string_list = rte_tag_name_string.split('-')[2:] # If the component name is more than word, connect the words with spaces # to create a single string. - rte_component_name_string = ' '.join(rte_component_name_string) + rte_component_name_string = ' '.join(rte_component_name_string_list) # Captialize each word in the string. capitalized_rte_component_name_string = ( rte_component_name_string.title()) @@ -742,7 +1338,9 @@ def _replace_rte_tag(rte_tag): return plain_text_without_contiguous_whitespace -def create_reviewable_suggestion_email_info_from_suggestion(suggestion): +def create_reviewable_suggestion_email_info_from_suggestion( + suggestion: suggestion_registry.BaseSuggestion +) -> suggestion_registry.ReviewableSuggestionEmailInfo: """Creates an object with the key information needed to notify reviewers or admins that the given suggestion needs review. 
@@ -774,13 +1372,19 @@ def create_reviewable_suggestion_email_info_from_suggestion(suggestion): ) plain_text = _get_plain_text_from_html_content_string( get_html_representing_suggestion(suggestion)) + # Here, suggestion can only be of `translate_content` or `add_question` + # type and in both suggestions language_code cannot be None. So, to + # just narrow down type from Optional[str] to str we used assertion here. + assert suggestion.language_code is not None return suggestion_registry.ReviewableSuggestionEmailInfo( suggestion.suggestion_type, suggestion.language_code, plain_text, suggestion.last_updated ) -def get_suggestions_waiting_for_review_info_to_notify_reviewers(reviewer_ids): +def get_suggestions_waiting_for_review_info_to_notify_reviewers( + reviewer_ids: List[str] +) -> List[List[suggestion_registry.ReviewableSuggestionEmailInfo]]: """For each user, returns information that will be used to notify reviewers about the suggestions waiting longest for review, that the reviewer has permissions to review. @@ -817,7 +1421,9 @@ def get_suggestions_waiting_for_review_info_to_notify_reviewers(reviewer_ids): # Use a min heap because then the suggestions that have been waiting the # longest for review (earliest review submission date) are automatically # efficiently sorted. - suggestions_waiting_longest_heap = [] + suggestions_waiting_longest_heap: List[ + Tuple[datetime.datetime, suggestion_registry.BaseSuggestion] + ] = [] if user_contribution_rights.can_review_questions: for question_suggestion in question_suggestions: # Break early because we only want the top @@ -829,8 +1435,7 @@ def get_suggestions_waiting_for_review_info_to_notify_reviewers(reviewer_ids): # We can't include suggestions that were authored by the # reviewer because reviewers aren't allowed to review their own # suggestions. 
- elif question_suggestion.author_id != ( - user_contribution_rights.id): + if question_suggestion.author_id != user_contribution_rights.id: heapq.heappush(suggestions_waiting_longest_heap, ( question_suggestion.last_updated, question_suggestion)) @@ -890,7 +1495,9 @@ def get_suggestions_waiting_for_review_info_to_notify_reviewers(reviewer_ids): return reviewers_reviewable_suggestion_infos -def get_submitted_suggestions(user_id, suggestion_type): +def get_submitted_suggestions( + user_id: str, suggestion_type: str +) -> List[suggestion_registry.BaseSuggestion]: """Returns a list of suggestions of given suggestion_type which the user has submitted. @@ -910,7 +1517,82 @@ def get_submitted_suggestions(user_id, suggestion_type): ]) -def get_info_about_suggestions_waiting_too_long_for_review(): +@overload +def get_submitted_suggestions_by_offset( + user_id: str, + suggestion_type: Literal['add_question'], + limit: int, + offset: int, + sort_key: Optional[str] +) -> Tuple[ + Sequence[suggestion_registry.SuggestionAddQuestion], int +]: ... + + +@overload +def get_submitted_suggestions_by_offset( + user_id: str, + suggestion_type: Literal['translate_content'], + limit: int, + offset: int, + sort_key: Optional[str] +) -> Tuple[ + Sequence[suggestion_registry.SuggestionTranslateContent], int +]: ... + + +@overload +def get_submitted_suggestions_by_offset( + user_id: str, + suggestion_type: str, + limit: int, + offset: int, + sort_key: Optional[str] +) -> Tuple[Sequence[suggestion_registry.BaseSuggestion], int]: ... + + +def get_submitted_suggestions_by_offset( + user_id: str, + suggestion_type: str, + limit: int, + offset: int, + sort_key: Optional[str] +) -> Tuple[Sequence[suggestion_registry.BaseSuggestion], int]: + """Returns a list of suggestions of given suggestion_type which the user + has submitted. + + Args: + user_id: str. The ID of the user. + suggestion_type: str. The type of suggestion. + limit: int. The maximum number of results to return. + offset: int. 
The number of results to skip from the beginning + of all results matching the query. + sort_key: str|None. The key to sort the suggestions by. + + Returns: + Tuple of (results, next_offset). Where: + results: list(Suggestion). A list of suggestions of the supplied + type which the supplied user has submitted. + next_offset: int. The input offset + the number of results returned + by the current query. + """ + submitted_suggestion_models, next_offset = ( + suggestion_models.GeneralSuggestionModel + .get_user_created_suggestions_by_offset( + limit, + offset, + suggestion_type, + user_id, + sort_key)) + suggestions = ([ + get_suggestion_from_model(s) for s in submitted_suggestion_models + ]) + return suggestions, next_offset + + +def get_info_about_suggestions_waiting_too_long_for_review() -> List[ + suggestion_registry.ReviewableSuggestionEmailInfo +]: """Gets the information about the suggestions that have been waiting longer than suggestion_models.SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS days for a review on the Contributor Dashboard. There can be information about at @@ -939,7 +1621,9 @@ def get_info_about_suggestions_waiting_too_long_for_review(): ] -def get_user_proficiency_from_model(user_proficiency_model): +def get_user_proficiency_from_model( + user_proficiency_model: user_models.UserContributionProficiencyModel +) -> user_domain.UserContributionProficiency: """Converts the given UserContributionProficiencyModel to a UserContributionProficiency domain object. @@ -959,7 +1643,9 @@ def get_user_proficiency_from_model(user_proficiency_model): ) -def _update_user_proficiency(user_proficiency): +def _update_user_proficiency( + user_proficiency: user_domain.UserContributionProficiency +) -> None: """Updates the user_proficiency. 
Args: @@ -987,7 +1673,7 @@ def _update_user_proficiency(user_proficiency): user_proficiency.score, user_proficiency.onboarding_email_sent) -def get_all_scores_of_user(user_id): +def get_all_scores_of_user(user_id: str) -> Dict[str, int]: """Gets all scores for a given user. Args: @@ -1006,7 +1692,9 @@ def get_all_scores_of_user(user_id): return scores -def can_user_review_category(user_id, score_category): +def can_user_review_category( + user_id: str, score_category: str +) -> bool: """Checks if user can review suggestions in category score_category. If the user has score above the minimum required score, then the user is allowed to review. @@ -1023,7 +1711,9 @@ def can_user_review_category(user_id, score_category): return user_proficiency.can_user_review_category() -def get_all_user_ids_who_are_allowed_to_review(score_category): +def get_all_user_ids_who_are_allowed_to_review( + score_category: str +) -> List[str]: """Gets all user_ids of users who are allowed to review (as per their scores) suggestions to a particular category. @@ -1040,7 +1730,9 @@ def get_all_user_ids_who_are_allowed_to_review(score_category): ] -def _get_user_proficiency(user_id, score_category): +def _get_user_proficiency( + user_id: str, score_category: str +) -> user_domain.UserContributionProficiency: """Gets the user proficiency model from storage and creates the corresponding user proficiency domain object if the model exists. If the model does not exist a user proficiency domain object with the given @@ -1064,7 +1756,7 @@ def _get_user_proficiency(user_id, score_category): user_id, score_category, 0, False) -def check_can_resubmit_suggestion(suggestion_id, user_id): +def check_can_resubmit_suggestion(suggestion_id: str, user_id: str) -> bool: """Checks whether the given user can resubmit the suggestion. 
Args: @@ -1080,57 +1772,11 @@ def check_can_resubmit_suggestion(suggestion_id, user_id): return suggestion.author_id == user_id -def _get_voiceover_application_class(target_type): - """Returns the voiceover application class for a given target type. - - Args: - target_type: str. The target type of the voiceover application. - - Returns: - class. The voiceover application class for the given target type. - - Raises: - Exception. The voiceover application target type is invalid. - """ - target_type_to_classes = ( - suggestion_registry.VOICEOVER_APPLICATION_TARGET_TYPE_TO_DOMAIN_CLASSES) - if target_type in target_type_to_classes: - return target_type_to_classes[target_type] - else: - raise Exception( - 'Invalid target type for voiceover application: %s' % target_type) - - -def get_voiceover_application(voiceover_application_id): - """Returns the BaseVoiceoverApplication object for the give - voiceover application model object. - - Args: - voiceover_application_id: str. The ID of the voiceover application. - - Returns: - BaseVoiceoverApplication. The domain object out of the given voiceover - application model object. 
- """ - voiceover_application_model = ( - suggestion_models.GeneralVoiceoverApplicationModel.get_by_id( - voiceover_application_id)) - voiceover_application_class = _get_voiceover_application_class( - voiceover_application_model.target_type) - return voiceover_application_class( - voiceover_application_model.id, - voiceover_application_model.target_id, - voiceover_application_model.status, - voiceover_application_model.author_id, - voiceover_application_model.final_reviewer_id, - voiceover_application_model.language_code, - voiceover_application_model.filename, - voiceover_application_model.content, - voiceover_application_model.rejection_message) - - def create_community_contribution_stats_from_model( - community_contribution_stats_model): + community_contribution_stats_model: ( + suggestion_models.CommunityContributionStatsModel + ) +) -> suggestion_registry.CommunityContributionStats: """Creates a domain object that represents the community contribution stats from the model given. Note that each call to this function returns a new domain object, but the data copied into the domain object comes from @@ -1158,7 +1804,8 @@ def create_community_contribution_stats_from_model( ) -def get_community_contribution_stats(): +def get_community_contribution_stats( +) -> suggestion_registry.CommunityContributionStats: """Gets the CommunityContributionStatsModel and converts it into the corresponding domain object that represents the community contribution stats. Note that there is only ever one instance of this model and if the @@ -1177,7 +1824,10 @@ def get_community_contribution_stats(): def create_translation_contribution_stats_from_model( - translation_contribution_stats_model): + translation_contribution_stats_model: ( + suggestion_models.TranslationContributionStatsModel + ) +) -> suggestion_registry.TranslationContributionStats: """Creates a domain object representing the supplied TranslationContributionStatsModel. 
@@ -1203,11 +1853,13 @@ def create_translation_contribution_stats_from_model( translation_contribution_stats_model.accepted_translation_word_count, translation_contribution_stats_model.rejected_translations_count, translation_contribution_stats_model.rejected_translation_word_count, - translation_contribution_stats_model.contribution_dates + set(translation_contribution_stats_model.contribution_dates) ) -def get_all_translation_contribution_stats(user_id): +def get_all_translation_contribution_stats( + user_id: str +) -> List[suggestion_registry.TranslationContributionStats]: """Gets all TranslationContributionStatsModels corresponding to the supplied user and converts them to their corresponding domain objects. @@ -1229,7 +1881,7 @@ def get_all_translation_contribution_stats(user_id): ] -def get_suggestion_types_that_need_reviewers(): +def get_suggestion_types_that_need_reviewers() -> Dict[str, Set[str]]: """Uses the community contribution stats to determine which suggestion types need more reviewers. Suggestion types need more reviewers if the number of suggestions in that type divided by the number of reviewers is @@ -1246,7 +1898,7 @@ def get_suggestion_types_that_need_reviewers(): language codes of the translation suggestions that need more reviewers. 
""" - suggestion_types_needing_reviewers = {} + suggestion_types_needing_reviewers: Dict[str, Set[str]] = {} stats = get_community_contribution_stats() language_codes_that_need_reviewers = ( @@ -1260,14 +1912,15 @@ def get_suggestion_types_that_need_reviewers(): if stats.are_question_reviewers_needed(): suggestion_types_needing_reviewers[ - feconf.SUGGESTION_TYPE_ADD_QUESTION] = {} + feconf.SUGGESTION_TYPE_ADD_QUESTION] = set() return suggestion_types_needing_reviewers @transaction_services.run_in_transaction_wrapper def _update_suggestion_counts_in_community_contribution_stats_transactional( - suggestions, amount): + suggestions: List[suggestion_registry.BaseSuggestion], amount: int +) -> None: """Updates the community contribution stats counts associated with the given suggestions by the given amount. Note that this method should only ever be called in a transaction. @@ -1312,7 +1965,8 @@ def _update_suggestion_counts_in_community_contribution_stats_transactional( def _update_suggestion_counts_in_community_contribution_stats( - suggestions, amount): + suggestions: Sequence[suggestion_registry.BaseSuggestion], amount: int +) -> None: """Updates the community contribution stats counts associated with the given suggestions by the given amount. The GET and PUT is done in a single transaction to avoid loss of updates that come in rapid succession. @@ -1328,17 +1982,28 @@ def _update_suggestion_counts_in_community_contribution_stats( suggestions, amount) -def update_translation_suggestion(suggestion_id, translation_html): +def update_translation_suggestion( + suggestion_id: str, translation_html: str +) -> None: """Updates the translation_html of a suggestion with the given suggestion_id. Args: suggestion_id: str. The id of the suggestion to be updated. translation_html: str. The new translation_html string. + + Raises: + Exception. Expected SuggestionTranslateContent suggestion but found + different suggestion. 
""" suggestion = get_suggestion_by_id(suggestion_id) - - # Clean the translation HTML if not a list of strings. + if not isinstance( + suggestion, suggestion_registry.SuggestionTranslateContent + ): + raise Exception( + 'Expected SuggestionTranslateContent suggestion but found: %s.' + % type(suggestion).__name__ + ) suggestion.change.translation_html = ( html_cleaner.clean(translation_html) if isinstance(translation_html, str) @@ -1350,7 +2015,11 @@ def update_translation_suggestion(suggestion_id, translation_html): def update_question_suggestion( - suggestion_id, skill_difficulty, question_state_data): + suggestion_id: str, + skill_difficulty: float, + question_state_data: state_domain.StateDict, + next_content_id_index: int +) -> Optional[suggestion_registry.BaseSuggestion]: """Updates skill_difficulty and question_state_data of a suggestion with the given suggestion_id. @@ -1358,31 +2027,47 @@ def update_question_suggestion( suggestion_id: str. The id of the suggestion to be updated. skill_difficulty: double. The difficulty level of the question. question_state_data: obj. Details of the question. + next_content_id_index: int. The next content Id index for the question's + content. Returns: Suggestion|None. The corresponding suggestion, or None if no suggestion is found. + + Raises: + Exception. Expected SuggestionAddQuestion suggestion but found + different suggestion. 
""" suggestion = get_suggestion_by_id(suggestion_id) - new_change_obj = question_domain.QuestionSuggestionChange( - { - 'cmd': suggestion.change.cmd, - 'question_dict': { - 'question_state_data': question_state_data, - 'language_code': suggestion.change.question_dict[ - 'language_code'], - 'question_state_data_schema_version': ( - suggestion.change.question_dict[ - 'question_state_data_schema_version']), - 'linked_skill_ids': suggestion.change.question_dict[ - 'linked_skill_ids'], - 'inapplicable_skill_misconception_ids': ( - suggestion.change.question_dict[ - 'inapplicable_skill_misconception_ids']) - }, - 'skill_id': suggestion.change.skill_id, - 'skill_difficulty': skill_difficulty - }) + if not isinstance( + suggestion, suggestion_registry.SuggestionAddQuestion + ): + raise Exception( + 'Expected SuggestionAddQuestion suggestion but found: %s.' + % type(suggestion).__name__ + ) + question_dict = suggestion.change.question_dict + new_change_obj = ( + question_domain.CreateNewFullySpecifiedQuestionSuggestionCmd( + { + 'cmd': suggestion.change.cmd, + 'question_dict': { + 'question_state_data': question_state_data, + 'language_code': question_dict['language_code'], + 'question_state_data_schema_version': ( + question_dict[ + 'question_state_data_schema_version']), + 'linked_skill_ids': question_dict['linked_skill_ids'], + 'inapplicable_skill_misconception_ids': ( + suggestion.change.question_dict[ + 'inapplicable_skill_misconception_ids']), + 'next_content_id_index': next_content_id_index + }, + 'skill_id': suggestion.change.skill_id, + 'skill_difficulty': skill_difficulty + } + ) + ) suggestion.pre_update_validate(new_change_obj) suggestion.edited_by_reviewer = True suggestion.change = new_change_obj @@ -1390,3 +2075,1120 @@ def update_question_suggestion( _update_suggestion(suggestion) return suggestion + + +def _create_translation_review_stats_from_model( + translation_review_stats_model: ( + suggestion_models.TranslationReviewStatsModel + ) +) -> 
suggestion_registry.TranslationReviewStats: + """Creates a domain object representing the supplied + TranslationReviewStatsModel. + + Args: + translation_review_stats_model: TranslationReviewStatsModel. + The model to convert to a domain object. + + Returns: + TranslationReviewStats. The corresponding TranslationReviewStats domain + object. + """ + return suggestion_registry.TranslationReviewStats( + translation_review_stats_model.language_code, + translation_review_stats_model.reviewer_user_id, + translation_review_stats_model.topic_id, + translation_review_stats_model.reviewed_translations_count, + translation_review_stats_model.reviewed_translation_word_count, + translation_review_stats_model.accepted_translations_count, + translation_review_stats_model.accepted_translation_word_count, + ( + translation_review_stats_model + .accepted_translations_with_reviewer_edits_count), + translation_review_stats_model.first_contribution_date, + translation_review_stats_model.last_contribution_date + ) + + +def _create_question_contribution_stats_from_model( + question_contribution_stats_model: ( + suggestion_models.QuestionContributionStatsModel + ) +) -> suggestion_registry.QuestionContributionStats: + """Creates a domain object representing the supplied + QuestionContributionStatsModel. + + Args: + question_contribution_stats_model: QuestionContributionStatsModel. + The model to convert to a domain object. + + Returns: + QuestionContributionStats. The corresponding QuestionContributionStats + domain object. 
+ """ + return suggestion_registry.QuestionContributionStats( + question_contribution_stats_model.contributor_user_id, + question_contribution_stats_model.topic_id, + question_contribution_stats_model.submitted_questions_count, + question_contribution_stats_model.accepted_questions_count, + ( + question_contribution_stats_model + .accepted_questions_without_reviewer_edits_count), + question_contribution_stats_model.first_contribution_date, + question_contribution_stats_model.last_contribution_date + ) + + +def _create_question_review_stats_from_model( + question_review_stats_model: ( + suggestion_models.QuestionReviewStatsModel + ) +) -> suggestion_registry.QuestionReviewStats: + """Creates a domain object representing the supplied + QuestionReviewStatsModel. + + Args: + question_review_stats_model: QuestionReviewStatsModel. + The model to convert to a domain object. + + Returns: + QuestionReviewStats. The corresponding QuestionReviewStats domain + object. + """ + return suggestion_registry.QuestionReviewStats( + question_review_stats_model.reviewer_user_id, + question_review_stats_model.topic_id, + question_review_stats_model.reviewed_questions_count, + question_review_stats_model.accepted_questions_count, + ( + question_review_stats_model + .accepted_questions_with_reviewer_edits_count), + question_review_stats_model.first_contribution_date, + question_review_stats_model.last_contribution_date + ) + + +def get_all_translation_review_stats( + user_id: str +) -> List[suggestion_registry.TranslationReviewStats]: + """Gets all TranslationReviewStatsModels corresponding to the supplied + user and converts them to their corresponding domain objects. + + Args: + user_id: str. User ID. + + Returns: + list(TranslationReviewStats). TranslationReviewStats domain objects + corresponding to the supplied user. 
+    """
+    translation_review_stats_models = (
+        suggestion_models.TranslationReviewStatsModel.get_all_by_user_id(
+            user_id
+        )
+    )
+    return [
+        _create_translation_review_stats_from_model(model)
+        for model in translation_review_stats_models
+    ]
+
+
+def get_all_question_contribution_stats(
+    user_id: str
+) -> List[suggestion_registry.QuestionContributionStats]:
+    """Gets all QuestionContributionStatsModels corresponding to the supplied
+    user and converts them to their corresponding domain objects.
+
+    Args:
+        user_id: str. User ID.
+
+    Returns:
+        list(QuestionContributionStats). QuestionContributionStats domain
+        objects corresponding to the supplied user.
+    """
+    question_contribution_stats_models = (
+        suggestion_models.QuestionContributionStatsModel.get_all_by_user_id(
+            user_id
+        )
+    )
+    return [
+        _create_question_contribution_stats_from_model(model)
+        for model in question_contribution_stats_models
+    ]
+
+
+def get_all_question_review_stats(
+    user_id: str
+) -> List[suggestion_registry.QuestionReviewStats]:
+    """Gets all QuestionReviewStatsModels corresponding to the supplied
+    user and converts them to their corresponding domain objects.
+
+    Args:
+        user_id: str. User ID.
+
+    Returns:
+        list(QuestionReviewStats). QuestionReviewStats domain objects
+        corresponding to the supplied user.
+    """
+    question_review_stats_models = (
+        suggestion_models.QuestionReviewStatsModel.get_all_by_user_id(
+            user_id
+        )
+    )
+    return [
+        _create_question_review_stats_from_model(model)
+        for model in question_review_stats_models
+    ]
+
+
+# TODO(#16019): Pre-fetching and caching of stats data should be done.
+def get_all_contributor_stats(
+    user_id: str
+) -> suggestion_registry.ContributorStatsSummary:
+    """Gets ContributorStatsSummary corresponding to the supplied user.
+
+    Args:
+        user_id: str. User ID.
+
+    Returns:
+        ContributorStatsSummary. ContributorStatsSummary domain object
+        corresponding to the supplied user.
+ """ + translation_contribution_stats = get_all_translation_contribution_stats( + user_id) + translation_review_stats = get_all_translation_review_stats(user_id) + question_contribution_stats = get_all_question_contribution_stats(user_id) + question_review_stats = get_all_question_review_stats(user_id) + + return suggestion_registry.ContributorStatsSummary( + user_id, + translation_contribution_stats, + question_contribution_stats, + translation_review_stats, + question_review_stats) + + +def _update_translation_contribution_stats_models( + translation_contribution_stats: List[ + suggestion_registry.TranslationContributionStats + ] +) -> None: + """Updates TranslationContributionStatsModel models for given translation + contribution stats. + + Args: + translation_contribution_stats: list(TranslationContributionStats). + A list of TranslationContributionStats domain objects. + + Raises: + Exception. Language is None. + Exception. Contributor user ID is None. + Exception. Topic ID is None. 
+ """ + stats_dict = {} + for stat in translation_contribution_stats: + if stat.language_code is None: + raise Exception('Language code should not be None.') + if stat.contributor_user_id is None: + raise Exception('Contributor user ID should not be None.') + if stat.topic_id is None: + raise Exception('Topic ID should not be None.') + stat_id = ( + suggestion_models.TranslationContributionStatsModel.construct_id( + stat.language_code, + stat.contributor_user_id, + stat.topic_id) + ) + stats_dict[stat_id] = stat + + stats_ids = stats_dict.keys() + + stats_models = get_translation_contribution_stats_models(list(stats_ids)) + stats_models_to_update: List[ + suggestion_models.TranslationContributionStatsModel] = [] + for stats_model in stats_models: + stat = stats_dict[stats_model.id] + stats_model.submitted_translations_count = ( + stat.submitted_translations_count) + stats_model.submitted_translation_word_count = ( + stat.submitted_translation_word_count) + stats_model.accepted_translations_count = ( + stat.accepted_translations_count) + stats_model.accepted_translations_without_reviewer_edits_count = ( + stat.accepted_translations_without_reviewer_edits_count) + stats_model.accepted_translation_word_count = ( + stat.accepted_translation_word_count) + stats_model.rejected_translations_count = ( + stat.rejected_translations_count) + stats_model.rejected_translation_word_count = ( + stat.rejected_translation_word_count) + stats_model.contribution_dates = stat.contribution_dates + stats_models_to_update.append(stats_model) + + suggestion_models.TranslationContributionStatsModel.update_timestamps_multi( + stats_models_to_update, + update_last_updated_time=True) + suggestion_models.TranslationContributionStatsModel.put_multi( + stats_models_to_update) + + +def _update_translation_review_stats_models( + translation_review_stats: List[ + suggestion_registry.TranslationReviewStats + ] +) -> None: + """Updates TranslationReviewStatsModel models for given translation + review 
 stats.
+
+    Args:
+        translation_review_stats: list(TranslationReviewStats). A list of
+            TranslationReviewStats domain objects.
+    """
+    stats_dict = {}
+    for stat in translation_review_stats:
+        stat_id = suggestion_models.TranslationReviewStatsModel.construct_id(
+            stat.language_code, stat.contributor_user_id, stat.topic_id)
+        stats_dict[stat_id] = stat
+
+    stats_ids = stats_dict.keys()
+
+    stats_models = get_translation_review_stats_models(list(stats_ids))
+    stats_models_to_update: List[
+        suggestion_models.TranslationReviewStatsModel] = []
+    for stats_model in stats_models:
+        stat = stats_dict[stats_model.id]
+        stats_model.reviewed_translations_count = (
+            stat.reviewed_translations_count)
+        stats_model.reviewed_translation_word_count = (
+            stat.reviewed_translation_word_count)
+        stats_model.accepted_translations_count = (
+            stat.accepted_translations_count)
+        stats_model.accepted_translation_word_count = (
+            stat.accepted_translation_word_count)
+        stats_model.accepted_translations_with_reviewer_edits_count = (
+            stat.accepted_translations_with_reviewer_edits_count)
+        stats_model.first_contribution_date = (
+            stat.first_contribution_date)
+        stats_model.last_contribution_date = (
+            stat.last_contribution_date)
+        stats_models_to_update.append(stats_model)
+
+    suggestion_models.TranslationReviewStatsModel.update_timestamps_multi(
+        stats_models_to_update,
+        update_last_updated_time=True)
+    suggestion_models.TranslationReviewStatsModel.put_multi(
+        stats_models_to_update)
+
+
+def _update_question_contribution_stats_models(
+    question_contribution_stats: List[
+        suggestion_registry.QuestionContributionStats
+    ]
+) -> None:
+    """Updates QuestionContributionStatsModel models for given question
+    contribution stats.
+
+    Args:
+        question_contribution_stats: list(QuestionContributionStats). A list of
+            QuestionContributionStats domain objects.
+ """ + stats_dict = {} + for stat in question_contribution_stats: + stat_id = suggestion_models.QuestionContributionStatsModel.construct_id( + stat.contributor_user_id, stat.topic_id) + stats_dict[stat_id] = stat + + stats_ids = stats_dict.keys() + + stats_models = get_question_contribution_stats_models(list(stats_ids)) + stats_models_to_update: List[ + suggestion_models.QuestionContributionStatsModel] = [] + for stats_model in stats_models: + stat = stats_dict[stats_model.id] + stats_model.submitted_questions_count = ( + stat.submitted_questions_count) + stats_model.accepted_questions_count = ( + stat.accepted_questions_count) + stats_model.accepted_questions_without_reviewer_edits_count = ( + stat.accepted_questions_without_reviewer_edits_count) + stats_model.first_contribution_date = stat.first_contribution_date + stats_model.last_contribution_date = stat.last_contribution_date + stats_models_to_update.append(stats_model) + + suggestion_models.QuestionContributionStatsModel.update_timestamps_multi( + stats_models_to_update, + update_last_updated_time=True) + suggestion_models.QuestionContributionStatsModel.put_multi( + stats_models_to_update) + + +def _update_question_review_stats_models( + question_review_stats: List[ + suggestion_registry.QuestionReviewStats + ] +) -> None: + """Updates QuestionReviewStatsModel models for given question + review stats. + + Args: + question_review_stats: list(QuestionReviewStats). A list of + QuestionReviewStats domain objects. 
+ """ + stats_dict = {} + for stat in question_review_stats: + stat_id = suggestion_models.QuestionReviewStatsModel.construct_id( + stat.contributor_user_id, stat.topic_id) + stats_dict[stat_id] = stat + + stats_ids = stats_dict.keys() + + stats_models = get_question_review_stats_models(list(stats_ids)) + stats_models_to_update: List[ + suggestion_models.QuestionReviewStatsModel] = [] + for stats_model in stats_models: + stat = stats_dict[stats_model.id] + stats_model.reviewed_questions_count = ( + stat.reviewed_questions_count) + stats_model.accepted_questions_count = ( + stat.accepted_questions_count) + stats_model.accepted_questions_with_reviewer_edits_count = ( + stat.accepted_questions_with_reviewer_edits_count) + stats_model.first_contribution_date = stat.first_contribution_date + stats_model.last_contribution_date = stat.last_contribution_date + stats_models_to_update.append(stats_model) + + suggestion_models.QuestionReviewStatsModel.update_timestamps_multi( + stats_models_to_update, + update_last_updated_time=True) + suggestion_models.QuestionReviewStatsModel.put_multi( + stats_models_to_update) + + +def update_translation_contribution_stats_at_submission( + suggestion: suggestion_registry.BaseSuggestion +) -> None: + """Creates/updates TranslationContributionStatsModel model for + given translation submitter when a translation is submitted. + + Args: + suggestion: Suggestion. The suggestion domain object that is being + submitted. + """ + content_word_count = 0 + exp_opportunity = ( + opportunity_services.get_exploration_opportunity_summary_by_id( + suggestion.target_id)) + # We can confirm that exp_opportunity will not be None since there should + # be an assigned opportunity for a given translation. Hence we can rule out + # the possibility of None for mypy type checking. 
+ assert exp_opportunity is not None + topic_id = exp_opportunity.topic_id + + if isinstance(suggestion.change.translation_html, list): + for content in suggestion.change.translation_html: + content_plain_text = html_cleaner.strip_html_tags(content) + content_word_count += len(content_plain_text.split()) + else: + content_plain_text = html_cleaner.strip_html_tags( + suggestion.change.translation_html) + content_word_count = len(content_plain_text.split()) + + translation_contribution_stat_model = ( + suggestion_models.TranslationContributionStatsModel.get( + suggestion.change.language_code, suggestion.author_id, topic_id + )) + + if translation_contribution_stat_model is None: + suggestion_models.TranslationContributionStatsModel.create( + language_code=suggestion.change.language_code, + contributor_user_id=suggestion.author_id, + topic_id=topic_id, + submitted_translations_count=1, + submitted_translation_word_count=content_word_count, + accepted_translations_count=0, + accepted_translations_without_reviewer_edits_count=0, + accepted_translation_word_count=0, + rejected_translations_count=0, + rejected_translation_word_count=0, + contribution_dates=[suggestion.last_updated.date()] + ) + else: + translation_contribution_stat = ( + create_translation_contribution_stats_from_model( + translation_contribution_stat_model)) + + translation_contribution_stat.submitted_translations_count += 1 + translation_contribution_stat.submitted_translation_word_count += ( + content_word_count) + translation_contribution_stat.contribution_dates.add( + suggestion.last_updated.date()) + + _update_translation_contribution_stats_models( + [translation_contribution_stat]) + + +def update_translation_contribution_stats_at_review( + suggestion: suggestion_registry.BaseSuggestion +) -> None: + """Creates/updates TranslationContributionStatsModel model for + given translation submitter when a translation is reviewed. + + Args: + suggestion: Suggestion. 
The suggestion domain object that is being + reviewed. + """ + content_word_count = 0 + exp_opportunity = ( + opportunity_services.get_exploration_opportunity_summary_by_id( + suggestion.target_id)) + # We can confirm that exp_opportunity will not be None since there should + # be an assigned opportunity for a given translation. Hence we can rule out + # the possibility of None for mypy type checking. + assert exp_opportunity is not None + topic_id = exp_opportunity.topic_id + + if isinstance(suggestion.change.translation_html, list): + for content in suggestion.change.translation_html: + content_plain_text = html_cleaner.strip_html_tags(content) + content_word_count += len(content_plain_text.split()) + else: + content_plain_text = html_cleaner.strip_html_tags( + suggestion.change.translation_html) + content_word_count = len(content_plain_text.split()) + + suggestion_is_accepted = ( + suggestion.status == suggestion_models.STATUS_ACCEPTED + ) + + translation_contribution_stat_model = ( + suggestion_models.TranslationContributionStatsModel.get( + suggestion.change.language_code, suggestion.author_id, topic_id + )) + + if translation_contribution_stat_model is None: + accepted_translations_count = 0 + accepted_translation_word_count = 0 + rejected_translations_count = 0 + rejected_translation_word_count = 0 + accepted_translations_without_reviewer_edits_count = 0 + + if suggestion_is_accepted: + accepted_translations_count += 1 + accepted_translation_word_count += content_word_count + else: + rejected_translations_count += 1 + rejected_translation_word_count += content_word_count + if suggestion_is_accepted and not suggestion.edited_by_reviewer: + accepted_translations_without_reviewer_edits_count += 1 + + suggestion_models.TranslationContributionStatsModel.create( + language_code=suggestion.change.language_code, + contributor_user_id=suggestion.author_id, + topic_id=topic_id, + submitted_translations_count=1, + submitted_translation_word_count=content_word_count, + 
accepted_translations_count=accepted_translations_count, + accepted_translations_without_reviewer_edits_count=( + accepted_translations_without_reviewer_edits_count), + accepted_translation_word_count=accepted_translation_word_count, + rejected_translations_count=rejected_translations_count, + rejected_translation_word_count=rejected_translation_word_count, + contribution_dates=[suggestion.last_updated.date()] + ) + else: + translation_contribution_stat = ( + create_translation_contribution_stats_from_model( + translation_contribution_stat_model)) + + increment_translation_contribution_stats_at_review( + translation_contribution_stat, content_word_count, + suggestion_is_accepted, suggestion.edited_by_reviewer) + _update_translation_contribution_stats_models( + [translation_contribution_stat]) + + +def update_translation_review_stats( + suggestion: suggestion_registry.BaseSuggestion +) -> None: + """Creates/updates TranslationReviewStatsModel model for given translation + reviewer when a translation is reviewed. + + Args: + suggestion: Suggestion. The suggestion domain object that is being + reviewed. + + Raises: + Exception. The final_reviewer_id of the suggestion should not be None. + """ + content_word_count = 0 + if suggestion.final_reviewer_id is None: + raise Exception( + 'The final_reviewer_id in the suggestion should not be None.' + ) + exp_opportunity = ( + opportunity_services.get_exploration_opportunity_summary_by_id( + suggestion.target_id)) + # We can confirm that exp_opportunity will not be None since there should + # be an assigned opportunity for a given translation. Hence we can rule out + # the possibility of None for mypy type checking. 
+ assert exp_opportunity is not None + topic_id = exp_opportunity.topic_id + suggestion_is_accepted = ( + suggestion.status == suggestion_models.STATUS_ACCEPTED + ) + + if isinstance(suggestion.change.translation_html, list): + for content in suggestion.change.translation_html: + content_plain_text = html_cleaner.strip_html_tags(content) + content_word_count += len(content_plain_text.split()) + else: + content_plain_text = html_cleaner.strip_html_tags( + suggestion.change.translation_html) + content_word_count = len(content_plain_text.split()) + + translation_review_stat_model = ( + # This function is called when reviewing a translation and hence + # final_reviewer_id should not be None when the suggestion is + # up-to-date. + suggestion_models.TranslationReviewStatsModel.get( + suggestion.change.language_code, suggestion.final_reviewer_id, + topic_id + )) + + if translation_review_stat_model is None: + # This function is called when reviewing a translation and hence + # final_reviewer_id should not be None when the suggestion is + # up-to-date. 
+ accepted_translations_count = 0 + accepted_translations_with_reviewer_edits_count = 0 + accepted_translation_word_count = 0 + if suggestion_is_accepted: + accepted_translations_count += 1 + accepted_translation_word_count = content_word_count + if suggestion_is_accepted and suggestion.edited_by_reviewer: + accepted_translations_with_reviewer_edits_count += 1 + suggestion_models.TranslationReviewStatsModel.create( + language_code=suggestion.change.language_code, + reviewer_user_id=suggestion.final_reviewer_id, + topic_id=topic_id, + reviewed_translations_count=1, + reviewed_translation_word_count=content_word_count, + accepted_translations_count=accepted_translations_count, + accepted_translations_with_reviewer_edits_count=( + accepted_translations_with_reviewer_edits_count), + accepted_translation_word_count=accepted_translation_word_count, + first_contribution_date=suggestion.last_updated.date(), + last_contribution_date=suggestion.last_updated.date() + ) + else: + translation_review_stat = ( + _create_translation_review_stats_from_model( + translation_review_stat_model)) + + increment_translation_review_stats( + translation_review_stat, content_word_count, + suggestion.last_updated, suggestion_is_accepted, + suggestion.edited_by_reviewer + ) + _update_translation_review_stats_models([translation_review_stat]) + + update_translation_contribution_stats_at_review(suggestion) + + +def update_question_contribution_stats_at_submission( + suggestion: suggestion_registry.BaseSuggestion +) -> None: + """Creates/updates QuestionContributionStatsModel model for given question + submitter when a question is submitted. + + Args: + suggestion: Suggestion. The suggestion domain object that is being + submitted. 
+ """ + for topic in skill_services.get_all_topic_assignments_for_skill( + suggestion.target_id): + question_contribution_stat_model = ( + suggestion_models.QuestionContributionStatsModel.get( + suggestion.author_id, topic.topic_id + )) + + if question_contribution_stat_model is None: + suggestion_models.QuestionContributionStatsModel.create( + contributor_user_id=suggestion.author_id, + topic_id=topic.topic_id, + submitted_questions_count=1, + accepted_questions_count=0, + accepted_questions_without_reviewer_edits_count=0, + first_contribution_date=suggestion.last_updated.date(), + last_contribution_date=suggestion.last_updated.date() + ) + continue + + question_contribution_stat = ( + _create_question_contribution_stats_from_model( + question_contribution_stat_model)) + + question_contribution_stat.submitted_questions_count += 1 + question_contribution_stat.last_contribution_date = ( + suggestion.last_updated.date()) + _update_question_contribution_stats_models( + [question_contribution_stat]) + + +def update_question_contribution_stats_at_review( + suggestion: suggestion_registry.BaseSuggestion +) -> None: + """Creates/updates QuestionContributionStatsModel model for given question + submitter when a question is reviewed. + + Args: + suggestion: Suggestion. The suggestion domain object that is being + reviewed. 
+ """ + suggestion_is_accepted = ( + suggestion.status == suggestion_models.STATUS_ACCEPTED + ) + for topic in skill_services.get_all_topic_assignments_for_skill( + suggestion.target_id): + question_contribution_stat_model = ( + suggestion_models.QuestionContributionStatsModel.get( + suggestion.author_id, topic.topic_id + )) + + if question_contribution_stat_model is None: + accepted_questions_count = 0 + accepted_questions_without_reviewer_edits_count = 0 + if suggestion_is_accepted: + accepted_questions_count += 1 + if suggestion_is_accepted and not suggestion.edited_by_reviewer: + accepted_questions_without_reviewer_edits_count += 1 + suggestion_models.QuestionContributionStatsModel.create( + contributor_user_id=suggestion.author_id, + topic_id=topic.topic_id, + submitted_questions_count=1, + accepted_questions_count=accepted_questions_count, + accepted_questions_without_reviewer_edits_count=( + accepted_questions_without_reviewer_edits_count), + first_contribution_date=suggestion.last_updated.date(), + last_contribution_date=suggestion.last_updated.date() + ) + continue + + question_contribution_stat = ( + _create_question_contribution_stats_from_model( + question_contribution_stat_model)) + + if suggestion_is_accepted: + question_contribution_stat.accepted_questions_count += 1 + if suggestion_is_accepted and not suggestion.edited_by_reviewer: + ( + question_contribution_stat + .accepted_questions_without_reviewer_edits_count + ) += 1 + _update_question_contribution_stats_models( + [question_contribution_stat]) + + +def update_question_review_stats( + suggestion: suggestion_registry.BaseSuggestion +) -> None: + """Creates/updates QuestionReviewStatsModel model for given question + reviewer when a question is reviewed. + + Args: + suggestion: Suggestion. The suggestion domain object that is being + reviewed. + + Raises: + Exception. The final_reviewer_id of the suggestion should not be None. 
+ """ + if suggestion.final_reviewer_id is None: + raise Exception( + 'The final_reviewer_id in the suggestion should not be None.' + ) + suggestion_is_accepted = ( + suggestion.status == suggestion_models.STATUS_ACCEPTED + ) + + for topic in skill_services.get_all_topic_assignments_for_skill( + suggestion.target_id): + question_review_stat_model = ( + # This function is called when reviewing a question suggestion and + # hence final_reviewer_id should not be None when the suggestion is + # up-to-date. + suggestion_models.QuestionReviewStatsModel.get( + suggestion.final_reviewer_id, topic.topic_id + )) + + if question_review_stat_model is None: + # This function is called when reviewing a question suggestion and + # hence final_reviewer_id should not be None when the suggestion is + # up-to-date. + accepted_questions_count = 0 + accepted_questions_with_reviewer_edits_count = 0 + if suggestion_is_accepted: + accepted_questions_count += 1 + if suggestion_is_accepted and suggestion.edited_by_reviewer: + accepted_questions_with_reviewer_edits_count += 1 + suggestion_models.QuestionReviewStatsModel.create( + reviewer_user_id=suggestion.final_reviewer_id, + topic_id=topic.topic_id, + reviewed_questions_count=1, + accepted_questions_count=accepted_questions_count, + accepted_questions_with_reviewer_edits_count=( + accepted_questions_with_reviewer_edits_count), + first_contribution_date=suggestion.last_updated.date(), + last_contribution_date=suggestion.last_updated.date() + ) + continue + + question_review_stat = ( + _create_question_review_stats_from_model( + question_review_stat_model)) + + increment_question_review_stats( + question_review_stat, suggestion.last_updated, + suggestion_is_accepted, + suggestion.edited_by_reviewer) + _update_question_review_stats_models([question_review_stat]) + + update_question_contribution_stats_at_review(suggestion) + + +def increment_translation_contribution_stats_at_review( + translation_contribution_stat: ( + 
suggestion_registry.TranslationContributionStats), + content_word_count: int, + suggestion_is_accepted: bool, + edited_by_reviewer: bool +) -> None: + """Updates TranslationContributionStats object. + + Args: + translation_contribution_stat: TranslationContributionStats. The stats + object to update. + content_word_count: int. The number of words in the translation. + suggestion_is_accepted: bool. A flag that indicates whether the + suggestion is accepted. + edited_by_reviewer: bool. A flag that indicates whether the suggestion + is edited by the reviewer. + """ + if suggestion_is_accepted: + translation_contribution_stat.accepted_translations_count += 1 + translation_contribution_stat.accepted_translation_word_count += ( + content_word_count) + else: + translation_contribution_stat.rejected_translations_count += 1 + translation_contribution_stat.rejected_translation_word_count += ( + content_word_count) + if suggestion_is_accepted and not edited_by_reviewer: + translation_contribution_stat.accepted_translations_without_reviewer_edits_count += 1 # pylint: disable=line-too-long + + +def increment_translation_review_stats( + translation_review_stat: suggestion_registry.TranslationReviewStats, + content_word_count: int, + last_contribution_date: datetime.datetime, + suggestion_is_accepted: bool, + edited_by_reviewer: bool +) -> None: + """Updates TranslationReviewStats object. + + Args: + translation_review_stat: TranslationReviewStats. The stats + object to update. + content_word_count: int. The number of words in the translation. + last_contribution_date: datetime.datetime. The last updated date. + suggestion_is_accepted: bool. A flag that indicates whether the + suggestion is accepted. + edited_by_reviewer: bool. A flag that indicates whether the suggestion + is edited by the reviewer. 
+ """ + translation_review_stat.reviewed_translations_count += 1 + translation_review_stat.reviewed_translation_word_count += ( + content_word_count) + if suggestion_is_accepted: + translation_review_stat.accepted_translations_count += 1 + translation_review_stat.accepted_translation_word_count += ( + content_word_count) + if suggestion_is_accepted and edited_by_reviewer: + ( + translation_review_stat + .accepted_translations_with_reviewer_edits_count + ) += 1 + translation_review_stat.last_contribution_date = ( + last_contribution_date.date()) + + +def increment_question_review_stats( + question_review_stat: suggestion_registry.QuestionReviewStats, + last_contribution_date: datetime.datetime, + suggestion_is_accepted: bool, + edited_by_reviewer: bool +) -> None: + """Updates QuestionReviewStats object. + + Args: + question_review_stat: QuestionReviewStats. The stats object to update. + last_contribution_date: datetime.datetime. The last updated date. + suggestion_is_accepted: bool. A flag that indicates whether the + suggestion is accepted. + edited_by_reviewer: bool. A flag that indicates whether the suggestion + is edited by the reviewer. + """ + question_review_stat.reviewed_questions_count += 1 + if suggestion_is_accepted: + question_review_stat.accepted_questions_count += 1 + if suggestion_is_accepted and edited_by_reviewer: + question_review_stat.accepted_questions_with_reviewer_edits_count += 1 + question_review_stat.last_contribution_date = ( + last_contribution_date.date()) + + +def enqueue_contributor_ranking_notification_email_task( + contributor_user_id: str, contribution_type: str, + contribution_sub_type: str, language_code: str, rank_name: str, +) -> None: + """Adds a 'send feedback email' (instant) task into the task queue. + + Args: + contributor_user_id: str. The ID of the contributor. + contribution_type: str. The type of the contribution i.e. + translation or question. + contribution_sub_type: str. The sub type of the contribution + i.e. 
submissions/acceptances/reviews/edits. + language_code: str. The language code of the suggestion. + rank_name: str. The name of the rank that the contributor achieved. + + Raises: + Exception. The contribution type must be offered on the Contributor + Dashboard. + Exception. The contribution subtype must be offered on the Contributor + Dashboard. + """ + # contributor_user_id is already validated in the controller layer. + # TODO(#16062): Rank name should be valid to send notification emails. + if language_code not in [language['id'] for language in ( + constants.SUPPORTED_AUDIO_LANGUAGES)]: + raise Exception( + 'Not supported language code: %s' % language_code) + if contribution_type not in [ + feconf.CONTRIBUTION_TYPE_TRANSLATION, + feconf.CONTRIBUTION_TYPE_QUESTION + ]: + raise Exception( + 'Invalid contribution type: %s' % contribution_type) + if contribution_sub_type not in [ + feconf.CONTRIBUTION_SUBTYPE_ACCEPTANCE, + feconf.CONTRIBUTION_SUBTYPE_REVIEW, + feconf.CONTRIBUTION_SUBTYPE_EDIT, + ]: + raise Exception( + 'Invalid contribution subtype: %s' % contribution_sub_type) + + payload = { + 'contributor_user_id': contributor_user_id, + 'contribution_type': contribution_type, + 'contribution_sub_type': contribution_sub_type, + 'language_code': language_code, + 'rank_name': rank_name, + } + + taskqueue_services.enqueue_task( + feconf.TASK_URL_CONTRIBUTOR_DASHBOARD_ACHIEVEMENT_NOTIFICATION_EMAILS, + payload, 0) + + +def generate_contributor_certificate_data( + username: str, + suggestion_type: str, + language_code: Optional[str], + from_date: datetime.datetime, + to_date: datetime.datetime +) -> suggestion_registry.ContributorCertificateInfoDict: + """Returns data to generate the certificate. + + Args: + username: str. The username of the contributor. + language_code: str|None. The language for which the contributions should + be considered. + suggestion_type: str. The type of suggestion that the certificate + needs to generate. + from_date: datetime.datetime. 
The start of the date range for which the + contributions were created. + to_date: datetime.datetime. The end of the date range for which the + contributions were created. + + Returns: + ContributorCertificateInfoDict. Data to generate the certificate. + + Raises: + Exception. The suggestion type is invalid. + Exception. There is no user for the given username. + """ + user_id = user_services.get_user_id_from_username(username) + if user_id is None: + raise Exception('There is no user for the given username.') + + if suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: + # For the suggestion_type translate_content, there should be a + # corresponding language_code. + assert isinstance(language_code, str) + data = _generate_translation_contributor_certificate_data( + language_code, from_date, to_date, user_id) + + elif suggestion_type == feconf.SUGGESTION_TYPE_ADD_QUESTION: + data = _generate_question_contributor_certificate_data( + from_date, to_date, user_id) + + else: + raise Exception('The suggestion type is invalid.') + + return data.to_dict() + + +def _generate_translation_contributor_certificate_data( + language_code: str, + from_date: datetime.datetime, + to_date: datetime.datetime, + user_id: str +) -> suggestion_registry.ContributorCertificateInfo: + """Returns data to generate translation submitter certificate. + + Args: + language_code: str. The language for which the contributions should + be considered. + from_date: datetime.datetime. The start of the date range for which + the contributions were created. + to_date: datetime.datetime. The end of the date range for which + the contributions were created. + user_id: str. The user ID of the contributor. + + Returns: + ContributorCertificateInfo. Data to generate translation submitter + certificate. + + Raises: + Exception. The language is invalid. 
+ """ + signature = feconf.TRANSLATION_TEAM_LEAD + + # Adds one date to the to_date to make sure the contributions within + # the to_date are also counted for the certificate. + to_date_to_fetch_contributions = to_date + datetime.timedelta(days=1) + + language = next(filter( + lambda lang: lang['id'] == language_code, + constants.SUPPORTED_AUDIO_LANGUAGES), None) + if language is None: + raise Exception('The provided language is invalid.') + language_description = language['description'] + if ' (' in language_description: + language_description = language_description[ + language_description.find('(') + 1:language_description.find(')')] + + suggestions = ( + suggestion_models.GeneralSuggestionModel + .get_translation_suggestions_submitted_within_given_dates( + from_date, + to_date_to_fetch_contributions, + user_id, + language_code + ) + ) + + words_count = 0 + for model in suggestions: + suggestion = get_suggestion_from_model(model) + + # Retrieve the html content that is emphasized on the + # Contributor Dashboard pages. This content is what stands + # out for each suggestion when a user views a list of + # suggestions. + get_html_representing_suggestion = ( + SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS[ + suggestion.suggestion_type] + ) + plain_text = _get_plain_text_from_html_content_string( + get_html_representing_suggestion(suggestion)) + + words = plain_text.split(' ') + words_without_empty_strings = [ + word for word in words if word != ''] + words_count += len(words_without_empty_strings) + # Go to the below link for more information about how we count hours + contributed. 
+ # https://docs.google.com/spreadsheets/d/1ykSNwPLZ5qTCkuO21VLdtm_2SjJ5QJ0z0PlVjjSB4ZQ/edit?usp=sharing + hours_contributed = round(words_count / 300, 2) + + if words_count == 0: + raise Exception( + 'There are no contributions for the given time range.') + + return suggestion_registry.ContributorCertificateInfo( + from_date.strftime('%d %b %Y'), to_date.strftime('%d %b %Y'), + signature, str(hours_contributed), language_description + ) + + +def _generate_question_contributor_certificate_data( + from_date: datetime.datetime, + to_date: datetime.datetime, + user_id: str +) -> suggestion_registry.ContributorCertificateInfo: + """Returns data to generate question submitter certificate. + + Args: + from_date: datetime.datetime. The start of the date range for which + the contributions were created. + to_date: datetime.datetime. The end of the date range for which + the contributions were created. + user_id: str. The user ID of the contributor. + + Returns: + ContributorCertificateInfo. Data to generate question submitter + certificate. + + Raises: + Exception. The suggestion type given to generate the certificate is + invalid. + """ + signature = feconf.QUESTION_TEAM_LEAD + + # Adds one date to the to_date to make sure the contributions within + # the to_date are also counted for the certificate. + to_date_to_fetch_contributions = to_date + datetime.timedelta(days=1) + + suggestions = ( + suggestion_models.GeneralSuggestionModel + .get_question_suggestions_submitted_within_given_dates( + from_date, to_date_to_fetch_contributions, user_id)) + + minutes_contributed = 0 + for model in suggestions: + suggestion = get_suggestion_from_model(model) + # Retrieve the html content that is emphasized on the + # Contributor Dashboard pages. This content is what stands + # out for each suggestion when a user views a list of + # suggestions. 
+ get_html_representing_suggestion = ( + SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS[ + suggestion.suggestion_type] + ) + html_content = get_html_representing_suggestion(suggestion) + + if 'oppia-noninteractive-image' in html_content: + minutes_contributed += 20 + else: + minutes_contributed += 12 + # Go to the below link for more information about how we count hours + # contributed. + # https://docs.google.com/spreadsheets/d/1ykSNwPLZ5qTCkuO21VLdtm_2SjJ5QJ0z0PlVjjSB4ZQ/edit?usp=sharing + hours_contributed = round(minutes_contributed / 60, 2) + + if minutes_contributed == 0: + raise Exception( + 'There are no contributions for the given time range.') + + return suggestion_registry.ContributorCertificateInfo( + from_date.strftime('%d %b %Y'), to_date.strftime('%d %b %Y'), + signature, str(hours_contributed), None + ) diff --git a/core/domain/suggestion_services_test.py b/core/domain/suggestion_services_test.py index 601aaa294f78..eee824428fcb 100644 --- a/core/domain/suggestion_services_test.py +++ b/core/domain/suggestion_services_test.py @@ -32,16 +32,37 @@ from core.domain import state_domain from core.domain import story_domain from core.domain import story_services +from core.domain import subtopic_page_domain +from core.domain import subtopic_page_services from core.domain import suggestion_registry from core.domain import suggestion_services +from core.domain import taskqueue_services +from core.domain import topic_domain +from core.domain import topic_fetchers from core.domain import topic_services +from core.domain import translation_domain from core.domain import user_services from core.platform import models from core.tests import test_utils -(suggestion_models, feedback_models, user_models) = ( +from typing import Dict, Final, List, Mapping, Union + +MYPY = False +if MYPY: # pragma: no cover + from core.domain import change_domain + from mypy_imports import feedback_models + from mypy_imports import opportunity_models + from mypy_imports import 
suggestion_models + from mypy_imports import user_models + +(suggestion_models, feedback_models, opportunity_models, user_models) = ( models.Registry.import_models( - [models.NAMES.suggestion, models.NAMES.feedback, models.NAMES.user] + [ + models.Names.SUGGESTION, + models.Names.FEEDBACK, + models.Names.OPPORTUNITY, + models.Names.USER + ] ) ) @@ -49,39 +70,39 @@ class SuggestionServicesUnitTests(test_utils.GenericTestBase): """Test the functions in suggestion_services.""" - score_category = ( + score_category: str = ( suggestion_models.SCORE_TYPE_CONTENT + suggestion_models.SCORE_CATEGORY_DELIMITER + 'Algebra') - target_id = 'exp1' - target_id_2 = 'exp2' - target_id_3 = 'exp3' - target_version_at_submission = 1 - change = { + target_id: str = 'exp1' + target_id_2: str = 'exp2' + target_id_3: str = 'exp3' + target_version_at_submission: int = 1 + change: Dict[str, Union[str, Dict[str, str]]] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'state_1', 'new_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': 'new suggestion content' } } - AUTHOR_EMAIL = 'author@example.com' - REVIEWER_EMAIL = 'reviewer@example.com' - NORMAL_USER_EMAIL = 'normal@example.com' + AUTHOR_EMAIL: Final = 'author@example.com' + REVIEWER_EMAIL: Final = 'reviewer@example.com' + NORMAL_USER_EMAIL: Final = 'normal@example.com' - THREAD_ID = 'exploration.exp1.thread_1' + THREAD_ID: Final = 'exploration.exp1.thread_1' - COMMIT_MESSAGE = 'commit message' - EMPTY_COMMIT_MESSAGE = ' ' + COMMIT_MESSAGE: Final = 'commit message' + EMPTY_COMMIT_MESSAGE: Final = ' ' - suggestion_id = THREAD_ID - suggestion_id_2 = 'exploration.exp2.thread_2' - suggestion_id_3 = 'exploration.exp3.thread_3' + suggestion_id: str = THREAD_ID + suggestion_id_2: str = 'exploration.exp2.thread_2' + suggestion_id_3: str = 'exploration.exp3.thread_3' - def setUp(self): - super(SuggestionServicesUnitTests, self).setUp() + def setUp(self) -> None: 
+ super().setUp() self.signup(self.AUTHOR_EMAIL, 'author') self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) @@ -90,16 +111,21 @@ def setUp(self): self.signup(self.NORMAL_USER_EMAIL, 'normaluser') self.normal_user_id = self.get_user_id_from_email( self.NORMAL_USER_EMAIL) - self.save_new_valid_exploration( + self.exploration = self.save_new_valid_exploration( self.target_id, self.author_id, category='Algebra') - def assert_suggestion_status(self, suggestion_id, status): + def assert_suggestion_status(self, suggestion_id: str, status: str) -> None: """Assert the status of the suggestion with suggestion_id.""" suggestion = suggestion_services.get_suggestion_by_id(suggestion_id) self.assertEqual(suggestion.status, status) def mock_accept_suggestion( - self, suggestion_id, reviewer_id, commit_message, review_message): + self, + suggestion_id: str, + reviewer_id: str, + commit_message: str, + review_message: str + ) -> None: """Sets up the appropriate mocks to successfully call accept_suggestion. """ @@ -114,13 +140,14 @@ def mock_accept_suggestion( self.mock_pre_accept_validate_does_nothing): with self.swap( suggestion_registry.SuggestionEditStateContent, - 'get_change_list_for_accepting_suggestion', - self.mock_get_change_list_does_nothing): + '_get_change_list_for_accepting_edit_state_content_suggestion', # pylint: disable=line-too-long + self.mock_get_change_list_does_nothing + ): suggestion_services.accept_suggestion( suggestion_id, reviewer_id, commit_message, review_message) - def mock_create_suggestion(self, target_id): + def mock_create_suggestion(self, target_id: str) -> None: """Sets up the appropriate mocks to successfully call create_suggestion. 
""" @@ -136,14 +163,18 @@ def mock_create_suggestion(self, target_id): target_id, self.target_version_at_submission, self.author_id, self.change, 'test description') - def mock_generate_new_thread_id(self, entity_type, exp_id): + def mock_generate_new_thread_id( + self, entity_type: str, exp_id: str + ) -> str: thread_id = 'thread_%s' % exp_id[-1] return '.'.join([entity_type, exp_id, thread_id]) class MockExploration: """Mocks an exploration. To be used only for testing.""" - def __init__(self, exploration_id, states): + def __init__( + self, exploration_id: str, states: Dict[str, Dict[str, str]] + ) -> None: self.id = exploration_id self.states = states self.category = 'Algebra' @@ -155,26 +186,29 @@ def __init__(self, exploration_id, states): MockExploration('exp3', {'state_1': {}, 'state_2': {}}) ] - def mock_get_exploration_by_id(self, exp_id): + def mock_get_exploration_by_id(self, exp_id: str) -> MockExploration: for exp in self.explorations: if exp.id == exp_id: - return exp + mock_exp = exp + return mock_exp - def mock_pre_accept_validate_does_nothing(self): + def mock_pre_accept_validate_does_nothing(self) -> None: pass - def mock_get_change_list_does_nothing(self): + def mock_get_change_list_does_nothing(self) -> None: pass - def mock_accept_does_nothing(self, unused_arg): + def mock_accept_does_nothing(self, unused_arg: str) -> None: pass - def edit_before_pre_accept_validate(self, suggestion): + def edit_before_pre_accept_validate( + self, suggestion: suggestion_registry.BaseSuggestion + ) -> None: """Edits suggestion immediately before pre-accept validation.""" suggestion.score_category = 'invalid_score_category' suggestion.pre_accept_validate() - def test_create_new_suggestion_successfully(self): + def test_create_new_suggestion_successfully(self) -> None: expected_suggestion_dict = { 'suggestion_id': 'exploration.exp1.thread_1', 'suggestion_type': ( @@ -189,7 +223,7 @@ def test_create_new_suggestion_successfully(self): 'property_name': 
exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'state_1', 'new_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': 'new suggestion content' }, 'old_value': None @@ -204,16 +238,18 @@ def test_create_new_suggestion_successfully(self): self.assertDictContainsSubset( expected_suggestion_dict, observed_suggestion.to_dict()) - def test_cannot_create_suggestion_with_invalid_suggestion_type(self): - with self.assertRaisesRegexp(Exception, 'Invalid suggestion type'): + def test_cannot_create_suggestion_with_invalid_suggestion_type( + self + ) -> None: + with self.assertRaisesRegex(Exception, 'Invalid suggestion type'): suggestion_services.create_suggestion( 'invalid_suggestion_type', feconf.ENTITY_TYPE_EXPLORATION, self.target_id, self.target_version_at_submission, self.author_id, self.change, 'test description') - def test_cannot_create_suggestion_with_invalid_author_id(self): - with self.assertRaisesRegexp( + def test_cannot_create_suggestion_with_invalid_author_id(self) -> None: + with self.assertRaisesRegex( Exception, 'Expected author_id to be in a valid user ID format'): suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, @@ -221,17 +257,19 @@ def test_cannot_create_suggestion_with_invalid_author_id(self): self.target_id, self.target_version_at_submission, 'invalid author ID', self.change, 'test description') - def test_cannot_create_translation_suggestion_with_invalid_content_html_raise_error(self): # pylint: disable=line-too-long + def test_cannot_create_translation_suggestion_with_invalid_content_html_raise_error( # pylint: disable=line-too-long + self + ) -> None: add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': 'Introduction', - 'content_id': 'content', + 'content_id': 'content_0', 'language_code': 'hi', 'content_html': '

    The invalid content html

    ', 'translation_html': '

    Translation for invalid content.

    ', 'data_format': 'html' } - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The Exploration content has changed since this translation ' 'was submitted.'): @@ -241,7 +279,24 @@ def test_cannot_create_translation_suggestion_with_invalid_content_html_raise_er self.target_id, self.target_version_at_submission, self.author_id, add_translation_change_dict, 'test description') - def test_get_all_stale_suggestion_ids(self): + def test_get_submitted_submissions(self) -> None: + suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + self.target_id, self.target_version_at_submission, + self.author_id, self.change, '') + suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + self.target_id, self.target_version_at_submission, + self.author_id, self.change, 'test_description') + suggestions = suggestion_services.get_submitted_suggestions( + self.author_id, feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT) + self.assertEqual(len(suggestions), 2) + self.assertEqual(suggestions[0].author_id, self.author_id) + self.assertEqual(suggestions[1].author_id, self.author_id) + + def test_get_all_stale_suggestion_ids(self) -> None: suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, feconf.ENTITY_TYPE_EXPLORATION, @@ -260,14 +315,17 @@ def test_get_all_stale_suggestion_ids(self): len(suggestion_services.get_all_stale_suggestion_ids()), 0) def mock_update_exploration( - self, unused_user_id, unused_exploration_id, unused_change_list, - commit_message, is_suggestion): - self.assertTrue(is_suggestion) + self, + unused_user_id: str, + unused_exploration_id: str, + unused_change_list: str, + commit_message: str, + ) -> None: self.assertEqual( commit_message, 'Accepted suggestion by %s: %s' % ( 'author', self.COMMIT_MESSAGE)) - def test_cannot_reject_suggestion_with_empty_review_message(self): + def 
test_cannot_reject_suggestion_with_empty_review_message(self) -> None: suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, feconf.ENTITY_TYPE_EXPLORATION, @@ -280,7 +338,7 @@ def test_cannot_reject_suggestion_with_empty_review_message(self): self.assert_suggestion_status( suggestion.suggestion_id, suggestion_models.STATUS_IN_REVIEW) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Review message cannot be empty.'): suggestion_services.reject_suggestion( suggestion.suggestion_id, self.reviewer_id, '') @@ -289,20 +347,13 @@ def test_cannot_reject_suggestion_with_empty_review_message(self): self.assert_suggestion_status( suggestion.suggestion_id, suggestion_models.STATUS_IN_REVIEW) - def test_accept_suggestion_and_send_email_to_author(self): - change_list = [exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_STATE, - 'state_name': 'state 1', - })] - exp_services.update_exploration( - self.author_id, self.target_id, change_list, 'Add state.') - + def test_accept_suggestion_and_send_email_to_author(self) -> None: new_suggestion_content = state_domain.SubtitledHtml( 'content', '

    new suggestion content html

    ').to_dict() - change_dict = { + change_dict: Dict[str, Union[str, state_domain.SubtitledHtmlDict]] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, - 'state_name': 'state 1', + 'state_name': 'Introduction', 'new_value': new_suggestion_content } @@ -352,13 +403,16 @@ def test_accept_suggestion_and_send_email_to_author(self): self.author_id, suggestion.score_category ) ) + # Ruling out the possibility of None for mypy type checking. + assert user_proficiency_model is not None self.assertTrue(user_proficiency_model.onboarding_email_sent) self.assertEqual( user_proficiency_model.score, feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW) def test_accept_suggestion_does_not_send_email_if_users_score_is_too_low( - self): + self + ) -> None: self.mock_create_suggestion(self.target_id) self.assert_suggestion_status( self.suggestion_id, suggestion_models.STATUS_IN_REVIEW) @@ -391,6 +445,8 @@ def test_accept_suggestion_does_not_send_email_if_users_score_is_too_low( ) ) # Assert that the users score was updated correctly. + # Ruling out the possibility of None for mypy type checking. 
+ assert user_proficiency_model is not None self.assertEqual( user_proficiency_model.score, suggestion_models.INCREMENT_SCORE_OF_AUTHOR_BY) @@ -402,7 +458,8 @@ def test_accept_suggestion_does_not_send_email_if_users_score_is_too_low( self.assertFalse(user_proficiency_model.onboarding_email_sent) def test_accept_suggestion_creates_user_proficiency_model_if_it_is_none( - self): + self + ) -> None: self.mock_create_suggestion(self.target_id) self.assert_suggestion_status( self.suggestion_id, suggestion_models.STATUS_IN_REVIEW) @@ -420,7 +477,7 @@ def test_accept_suggestion_creates_user_proficiency_model_if_it_is_none( self.assertIsNotNone(user_models.UserContributionProficiencyModel.get( self.author_id, self.score_category)) - def test_accept_suggestion_successfully(self): + def test_accept_suggestion_successfully(self) -> None: self.mock_create_suggestion(self.target_id) self.assert_suggestion_status( self.suggestion_id, suggestion_models.STATUS_IN_REVIEW) @@ -443,26 +500,27 @@ def test_accept_suggestion_successfully(self): last_message.text, 'review message') def test_accept_suggestion_raises_exception_if_suggestion_does_not_exist( - self): + self + ) -> None: expected_exception_regexp = ( 'You cannot accept the suggestion with id %s because it does not ' 'exist.' % (self.suggestion_id) ) - with self.assertRaisesRegexp(Exception, expected_exception_regexp): + with self.assertRaisesRegex(Exception, expected_exception_regexp): self.mock_accept_suggestion( self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE, 'review message') - def test_accept_suggestion_with_invalid_math_fails(self): + def test_accept_suggestion_with_invalid_math_fails(self) -> None: """Test that the method for accepting suggestions raises error when a suggestion with invalid math-tags is tried to be accepted. 
""" - change_dict = { + change_dict: Dict[str, Union[str, state_domain.SubtitledHtmlDict]] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'state_1', 'new_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': ( ' None: self.id = exploration_id self.states = states self.category = 'Algebra' - def get_content_html(self, state_name, content_id): + def get_content_html(self, state_name: str, content_id: str) -> str: """Used to mock the get_content_html method for explorations.""" # state_name and content_id are used here to suppress the unused # arguments warning. The main goal of this method is to just @@ -998,25 +1210,35 @@ def get_content_html(self, state_name, content_id): MockExploration('exp3', {'state_1': {}, 'state_2': {}}), ] - def mock_get_exploration_by_id(self, exp_id): + def mock_get_exploration_by_id(self, exp_id: str) -> MockExploration: for exp in self.explorations: if exp.id == exp_id: - return exp + mock_exp = exp + return mock_exp - def _create_question_suggestion_with_skill_id(self, skill_id): + def _create_question_suggestion_with_skill_id( + self, skill_id: str + ) -> suggestion_registry.SuggestionAddQuestion: """Creates a question suggestion with the given skill_id.""" - suggestion_change = { + content_id_generator = translation_domain.ContentIdGenerator() + suggestion_change: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 
'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': skill_id, 'skill_difficulty': 0.3 @@ -1028,16 +1250,27 @@ def _create_question_suggestion_with_skill_id(self, skill_id): self.author_id_1, suggestion_change, 'test description' ) - def _create_translation_suggestion_with_language_code(self, language_code): + def _create_translation_suggestion_with_language_code( + self, language_code: str + ) -> suggestion_registry.SuggestionTranslateContent: """Creates a translation suggestion with the language code given.""" + return self._create_translation_suggestion( + language_code, self.target_id_1) + + def _create_translation_suggestion( + self, language_code: str, target_id: str + ) -> suggestion_registry.SuggestionTranslateContent: + """Creates a translation suggestion for the supplied language code and + target ID. + """ add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': 'state_1', - 'content_id': 'content', + 'content_id': 'content_0', 'language_code': language_code, 'content_html': ( - '

    State name: state_1, Content id: content

    '), + '

    State name: state_1, Content id: content_0

    '), 'translation_html': '

    This is translated html.

    ', 'data_format': 'html' } @@ -1052,13 +1285,13 @@ def _create_translation_suggestion_with_language_code(self, language_code): translation_suggestion = suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, feconf.ENTITY_TYPE_EXPLORATION, - self.target_id_1, 1, self.author_id_1, + target_id, 1, self.author_id_1, add_translation_change_dict, 'test description') return translation_suggestion - def setUp(self): - super(SuggestionGetServicesUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL_1, 'author1') self.author_id_1 = self.get_user_id_from_email(self.AUTHOR_EMAIL_1) @@ -1069,6 +1302,10 @@ def setUp(self): self.author_id_2 = self.get_user_id_from_email(self.AUTHOR_EMAIL_2) self.signup(self.REVIEWER_EMAIL_2, 'reviewer2') self.reviewer_id_2 = self.get_user_id_from_email(self.REVIEWER_EMAIL_2) + self.opportunity_summary_ids = [ + self.explorations[0].id, self.explorations[1].id, + self.explorations[2].id] + self.topic_name = 'topic' with self.swap( exp_fetchers, 'get_exploration_by_id', @@ -1104,13 +1341,40 @@ def setUp(self): self.target_id_2, self.target_version_at_submission, self.author_id_2, self.change, 'test description') - def test_get_by_author(self): + def test_get_by_author(self) -> None: queries = [('author_id', self.author_id_1)] self.assertEqual(len(suggestion_services.query_suggestions(queries)), 3) queries = [('author_id', self.author_id_2)] self.assertEqual(len(suggestion_services.query_suggestions(queries)), 2) - def test_get_by_target_id(self): + def test_get_translation_suggestions_in_review_by_exp_ids(self) -> None: + suggestions = ( + suggestion_services + .get_translation_suggestions_in_review_by_exp_ids( + [ + self.target_id_1, + self.target_id_2, + self.target_id_3 + ], + 'en' + ) + ) + self.assertEqual(len(suggestions), 0) + self._create_translation_suggestion_with_language_code('en') + suggestions = ( + suggestion_services + 
.get_translation_suggestions_in_review_by_exp_ids( + [self.target_id_1], + 'en' + ) + ) + # Ruling out the possibility of None for mypy type checking. + assert suggestions[0] is not None + self.assertEqual(suggestions[0].author_id, self.author_id_1) + self.assertEqual(suggestions[0].language_code, 'en') + self.assertEqual(suggestions[0].target_id, self.target_id_1) + + def test_get_by_target_id(self) -> None: queries = [ ('target_type', feconf.ENTITY_TYPE_EXPLORATION), ('target_id', self.target_id_1) @@ -1122,17 +1386,17 @@ def test_get_by_target_id(self): ] self.assertEqual(len(suggestion_services.query_suggestions(queries)), 1) - def test_get_by_status(self): + def test_get_by_status(self) -> None: queries = [('status', suggestion_models.STATUS_IN_REVIEW)] self.assertEqual(len(suggestion_services.query_suggestions(queries)), 5) - def test_get_by_type(self): + def test_get_by_type(self) -> None: queries = [( 'suggestion_type', feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT)] self.assertEqual(len(suggestion_services.query_suggestions(queries)), 5) - def test_query_suggestions(self): + def test_query_suggestions(self) -> None: queries = [ ('target_type', feconf.ENTITY_TYPE_EXPLORATION), ('target_id', self.target_id_1), @@ -1153,11 +1417,13 @@ def test_query_suggestions(self): ('target_id', self.target_id_1), ('invalid_field', 'value') ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Not allowed to query on field invalid_field'): suggestion_services.query_suggestions(queries) - def test_get_translation_suggestion_ids_with_exp_ids_with_one_exp(self): + def test_get_translation_suggestion_ids_with_exp_ids_with_one_exp( + self + ) -> None: # Create the translation suggestion associated with exploration id # target_id_1. 
with self.swap( @@ -1181,7 +1447,8 @@ def test_get_translation_suggestion_ids_with_exp_ids_with_one_exp(self): [self.target_id_1])), 1) def test_get_translation_suggestion_ids_with_exp_ids_with_multiple_exps( - self): + self + ) -> None: # Create the translation suggestion associated with exploration id # target_id_2. with self.swap( @@ -1218,7 +1485,8 @@ def test_get_translation_suggestion_ids_with_exp_ids_with_multiple_exps( [self.target_id_2, self.target_id_3])), 2) def test_get_translation_suggestion_ids_with_exp_ids_with_invalid_exp( - self): + self + ) -> None: # Assert that there are no translation suggestions with an invalid # exploration id found. self.assertEqual( @@ -1228,7 +1496,8 @@ def test_get_translation_suggestion_ids_with_exp_ids_with_invalid_exp( ['invalid_exp_id'])), 0) def test_get_translation_suggestion_ids_with_exp_ids_with_empty_exp_list( - self): + self + ) -> None: # Assert that there are no translation suggestions found when we # use an empty exp_ids list. self.assertEqual( @@ -1236,7 +1505,75 @@ def test_get_translation_suggestion_ids_with_exp_ids_with_empty_exp_list( suggestion_services .get_translation_suggestion_ids_with_exp_ids([])), 0) - def test_get_translation_suggestions_in_review_by_exploration(self): + def test_get_submitted_suggestions_by_offset(self) -> None: + self._create_translation_suggestion_with_language_code('hi') + self._create_translation_suggestion_with_language_code('pt') + question_1_skill_id = 'skill1' + question_2_skill_id = 'skill2' + self._create_question_suggestion_with_skill_id(question_1_skill_id) + self._create_question_suggestion_with_skill_id(question_2_skill_id) + + # Fetch submitted translation suggestions. 
+ translatable_suggestions, offset = ( + suggestion_services.get_submitted_suggestions_by_offset( + user_id=self.author_id_1, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + limit=constants.OPPORTUNITIES_PAGE_SIZE, + offset=0, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE)) + + self.assertEqual(len(translatable_suggestions), 2) + self.assertEqual(offset, 2) + self.assertEqual( + translatable_suggestions[0].target_id, self.target_id_1 + ) + self.assertEqual( + translatable_suggestions[0].suggestion_type, + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT) + self.assertEqual( + translatable_suggestions[0].status, + suggestion_models.STATUS_IN_REVIEW) + self.assertEqual( + translatable_suggestions[1].target_id, self.target_id_1 + ) + self.assertEqual( + translatable_suggestions[1].suggestion_type, + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT) + self.assertEqual( + translatable_suggestions[1].status, + suggestion_models.STATUS_IN_REVIEW) + + # Fetch submitted question suggestions. + question_suggestions, offset = ( + suggestion_services.get_submitted_suggestions_by_offset( + user_id=self.author_id_1, + suggestion_type=feconf.SUGGESTION_TYPE_ADD_QUESTION, + limit=constants.OPPORTUNITIES_PAGE_SIZE, + offset=0, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE)) + + self.assertEqual(len(question_suggestions), 2) + self.assertEqual(offset, 2) + self.assertEqual( + question_suggestions[0].target_id, question_2_skill_id + ) + self.assertEqual( + question_suggestions[0].suggestion_type, + feconf.SUGGESTION_TYPE_ADD_QUESTION) + self.assertEqual( + question_suggestions[0].status, + suggestion_models.STATUS_IN_REVIEW) + self.assertEqual( + question_suggestions[1].target_id, question_1_skill_id + ) + self.assertEqual( + question_suggestions[1].suggestion_type, + feconf.SUGGESTION_TYPE_ADD_QUESTION) + self.assertEqual( + question_suggestions[1].status, + suggestion_models.STATUS_IN_REVIEW) + + def test_get_translation_suggestions_in_review_by_exploration(self) -> None: 
self._create_translation_suggestion_with_language_code('hi') self._create_translation_suggestion_with_language_code('hi') @@ -1245,6 +1582,8 @@ def test_get_translation_suggestions_in_review_by_exploration(self): .get_translation_suggestions_in_review_by_exploration( self.target_id_1, 'hi')) + # Ruling out the possibility of None for mypy type checking. + assert suggestions[0] is not None self.assertEqual(len(suggestions), 2) self.assertEqual(suggestions[0].target_id, self.target_id_1) self.assertEqual( @@ -1253,6 +1592,8 @@ def test_get_translation_suggestions_in_review_by_exploration(self): self.assertEqual( suggestions[0].status, suggestion_models.STATUS_IN_REVIEW) + # Ruling out the possibility of None for mypy type checking. + assert suggestions[1] is not None self.assertEqual(suggestions[1].target_id, self.target_id_1) self.assertEqual( suggestions[1].suggestion_type, @@ -1261,7 +1602,9 @@ def test_get_translation_suggestions_in_review_by_exploration(self): suggestions[1].status, suggestion_models.STATUS_IN_REVIEW) - def test_get_translation_suggestions_in_review_by_exploration_returns_only_suggestions_with_supplied_language_code(self): # pylint: disable=line-too-long + def test_get_translation_suggestions_in_review_by_exploration_returns_only_suggestions_with_supplied_language_code( # pylint: disable=line-too-long + self + ) -> None: self._create_translation_suggestion_with_language_code('hi') self._create_translation_suggestion_with_language_code('hi') self._create_translation_suggestion_with_language_code('pt') @@ -1273,8 +1616,10 @@ def test_get_translation_suggestions_in_review_by_exploration_returns_only_sugge self.assertEqual(len(suggestions), 1) - def test_get_reviewable_translation_suggestions(self): - # Add few translation suggestions in different languages. + def test_get_reviewable_translation_suggestions_with_valid_exp_ids( + self + ) -> None: + # Add a few translation suggestions in different languages. 
self._create_translation_suggestion_with_language_code('hi') self._create_translation_suggestion_with_language_code('hi') self._create_translation_suggestion_with_language_code('pt') @@ -1289,27 +1634,158 @@ def test_get_reviewable_translation_suggestions(self): self.reviewer_id_1, 'hi') user_services.allow_user_to_review_translation_in_language( self.reviewer_id_1, 'pt') + # Get all reviewable translation suggestions. - suggestions = suggestion_services.get_reviewable_suggestions( - self.reviewer_id_1, feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT) + suggestions, offset = ( + suggestion_services. + get_reviewable_translation_suggestions_by_offset( + self.reviewer_id_1, self.opportunity_summary_ids, + constants.OPPORTUNITIES_PAGE_SIZE, 0, None)) # Expect that the results correspond to translation suggestions that the # user has rights to review. self.assertEqual(len(suggestions), 3) - actual_language_code_list = sorted([ + self.assertEqual(offset, 3) + actual_language_code_list = [ suggestion.change.language_code for suggestion in suggestions - ]) + ] expected_language_code_list = ['hi', 'hi', 'pt'] self.assertEqual(actual_language_code_list, expected_language_code_list) - def test_get_reviewable_question_suggestions(self): - # Add few translation suggestions in different languages. + def test_get_reviewable_translation_suggestions_with_empty_exp_ids( # pylint: disable=line-too-long + self + ) -> None: + # Add a few translation suggestions in different languages. + self._create_translation_suggestion_with_language_code('hi') self._create_translation_suggestion_with_language_code('hi') self._create_translation_suggestion_with_language_code('pt') self._create_translation_suggestion_with_language_code('bn') self._create_translation_suggestion_with_language_code('bn') - # Add few question suggestions. + # Provide the user permission to review suggestions in particular + # languages. 
+ user_services.allow_user_to_review_translation_in_language( + self.reviewer_id_1, 'hi') + user_services.allow_user_to_review_translation_in_language( + self.reviewer_id_1, 'pt') + + # Get all reviewable translation suggestions. + suggestions, offset = suggestion_services.get_reviewable_translation_suggestions_by_offset( + self.reviewer_id_1, [], + constants.OPPORTUNITIES_PAGE_SIZE, 0, None) + + self.assertEqual(offset, 0) + self.assertEqual(len(suggestions), 0) + + def test_get_reviewable_translation_suggestions_with_none_exp_ids( + self + ) -> None: + # Add a few translation suggestions in different languages. + self._create_translation_suggestion_with_language_code('hi') + self._create_translation_suggestion_with_language_code('hi') + self._create_translation_suggestion_with_language_code('pt') + self._create_translation_suggestion_with_language_code('bn') + self._create_translation_suggestion_with_language_code('bn') + # Provide the user permission to review suggestions in particular + # languages. + user_services.allow_user_to_review_translation_in_language( + self.reviewer_id_1, 'hi') + user_services.allow_user_to_review_translation_in_language( + self.reviewer_id_1, 'pt') + + # Get all reviewable translation suggestions. + suggestions, offset = ( + suggestion_services. + get_reviewable_translation_suggestions_by_offset( + self.reviewer_id_1, None, + constants.OPPORTUNITIES_PAGE_SIZE, 0, None)) + + self.assertEqual(len(suggestions), 3) + self.assertEqual(offset, 3) + actual_language_code_list = [ + suggestion.change.language_code + for suggestion in suggestions + ] + expected_language_code_list = ['hi', 'hi', 'pt'] + self.assertEqual(actual_language_code_list, expected_language_code_list) + + def test_get_reviewable_translation_suggestions_with_no_reviewable_languages( # pylint: disable=line-too-long + self + ) -> None: + # Add a few translation suggestions in different languages. 
+ self._create_translation_suggestion_with_language_code('hi') + self._create_translation_suggestion_with_language_code('hi') + self._create_translation_suggestion_with_language_code('pt') + self._create_translation_suggestion_with_language_code('bn') + self._create_translation_suggestion_with_language_code('bn') + + # Get all reviewable translation suggestions. + suggestions, offset = ( + suggestion_services. + get_reviewable_translation_suggestions_by_offset( + self.reviewer_id_1, None, + constants.OPPORTUNITIES_PAGE_SIZE, 0, None)) + + # The user does not have rights to review any languages, so expect an + # empty result. + self.assertEqual(len(suggestions), 0) + self.assertEqual(offset, 0) + + def test_get_reviewable_translation_suggestions_with_language_filter( + self + ) -> None: + # Add a few translation suggestions in different languages. + self._create_translation_suggestion_with_language_code('hi') + self._create_translation_suggestion_with_language_code('hi') + self._create_translation_suggestion_with_language_code('pt') + self._create_translation_suggestion_with_language_code('bn') + self._create_translation_suggestion_with_language_code('bn') + # Provide the user permission to review suggestions in particular + # languages. + user_services.allow_user_to_review_translation_in_language( + self.reviewer_id_1, 'hi') + user_services.allow_user_to_review_translation_in_language( + self.reviewer_id_1, 'pt') + + # Get reviewable translation suggestions in Hindi. + language_to_filter = 'hi' + suggestions, _ = ( + suggestion_services. + get_reviewable_translation_suggestions_by_offset( + self.reviewer_id_1, self.opportunity_summary_ids, + constants.OPPORTUNITIES_PAGE_SIZE, 0, None, language_to_filter)) + + # Expect that the results correspond to translation suggestions that the + # user has rights to review. 
+ self.assertEqual(len(suggestions), 2) + self.assertEqual(suggestions[0].change.language_code, 'hi') + self.assertEqual(suggestions[1].change.language_code, 'hi') + + # Get reviewable translation suggestions in Spanish (there are none). + language_to_filter = 'es' + suggestions, _ = ( + suggestion_services. + get_reviewable_translation_suggestions_by_offset( + self.reviewer_id_1, self.opportunity_summary_ids, + constants.OPPORTUNITIES_PAGE_SIZE, 0, None, language_to_filter)) + + # Expect that the results correspond to translation suggestions that the + # user has rights to review. + self.assertEqual(len(suggestions), 0) + actual_language_code_list = [ + suggestion.change.language_code + for suggestion in suggestions + ] + expected_language_code_list: List[str] = [] + self.assertEqual(actual_language_code_list, expected_language_code_list) + + def test_get_reviewable_question_suggestions(self) -> None: + # Add a few translation suggestions in different languages. + self._create_translation_suggestion_with_language_code('hi') + self._create_translation_suggestion_with_language_code('pt') + self._create_translation_suggestion_with_language_code('bn') + self._create_translation_suggestion_with_language_code('bn') + # Add a few question suggestions. self._create_question_suggestion_with_skill_id('skill1') self._create_question_suggestion_with_skill_id('skill2') # Provide the user permission to review suggestions in particular @@ -1320,13 +1796,19 @@ def test_get_reviewable_question_suggestions(self): self.reviewer_id_1, 'pt') # Provide the user permission to review question suggestions. user_services.allow_user_to_review_question(self.reviewer_id_1) + # Get all reviewable question suggestions. 
- suggestions = suggestion_services.get_reviewable_suggestions( - self.reviewer_id_1, feconf.SUGGESTION_TYPE_ADD_QUESTION) + suggestions, offset = ( + suggestion_services.get_reviewable_question_suggestions_by_offset( + self.reviewer_id_1, + limit=constants.OPPORTUNITIES_PAGE_SIZE, + offset=0, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE)) # Expect that the results correspond to question suggestions. self.assertEqual(len(suggestions), 2) - expected_suggestion_type_list = ['skill1', 'skill2'] + self.assertEqual(offset, 2) + expected_suggestion_type_list = ['skill2', 'skill1'] actual_suggestion_type_list = [ suggestion.change.skill_id for suggestion in suggestions @@ -1335,7 +1817,8 @@ def test_get_reviewable_question_suggestions(self): actual_suggestion_type_list, expected_suggestion_type_list) def test_get_translation_suggestions_waiting_longest_for_review_per_lang( - self): + self + ) -> None: suggestion_1 = self._create_translation_suggestion_with_language_code( 'hi') suggestion_2 = self._create_translation_suggestion_with_language_code( @@ -1361,7 +1844,8 @@ def test_get_translation_suggestions_waiting_longest_for_review_per_lang( suggestions[i].last_updated, suggestions[i + 1].last_updated) def test_get_translation_suggestions_waiting_longest_for_review_wrong_lang( - self): + self + ) -> None: suggestions = ( suggestion_services .get_translation_suggestions_waiting_longest_for_review( @@ -1370,7 +1854,8 @@ def test_get_translation_suggestions_waiting_longest_for_review_wrong_lang( self.assertEqual(len(suggestions), 0) def test_get_question_suggestions_waiting_longest_for_review_keeps_order( - self): + self + ) -> None: """This test makes sure that if a suggestion is rejected and is then resubmitted, we count the time that the suggestion has been waiting for review from when it was resubmitted, not from when it was first @@ -1408,6 +1893,8 @@ def test_get_question_suggestions_waiting_longest_for_review_keeps_order( # Change the question_dict of the question 
suggestion that got rejected # so we can resubmit the suggestion for review. resubmit_question_change = suggestion_1.change + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(resubmit_question_change.question_dict, dict) resubmit_question_change.question_dict['linked_skill_ids'] = ['skill1'] # Resubmit the rejected question suggestion. @@ -1431,7 +1918,7 @@ def test_get_question_suggestions_waiting_longest_for_review_keeps_order( self.assertLessEqual( suggestions[0].last_updated, suggestions[1].last_updated) - def test_get_question_suggestions_waiting_longest_for_review(self): + def test_get_question_suggestions_waiting_longest_for_review(self) -> None: suggestion_1 = self._create_question_suggestion_with_skill_id('skill1') suggestion_2 = self._create_question_suggestion_with_skill_id('skill2') suggestion_3 = self._create_question_suggestion_with_skill_id('skill3') @@ -1453,7 +1940,7 @@ def test_get_question_suggestions_waiting_longest_for_review(self): self.assertLessEqual( suggestions[i].last_updated, suggestions[i + 1].last_updated) - def test_query_suggestions_that_can_be_reviewed_by_user(self): + def test_query_suggestions_that_can_be_reviewed_by_user(self) -> None: # User proficiency models for user1. 
user_models.UserContributionProficiencyModel.create( 'user1', 'category1', 15) @@ -1512,34 +1999,40 @@ def test_query_suggestions_that_can_be_reviewed_by_user(self): class SuggestionIntegrationTests(test_utils.GenericTestBase): - EXP_ID = 'exp1' - TOPIC_ID = 'topic1' - STORY_ID = 'story1' - TRANSLATION_LANGUAGE_CODE = 'en' + EXP_ID: Final = 'exp1' + TOPIC_ID: Final = 'topic1' + STORY_ID: Final = 'story1' + TRANSLATION_LANGUAGE_CODE: Final = 'en' - AUTHOR_EMAIL = 'author@example.com' + AUTHOR_EMAIL: Final = 'author@example.com' - score_category = ( + score_category: str = ( suggestion_models.SCORE_TYPE_CONTENT + suggestion_models.SCORE_CATEGORY_DELIMITER + 'Algebra') - THREAD_ID = 'exploration.exp1.thread_1' + THREAD_ID: Final = 'exploration.exp1.thread_1' - COMMIT_MESSAGE = 'commit message' + COMMIT_MESSAGE: Final = 'commit message' - def mock_generate_new_thread_id(self, unused_entity_type, unused_entity_id): + def mock_generate_new_thread_id( + self, unused_entity_type: str, unused_entity_id: str + ) -> str: return self.THREAD_ID - def setUp(self): - super(SuggestionIntegrationTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.AUTHOR_EMAIL, 'author') + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.reviewer_id = self.editor_id + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.editor = user_services.get_user_actions_info(self.editor_id) # Login and create exploration and suggestions. @@ -1548,15 +2041,16 @@ def setUp(self): # Create exploration. 
exploration = ( self.save_new_linear_exp_with_state_names_and_interactions( - self.EXP_ID, self.editor_id, ['State 1', 'State 2'], + self.EXP_ID, self.editor_id, + ['State 1', 'State 2', 'End State'], ['TextInput'], category='Algebra', correctness_feedback_enabled=True)) self.old_content = state_domain.SubtitledHtml( - 'content', '

    old content

    ').to_dict() - recorded_voiceovers_dict = { + 'content_0', '

    old content

    ').to_dict() + recorded_voiceovers_dict: state_domain.RecordedVoiceoversDict = { 'voiceovers_mapping': { - 'content': { + 'content_0': { self.TRANSLATION_LANGUAGE_CODE: { 'filename': 'filename3.mp3', 'file_size_bytes': 3000, @@ -1564,8 +2058,8 @@ def setUp(self): 'duration_secs': 42.43 } }, - 'default_outcome': {}, - 'ca_placeholder_0': {} + 'default_outcome_1': {}, + 'ca_placeholder_6': {} } } self.old_recorded_voiceovers = ( @@ -1595,7 +2089,9 @@ def setUp(self): self.new_content = state_domain.SubtitledHtml( 'content', '

    new content

    ').to_dict() - self.change = { + self.change: Dict[ + str, Union[str, state_domain.SubtitledHtmlDict] + ] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 1', @@ -1636,7 +2132,8 @@ def setUp(self): story_change_list_to_add_an_exp, 'Added exploration.') def create_translation_suggestion_associated_with_exp( - self, exp_id, author_id): + self, exp_id: str, author_id: str + ) -> None: """Creates a translation suggestion that is associated with an exploration with id exp_id. The author of the created suggestion is author_id. @@ -1644,58 +2141,1075 @@ def create_translation_suggestion_associated_with_exp( # Gets the html content in the exploration to be translated. exploration = exp_fetchers.get_exploration_by_id(exp_id) content_html = exploration.states['State 1'].content.html + content_id = exploration.states['State 1'].content.content_id add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': 'State 1', - 'content_id': 'content', + 'content_id': content_id, 'language_code': 'hi', 'content_html': content_html, 'translation_html': '

    This is translated html.

    ', 'data_format': 'html' } - suggestion_services.create_suggestion( - feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, - feconf.ENTITY_TYPE_EXPLORATION, - exp_id, 1, author_id, add_translation_change_dict, - 'test description') + suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + exp_id, 1, author_id, add_translation_change_dict, + 'test description') + + def assert_created_suggestion_is_valid( + self, target_id: str, author_id: str + ) -> None: + """Assert that the created suggestion is in review and that only one + suggestion with the given target_id and author_id exists. + """ + suggestions = suggestion_services.query_suggestions( + [('author_id', author_id), ('target_id', target_id)]) + self.assertEqual(len(suggestions), 1) + self.assertEqual( + suggestions[0].status, suggestion_models.STATUS_IN_REVIEW) + + def test_create_and_accept_suggestion(self) -> None: + with self.swap( + feedback_models.GeneralFeedbackThreadModel, + 'generate_new_thread_id', self.mock_generate_new_thread_id): + suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + self.EXP_ID, self.target_version_at_submission, + self.author_id, self.change, 'test description') + + suggestion_id = self.THREAD_ID + + suggestion_services.accept_suggestion( + suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE, '') + + exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID) + + self.assertEqual( + exploration.states['State 1'].content.html, + '

    new content

    ') + + suggestion = suggestion_services.get_suggestion_by_id(suggestion_id) + self.assertEqual(suggestion.status, suggestion_models.STATUS_ACCEPTED) + + def test_create_translation_contribution_stats_from_model(self) -> None: + suggestion_models.TranslationContributionStatsModel.create( + language_code='es', + contributor_user_id='user_id', + topic_id='topic_id', + submitted_translations_count=2, + submitted_translation_word_count=100, + accepted_translations_count=1, + accepted_translations_without_reviewer_edits_count=0, + accepted_translation_word_count=50, + rejected_translations_count=0, + rejected_translation_word_count=0, + contribution_dates=[ + datetime.date.fromtimestamp(1616173836), + datetime.date.fromtimestamp(1616173837) + ] + ) + translation_suggestion = suggestion_services.get_all_translation_contribution_stats( # pylint: disable=line-too-long + 'user_id') + self.assertEqual(len(translation_suggestion), 1) + self.assertEqual(translation_suggestion[0].language_code, 'es') + self.assertEqual( + translation_suggestion[0].contributor_user_id, + 'user_id' + ) + + def test_fetch_all_contribution_stats(self) -> None: + suggestion_models.TranslationContributionStatsModel.create( + language_code='es', + contributor_user_id='user_id', + topic_id='topic_id', + submitted_translations_count=2, + submitted_translation_word_count=100, + accepted_translations_count=1, + accepted_translations_without_reviewer_edits_count=0, + accepted_translation_word_count=50, + rejected_translations_count=0, + rejected_translation_word_count=0, + contribution_dates=[ + datetime.date.fromtimestamp(1616173836), + datetime.date.fromtimestamp(1616173837) + ] + ) + suggestion_models.TranslationReviewStatsModel.create( + language_code='es', + reviewer_user_id='user_id', + topic_id='topic_id', + reviewed_translations_count=1, + reviewed_translation_word_count=1, + accepted_translations_count=1, + accepted_translations_with_reviewer_edits_count=0, + accepted_translation_word_count=1, 
+ first_contribution_date=datetime.date.fromtimestamp(1616173836), + last_contribution_date=datetime.date.fromtimestamp(1616173836) + ) + suggestion_models.QuestionContributionStatsModel.create( + contributor_user_id='user_id', + topic_id='topic_id', + submitted_questions_count=1, + accepted_questions_count=1, + accepted_questions_without_reviewer_edits_count=0, + first_contribution_date=datetime.date.fromtimestamp(1616173836), + last_contribution_date=datetime.date.fromtimestamp(1616173836) + ) + suggestion_models.QuestionReviewStatsModel.create( + reviewer_user_id='user_id', + topic_id='topic_id', + reviewed_questions_count=1, + accepted_questions_count=1, + accepted_questions_with_reviewer_edits_count=1, + first_contribution_date=datetime.date.fromtimestamp(1616173836), + last_contribution_date=datetime.date.fromtimestamp(1616173836) + ) + + stats = suggestion_services.get_all_contributor_stats( # pylint: disable=line-too-long + 'user_id') + + self.assertEqual(stats.contributor_user_id, 'user_id') + self.assertEqual(len(stats.translation_contribution_stats), 1) + self.assertEqual( + stats.translation_contribution_stats[0].language_code, 'es') + self.assertEqual(len(stats.question_contribution_stats), 1) + self.assertEqual( + stats.question_contribution_stats[0].contributor_user_id, 'user_id') + self.assertEqual(len(stats.translation_review_stats), 1) + self.assertEqual( + stats.translation_review_stats[0].contributor_user_id, 'user_id') + self.assertEqual(len(stats.question_review_stats), 1) + self.assertEqual( + stats.question_review_stats[0].contributor_user_id, 'user_id') + + def _publish_valid_topic( + self, topic: topic_domain.Topic, + uncategorized_skill_ids: List[str]) -> None: + """Saves and publishes a valid topic with linked skills and subtopic. + + Args: + topic: Topic. The topic to be saved and published. + uncategorized_skill_ids: list(str). List of uncategorized skills IDs + to add to the supplied topic. 
+ """ + topic.thumbnail_filename = 'thumbnail.svg' + topic.thumbnail_bg_color = '#C6DCDA' + subtopic_id = 1 + subtopic_skill_id = 'subtopic_skill_id' + topic.id + topic.subtopics = [ + topic_domain.Subtopic( + subtopic_id, 'Title', [subtopic_skill_id], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic')] + topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = [subtopic_skill_id] + subtopic_page = ( + subtopic_page_domain.SubtopicPage.create_default_subtopic_page( + subtopic_id, topic.id)) + subtopic_page_services.save_subtopic_page( + self.owner_id, subtopic_page, 'Added subtopic', + [topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_ADD_SUBTOPIC, + 'subtopic_id': 1, + 'title': 'Sample', + 'url_fragment': 'sample-fragment' + })] + ) + topic_services.save_new_topic(self.owner_id, topic) + topic_services.publish_topic(topic.id, self.admin_id) + + for skill_id in uncategorized_skill_ids: + self.save_new_skill( + skill_id, self.admin_id, description='skill_description') + topic_services.add_uncategorized_skill( + self.admin_id, topic.id, skill_id) + + def _set_up_topics_and_stories_for_translations(self) -> Mapping[ + str, change_domain.AcceptableChangeDictTypes]: + """Sets up required topics and stories for translations. It does the + following. + 1. Create 2 explorations and publish them. + 2. Create a default topic. + 3. Publish the topic with two story IDs. + 4. Create 2 stories for translation opportunities. + + Returns: + Mapping[str, change_domain.AcceptableChangeDictTypes]. A dictionary + of the change object for the translations. 
+ """ + explorations = [self.save_new_valid_exploration( + '%s' % i, + self.owner_id, + title='title %d' % i, + category=constants.ALL_CATEGORIES[i], + end_state_name='End State', + correctness_feedback_enabled=True + ) for i in range(2)] + + for exp in explorations: + self.publish_exploration(self.owner_id, exp.id) + exp_services.update_exploration( + self.owner_id, exp.id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': exp_domain.STATE_PROPERTY_CONTENT, + 'state_name': 'Introduction', + 'new_value': { + 'content_id': 'content_0', + 'html': '

    A content to translate.

    ' + } + })], 'Changes content.') + + topic_id = '0' + topic = topic_domain.Topic.create_default_topic( + topic_id, 'topic_name', 'abbrev', 'description', 'fragm') + skill_id_0 = 'skill_id_0' + skill_id_1 = 'skill_id_1' + self._publish_valid_topic(topic, [skill_id_0, skill_id_1]) + + self.create_story_for_translation_opportunity( + self.owner_id, self.admin_id, 'story_id_01', topic_id, '0') + self.create_story_for_translation_opportunity( + self.owner_id, self.admin_id, 'story_id_02', topic_id, '1') + + return { + 'cmd': 'add_written_translation', + 'content_id': 'content_0', + 'language_code': 'hi', + 'content_html': '

    A content to translate.

    ', + 'state_name': 'Introduction', + 'translation_html': '

    Translation for content.

    ', + 'data_format': 'html' + } + + def _get_change_with_normalized_string(self) -> Mapping[ + str, change_domain.AcceptableChangeDictTypes]: + """Provides change dictionary with normalized translation html. + + Returns: + Mapping[str, change_domain.AcceptableChangeDictTypes]. A dictionary + of the change object for the translations. + """ + return { + 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, + 'content_id': 'content_0', + 'language_code': 'hi', + 'content_html': '

    A content to translate.

    ', + 'state_name': 'Introduction', + 'translation_html': ['translated text1', 'translated text2'], + 'data_format': 'set_of_normalized_string' + } + + def test_update_translation_contribution_stats_without_language_codes( + self + ) -> None: + translation_contribution_stats = ( + suggestion_registry.TranslationContributionStats( + None, 'user1', 'topic1', 1, 1, 1, 0, 1, 0, 0, + {datetime.date.fromtimestamp(1616173836)} + ) + ) + with self.assertRaisesRegex( + Exception, + 'Language code should not be None.'): + suggestion_services._update_translation_contribution_stats_models( # pylint: disable=protected-access + [translation_contribution_stats]) + + def test_update_translation_contribution_stats_without_contributor_id( + self + ) -> None: + translation_contribution_stats = ( + suggestion_registry.TranslationContributionStats( + 'hi', None, 'topic1', 1, 1, 1, 0, 1, 0, 0, + {datetime.date.fromtimestamp(1616173836)} + ) + ) + with self.assertRaisesRegex( + Exception, + 'Contributor user ID should not be None.'): + suggestion_services._update_translation_contribution_stats_models( # pylint: disable=protected-access + [translation_contribution_stats]) + + def test_update_translation_contribution_stats_without_topic_id( + self + ) -> None: + translation_contribution_stats = ( + suggestion_registry.TranslationContributionStats( + 'hi', 'user1', None, 1, 1, 1, 0, 1, 0, 0, + {datetime.date.fromtimestamp(1616173836)} + ) + ) + with self.assertRaisesRegex( + Exception, + 'Topic ID should not be None.'): + suggestion_services._update_translation_contribution_stats_models( # pylint: disable=protected-access + [translation_contribution_stats]) + + def test_get_translation_contribution_stats_for_invalid_id_with_strict_true( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'The stats models do not exist for the stats_id invalid_id.'): + suggestion_services.get_translation_contribution_stats_models( + ['invalid_id']) + + def 
test_get_translation_contribution_stats_for_strict_false( + self + ) -> None: + stats_models = ( + suggestion_services + .get_translation_contribution_stats_models + )( + ['invalid_id'], strict=False) + + self.assertEqual(stats_models, [None]) + + def test_get_translation_review_stats_for_strict_false( + self + ) -> None: + stats_models = ( + suggestion_services + .get_translation_review_stats_models + )( + ['invalid_id'], strict=False) + + self.assertEqual(stats_models, [None]) + + def test_get_question_contribution_stats_for_strict_false( + self + ) -> None: + stats_models = ( + suggestion_services.get_question_contribution_stats_models + )( + ['invalid_id'], strict=False) + + self.assertEqual(stats_models, [None]) + + def test_get_question_review_stats_for_strict_false( + self + ) -> None: + stats_models = suggestion_services.get_question_review_stats_models( + ['invalid_id'], strict=False) + + self.assertEqual(stats_models, [None]) + + def test_get_translation_review_stats_for_invalid_id_with_strict_true( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'The stats models do not exist for the stats_id invalid_id.'): + suggestion_services.get_translation_review_stats_models( + ['invalid_id']) + + def test_get_question_contribution_stats_for_invalid_id_with_strict_true( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'The stats models do not exist for the stats_id invalid_id.'): + suggestion_services.get_question_contribution_stats_models( + ['invalid_id']) + + def test_get_question_review_stats_for_invalid_id_with_strict_true( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'The stats models do not exist for the stats_id invalid_id.'): + suggestion_services.get_question_review_stats_models( + ['invalid_id']) + + def test_update_translation_contribution_stats_when_submitting( + self) -> None: + # Steps required in the setup phase before testing. + # 1. Create and publish explorations. + # 2. 
Create and publish topics. + # 3. Create stories for translation opportunities. + # 4. Save translation suggestions. + change_dict = self._set_up_topics_and_stories_for_translations() + initial_suggestion = suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + '0', 1, self.author_id, change_dict, 'description') + new_change_dict = self._get_change_with_normalized_string() + latest_suggestion = suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + '1', 1, self.author_id, new_change_dict, 'description') + + suggestion_services.update_translation_contribution_stats_at_submission( + initial_suggestion + ) + suggestion_services.update_translation_contribution_stats_at_submission( + latest_suggestion + ) + + translation_contribution_stats_model = ( + suggestion_models.TranslationContributionStatsModel.get( + 'hi', self.author_id, '0' + ) + ) + # Assert translation contribution stats. + # At this point we can confirm that there should be an associated + # translation contribution stat object for the given IDs since we have + # called update_translation_contribution_stats_at_submission function + # to create/update translation contribution stats. + assert translation_contribution_stats_model is not None + self.assertEqual( + translation_contribution_stats_model.submitted_translations_count, + 2 + ) + self.assertEqual( + ( + translation_contribution_stats_model + .submitted_translation_word_count + ), + 7 + ) + self.assertEqual( + translation_contribution_stats_model.accepted_translations_count, + 0 + ) + + def test_update_translation_review_stats_when_suggestion_is_accepted( + self) -> None: + # This test case will check stats of the reviewer and the submitter + # when a translation suggestion is accepted. + # Steps required in the setup phase before testing. + # 1. Create and publish explorations. + # 2. Create and publish topics. + # 3. 
Create stories for translation opportunities. + # 4. Save translation suggestions. + change_dict = self._set_up_topics_and_stories_for_translations() + initial_suggestion = suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + '0', 1, self.author_id, change_dict, 'description') + new_change_dict = self._get_change_with_normalized_string() + latest_suggestion = suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + '1', 1, self.author_id, new_change_dict, 'description') + suggestion_services.accept_suggestion( + initial_suggestion.suggestion_id, self.reviewer_id, 'Accepted', + 'Accepted') + suggestion_services.accept_suggestion( + latest_suggestion.suggestion_id, self.reviewer_id, 'Accepted', + 'Accepted') + + suggestion_services.update_translation_review_stats( + suggestion_services.get_suggestion_by_id( + initial_suggestion.suggestion_id) + ) + suggestion_services.update_translation_review_stats( + suggestion_services.get_suggestion_by_id( + latest_suggestion.suggestion_id) + ) + + translation_review_stats_model = ( + suggestion_models.TranslationReviewStatsModel.get( + 'hi', self.reviewer_id, '0' + ) + ) + translation_contribution_stats_model = ( + suggestion_models.TranslationContributionStatsModel.get( + 'hi', self.author_id, '0' + ) + ) + # Assert translation review stats after the review. + # At this point we can confirm that there should be an associated + # translation review stat object for the given IDs since we have + # called update_translation_review_stats function to create/update + # translation review stats. 
+ assert translation_review_stats_model is not None + self.assertEqual( + translation_review_stats_model.accepted_translations_count, + 2 + ) + self.assertEqual( + ( + translation_review_stats_model + .reviewed_translation_word_count + ), + 7 + ) + assert translation_contribution_stats_model is not None + self.assertEqual( + ( + translation_contribution_stats_model + .accepted_translation_word_count + ), + 7 + ) + self.assertEqual( + translation_contribution_stats_model.accepted_translations_count, + 2 + ) + + def test_update_translation_review_stats_when_suggestion_is_rejected( + self) -> None: + # This test case will check stats of the reviewer and the submitter + # when a translation suggestion is rejected. + # Steps required in the setup phase before testing. + # 1. Create and publish explorations. + # 2. Create and publish topics. + # 3. Create stories for translation opportunities. + # 4. Save translation suggestions. + change_dict = self._set_up_topics_and_stories_for_translations() + initial_suggestion = suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + '0', 1, self.author_id, change_dict, 'description') + latest_suggestion = suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + '1', 1, self.author_id, change_dict, 'description') + suggestion_services.reject_suggestion( + initial_suggestion.suggestion_id, self.reviewer_id, 'Rejected') + suggestion_services.reject_suggestion( + latest_suggestion.suggestion_id, self.reviewer_id, 'Rejected') + + suggestion_services.update_translation_review_stats( + suggestion_services.get_suggestion_by_id( + initial_suggestion.suggestion_id) + ) + suggestion_services.update_translation_review_stats( + suggestion_services.get_suggestion_by_id( + latest_suggestion.suggestion_id) + ) + + translation_review_stats_model = ( + suggestion_models.TranslationReviewStatsModel.get( + 'hi', 
self.reviewer_id, '0' + ) + ) + translation_contribution_stats_model = ( + suggestion_models.TranslationContributionStatsModel.get( + 'hi', self.author_id, '0' + ) + ) + # Assert translation review stats after the review. + # At this point we can confirm that there should be an associated + # translation review stat object for the given IDs since we have + # called update_translation_review_stats function to create/update + # translation review stats. + assert translation_review_stats_model is not None + self.assertEqual( + translation_review_stats_model.reviewed_translations_count, + 2 + ) + self.assertEqual( + translation_review_stats_model.accepted_translations_count, + 0 + ) + self.assertEqual( + translation_review_stats_model.accepted_translation_word_count, + 0 + ) + self.assertEqual( + ( + translation_review_stats_model + .reviewed_translation_word_count + ), + 6 + ) + assert translation_contribution_stats_model is not None + self.assertEqual( + translation_contribution_stats_model.rejected_translations_count, + 2 + ) + self.assertEqual( + ( + translation_contribution_stats_model + .rejected_translations_count + ), + 2 + ) + self.assertEqual( + translation_contribution_stats_model.accepted_translations_count, + 0 + ) + + def test_update_translation_review_stats_without_a_reviewer_id( + self) -> None: + change_dict = self._set_up_topics_and_stories_for_translations() + translation_suggestion = suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + '0', 1, self.author_id, change_dict, 'description') + + with self.assertRaisesRegex( + Exception, + 'The final_reviewer_id in the suggestion should not be None.'): + suggestion_services.update_translation_review_stats( + translation_suggestion) + + def test_update_question_review_stats_without_a_reviewer_id( + self) -> None: + skill_id_1 = self._create_skill() + skill_id_2 = self._create_skill() + self._create_topic(skill_id_1, skill_id_2) + 
initial_suggestion = self._create_question_suggestion(skill_id_1) + suggestion_services.update_question_contribution_stats_at_submission( + initial_suggestion + ) + + with self.assertRaisesRegex( + Exception, + 'The final_reviewer_id in the suggestion should not be None.'): + suggestion_services.update_question_review_stats( + initial_suggestion + ) + + def test_update_translation_review_stats_when_suggestion_is_edited( + self) -> None: + # This test case will check stats of the reviewer and the submitter + # when a translation suggestion is accepted with reviewer edits. + # Steps required in the setup phase before testing. + # 1. Create and publish explorations. + # 2. Create and publish topics. + # 3. Create stories for translation opportunities. + # 4. Save translation suggestions. + change_dict = self._set_up_topics_and_stories_for_translations() + initial_suggestion = suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + '0', 1, self.author_id, change_dict, 'description') + latest_suggestion = suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + '1', 1, self.author_id, change_dict, 'description') + # Contributor's stats are updated manually since contributor's stats are + # checked later. 
+ suggestion_services.update_translation_contribution_stats_at_submission( + initial_suggestion + ) + suggestion_services.update_translation_contribution_stats_at_submission( + latest_suggestion + ) + suggestion_services.update_translation_suggestion( + initial_suggestion.suggestion_id, 'Edited') + suggestion_services.update_translation_suggestion( + latest_suggestion.suggestion_id, 'Edited') + suggestion_services.accept_suggestion( + initial_suggestion.suggestion_id, self.reviewer_id, 'Accepted', + 'Accepted') + suggestion_services.accept_suggestion( + latest_suggestion.suggestion_id, self.reviewer_id, 'Accepted', + 'Accepted') + + suggestion_services.update_translation_review_stats( + suggestion_services.get_suggestion_by_id( + initial_suggestion.suggestion_id) + ) + suggestion_services.update_translation_review_stats( + suggestion_services.get_suggestion_by_id( + latest_suggestion.suggestion_id) + ) + + translation_review_stats_model = ( + suggestion_models.TranslationReviewStatsModel.get( + 'hi', self.reviewer_id, '0' + ) + ) + translation_contribution_stats_model = ( + suggestion_models.TranslationContributionStatsModel.get( + 'hi', self.author_id, '0' + ) + ) + # Assert translation review stats after the review. + # At this point we can confirm that there should be an associated + # translation review stat object for the given IDs since we have + # called update_translation_review_stats function to create/update + # translation review stats. 
+ assert translation_review_stats_model is not None + self.assertEqual( + translation_review_stats_model.accepted_translations_count, + 2 + ) + self.assertEqual( + translation_review_stats_model.accepted_translation_word_count, + 2 + ) + self.assertEqual( + ( + translation_review_stats_model + .reviewed_translation_word_count + ), + 2 + ) + self.assertEqual( + translation_review_stats_model + .accepted_translations_with_reviewer_edits_count, + 2 + ) + assert translation_contribution_stats_model is not None + self.assertEqual( + translation_contribution_stats_model.submitted_translations_count, + 2 + ) + self.assertEqual( + ( + translation_contribution_stats_model + .submitted_translation_word_count + ), + 6 + ) + self.assertEqual( + translation_contribution_stats_model.accepted_translations_count, + 2 + ) + self.assertEqual( + ( + translation_contribution_stats_model + .accepted_translations_without_reviewer_edits_count + ), + 0 + ) + + def _create_question_suggestion( + self, + skill_id: str + ) -> suggestion_registry.SuggestionAddQuestion: + """Creates a question suggestion corresponding to the supplied skill. + + Args: + skill_id: str. ID of the skill. + + Returns: + SuggestionAddQuestion. A new question suggestion. 
+ """ + content_id_generator = translation_domain.ContentIdGenerator() + suggestion_change: Dict[ + str, + Union[str, float, Dict[str, Union[ + str, List[str], int, state_domain.StateDict]]]] = { + 'cmd': ( + question_domain + .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), + 'question_dict': { + 'question_state_data': self._create_valid_question_data( + 'default_state', content_id_generator).to_dict(), + 'language_code': 'en', + 'question_state_data_schema_version': ( + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'linked_skill_ids': ['skill_2'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index + ), + 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + }, + 'skill_id': skill_id, + 'skill_difficulty': 0.3 + } + return suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_ADD_QUESTION, + feconf.ENTITY_TYPE_SKILL, skill_id, 1, + self.author_id, suggestion_change, 'test description') + + def _create_skill(self) -> str: + """Creates a skill for a question. + + Returns: + str. A skill ID. + """ + skill_id = skill_services.get_new_skill_id() + self.save_new_skill( + skill_id, self.author_id, description='description') + return skill_id + + def _create_topic(self, first_skill_id: str, second_skill_id: str) -> str: + """Creates a topic for a question. + + Args: + first_skill_id: str. ID of the first skill. + second_skill_id: str. ID of the second skill. + + Returns: + str. A topic ID. + """ + topic_id = topic_fetchers.get_new_topic_id() + self.save_new_topic( + topic_id, 'topic_admin', name='Topic1', + abbreviated_name='topic-three', url_fragment='topic-three', + description='Description', + canonical_story_ids=[], + additional_story_ids=[], + uncategorized_skill_ids=[first_skill_id, second_skill_id], + subtopics=[], next_subtopic_id=1) + return topic_id + + def test_update_question_contribution_stats_when_submitting(self) -> None: + # Steps required in the setup phase before testing. + # 1. Save new skills. + # 2. 
Save a topic assigning skills for it. + # 3. Create a question suggestion. + skill_id_1 = self._create_skill() + skill_id_2 = self._create_skill() + topic_id = self._create_topic(skill_id_1, skill_id_2) + initial_suggestion = self._create_question_suggestion(skill_id_1) + latest_suggestion = self._create_question_suggestion(skill_id_2) + + # Action to update question contribution stats. + suggestion_services.update_question_contribution_stats_at_submission( + initial_suggestion + ) + suggestion_services.update_question_contribution_stats_at_submission( + latest_suggestion + ) + + question_contribution_stats_model = ( + suggestion_models.QuestionContributionStatsModel.get( + self.author_id, topic_id + ) + ) + # Assert question contribution stats before the review. + # At this point we can confirm that there should be an associated + # question contribution stat object for the given IDs since we have + # called update_question_contribution_stats_at_submission function to + # create/update question contribution stats. + assert question_contribution_stats_model is not None + self.assertEqual( + question_contribution_stats_model.submitted_questions_count, + 2 + ) + self.assertEqual( + question_contribution_stats_model.accepted_questions_count, + 0 + ) + + def test_update_question_stats_when_suggestion_is_accepted( + self) -> None: + # This test case will check stats of the reviewer and the submitter + # when a question suggestion is accepted. + # Steps required in the setup phase before testing. + # 1. Save new skills. + # 2. Save a topic assigning skills for it. + # 3. Create a question suggestion. 
+ skill_id_1 = self._create_skill() + skill_id_2 = self._create_skill() + topic_id = self._create_topic(skill_id_1, skill_id_2) + initial_suggestion = self._create_question_suggestion(skill_id_1) + latest_suggestion = self._create_question_suggestion(skill_id_2) + suggestion_services.accept_suggestion( + initial_suggestion.suggestion_id, self.reviewer_id, 'Accepted', + 'Accepted') + suggestion_services.accept_suggestion( + latest_suggestion.suggestion_id, self.reviewer_id, 'Accepted', + 'Accepted') - def assert_created_suggestion_is_valid(self, target_id, author_id): - """Assert that the created suggestion is in review and that only one - suggestion with the given target_id and author_id exists. - """ - suggestions = suggestion_services.query_suggestions( - [('author_id', author_id), ('target_id', target_id)]) - self.assertEqual(len(suggestions), 1) + # Action to update stats when reviewing. + suggestion_services.update_question_review_stats( + suggestion_services.get_suggestion_by_id( + initial_suggestion.suggestion_id) + ) + suggestion_services.update_question_review_stats( + suggestion_services.get_suggestion_by_id( + latest_suggestion.suggestion_id) + ) + + question_review_stats_model = ( + suggestion_models.QuestionReviewStatsModel.get( + self.reviewer_id, topic_id + ) + ) + question_contribution_stats_model = ( + suggestion_models.QuestionContributionStatsModel.get( + self.author_id, topic_id + ) + ) + # Assert question review stats after the review. + # At this point we can confirm that there should be an associated + # question review stat object for the given IDs since we have + # called update_question_review_stats function to create/update question + # review stats. 
+ assert question_review_stats_model is not None self.assertEqual( - suggestions[0].status, suggestion_models.STATUS_IN_REVIEW) + question_review_stats_model.accepted_questions_count, + 2 + ) + self.assertEqual( + ( + question_review_stats_model + .reviewed_questions_count + ), + 2 + ) + assert question_contribution_stats_model is not None + self.assertEqual( + question_contribution_stats_model.accepted_questions_count, + 2 + ) + self.assertEqual( + ( + question_contribution_stats_model + .accepted_questions_without_reviewer_edits_count + ), + 2 + ) - def test_create_and_accept_suggestion(self): - with self.swap( - feedback_models.GeneralFeedbackThreadModel, - 'generate_new_thread_id', self.mock_generate_new_thread_id): - suggestion_services.create_suggestion( - feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, - feconf.ENTITY_TYPE_EXPLORATION, - self.EXP_ID, self.target_version_at_submission, - self.author_id, self.change, 'test description') + def test_update_question_stats_when_suggestion_is_rejected( + self) -> None: + # This test case will check stats of the reviewer and the submitter + # when a question suggestion is rejected. + # Steps required in the setup phase before testing. + # 1. Save new skills. + # 2. Save a topic assigning skills for it. + # 3. Create a question suggestion. + skill_id_1 = self._create_skill() + skill_id_2 = self._create_skill() + topic_id = self._create_topic(skill_id_1, skill_id_2) + initial_suggestion = self._create_question_suggestion(skill_id_1) + latest_suggestion = self._create_question_suggestion(skill_id_2) + suggestion_services.reject_suggestion( + initial_suggestion.suggestion_id, self.reviewer_id, 'Rejected') + suggestion_services.reject_suggestion( + latest_suggestion.suggestion_id, self.reviewer_id, 'Rejected') - suggestion_id = self.THREAD_ID + # Action to update stats when revieweing. 
+ suggestion_services.update_question_review_stats( + suggestion_services.get_suggestion_by_id( + initial_suggestion.suggestion_id) + ) + suggestion_services.update_question_review_stats( + suggestion_services.get_suggestion_by_id( + latest_suggestion.suggestion_id) + ) + + question_review_stats_model = ( + suggestion_models.QuestionReviewStatsModel.get( + self.reviewer_id, topic_id + ) + ) + question_contribution_stats_model = ( + suggestion_models.QuestionContributionStatsModel.get( + self.author_id, topic_id + ) + ) + # Assert question review stats after the review. + # At this point we can confirm that there should be an associated + # question review stat object for the given IDs since we have + # called update_question_review_stats function to create/update question + # review stats. + assert question_review_stats_model is not None + self.assertEqual( + question_review_stats_model.reviewed_questions_count, + 2 + ) + self.assertEqual( + question_review_stats_model.accepted_questions_count, + 0 + ) + self.assertEqual( + ( + question_review_stats_model + .reviewed_questions_count + ), + 2 + ) + assert question_contribution_stats_model is not None + self.assertEqual( + question_contribution_stats_model.accepted_questions_count, + 0 + ) + self.assertEqual( + ( + question_contribution_stats_model + .accepted_questions_without_reviewer_edits_count + ), + 0 + ) + def test_update_question_stats_when_suggestion_is_edited( + self + ) -> None: + # This test case will check stats of the reviewer and the submitter + # when a question suggestion is accepted with reviewer edits. + # Steps required in the setup phase before testing. + # 1. Save new skills. + # 2. Save a topic assigning skills for it. + # 3. Create a question suggestion. 
+ skill_id_1 = self._create_skill() + skill_id_2 = self._create_skill() + topic_id = self._create_topic(skill_id_1, skill_id_2) + initial_suggestion = self._create_question_suggestion(skill_id_1) + latest_suggestion = self._create_question_suggestion(skill_id_2) + content_id_generator = translation_domain.ContentIdGenerator() + question_state_data = self._create_valid_question_data( + 'default_state', content_id_generator).to_dict() + suggestion_services.accept_suggestion( + initial_suggestion.suggestion_id, self.reviewer_id, 'Accepted', + 'Accepted') suggestion_services.accept_suggestion( - suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE, None) + latest_suggestion.suggestion_id, self.reviewer_id, 'Accepted', + 'Accepted') + suggestion_services.update_question_suggestion( + initial_suggestion.suggestion_id, 0.6, question_state_data, + content_id_generator.next_content_id_index) + suggestion_services.update_question_suggestion( + latest_suggestion.suggestion_id, 0.6, question_state_data, + content_id_generator.next_content_id_index) - exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID) + # Actual action to update stats when reviewing. + suggestion_services.update_question_review_stats( + suggestion_services.get_suggestion_by_id( + initial_suggestion.suggestion_id) + ) + suggestion_services.update_question_review_stats( + suggestion_services.get_suggestion_by_id( + latest_suggestion.suggestion_id) + ) + question_review_stats_model = ( + suggestion_models.QuestionReviewStatsModel.get( + self.reviewer_id, topic_id + ) + ) + question_contribution_stats_model = ( + suggestion_models.QuestionContributionStatsModel.get( + self.author_id, topic_id + ) + ) + # Assert question review stats. + # At this point we can confirm that there should be an associated + # question review stat object for the given IDs since we have + # called update_question_review_stats function to create/update question + # review stats. 
+ assert question_review_stats_model is not None self.assertEqual( - exploration.states['State 1'].content.html, - '

    new content

    ') - - suggestion = suggestion_services.get_suggestion_by_id(suggestion_id) - self.assertEqual(suggestion.status, suggestion_models.STATUS_ACCEPTED) + question_review_stats_model.reviewed_questions_count, + 2 + ) + self.assertEqual( + question_review_stats_model.accepted_questions_count, + 2 + ) + self.assertEqual( + ( + question_review_stats_model + .accepted_questions_with_reviewer_edits_count + ), + 2 + ) + assert question_contribution_stats_model is not None + self.assertEqual( + question_contribution_stats_model.accepted_questions_count, + 2 + ) + self.assertEqual( + ( + question_contribution_stats_model + .accepted_questions_without_reviewer_edits_count + ), + 0 + ) - def test_create_and_reject_suggestion(self): + def test_create_and_reject_suggestion(self) -> None: with self.swap( feedback_models.GeneralFeedbackThreadModel, 'generate_new_thread_id', self.mock_generate_new_thread_id): @@ -1722,7 +3236,7 @@ def test_create_and_reject_suggestion(self): suggestion = suggestion_services.get_suggestion_by_id(suggestion_id) self.assertEqual(suggestion.status, suggestion_models.STATUS_REJECTED) - def test_create_and_accept_suggestion_with_message(self): + def test_create_and_accept_suggestion_with_message(self) -> None: with self.swap( feedback_models.GeneralFeedbackThreadModel, 'generate_new_thread_id', self.mock_generate_new_thread_id): @@ -1751,21 +3265,28 @@ def test_create_and_accept_suggestion_with_message(self): suggestion = suggestion_services.get_suggestion_by_id(suggestion_id) self.assertEqual(suggestion.status, suggestion_models.STATUS_ACCEPTED) - def test_delete_skill_rejects_question_suggestion(self): + def test_delete_skill_rejects_question_suggestion(self) -> None: skill_id = skill_services.get_new_skill_id() self.save_new_skill(skill_id, self.author_id, description='description') - suggestion_change = { + content_id_generator = translation_domain.ContentIdGenerator() + suggestion_change: Dict[ + str, Union[str, float, 
question_domain.QuestionDict] + ] = { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': skill_id, 'skill_difficulty': 0.3 @@ -1785,7 +3306,7 @@ def test_delete_skill_rejects_question_suggestion(self): self.assertEqual( suggestions[0].status, suggestion_models.STATUS_REJECTED) - def test_delete_topic_rejects_translation_suggestion(self): + def test_delete_topic_rejects_translation_suggestion(self) -> None: self.create_translation_suggestion_associated_with_exp( self.EXP_ID, self.author_id) self.assert_created_suggestion_is_valid(self.EXP_ID, self.author_id) @@ -1799,7 +3320,7 @@ def test_delete_topic_rejects_translation_suggestion(self): self.assertEqual( suggestions[0].status, suggestion_models.STATUS_REJECTED) - def test_delete_story_rejects_translation_suggestion(self): + def test_delete_story_rejects_translation_suggestion(self) -> None: self.create_translation_suggestion_associated_with_exp( self.EXP_ID, self.author_id) self.assert_created_suggestion_is_valid(self.EXP_ID, self.author_id) @@ -1813,7 +3334,7 @@ def test_delete_story_rejects_translation_suggestion(self): self.assertEqual( suggestions[0].status, suggestion_models.STATUS_REJECTED) - def test_remove_exp_from_story_rejects_translation_suggestion(self): + def test_remove_exp_from_story_rejects_translation_suggestion(self) -> None: self.create_translation_suggestion_associated_with_exp( self.EXP_ID, self.author_id) 
self.assert_created_suggestion_is_valid(self.EXP_ID, self.author_id) @@ -1836,17 +3357,87 @@ def test_remove_exp_from_story_rejects_translation_suggestion(self): self.assertEqual( suggestions[0].status, suggestion_models.STATUS_REJECTED) + def test_get_suggestions_with_translatable_explorations(self) -> None: + # Create a translation suggestion for (state_name, content_id) = + # (State 2, content). + exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID) + state_name = 'State 2' + add_translation_change_dict = { + 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, + 'state_name': state_name, + 'content_id': exploration.states[state_name].content.content_id, + 'language_code': 'hi', + 'content_html': exploration.states[state_name].content.html, + 'translation_html': '

    This is translated html.

    ', + 'data_format': 'html' + } + suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + self.EXP_ID, 1, self.author_id, add_translation_change_dict, + 'test description') + suggestions = suggestion_services.query_suggestions( + [('author_id', self.author_id), ('target_id', self.EXP_ID)]) + self.assertEqual(len(suggestions), 1) + + translatable_suggestions = [] + for suggestion in suggestions: + assert isinstance( + suggestion, suggestion_registry.SuggestionTranslateContent + ) + translatable_suggestions.append(suggestion) + + # Should return the created translation suggestion. + filtered_translatable_suggestions = ( + suggestion_services.get_suggestions_with_translatable_explorations( + translatable_suggestions + ) + ) + self.assertEqual(len(filtered_translatable_suggestions), 1) + + # Delete the exploration state corresponding to the translation + # suggestion. + init_state = exploration.states[exploration.init_state_name] + outcome_object = init_state.interaction.default_outcome + # Ruling out the possibility of None for mypy type checking. + assert outcome_object is not None + default_outcome_dict = outcome_object.to_dict() + default_outcome_dict['dest'] = 'End State' + exp_services.update_exploration( + self.owner_id, self.EXP_ID, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'property_name': ( + exp_domain.STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME), + 'state_name': exploration.init_state_name, + 'new_value': default_outcome_dict + }), + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_DELETE_STATE, + 'state_name': state_name, + }), + ], 'delete state') + + # The suggestion no longer corresponds to an existing exploration state, + # so it should not be returned. 
+ filtered_translatable_suggestions = ( + suggestion_services.get_suggestions_with_translatable_explorations( + translatable_suggestions + ) + ) + self.assertEqual(len(filtered_translatable_suggestions), 0) + class UserContributionProficiencyUnitTests(test_utils.GenericTestBase): - def setUp(self): - super(UserContributionProficiencyUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup('user1@example.com', 'user1') self.signup('user2@example.com', 'user2') self.user_1_id = self.get_user_id_from_email('user1@example.com') self.user_2_id = self.get_user_id_from_email('user2@example.com') - def test_get_all_user_ids_who_are_allowed_to_review(self): + def test_get_all_user_ids_who_are_allowed_to_review(self) -> None: user_models.UserContributionProficiencyModel.create( self.user_1_id, 'category1', 0) user_models.UserContributionProficiencyModel.create( @@ -1875,7 +3466,7 @@ def test_get_all_user_ids_who_are_allowed_to_review(self): self.assertFalse(suggestion_services.can_user_review_category( self.user_2_id, 'category1')) - def test_get_all_scores_of_the_user_with_multiple_scores(self): + def test_get_all_scores_of_the_user_with_multiple_scores(self) -> None: user_models.UserContributionProficiencyModel.create( self.user_1_id, 'category1', 1) user_models.UserContributionProficiencyModel.create( @@ -1894,7 +3485,7 @@ def test_get_all_scores_of_the_user_with_multiple_scores(self): self.assertEqual(len(scores_dict), 3) self.assertDictEqual(scores_dict, expected_scores_dict) - def test_get_all_scores_of_the_user_when_no_scores_exist(self): + def test_get_all_scores_of_the_user_when_no_scores_exist(self) -> None: scores_dict = suggestion_services.get_all_scores_of_user( self.user_1_id) @@ -1902,61 +3493,26 @@ def test_get_all_scores_of_the_user_when_no_scores_exist(self): self.assertDictEqual(scores_dict, {}) -class VoiceoverApplicationServiceUnitTest(test_utils.GenericTestBase): - """Tests for the ExplorationVoiceoverApplication class.""" - - 
def setUp(self): - super(VoiceoverApplicationServiceUnitTest, self).setUp() - self.signup('author@example.com', 'author') - self.author_id = self.get_user_id_from_email('author@example.com') - - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_id', - target_type='exploration', - target_id='0', - status='review', - author_id=self.author_id, - final_reviewer_id=None, - language_code='en', - filename='filename.mp3', - content='

    content

    ', - rejection_message=None).put() - self.voiceover_application_model = ( - suggestion_models.GeneralVoiceoverApplicationModel.get_by_id( - 'application_id')) - - def test_get_voiceover_application_from_model_with_invalid_type_raise_error( - self): - suggestion_services.get_voiceover_application( - self.voiceover_application_model.id) - - self.voiceover_application_model.target_type = 'invalid_type' - with self.assertRaisesRegexp( - Exception, - 'Invalid target type for voiceover application: invalid_type'): - suggestion_services.get_voiceover_application( - self.voiceover_application_model.id) - - class ReviewableSuggestionEmailInfoUnitTests( test_utils.GenericTestBase): """Tests the methods related to the ReviewableSuggestionEmailInfo class. """ - target_id = 'exp1' - skill_id = 'skill1' - language_code = 'en' - AUTHOR_EMAIL = 'author1@example.com' - REVIEWER_EMAIL = 'reviewer@community.org' - COMMIT_MESSAGE = 'commit message' + target_id: str = 'exp1' + skill_id: str = 'skill1' + language_code: str = 'en' + AUTHOR_EMAIL: Final = 'author1@example.com' + REVIEWER_EMAIL: Final = 'reviewer@community.org' + COMMIT_MESSAGE: Final = 'commit message' def _create_translation_suggestion_with_translation_html( - self, translation_html): + self, translation_html: str + ) -> suggestion_registry.SuggestionTranslateContent: """Creates a translation suggestion with the given translation_html.""" add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID, + 'content_id': 'content_0', 'language_code': self.language_code, 'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR, 'translation_html': translation_html, @@ -1972,24 +3528,32 @@ def _create_translation_suggestion_with_translation_html( ) def _create_question_suggestion_with_question_html_content( - self, question_html_content): + self, question_html_content: str + ) -> 
suggestion_registry.SuggestionAddQuestion: """Creates a question suggestion with the html content used for the question in the question suggestion. """ with self.swap( feconf, 'DEFAULT_INIT_STATE_CONTENT_STR', question_html_content): - add_question_change_dict = { + content_id_generator = translation_domain.ContentIdGenerator() + add_question_change_dict: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': self.language_code, 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': self.skill_id, 'skill_difficulty': 0.3 @@ -2003,19 +3567,23 @@ def _create_question_suggestion_with_question_html_content( 'test description' ) - def _create_edit_state_content_suggestion(self): + def _create_edit_state_content_suggestion( + self + ) -> suggestion_registry.SuggestionEditStateContent: """Creates an "edit state content" suggestion.""" - edit_state_content_change_dict = { + edit_state_content_change_dict: Dict[ + str, Union[str, Dict[str, str]] + ] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'Introduction', 'new_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': 'new html content' }, 'old_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': 'old html content' } } @@ -2025,11 +3593,17 @@ def _create_edit_state_content_suggestion(self): feconf.ENTITY_TYPE_EXPLORATION, self.target_id, 
feconf.CURRENT_STATE_SCHEMA_VERSION, self.author_id, edit_state_content_change_dict, - 'test description') + None) def _assert_reviewable_suggestion_email_infos_are_equal( - self, reviewable_suggestion_email_info, - expected_reviewable_suggestion_email_info): + self, + reviewable_suggestion_email_info: ( + suggestion_registry.ReviewableSuggestionEmailInfo + ), + expected_reviewable_suggestion_email_info: ( + suggestion_registry.ReviewableSuggestionEmailInfo + ) + ) -> None: """Asserts that the reviewable suggestion email info is equal to the expected reviewable suggestion email info. """ @@ -2046,9 +3620,8 @@ def _assert_reviewable_suggestion_email_infos_are_equal( reviewable_suggestion_email_info.submission_datetime, expected_reviewable_suggestion_email_info.submission_datetime) - def setUp(self): - super( - ReviewableSuggestionEmailInfoUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, 'author') self.author_id = self.get_user_id_from_email( self.AUTHOR_EMAIL) @@ -2058,19 +3631,20 @@ def setUp(self): self.save_new_valid_exploration(self.target_id, self.author_id) def test_create_raises_for_suggestion_type_not_on_contributor_dashboard( - self): + self + ) -> None: edit_state_content_suggestion = ( self._create_edit_state_content_suggestion()) # Mocking the SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS dict in # suggestion services so that this test still passes if the # "edit state content" suggestion type is added to the Contributor # Dashboard in the future. - suggestion_emphasized_text_getter_functions_mock = {} + suggestion_emphasized_text_getter_functions_mock: Dict[str, str] = {} with self.swap( suggestion_services, 'SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS', suggestion_emphasized_text_getter_functions_mock): - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected suggestion type to be offered on the Contributor ' 'Dashboard, received: %s.' 
% ( @@ -2082,7 +3656,8 @@ def test_create_raises_for_suggestion_type_not_on_contributor_dashboard( ) def test_contributor_suggestion_types_are_in_suggestion_text_getter_dict( - self): + self + ) -> None: # This test will fail if a new suggestion type is added to the # Contributor Dashboard but hasn't been added to # SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS. @@ -2096,7 +3671,9 @@ def test_contributor_suggestion_types_are_in_suggestion_text_getter_dict( sorted_text_getter_dict_suggestion_types, sorted_contributor_dashboard_suggestion_types) - def test_create_from_suggestion_returns_info_for_question_suggestion(self): + def test_create_from_suggestion_returns_info_for_question_suggestion( + self + ) -> None: question_suggestion = ( self._create_question_suggestion_with_question_html_content( '

    default question content

    ')) @@ -2120,7 +3697,8 @@ def test_create_from_suggestion_returns_info_for_question_suggestion(self): ) def test_create_from_suggestion_returns_info_for_translation_suggestion( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_translation_html( '

    default translation content

    ')) @@ -2143,7 +3721,7 @@ def test_create_from_suggestion_returns_info_for_translation_suggestion( expected_reviewable_suggestion_email_info ) - def test_create_from_suggestion_returns_info_for_empty_html(self): + def test_create_from_suggestion_returns_info_for_empty_html(self) -> None: translation_suggestion = ( self._create_translation_suggestion_with_translation_html( '')) @@ -2166,7 +3744,8 @@ def test_create_from_suggestion_returns_info_for_empty_html(self): ) def test_create_from_suggestion_returns_info_with_no_trailing_whitespace( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_translation_html( '

    test whitespace

    ')) @@ -2190,11 +3769,15 @@ def test_create_from_suggestion_returns_info_with_no_trailing_whitespace( ) def test_create_returns_info_for_translation_suggestion_if_html_math_rte( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_translation_html( - '

    translation with rte' - '

    ')) + '

    translation with rte' + '

    ')) expected_reviewable_suggestion_email_info = ( suggestion_registry.ReviewableSuggestionEmailInfo( translation_suggestion.suggestion_type, @@ -2215,12 +3798,15 @@ def test_create_returns_info_for_translation_suggestion_if_html_math_rte( ) def test_create_returns_info_for_translation_suggestion_if_html_image_rte( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_translation_html( - '

    translation with rte' - '' - '

    ')) + '

    translation with rte' + '

    ')) expected_reviewable_suggestion_email_info = ( suggestion_registry.ReviewableSuggestionEmailInfo( translation_suggestion.suggestion_type, @@ -2241,11 +3827,15 @@ def test_create_returns_info_for_translation_suggestion_if_html_image_rte( ) def test_create_returns_info_for_translation_suggestion_if_html_link_rte( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_translation_html( '

    translation with rte' - '

    ')) + '' + '

    ')) expected_reviewable_suggestion_email_info = ( suggestion_registry.ReviewableSuggestionEmailInfo( translation_suggestion.suggestion_type, @@ -2266,12 +3856,19 @@ def test_create_returns_info_for_translation_suggestion_if_html_link_rte( ) def test_create_returns_info_for_translation_suggestion_if_html_rte_repeats( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_translation_html( '

    translation with rte' - '' - '

    ')) + '' + '

    ' + '' + '')) expected_reviewable_suggestion_email_info = ( suggestion_registry.ReviewableSuggestionEmailInfo( translation_suggestion.suggestion_type, @@ -2292,12 +3889,19 @@ def test_create_returns_info_for_translation_suggestion_if_html_rte_repeats( ) def test_create_returns_info_for_translation_suggestion_if_html_multi_rte( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_translation_html( '

    translation with rte' - '' - '

    ')) + '' + '

    ' + '')) expected_reviewable_suggestion_email_info = ( suggestion_registry.ReviewableSuggestionEmailInfo( translation_suggestion.suggestion_type, @@ -2318,7 +3922,8 @@ def test_create_returns_info_for_translation_suggestion_if_html_multi_rte( ) def test_create_returns_info_for_translation_suggestion_if_html_rte_value( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_translation_html( '

    None: question_suggestion = ( self._create_question_suggestion_with_question_html_content( '

    None: question_suggestion = ( self._create_question_suggestion_with_question_html_content( '

    suggestion_registry.SuggestionTranslateContent: """Creates a translation suggestion in the given language_code with the given author id. """ add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID, + 'content_id': 'content_0', 'language_code': language_code, 'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR, 'translation_html': '

    This is the translated content.

    ', @@ -2615,18 +4251,26 @@ def _create_translation_suggestion_with_language_code_and_author( ) def _create_question_suggestion_with_skill_id_and_author_id( - self, skill_id, author_id): + self, skill_id: str, author_id: str + ) -> suggestion_registry.SuggestionAddQuestion: """Creates a question suggestion with the given skill_id.""" - add_question_change_dict = { + content_id_generator = translation_domain.ContentIdGenerator() + add_question_change_dict: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': self.language_code, 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': skill_id, 'skill_difficulty': 0.3 @@ -2641,7 +4285,8 @@ def _create_question_suggestion_with_skill_id_and_author_id( ) def _create_reviewable_suggestion_email_infos_from_suggestions( - self, suggestions): + self, suggestions: List[suggestion_registry.BaseSuggestion] + ) -> List[suggestion_registry.ReviewableSuggestionEmailInfo]: """Creates a list of ReviewableSuggestionEmailInfo objects from the given suggestions. 
""" @@ -2655,8 +4300,14 @@ def _create_reviewable_suggestion_email_infos_from_suggestions( ] def _assert_reviewable_suggestion_email_infos_are_in_correct_order( - self, reviewable_suggestion_email_infos, - expected_reviewable_suggestion_email_infos): + self, + reviewable_suggestion_email_infos: ( + List[suggestion_registry.ReviewableSuggestionEmailInfo] + ), + expected_reviewable_suggestion_email_infos: ( + List[suggestion_registry.ReviewableSuggestionEmailInfo] + ) + ) -> None: """Asserts that the reviewable suggestion email infos are equal to the expected reviewable suggestion email infos and that the reviewable suggestion email infos are sorted in descending order according to @@ -2691,21 +4342,44 @@ def _assert_reviewable_suggestion_email_infos_are_in_correct_order( index + 1].submission_datetime ) - def setUp(self): - super( - GetSuggestionsWaitingForReviewInfoToNotifyReviewersUnitTests, - self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, 'author') - self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) + self.author_id = self.get_user_id_from_email( + self.AUTHOR_EMAIL) self.signup(self.REVIEWER_1_EMAIL, 'reviewer1') self.reviewer_1_id = self.get_user_id_from_email( self.REVIEWER_1_EMAIL) self.signup(self.REVIEWER_2_EMAIL, 'reviewer2') self.reviewer_2_id = self.get_user_id_from_email( self.REVIEWER_2_EMAIL) - self.save_new_valid_exploration(self.target_id, self.author_id) + exploration = self.save_new_valid_exploration( + self.target_id, self.author_id, + correctness_feedback_enabled=True) + audio_language_codes = set( + language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES) + model = opportunity_models.ExplorationOpportunitySummaryModel( + id=exploration.id, + topic_id='topic_id', + topic_name='topic_name', + story_id='story_id', + story_title='story_title', + chapter_title='chapter_title', + content_count=2, + incomplete_translation_language_codes=( + audio_language_codes - set(['en'])), + 
translation_counts={}, + language_codes_needing_voice_artists=audio_language_codes, + language_codes_with_assigned_voice_artists=[] + ) + model.update_timestamps() + model.put() + + self.save_new_skill(self.skill_id, self.author_id) - def test_get_returns_empty_for_reviewers_who_authored_the_suggestions(self): + def test_get_returns_empty_for_reviewers_who_authored_the_suggestions( + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') @@ -2723,7 +4397,8 @@ def test_get_returns_empty_for_reviewers_who_authored_the_suggestions(self): self.assertEqual(reviewable_suggestion_email_infos, [[]]) def test_get_returns_empty_for_question_reviewers_if_only_translation_exist( - self): + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) self._create_translation_suggestion_with_language_code_and_author( 'hi', self.author_id) @@ -2737,7 +4412,8 @@ def test_get_returns_empty_for_question_reviewers_if_only_translation_exist( self.assertEqual(reviewable_suggestion_email_infos, [[]]) def test_get_returns_empty_for_translation_reviewers_if_only_question_exist( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') self._create_question_suggestion_with_skill_id_and_author_id( @@ -2751,7 +4427,7 @@ def test_get_returns_empty_for_translation_reviewers_if_only_question_exist( self.assertEqual(len(reviewable_suggestion_email_infos), 1) self.assertEqual(reviewable_suggestion_email_infos, [[]]) - def test_get_returns_empty_for_accepted_suggestions(self): + def test_get_returns_empty_for_accepted_suggestions(self) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') translation_suggestion = ( @@ -2769,7 +4445,7 @@ def test_get_returns_empty_for_accepted_suggestions(self): self.assertEqual(len(reviewable_suggestion_email_infos), 1) 
self.assertEqual(reviewable_suggestion_email_infos, [[]]) - def test_get_returns_empty_for_rejected_suggestions(self): + def test_get_returns_empty_for_rejected_suggestions(self) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) translation_suggestion = ( self._create_translation_suggestion_with_language_code_and_author( @@ -2787,7 +4463,8 @@ def test_get_returns_empty_for_rejected_suggestions(self): self.assertEqual(reviewable_suggestion_email_infos, [[]]) def test_get_returns_suggestion_infos_for_a_translation_reviewer_same_lang( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') translation_suggestion_1 = ( @@ -2811,7 +4488,8 @@ def test_get_returns_suggestion_infos_for_a_translation_reviewer_same_lang( expected_reviewable_suggestion_email_infos) def test_get_returns_empty_for_a_translation_reviewer_with_diff_lang_rights( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'en') self._create_translation_suggestion_with_language_code_and_author( @@ -2826,7 +4504,8 @@ def test_get_returns_empty_for_a_translation_reviewer_with_diff_lang_rights( self.assertEqual(reviewable_suggestion_email_infos, [[]]) def test_get_returns_suggestion_infos_for_translation_reviewer_multi_lang( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2859,7 +4538,8 @@ def test_get_returns_suggestion_infos_for_translation_reviewer_multi_lang( expected_reviewable_suggestion_email_infos) def test_get_returns_infos_for_translation_reviewer_past_limit_same_lang( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') translation_suggestion_1 = ( @@ -2887,7 +4567,8 @@ def test_get_returns_infos_for_translation_reviewer_past_limit_same_lang( 
expected_reviewable_suggestion_email_infos) def test_get_returns_infos_for_translation_reviewer_past_limit_diff_lang( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2925,7 +4606,8 @@ def test_get_returns_infos_for_translation_reviewer_past_limit_diff_lang( expected_reviewable_suggestion_email_infos) def test_get_returns_suggestion_infos_for_multiple_translation_reviewers( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2966,7 +4648,8 @@ def test_get_returns_suggestion_infos_for_multiple_translation_reviewers( expected_reviewable_suggestion_email_infos_reviewer_2) def test_get_returns_suggestion_infos_for_reviewer_with_multi_review_rights( - self): + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') @@ -3005,7 +4688,7 @@ def test_get_returns_suggestion_infos_for_reviewer_with_multi_review_rights( reviewable_suggestion_email_infos[0], expected_reviewable_suggestion_email_infos) - def test_get_returns_suggestion_infos_for_a_question_reviewer(self): + def test_get_returns_suggestion_infos_for_a_question_reviewer(self) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) question_suggestion_1 = ( self._create_question_suggestion_with_skill_id_and_author_id( @@ -3029,7 +4712,9 @@ def test_get_returns_suggestion_infos_for_a_question_reviewer(self): reviewable_suggestion_email_infos[0], expected_reviewable_suggestion_email_infos) - def test_get_returns_suggestion_infos_for_multi_question_reviewers(self): + def test_get_returns_suggestion_infos_for_multi_question_reviewers( + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) 
user_services.allow_user_to_review_question(self.reviewer_2_id) question_suggestion_1 = ( @@ -3058,7 +4743,8 @@ def test_get_returns_suggestion_infos_for_multi_question_reviewers(self): expected_reviewable_suggestion_email_infos) def test_get_returns_suggestion_infos_for_question_reviewer_past_limit( - self): + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) question_suggestion_1 = ( self._create_question_suggestion_with_skill_id_and_author_id( @@ -3085,7 +4771,8 @@ def test_get_returns_suggestion_infos_for_question_reviewer_past_limit( expected_reviewable_suggestion_email_infos) def test_get_returns_suggestion_infos_for_multi_reviewers_with_multi_rights( - self): + self + ) -> None: # Reviewer 1's permissions. user_services.allow_user_to_review_question(self.reviewer_1_id) user_services.allow_user_to_review_translation_in_language( @@ -3143,7 +4830,8 @@ def test_get_returns_suggestion_infos_for_multi_reviewers_with_multi_rights( expected_reviewable_suggestion_email_infos_reviewer_2) def test_get_returns_infos_for_reviewer_with_multi_rights_past_limit( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_question(self.reviewer_1_id) @@ -3188,19 +4876,21 @@ class CommunityContributionStatsUnitTests(test_utils.GenericTestBase): to be added then this can be removed. See issue #10957 for more context. 
""" - target_id = 'exp1' - skill_id = 'skill_123456' - language_code = 'en' - AUTHOR_EMAIL = 'author@example.com' - REVIEWER_EMAIL = 'reviewer@community.org' - COMMIT_MESSAGE = 'commit message' + target_id: str = 'exp1' + skill_id: str = 'skill_123456' + language_code: str = 'en' + AUTHOR_EMAIL: Final = 'author@example.com' + REVIEWER_EMAIL: Final = 'reviewer@community.org' + COMMIT_MESSAGE: Final = 'commit message' - def _create_translation_suggestion_with_language_code(self, language_code): + def _create_translation_suggestion_with_language_code( + self, language_code: str + ) -> suggestion_registry.SuggestionTranslateContent: """Creates a translation suggestion in the given language_code.""" add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID, + 'content_id': 'content_0', 'language_code': language_code, 'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR, 'translation_html': '

    This is the translated content.

    ', @@ -3215,18 +4905,27 @@ def _create_translation_suggestion_with_language_code(self, language_code): 'test description' ) - def _create_question_suggestion(self): + def _create_question_suggestion( + self + ) -> suggestion_registry.SuggestionAddQuestion: """Creates a question suggestion.""" - add_question_change_dict = { + content_id_generator = translation_domain.ContentIdGenerator() + add_question_change_dict: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': self.language_code, 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': self.skill_id, 'skill_difficulty': 0.3 @@ -3240,19 +4939,23 @@ def _create_question_suggestion(self): 'test description' ) - def _create_edit_state_content_suggestion(self): + def _create_edit_state_content_suggestion( + self + ) -> suggestion_registry.SuggestionEditStateContent: """Creates an "edit state content" suggestion.""" - edit_state_content_change_dict = { + edit_state_content_change_dict: Dict[ + str, Union[str, Dict[str, str]] + ] = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'Introduction', 'new_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': 'new html content' }, 'old_value': { - 'content_id': 'content', + 'content_id': 'content_0', 'html': 'old html content' } } @@ -3265,7 +4968,7 @@ def _create_edit_state_content_suggestion(self): 'test description' ) - def 
_assert_community_contribution_stats_is_in_default_state(self): + def _assert_community_contribution_stats_is_in_default_state(self) -> None: """Checks if the community contribution stats is in its default state. """ @@ -3288,26 +4991,48 @@ def _assert_community_contribution_stats_is_in_default_state(self): self.assertEqual( community_contribution_stats.question_suggestion_count, 0) - def setUp(self): - super( - CommunityContributionStatsUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, 'author') self.author_id = self.get_user_id_from_email( self.AUTHOR_EMAIL) self.signup(self.REVIEWER_EMAIL, 'reviewer') self.reviewer_id = self.get_user_id_from_email( self.REVIEWER_EMAIL) - self.save_new_valid_exploration(self.target_id, self.author_id) + exploration = self.save_new_valid_exploration( + self.target_id, self.author_id, + correctness_feedback_enabled=True) + audio_language_codes = set( + language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES) + model = opportunity_models.ExplorationOpportunitySummaryModel( + id=exploration.id, + topic_id='topic_id', + topic_name='topic_name', + story_id='story_id', + story_title='story_title', + chapter_title='chapter_title', + content_count=2, + incomplete_translation_language_codes=( + audio_language_codes - set(['en'])), + translation_counts={}, + language_codes_needing_voice_artists=audio_language_codes, + language_codes_with_assigned_voice_artists=[] + ) + model.update_timestamps() + model.put() + self.save_new_skill(self.skill_id, self.author_id) def test_create_edit_state_content_suggestion_does_not_change_the_counts( - self): + self + ) -> None: self._create_edit_state_content_suggestion() self._assert_community_contribution_stats_is_in_default_state() def test_accept_edit_state_content_suggestion_does_not_change_the_counts( - self): + self + ) -> None: edit_state_content_suggestion = ( self._create_edit_state_content_suggestion()) 
self._assert_community_contribution_stats_is_in_default_state() @@ -3319,7 +5044,8 @@ def test_accept_edit_state_content_suggestion_does_not_change_the_counts( self._assert_community_contribution_stats_is_in_default_state() def test_reject_edit_state_content_suggestion_does_not_change_the_counts( - self): + self + ) -> None: edit_state_content_suggestion = ( self._create_edit_state_content_suggestion()) self._assert_community_contribution_stats_is_in_default_state() @@ -3331,7 +5057,8 @@ def test_reject_edit_state_content_suggestion_does_not_change_the_counts( self._assert_community_contribution_stats_is_in_default_state() def test_reject_edit_state_content_suggestions_does_not_change_the_counts( - self): + self + ) -> None: edit_state_content_suggestion_1 = ( self._create_edit_state_content_suggestion()) edit_state_content_suggestion_2 = ( @@ -3347,7 +5074,8 @@ def test_reject_edit_state_content_suggestions_does_not_change_the_counts( self._assert_community_contribution_stats_is_in_default_state() def test_resubmit_edit_state_content_suggestion_does_not_change_the_counts( - self): + self + ) -> None: edit_state_content_suggestion = ( self._create_edit_state_content_suggestion()) suggestion_services.reject_suggestion( @@ -3357,6 +5085,8 @@ def test_resubmit_edit_state_content_suggestion_does_not_change_the_counts( # Change the new_value of the html of the suggestion that got rejected # so we can resubmit the suggestion for review. resubmit_suggestion_change = edit_state_content_suggestion.change + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(resubmit_suggestion_change.new_value, dict) resubmit_suggestion_change.new_value['html'] = 'new html to resubmit' # Resubmit the rejected "edit state content" suggestion. 
@@ -3368,7 +5098,8 @@ def test_resubmit_edit_state_content_suggestion_does_not_change_the_counts( self._assert_community_contribution_stats_is_in_default_state() def test_create_question_suggestion_increases_question_suggestion_count( - self): + self + ) -> None: self._create_question_suggestion() stats = suggestion_services.get_community_contribution_stats() @@ -3379,7 +5110,9 @@ def test_create_question_suggestion_increases_question_suggestion_count( self.assertDictEqual( stats.translation_suggestion_counts_by_lang_code, {}) - def test_create_multi_question_suggestions_increases_question_count(self): + def test_create_multi_question_suggestions_increases_question_count( + self + ) -> None: self._create_question_suggestion() self._create_question_suggestion() @@ -3392,7 +5125,8 @@ def test_create_multi_question_suggestions_increases_question_count(self): stats.translation_suggestion_counts_by_lang_code, {}) def test_accept_question_suggestion_decreases_question_suggestion_count( - self): + self + ) -> None: question_suggestion = self._create_question_suggestion() # Assert that the question suggestion count increased. stats = suggestion_services.get_community_contribution_stats() @@ -3410,7 +5144,8 @@ def test_accept_question_suggestion_decreases_question_suggestion_count( self._assert_community_contribution_stats_is_in_default_state() def test_reject_question_suggestion_decreases_question_suggestion_count( - self): + self + ) -> None: question_suggestion = self._create_question_suggestion() # Assert that the question suggestion count increased. 
stats = suggestion_services.get_community_contribution_stats() @@ -3428,7 +5163,8 @@ def test_reject_question_suggestion_decreases_question_suggestion_count( self._assert_community_contribution_stats_is_in_default_state() def test_reject_question_suggestions_decreases_question_suggestion_count( - self): + self + ) -> None: question_suggestion_1 = self._create_question_suggestion() question_suggestion_2 = self._create_question_suggestion() # Assert that the question suggestion count increased. @@ -3449,7 +5185,8 @@ def test_reject_question_suggestions_decreases_question_suggestion_count( self._assert_community_contribution_stats_is_in_default_state() def test_resubmit_question_suggestion_increases_question_suggestion_count( - self): + self + ) -> None: question_suggestion = self._create_question_suggestion() # Assert that the question suggestion count increased. stats = suggestion_services.get_community_contribution_stats() @@ -3468,6 +5205,8 @@ def test_resubmit_question_suggestion_increases_question_suggestion_count( # Change the question_dict of the question suggestion that got rejected # so we can resubmit the suggestion for review. resubmit_question_change = question_suggestion.change + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(resubmit_question_change.question_dict, dict) resubmit_question_change.question_dict['linked_skill_ids'] = ['skill1'] # Resubmit the rejected question suggestion. 
@@ -3485,7 +5224,8 @@ def test_resubmit_question_suggestion_increases_question_suggestion_count( stats.translation_suggestion_counts_by_lang_code, {}) def test_create_translation_suggestion_raises_translation_suggestion_count( - self): + self + ) -> None: self._create_translation_suggestion_with_language_code( self.language_code) @@ -3499,7 +5239,8 @@ def test_create_translation_suggestion_raises_translation_suggestion_count( {self.language_code: 1}) def test_create_translation_suggestions_diff_lang_raises_translation_counts( - self): + self + ) -> None: self._create_translation_suggestion_with_language_code('hi') self._create_translation_suggestion_with_language_code('en') @@ -3513,7 +5254,8 @@ def test_create_translation_suggestions_diff_lang_raises_translation_counts( {'hi': 1, 'en': 1}) def test_create_translation_suggestions_eq_lang_increases_translation_count( - self): + self + ) -> None: self._create_translation_suggestion_with_language_code('hi') self._create_translation_suggestion_with_language_code('hi') @@ -3526,7 +5268,8 @@ def test_create_translation_suggestions_eq_lang_increases_translation_count( stats.translation_suggestion_counts_by_lang_code, {'hi': 2}) def test_accept_translation_suggestion_lowers_translation_suggestion_count( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_language_code( self.language_code)) @@ -3542,12 +5285,13 @@ def test_accept_translation_suggestion_lowers_translation_suggestion_count( suggestion_services.accept_suggestion( translation_suggestion.suggestion_id, self.reviewer_id, - self.COMMIT_MESSAGE, 'review message') + self.COMMIT_MESSAGE, 'review message') self._assert_community_contribution_stats_is_in_default_state() def test_reject_translation_suggestion_lowers_translation_suggestion_count( - self): + self + ) -> None: translation_suggestion = ( self._create_translation_suggestion_with_language_code( self.language_code)) @@ -3568,7 +5312,8 @@ def 
test_reject_translation_suggestion_lowers_translation_suggestion_count( self._assert_community_contribution_stats_is_in_default_state() def test_reject_one_translation_suggestion_diff_lang_lowers_only_one_count( - self): + self + ) -> None: translation_suggestion_1 = ( self._create_translation_suggestion_with_language_code('hi')) # Create a translation suggestion in a different language that won't be @@ -3597,7 +5342,8 @@ def test_reject_one_translation_suggestion_diff_lang_lowers_only_one_count( stats.translation_suggestion_counts_by_lang_code, {'en': 1}) def test_reject_translation_suggestions_diff_lang_lowers_translation_count( - self): + self + ) -> None: translation_suggestion_1 = ( self._create_translation_suggestion_with_language_code('hi')) translation_suggestion_2 = ( @@ -3621,7 +5367,8 @@ def test_reject_translation_suggestions_diff_lang_lowers_translation_count( self._assert_community_contribution_stats_is_in_default_state() def test_reject_translation_suggestions_same_lang_lowers_translation_count( - self): + self + ) -> None: translation_suggestion_1 = ( self._create_translation_suggestion_with_language_code( self.language_code)) @@ -3646,7 +5393,9 @@ def test_reject_translation_suggestions_same_lang_lowers_translation_count( self._assert_community_contribution_stats_is_in_default_state() - def test_reject_suggestions_diff_type_decreases_suggestion_counts(self): + def test_reject_suggestions_diff_type_decreases_suggestion_counts( + self + ) -> None: suggestion_1 = ( self._create_translation_suggestion_with_language_code('hi')) suggestion_2 = ( @@ -3671,7 +5420,9 @@ def test_reject_suggestions_diff_type_decreases_suggestion_counts(self): self._assert_community_contribution_stats_is_in_default_state() - def test_create_suggestions_diff_type_increases_suggestion_counts(self): + def test_create_suggestions_diff_type_increases_suggestion_counts( + self + ) -> None: self._create_translation_suggestion_with_language_code('hi') 
self._create_translation_suggestion_with_language_code('en') self._create_question_suggestion() @@ -3696,21 +5447,25 @@ class GetSuggestionsWaitingTooLongForReviewInfoForAdminsUnitTests( review on the Contributor Dashboard. """ - target_id = 'exp1' - skill_id = 'skill_123456' - language_code = 'en' - AUTHOR_EMAIL = 'author@example.com' - REVIEWER_1_EMAIL = 'reviewer1@community.org' - REVIEWER_2_EMAIL = 'reviewer2@community.org' - COMMIT_MESSAGE = 'commit message' - mocked_datetime_utcnow = datetime.datetime(2020, 6, 15, 5) + target_id: str = 'exp1' + skill_id: str = 'skill_123456' + language_code: str = 'en' + AUTHOR_EMAIL: str = 'author@example.com' + REVIEWER_1_EMAIL: str = 'reviewer1@community.org' + REVIEWER_2_EMAIL: str = 'reviewer2@community.org' + COMMIT_MESSAGE: str = 'commit message' + mocked_datetime_utcnow: datetime.datetime = ( + datetime.datetime(2020, 6, 15, 5) + ) - def _create_translation_suggestion(self): + def _create_translation_suggestion( + self + ) -> suggestion_registry.SuggestionTranslateContent: """Creates a translation suggestion.""" add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID, + 'content_id': 'content_0', 'language_code': self.language_code, 'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR, 'translation_html': '

    This is the translated content.

    ', @@ -3725,18 +5480,27 @@ def _create_translation_suggestion(self): 'test description' ) - def _create_question_suggestion(self): + def _create_question_suggestion( + self + ) -> suggestion_registry.SuggestionAddQuestion: """Creates a question suggestion.""" - add_question_change_dict = { + content_id_generator = translation_domain.ContentIdGenerator() + add_question_change_dict: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': self.language_code, 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': self.skill_id, 'skill_difficulty': 0.3 @@ -3751,7 +5515,8 @@ def _create_question_suggestion(self): ) def _create_reviewable_suggestion_email_infos_from_suggestions( - self, suggestions): + self, suggestions: List[suggestion_registry.BaseSuggestion] + ) -> List[suggestion_registry.ReviewableSuggestionEmailInfo]: """Creates a list of ReviewableSuggestionEmailInfo objects from the given suggestions. 
""" @@ -3765,8 +5530,13 @@ def _create_reviewable_suggestion_email_infos_from_suggestions( ] def _assert_reviewable_suggestion_email_infos_are_in_correct_order( - self, reviewable_suggestion_email_infos, - expected_reviewable_suggestion_email_infos): + self, reviewable_suggestion_email_infos: List[ + suggestion_registry.ReviewableSuggestionEmailInfo + ], + expected_reviewable_suggestion_email_infos: List[ + suggestion_registry.ReviewableSuggestionEmailInfo + ] + ) -> None: """Asserts that the reviewable suggestion email infos are equal to the expected reviewable suggestion email infos and that the reviewable suggestion email infos are sorted in descending order according to @@ -3801,10 +5571,8 @@ def _assert_reviewable_suggestion_email_infos_are_in_correct_order( index + 1].submission_datetime ) - def setUp(self): - super( - GetSuggestionsWaitingTooLongForReviewInfoForAdminsUnitTests, - self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, 'author') self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.signup(self.REVIEWER_1_EMAIL, 'reviewer1') @@ -3817,7 +5585,8 @@ def setUp(self): self.save_new_skill(self.skill_id, self.author_id) def test_get_returns_empty_for_suggestion_type_not_on_contributor_dashboard( - self): + self + ) -> None: self._create_translation_suggestion() # This mocked list cannot be empty because then the storage query in the # get_suggestions_waiting_too_long_for_review method will fail. @@ -3839,7 +5608,8 @@ def test_get_returns_empty_for_suggestion_type_not_on_contributor_dashboard( len(info_about_suggestions_waiting_too_long_for_review), 0) def test_get_returns_empty_if_suggestion_review_wait_time_diff_is_negative( - self): + self + ) -> None: self._create_translation_suggestion() # Make sure the threshold is nonzero. 
@@ -3855,7 +5625,8 @@ def test_get_returns_empty_if_suggestion_review_wait_time_diff_is_negative( len(info_about_suggestions_waiting_too_long_for_review), 0) def test_get_returns_empty_if_suggestions_have_waited_less_than_threshold( - self): + self + ) -> None: with self.mock_datetime_utcnow(self.mocked_datetime_utcnow): self._create_translation_suggestion() self._create_question_suggestion() @@ -3878,7 +5649,8 @@ def test_get_returns_empty_if_suggestions_have_waited_less_than_threshold( len(info_about_suggestions_waiting_too_long_for_review), 0) def test_get_returns_empty_if_suggestions_have_waited_threshold_review_time( - self): + self + ) -> None: with self.mock_datetime_utcnow(self.mocked_datetime_utcnow): self._create_translation_suggestion() mocked_threshold_review_wait_time_in_days = 2 @@ -3901,7 +5673,8 @@ def test_get_returns_empty_if_suggestions_have_waited_threshold_review_time( len(info_about_suggestions_waiting_too_long_for_review), 0) def test_get_returns_suggestion_waited_long_if_their_wait_is_past_threshold( - self): + self + ) -> None: with self.mock_datetime_utcnow(self.mocked_datetime_utcnow): translation_suggestion = self._create_translation_suggestion() # Give the question suggestion a slightly different review submission @@ -3936,7 +5709,8 @@ def test_get_returns_suggestion_waited_long_if_their_wait_is_past_threshold( ) def test_get_only_returns_suggestions_that_have_waited_past_wait_threshold( - self): + self + ) -> None: with self.mock_datetime_utcnow(self.mocked_datetime_utcnow): translation_suggestion = self._create_translation_suggestion() with self.mock_datetime_utcnow( @@ -3975,19 +5749,21 @@ def test_get_only_returns_suggestions_that_have_waited_past_wait_threshold( class GetSuggestionTypesThatNeedReviewersUnitTests(test_utils.GenericTestBase): """Tests for the get_suggestion_types_that_need_reviewers method.""" - sample_language_code = 'en' - target_id = 'exp1' - skill_id = 'skill_123456' - language_code = 'en' - AUTHOR_EMAIL = 
'author@example.com' - REVIEWER_EMAIL = 'reviewer@community.org' + sample_language_code: str = 'en' + target_id: str = 'exp1' + skill_id: str = 'skill_123456' + language_code: str = 'en' + AUTHOR_EMAIL: Final = 'author@example.com' + REVIEWER_EMAIL: Final = 'reviewer@community.org' - def _create_translation_suggestion_with_language_code(self, language_code): + def _create_translation_suggestion_with_language_code( + self, language_code: str + ) -> suggestion_registry.SuggestionTranslateContent: """Creates a translation suggestion in the given language_code.""" add_translation_change_dict = { 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, 'state_name': feconf.DEFAULT_INIT_STATE_NAME, - 'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID, + 'content_id': 'content_0', 'language_code': language_code, 'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR, 'translation_html': '

    This is the translated content.

    ', @@ -4002,18 +5778,27 @@ def _create_translation_suggestion_with_language_code(self, language_code): 'test description' ) - def _create_question_suggestion(self): + def _create_question_suggestion( + self + ) -> suggestion_registry.SuggestionAddQuestion: """Creates a question suggestion.""" - add_question_change_dict = { + content_id_generator = translation_domain.ContentIdGenerator() + add_question_change_dict: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { + 'id': 'test_id', + 'version': 12, 'question_state_data': self._create_valid_question_data( - 'default_state').to_dict(), + 'default_state', content_id_generator).to_dict(), 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], - 'inapplicable_skill_misconception_ids': ['skillid12345-1'] + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) }, 'skill_id': self.skill_id, 'skill_difficulty': 0.3 @@ -4027,7 +5812,9 @@ def _create_question_suggestion(self): 'test description' ) - def _assert_community_contribution_stats_is_in_default_state(self): + def _assert_community_contribution_stats_is_in_default_state( + self + ) -> None: """Checks if the community contribution stats is in its default state. 
""" @@ -4048,10 +5835,8 @@ def _assert_community_contribution_stats_is_in_default_state(self): self.assertEqual( community_contribution_stats.question_suggestion_count, 0) - def setUp(self): - super( - GetSuggestionTypesThatNeedReviewersUnitTests, - self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.AUTHOR_EMAIL, 'author') self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.save_new_valid_exploration(self.target_id, self.author_id) @@ -4060,7 +5845,9 @@ def setUp(self): self.reviewer_id = self.get_user_id_from_email( self.REVIEWER_EMAIL) - def test_get_returns_no_reviewers_needed_if_no_suggestions_exist(self): + def test_get_returns_no_reviewers_needed_if_no_suggestions_exist( + self + ) -> None: self._assert_community_contribution_stats_is_in_default_state() suggestion_types_needing_reviewers = ( @@ -4069,7 +5856,8 @@ def test_get_returns_no_reviewers_needed_if_no_suggestions_exist(self): self.assertDictEqual(suggestion_types_needing_reviewers, {}) def test_get_returns_no_reviewers_needed_if_question_reviewer_no_question( - self): + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_id) stats = suggestion_services.get_community_contribution_stats() self.assertEqual(stats.question_reviewer_count, 1) @@ -4085,7 +5873,8 @@ def test_get_returns_no_reviewers_needed_if_question_reviewer_no_question( self.assertDictEqual(suggestion_types_needing_reviewers, {}) def test_get_returns_not_needed_if_translation_reviewers_but_no_translation( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_id, 'en') user_services.allow_user_to_review_translation_in_language( @@ -4104,7 +5893,8 @@ def test_get_returns_not_needed_if_translation_reviewers_but_no_translation( self.assertDictEqual(suggestion_types_needing_reviewers, {}) def test_get_returns_no_reviewers_needed_if_enough_translation_reviewers( - self): + self + ) -> None: 
user_services.allow_user_to_review_translation_in_language( self.reviewer_id, 'en') user_services.allow_user_to_review_translation_in_language( @@ -4126,7 +5916,8 @@ def test_get_returns_no_reviewers_needed_if_enough_translation_reviewers( self.assertDictEqual(suggestion_types_needing_reviewers, {}) def test_get_returns_no_reviewers_needed_if_enough_question_reviewers( - self): + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_id) self._create_question_suggestion() stats = suggestion_services.get_community_contribution_stats() @@ -4143,7 +5934,8 @@ def test_get_returns_no_reviewers_needed_if_enough_question_reviewers( self.assertDictEqual(suggestion_types_needing_reviewers, {}) def test_get_returns_reviewers_needed_if_question_but_no_reviewers( - self): + self + ) -> None: self._create_question_suggestion() stats = suggestion_services.get_community_contribution_stats() self.assertEqual(stats.question_reviewer_count, 0) @@ -4158,10 +5950,11 @@ def test_get_returns_reviewers_needed_if_question_but_no_reviewers( self.assertDictEqual( suggestion_types_needing_reviewers, - {feconf.SUGGESTION_TYPE_ADD_QUESTION: {}}) + {feconf.SUGGESTION_TYPE_ADD_QUESTION: set()}) def test_get_returns_reviewers_needed_if_translation_for_a_lang_no_reviewer( - self): + self + ) -> None: self._create_translation_suggestion_with_language_code( self.sample_language_code) stats = suggestion_services.get_community_contribution_stats() @@ -4182,7 +5975,8 @@ def test_get_returns_reviewers_needed_if_translation_for_a_lang_no_reviewer( self.sample_language_code}}) def test_get_returns_reviewers_needed_if_translation_for_langs_no_reviewers( - self): + self + ) -> None: self._create_translation_suggestion_with_language_code('en') self._create_translation_suggestion_with_language_code('fr') stats = suggestion_services.get_community_contribution_stats() @@ -4202,7 +5996,8 @@ def test_get_returns_reviewers_needed_if_translation_for_langs_no_reviewers( 
{feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: {'en', 'fr'}}) def test_get_returns_reviewers_needed_if_multi_suggestion_types_no_reviewer( - self): + self + ) -> None: self._create_question_suggestion() self._create_translation_suggestion_with_language_code('en') self._create_translation_suggestion_with_language_code('fr') @@ -4223,5 +6018,344 @@ def test_get_returns_reviewers_needed_if_multi_suggestion_types_no_reviewer( { feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: { 'en', 'fr'}, - feconf.SUGGESTION_TYPE_ADD_QUESTION: {} + feconf.SUGGESTION_TYPE_ADD_QUESTION: set() }) + + +class EmailsTaskqueueTests(test_utils.GenericTestBase): + """Tests for tasks in emails taskqueue.""" + + def test_create_new_instant_task(self) -> None: + user_id = 'user' + ( + suggestion_services + .enqueue_contributor_ranking_notification_email_task( + user_id, feconf.CONTRIBUTION_TYPE_TRANSLATION, + feconf.CONTRIBUTION_SUBTYPE_ACCEPTANCE, 'hi', + 'Initial Contributor' + )) + + self.assertEqual( + self.count_jobs_in_taskqueue( + taskqueue_services.QUEUE_NAME_EMAILS), + 1) + + tasks = self.get_pending_tasks( + queue_name=taskqueue_services.QUEUE_NAME_EMAILS) + self.assertEqual( + tasks[0].url, + feconf + .TASK_URL_CONTRIBUTOR_DASHBOARD_ACHIEVEMENT_NOTIFICATION_EMAILS) + # Ruling out the possibility of None for mypy type checking. 
+ assert tasks[0].payload is not None + self.assertEqual( + tasks[0].payload['contributor_user_id'], user_id) + self.assertEqual( + tasks[0].payload['contribution_type'], + feconf.CONTRIBUTION_TYPE_TRANSLATION) + self.assertEqual( + tasks[0].payload['contribution_sub_type'], + feconf.CONTRIBUTION_SUBTYPE_ACCEPTANCE) + self.assertEqual(tasks[0].payload['language_code'], 'hi') + self.assertEqual( + tasks[0].payload['rank_name'], 'Initial Contributor') + + def test_create_email_task_raises_exception_for_invalid_language_code( + self + ) -> None: + user_id = 'user' + with self.assertRaisesRegex( + Exception, + 'Not supported language code: error'): + ( + suggestion_services + .enqueue_contributor_ranking_notification_email_task + )( + user_id, feconf.CONTRIBUTION_TYPE_TRANSLATION, + feconf.CONTRIBUTION_SUBTYPE_ACCEPTANCE, 'error', + 'Initial Contributor' + ) + + def test_create_email_task_raises_exception_for_invalid_contribution_type( + self + ) -> None: + user_id = 'user' + with self.assertRaisesRegex( + Exception, + 'Invalid contribution type: test'): + ( + suggestion_services + .enqueue_contributor_ranking_notification_email_task + )( + user_id, 'test', + feconf.CONTRIBUTION_SUBTYPE_ACCEPTANCE, 'hi', + 'Initial Contributor' + ) + + def test_create_email_task_raises_exception_for_wrong_contribution_subtype( + self + ) -> None: + user_id = 'user' + with self.assertRaisesRegex( + Exception, + 'Invalid contribution subtype: test'): + ( + suggestion_services + .enqueue_contributor_ranking_notification_email_task + )( + user_id, feconf.CONTRIBUTION_TYPE_TRANSLATION, + 'test', 'hi', + 'Initial Contributor' + ) + + +class ContributorCertificateTests(test_utils.GenericTestBase): + """Tests for contributor certificate generation.""" + + AUTHOR_EMAIL: Final = 'author@example.com' + + def setUp(self) -> None: + super().setUp() + + self.signup(self.AUTHOR_EMAIL, 'author') + self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) + self.username = 
user_services.get_username(self.author_id) + self.from_date = datetime.datetime.today() - datetime.timedelta(days=1) + self.to_date = datetime.datetime.today() + datetime.timedelta(days=1) + + def test_create_translation_contributor_certificate(self) -> None: + score_category: str = ( + suggestion_models.SCORE_TYPE_TRANSLATION + + suggestion_models.SCORE_CATEGORY_DELIMITER + 'English') + change_cmd = { + 'cmd': 'add_translation', + 'content_id': 'content', + 'language_code': 'hi', + 'content_html': '', + 'state_name': 'Introduction', + 'translation_html': '

    Translation for content.

    ' + } + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', 1, suggestion_models.STATUS_ACCEPTED, self.author_id, + 'reviewer_1', change_cmd, score_category, + 'exploration.exp1.thread_6', 'hi') + + response = suggestion_services.generate_contributor_certificate_data( + self.username, + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + 'hi', + self.from_date, + self.to_date, + ) + + self.assertIsNotNone(response) + + def test_create_translation_contributor_certificate_for_english( + self + ) -> None: + score_category: str = ( + suggestion_models.SCORE_TYPE_TRANSLATION + + suggestion_models.SCORE_CATEGORY_DELIMITER + 'English') + change_cmd = { + 'cmd': 'add_translation', + 'content_id': 'content', + 'language_code': 'en', + 'content_html': '', + 'state_name': 'Introduction', + 'translation_html': '

    Translation for content.

    ' + } + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', 1, suggestion_models.STATUS_ACCEPTED, self.author_id, + 'reviewer_1', change_cmd, score_category, + 'exploration.exp1.thread_6', 'en') + + response = suggestion_services.generate_contributor_certificate_data( + self.username, + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + 'en', + self.from_date, + self.to_date, + ) + + self.assertIsNotNone(response) + + def test_create_question_contributor_certificate(self) -> None: + content_id_generator = translation_domain.ContentIdGenerator() + suggestion_change: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { + 'cmd': ( + question_domain + .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), + 'question_dict': { + 'id': 'test_id', + 'version': 12, + 'question_state_data': self._create_valid_question_data( + 'default_state', content_id_generator).to_dict(), + 'language_code': 'en', + 'question_state_data_schema_version': ( + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'linked_skill_ids': ['skill_1'], + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) + }, + 'skill_id': 1, + 'skill_difficulty': 0.3 + } + # Ruling out the possibility of any other type for mypy type checking. + assert isinstance(suggestion_change['question_dict'], dict) + test_question_dict: question_domain.QuestionDict = ( + suggestion_change['question_dict'] + ) + + question_state_data = test_question_dict['question_state_data'] + question_state_data['content']['html'] = '

    No image content

    ' + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_ADD_QUESTION, + feconf.ENTITY_TYPE_SKILL, + 'skill_1', 1, + suggestion_models.STATUS_ACCEPTED, self.author_id, + 'reviewer_2', suggestion_change, 'category1', + 'thread_1', 'en') + + response = suggestion_services.generate_contributor_certificate_data( + self.username, + feconf.SUGGESTION_TYPE_ADD_QUESTION, + None, + self.from_date, + self.to_date, + ) + + self.assertIsNotNone(response) + + def test_create_question_contributor_certificate_with_image_content( + self + ) -> None: + content_id_generator = translation_domain.ContentIdGenerator() + suggestion_change: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { + 'cmd': ( + question_domain + .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), + 'question_dict': { + 'id': 'test_id', + 'version': 12, + 'question_state_data': self._create_valid_question_data( + 'default_state', content_id_generator).to_dict(), + 'language_code': 'en', + 'question_state_data_schema_version': ( + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'linked_skill_ids': ['skill_1'], + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) + }, + 'skill_id': 1, + 'skill_difficulty': 0.3 + } + # Ruling out the possibility of any other type for mypy type checking. 
+ assert isinstance(suggestion_change['question_dict'], dict) + test_question_dict: question_domain.QuestionDict = ( + suggestion_change['question_dict'] + ) + + question_state_data = test_question_dict['question_state_data'] + question_state_data['content']['html'] = ( + '') + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_ADD_QUESTION, + feconf.ENTITY_TYPE_SKILL, + 'skill_1', 1, + suggestion_models.STATUS_ACCEPTED, self.author_id, + 'reviewer_2', suggestion_change, 'category1', + 'thread_1', 'en') + + response = suggestion_services.generate_contributor_certificate_data( + self.username, + feconf.SUGGESTION_TYPE_ADD_QUESTION, + None, + self.from_date, + self.to_date, + ) + + self.assertIsNotNone(response) + + def test_create_contributor_certificate_raises_exception_for_no_suggestions( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'There are no contributions for the given time range.' + ): + suggestion_services.generate_contributor_certificate_data( + self.username, + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + 'hi', + self.from_date, + self.to_date, + ) + + def test_create_certificate_raises_exception_for_no_question_suggestions( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'There are no contributions for the given time range.' + ): + suggestion_services.generate_contributor_certificate_data( + self.username, + feconf.SUGGESTION_TYPE_ADD_QUESTION, + None, + self.from_date, + self.to_date, + ) + + def test_create_contributor_certificate_raises_exception_for_wrong_language( + self + ) -> None: + with self.assertRaisesRegex( + Exception, 'The provided language is invalid.' 
+ ): + suggestion_services.generate_contributor_certificate_data( + self.username, + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + 'test', + self.from_date, + self.to_date, + ) + + def test_create_contributor_certificate_raises_exception_for_wrong_username( + self + ) -> None: + username = 'wrong_user' + + with self.assertRaisesRegex( + Exception, 'There is no user for the given username.' + ): + suggestion_services.generate_contributor_certificate_data( + username, + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + 'hi', + self.from_date, + self.to_date, + ) + + def test_create_contributor_certificate_raises_exception_for_wrong_type( + self + ) -> None: + with self.assertRaisesRegex( + Exception, 'The suggestion type is invalid.' + ): + suggestion_services.generate_contributor_certificate_data( + self.username, + feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, + 'test', + self.from_date, + self.to_date, + ) diff --git a/core/domain/summary_services.py b/core/domain/summary_services.py index 513261df441f..0d8b32c20377 100644 --- a/core/domain/summary_services.py +++ b/core/domain/summary_services.py @@ -18,10 +18,11 @@ from __future__ import annotations -from core import python_utils from core import utils from core.constants import constants +from core.domain import activity_domain from core.domain import activity_services +from core.domain import collection_domain from core.domain import collection_services from core.domain import exp_domain from core.domain import exp_fetchers @@ -30,9 +31,101 @@ from core.domain import rights_manager from core.domain import search_services from core.domain import stats_services +from core.domain import user_domain from core.domain import user_services -_LIBRARY_INDEX_GROUPS = [{ +from typing import ( + Callable, Dict, List, Optional, Sequence, Tuple, TypedDict, Union, cast) + + +class DisplayableCollectionSummaryDict(TypedDict): + """Type for the displayable collection summary dictionary.""" + + id: str + title: str + category: str + 
activity_type: str + objective: str + language_code: str + tags: List[str] + node_count: int + last_updated_msec: float + thumbnail_icon_url: str + thumbnail_bg_color: str + + +class DisplayableExplorationSummaryDict(TypedDict): + """Type for the displayable exploration summary dictionary.""" + + id: str + title: str + activity_type: str + category: str + created_on_msec: float + objective: str + language_code: str + last_updated_msec: float + human_readable_contributors_summary: Dict[str, Dict[str, int]] + status: str + ratings: Dict[str, int] + community_owned: bool + tags: List[str] + thumbnail_icon_url: str + thumbnail_bg_color: str + num_views: int + + +class PlaythroughDict(TypedDict): + """Type for the user-specific playthrough information dictionary.""" + + next_exploration_id: Optional[str] + completed_exploration_ids: List[str] + + +class LearnerCollectionNodeDict(collection_domain.CollectionNodeDict): + """Type for the learner collection node dictionary.""" + + exploration_summary: Optional[DisplayableExplorationSummaryDict] + + +class LearnerCollectionDict(TypedDict): + """Type for the learner collection dictionary.""" + + id: str + title: str + category: str + objective: str + language_code: str + tags: List[str] + schema_version: int + playthrough_dict: PlaythroughDict + version: int + nodes: List[LearnerCollectionNodeDict] + + +class LibraryGroupDict(TypedDict): + """Dictionary representation of library group for the library index page.""" + + header_i18n_id: str + categories: List[str] + activity_summary_dicts: Sequence[DisplayableSummaryDictsType] + has_full_results_page: bool + full_results_url: Optional[str] + + +class LibraryIndexGroupDict(TypedDict): + """Type for the _LIBRARY_INDEX_GROUPS's dictionaries.""" + + header_i18n_id: str + search_categories: List[str] + + +DisplayableSummaryDictsType = Union[ + DisplayableCollectionSummaryDict, + DisplayableExplorationSummaryDict +] + +_LIBRARY_INDEX_GROUPS: List[LibraryIndexGroupDict] = [{ 
'header_i18n_id': 'I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS', 'search_categories': [ 'Mathematics', 'Algebra', 'Arithmetic', 'Calculus', 'Combinatorics', @@ -66,7 +159,9 @@ }] -def get_human_readable_contributors_summary(contributors_summary): +def get_human_readable_contributors_summary( + contributors_summary: Dict[str, int] +) -> Dict[str, Dict[str, int]]: """Gets contributors summary in human readable form. Args: @@ -95,8 +190,12 @@ def get_human_readable_contributors_summary(contributors_summary): def get_learner_collection_dict_by_id( - collection_id, user, strict=True, - allow_invalid_explorations=False, version=None): + collection_id: str, + user: user_domain.UserActionsInfo, + strict: bool = True, + allow_invalid_explorations: bool = False, + version: Optional[int] = None +) -> LearnerCollectionDict: """Gets a dictionary representation of a collection given by the provided collection ID. This dict includes user-specific playthrough information. @@ -108,7 +207,7 @@ def get_learner_collection_dict_by_id( id exists in the datastore. allow_invalid_explorations: bool. Whether to also return explorations that are invalid, such as deleted/private explorations. - version: str or None. The version number of the collection to be + version: int or None. The version number of the collection to be retrieved. If it is None, the latest version will be retrieved. Returns: @@ -121,10 +220,15 @@ def get_learner_collection_dict_by_id( Raises: ValidationError. If the collection retrieved using the given ID references non-existent explorations. + Exception. No collection exists for the given collection id. """ collection = collection_services.get_collection_by_id( collection_id, strict=strict, version=version) + if collection is None: + raise Exception( + 'No collection exists for the given collection id.' 
+ ) exp_ids = collection.exploration_ids exp_summary_dicts = get_displayable_exp_summary_dicts_matching_ids( exp_ids, user=user) @@ -136,7 +240,7 @@ def get_learner_collection_dict_by_id( # TODO(bhenning): Users should not be recommended explorations they have # completed outside the context of a collection (see #1461). next_exploration_id = None - completed_exp_ids = None + completed_exp_ids = [] if user.user_id: completed_exp_ids = ( collection_services.get_valid_completed_exploration_ids( @@ -150,9 +254,25 @@ def get_learner_collection_dict_by_id( next_exploration_id = collection.first_exploration_id completed_exp_ids = [] - collection_dict = collection.to_dict() + # Here we use cast because the return type of 'to_dict' method + # is CollectionDict but here we need a different dictionary + # that contains 'playthrough_dict' key. So, we have defined a + # LearnerCollectionDict which is inherited from CollectionDict + # and assigned it to collection_dict. So, due to this difference + # in types, MyPy throws an error. Thus to avoid the error, + # we used cast here. + collection_dict: LearnerCollectionDict = cast( + LearnerCollectionDict, collection.to_dict() + ) + # Here we use MyPy ignore because the expression has type List[ + # CollectionNodeDict] but here we need a list of those dictionaries + # that can contain both 'exploration_summary' key and CollectionNodeDict's + # key. So, we have defined LearnerCollectionNodeDict and assigned + # its list type to the collection_dict['nodes']. So, due to this + # difference in types, MyPy throws an error. Thus, to avoid the error, + # we used ignore here. 
collection_dict['nodes'] = [ - node.to_dict() for node in collection.nodes] + node.to_dict() for node in collection.nodes] # type: ignore[misc] collection_dict['playthrough_dict'] = { 'next_exploration_id': next_exploration_id, @@ -188,7 +308,9 @@ def get_learner_collection_dict_by_id( return collection_dict -def get_displayable_collection_summary_dicts_matching_ids(collection_ids): +def get_displayable_collection_summary_dicts_matching_ids( + collection_ids: List[str] +) -> List[DisplayableCollectionSummaryDict]: """Returns a list of collection summary dicts corresponding to the given collection ids. @@ -200,13 +322,20 @@ def get_displayable_collection_summary_dicts_matching_ids(collection_ids): These elements are returned in the same order as that given in collection_ids. """ - collection_summaries = ( + collection_summaries_with_none = ( collection_services.get_collection_summaries_matching_ids( collection_ids)) + collection_summaries = [] + for collection_summary in collection_summaries_with_none: + collection_summaries.append(collection_summary) return _get_displayable_collection_summary_dicts(collection_summaries) -def get_exp_metadata_dicts_matching_query(query_string, search_offset, user): +def get_exp_metadata_dicts_matching_query( + query_string: str, + search_offset: Optional[int], + user: user_domain.UserActionsInfo +) -> Tuple[List[exp_domain.ExplorationSummaryMetadataDict], Optional[int]]: """Given a query string and a search offset, returns a list of exploration metadata dicts that satisfy the search query. 
@@ -235,7 +364,9 @@ def get_exp_metadata_dicts_matching_query(query_string, search_offset, user): return exploration_list, new_search_offset -def get_exploration_metadata_dicts(exploration_ids, user): +def get_exploration_metadata_dicts( + exploration_ids: List[str], user: user_domain.UserActionsInfo +) -> List[exp_domain.ExplorationSummaryMetadataDict]: """Given a list of exploration ids, optionally filters the list for explorations that are currently non-private and not deleted, and returns a list of dicts of the corresponding exploration summaries for collection @@ -261,8 +392,7 @@ def get_exploration_metadata_dicts(exploration_ids, user): filtered_exploration_summaries = [] for (exploration_summary, exploration_rights) in ( - python_utils.ZIP( - exploration_summaries, exploration_rights_objects)): + zip(exploration_summaries, exploration_rights_objects)): if exploration_summary is not None and exploration_rights is not None: if exploration_summary.status == ( rights_domain.ACTIVITY_STATUS_PRIVATE): @@ -280,7 +410,10 @@ def get_exploration_metadata_dicts(exploration_ids, user): for summary in filtered_exploration_summaries] -def get_displayable_exp_summary_dicts_matching_ids(exploration_ids, user=None): +def get_displayable_exp_summary_dicts_matching_ids( + exploration_ids: List[str], + user: Optional[user_domain.UserActionsInfo] = None +) -> List[DisplayableExplorationSummaryDict]: """Gets a summary of explorations in human readable form from exploration ids. 
@@ -323,8 +456,7 @@ def get_displayable_exp_summary_dicts_matching_ids(exploration_ids, user=None): filtered_exploration_summaries = [] for (exploration_summary, exploration_rights) in ( - python_utils.ZIP( - exploration_summaries, exploration_rights_objects)): + zip(exploration_summaries, exploration_rights_objects)): if exploration_summary is not None and exploration_rights is not None: if exploration_summary.status == ( rights_domain.ACTIVITY_STATUS_PRIVATE): @@ -339,7 +471,9 @@ def get_displayable_exp_summary_dicts_matching_ids(exploration_ids, user=None): return get_displayable_exp_summary_dicts(filtered_exploration_summaries) -def get_displayable_exp_summary_dicts(exploration_summaries): +def get_displayable_exp_summary_dicts( + exploration_summaries: List[exp_domain.ExplorationSummary] +) -> List[DisplayableExplorationSummaryDict]: """Gets a summary of explorations in human readable form. Given a list of exploration summary domain objects, returns a list, @@ -382,7 +516,7 @@ def get_displayable_exp_summary_dicts(exploration_summaries): for ind, exploration_summary in enumerate(exploration_summaries): if exploration_summary: - summary_dict = { + summary_dict: DisplayableExplorationSummaryDict = { 'id': exploration_summary.id, 'title': exploration_summary.title, 'activity_type': constants.ACTIVITY_TYPE_EXPLORATION, @@ -414,7 +548,11 @@ def get_displayable_exp_summary_dicts(exploration_summaries): return displayable_exp_summaries -def _get_displayable_collection_summary_dicts(collection_summaries): +def _get_displayable_collection_summary_dicts( + collection_summaries: Sequence[ + Optional[collection_domain.CollectionSummary] + ] +) -> List[DisplayableCollectionSummaryDict]: """Gets a summary of collections in human readable form. 
Args: @@ -440,7 +578,9 @@ def _get_displayable_collection_summary_dicts(collection_summaries): 'title': u'Exploration 2 Albert title', }, ] """ - displayable_collection_summaries = [] + displayable_collection_summaries: List[ + DisplayableCollectionSummaryDict + ] = [] for collection_summary in collection_summaries: if collection_summary and collection_summary.status != ( rights_domain.ACTIVITY_STATUS_PRIVATE): @@ -463,7 +603,7 @@ def _get_displayable_collection_summary_dicts(collection_summaries): return displayable_collection_summaries -def get_library_groups(language_codes): +def get_library_groups(language_codes: List[str]) -> List[LibraryGroupDict]: """Returns a list of groups for the library index page. Each group has a header and a list of dicts representing activity summaries. @@ -525,9 +665,9 @@ def get_library_groups(language_codes): for summary_dict in get_displayable_exp_summary_dicts(exp_summaries) } - results = [] + results: List[LibraryGroupDict] = [] for group in _LIBRARY_INDEX_GROUPS: - summary_dicts = [] + summary_dicts: Sequence[DisplayableSummaryDictsType] = [] collection_ids_to_display = ( header_id_to_collection_ids[group['header_i18n_id']]) summary_dicts = [ @@ -554,7 +694,9 @@ def get_library_groups(language_codes): return results -def require_activities_to_be_public(activity_references): +def require_activities_to_be_public( + activity_references: List[activity_domain.ActivityReference] +) -> None: """Raises an exception if any activity reference in the list does not exist, or is not public. @@ -593,7 +735,9 @@ def require_activities_to_be_public(activity_references): (activities_info['type'], activities_info['ids'][index])) -def get_featured_activity_summary_dicts(language_codes): +def get_featured_activity_summary_dicts( + language_codes: List[str] +) -> List[DisplayableSummaryDictsType]: """Returns a list of featured activities with the given language codes. The return value is sorted according to the list stored in the datastore. 
@@ -630,7 +774,9 @@ def get_featured_activity_summary_dicts(language_codes): col_summary_dicts = get_displayable_collection_summary_dicts_matching_ids( collection_ids) - summary_dicts_by_id = { + summary_dicts_by_id: Dict[ + str, Dict[str, DisplayableSummaryDictsType] + ] = { constants.ACTIVITY_TYPE_EXPLORATION: { summary_dict['id']: summary_dict for summary_dict in exp_summary_dicts @@ -650,7 +796,9 @@ def get_featured_activity_summary_dicts(language_codes): return featured_summary_dicts -def get_top_rated_exploration_summary_dicts(language_codes, limit): +def get_top_rated_exploration_summary_dicts( + language_codes: List[str], limit: int +) -> List[DisplayableExplorationSummaryDict]: """Returns a list of top rated explorations with the given language codes. The return value is sorted in decreasing order of average rating. @@ -685,15 +833,20 @@ def get_top_rated_exploration_summary_dicts(language_codes, limit): if exp_summary.language_code in language_codes and sum(exp_summary.ratings.values()) > 0] + sort_fnc: Callable[ + [exp_domain.ExplorationSummary], float + ] = lambda exp_summary: exp_summary.scaled_average_rating sorted_exp_summaries = sorted( filtered_exp_summaries, - key=lambda exp_summary: exp_summary.scaled_average_rating, + key=sort_fnc, reverse=True) return get_displayable_exp_summary_dicts(sorted_exp_summaries) -def get_recently_published_exp_summary_dicts(limit): +def get_recently_published_exp_summary_dicts( + limit: int +) -> List[DisplayableExplorationSummaryDict]: """Returns a list of recently published explorations. Args: @@ -723,9 +876,15 @@ def get_recently_published_exp_summary_dicts(limit): # Arranging recently published exploration summaries with respect to time. # sorted() is used to sort the random list of recently published summaries. 
+ sort_fnc: Callable[ + [exp_domain.ExplorationSummary], float + ] = lambda exp_summary: ( + exp_summary.first_published_msec + if exp_summary.first_published_msec else 0 + ) summaries = sorted( recently_published_exploration_summaries, - key=lambda exp_summary: exp_summary.first_published_msec, + key=sort_fnc, reverse=True) return get_displayable_exp_summary_dicts(summaries) diff --git a/core/domain/summary_services_test.py b/core/domain/summary_services_test.py index d5fe4a400fa2..28680e3590cd 100644 --- a/core/domain/summary_services_test.py +++ b/core/domain/summary_services_test.py @@ -35,33 +35,35 @@ from core.domain import user_services from core.tests import test_utils +from typing import Final + class ExplorationDisplayableSummariesTest( exp_services_test.ExplorationServicesUnitTests): """Test functions for getting displayable exploration summary dicts.""" - ALBERT_EMAIL = 'albert@example.com' - BOB_EMAIL = 'bob@example.com' - ALBERT_NAME = 'albert' - BOB_NAME = 'bob' + ALBERT_EMAIL: Final = 'albert@example.com' + BOB_EMAIL: Final = 'bob@example.com' + ALBERT_NAME: Final = 'albert' + BOB_NAME: Final = 'bob' - USER_C_NAME = 'c' - USER_D_NAME = 'd' - USER_C_EMAIL = 'c@example.com' - USER_D_EMAIL = 'd@example.com' + USER_C_NAME: Final = 'c' + USER_D_NAME: Final = 'd' + USER_C_EMAIL: Final = 'c@example.com' + USER_D_EMAIL: Final = 'd@example.com' - USER_C_PROFILE_PICTURE = 'c_profile_picture' + USER_C_PROFILE_PICTURE: Final = 'c_profile_picture' - EXP_ID_1 = 'eid1' - EXP_ID_2 = 'eid2' - EXP_ID_3 = 'eid3' - EXP_ID_4 = 'eid4' - EXP_ID_5 = 'eid5' + EXP_ID_1: Final = 'eid1' + EXP_ID_2: Final = 'eid2' + EXP_ID_3: Final = 'eid3' + EXP_ID_4: Final = 'eid4' + EXP_ID_5: Final = 'eid5' - EXPECTED_VERSION_1 = 4 - EXPECTED_VERSION_2 = 2 + EXPECTED_VERSION_1: Final = 4 + EXPECTED_VERSION_2: Final = 2 - def setUp(self): + def setUp(self) -> None: """Populate the database of explorations and their summaries. 
The sequence of events is: @@ -82,7 +84,7 @@ def setUp(self): - (3) User_4 edits the title of EXP_ID_4. """ - super(ExplorationDisplayableSummariesTest, self).setUp() + super().setUp() self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.signup(self.BOB_EMAIL, self.BOB_NAME) @@ -120,7 +122,7 @@ def setUp(self): exp_services.revert_exploration(self.bob_id, self.EXP_ID_1, 3, 2) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'This exploration cannot be published' ): rights_manager.publish_exploration(self.bob, self.EXP_ID_2) @@ -154,7 +156,7 @@ def setUp(self): self.save_new_valid_exploration(self.EXP_ID_5, self.bob_id) - def test_get_human_readable_contributors_summary(self): + def test_get_human_readable_contributors_summary(self) -> None: contributors_summary = {self.albert_id: 10, self.bob_id: 13} self.assertEqual({ self.ALBERT_NAME: { @@ -177,7 +179,9 @@ def test_get_human_readable_contributors_summary(self): }, summary_services.get_human_readable_contributors_summary( contributors_summary)) - def test_get_human_readable_contributors_summary_with_deleted_user(self): + def test_get_human_readable_contributors_summary_with_deleted_user( + self + ) -> None: contributors_summary = {self.albert_id: 10} user_services.mark_user_for_deletion(self.albert_id) self.assertEqual( @@ -187,7 +191,7 @@ def test_get_human_readable_contributors_summary_with_deleted_user(self): ) ) - def test_get_displayable_exp_summary_dicts_matching_ids(self): + def test_get_displayable_exp_summary_dicts_matching_ids(self) -> None: # A list of exp_id's are passed in: # EXP_ID_1 -- private exploration owned by Albert. # EXP_ID_2 -- pubished exploration owned by Albert. 
@@ -199,7 +203,7 @@ def test_get_displayable_exp_summary_dicts_matching_ids(self): summary_services.get_displayable_exp_summary_dicts_matching_ids( [self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5])) expected_summary = { - 'category': u'A category', + 'category': u'Algebra', 'community_owned': False, 'id': self.EXP_ID_2, 'language_code': constants.DEFAULT_LANGUAGE_CODE, @@ -208,8 +212,8 @@ def test_get_displayable_exp_summary_dicts_matching_ids(self): 'ratings': feconf.get_empty_ratings(), 'status': 'public', 'tags': [], - 'thumbnail_bg_color': '#a33f40', - 'thumbnail_icon_url': '/subjects/Lightbulb.svg', + 'thumbnail_bg_color': '#cd672b', + 'thumbnail_icon_url': '/subjects/Algebra.svg', 'title': u'Exploration 2 Albert title', } self.assertIn('last_updated_msec', displayable_summaries[0]) @@ -217,13 +221,16 @@ def test_get_displayable_exp_summary_dicts_matching_ids(self): expected_summary, displayable_summaries[0]) def test_get_displayable_exp_summary_dicts_matching_ids_with_invalid_exp_id( - self): + self + ) -> None: displayable_summaries = ( summary_services.get_displayable_exp_summary_dicts_matching_ids( ['invalid_exp_id'])) self.assertEqual(displayable_summaries, []) - def test_get_public_and_filtered_private_summary_dicts_for_creator(self): + def test_get_public_and_filtered_private_summary_dicts_for_creator( + self + ) -> None: # If a new exploration is created by another user (Bob) and not public, # then Albert cannot see it when querying for explorations. displayable_summaries = ( @@ -260,50 +267,54 @@ def test_get_public_and_filtered_private_summary_dicts_for_creator(self): class LibraryGroupsTest(exp_services_test.ExplorationServicesUnitTests): """Test functions for getting summary dicts for library groups.""" - def setUp(self): + def setUp(self) -> None: """Populate the database of explorations and their summaries. The sequence of events is: - (1) Admin logs in. - (2) Admin access admin page. - - (3) Admin reloads exploration with id '2'. 
+ - (3) Admin reloads exploration with id '3'. - (4) Admin logs out. """ - super(LibraryGroupsTest, self).setUp() + super().setUp() self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) csrf_token = self.get_new_csrf_token() self.post_json( '/adminhandler', { 'action': 'reload_exploration', - 'exploration_id': '2' + 'exploration_id': '3' }, csrf_token=csrf_token) self.logout() - def test_get_library_groups(self): - """The exploration with id '2' is an exploration in the Mathematics + def test_get_library_groups(self) -> None: + """The exploration with id '3' is an exploration in the Mathematics category. The call to get_library_groups() should return the exploration as part of the Mathematics & Statistics group. """ library_groups = summary_services.get_library_groups([]) expected_exploration_summary_dict = { - 'category': u'Algorithms', - 'community_owned': True, - 'id': '2', - 'language_code': constants.DEFAULT_LANGUAGE_CODE, - 'num_views': 0, - 'objective': u'discover the binary search algorithm', - 'ratings': feconf.get_empty_ratings(), - 'status': u'public', - 'tags': [], - 'title': u'The Lazy Magician', - 'thumbnail_bg_color': '#d0982a', - 'thumbnail_icon_url': '/subjects/Algorithms.svg', + 'id': '3', + 'title': 'Root Linear Coefficient Theorem', + 'activity_type': 'exploration', + 'category': u'Algebra', + 'objective': 'discover the Root Linear Coefficient Theorem', + 'language_code': 'en', + 'human_readable_contributors_summary': {}, + 'status': 'public', + 'ratings': {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0}, + 'thumbnail_icon_url': '/subjects/Algebra.svg', + 'thumbnail_bg_color': '#cd672b', + 'num_views': 0 } expected_group = { - 'categories': ['Algorithms', 'Computing', 'Programming'], - 'header_i18n_id': 'I18N_LIBRARY_GROUPS_COMPUTING', + 'categories': [ + 'Mathematics', 'Algebra', 'Arithmetic', + 'Calculus', 'Combinatorics', 'Geometry', 'Graph Theory', + 'Logic', 'Probability', 'Statistics', 'Trigonometry' + ], + 'header_i18n_id': 
'I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS', } self.assertEqual(len(library_groups), 1) @@ -322,14 +333,14 @@ class FeaturedExplorationDisplayableSummariesTest( summary dicts. """ - ALBERT_NAME = 'albert' - ALBERT_EMAIL = 'albert@example.com' + ALBERT_NAME: Final = 'albert' + ALBERT_EMAIL: Final = 'albert@example.com' - EXP_ID_1 = 'eid1' - EXP_ID_2 = 'eid2' - LANGUAGE_CODE_ES = 'es' + EXP_ID_1: Final = 'eid1' + EXP_ID_2: Final = 'eid2' + LANGUAGE_CODE_ES: Final = 'es' - def setUp(self): + def setUp(self) -> None: """Populate the database of explorations and their summaries. The sequence of events is: @@ -340,7 +351,7 @@ def setUp(self): - (5) Admin user is set up. """ - super(FeaturedExplorationDisplayableSummariesTest, self).setUp() + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) @@ -357,7 +368,7 @@ def setUp(self): self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) - def test_for_featured_explorations(self): + def test_for_featured_explorations(self) -> None: """Note that both EXP_ID_1 and EXP_ID_2 are public. However, only EXP_ID_2 is featured, so the call to get_featured_explorations() should only return [EXP_ID_2]. 
@@ -373,20 +384,20 @@ def test_for_featured_explorations(self): self.assertEqual(len(featured_activity_summaries), 1) self.assertDictContainsSubset({ 'status': 'public', - 'thumbnail_bg_color': '#a33f40', + 'thumbnail_bg_color': '#cd672b', 'community_owned': False, 'tags': [], - 'thumbnail_icon_url': '/subjects/Lightbulb.svg', + 'thumbnail_icon_url': '/subjects/Algebra.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_2, - 'category': 'A category', + 'category': 'Algebra', 'ratings': feconf.get_empty_ratings(), 'title': 'A title', 'num_views': 0, 'objective': 'An objective' }, featured_activity_summaries[0]) - def test_language_code_filter(self): + def test_language_code_filter(self) -> None: """Note that both EXP_ID_1 is in Spanish and EXP_ID_2 is in English.""" activity_services.update_featured_activity_references([ activity_domain.ActivityReference( @@ -439,12 +450,12 @@ def test_language_code_filter(self): class CollectionLearnerDictTests(test_utils.GenericTestBase): """Test get_learner_collection_dict_by_id.""" - EXP_ID = 'exploration_id' - EXP_ID_1 = 'exp_id1' - COLLECTION_ID = 'A_collection_id' + EXP_ID: Final = 'exploration_id' + EXP_ID_1: Final = 'exp_id1' + COLLECTION_ID: Final = 'A_collection_id' - def setUp(self): - super(CollectionLearnerDictTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) @@ -455,7 +466,52 @@ def setUp(self): self.owner = user_services.get_user_actions_info(self.owner_id) self.editor = user_services.get_user_actions_info(self.editor_id) - def test_get_learner_dict_with_deleted_exp_fails_validation(self): + def test_get_displayable_collection_summary_dicts_matching_ids( + self + ) -> None: + collection_id_1 = self.COLLECTION_ID + '_1' + self.save_new_valid_collection(self.COLLECTION_ID, self.owner_id) + self.save_new_valid_collection(collection_id_1, self.owner_id) + 
rights_manager.publish_collection(self.owner, self.COLLECTION_ID) + rights_manager.publish_collection(self.owner, collection_id_1) + collection_id_list = [collection_id_1, self.COLLECTION_ID] + collection_summaries = ( + summary_services. + get_displayable_collection_summary_dicts_matching_ids( + collection_id_list)) + self.assertEqual(len(collection_summaries), 2) + for collection_summary in collection_summaries: + self.assertIn(collection_summary['id'], collection_id_list) + + def test_get_learner_collection_dict_by_id_without_user_id(self) -> None: + self.save_new_valid_exploration(self.EXP_ID, self.owner_id) + self.save_new_valid_collection( + self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID) + rights_manager.publish_exploration(self.owner, self.EXP_ID) + rights_manager.publish_collection(self.owner, self.COLLECTION_ID) + mock_user = user_services.get_user_actions_info(None) + collection_dict = ( + summary_services.get_learner_collection_dict_by_id( + self.COLLECTION_ID, mock_user) + ) + self.assertEqual( + len( + collection_dict['playthrough_dict'] + ['completed_exploration_ids']), 0) + self.assertEqual( + collection_dict['playthrough_dict']['next_exploration_id'], + self.EXP_ID + ) + + def test_raises_error_if_invalid_collection_id_provided(self) -> None: + with self.assertRaisesRegex( + Exception, + 'No collection exists for the given collection id'): + summary_services.get_learner_collection_dict_by_id( + 'Invalid_id', self.owner, strict=False + ) + + def test_get_learner_dict_with_deleted_exp_fails_validation(self) -> None: self.save_new_valid_collection( self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID) summary_services.get_learner_collection_dict_by_id( @@ -463,14 +519,16 @@ def test_get_learner_dict_with_deleted_exp_fails_validation(self): exp_services.delete_exploration(self.owner_id, self.EXP_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected collection to only reference valid 
explorations, but ' 'found an exploration with ID: exploration_id'): summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) - def test_get_learner_dict_when_referencing_inaccessible_explorations(self): + def test_get_learner_dict_when_referencing_inaccessible_explorations( + self + ) -> None: self.save_new_default_collection(self.COLLECTION_ID, self.owner_id) self.save_new_valid_exploration(self.EXP_ID, self.editor_id) collection_services.update_collection( @@ -481,7 +539,7 @@ def test_get_learner_dict_when_referencing_inaccessible_explorations(self): # A collection cannot access someone else's private exploration. rights_manager.publish_collection(self.owner, self.COLLECTION_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected collection to only reference valid explorations, but ' 'found an exploration with ID: exploration_id'): @@ -493,7 +551,7 @@ def test_get_learner_dict_when_referencing_inaccessible_explorations(self): summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) - def test_get_learner_dict_with_private_exp_fails_validation(self): + def test_get_learner_dict_with_private_exp_fails_validation(self) -> None: self.save_new_valid_collection( self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID) @@ -504,7 +562,7 @@ def test_get_learner_dict_with_private_exp_fails_validation(self): # A public collection referencing a private exploration is bad, however. 
rights_manager.publish_collection(self.owner, self.COLLECTION_ID) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Cannot reference a private exploration within a public ' 'collection, exploration ID: exploration_id'): @@ -517,7 +575,7 @@ def test_get_learner_dict_with_private_exp_fails_validation(self): summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) - def test_get_learner_dict_with_allowed_private_exps(self): + def test_get_learner_dict_with_allowed_private_exps(self) -> None: self.save_new_valid_collection( self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID) self.save_new_valid_exploration(self.EXP_ID_1, self.editor_id) @@ -537,9 +595,10 @@ def test_get_learner_dict_with_allowed_private_exps(self): # collection since invalid explorations are being allowed, but the # private exploration of another author will not. collection_node_dicts = collection_dict['nodes'] + exploration_summary = collection_node_dicts[0]['exploration_summary'] + assert exploration_summary is not None self.assertEqual( - collection_node_dicts[0]['exploration_summary']['id'], - self.EXP_ID) + exploration_summary['id'], self.EXP_ID) self.assertIsNone(collection_node_dicts[1]['exploration_summary']) @@ -549,24 +608,24 @@ class TopRatedExplorationDisplayableSummariesTest( summary dicts. 
""" - ALBERT_EMAIL = 'albert@example.com' - ALICE_EMAIL = 'alice@example.com' - BOB_EMAIL = 'bob@example.com' - ALBERT_NAME = 'albert' - ALICE_NAME = 'alice' - BOB_NAME = 'bob' - - EXP_ID_1 = 'eid1' - EXP_ID_2 = 'eid2' - EXP_ID_3 = 'eid3' - EXP_ID_4 = 'eid4' - EXP_ID_5 = 'eid5' - EXP_ID_6 = 'eid6' - EXP_ID_7 = 'eid7' - EXP_ID_8 = 'eid8' - EXP_ID_9 = 'eid9' - - def setUp(self): + ALBERT_EMAIL: Final = 'albert@example.com' + ALICE_EMAIL: Final = 'alice@example.com' + BOB_EMAIL: Final = 'bob@example.com' + ALBERT_NAME: Final = 'albert' + ALICE_NAME: Final = 'alice' + BOB_NAME: Final = 'bob' + + EXP_ID_1: Final = 'eid1' + EXP_ID_2: Final = 'eid2' + EXP_ID_3: Final = 'eid3' + EXP_ID_4: Final = 'eid4' + EXP_ID_5: Final = 'eid5' + EXP_ID_6: Final = 'eid6' + EXP_ID_7: Final = 'eid7' + EXP_ID_8: Final = 'eid8' + EXP_ID_9: Final = 'eid9' + + def setUp(self) -> None: """Populate the database of explorations and their summaries. The sequence of events is: @@ -591,7 +650,7 @@ def setUp(self): - (19) Admin user is set up. 
""" - super(TopRatedExplorationDisplayableSummariesTest, self).setUp() + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) @@ -627,7 +686,7 @@ def setUp(self): self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) - def test_at_most_eight_top_rated_explorations(self): + def test_at_most_eight_top_rated_explorations(self) -> None: """Note that at most 8 explorations should be returned.""" rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_2, 5) @@ -662,13 +721,13 @@ def test_at_most_eight_top_rated_explorations(self): feconf.NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE)) expected_summary = { 'status': u'public', - 'thumbnail_bg_color': '#a33f40', + 'thumbnail_bg_color': '#cd672b', 'community_owned': False, 'tags': [], 'language_code': constants.DEFAULT_LANGUAGE_CODE, - 'thumbnail_icon_url': '/subjects/Lightbulb.svg', + 'thumbnail_icon_url': '/subjects/Algebra.svg', 'id': self.EXP_ID_3, - 'category': u'A category', + 'category': u'Algebra', 'ratings': {u'1': 0, u'3': 0, u'2': 0, u'5': 1, u'4': 1}, 'title': u'A title', 'num_views': 0, @@ -687,7 +746,7 @@ def test_at_most_eight_top_rated_explorations(self): self.assertEqual(expected_ordering, actual_ordering) - def test_only_explorations_with_ratings_are_returned(self): + def test_only_explorations_with_ratings_are_returned(self) -> None: """Note that only explorations with ratings will be included.""" rating_services.assign_rating_to_exploration( @@ -700,13 +759,13 @@ def test_only_explorations_with_ratings_are_returned(self): expected_summary = { 'status': u'public', - 'thumbnail_bg_color': '#a33f40', + 'thumbnail_bg_color': '#cd672b', 'community_owned': False, 'tags': [], - 'thumbnail_icon_url': '/subjects/Lightbulb.svg', + 'thumbnail_icon_url': '/subjects/Algebra.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_2, - 'category': u'A category', + 'category': u'Algebra', 
'ratings': {u'1': 0, u'3': 0, u'2': 0, u'5': 1, u'4': 0}, 'title': u'A title', 'num_views': 0, @@ -729,14 +788,14 @@ class RecentlyPublishedExplorationDisplayableSummariesTest( summary dicts. """ - ALBERT_NAME = 'albert' - ALBERT_EMAIL = 'albert@example.com' + ALBERT_NAME: Final = 'albert' + ALBERT_EMAIL: Final = 'albert@example.com' - EXP_ID_1 = 'eid1' - EXP_ID_2 = 'eid2' - EXP_ID_3 = 'eid3' + EXP_ID_1: Final = 'eid1' + EXP_ID_2: Final = 'eid2' + EXP_ID_3: Final = 'eid3' - def setUp(self): + def setUp(self) -> None: """Populate the database of explorations and their summaries. The sequence of events is: @@ -749,8 +808,7 @@ def setUp(self): - (7) Admin user is set up. """ - super( - RecentlyPublishedExplorationDisplayableSummariesTest, self).setUp() + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) @@ -774,7 +832,7 @@ def setUp(self): self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) - def test_for_recently_published_explorations(self): + def test_for_recently_published_explorations(self) -> None: """Tests for recently published explorations.""" self.process_and_flush_pending_tasks() @@ -783,13 +841,13 @@ def test_for_recently_published_explorations(self): feconf.RECENTLY_PUBLISHED_QUERY_LIMIT_FOR_LIBRARY_PAGE)) test_summary_1 = { 'status': 'public', - 'thumbnail_bg_color': '#a33f40', + 'thumbnail_bg_color': '#cd672b', 'community_owned': False, 'tags': [], - 'thumbnail_icon_url': '/subjects/Lightbulb.svg', + 'thumbnail_icon_url': '/subjects/Algebra.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_1, - 'category': u'A category', + 'category': u'Algebra', 'ratings': feconf.get_empty_ratings(), 'title': u'A title', 'num_views': 0, @@ -797,13 +855,13 @@ def test_for_recently_published_explorations(self): } test_summary_2 = { 'status': 'public', - 'thumbnail_bg_color': '#a33f40', + 'thumbnail_bg_color': '#cd672b', 'community_owned': False, 
'tags': [], - 'thumbnail_icon_url': '/subjects/Lightbulb.svg', + 'thumbnail_icon_url': '/subjects/Algebra.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_2, - 'category': u'A category', + 'category': u'Algebra', 'ratings': feconf.get_empty_ratings(), 'title': u'A title', 'num_views': 0, @@ -811,13 +869,13 @@ def test_for_recently_published_explorations(self): } test_summary_3 = { 'status': 'public', - 'thumbnail_bg_color': '#a33f40', + 'thumbnail_bg_color': '#cd672b', 'community_owned': False, 'tags': [], - 'thumbnail_icon_url': '/subjects/Lightbulb.svg', + 'thumbnail_icon_url': '/subjects/Algebra.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_3, - 'category': u'A category', + 'category': u'Algebra', 'ratings': feconf.get_empty_ratings(), 'title': u'A title', 'num_views': 0, @@ -853,42 +911,46 @@ def test_for_recently_published_explorations(self): class ActivityReferenceAccessCheckerTests(test_utils.GenericTestBase): """Tests for requiring that activity references are public.""" - EXP_ID_0 = 'exp_id_0' - EXP_ID_1 = 'exp_id_1' - COL_ID_2 = 'col_id_2' + EXP_ID_0: Final = 'exp_id_0' + EXP_ID_1: Final = 'exp_id_1' + COL_ID_2: Final = 'col_id_2' - def setUp(self): - super(ActivityReferenceAccessCheckerTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.owner = user_services.get_user_actions_info(self.owner_id) - def test_requiring_nonexistent_activities_be_public_raises_exception(self): - with self.assertRaisesRegexp(Exception, 'non-existent exploration'): + def test_requiring_nonexistent_activities_be_public_raises_exception( + self + ) -> None: + with self.assertRaisesRegex(Exception, 'non-existent exploration'): summary_services.require_activities_to_be_public([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_EXPLORATION, 'fake')]) - with self.assertRaisesRegexp(Exception, 
'non-existent collection'): + with self.assertRaisesRegex(Exception, 'non-existent collection'): summary_services.require_activities_to_be_public([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_COLLECTION, 'fake')]) - def test_requiring_private_activities_to_be_public_raises_exception(self): + def test_requiring_private_activities_to_be_public_raises_exception( + self + ) -> None: self.save_new_valid_exploration(self.EXP_ID_0, self.owner_id) self.save_new_valid_exploration(self.EXP_ID_1, self.owner_id) self.save_new_valid_collection( self.COL_ID_2, self.owner_id, exploration_id=self.EXP_ID_0) - with self.assertRaisesRegexp(Exception, 'private exploration'): + with self.assertRaisesRegex(Exception, 'private exploration'): summary_services.require_activities_to_be_public([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_0)]) - with self.assertRaisesRegexp(Exception, 'private collection'): + with self.assertRaisesRegex(Exception, 'private collection'): summary_services.require_activities_to_be_public([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_COLLECTION, self.COL_ID_2)]) - def test_requiring_public_activities_to_be_public_succeeds(self): + def test_requiring_public_activities_to_be_public_succeeds(self) -> None: self.save_new_valid_exploration(self.EXP_ID_0, self.owner_id) self.save_new_valid_collection( self.COL_ID_2, self.owner_id, exploration_id=self.EXP_ID_0) @@ -908,21 +970,21 @@ class CollectionNodeMetadataDictsTest( exp_services_test.ExplorationServicesUnitTests): """Test functions for getting collection node metadata dicts.""" - ALBERT_EMAIL = 'albert@example.com' - ALBERT_NAME = 'albert' + ALBERT_EMAIL: Final = 'albert@example.com' + ALBERT_NAME: Final = 'albert' - BOB_EMAIL = 'bob@example.com' - BOB_NAME = 'bob' + BOB_EMAIL: Final = 'bob@example.com' + BOB_NAME: Final = 'bob' - EXP_ID1 = 'eid1' - EXP_ID2 = 'eid2' - EXP_ID3 = 'eid3' - EXP_ID4 = 'eid4' - EXP_ID5 = 'eid5' - INVALID_EXP_ID = 
'invalid_exp_id' + EXP_ID1: Final = 'eid1' + EXP_ID2: Final = 'eid2' + EXP_ID3: Final = 'eid3' + EXP_ID4: Final = 'eid4' + EXP_ID5: Final = 'eid5' + INVALID_EXP_ID: Final = 'invalid_exp_id' - def setUp(self): - super(CollectionNodeMetadataDictsTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.signup(self.BOB_EMAIL, self.BOB_NAME) @@ -966,7 +1028,7 @@ def setUp(self): self.EXP_ID1, self.EXP_ID2, self.EXP_ID3, self.EXP_ID4]) - def test_get_exploration_metadata_dicts(self): + def test_get_exploration_metadata_dicts(self) -> None: metadata_dicts = (summary_services.get_exploration_metadata_dicts( [self.EXP_ID1, self.EXP_ID2, self.EXP_ID3], self.albert)) @@ -985,13 +1047,15 @@ def test_get_exploration_metadata_dicts(self): }] self.assertEqual(expected_metadata_dicts, metadata_dicts) - def test_get_exploration_metadata_dicts_with_invalid_exploration_id(self): + def test_get_exploration_metadata_dicts_with_invalid_exploration_id( + self + ) -> None: metadata_dicts = (summary_services.get_exploration_metadata_dicts( ['invalid_exp_id'], self.albert)) self.assertEqual(metadata_dicts, []) - def test_private_exps_of_another_user_are_not_returned(self): + def test_private_exps_of_another_user_are_not_returned(self) -> None: metadata_dicts = (summary_services.get_exploration_metadata_dicts( [self.EXP_ID5, self.EXP_ID4], self.bob)) @@ -1002,7 +1066,7 @@ def test_private_exps_of_another_user_are_not_returned(self): }] self.assertEqual(expected_metadata_dicts, metadata_dicts) - def test_public_exps_of_another_user_are_returned(self): + def test_public_exps_of_another_user_are_returned(self) -> None: metadata_dicts = (summary_services.get_exploration_metadata_dicts( [self.EXP_ID2, self.EXP_ID3, self.EXP_ID4], self.bob)) @@ -1021,7 +1085,7 @@ def test_public_exps_of_another_user_are_returned(self): }] self.assertEqual(expected_metadata_dicts, metadata_dicts) - def test_deleted_exps_are_not_returned(self): + def 
test_deleted_exps_are_not_returned(self) -> None: exp_services.delete_exploration(self.albert_id, self.EXP_ID2) metadata_dicts = (summary_services.get_exploration_metadata_dicts( @@ -1038,7 +1102,7 @@ def test_deleted_exps_are_not_returned(self): }] self.assertEqual(expected_metadata_dicts, metadata_dicts) - def test_exp_metadata_dicts_matching_query(self): + def test_exp_metadata_dicts_matching_query(self) -> None: metadata_dicts, _ = ( summary_services.get_exp_metadata_dicts_matching_query( 'Exploration 1', None, self.albert)) @@ -1050,7 +1114,7 @@ def test_exp_metadata_dicts_matching_query(self): }] self.assertEqual(expected_metadata_dicts, metadata_dicts) - def test_invalid_exp_ids(self): + def test_invalid_exp_ids(self) -> None: metadata_dicts = (summary_services.get_exploration_metadata_dicts( [self.EXP_ID3, self.INVALID_EXP_ID], self.albert)) @@ -1061,8 +1125,8 @@ def test_invalid_exp_ids(self): }] self.assertEqual(expected_metadata_dicts, metadata_dicts) - def test_guest_can_fetch_public_exploration_metadata_dicts(self): - new_guest_user = user_services.get_user_actions_info(None) + def test_guest_can_fetch_public_exploration_metadata_dicts(self) -> None: + new_guest_user = user_services.get_user_actions_info('mock_user') metadata_dicts = summary_services.get_exploration_metadata_dicts( [self.EXP_ID3, self.EXP_ID4], new_guest_user) @@ -1078,7 +1142,9 @@ def test_guest_can_fetch_public_exploration_metadata_dicts(self): self.assertEqual(metadata_dicts, expected_metadata_dicts) - def test_guest_cannot_fetch_private_exploration_metadata_dicts(self): + def test_guest_cannot_fetch_private_exploration_metadata_dicts( + self + ) -> None: new_guest_user = user_services.get_user_actions_info(None) self.save_new_valid_exploration('exp_id', self.albert_id) metadata_dicts = summary_services.get_exploration_metadata_dicts( diff --git a/core/domain/takeout_domain.py b/core/domain/takeout_domain.py index 33f3383b502f..f6067349f08a 100644 --- a/core/domain/takeout_domain.py 
+++ b/core/domain/takeout_domain.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Any, Dict, List, Tuple +from typing import Dict, List, Tuple class TakeoutData: @@ -26,7 +26,7 @@ class TakeoutData: def __init__( self, - user_data: Dict[str, Dict[str, Any]], + user_data: Dict[str, Dict[str, str]], user_images: List[TakeoutImage] ) -> None: """Constructs a TakeoutData domain object. @@ -67,7 +67,10 @@ class TakeoutImageReplacementInstruction: """ def __init__( - self, dictionary_path: Tuple[str], export_filename: str, new_key: str + self, + dictionary_path: Tuple[str, ...], + export_filename: str, + new_key: str ) -> None: """Constructs a TakeoutImageReplacementInstruction object. diff --git a/core/domain/takeout_domain_test.py b/core/domain/takeout_domain_test.py new file mode 100644 index 000000000000..2db222faa872 --- /dev/null +++ b/core/domain/takeout_domain_test.py @@ -0,0 +1,59 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for takeout_domain.py""" + +from __future__ import annotations + +from core.domain import takeout_domain +from core.tests import test_utils + + +class TakeoutDataTests(test_utils.GenericTestBase): + + def test_that_domain_object_is_created_correctly(self) -> None: + user_data = { + 'model_name': { + 'property1': 'value1', + 'property2': 'value2' + } + } + takeout_data = takeout_domain.TakeoutData(user_data, []) + self.assertEqual(takeout_data.user_data, user_data) + self.assertEqual(takeout_data.user_images, []) + + +class TakeoutImageTests(test_utils.GenericTestBase): + + def test_that_domain_object_is_created_correctly(self) -> None: + takeout_image_data = takeout_domain.TakeoutImage( + 'b64_fake_image_data', '/test/') + self.assertEqual( + takeout_image_data.b64_image_data, 'b64_fake_image_data') + self.assertEqual( + takeout_image_data.image_export_path, '/test/') + + +class TakeoutImageReplacementInstructionTests(test_utils.GenericTestBase): + + def test_that_domain_object_is_created_correctly(self) -> None: + image_replacement_data = ( + takeout_domain.TakeoutImageReplacementInstruction( + ('exp1',), 'test', 'key1')) + self.assertEqual( + image_replacement_data.dictionary_path, ('exp1',)) + self.assertEqual(image_replacement_data.export_filename, 'test') + self.assertEqual(image_replacement_data.new_key, 'key1') diff --git a/core/domain/takeout_service.py b/core/domain/takeout_service.py index 290931ddb3b4..b4eb60a9e683 100644 --- a/core/domain/takeout_service.py +++ b/core/domain/takeout_service.py @@ -18,6 +18,8 @@ from __future__ import annotations +import json +import logging import re from core import feconf @@ -25,16 +27,16 @@ from core.domain import user_services from core.platform import models -( - base_models, collection_models, email_models, - exploration_models, feedback_models, topic_models, - suggestion_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.collection, models.NAMES.email, 
- models.NAMES.exploration, models.NAMES.feedback, models.NAMES.topic, - models.NAMES.suggestion, models.NAMES.user]) +from typing import List, Type +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models -def get_models_which_should_be_exported(): +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) + + +def get_models_which_should_be_exported() -> List[Type[base_models.BaseModel]]: """Returns list of models to export. Returns: @@ -57,7 +59,7 @@ def get_models_which_should_be_exported(): not model_class.__name__ in exempt_base_classes] -def export_data_for_user(user_id): +def export_data_for_user(user_id: str) -> takeout_domain.TakeoutData: """Exports selected models according to model defined export_data functions. Args: @@ -69,8 +71,11 @@ def export_data_for_user(user_id): _data: }. + + Raises: + NotImplementedError. Takeout for profile users is not implemented. """ - user_settings = user_services.get_user_settings(user_id) + user_settings = user_services.get_user_settings(user_id, strict=False) if user_settings is not None and ( feconf.ROLE_ID_MOBILE_LEARNER in user_settings.roles): raise NotImplementedError( @@ -80,8 +85,21 @@ def export_data_for_user(user_id): for model in models_to_export: split_name = re.findall('[A-Z][^A-Z]*', model.__name__)[:-1] # Join the split name with underscores and add _data for final name. + + exported_model_data = model.export_data(user_id) + exported_model_data_json_string = json.dumps(exported_model_data) + user_id_match_object = re.search( + feconf.USER_ID_REGEX, exported_model_data_json_string) + if user_id_match_object: + logging.error( + '[TAKEOUT] User ID (%s) found in the JSON generated ' + 'for %s and user with ID %s' % ( + user_id_match_object.group(0), model.__name__, user_id + ) + ) + final_name = ('_').join([x.lower() for x in split_name]) - exported_data[final_name] = model.export_data(user_id) + exported_data[final_name] = exported_model_data # Separate out images. 
We store the images that need to be separated here # as a dictionary mapping tuples to strings. The tuple value indicates the @@ -94,7 +112,7 @@ def export_data_for_user(user_id): 'profile_picture_filename' ) ] - takeout_image_files = [] + takeout_image_files: List[takeout_domain.TakeoutImage] = [] for replacement_instruction in replacement_instructions: dictionary_path = replacement_instruction.dictionary_path replacement_filename = replacement_instruction.export_filename @@ -109,6 +127,8 @@ def export_data_for_user(user_id): image_key = dictionary_path[-1] image_data = pointer[image_key] if image_data is not None: + # Ruling out the possibility of Any for mypy type checking. + assert isinstance(image_data, str) takeout_image_files.append( takeout_domain.TakeoutImage(image_data, replacement_filename)) pointer[image_key] = replacement_filename diff --git a/core/domain/takeout_service_test.py b/core/domain/takeout_service_test.py index 0e3e7cf89496..6c3eb08a4871 100644 --- a/core/domain/takeout_service_test.py +++ b/core/domain/takeout_service_test.py @@ -18,6 +18,7 @@ import datetime import json +import logging from core import feconf from core import utils @@ -26,63 +27,110 @@ from core.domain import exp_services from core.domain import feedback_services from core.domain import rights_domain +from core.domain import stats_domain from core.domain import takeout_domain from core.domain import takeout_service from core.domain import topic_domain from core.platform import models from core.tests import test_utils +from typing import Any, Dict, Final, List, Optional, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import app_feedback_report_models + from mypy_imports import auth_models + from mypy_imports import base_models + from mypy_imports import blog_models + from mypy_imports import collection_models + from mypy_imports import config_models + from mypy_imports import exp_models as exploration_models + from mypy_imports import feedback_models 
+ from mypy_imports import improvements_models + from mypy_imports import learner_group_models + from mypy_imports import question_models + from mypy_imports import skill_models + from mypy_imports import story_models + from mypy_imports import subtopic_models + from mypy_imports import suggestion_models + from mypy_imports import topic_models + from mypy_imports import user_models + ( - app_feedback_report_models, auth_models, base_models, blog_models, - collection_models, config_models, email_models, exploration_models, - feedback_models, improvements_models, question_models, skill_models, - story_models, subtopic_models, suggestion_models, topic_models, user_models + app_feedback_report_models, + auth_models, + base_models, + blog_models, + collection_models, + config_models, + exploration_models, + feedback_models, + improvements_models, + learner_group_models, + question_models, + skill_models, + story_models, + subtopic_models, + suggestion_models, + topic_models, + user_models ) = models.Registry.import_models([ - models.NAMES.app_feedback_report, models.NAMES.auth, - models.NAMES.base_model, models.NAMES.blog, models.NAMES.collection, - models.NAMES.config, models.NAMES.email, models.NAMES.exploration, - models.NAMES.feedback, models.NAMES.improvements, models.NAMES.question, - models.NAMES.skill, models.NAMES.story, models.NAMES.subtopic, - models.NAMES.suggestion, models.NAMES.topic, models.NAMES.user + models.Names.APP_FEEDBACK_REPORT, + models.Names.AUTH, + models.Names.BASE_MODEL, + models.Names.BLOG, + models.Names.COLLECTION, + models.Names.CONFIG, + models.Names.EXPLORATION, + models.Names.FEEDBACK, + models.Names.IMPROVEMENTS, + models.Names.LEARNER_GROUP, + models.Names.QUESTION, + models.Names.SKILL, + models.Names.STORY, + models.Names.SUBTOPIC, + models.Names.SUGGESTION, + models.Names.TOPIC, + models.Names.USER ]) class TakeoutServiceProfileUserUnitTests(test_utils.GenericTestBase): """Tests for the takeout service for profile user.""" - 
USER_ID_1 = 'user_1' - PROFILE_ID_1 = 'profile_1' - USER_1_ROLE = feconf.ROLE_ID_CURRICULUM_ADMIN - PROFILE_1_ROLE = feconf.ROLE_ID_MOBILE_LEARNER - USER_1_EMAIL = 'user1@example.com' - GENERIC_USERNAME = 'user' - GENERIC_DATE = datetime.datetime(2019, 5, 20) - GENERIC_EPOCH = utils.get_time_in_millisecs(GENERIC_DATE) - GENERIC_IMAGE_URL = 'www.example.com/example.png' - GENERIC_USER_BIO = 'I am a user of Oppia!' - GENERIC_SUBJECT_INTERESTS = ['Math', 'Science'] - GENERIC_LANGUAGE_CODES = ['en', 'es'] - GENERIC_DISPLAY_ALIAS = 'display_alias' - GENERIC_DISPLAY_ALIAS_2 = 'display_alias2' - EXPLORATION_IDS = ['exp_1'] - EXPLORATION_IDS_2 = ['exp_2'] - COLLECTION_IDS = ['23', '42', '4'] - COLLECTION_IDS_2 = ['32', '44', '6'] - STORY_IDS = ['12', '22', '32'] - STORY_IDS_2 = ['42', '52', '62'] - TOPIC_IDS = ['11', '21', '31'] - TOPIC_IDS_2 = ['41', '51', '61'] - SKILL_ID_1 = 'skill_id_1' - SKILL_ID_2 = 'skill_id_2' - SKILL_ID_3 = 'skill_id_3' - DEGREE_OF_MASTERY = 0.5 - DEGREE_OF_MASTERY_2 = 0.6 - EXP_VERSION = 1 - STATE_NAME = 'state_name' - STORY_ID_1 = 'story_id_1' - COMPLETED_NODE_IDS_1 = ['node_id_1', 'node_id_2'] - - def set_up_non_trivial(self): + USER_ID_1: Final = 'user_1' + PROFILE_ID_1: Final = 'profile_1' + USER_1_ROLE: Final = feconf.ROLE_ID_CURRICULUM_ADMIN + PROFILE_1_ROLE: Final = feconf.ROLE_ID_MOBILE_LEARNER + USER_1_EMAIL: Final = 'user1@example.com' + GENERIC_USERNAME: Final = 'user' + GENERIC_DATE: Final = datetime.datetime(2019, 5, 20) + GENERIC_EPOCH: Final = utils.get_time_in_millisecs(GENERIC_DATE) + GENERIC_IMAGE_URL: Final = 'www.example.com/example.png' + GENERIC_USER_BIO: Final = 'I am a user of Oppia!' 
+ GENERIC_SUBJECT_INTERESTS: Final = ['Math', 'Science'] + GENERIC_LANGUAGE_CODES: Final = ['en', 'es'] + GENERIC_DISPLAY_ALIAS: Final = 'display_alias' + GENERIC_DISPLAY_ALIAS_2: Final = 'display_alias2' + EXPLORATION_IDS: Final = ['exp_1'] + EXPLORATION_IDS_2: Final = ['exp_2'] + COLLECTION_IDS: Final = ['23', '42', '4'] + COLLECTION_IDS_2: Final = ['32', '44', '6'] + STORY_IDS: Final = ['12', '22', '32'] + STORY_IDS_2: Final = ['42', '52', '62'] + TOPIC_IDS: Final = ['11', '21', '31'] + TOPIC_IDS_2: Final = ['41', '51', '61'] + SKILL_ID_1: Final = 'skill_id_1' + SKILL_ID_2: Final = 'skill_id_2' + SKILL_ID_3: Final = 'skill_id_3' + DEGREE_OF_MASTERY: Final = 0.5 + DEGREE_OF_MASTERY_2: Final = 0.6 + EXP_VERSION: Final = 1 + STATE_NAME: Final = 'state_name' + STORY_ID_1: Final = 'story_id_1' + COMPLETED_NODE_IDS_1: Final = ['node_id_1', 'node_id_2'] + + def set_up_non_trivial(self) -> None: """Set up all models for use in testing. 1) Simulates skill mastery of user_1 and profile_1. 2) Simulates completion of some activities of user_1 and profile_1. @@ -211,7 +259,7 @@ def set_up_non_trivial(self): display_alias=self.GENERIC_DISPLAY_ALIAS_2 ).put() - def set_up_trivial(self): + def set_up_trivial(self) -> None: """Setup for trivial test of export_data functionality.""" user_models.UserSettingsModel( id=self.USER_ID_1, @@ -224,52 +272,52 @@ def set_up_trivial(self): roles=[self.PROFILE_1_ROLE] ).put() - def test_export_data_for_profile_user_trivial_raises_error(self): + def test_export_data_for_profile_user_trivial_raises_error(self) -> None: """Trivial test of export_data functionality.""" self.set_up_trivial() error_msg = 'Takeout for profile users is not yet supported.' 
- with self.assertRaisesRegexp(NotImplementedError, error_msg): + with self.assertRaisesRegex(NotImplementedError, error_msg): takeout_service.export_data_for_user(self.PROFILE_ID_1) - def test_export_data_for_profile_user_nontrivial_raises_error(self): + def test_export_data_for_profile_user_nontrivial_raises_error(self) -> None: """Nontrivial test of export_data functionality.""" self.set_up_non_trivial() error_msg = 'Takeout for profile users is not yet supported.' - with self.assertRaisesRegexp(NotImplementedError, error_msg): + with self.assertRaisesRegex(NotImplementedError, error_msg): takeout_service.export_data_for_user(self.PROFILE_ID_1) class TakeoutServiceFullUserUnitTests(test_utils.GenericTestBase): """Tests for the takeout service for full user.""" - USER_ID_1 = 'user_1' - PROFILE_ID_1 = 'profile_1' - THREAD_ID_1 = 'thread_id_1' - THREAD_ID_2 = 'thread_id_2' - BLOG_POST_ID_1 = 'blog_post_id_1' - BLOG_POST_ID_2 = 'blog_post_id_2' - TOPIC_ID_1 = 'topic_id_1' - TOPIC_ID_2 = 'topic_id_2' - USER_1_ROLE = feconf.ROLE_ID_CURRICULUM_ADMIN - PROFILE_1_ROLE = feconf.ROLE_ID_MOBILE_LEARNER - USER_1_EMAIL = 'user1@example.com' - GENERIC_USERNAME = 'user' - GENERIC_PIN = '12345' - GENERIC_DATE = datetime.datetime(2019, 5, 20) - GENERIC_EPOCH = utils.get_time_in_millisecs(GENERIC_DATE) - GENERIC_IMAGE_URL = 'www.example.com/example.png' - GENERIC_USER_BIO = 'I am a user of Oppia!' 
- GENERIC_SUBJECT_INTERESTS = ['Math', 'Science'] - GENERIC_LANGUAGE_CODES = ['en', 'es'] - GENERIC_DISPLAY_ALIAS = 'display_alias' - GENERIC_DISPLAY_ALIAS_2 = 'display_alias2' - USER_1_IMPACT_SCORE = 0.87 - USER_1_TOTAL_PLAYS = 33 - USER_1_AVERAGE_RATINGS = 4.37 - USER_1_NUM_RATINGS = 22 - USER_1_WEEKLY_CREATOR_STATS_LIST = [ + USER_ID_1: Final = 'user_1' + PROFILE_ID_1: Final = 'profile_1' + THREAD_ID_1: Final = 'thread_id_1' + THREAD_ID_2: Final = 'thread_id_2' + BLOG_POST_ID_1: Final = 'blog_post_id_1' + BLOG_POST_ID_2: Final = 'blog_post_id_2' + TOPIC_ID_1: Final = 'topic_id_1' + TOPIC_ID_2: Final = 'topic_id_2' + USER_1_ROLE: Final = feconf.ROLE_ID_CURRICULUM_ADMIN + PROFILE_1_ROLE: Final = feconf.ROLE_ID_MOBILE_LEARNER + USER_1_EMAIL: Final = 'user1@example.com' + GENERIC_USERNAME: Final = 'user' + GENERIC_PIN: Final = '12345' + GENERIC_DATE: Final = datetime.datetime(2019, 5, 20) + GENERIC_EPOCH: Final = utils.get_time_in_millisecs(GENERIC_DATE) + GENERIC_IMAGE_URL: Final = 'www.example.com/example.png' + GENERIC_USER_BIO: Final = 'I am a user of Oppia!' 
+ GENERIC_SUBJECT_INTERESTS: Final = ['Math', 'Science'] + GENERIC_LANGUAGE_CODES: Final = ['en', 'es'] + GENERIC_DISPLAY_ALIAS: Final = 'display_alias' + GENERIC_DISPLAY_ALIAS_2: Final = 'display_alias2' + USER_1_IMPACT_SCORE: Final = 0.87 + USER_1_TOTAL_PLAYS: Final = 33 + USER_1_AVERAGE_RATINGS: Final = 4.37 + USER_1_NUM_RATINGS: Final = 22 + USER_1_WEEKLY_CREATOR_STATS_LIST: Final = [ { ('2019-05-21'): { 'average_ratings': 4.00, @@ -283,69 +331,76 @@ class TakeoutServiceFullUserUnitTests(test_utils.GenericTestBase): } } ] - EXPLORATION_IDS = ['exp_1'] - EXPLORATION_IDS_2 = ['exp_2'] - STORY_IDS = ['12', '22', '32'] - STORY_IDS_2 = ['42', '52', '62'] - TOPIC_IDS = ['11', '21', '31'] - TOPIC_IDS_2 = ['41', '51', '61'] - CREATOR_IDS = ['4', '8', '16'] - CREATOR_USERNAMES = ['username4', 'username8', 'username16'] - COLLECTION_IDS = ['23', '42', '4'] - COLLECTION_IDS_2 = ['32', '44', '6'] - TOPIC_IDS = ['12', '13', '14'] - GENERAL_FEEDBACK_THREAD_IDS = ['42', '4', '8'] - MESSAGE_IDS_READ_BY_USER = [0, 1] - SKILL_ID_1 = 'skill_id_1' - SKILL_ID_2 = 'skill_id_2' - SKILL_ID_3 = 'skill_id_3' - DEGREE_OF_MASTERY = 0.5 - DEGREE_OF_MASTERY_2 = 0.6 - EXP_VERSION = 1 - STATE_NAME = 'state_name' - STORY_ID_1 = 'story_id_1' - STORY_ID_2 = 'story_id_2' - COMPLETED_NODE_IDS_1 = ['node_id_1', 'node_id_2'] - COMPLETED_NODE_IDS_2 = ['node_id_3', 'node_id_4'] - THREAD_ENTITY_TYPE = feconf.ENTITY_TYPE_EXPLORATION - THREAD_ENTITY_ID = 'exp_id_2' - THREAD_STATUS = 'open' - THREAD_SUBJECT = 'dummy subject' - THREAD_HAS_SUGGESTION = True - THREAD_SUMMARY = 'This is a great summary.' - THREAD_MESSAGE_COUNT = 0 - MESSAGE_TEXT = 'Export test text.' 
- MESSAGE_RECEIEVED_VIA_EMAIL = False - CHANGE_CMD = {} - SCORE_CATEGORY_1 = 'category_1' - SCORE_CATEGORY_2 = 'category_2' - SCORE_CATEGORY = ( + EXPLORATION_IDS: Final = ['exp_1'] + EXPLORATION_IDS_2: Final = ['exp_2'] + STORY_IDS: Final = ['12', '22', '32'] + STORY_IDS_2: Final = ['42', '52', '62'] + TOPIC_IDS_2: Final = ['41', '51', '61'] + CREATOR_IDS: Final = ['4', '8', '16'] + CREATOR_USERNAMES: Final = ['username4', 'username8', 'username16'] + COLLECTION_IDS: Final = ['23', '42', '4'] + COLLECTION_IDS_2: Final = ['32', '44', '6'] + TOPIC_IDS: Final = ['12', '13', '14'] + GENERAL_FEEDBACK_THREAD_IDS: Final = ['42', '4', '8'] + MESSAGE_IDS_READ_BY_USER: Final = [0, 1] + SKILL_ID_1: Final = 'skill_id_1' + SKILL_ID_2: Final = 'skill_id_2' + SKILL_ID_3: Final = 'skill_id_3' + DEGREE_OF_MASTERY: Final = 0.5 + DEGREE_OF_MASTERY_2: Final = 0.6 + EXP_VERSION: Final = 1 + STATE_NAME: Final = 'state_name' + STORY_ID_1: Final = 'story_id_1' + STORY_ID_2: Final = 'story_id_2' + COMPLETED_NODE_IDS_1: Final = ['node_id_1', 'node_id_2'] + COMPLETED_NODE_IDS_2: Final = ['node_id_3', 'node_id_4'] + LEARNER_GROUP_ID: Final = 'learner_group_1' + THREAD_ENTITY_TYPE: Final = feconf.ENTITY_TYPE_EXPLORATION + THREAD_ENTITY_ID: Final = 'exp_id_2' + THREAD_STATUS: Final = 'open' + THREAD_SUBJECT: Final = 'dummy subject' + THREAD_HAS_SUGGESTION: Final = True + THREAD_SUMMARY: Final = 'This is a great summary.' + THREAD_MESSAGE_COUNT: Final = 0 + MESSAGE_TEXT: Final = 'Export test text.' + MESSAGE_RECEIEVED_VIA_EMAIL: Final = False + CHANGE_CMD: Dict[str, str] = {} + SCORE_CATEGORY_1: Final = 'category_1' + SCORE_CATEGORY_2: Final = 'category_2' + SCORE_CATEGORY: str = ( suggestion_models.SCORE_TYPE_TRANSLATION + - suggestion_models.SCORE_CATEGORY_DELIMITER + 'English') - GENERIC_MODEL_ID = 'model-id-1' - COMMIT_TYPE = 'create' - COMMIT_MESSAGE = 'This is a commit.' 
- COMMIT_CMDS = [ + suggestion_models.SCORE_CATEGORY_DELIMITER + 'English' + ) + GENERIC_MODEL_ID: Final = 'model-id-1' + COMMIT_TYPE: Final = 'create' + COMMIT_MESSAGE: Final = 'This is a commit.' + COMMIT_CMDS: Final = [ {'cmd': 'some_command'}, {'cmd2': 'another_command'} ] - PLATFORM_ANDROID = 'android' + PLATFORM_ANDROID: Final = 'android' # Timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC. - REPORT_SUBMITTED_TIMESTAMP = datetime.datetime.fromtimestamp(1615151836) + REPORT_SUBMITTED_TIMESTAMP: Final = ( + datetime.datetime.fromtimestamp(1615151836) + ) # Timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC. - TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836) - TICKET_ID = '%s.%s.%s' % ( + TICKET_CREATION_TIMESTAMP: Final = ( + datetime.datetime.fromtimestamp(1616173836) + ) + TICKET_ID: Final = '%s.%s.%s' % ( 'random_hash', TICKET_CREATION_TIMESTAMP.second, '16CharString1234') - REPORT_TYPE_SUGGESTION = 'suggestion' - CATEGORY_OTHER = 'other' - PLATFORM_VERSION = '0.1-alpha-abcdef1234' - DEVICE_COUNTRY_LOCALE_CODE_INDIA = 'in' - ANDROID_DEVICE_MODEL = 'Pixel 4a' - ANDROID_SDK_VERSION = 28 - ENTRY_POINT_NAVIGATION_DRAWER = 'navigation_drawer' - TEXT_LANGUAGE_CODE_ENGLISH = 'en' - AUDIO_LANGUAGE_CODE_ENGLISH = 'en' - ANDROID_REPORT_INFO = { + REPORT_TYPE_SUGGESTION: Final = 'suggestion' + CATEGORY_OTHER: Final = 'other' + PLATFORM_VERSION: Final = '0.1-alpha-abcdef1234' + DEVICE_COUNTRY_LOCALE_CODE_INDIA: Final = 'in' + ANDROID_DEVICE_MODEL: Final = 'Pixel 4a' + ANDROID_SDK_VERSION: Final = 28 + ENTRY_POINT_NAVIGATION_DRAWER: Final = 'navigation_drawer' + TEXT_LANGUAGE_CODE_ENGLISH: Final = 'en' + AUDIO_LANGUAGE_CODE_ENGLISH: Final = 'en' + ANDROID_REPORT_INFO: Dict[ + str, Union[str, List[str], int, bool, Dict[str, str]] + ] = { 'user_feedback_other_text_input': 'add an admin', 'event_logs': ['event1', 'event2'], 'logcat_logs': ['logcat1', 'logcat2'], @@ -359,22 +414,32 @@ class 
TakeoutServiceFullUserUnitTests(test_utils.GenericTestBase): 'automatically_update_topics': False, 'is_curriculum_admin': False } - ANDROID_REPORT_INFO_SCHEMA_VERSION = 1 - SUGGESTION_LANGUAGE_CODE = 'en' - SUBMITTED_TRANSLATIONS_COUNT = 2 - SUBMITTED_TRANSLATION_WORD_COUNT = 100 - ACCEPTED_TRANSLATIONS_COUNT = 1 - ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT = 0 - ACCEPTED_TRANSLATION_WORD_COUNT = 50 - REJECTED_TRANSLATIONS_COUNT = 0 - REJECTED_TRANSLATION_WORD_COUNT = 0 + ANDROID_REPORT_INFO_SCHEMA_VERSION: Final = 1 + SUGGESTION_LANGUAGE_CODE: Final = 'en' + SUBMITTED_TRANSLATIONS_COUNT: Final = 2 + SUBMITTED_TRANSLATION_WORD_COUNT: Final = 100 + ACCEPTED_TRANSLATIONS_COUNT: Final = 1 + ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT: Final = 0 + ACCEPTED_TRANSLATION_WORD_COUNT: Final = 50 + REJECTED_TRANSLATIONS_COUNT: Final = 0 + REJECTED_TRANSLATION_WORD_COUNT: Final = 0 + REVIEWED_TRANSLATIONS_COUNT: Final = 0 + REVIEWED_TRANSLATION_WORD_COUNT: Final = 0 + ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT: Final = 0 + SUBMITTED_QUESTION_COUNT: Final = 20 + ACCEPTED_QUESTIONS_COUNT: Final = 2 + ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT: Final = 0 + REVIEWED_QUESTIONS_COUNT: Final = 2 + ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT: Final = 0 # Timestamp dates in sec since epoch for Mar 19 2021 UTC. - CONTRIBUTION_DATES = [ + CONTRIBUTION_DATES: Final = [ datetime.date.fromtimestamp(1616173836), datetime.date.fromtimestamp(1616173837) ] + FIRST_CONTRIBUTION_DATE: Final = datetime.datetime(2021, 5, 20) + LAST_CONTRIBUTION_DATE: Final = datetime.datetime(2022, 5, 20) - def set_up_non_trivial(self): + def set_up_non_trivial(self) -> None: """Set up all models for use in testing. 1) Simulates the creation of a user, user_1, and their stats model. 2) Simulates skill mastery of user_1 with two skills. @@ -397,6 +462,7 @@ def set_up_non_trivial(self): 19) Simulates user_1 scrubbing a report. 20) Creates new BlogPostModel and BlogPostRightsModel. 
21) Creates a TranslationContributionStatsModel. + 22) Creates new LearnerGroupModel and LearnerGroupsUserModel. """ # Setup for UserStatsModel. user_models.UserStatsModel( @@ -641,30 +707,6 @@ def set_up_non_trivial(self): display_alias=self.GENERIC_DISPLAY_ALIAS_2 ).put() - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_1_id', - target_type='exploration', - target_id='exp_id', - status=suggestion_models.STATUS_IN_REVIEW, - author_id=self.USER_ID_1, - final_reviewer_id='reviewer_id', - language_code=self.SUGGESTION_LANGUAGE_CODE, - filename='application_audio.mp3', - content='

    Some content

    ', - rejection_message=None).put() - - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_2_id', - target_type='exploration', - target_id='exp_id', - status=suggestion_models.STATUS_IN_REVIEW, - author_id=self.USER_ID_1, - final_reviewer_id=None, - language_code=self.SUGGESTION_LANGUAGE_CODE, - filename='application_audio.mp3', - content='

    Some content

    ', - rejection_message=None).put() - suggestion_models.TranslationContributionStatsModel.create( language_code=self.SUGGESTION_LANGUAGE_CODE, contributor_user_id=self.USER_ID_1, @@ -683,6 +725,44 @@ def set_up_non_trivial(self): contribution_dates=self.CONTRIBUTION_DATES ) + suggestion_models.TranslationReviewStatsModel.create( + language_code=self.SUGGESTION_LANGUAGE_CODE, + reviewer_user_id=self.USER_ID_1, + topic_id=self.TOPIC_ID_1, + reviewed_translations_count=self.REVIEWED_TRANSLATIONS_COUNT, + reviewed_translation_word_count=( + self.REVIEWED_TRANSLATION_WORD_COUNT), + accepted_translations_count=self.ACCEPTED_TRANSLATIONS_COUNT, + accepted_translations_with_reviewer_edits_count=( + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + accepted_translation_word_count=( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + + suggestion_models.QuestionContributionStatsModel.create( + contributor_user_id=self.USER_ID_1, + topic_id=self.TOPIC_ID_1, + submitted_questions_count=self.SUBMITTED_QUESTION_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_without_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + + suggestion_models.QuestionReviewStatsModel.create( + reviewer_user_id=self.USER_ID_1, + topic_id=self.TOPIC_ID_1, + reviewed_questions_count=self.REVIEWED_QUESTIONS_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_with_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + user_models.UserContributionRightsModel( id=self.USER_ID_1, can_review_translation_for_language_codes=['hi', 'en'], @@ -763,16 
+843,16 @@ def set_up_non_trivial(self): commit_cmds=self.COMMIT_CMDS ).put() - improvements_models.TaskEntryModel( + improvements_models.ExplorationStatsTaskEntryModel( id=self.GENERIC_MODEL_ID, composite_entity_id=self.GENERIC_MODEL_ID, - entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION, entity_id=self.GENERIC_MODEL_ID, entity_version=1, - task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - target_type=improvements_models.TASK_TARGET_TYPE_STATE, + task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE, + target_type=constants.TASK_TARGET_TYPE_STATE, target_id=self.GENERIC_MODEL_ID, - status=improvements_models.TASK_STATUS_OPEN, + status=constants.TASK_STATUS_OPEN, resolver_id=self.USER_ID_1 ).put() @@ -860,7 +940,40 @@ def set_up_non_trivial(self): blog_post_rights_for_post_2.update_timestamps() blog_post_rights_for_post_2.put() - def set_up_trivial(self): + blog_models.BlogAuthorDetailsModel.create( + author_id=self.USER_ID_1, + displayed_author_name='general name', + author_bio='general blog author' + ) + + learner_group_model = learner_group_models.LearnerGroupModel( + id=self.LEARNER_GROUP_ID, + title='sample title', + description='sample description', + facilitator_user_ids=[self.USER_ID_1], + learner_user_ids=['user_id_2'], + invited_learner_user_ids=['user_id_3'], + subtopic_page_ids=['subtopic_id_1', 'subtopic_id_2'], + story_ids=['skill_id_1', 'skill_id_2'] + ) + learner_group_model.update_timestamps() + learner_group_model.put() + + learner_grp_user_model = user_models.LearnerGroupsUserModel( + id=self.USER_ID_1, + invited_to_learner_groups_ids=['group_id_1'], + learner_groups_user_details=[ + { + 'group_id': 'group_id_2', + 'progress_sharing_is_turned_on': False + } + ], + learner_groups_user_details_schema_version=1 + ) + learner_grp_user_model.update_timestamps() + learner_grp_user_model.put() + + def set_up_trivial(self) -> None: """Setup for trivial test of export_data 
functionality.""" user_models.UserSettingsModel( id=self.USER_ID_1, @@ -874,45 +987,50 @@ def set_up_trivial(self): ).put() user_models.UserSubscriptionsModel(id=self.USER_ID_1).put() - def test_export_nonexistent_full_user_raises_error(self): + def test_export_nonexistent_full_user_raises_error(self) -> None: """Setup for nonexistent user test of export_data functionality.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( user_models.UserSettingsModel.EntityNotFoundError, 'Entity for class UserSettingsModel with id fake_user_id ' 'not found'): takeout_service.export_data_for_user('fake_user_id') - def test_export_data_for_full_user_trivial_is_correct(self): + def test_export_data_for_full_user_trivial_is_correct(self) -> None: """Trivial test of export_data functionality.""" self.set_up_trivial() - self.maxDiff = None + self.maxDiff = 0 # Generate expected output. - app_feedback_report = {} - collection_progress_data = {} - collection_rights_data = { + app_feedback_report: Dict[str, Dict[str, Union[str, int]]] = {} + collection_progress_data: Dict[str, List[str]] = {} + collection_rights_data: Dict[str, List[str]] = { 'editable_collection_ids': [], 'owned_collection_ids': [], 'viewable_collection_ids': [], 'voiced_collection_ids': [] } - completed_activities_data = {} - contribution_data = {} - exploration_rights_data = { + completed_activities_data: Dict[str, List[str]] = {} + contribution_data: Dict[str, List[str]] = {} + exploration_rights_data: Dict[str, List[str]] = { 'editable_exploration_ids': [], 'owned_exploration_ids': [], 'viewable_exploration_ids': [], 'voiced_exploration_ids': [] } - exploration_data = {} - general_feedback_message_data = {} - general_feedback_thread_data = {} - general_feedback_thread_user_data = {} - general_suggestion_data = {} - last_playthrough_data = {} - learner_goals_data = {} - learner_playlist_data = {} - incomplete_activities_data = {} - user_settings_data = { + exploration_data: Dict[ + str, + Dict[str, 
Union[str, int, Dict[str, str]]] + ] = {} + general_feedback_message_data: Dict[ + str, Dict[str, Union[int, str]] + ] = {} + general_feedback_thread_data: Dict[str, Dict[str, Union[int, str]]] = {} + general_feedback_thread_user_data: Dict[str, Dict[str, List[int]]] = {} + general_suggestion_data: Dict[str, Dict[str, Union[int, str]]] = {} + last_playthrough_data: Dict[str, Dict[str, Union[int, str]]] = {} + learner_goals_data: Dict[str, List[str]] = {} + learner_playlist_data: Dict[str, List[str]] = {} + incomplete_activities_data: Dict[str, List[str]] = {} + user_settings_data: Dict[str, Union[List[str], Optional[str], int]] = { 'email': 'user1@example.com', 'roles': [feconf.ROLE_ID_CURRICULUM_ADMIN], 'banned': False, @@ -933,55 +1051,83 @@ def test_export_data_for_full_user_trivial_is_correct(self): 'preferred_language_codes': [], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'display_alias': None, + 'has_viewed_lesson_info_modal_once': False, } - skill_data = {} - stats_data = {} - story_progress_data = {} - subscriptions_data = { + skill_data: Dict[str, str] = {} + stats_data: Dict[str, stats_domain.AggregatedStatsDict] = {} + story_progress_data: Dict[str, List[str]] = {} + subscriptions_data: Dict[str, Optional[List[str]]] = { 'exploration_ids': [], 'collection_ids': [], 'creator_usernames': [], 'general_feedback_thread_ids': [], 'last_checked_msec': None } - task_entry_data = { + task_entry_data: Dict[str, List[str]] = { 'task_ids_resolved_by_user': [], 'issue_descriptions': [], 'resolution_msecs': [], 'statuses': [] } - topic_rights_data = { + topic_rights_data: Dict[str, List[str]] = { 'managed_topic_ids': [] } - expected_voiceover_application_data = {} - expected_contrib_proficiency_data = {} - expected_contribution_rights_data = {} - expected_collection_rights_sm = {} - expected_collection_sm = {} - expected_skill_sm = {} - expected_subtopic_page_sm = {} - 
expected_topic_rights_sm = {} - expected_topic_sm = {} - expected_translation_contribution_stats = {} - expected_story_sm = {} - expected_question_sm = {} - expected_config_property_sm = {} - expected_exploration_rights_sm = {} - expected_exploration_sm = {} - expected_platform_parameter_sm = {} - expected_user_auth_details = {} - expected_user_email_preferences = {} - expected_blog_post_data = {} - expected_blog_post_rights = { + expected_contrib_proficiency_data: Dict[ + str, Dict[str, Union[int, bool]] + ] = {} + expected_contribution_rights_data: Dict[ + str, Union[bool, List[str]] + ] = {} + expected_collection_rights_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_collection_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_skill_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_subtopic_page_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_topic_rights_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_topic_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_translation_contribution_stats: Dict[ + str, Dict[str, Dict[str, str]] + ] = {} + expected_translation_review_stats: Dict[ + str, Dict[str, Dict[str, str]] + ] = {} + expected_question_contribution_stats: Dict[ + str, Dict[str, Dict[str, str]] + ] = {} + expected_question_review_stats: Dict[ + str, Dict[str, Dict[str, str]] + ] = {} + expected_story_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_question_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_config_property_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_exploration_rights_sm: Dict[ + str, Dict[str, Dict[str, str]] + ] = {} + expected_exploration_sm: Dict[str, Dict[str, Dict[str, str]]] = {} + expected_platform_parameter_sm: Dict[ + str, Dict[str, Dict[str, str]] + ] = {} + expected_user_auth_details: Dict[str, str] = {} + expected_user_email_preferences: Dict[str, str] = {} + expected_blog_post_data: Dict[str, Union[str, float, List[str]]] = {} + expected_blog_post_rights: 
Dict[str, List[str]] = { 'editable_blog_post_ids': [] } - - expected_user_data = { + expected_blog_author_details: Dict[str, Dict[str, str]] = {} + expected_learner_group_model_data: Dict[str, str] = {} + expected_learner_grp_user_model_data: Dict[str, str] = {} + + # Here we use type Any because this dictionary contains other + # different types of dictionaries whose values can vary from int + # to complex Union types. So, to make this Dict generalized for + # every other Dict. We used Any here. + expected_user_data: Dict[str, Dict[str, Any]] = { 'app_feedback_report': app_feedback_report, 'blog_post': expected_blog_post_data, 'blog_post_rights': expected_blog_post_rights, + 'blog_author_details': expected_blog_author_details, 'user_stats': stats_data, 'user_settings': user_settings_data, 'user_subscriptions': subscriptions_data, @@ -993,7 +1139,9 @@ def test_export_data_for_full_user_trivial_is_correct(self): 'exp_user_last_playthrough': last_playthrough_data, 'learner_goals': learner_goals_data, 'learner_playlist': learner_playlist_data, - 'task_entry': task_entry_data, + 'learner_group': expected_learner_group_model_data, + 'learner_groups_user': expected_learner_grp_user_model_data, + 'exploration_stats_task_entry': task_entry_data, 'topic_rights': topic_rights_data, 'collection_progress': collection_progress_data, 'story_progress': story_progress_data, @@ -1004,8 +1152,6 @@ def test_export_data_for_full_user_trivial_is_correct(self): 'collection_rights': collection_rights_data, 'general_suggestion': general_suggestion_data, 'exploration_rights': exploration_rights_data, - 'general_voiceover_application': - expected_voiceover_application_data, 'user_contribution_proficiency': expected_contrib_proficiency_data, 'user_contribution_rights': expected_contribution_rights_data, 'collection_rights_snapshot_metadata': @@ -1021,6 +1167,12 @@ def test_export_data_for_full_user_trivial_is_correct(self): 'topic_snapshot_metadata': expected_topic_sm, 
'translation_contribution_stats': expected_translation_contribution_stats, + 'translation_review_stats': + expected_translation_review_stats, + 'question_contribution_stats': + expected_question_contribution_stats, + 'question_review_stats': + expected_question_review_stats, 'story_snapshot_metadata': expected_story_sm, 'question_snapshot_metadata': expected_question_sm, 'config_property_snapshot_metadata': @@ -1043,10 +1195,30 @@ def test_export_data_for_full_user_trivial_is_correct(self): observed_json = json.dumps(observed_data) expected_json = json.dumps(expected_user_data) self.assertEqual(json.loads(expected_json), json.loads(observed_json)) - expected_images = [] + expected_images: List[takeout_domain.TakeoutImage] = [] self.assertEqual(expected_images, observed_images) - def test_exports_have_single_takeout_dict_key(self): + def test_export_data_for_full_user_when_user_id_is_leaked_fails( + self + ) -> None: + user_models.UserSettingsModel( + id=self.USER_ID_1, + email=self.USER_1_EMAIL, + roles=[self.USER_1_ROLE], + user_bio='I want to leak uid_abcdefghijabcdefghijabcdefghijab' + ).put() + with self.capture_logging(min_level=logging.ERROR) as log_messages: + takeout_service.export_data_for_user(self.USER_ID_1) + self.assertEqual( + [ + '[TAKEOUT] User ID (uid_abcdefghijabcdefghijabcdefghijab) ' + 'found in the JSON generated for UserSettingsModel and ' + 'user with ID user_1' + ], + log_messages + ) + + def test_exports_have_single_takeout_dict_key(self) -> None: """Test to ensure that all export policies that specify a key for the Takeout dict are also models that specify this policy are type MULTIPLE_INSTANCES_PER_USER. 
@@ -1112,7 +1284,7 @@ def test_exports_have_single_takeout_dict_key(self): else: self.assertEqual(num_takeout_keys, 0) - def test_exports_follow_export_policies(self): + def test_exports_follow_export_policies(self) -> None: """Test to ensure that all fields that should be exported per the export policy are exported, and exported in the proper format. """ @@ -1191,10 +1363,20 @@ def test_exports_follow_export_policies(self): base_models .MODEL_ASSOCIATION_TO_USER .ONE_INSTANCE_SHARED_ACROSS_USERS): + # Here we use MyPy ignore because model is of + # BaseModel type and BaseModel does not contain + # get_field_name_mapping_to_takeout_keys attribute, + # so because of this MyPy throws an error. Thus to + # avoid the error, we used ignore here. self.assertIsNotNone( - model.get_field_name_mapping_to_takeout_keys) + model.get_field_name_mapping_to_takeout_keys) # type: ignore[attr-defined] exported_data = model.export_data(self.USER_ID_1) - field_mapping = model.get_field_name_mapping_to_takeout_keys() + # Here we use MyPy ignore because model is of + # BaseModel type and BaseModel does not contain + # get_field_name_mapping_to_takeout_keys(), so + # because of this MyPy throws an error. Thus to + # avoid the error, we used ignore here. 
+ field_mapping = model.get_field_name_mapping_to_takeout_keys() # type: ignore[attr-defined] self.assertEqual( sorted(exported_field_names), sorted(field_mapping.keys()) @@ -1224,7 +1406,7 @@ def test_exports_follow_export_policies(self): sorted(exported_field_names) ) - def test_export_data_for_full_user_nontrivial_is_correct(self): + def test_export_data_for_full_user_nontrivial_is_correct(self) -> None: """Nontrivial test of export_data functionality.""" self.set_up_non_trivial() # We set up the feedback_thread_model here so that we can easily @@ -1446,27 +1628,6 @@ def test_export_data_for_full_user_nontrivial_is_correct(self): 'managed_topic_ids': [self.TOPIC_ID_1, self.TOPIC_ID_2] } - expected_voiceover_application_data = { - 'application_1_id': { - 'target_type': 'exploration', - 'target_id': 'exp_id', - 'status': 'review', - 'language_code': 'en', - 'filename': 'application_audio.mp3', - 'content': '

    Some content

    ', - 'rejection_message': None - }, - 'application_2_id': { - 'target_type': 'exploration', - 'target_id': 'exp_id', - 'status': 'review', - 'language_code': 'en', - 'filename': 'application_audio.mp3', - 'content': '

    Some content

    ', - 'rejection_message': None - } - } - expected_contribution_rights_data = { 'can_review_translation_for_language_codes': ['hi', 'en'], 'can_review_voiceover_for_language_codes': ['hi'], @@ -1565,8 +1726,8 @@ def test_export_data_for_full_user_nontrivial_is_correct(self): 'commit_message': self.COMMIT_MESSAGE, } } - expected_user_email_preferences = {} - expected_user_auth_details = {} + expected_user_email_preferences: Dict[str, str] = {} + expected_user_auth_details: Dict[str, str] = {} expected_app_feedback_report = { '%s.%s.%s' % ( self.PLATFORM_ANDROID, self.REPORT_SUBMITTED_TIMESTAMP.second, @@ -1593,6 +1754,27 @@ def test_export_data_for_full_user_nontrivial_is_correct(self): self.BLOG_POST_ID_2 ], } + expected_blog_author_details = { + 'author_name': 'test name', + 'author_bio': '' + } + expected_learner_group_data = { + 'title': 'sample title', + 'description': 'sample description', + 'role_in_group': 'facilitator', + 'subtopic_page_ids': ['subtopic_id_1', 'subtopic_id_2'], + 'story_ids': ['skill_id_1', 'skill_id_2'] + } + expected_learner_groups_user_data = { + 'invited_to_learner_groups_ids': ['group_id_1'], + 'learner_groups_user_details': [ + { + 'group_id': 'group_id_2', + 'progress_sharing_is_turned_on': False + } + ] + } + expected_translation_contribution_stats_data = { '%s.%s.%s' % ( self.SUGGESTION_LANGUAGE_CODE, self.USER_ID_1, @@ -1618,6 +1800,63 @@ def test_export_data_for_full_user_nontrivial_is_correct(self): date.isoformat() for date in self.CONTRIBUTION_DATES] } } + expected_translation_review_stats_data = { + '%s.%s.%s' % ( + self.SUGGESTION_LANGUAGE_CODE, self.USER_ID_1, + self.TOPIC_ID_1): { + 'language_code': self.SUGGESTION_LANGUAGE_CODE, + 'topic_id': self.TOPIC_ID_1, + 'reviewed_translations_count': ( + self.REVIEWED_TRANSLATIONS_COUNT), + 'reviewed_translation_word_count': ( + self.REVIEWED_TRANSLATION_WORD_COUNT), + 'accepted_translations_count': ( + self.ACCEPTED_TRANSLATIONS_COUNT), + 
'accepted_translations_with_reviewer_edits_count': ( + self + .ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + 'accepted_translation_word_count': ( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE.isoformat()), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE.isoformat()) + } + } + expected_question_contribution_stats_data = { + '%s.%s' % ( + self.USER_ID_1, self.TOPIC_ID_1): { + 'topic_id': self.TOPIC_ID_1, + 'submitted_questions_count': ( + self.SUBMITTED_QUESTION_COUNT), + 'accepted_questions_count': ( + self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_without_reviewer_edits_count': ( + self + .ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE.isoformat()), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE.isoformat()) + } + } + expected_question_review_stats_data = { + '%s.%s' % ( + self.USER_ID_1, self.TOPIC_ID_1): { + 'topic_id': self.TOPIC_ID_1, + 'reviewed_questions_count': ( + self.REVIEWED_QUESTIONS_COUNT), + 'accepted_questions_count': ( + self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_with_reviewer_edits_count': ( + self + .ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE.isoformat()), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE.isoformat()) + } + } expected_user_data = { 'user_stats': expected_stats_data, 'user_settings': expected_user_settings_data, @@ -1630,7 +1869,9 @@ def test_export_data_for_full_user_nontrivial_is_correct(self): 'exp_user_last_playthrough': expected_last_playthrough_data, 'learner_goals': expected_learner_goals_data, 'learner_playlist': expected_learner_playlist_data, - 'task_entry': expected_task_entry_data, + 'learner_group': expected_learner_group_data, + 'learner_groups_user': expected_learner_groups_user_data, + 'exploration_stats_task_entry': expected_task_entry_data, 'topic_rights': 
expected_topic_data, 'collection_progress': expected_collection_progress_data, 'story_progress': expected_story_progress_data, @@ -1644,8 +1885,6 @@ def test_export_data_for_full_user_nontrivial_is_correct(self): expected_collection_rights_data, 'general_suggestion': expected_general_suggestion_data, 'exploration_rights': expected_exploration_rights_data, - 'general_voiceover_application': - expected_voiceover_application_data, 'user_contribution_proficiency': expected_contrib_proficiency_data, 'user_contribution_rights': expected_contribution_rights_data, 'collection_rights_snapshot_metadata': @@ -1661,6 +1900,12 @@ def test_export_data_for_full_user_nontrivial_is_correct(self): 'topic_snapshot_metadata': expected_topic_sm, 'translation_contribution_stats': expected_translation_contribution_stats_data, + 'translation_review_stats': + expected_translation_review_stats_data, + 'question_contribution_stats': + expected_question_contribution_stats_data, + 'question_review_stats': + expected_question_review_stats_data, 'story_snapshot_metadata': expected_story_sm, 'question_snapshot_metadata': expected_question_sm, 'config_property_snapshot_metadata': @@ -1674,7 +1919,8 @@ def test_export_data_for_full_user_nontrivial_is_correct(self): 'user_auth_details': expected_user_auth_details, 'app_feedback_report': expected_app_feedback_report, 'blog_post': expected_blog_post_data, - 'blog_post_rights': expected_blog_post_rights + 'blog_post_rights': expected_blog_post_rights, + 'blog_author_details': expected_blog_author_details } user_takeout_object = takeout_service.export_data_for_user( @@ -1701,7 +1947,7 @@ def test_export_data_for_full_user_nontrivial_is_correct(self): observed_images[i].image_export_path ) - def test_export_for_full_user_does_not_export_profile_data(self): + def test_export_for_full_user_does_not_export_profile_data(self) -> None: """Test that exporting data for a full user does not export data for any profile user, atleast for the models that were 
populated for the profile user. @@ -1738,9 +1984,9 @@ def test_export_for_full_user_does_not_export_profile_data(self): 'completed_story_ids': self.STORY_IDS, 'learnt_topic_ids': self.TOPIC_IDS } - incomplete_activities_data = {} - last_playthrough_data = {} - learner_goals_data = {} + incomplete_activities_data: Dict[str, List[str]] = {} + last_playthrough_data: Dict[str, Dict[str, Union[str, int]]] = {} + learner_goals_data: Dict[str, List[str]] = {} learner_playlist_data = { 'playlist_exploration_ids': self.EXPLORATION_IDS_2, 'playlist_collection_ids': self.COLLECTION_IDS_2 diff --git a/core/domain/taskqueue_services.py b/core/domain/taskqueue_services.py index 40f226e4132e..7ef348d63ed9 100644 --- a/core/domain/taskqueue_services.py +++ b/core/domain/taskqueue_services.py @@ -24,20 +24,26 @@ from core import feconf from core.platform import models +from typing import Any, Dict, Final + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import platform_taskqueue_services + platform_taskqueue_services = models.Registry.import_taskqueue_services() # NOTE: The following constants should match the queue names in queue.yaml. # Taskqueue for backing up state. -QUEUE_NAME_BACKUPS = 'backups' +QUEUE_NAME_BACKUPS: Final = 'backups' # Default queue for processing tasks (including MapReduce ones). -QUEUE_NAME_DEFAULT = 'default' +QUEUE_NAME_DEFAULT: Final = 'default' # Taskqueue for sending email. -QUEUE_NAME_EMAILS = 'emails' +QUEUE_NAME_EMAILS: Final = 'emails' # Taskqueue for running one-off jobs. -QUEUE_NAME_ONE_OFF_JOBS = 'one-off-jobs' +QUEUE_NAME_ONE_OFF_JOBS: Final = 'one-off-jobs' # Taskqueue for updating stats models. -QUEUE_NAME_STATS = 'stats' +QUEUE_NAME_STATS: Final = 'stats' # Function identifiers inform the deferred task handler of which deferred # function should be run for the relevant task. @@ -48,19 +54,31 @@ # correct FUNCTION_ID and defer the function using that FUNCTION_ID. # 2. 
If the function does not exist in the handler, add it to the handler and # add another FUNCTION_ID to this list. -FUNCTION_ID_UPDATE_STATS = 'update_stats' -FUNCTION_ID_DELETE_EXPS_FROM_USER_MODELS = 'delete_exps_from_user_models' -FUNCTION_ID_DELETE_EXPS_FROM_ACTIVITIES = 'delete_exps_from_activities' -FUNCTION_ID_DELETE_USERS_PENDING_TO_BE_DELETED = ( +FUNCTION_ID_UPDATE_STATS: Final = 'update_stats' +FUNCTION_ID_DELETE_EXPS_FROM_USER_MODELS: Final = 'delete_exps_from_user_models' +FUNCTION_ID_DELETE_EXPS_FROM_ACTIVITIES: Final = 'delete_exps_from_activities' +FUNCTION_ID_DELETE_USERS_PENDING_TO_BE_DELETED: Final = ( 'delete_users_pending_to_be_deleted') -FUNCTION_ID_CHECK_COMPLETION_OF_USER_DELETION = ( +FUNCTION_ID_CHECK_COMPLETION_OF_USER_DELETION: Final = ( 'check_completion_of_user_deletion') -FUNCTION_ID_REGENERATE_EXPLORATION_SUMMARY = 'regenerate_exploration_summary' -FUNCTION_ID_UNTAG_DELETED_MISCONCEPTIONS = 'untag_deleted_misconceptions' -FUNCTION_ID_REMOVE_USER_FROM_RIGHTS_MODELS = 'remove_user_from_rights_models' - - -def defer(fn_identifier, queue_name, *args, **kwargs): +FUNCTION_ID_REGENERATE_EXPLORATION_SUMMARY: Final = ( + 'regenerate_exploration_summary') +FUNCTION_ID_UNTAG_DELETED_MISCONCEPTIONS: Final = 'untag_deleted_misconceptions' +FUNCTION_ID_REMOVE_USER_FROM_RIGHTS_MODELS: Final = ( + 'remove_user_from_rights_models') + + +# Here we use type Any because in defer() function '*args' points to the +# positional arguments of any other function and those arguments can be of +# type str, list, int and other types too. Similarly, '**kwargs' points to +# the keyword arguments of any other function and those can also accept +# different types of values like '*args'. +def defer( + fn_identifier: str, + queue_name: str, + *args: Any, + **kwargs: Any +) -> None: """Adds a new task to a specified deferred queue scheduled for immediate execution. @@ -74,7 +92,7 @@ def defer(fn_identifier, queue_name, *args, **kwargs): **kwargs: dict(str : *). 
Keyword arguments for fn. Raises: - Exception. The arguments and keyword arguments that are passed in are + ValueError. The arguments and keyword arguments that are passed in are not JSON serializable. """ payload = { @@ -84,11 +102,11 @@ def defer(fn_identifier, queue_name, *args, **kwargs): } try: json.dumps(payload) - except TypeError: + except TypeError as e: raise ValueError( 'The args or kwargs passed to the deferred call with ' 'function_identifier, %s, are not json serializable.' % - fn_identifier) + fn_identifier) from e # This is a workaround for a known python bug. # See https://bugs.python.org/issue7980 datetime.datetime.strptime('', '') @@ -96,7 +114,10 @@ def defer(fn_identifier, queue_name, *args, **kwargs): queue_name=queue_name, url=feconf.TASK_URL_DEFERRED, payload=payload) -def enqueue_task(url, params, countdown): +# Here we use type Any because the argument 'params' can accept payload +# dictionaries which can hold the values of type string, set, int and +# other types too. +def enqueue_task(url: str, params: Dict[str, Any], countdown: int) -> None: """Adds a new task for sending email. Args: @@ -107,13 +128,14 @@ def enqueue_task(url, params, countdown): task. Raises: - Exception. The params that are passed in are not JSON serializable. + ValueError. The params that are passed in are not JSON serializable. 
""" try: json.dumps(params) - except TypeError: + except TypeError as e: raise ValueError( - 'The params added to the email task call cannot be json serialized') + 'The params added to the email task call cannot be json serialized' + ) from e scheduled_datetime = datetime.datetime.utcnow() + datetime.timedelta( seconds=countdown) platform_taskqueue_services.create_http_task( diff --git a/core/domain/taskqueue_services_test.py b/core/domain/taskqueue_services_test.py index 1d7a6318b1bd..a9e6f87cd51b 100644 --- a/core/domain/taskqueue_services_test.py +++ b/core/domain/taskqueue_services_test.py @@ -18,25 +18,38 @@ from __future__ import annotations +import datetime + from core import feconf -from core import python_utils +from core import utils from core.domain import taskqueue_services +from core.platform import models from core.tests import test_utils +from typing import Dict, Optional, Set + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import platform_taskqueue_services + +platform_taskqueue_services = models.Registry.import_taskqueue_services() + class TaskqueueDomainServicesUnitTests(test_utils.TestBase): """Tests for domain taskqueue services.""" - def test_exception_raised_when_deferred_payload_is_not_serializable(self): + def test_exception_raised_when_deferred_payload_is_not_serializable( + self + ) -> None: class NonSerializableArgs: """Object that is not JSON serializable.""" - def __init__(self): + def __init__(self) -> None: self.x = 1 self.y = 2 arg1 = NonSerializableArgs() - serialization_exception = self.assertRaisesRegexp( + serialization_exception = self.assertRaisesRegex( ValueError, 'The args or kwargs passed to the deferred call with ' 'function_identifier, %s, are not json serializable.' 
% @@ -46,11 +59,13 @@ def __init__(self): taskqueue_services.FUNCTION_ID_UPDATE_STATS, taskqueue_services.QUEUE_NAME_DEFAULT, arg1) - def test_exception_raised_when_email_task_params_is_not_serializable(self): - params = { + def test_exception_raised_when_email_task_params_is_not_serializable( + self + ) -> None: + params: Dict[str, Set[str]] = { 'param1': set() } - serialization_exception = self.assertRaisesRegexp( + serialization_exception = self.assertRaisesRegex( ValueError, 'The params added to the email task call cannot be json serialized') with serialization_exception: @@ -59,7 +74,38 @@ def test_exception_raised_when_email_task_params_is_not_serializable(self): params, 0) - def test_enqueue_task_makes_the_correct_request(self): + def test_defer_makes_the_correct_request(self) -> None: + correct_fn_identifier = '/task/deferredtaskshandler' + correct_args = (1, 2, 3) + correct_kwargs = {'a': 'b', 'c': 'd'} + + expected_queue_name = taskqueue_services.QUEUE_NAME_EMAILS + expected_url = feconf.TASK_URL_DEFERRED + expected_payload = { + 'fn_identifier': correct_fn_identifier, + 'args': correct_args, + 'kwargs': correct_kwargs + } + + create_http_task_swap = self.swap_with_checks( + platform_taskqueue_services, + 'create_http_task', + lambda queue_name, url, payload=None, scheduled_for=None: None, + expected_kwargs=[{ + 'queue_name': expected_queue_name, + 'url': expected_url, + 'payload': expected_payload + }] + ) + + with create_http_task_swap: + taskqueue_services.defer( + correct_fn_identifier, + taskqueue_services.QUEUE_NAME_EMAILS, + *correct_args, **correct_kwargs + ) + + def test_enqueue_task_makes_the_correct_request(self) -> None: correct_payload = { 'user_id': '1' } @@ -67,8 +113,12 @@ def test_enqueue_task_makes_the_correct_request(self): correct_queue_name = taskqueue_services.QUEUE_NAME_EMAILS def mock_create_http_task( - queue_name, url, payload=None, scheduled_for=None, - task_name=None): + queue_name: str, + url: str, + payload: 
Optional[Dict[str, str]] = None, + scheduled_for: Optional[datetime.datetime] = None, + task_name: Optional[str] = None + ) -> None: self.assertEqual(queue_name, correct_queue_name) self.assertEqual(url, correct_url) self.assertEqual(payload, correct_payload) @@ -76,21 +126,21 @@ def mock_create_http_task( self.assertIsNone(task_name) swap_create_http_task = self.swap( - taskqueue_services.platform_taskqueue_services, 'create_http_task', + platform_taskqueue_services, 'create_http_task', mock_create_http_task) with swap_create_http_task: taskqueue_services.enqueue_task( correct_url, correct_payload, 0) - def test_that_queue_names_are_in_sync_with_queue_yaml_file(self): + def test_that_queue_names_are_in_sync_with_queue_yaml_file(self) -> None: """Checks that all of the queues that are instantiated in the queue.yaml file has a corresponding QUEUE_NAME_* constant instantiated in taskqueue_services. """ queue_name_dict = {} # Parse the queue.yaml file for the correct queue names. - with python_utils.open_file('queue.yaml', 'r') as f: + with utils.open_file('queue.yaml', 'r') as f: lines = f.readlines() for line in lines: if 'name' in line: diff --git a/core/domain/topic_domain.py b/core/domain/topic_domain.py index 582209e2fbc1..548cfb05a8ce 100644 --- a/core/domain/topic_domain.py +++ b/core/domain/topic_domain.py @@ -19,6 +19,7 @@ from __future__ import annotations import copy +import datetime import functools import json import re @@ -28,10 +29,15 @@ from core import utils from core.constants import constants from core.domain import change_domain -from core.domain import fs_domain -from core.domain import fs_services from core.domain import subtopic_page_domain -from core.domain import user_services + +from typing import List, Literal, Optional, TypedDict + +# The fs_services module is required in one of the migration +# functions in Topic class. This import should be removed +# once the schema migration functions are moved outside the +# domain file. 
+from core.domain import fs_services # pylint: disable=invalid-import-from # isort:skip CMD_CREATE_NEW = feconf.CMD_CREATE_NEW CMD_CHANGE_ROLE = feconf.CMD_CHANGE_ROLE @@ -56,6 +62,7 @@ TOPIC_PROPERTY_META_TAG_CONTENT = 'meta_tag_content' TOPIC_PROPERTY_PRACTICE_TAB_IS_DISPLAYED = 'practice_tab_is_displayed' TOPIC_PROPERTY_PAGE_TITLE_FRAGMENT_FOR_WEB = 'page_title_fragment_for_web' +TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST = 'skill_ids_for_diagnostic_test' SUBTOPIC_PROPERTY_TITLE = 'title' SUBTOPIC_PROPERTY_THUMBNAIL_FILENAME = 'thumbnail_filename' @@ -82,7 +89,10 @@ CMD_UPDATE_TOPIC_PROPERTY = 'update_topic_property' CMD_UPDATE_SUBTOPIC_PROPERTY = 'update_subtopic_property' -CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION = 'migrate_subtopic_schema_to_latest_version' # pylint: disable=line-too-long +CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION = ( + 'migrate_subtopic_schema_to_latest_version') +CMD_MIGRATE_STORY_REFERENCE_SCHEMA_TO_LATEST_VERSION = ( + 'migrate_story_reference_schema_to_latest_version') class TopicChange(change_domain.BaseChange): @@ -104,12 +114,14 @@ class TopicChange(change_domain.BaseChange): new_value and old_value) - 'migrate_subtopic_schema_to_latest_version' (with from_version and to_version) + - 'migrate_story_reference_schema_to_latest_version' (with + from_version and to_version) - 'create_new' (with name) """ # The allowed list of topic properties which can be used in # update_topic_property command. 
- TOPIC_PROPERTIES = ( + TOPIC_PROPERTIES: List[str] = [ TOPIC_PROPERTY_NAME, TOPIC_PROPERTY_ABBREVIATED_NAME, TOPIC_PROPERTY_DESCRIPTION, TOPIC_PROPERTY_CANONICAL_STORY_REFERENCES, @@ -120,130 +132,484 @@ class TopicChange(change_domain.BaseChange): TOPIC_PROPERTY_URL_FRAGMENT, TOPIC_PROPERTY_META_TAG_CONTENT, TOPIC_PROPERTY_PRACTICE_TAB_IS_DISPLAYED, - TOPIC_PROPERTY_PAGE_TITLE_FRAGMENT_FOR_WEB) + TOPIC_PROPERTY_PAGE_TITLE_FRAGMENT_FOR_WEB, + TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST + ] # The allowed list of subtopic properties which can be used in # update_subtopic_property command. - SUBTOPIC_PROPERTIES = ( + SUBTOPIC_PROPERTIES: List[str] = [ SUBTOPIC_PROPERTY_TITLE, SUBTOPIC_PROPERTY_THUMBNAIL_FILENAME, SUBTOPIC_PROPERTY_THUMBNAIL_BG_COLOR, - SUBTOPIC_PROPERTY_URL_FRAGMENT) + SUBTOPIC_PROPERTY_URL_FRAGMENT + ] # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. - SUBTOPIC_PAGE_PROPERTIES = ( + SUBTOPIC_PAGE_PROPERTIES: List[str] = ( subtopic_page_domain.SubtopicPageChange.SUBTOPIC_PAGE_PROPERTIES) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['name'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_ADD_SUBTOPIC, - 'required_attribute_names': ['title', 'subtopic_id'], + 'required_attribute_names': ['title', 'subtopic_id', 'url_fragment'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_DELETE_SUBTOPIC, 'required_attribute_names': ['subtopic_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_ADD_CANONICAL_STORY, 'required_attribute_names': ['story_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 
'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_DELETE_CANONICAL_STORY, 'required_attribute_names': ['story_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_REARRANGE_CANONICAL_STORY, 'required_attribute_names': ['from_index', 'to_index'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_ADD_ADDITIONAL_STORY, 'required_attribute_names': ['story_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_DELETE_ADDITIONAL_STORY, 'required_attribute_names': ['story_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_PUBLISH_STORY, 'required_attribute_names': ['story_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_UNPUBLISH_STORY, 'required_attribute_names': ['story_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_ADD_UNCATEGORIZED_SKILL_ID, 'required_attribute_names': ['new_uncategorized_skill_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_REMOVE_UNCATEGORIZED_SKILL_ID, 'required_attribute_names': ['uncategorized_skill_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': 
CMD_MOVE_SKILL_ID_TO_SUBTOPIC, 'required_attribute_names': [ 'old_subtopic_id', 'new_subtopic_id', 'skill_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_REARRANGE_SKILL_IN_SUBTOPIC, 'required_attribute_names': ['subtopic_id', 'from_index', 'to_index'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_REARRANGE_SUBTOPIC, 'required_attribute_names': ['from_index', 'to_index'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC, 'required_attribute_names': ['subtopic_id', 'skill_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_SUBTOPIC_PROPERTY, 'required_attribute_names': [ 'subtopic_id', 'property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': SUBTOPIC_PROPERTIES} + 'allowed_values': {'property_name': SUBTOPIC_PROPERTIES}, + 'deprecated_values': {} }, { 'name': subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} + 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_TOPIC_PROPERTY, 'required_attribute_names': ['property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], - 'allowed_values': {'property_name': TOPIC_PROPERTIES} + 'allowed_values': 
{'property_name': TOPIC_PROPERTIES}, + 'deprecated_values': {} }, { 'name': CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION, 'required_attribute_names': ['from_version', 'to_version'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} + }, { + 'name': CMD_MIGRATE_STORY_REFERENCE_SCHEMA_TO_LATEST_VERSION, + 'required_attribute_names': ['from_version', 'to_version'], + 'optional_attribute_names': [], + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }] +class CreateNewTopicCmd(TopicChange): + """Class representing the TopicChange's + CMD_CREATE_NEW command. + """ + + name: str + + +class AddSubtopicCmd(TopicChange): + """Class representing the TopicChange's + CMD_ADD_SUBTOPIC command. + """ + + title: str + subtopic_id: int + url_fragment: str + + +class DeleteSubtopicCmd(TopicChange): + """Class representing the TopicChange's + CMD_DELETE_SUBTOPIC command. + """ + + subtopic_id: int + + +class AddCanonicalStoryCmd(TopicChange): + """Class representing the TopicChange's + CMD_ADD_CANONICAL_STORY command. + """ + + story_id: str + + +class DeleteCanonicalStoryCmd(TopicChange): + """Class representing the TopicChange's + CMD_DELETE_CANONICAL_STORY command. + """ + + story_id: str + + +class RearrangeCanonicalStoryCmd(TopicChange): + """Class representing the TopicChange's + CMD_REARRANGE_CANONICAL_STORY command. + """ + + from_index: int + to_index: int + + +class AddAdditionalStoryCmd(TopicChange): + """Class representing the TopicChange's + CMD_ADD_ADDITIONAL_STORY command. + """ + + story_id: str + + +class DeleteAdditionalStoryCmd(TopicChange): + """Class representing the TopicChange's + CMD_DELETE_ADDITIONAL_STORY command. + """ + + story_id: str + + +class PublishStoryCmd(TopicChange): + """Class representing the TopicChange's + CMD_PUBLISH_STORY command. 
+ """ + + story_id: str + + +class UnpublishStoryCmd(TopicChange): + """Class representing the TopicChange's + CMD_UNPUBLISH_STORY command. + """ + + story_id: str + + +class AddUncategorizedSkillIdCmd(TopicChange): + """Class representing the TopicChange's + CMD_ADD_UNCATEGORIZED_SKILL_ID command. + """ + + new_uncategorized_skill_id: str + + +class RemoveUncategorizedSkillIdCmd(TopicChange): + """Class representing the TopicChange's + CMD_REMOVE_UNCATEGORIZED_SKILL_ID command. + """ + + uncategorized_skill_id: str + + +class MoveSkillIdToSubtopicCmd(TopicChange): + """Class representing the TopicChange's + CMD_MOVE_SKILL_ID_TO_SUBTOPIC command. + """ + + old_subtopic_id: int + new_subtopic_id: int + skill_id: str + + +class RearrangeSkillInSubtopicCmd(TopicChange): + """Class representing the TopicChange's + CMD_REARRANGE_SKILL_IN_SUBTOPIC command. + """ + + subtopic_id: int + from_index: int + to_index: int + + +class RearrangeSubtopicCmd(TopicChange): + """Class representing the TopicChange's + CMD_REARRANGE_SUBTOPIC command. + """ + + from_index: int + to_index: int + + +class RemoveSkillIdFromSubtopicCmd(TopicChange): + """Class representing the TopicChange's + CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC command. + """ + + subtopic_id: int + skill_id: str + + +class UpdateSubtopicPropertyCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_SUBTOPIC_PROPERTY command. + """ + + subtopic_id: int + property_name: str + new_value: str + old_value: str + + +class UpdateTopicPropertyNameCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_NAME as allowed value. + """ + + property_name: Literal['name'] + new_value: str + old_value: str + + +class UpdateTopicPropertyAbbreviatedNameCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_ABBREVIATED_NAME as allowed value. 
+ """ + + property_name: Literal['abbreviated_name'] + new_value: str + old_value: str + + +class UpdateTopicPropertyDescriptionCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_DESCRIPTION as allowed value. + """ + + property_name: Literal['description'] + new_value: str + old_value: str + + +class UpdateTopicPropertyCanonicalStoryReferencesCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_CANONICAL_STORY_REFERENCES + as allowed value. + """ + + property_name: Literal['canonical_story_references'] + new_value: List[StoryReference] + old_value: List[StoryReference] + + +class UpdateTopicPropertyAdditionalStoryReferencesCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_ADDITIONAL_STORY_REFERENCES + as allowed value. + """ + + property_name: Literal['additional_story_references'] + new_value: List[StoryReference] + old_value: List[StoryReference] + + +class UpdateTopicPropertyLanguageCodeCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_LANGUAGE_CODE as allowed value. + """ + + property_name: Literal['language_code'] + new_value: str + old_value: str + + +class UpdateTopicPropertyThumbnailFilenameCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_THUMBNAIL_FILENAME as + allowed value. + """ + + property_name: Literal['thumbnail_filename'] + new_value: str + old_value: str + + +class UpdateTopicPropertyThumbnailBGColorCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_THUMBNAIL_BG_COLOR as + allowed value. 
+ """ + + property_name: Literal['thumbnail_bg_color'] + new_value: str + old_value: str + + +class UpdateTopicPropertyUrlFragmentCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_URL_FRAGMENT as allowed value. + """ + + property_name: Literal['url_fragment'] + new_value: str + old_value: str + + +class UpdateTopicPropertyMetaTagContentCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_META_TAG_CONTENT as allowed value. + """ + + property_name: Literal['meta_tag_content'] + new_value: str + old_value: str + + +class UpdateTopicPropertyPracticeTabIsDisplayedCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_PRACTICE_TAB_IS_DISPLAYED + as allowed value. + """ + + property_name: Literal['practice_tab_is_displayed'] + new_value: bool + old_value: bool + + +class UpdateTopicPropertyTitleFragmentForWebCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_PAGE_TITLE_FRAGMENT_FOR_WEB + as allowed value. + """ + + property_name: Literal['page_title_fragment_for_web'] + new_value: str + old_value: str + + +class UpdateTopicPropertySkillIdsForDiagnosticTestCmd(TopicChange): + """Class representing the TopicChange's + CMD_UPDATE_TOPIC_PROPERTY command with + TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST + as allowed value. + """ + + property_name: Literal['skill_ids_for_diagnostic_test'] + new_value: List[str] + old_value: List[str] + + +class MigrateSubtopicSchemaToLatestVersionCmd(TopicChange): + """Class representing the TopicChange's + CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION command. + """ + + from_version: int + to_version: int + + class TopicRightsChange(change_domain.BaseChange): """Domain object for changes made to a topic rights object. 
@@ -257,10 +623,69 @@ class TopicRightsChange(change_domain.BaseChange): ALLOWED_COMMANDS = feconf.TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS +class CreateNewTopicRightsCmd(TopicRightsChange): + """Class representing the TopicRightsChange's + CMD_CREATE_NEW command. + """ + + pass + + +class ChangeRoleTopicRightsCmd(TopicRightsChange): + """Class representing the TopicRightsChange's + CMD_CHANGE_ROLE command. + """ + + assignee_id: str + new_value: str + old_value: str + + +class RemoveManagerRoleCmd(TopicRightsChange): + """Class representing the TopicRightsChange's + CMD_REMOVE_MANAGER_ROLE command. + """ + + removed_user_id: str + + +class PublishTopicCmd(TopicRightsChange): + """Class representing the TopicRightsChange's + CMD_PUBLISH_TOPIC command. + """ + + pass + + +class UnpublishTopicCmd(TopicRightsChange): + """Class representing the TopicRightsChange's + CMD_UNPUBLISH_TOPIC command. + """ + + pass + + +class DeleteCommitTopicRightsCmd(TopicRightsChange): + """Class representing the TopicRightsChange's + CMD_DELETE_COMMIT command. + """ + + pass + + +class StoryReferenceDict(TypedDict): + """Dictionary that represents StoryReference.""" + + story_id: str + story_is_published: bool + + class StoryReference: """Domain object for a Story reference.""" - def __init__(self, story_id, story_is_published): + def __init__( + self, story_id: str, story_is_published: bool + ) -> None: """Constructs a StoryReference domain object. Args: @@ -270,7 +695,7 @@ def __init__(self, story_id, story_is_published): self.story_id = story_id self.story_is_published = story_is_published - def to_dict(self): + def to_dict(self) -> StoryReferenceDict: """Returns a dict representing this StoryReference domain object. Returns: @@ -282,7 +707,9 @@ def to_dict(self): } @classmethod - def from_dict(cls, story_reference_dict): + def from_dict( + cls, story_reference_dict: StoryReferenceDict + ) -> StoryReference: """Returns a StoryReference domain object from a dict. 
Args: @@ -298,7 +725,7 @@ def from_dict(cls, story_reference_dict): return story_reference @classmethod - def create_default_story_reference(cls, story_id): + def create_default_story_reference(cls, story_id: str) -> StoryReference: """Creates a StoryReference object with default values. Args: @@ -310,29 +737,43 @@ def create_default_story_reference(cls, story_id): """ return cls(story_id, False) - def validate(self): + def validate(self) -> None: """Validates various properties of the StoryReference object. Raises: ValidationError. One or more attributes of the StoryReference are invalid. """ - if not isinstance(self.story_id, str): + if not bool(re.match(constants.ENTITY_ID_REGEX, self.story_id)): raise utils.ValidationError( - 'Expected story id to be a string, received %s' % - self.story_id) - if not isinstance(self.story_is_published, bool): - raise utils.ValidationError( - 'Expected story_is_published to be a boolean, received %s' % - self.story_is_published) + 'Invalid story ID: %s' % self.story_id) + + +class SubtopicDict(TypedDict): + """Dictionary representation of Subtopic.""" + + id: int + title: str + skill_ids: List[str] + thumbnail_filename: Optional[str] + thumbnail_bg_color: Optional[str] + thumbnail_size_in_bytes: Optional[int] + url_fragment: str class Subtopic: """Domain object for a Subtopic.""" def __init__( - self, subtopic_id, title, skill_ids, thumbnail_filename, - thumbnail_bg_color, thumbnail_size_in_bytes, url_fragment): + self, + subtopic_id: int, + title: str, + skill_ids: List[str], + thumbnail_filename: Optional[str], + thumbnail_bg_color: Optional[str], + thumbnail_size_in_bytes: Optional[int], + url_fragment: str + ) -> None: """Constructs a Subtopic domain object. Args: @@ -356,7 +797,7 @@ def __init__( self.thumbnail_size_in_bytes = thumbnail_size_in_bytes self.url_fragment = url_fragment - def to_dict(self): + def to_dict(self) -> SubtopicDict: """Returns a dict representing this Subtopic domain object. 
Returns: @@ -373,7 +814,7 @@ def to_dict(self): } @classmethod - def from_dict(cls, subtopic_dict): + def from_dict(cls, subtopic_dict: SubtopicDict) -> Subtopic: """Returns a Subtopic domain object from a dict. Args: @@ -391,21 +832,27 @@ def from_dict(cls, subtopic_dict): return subtopic @classmethod - def create_default_subtopic(cls, subtopic_id, title): + def create_default_subtopic( + cls, + subtopic_id: int, + title: str, + url_frag: str + ) -> Subtopic: """Creates a Subtopic object with default values. Args: - subtopic_id: str. ID of the new subtopic. + subtopic_id: int. ID of the new subtopic. title: str. The title for the new subtopic. + url_frag: str. The url fragment for the new subtopic. Returns: Subtopic. A subtopic object with given id, title and empty skill ids list. """ - return cls(subtopic_id, title, [], None, None, None, '') + return cls(subtopic_id, title, [], None, None, None, url_frag) @classmethod - def require_valid_thumbnail_filename(cls, thumbnail_filename): + def require_valid_thumbnail_filename(cls, thumbnail_filename: str) -> None: """Checks whether the thumbnail filename of the subtopic is a valid one. @@ -415,7 +862,7 @@ def require_valid_thumbnail_filename(cls, thumbnail_filename): utils.require_valid_thumbnail_filename(thumbnail_filename) @classmethod - def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): + def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color: str) -> bool: """Checks whether the thumbnail background color of the subtopic is a valid one. @@ -429,14 +876,15 @@ def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): return thumbnail_bg_color in constants.ALLOWED_THUMBNAIL_BG_COLORS[ 'subtopic'] - def validate(self): + def validate(self) -> None: """Validates various properties of the Subtopic object. Raises: ValidationError. One or more attributes of the subtopic are invalid. 
""" - self.require_valid_thumbnail_filename(self.thumbnail_filename) + if self.thumbnail_filename is not None: + self.require_valid_thumbnail_filename(self.thumbnail_filename) if self.thumbnail_bg_color is not None and not ( self.require_valid_thumbnail_bg_color(self.thumbnail_bg_color)): raise utils.ValidationError( @@ -453,49 +901,106 @@ def validate(self): raise utils.ValidationError( 'Subtopic thumbnail size in bytes cannot be zero.') - if not isinstance(self.id, int): - raise utils.ValidationError( - 'Expected subtopic id to be an int, received %s' % self.id) - - if not isinstance(self.title, str): - raise utils.ValidationError( - 'Expected subtopic title to be a string, received %s' % - self.title) - title_limit = android_validation_constants.MAX_CHARS_IN_SUBTOPIC_TITLE if len(self.title) > title_limit: raise utils.ValidationError( 'Expected subtopic title to be less than %d characters, ' 'received %s' % (title_limit, self.title)) - if not isinstance(self.skill_ids, list): + url_fragment_limit = ( + android_validation_constants.MAX_CHARS_IN_SUBTOPIC_URL_FRAGMENT) + regex = android_validation_constants.SUBTOPIC_URL_FRAGMENT_REGEXP + if len(self.url_fragment) > url_fragment_limit: raise utils.ValidationError( - 'Expected skill ids to be a list, received %s' % - self.skill_ids) + 'Expected subtopic url fragment to be less ' + 'than or equal to %d characters, received %s' + % (url_fragment_limit, self.url_fragment)) - for skill_id in self.skill_ids: - if not isinstance(skill_id, str): + if len(self.url_fragment) > 0: + if not bool(re.match(regex, self.url_fragment)): raise utils.ValidationError( - 'Expected each skill id to be a string, received %s' % - skill_id) + 'Invalid url fragment: %s' % self.url_fragment) + else: + raise utils.ValidationError( + 'Expected subtopic url fragment to be non ' + 'empty') if len(self.skill_ids) > len(set(self.skill_ids)): raise utils.ValidationError( 'Expected all skill ids to be distinct.') +class TopicDict(TypedDict, 
total=False): + """Dictionary that represents Topic.""" + + id: str + name: str + abbreviated_name: str + url_fragment: str + thumbnail_filename: Optional[str] + thumbnail_bg_color: Optional[str] + thumbnail_size_in_bytes: Optional[int] + description: str + canonical_story_references: List[StoryReferenceDict] + additional_story_references: List[StoryReferenceDict] + uncategorized_skill_ids: List[str] + subtopics: List[SubtopicDict] + subtopic_schema_version: int + next_subtopic_id: int + language_code: str + version: int + story_reference_schema_version: int + meta_tag_content: str + practice_tab_is_displayed: bool + page_title_fragment_for_web: str + skill_ids_for_diagnostic_test: List[str] + created_on: str + last_updated: str + + +class VersionedSubtopicsDict(TypedDict): + """Dictionary that represents versioned subtopics.""" + + schema_version: int + subtopics: List[SubtopicDict] + + +class VersionedStoryReferencesDict(TypedDict): + """Dictionary that represents versioned story references.""" + + schema_version: int + story_references: List[StoryReferenceDict] + + class Topic: """Domain object for an Oppia Topic.""" def __init__( - self, topic_id, name, abbreviated_name, url_fragment, - thumbnail_filename, thumbnail_bg_color, thumbnail_size_in_bytes, - description, canonical_story_references, - additional_story_references, uncategorized_skill_ids, - subtopics, subtopic_schema_version, next_subtopic_id, - language_code, version, story_reference_schema_version, - meta_tag_content, practice_tab_is_displayed, - page_title_fragment_for_web, created_on=None, last_updated=None): + self, + topic_id: str, + name: str, + abbreviated_name: str, + url_fragment: str, + thumbnail_filename: Optional[str], + thumbnail_bg_color: Optional[str], + thumbnail_size_in_bytes: Optional[int], + description: str, + canonical_story_references: List[StoryReference], + additional_story_references: List[StoryReference], + uncategorized_skill_ids: List[str], + subtopics: List[Subtopic], + 
subtopic_schema_version: int, + next_subtopic_id: int, + language_code: str, + version: int, + story_reference_schema_version: int, + meta_tag_content: str, + practice_tab_is_displayed: bool, + page_title_fragment_for_web: str, + skill_ids_for_diagnostic_test: List[str], + created_on: Optional[datetime.datetime] = None, + last_updated: Optional[datetime.datetime] = None + ) -> None: """Constructs a Topic domain object. Args: @@ -532,6 +1037,8 @@ def __init__( practice_tab_is_displayed: bool. Whether the practice tab is shown. page_title_fragment_for_web: str. The page title fragment in the topic viewer page. + skill_ids_for_diagnostic_test: list(str). The list of skill_id that + will be used from a topic in the diagnostic test. created_on: datetime.datetime. Date and time when the topic is created. last_updated: datetime.datetime. Date and time when the @@ -560,8 +1067,9 @@ def __init__( self.meta_tag_content = meta_tag_content self.practice_tab_is_displayed = practice_tab_is_displayed self.page_title_fragment_for_web = page_title_fragment_for_web + self.skill_ids_for_diagnostic_test = skill_ids_for_diagnostic_test - def to_dict(self): + def to_dict(self) -> TopicDict: """Returns a dict representing this Topic domain object. Returns: @@ -596,10 +1104,11 @@ def to_dict(self): self.story_reference_schema_version), 'meta_tag_content': self.meta_tag_content, 'practice_tab_is_displayed': self.practice_tab_is_displayed, - 'page_title_fragment_for_web': self.page_title_fragment_for_web + 'page_title_fragment_for_web': self.page_title_fragment_for_web, + 'skill_ids_for_diagnostic_test': self.skill_ids_for_diagnostic_test } - def serialize(self): + def serialize(self) -> str: """Returns the object serialized as a JSON string. 
Returns: @@ -629,8 +1138,12 @@ def serialize(self): @classmethod def from_dict( - cls, topic_dict, topic_version=0, topic_created_on=None, - topic_last_updated=None): + cls, + topic_dict: TopicDict, + topic_version: int = 0, + topic_created_on: Optional[datetime.datetime] = None, + topic_last_updated: Optional[datetime.datetime] = None + ) -> Topic: """Returns a Topic domain object from a dictionary. Args: @@ -672,13 +1185,14 @@ def from_dict( topic_dict['meta_tag_content'], topic_dict['practice_tab_is_displayed'], topic_dict['page_title_fragment_for_web'], + topic_dict['skill_ids_for_diagnostic_test'], topic_created_on, topic_last_updated) return topic @classmethod - def deserialize(cls, json_string): + def deserialize(cls, json_string: str) -> Topic: """Returns a Topic domain object decoded from a JSON string. Args: @@ -707,29 +1221,22 @@ def deserialize(cls, json_string): return topic @classmethod - def require_valid_topic_id(cls, topic_id): + def require_valid_topic_id(cls, topic_id: Optional[str]) -> None: """Checks whether the topic id is a valid one. Args: topic_id: str. The topic id to validate. """ - if not isinstance(topic_id, str): - raise utils.ValidationError( - 'Topic id should be a string, received: %s' % topic_id) - - if len(topic_id) != 12: + if topic_id is not None and len(topic_id) != 12: raise utils.ValidationError('Topic id %s is invalid' % topic_id) @classmethod - def require_valid_name(cls, name): + def require_valid_name(cls, name: str) -> None: """Checks whether the name of the topic is a valid one. Args: name: str. The name to validate. 
""" - if not isinstance(name, str): - raise utils.ValidationError('Name should be a string.') - if name == '': raise utils.ValidationError('Name field should not be empty') @@ -740,7 +1247,7 @@ def require_valid_name(cls, name): % (name_limit, name)) @classmethod - def require_valid_url_fragment(cls, url_fragment): + def require_valid_url_fragment(cls, url_fragment: str) -> None: """Checks whether the url fragment of the topic is a valid one. Args: @@ -751,7 +1258,7 @@ def require_valid_url_fragment(cls, url_fragment): constants.MAX_CHARS_IN_TOPIC_URL_FRAGMENT) @classmethod - def require_valid_thumbnail_filename(cls, thumbnail_filename): + def require_valid_thumbnail_filename(cls, thumbnail_filename: str) -> None: """Checks whether the thumbnail filename of the topic is a valid one. @@ -761,7 +1268,7 @@ def require_valid_thumbnail_filename(cls, thumbnail_filename): utils.require_valid_thumbnail_filename(thumbnail_filename) @classmethod - def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): + def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color: str) -> bool: """Checks whether the thumbnail background color of the topic is a valid one. @@ -775,7 +1282,7 @@ def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): return thumbnail_bg_color in constants.ALLOWED_THUMBNAIL_BG_COLORS[ 'topic'] - def get_all_skill_ids(self): + def get_all_skill_ids(self) -> List[str]: """Returns all the ids of all the skills present in the topic. Returns: @@ -787,7 +1294,7 @@ def get_all_skill_ids(self): skill_ids.extend(copy.deepcopy(subtopic.skill_ids)) return skill_ids - def publish_story(self, story_id): + def publish_story(self, story_id: str) -> None: """Marks story with the given id as published. 
Raises: @@ -804,7 +1311,7 @@ def publish_story(self, story_id): return raise Exception('Story with given id doesn\'t exist in the topic') - def unpublish_story(self, story_id): + def unpublish_story(self, story_id: str) -> None: """Marks story with the given id as unpublished. Raises: @@ -821,7 +1328,10 @@ def unpublish_story(self, story_id): return raise Exception('Story with given id doesn\'t exist in the topic') - def get_canonical_story_ids(self, include_only_published=False): + def get_canonical_story_ids( + self, + include_only_published: bool = False + ) -> List[str]: """Returns a list of canonical story ids that are part of the topic. Args: @@ -837,7 +1347,7 @@ def get_canonical_story_ids(self, include_only_published=False): ] return story_ids - def get_all_story_references(self): + def get_all_story_references(self) -> List[StoryReference]: """Returns all the story references in the topic - both canonical and additional. @@ -847,7 +1357,9 @@ def get_all_story_references(self): return ( self.canonical_story_references + self.additional_story_references) - def get_additional_story_ids(self, include_only_published=False): + def get_additional_story_ids( + self, include_only_published: bool = False + ) -> List[str]: """Returns a list of additional story ids that are part of the topic. Args: @@ -863,7 +1375,7 @@ def get_additional_story_ids(self, include_only_published=False): ] return story_ids - def get_all_uncategorized_skill_ids(self): + def get_all_uncategorized_skill_ids(self) -> List[str]: """Returns ids of all the uncategorized skills present in the topic. Returns: @@ -872,7 +1384,7 @@ def get_all_uncategorized_skill_ids(self): """ return self.uncategorized_skill_ids - def delete_canonical_story(self, story_id): + def delete_canonical_story(self, story_id: str) -> None: """Removes a story from the canonical_story_references list. 
Args: @@ -893,7 +1405,7 @@ def delete_canonical_story(self, story_id): 'The story_id %s is not present in the canonical ' 'story references list of the topic.' % story_id) - def rearrange_canonical_story(self, from_index, to_index): + def rearrange_canonical_story(self, from_index: int, to_index: int) -> None: """Rearranges or moves a canonical story to another position. Args: @@ -904,16 +1416,6 @@ def rearrange_canonical_story(self, from_index, to_index): Raises: Exception. Invalid input. """ - if not isinstance(from_index, int): - raise Exception( - 'Expected from_index value to be a number, ' - 'received %s' % from_index) - - if not isinstance(to_index, int): - raise Exception( - 'Expected to_index value to be a number, ' - 'received %s' % to_index) - if from_index == to_index: raise Exception( 'Expected from_index and to_index values to be different.') @@ -932,11 +1434,15 @@ def rearrange_canonical_story(self, from_index, to_index): self.canonical_story_references.insert( to_index, canonical_story_reference_to_move) - def add_canonical_story(self, story_id): + def add_canonical_story(self, story_id: str) -> None: """Adds a story to the canonical_story_references list. Args: story_id: str. The story id to add to the list. + + Raises: + Exception. The story ID is already present in the canonical + story references list of the topic. """ canonical_story_ids = self.get_canonical_story_ids() if story_id in canonical_story_ids: @@ -947,11 +1453,15 @@ def add_canonical_story(self, story_id): StoryReference.create_default_story_reference(story_id) ) - def add_additional_story(self, story_id): + def add_additional_story(self, story_id: str) -> None: """Adds a story to the additional_story_references list. Args: story_id: str. The story id to add to the list. + + Raises: + Exception. The story ID is already present in the additional + story references list of the topic. 
""" additional_story_ids = self.get_additional_story_ids() if story_id in additional_story_ids: @@ -962,14 +1472,14 @@ def add_additional_story(self, story_id): StoryReference.create_default_story_reference(story_id) ) - def delete_additional_story(self, story_id): + def delete_additional_story(self, story_id: str) -> None: """Removes a story from the additional_story_references list. Args: story_id: str. The story id to remove from the list. Raises: - Exception. The story_id is not present in the additional stories + Exception. The story ID is not present in the additional stories list of the topic. """ deleted = False @@ -983,7 +1493,7 @@ def delete_additional_story(self, story_id): 'The story_id %s is not present in the additional ' 'story references list of the topic.' % story_id) - def validate(self, strict=False): + def validate(self, strict: bool = False) -> None: """Validates all properties of this topic and its constituents. Args: @@ -996,11 +1506,8 @@ def validate(self, strict=False): """ self.require_valid_name(self.name) self.require_valid_url_fragment(self.url_fragment) - self.require_valid_thumbnail_filename(self.thumbnail_filename) - if not isinstance(self.practice_tab_is_displayed, bool): - raise utils.ValidationError( - 'Practice tab is displayed property should be a boolean.' - 'Received %s.' 
% self.practice_tab_is_displayed) + if self.thumbnail_filename is not None: + self.require_valid_thumbnail_filename(self.thumbnail_filename) utils.require_valid_meta_tag_content(self.meta_tag_content) utils.require_valid_page_title_fragment_for_web( self.page_title_fragment_for_web) @@ -1012,6 +1519,11 @@ def validate(self, strict=False): if self.thumbnail_bg_color and self.thumbnail_filename is None: raise utils.ValidationError( 'Topic thumbnail image is not provided.') + if self.canonical_story_references: + for reference in self.canonical_story_references: + if not isinstance(reference.story_is_published, bool): + raise utils.ValidationError( + 'story_is_published value should be boolean type') if self.thumbnail_filename and self.thumbnail_bg_color is None: raise utils.ValidationError( 'Topic thumbnail background color is not specified.') @@ -1020,10 +1532,6 @@ def validate(self, strict=False): raise utils.ValidationError( 'Expected thumbnail filename to be a string, received %s.' % self.thumbnail_filename) - if not isinstance(self.description, str): - raise utils.ValidationError( - 'Expected description to be a string, received %s' - % self.description) description_limit = ( android_validation_constants.MAX_CHARS_IN_TOPIC_DESCRIPTION) @@ -1032,26 +1540,6 @@ def validate(self, strict=False): 'Topic description should be at most %d characters, ' 'received %s.' 
% (description_limit, self.description)) - if not isinstance(self.subtopics, list): - raise utils.ValidationError( - 'Expected subtopics to be a list, received %s' - % self.subtopics) - - if not isinstance(self.next_subtopic_id, int): - raise utils.ValidationError( - 'Expected next_subtopic_id to be an int, received %s' - % self.next_subtopic_id) - - if not isinstance(self.subtopic_schema_version, int): - raise utils.ValidationError( - 'Expected subtopic schema version to be an integer, received %s' - % self.subtopic_schema_version) - - if not isinstance(self.story_reference_schema_version, int): - raise utils.ValidationError( - 'Expected story reference schema version to be an integer, ' - 'received %s' % self.story_reference_schema_version) - if (self.subtopic_schema_version != feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION): raise utils.ValidationError( @@ -1061,10 +1549,6 @@ def validate(self, strict=False): self.subtopic_schema_version)) for subtopic in self.subtopics: - if not isinstance(subtopic, Subtopic): - raise utils.ValidationError( - 'Expected each subtopic to be a Subtopic object, ' - 'received %s' % subtopic) subtopic.validate() if subtopic.id >= self.next_subtopic_id: raise utils.ValidationError( @@ -1077,6 +1561,16 @@ def validate(self, strict=False): 'Subtopic with title %s does not have any skills ' 'linked.' % subtopic.title) + all_skill_ids = self.get_all_skill_ids() + skill_ids_for_diagnostic_that_are_not_in_topic = ( + set(self.skill_ids_for_diagnostic_test) - + set(all_skill_ids)) + if len(skill_ids_for_diagnostic_that_are_not_in_topic) > 0: + raise utils.ValidationError( + 'The skill_ids %s are selected for the diagnostic test but they' + ' are not associated with the topic.' 
% + skill_ids_for_diagnostic_that_are_not_in_topic) + if strict: if len(self.subtopics) == 0: raise utils.ValidationError( @@ -1087,28 +1581,27 @@ def validate(self, strict=False): 'Subtopic url fragments are not unique across ' 'subtopics in the topic') - if not isinstance(self.language_code, str): + if ( + strict and + len(self.skill_ids_for_diagnostic_test) == 0 + ): raise utils.ValidationError( - 'Expected language code to be a string, received %s' % - self.language_code) - if not utils.is_valid_language_code(self.language_code): + 'The skill_ids_for_diagnostic_test field should not be empty.') + + if len(self.skill_ids_for_diagnostic_test) > 3: raise utils.ValidationError( - 'Invalid language code: %s' % self.language_code) + 'The skill_ids_for_diagnostic_test field should contain at ' + 'most 3 skill_ids.') - if not isinstance(self.canonical_story_references, list): + if not utils.is_valid_language_code(self.language_code): raise utils.ValidationError( - 'Expected canonical story references to be a list, received %s' - % self.canonical_story_references) + 'Invalid language code: %s' % self.language_code) canonical_story_ids = self.get_canonical_story_ids() if len(canonical_story_ids) > len(set(canonical_story_ids)): raise utils.ValidationError( 'Expected all canonical story ids to be distinct.') - if not isinstance(self.additional_story_references, list): - raise utils.ValidationError( - 'Expected additional story references to be a list, received %s' - % self.additional_story_references) additional_story_ids = self.get_additional_story_ids() if len(additional_story_ids) > len(set(additional_story_ids)): raise utils.ValidationError( @@ -1125,14 +1618,11 @@ def validate(self, strict=False): for reference in all_story_references: reference.validate() - if not isinstance(self.uncategorized_skill_ids, list): - raise utils.ValidationError( - 'Expected uncategorized skill ids to be a list, received %s' - % self.uncategorized_skill_ids) - @classmethod def 
create_default_topic( - cls, topic_id, name, url_fragment, description): + cls, topic_id: str, name: str, url_fragment: str, description: str, + page_title_frag: str + ) -> Topic: """Returns a topic domain object with default values. This is for the frontend where a default blank topic would be shown to the user when the topic is created for the first time. @@ -1142,6 +1632,7 @@ def create_default_topic( name: str. The initial name for the topic. url_fragment: str. The url fragment for the topic. description: str. The description for the topic. + page_title_frag: str. The page title fragment for web. Returns: Topic. The Topic domain object with the default values. @@ -1151,10 +1642,13 @@ def create_default_topic( description, [], [], [], [], feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, 1, constants.DEFAULT_LANGUAGE_CODE, 0, - feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION, '', False, '') + feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION, '', + False, page_title_frag, []) @classmethod - def _convert_subtopic_v3_dict_to_v4_dict(cls, topic_id, subtopic_dict): + def _convert_subtopic_v3_dict_to_v4_dict( + cls, topic_id: str, subtopic_dict: SubtopicDict + ) -> SubtopicDict: """Converts old Subtopic schema to the modern v4 schema. v4 schema introduces the thumbnail_size_in_bytes field. @@ -1167,9 +1661,7 @@ def _convert_subtopic_v3_dict_to_v4_dict(cls, topic_id, subtopic_dict): Returns: dict. The converted subtopic_dict. 
""" - file_system_class = fs_services.get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - feconf.ENTITY_TYPE_TOPIC, topic_id)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_TOPIC, topic_id) filepath = '%s/%s' % ( constants.ASSET_TYPE_THUMBNAIL, subtopic_dict['thumbnail_filename']) subtopic_dict['thumbnail_size_in_bytes'] = ( @@ -1178,7 +1670,9 @@ def _convert_subtopic_v3_dict_to_v4_dict(cls, topic_id, subtopic_dict): return subtopic_dict @classmethod - def _convert_subtopic_v2_dict_to_v3_dict(cls, subtopic_dict): + def _convert_subtopic_v2_dict_to_v3_dict( + cls, subtopic_dict: SubtopicDict + ) -> SubtopicDict: """Converts old Subtopic schema to the modern v3 schema. v3 schema introduces the url_fragment field. @@ -1195,7 +1689,9 @@ def _convert_subtopic_v2_dict_to_v3_dict(cls, subtopic_dict): return subtopic_dict @classmethod - def _convert_subtopic_v1_dict_to_v2_dict(cls, subtopic_dict): + def _convert_subtopic_v1_dict_to_v2_dict( + cls, subtopic_dict: SubtopicDict + ) -> SubtopicDict: """Converts old Subtopic schema to the modern v2 schema. v2 schema introduces the thumbnail_filename and thumbnail_bg_color field. @@ -1212,7 +1708,11 @@ def _convert_subtopic_v1_dict_to_v2_dict(cls, subtopic_dict): @classmethod def update_subtopics_from_model( - cls, versioned_subtopics, current_version, topic_id): + cls, + versioned_subtopics: VersionedSubtopicsDict, + current_version: int, + topic_id: str + ) -> None: """Converts the subtopics blob contained in the given versioned_subtopics dict from current_version to current_version + 1. 
Note that the versioned_subtopics being @@ -1245,7 +1745,10 @@ def update_subtopics_from_model( @classmethod def update_story_references_from_model( - cls, versioned_story_references, current_version): + cls, + versioned_story_references: VersionedStoryReferencesDict, + current_version: int + ) -> None: """Converts the story_references blob contained in the given versioned_story_references dict from current_version to current_version + 1. Note that the versioned_story_references being @@ -1273,7 +1776,7 @@ def update_story_references_from_model( versioned_story_references['story_references'] = ( updated_story_references) - def update_name(self, new_name): + def update_name(self, new_name: str) -> None: """Updates the name of a topic object. Args: @@ -1287,7 +1790,7 @@ def update_name(self, new_name): self.name = new_name self.canonical_name = new_name.lower() - def update_abbreviated_name(self, new_abbreviated_name): + def update_abbreviated_name(self, new_abbreviated_name: str) -> None: """Updates the abbreviated_name of a topic object. Args: @@ -1296,7 +1799,7 @@ def update_abbreviated_name(self, new_abbreviated_name): """ self.abbreviated_name = new_abbreviated_name - def update_url_fragment(self, new_url_fragment): + def update_url_fragment(self, new_url_fragment: str) -> None: """Updates the url_fragment of a topic object. Args: @@ -1304,28 +1807,22 @@ def update_url_fragment(self, new_url_fragment): """ self.url_fragment = new_url_fragment - def update_thumbnail_filename(self, new_thumbnail_filename): + def update_thumbnail_filename_and_size( + self, new_thumbnail_filename: str, new_thumbnail_size: int + ) -> None: """Updates the thumbnail filename and file size of a topic object. Args: new_thumbnail_filename: str|None. The updated thumbnail filename for the topic. + new_thumbnail_size: int. The updated thumbnail file size. 
""" - file_system_class = fs_services.get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - feconf.ENTITY_TYPE_TOPIC, self.id)) - - filepath = '%s/%s' % ( - constants.ASSET_TYPE_THUMBNAIL, new_thumbnail_filename) - if fs.isfile(filepath): - self.thumbnail_filename = new_thumbnail_filename - self.thumbnail_size_in_bytes = len(fs.get(filepath)) - else: - raise Exception( - 'The thumbnail %s for topic with id %s does not exist' - ' in the filesystem.' % (new_thumbnail_filename, self.id)) + self.thumbnail_filename = new_thumbnail_filename + self.thumbnail_size_in_bytes = new_thumbnail_size - def update_thumbnail_bg_color(self, new_thumbnail_bg_color): + def update_thumbnail_bg_color( + self, new_thumbnail_bg_color: Optional[str] + ) -> None: """Updates the thumbnail background color of a topic object. Args: @@ -1334,7 +1831,7 @@ def update_thumbnail_bg_color(self, new_thumbnail_bg_color): """ self.thumbnail_bg_color = new_thumbnail_bg_color - def update_description(self, new_description): + def update_description(self, new_description: str) -> None: """Updates the description of a topic object. Args: @@ -1342,7 +1839,7 @@ def update_description(self, new_description): """ self.description = new_description - def update_language_code(self, new_language_code): + def update_language_code(self, new_language_code: str) -> None: """Updates the language code of a topic object. Args: @@ -1350,7 +1847,7 @@ def update_language_code(self, new_language_code): """ self.language_code = new_language_code - def update_meta_tag_content(self, new_meta_tag_content): + def update_meta_tag_content(self, new_meta_tag_content: str) -> None: """Updates the meta tag content of a topic object. 
Args: @@ -1360,7 +1857,8 @@ def update_meta_tag_content(self, new_meta_tag_content): self.meta_tag_content = new_meta_tag_content def update_page_title_fragment_for_web( - self, new_page_title_fragment_for_web): + self, new_page_title_fragment_for_web: str + ) -> None: """Updates the page title fragment of a topic object. Args: @@ -1369,16 +1867,33 @@ def update_page_title_fragment_for_web( """ self.page_title_fragment_for_web = new_page_title_fragment_for_web - def update_practice_tab_is_displayed(self, new_practice_tab_is_displayed): + def update_practice_tab_is_displayed( + self, new_practice_tab_is_displayed: bool + ) -> None: """Updates the language code of a topic object. Args: - new_practice_tab_is_displayed: str. The updated practice tab is + new_practice_tab_is_displayed: bool. The updated practice tab is displayed property for the topic. """ self.practice_tab_is_displayed = new_practice_tab_is_displayed - def add_uncategorized_skill_id(self, new_uncategorized_skill_id): + def update_skill_ids_for_diagnostic_test( + self, skill_ids_for_diagnostic_test: List[str] + ) -> None: + """Updates the skill_ids_for_diagnostic_test field for the topic + instance. + + Args: + skill_ids_for_diagnostic_test: list(str). A list of skill_ids that + will be used to update skill_ids_for_diagnostic_test field for + the topic. + """ + self.skill_ids_for_diagnostic_test = skill_ids_for_diagnostic_test + + def add_uncategorized_skill_id( + self, new_uncategorized_skill_id: str + ) -> None: """Updates the skill id list of a topic object. Args: @@ -1401,7 +1916,9 @@ def add_uncategorized_skill_id(self, new_uncategorized_skill_id): self.uncategorized_skill_ids.append(new_uncategorized_skill_id) - def remove_uncategorized_skill_id(self, uncategorized_skill_id): + def remove_uncategorized_skill_id( + self, uncategorized_skill_id: str + ) -> None: """Updates the skill id list of a topic object. 
Args: @@ -1416,9 +1933,12 @@ def remove_uncategorized_skill_id(self, uncategorized_skill_id): raise Exception( 'The skill id %s is not present in the topic.' % uncategorized_skill_id) + + if uncategorized_skill_id in self.skill_ids_for_diagnostic_test: + self.skill_ids_for_diagnostic_test.remove(uncategorized_skill_id) self.uncategorized_skill_ids.remove(uncategorized_skill_id) - def get_all_subtopics(self): + def get_all_subtopics(self) -> List[SubtopicDict]: """Returns all subtopics in the topic. Returns: @@ -1430,36 +1950,43 @@ def get_all_subtopics(self): subtopics.append(subtopic.to_dict()) return subtopics - def get_subtopic_index(self, subtopic_id): + def get_subtopic_index(self, subtopic_id: int) -> int: """Gets the index of the subtopic with the given id in the subtopics list. Args: - subtopic_id: str. The id of the subtopic for which the index is to + subtopic_id: int. The id of the subtopic for which the index is to be found. Returns: - int or None. Returns the index of the subtopic if it exists or else + int. Returns the index of the subtopic if it exists or else None. + + Raises: + Exception. The subtopic does not exist. """ for ind, subtopic in enumerate(self.subtopics): if subtopic.id == subtopic_id: return ind - return None - - def add_subtopic(self, new_subtopic_id, title): + raise Exception( + 'The subtopic with id %s does not exist.' % subtopic_id) + + def add_subtopic( + self, + new_subtopic_id: int, + title: str, + url_frag: str + ) -> None: """Adds a subtopic with the given id and title. Args: new_subtopic_id: int. The id of the new subtopic. title: str. The title for the new subtopic. + url_frag: str. The url fragment of the new subtopic. Raises: - Exception. The new_subtopic_id and the expected next subtopic id - differs. - - Returns: - int. The id of the newly created subtopic. + Exception. The new subtopic ID is not equal to the expected next + subtopic ID. 
""" if self.next_subtopic_id != new_subtopic_id: raise Exception( @@ -1468,96 +1995,85 @@ def add_subtopic(self, new_subtopic_id, title): % (new_subtopic_id, self.next_subtopic_id)) self.next_subtopic_id = self.next_subtopic_id + 1 self.subtopics.append( - Subtopic.create_default_subtopic(new_subtopic_id, title)) + Subtopic.create_default_subtopic(new_subtopic_id, title, url_frag)) - def delete_subtopic(self, subtopic_id): + def delete_subtopic(self, subtopic_id: int) -> None: """Deletes the subtopic with the given id and adds all its skills to uncategorized skill ids section. Args: - subtopic_id: str. The id of the subtopic to remove. + subtopic_id: int. The id of the subtopic to remove. Raises: Exception. A subtopic with the given id doesn't exist. """ subtopic_index = self.get_subtopic_index(subtopic_id) - if subtopic_index is None: - raise Exception( - 'A subtopic with id %s doesn\'t exist. ' % subtopic_id) for skill_id in self.subtopics[subtopic_index].skill_ids: self.uncategorized_skill_ids.append(skill_id) del self.subtopics[subtopic_index] - def update_subtopic_title(self, subtopic_id, new_title): + def update_subtopic_title(self, subtopic_id: int, new_title: str) -> None: """Updates the title of the new subtopic. Args: - subtopic_id: str. The id of the subtopic to edit. + subtopic_id: int. The id of the subtopic to edit. new_title: str. The new title for the subtopic. Raises: Exception. The subtopic with the given id doesn't exist. """ subtopic_index = self.get_subtopic_index(subtopic_id) - if subtopic_index is None: - raise Exception( - 'The subtopic with id %s does not exist.' % subtopic_id) self.subtopics[subtopic_index].title = new_title - def update_subtopic_thumbnail_filename( - self, subtopic_id, new_thumbnail_filename): - """Updates the thumbnail filename property of the new subtopic. 
+ def update_subtopic_thumbnail_filename_and_size( + self, + subtopic_id: int, + new_thumbnail_filename: str, + new_thumbnail_size: int + ) -> None: + """Updates the thumbnail filename and file size property + of the new subtopic. Args: - subtopic_id: str. The id of the subtopic to edit. + subtopic_id: int. The id of the subtopic to edit. new_thumbnail_filename: str. The new thumbnail filename for the subtopic. + new_thumbnail_size: int. The updated thumbnail file size. Raises: Exception. The subtopic with the given id doesn't exist. """ subtopic_index = self.get_subtopic_index(subtopic_id) - if subtopic_index is None: - raise Exception( - 'The subtopic with id %s does not exist.' % subtopic_id) - - file_system_class = fs_services.get_entity_file_system_class() - fs = fs_domain.AbstractFileSystem(file_system_class( - feconf.ENTITY_TYPE_TOPIC, self.id)) - filepath = '%s/%s' % ( - constants.ASSET_TYPE_THUMBNAIL, new_thumbnail_filename) - if fs.isfile(filepath): - self.subtopics[subtopic_index].thumbnail_filename = ( - new_thumbnail_filename) - self.subtopics[subtopic_index].thumbnail_size_in_bytes = ( - len(fs.get(filepath))) - else: - raise Exception( - 'The thumbnail %s for subtopic with topic_id %s does not exist' - ' in the filesystem.' % (new_thumbnail_filename, self.id)) - - def update_subtopic_url_fragment(self, subtopic_id, new_url_fragment): + self.subtopics[subtopic_index].thumbnail_filename = ( + new_thumbnail_filename) + self.subtopics[subtopic_index].thumbnail_size_in_bytes = ( + new_thumbnail_size) + + def update_subtopic_url_fragment( + self, subtopic_id: int, new_url_fragment: str + ) -> None: """Updates the url fragment of the subtopic. Args: - subtopic_id: str. The id of the subtopic to edit. + subtopic_id: int. The id of the subtopic to edit. new_url_fragment: str. The new url fragment of the subtopic. + + Raises: + Exception. The subtopic with the given id doesn't exist. 
""" subtopic_index = self.get_subtopic_index(subtopic_id) - if subtopic_index is None: - raise Exception( - 'The subtopic with id %s does not exist.' % subtopic_id) utils.require_valid_url_fragment( new_url_fragment, 'Subtopic Url Fragment', constants.MAX_CHARS_IN_SUBTOPIC_URL_FRAGMENT) self.subtopics[subtopic_index].url_fragment = new_url_fragment def update_subtopic_thumbnail_bg_color( - self, subtopic_id, new_thumbnail_bg_color): + self, subtopic_id: int, new_thumbnail_bg_color: str + ) -> None: """Updates the thumbnail background color property of the new subtopic. Args: - subtopic_id: str. The id of the subtopic to edit. + subtopic_id: int. The id of the subtopic to edit. new_thumbnail_bg_color: str. The new thumbnail background color for the subtopic. @@ -1565,13 +2081,12 @@ def update_subtopic_thumbnail_bg_color( Exception. The subtopic with the given id doesn't exist. """ subtopic_index = self.get_subtopic_index(subtopic_id) - if subtopic_index is None: - raise Exception( - 'The subtopic with id %s does not exist.' % subtopic_id) self.subtopics[subtopic_index].thumbnail_bg_color = ( new_thumbnail_bg_color) - def rearrange_skill_in_subtopic(self, subtopic_id, from_index, to_index): + def rearrange_skill_in_subtopic( + self, subtopic_id: int, from_index: int, to_index: int + ) -> None: """Rearranges the skills in the subtopic with the given id. Args: @@ -1582,16 +2097,6 @@ def rearrange_skill_in_subtopic(self, subtopic_id, from_index, to_index): Raises: Exception. Invalid input. 
""" - if not isinstance(from_index, int): - raise Exception( - 'Expected from_index value to be a number, ' - 'received %s' % from_index) - - if not isinstance(to_index, int): - raise Exception( - 'Expected to_index value to be a number, ' - 'received %s' % to_index) - if from_index == to_index: raise Exception( 'Expected from_index and to_index values to be different.') @@ -1612,7 +2117,7 @@ def rearrange_skill_in_subtopic(self, subtopic_id, from_index, to_index): self.subtopics[subtopic_index].skill_ids.insert( to_index, skill_to_move) - def rearrange_subtopic(self, from_index, to_index): + def rearrange_subtopic(self, from_index: int, to_index: int) -> None: """Rearranges the subtopic in the topic. Args: @@ -1622,16 +2127,6 @@ def rearrange_subtopic(self, from_index, to_index): Raises: Exception. Invalid input. """ - if not isinstance(from_index, int): - raise Exception( - 'Expected from_index value to be a number, ' - 'received %s' % from_index) - - if not isinstance(to_index, int): - raise Exception( - 'Expected to_index value to be a number, ' - 'received %s' % to_index) - if from_index == to_index: raise Exception( 'Expected from_index and to_index values to be different.') @@ -1648,14 +2143,18 @@ def rearrange_subtopic(self, from_index, to_index): self.subtopics.insert(to_index, skill_to_move) def move_skill_id_to_subtopic( - self, old_subtopic_id, new_subtopic_id, skill_id): + self, + old_subtopic_id: Optional[int], + new_subtopic_id: int, + skill_id: str + ) -> None: """Moves the skill_id to a new subtopic or to uncategorized skill ids. Args: - old_subtopic_id: str or None. The id of the subtopic in which the + old_subtopic_id: int or None. The id of the subtopic in which the skill is present currently (before moving) or None if it is uncategorized. - new_subtopic_id: str. The id of the new subtopic to which the skill + new_subtopic_id: int. The id of the new subtopic to which the skill is to be moved. skill_id: str. The skill id which is to be moved. 
@@ -1667,9 +2166,6 @@ def move_skill_id_to_subtopic( """ if old_subtopic_id is not None: old_subtopic_index = self.get_subtopic_index(old_subtopic_id) - if old_subtopic_index is None: - raise Exception( - 'The subtopic with id %s does not exist.' % old_subtopic_id) if skill_id not in self.subtopics[old_subtopic_index].skill_ids: raise Exception( 'Skill id %s is not present in the given old subtopic' @@ -1680,9 +2176,6 @@ def move_skill_id_to_subtopic( 'Skill id %s is not an uncategorized skill id.' % skill_id) new_subtopic_index = self.get_subtopic_index(new_subtopic_id) - if new_subtopic_index is None: - raise Exception( - 'The subtopic with id %s does not exist.' % new_subtopic_id) if skill_id in self.subtopics[new_subtopic_index].skill_ids: raise Exception( 'Skill id %s is already present in the target subtopic' @@ -1695,12 +2188,14 @@ def move_skill_id_to_subtopic( self.subtopics[new_subtopic_index].skill_ids.append(skill_id) - def remove_skill_id_from_subtopic(self, subtopic_id, skill_id): + def remove_skill_id_from_subtopic( + self, subtopic_id: int, skill_id: str + ) -> None: """Removes the skill_id from a subtopic and adds it to uncategorized skill ids. Args: - subtopic_id: str. The subtopic from which the skill is + subtopic_id: int. The subtopic from which the skill is to be removed. skill_id: str. The skill id which is to be removed. @@ -1711,9 +2206,6 @@ def remove_skill_id_from_subtopic(self, subtopic_id, skill_id): """ subtopic_index = self.get_subtopic_index(subtopic_id) - if subtopic_index is None: - raise Exception( - 'The subtopic with id %s does not exist.' 
% subtopic_id) if skill_id not in self.subtopics[subtopic_index].skill_ids: raise Exception( 'Skill id %s is not present in the old subtopic' @@ -1722,7 +2214,7 @@ def remove_skill_id_from_subtopic(self, subtopic_id, skill_id): self.subtopics[subtopic_index].skill_ids.remove(skill_id) self.uncategorized_skill_ids.append(skill_id) - def are_subtopic_url_fragments_unique(self): + def are_subtopic_url_fragments_unique(self) -> bool: """Checks if all the subtopic url fragments are unique across the topic. @@ -1735,16 +2227,50 @@ def are_subtopic_url_fragments_unique(self): return len(url_fragments_list) == len(url_fragments_set) +class TopicSummaryDict(TypedDict): + """Dictionary that represents TopicSummary.""" + + id: str + name: str + url_fragment: str + language_code: str + description: str + version: int + canonical_story_count: int + additional_story_count: int + uncategorized_skill_count: int + subtopic_count: int + total_skill_count: int + total_published_node_count: int + thumbnail_filename: Optional[str] + thumbnail_bg_color: Optional[str] + topic_model_created_on: float + topic_model_last_updated: float + + class TopicSummary: """Domain object for Topic Summary.""" def __init__( - self, topic_id, name, canonical_name, language_code, description, - version, canonical_story_count, additional_story_count, - uncategorized_skill_count, subtopic_count, total_skill_count, - total_published_node_count, thumbnail_filename, - thumbnail_bg_color, url_fragment, topic_model_created_on, - topic_model_last_updated): + self, + topic_id: str, + name: str, + canonical_name: str, + language_code: str, + description: str, + version: int, + canonical_story_count: int, + additional_story_count: int, + uncategorized_skill_count: int, + subtopic_count: int, + total_skill_count: int, + total_published_node_count: int, + thumbnail_filename: Optional[str], + thumbnail_bg_color: Optional[str], + url_fragment: str, + topic_model_created_on: datetime.datetime, + topic_model_last_updated: 
datetime.datetime + ) -> None: """Constructs a TopicSummary domain object. Args: @@ -1765,8 +2291,11 @@ def __init__( (including those that are uncategorized). total_published_node_count: int. The total number of chapters that are published and associated with the stories of the topic. - thumbnail_filename: str. The filename for the topic thumbnail. - thumbnail_bg_color: str. The background color for the thumbnail. + thumbnail_filename: str|None. The filename for the topic thumbnail, + or None if no filename is provided. + thumbnail_bg_color: str|None. The background color for the + thumbnail, or None if no background color provided for + the thumbnail. url_fragment: str. The url fragment of the topic. topic_model_created_on: datetime.datetime. Date and time when the topic model is created. @@ -1792,7 +2321,7 @@ def __init__( self.url_fragment = url_fragment @classmethod - def require_valid_url_fragment(cls, url_fragment): + def require_valid_url_fragment(cls, url_fragment: str) -> None: """Checks whether the url fragment of the topic is a valid one. Args: @@ -1802,7 +2331,7 @@ def require_valid_url_fragment(cls, url_fragment): url_fragment, 'Topic URL Fragment', constants.MAX_CHARS_IN_TOPIC_URL_FRAGMENT) - def validate(self): + def validate(self) -> None: """Validates all properties of this topic summary. Raises: @@ -1810,17 +2339,11 @@ def validate(self): are not valid. 
""" self.require_valid_url_fragment(self.url_fragment) - if not isinstance(self.name, str): - raise utils.ValidationError('Name should be a string.') if self.name == '': raise utils.ValidationError('Name field should not be empty') - if not isinstance(self.description, str): - raise utils.ValidationError( - 'Expected description to be a string, received %s' - % self.description) - - utils.require_valid_thumbnail_filename(self.thumbnail_filename) + if self.thumbnail_filename is not None: + utils.require_valid_thumbnail_filename(self.thumbnail_filename) if ( self.thumbnail_bg_color is not None and not ( Topic.require_valid_thumbnail_bg_color( @@ -1835,55 +2358,29 @@ def validate(self): raise utils.ValidationError( 'Topic thumbnail background color is not specified.') - if not isinstance(self.canonical_name, str): - raise utils.ValidationError('Canonical name should be a string.') if self.canonical_name == '': raise utils.ValidationError( 'Canonical name field should not be empty') - if not isinstance(self.language_code, str): - raise utils.ValidationError( - 'Expected language code to be a string, received %s' % - self.language_code) if not utils.is_valid_language_code(self.language_code): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code) - if not isinstance(self.canonical_story_count, int): - raise utils.ValidationError( - 'Expected canonical story count to be an integer, ' - 'received \'%s\'' % self.canonical_story_count) - if self.canonical_story_count < 0: raise utils.ValidationError( 'Expected canonical_story_count to be non-negative, ' 'received \'%s\'' % self.canonical_story_count) - if not isinstance(self.additional_story_count, int): - raise utils.ValidationError( - 'Expected additional story count to be an integer, ' - 'received \'%s\'' % self.additional_story_count) - if self.additional_story_count < 0: raise utils.ValidationError( 'Expected additional_story_count to be non-negative, ' 'received \'%s\'' % 
self.additional_story_count) - if not isinstance(self.uncategorized_skill_count, int): - raise utils.ValidationError( - 'Expected uncategorized skill count to be an integer, ' - 'received \'%s\'' % self.uncategorized_skill_count) - if self.uncategorized_skill_count < 0: raise utils.ValidationError( 'Expected uncategorized_skill_count to be non-negative, ' 'received \'%s\'' % self.uncategorized_skill_count) - if not isinstance(self.total_skill_count, int): - raise utils.ValidationError( - 'Expected total skill count to be an integer, received \'%s\'' - % self.total_skill_count) - if self.total_skill_count < 0: raise utils.ValidationError( 'Expected total_skill_count to be non-negative, ' @@ -1895,27 +2392,17 @@ def validate(self): 'uncategorized_skill_count %s, received \'%s\'' % ( self.uncategorized_skill_count, self.total_skill_count)) - if not isinstance(self.total_published_node_count, int): - raise utils.ValidationError( - 'Expected total published node count to be an integer, ' - 'received \'%s\'' % self.total_published_node_count) - if self.total_published_node_count < 0: raise utils.ValidationError( 'Expected total_published_node_count to be non-negative, ' 'received \'%s\'' % self.total_published_node_count) - if not isinstance(self.subtopic_count, int): - raise utils.ValidationError( - 'Expected subtopic count to be an integer, received \'%s\'' - % self.subtopic_count) - if self.subtopic_count < 0: raise utils.ValidationError( 'Expected subtopic_count to be non-negative, ' 'received \'%s\'' % self.subtopic_count) - def to_dict(self): + def to_dict(self) -> TopicSummaryDict: """Returns a dictionary representation of this domain object. Returns: @@ -1946,7 +2433,12 @@ def to_dict(self): class TopicRights: """Domain object for topic rights.""" - def __init__(self, topic_id, manager_ids, topic_is_published): + def __init__( + self, + topic_id: str, + manager_ids: List[str], + topic_is_published: bool + ) -> None: """Constructs a TopicRights domain object. 
Args: @@ -1960,25 +2452,11 @@ def __init__(self, topic_id, manager_ids, topic_is_published): self.manager_ids = manager_ids self.topic_is_published = topic_is_published - def to_dict(self): - """Returns a dict suitable for use by the frontend. - - Returns: - dict. A dict version of TopicRights suitable for use by the - frontend. - """ - return { - 'topic_id': self.id, - 'manager_names': user_services.get_human_readable_user_ids( - self.manager_ids), - 'topic_is_published': self.topic_is_published - } - - def is_manager(self, user_id): + def is_manager(self, user_id: str) -> bool: """Checks whether given user is a manager of the topic. Args: - user_id: str or None. Id of the user. + user_id: str. Id of the user. Returns: bool. Whether user is a topic manager of this topic. diff --git a/core/domain/topic_domain_test.py b/core/domain/topic_domain_test.py index 7f3c98ffc092..87b10ade6c34 100644 --- a/core/domain/topic_domain_test.py +++ b/core/domain/topic_domain_test.py @@ -19,13 +19,11 @@ from __future__ import annotations import datetime -import os +from core import android_validation_constants from core import feconf -from core import python_utils from core import utils from core.constants import constants -from core.domain import fs_domain from core.domain import topic_domain from core.domain import user_services from core.tests import test_utils @@ -36,18 +34,19 @@ class TopicDomainUnitTests(test_utils.GenericTestBase): topic_id = 'topic_id' - def setUp(self): - super(TopicDomainUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup('a@example.com', 'A') self.signup('b@example.com', 'B') self.topic = topic_domain.Topic.create_default_topic( - self.topic_id, 'Name', 'abbrev', 'description') + self.topic_id, 'Name', 'abbrev', 'description', 'fragm') self.topic.subtopics = [ topic_domain.Subtopic( 1, 'Title', ['skill_id_1'], 'image.svg', constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-url')] 
self.topic.next_subtopic_id = 2 + self.topic.skill_ids_for_diagnostic_test = ['skill_id_1'] self.user_id_a = self.get_user_id_from_email('a@example.com') self.user_id_b = self.get_user_id_from_email('b@example.com') @@ -55,11 +54,11 @@ def setUp(self): self.user_a = user_services.get_user_actions_info(self.user_id_a) self.user_b = user_services.get_user_actions_info(self.user_id_b) - def test_create_default_topic(self): + def test_create_default_topic(self) -> None: """Tests the create_default_topic() function.""" topic = topic_domain.Topic.create_default_topic( - self.topic_id, 'Name', 'abbrev', 'description') - expected_topic_dict = { + self.topic_id, 'Name', 'abbrev', 'description', 'fragm') + expected_topic_dict: topic_domain.TopicDict = { 'id': self.topic_id, 'name': 'Name', 'abbreviated_name': 'Name', @@ -80,23 +79,24 @@ def test_create_default_topic(self): 'version': 0, 'practice_tab_is_displayed': False, 'meta_tag_content': '', - 'page_title_fragment_for_web': '' + 'page_title_fragment_for_web': 'fragm', + 'skill_ids_for_diagnostic_test': [] } self.assertEqual(topic.to_dict(), expected_topic_dict) - def test_get_all_skill_ids(self): + def test_get_all_skill_ids(self) -> None: self.topic.uncategorized_skill_ids = ['skill_id_2', 'skill_id_3'] self.assertEqual( self.topic.get_all_skill_ids(), ['skill_id_2', 'skill_id_3', 'skill_id_1']) - def test_get_all_uncategorized_skill_ids(self): + def test_get_all_uncategorized_skill_ids(self) -> None: self.topic.uncategorized_skill_ids = ['skill_id_1', 'skill_id_2'] self.assertEqual( self.topic.get_all_uncategorized_skill_ids(), ['skill_id_1', 'skill_id_2']) - def test_get_all_subtopics(self): + def test_get_all_subtopics(self) -> None: subtopics = self.topic.get_all_subtopics() self.assertEqual( subtopics, [{ @@ -108,7 +108,22 @@ def test_get_all_subtopics(self): 'title': 'Title', 'url_fragment': 'dummy-subtopic-url'}]) - def test_delete_canonical_story(self): + def 
test_get_subtopic_index_fail_with_invalid_subtopic_id(self) -> None: + with self.assertRaisesRegex( + Exception, 'The subtopic with id -2 does not exist.' + ): + self.topic.get_subtopic_index(-2) + + def test_validation_story_id_with_invalid_data(self) -> None: + story_reference = ( + topic_domain.StoryReference.create_default_story_reference( + '#6*5&A0%')) + with self.assertRaisesRegex( + utils.ValidationError, 'Invalid story ID:' + ): + story_reference.validate() + + def test_delete_canonical_story(self) -> None: self.topic.canonical_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id'), @@ -121,61 +136,43 @@ def test_delete_canonical_story(self): canonical_story_ids = self.topic.get_canonical_story_ids() self.assertEqual( canonical_story_ids, ['story_id', 'story_id_2']) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The story_id story_id_5 is not present in the canonical' ' story references list of the topic.'): self.topic.delete_canonical_story('story_id_5') - def test_rearrange_canonical_story_fail_with_invalid_from_index_value(self): - with self.assertRaisesRegexp( - Exception, 'Expected from_index value to be a number, ' - 'received None'): - self.topic.rearrange_canonical_story(None, 2) - - with self.assertRaisesRegexp( - Exception, 'Expected from_index value to be a number, ' - 'received a'): - self.topic.rearrange_canonical_story('a', 2) - - def test_rearrange_canonical_story_fail_with_invalid_to_index_value(self): - with self.assertRaisesRegexp( - Exception, 'Expected to_index value to be a number, ' - 'received None'): - self.topic.rearrange_canonical_story(1, None) - - with self.assertRaisesRegexp( - Exception, 'Expected to_index value to be a number, ' - 'received a'): - self.topic.rearrange_canonical_story(1, 'a') - - def test_rearrange_canonical_story_fail_with_out_of_bound_indexes(self): + def test_rearrange_canonical_story_fail_with_out_of_bound_indexes( + self + ) -> None: 
self.topic.canonical_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id_1') ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected from_index value to be with-in bounds.'): self.topic.rearrange_canonical_story(10, 0) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected from_index value to be with-in bounds.'): self.topic.rearrange_canonical_story(-1, 0) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected to_index value to be with-in bounds.'): self.topic.rearrange_canonical_story(0, 10) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected to_index value to be with-in bounds.'): self.topic.rearrange_canonical_story(0, -1) - def test_rearrange_canonical_story_fail_with_identical_index_values(self): - with self.assertRaisesRegexp( + def test_rearrange_canonical_story_fail_with_identical_index_values( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Expected from_index and to_index values to be ' 'different.'): self.topic.rearrange_canonical_story(1, 1) - def test_rearrange_canonical_story(self): + def test_rearrange_canonical_story(self) -> None: self.topic.canonical_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id_1'), @@ -208,52 +205,34 @@ def test_rearrange_canonical_story(self): self.assertEqual(canonical_story_ids[1], 'story_id_2') self.assertEqual(canonical_story_ids[2], 'story_id_3') - def test_rearrange_skill_in_subtopic_fail_with_invalid_from_index(self): - with self.assertRaisesRegexp( - Exception, 'Expected from_index value to be a number, ' - 'received None'): - self.topic.rearrange_skill_in_subtopic(1, None, 2) - - with self.assertRaisesRegexp( - Exception, 'Expected from_index value to be a number, ' - 'received a'): - self.topic.rearrange_skill_in_subtopic(1, 'a', 2) - - def 
test_rearrange_skill_in_subtopic_fail_with_invalid_to_index_value(self): - with self.assertRaisesRegexp( - Exception, 'Expected to_index value to be a number, ' - 'received None'): - self.topic.rearrange_skill_in_subtopic(1, 1, None) - - with self.assertRaisesRegexp( - Exception, 'Expected to_index value to be a number, ' - 'received a'): - self.topic.rearrange_skill_in_subtopic(1, 1, 'a') - - def test_rearrange_skill_in_subtopic_fail_with_out_of_bound_indexes(self): - with self.assertRaisesRegexp( + def test_rearrange_skill_in_subtopic_fail_with_out_of_bound_indexes( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Expected from_index value to be with-in bounds.'): self.topic.rearrange_skill_in_subtopic(1, 10, 1) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected from_index value to be with-in bounds.'): self.topic.rearrange_skill_in_subtopic(1, -1, 0) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected to_index value to be with-in bounds.'): self.topic.rearrange_skill_in_subtopic(1, 0, 10) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected to_index value to be with-in bounds.'): self.topic.rearrange_skill_in_subtopic(1, 0, -10) - def test_rearrange_skill_in_subtopic_fail_with_identical_index_values(self): - with self.assertRaisesRegexp( + def test_rearrange_skill_in_subtopic_fail_with_identical_index_values( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Expected from_index and to_index values to be ' 'different.'): self.topic.rearrange_skill_in_subtopic(1, 1, 1) - def test_rearrange_skill_in_subtopic(self): + def test_rearrange_skill_in_subtopic(self) -> None: self.topic.subtopics = [ topic_domain.Subtopic( 1, 'Title', ['skill_id_1', 'skill_id_2', 'skill_id_3'], @@ -282,52 +261,30 @@ def test_rearrange_skill_in_subtopic(self): self.assertEqual(skill_ids[1], 'skill_id_2') self.assertEqual(skill_ids[2], 'skill_id_3') - def 
test_rearrange_subtopic_fail_with_invalid_from_index(self): - with self.assertRaisesRegexp( - Exception, 'Expected from_index value to be a number, ' - 'received None'): - self.topic.rearrange_subtopic(None, 2) - - with self.assertRaisesRegexp( - Exception, 'Expected from_index value to be a number, ' - 'received a'): - self.topic.rearrange_subtopic('a', 2) - - def test_rearrange_subtopic_fail_with_invalid_to_index_value(self): - with self.assertRaisesRegexp( - Exception, 'Expected to_index value to be a number, ' - 'received None'): - self.topic.rearrange_subtopic(1, None) - - with self.assertRaisesRegexp( - Exception, 'Expected to_index value to be a number, ' - 'received a'): - self.topic.rearrange_subtopic(1, 'a') - - def test_rearrange_subtopic_fail_with_out_of_bound_indexes(self): - with self.assertRaisesRegexp( + def test_rearrange_subtopic_fail_with_out_of_bound_indexes(self) -> None: + with self.assertRaisesRegex( Exception, 'Expected from_index value to be with-in bounds.'): self.topic.rearrange_subtopic(10, 1) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected from_index value to be with-in bounds.'): self.topic.rearrange_subtopic(-1, 0) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected to_index value to be with-in bounds.'): self.topic.rearrange_subtopic(0, 10) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected to_index value to be with-in bounds.'): self.topic.rearrange_subtopic(0, -10) - def test_rearrange_subtopic_fail_with_identical_index_values(self): - with self.assertRaisesRegexp( + def test_rearrange_subtopic_fail_with_identical_index_values(self) -> None: + with self.assertRaisesRegex( Exception, 'Expected from_index and to_index values to be ' 'different.'): self.topic.rearrange_subtopic(1, 1) - def test_rearrange_subtopic(self): + def test_rearrange_subtopic(self) -> None: self.topic.subtopics = [ topic_domain.Subtopic( 1, 'Title1', [], None, 
None, None, 'title-one'), @@ -357,7 +314,7 @@ def test_rearrange_subtopic(self): self.assertEqual(subtopics[1].id, 2) self.assertEqual(subtopics[2].id, 3) - def test_get_all_story_references(self): + def test_get_all_story_references(self) -> None: self.topic.canonical_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id'), @@ -377,7 +334,7 @@ def test_get_all_story_references(self): self.assertEqual(all_story_references[2].story_id, 'story_id_2') self.assertEqual(all_story_references[3].story_id, 'story_id_3') - def test_add_canonical_story(self): + def test_add_canonical_story(self) -> None: self.topic.canonical_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id'), @@ -389,12 +346,12 @@ def test_add_canonical_story(self): self.assertEqual( canonical_story_ids, ['story_id', 'story_id_1', 'story_id_2']) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The story_id story_id_2 is already present in the ' 'canonical story references list of the topic.'): self.topic.add_canonical_story('story_id_2') - def test_delete_additional_story(self): + def test_delete_additional_story(self) -> None: self.topic.additional_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id'), @@ -407,13 +364,13 @@ def test_delete_additional_story(self): additional_story_ids = self.topic.get_additional_story_ids() self.assertEqual( additional_story_ids, ['story_id', 'story_id_2']) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The story_id story_id_5 is not present in the additional' ' story references list of the topic.'): self.topic.delete_additional_story('story_id_5') - def test_add_additional_story(self): + def test_add_additional_story(self) -> None: self.topic.additional_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id'), @@ -425,59 +382,86 @@ def test_add_additional_story(self): 
self.assertEqual( additional_story_ids, ['story_id', 'story_id_1', 'story_id_2']) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The story_id story_id_2 is already present in the ' 'additional story references list of the topic.'): self.topic.add_additional_story('story_id_2') - def _assert_validation_error(self, expected_error_substring): + # Here we use MyPy ignore because we override the definition of the function + # from the parent class, but that is fine as _assert_validation_error is + # supposed to be customizable and thus we add an ignore. + def _assert_validation_error( # type: ignore[override] + self, + expected_error_substring: str + ) -> None: """Checks that the topic passes strict validation.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): self.topic.validate() - def _assert_strict_validation_error(self, expected_error_substring): + def _assert_strict_validation_error( + self, + expected_error_substring: str + ) -> None: """Checks that the topic passes prepublish validation.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): self.topic.validate(strict=True) - def _assert_valid_topic_id(self, expected_error_substring, topic_id): + def _assert_valid_topic_id( + self, + expected_error_substring: str, + topic_id: str + ) -> None: """Checks that the skill passes strict validation.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): topic_domain.Topic.require_valid_topic_id(topic_id) - def _assert_valid_abbreviated_name( - self, expected_error_substring, name): + def _assert_valid_name_for_topic( + self, + expected_error_substring: str, + name: str + ) -> None: """Checks that the topic passes strict validation.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): - 
topic_domain.Topic.require_valid_abbreviated_name(name) + topic_domain.Topic.require_valid_name(name) def _assert_valid_thumbnail_filename_for_topic( - self, expected_error_substring, thumbnail_filename): + self, + expected_error_substring: str, + thumbnail_filename: str + ) -> None: """Checks that topic passes validation for thumbnail filename.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): topic_domain.Topic.require_valid_thumbnail_filename( thumbnail_filename) def _assert_valid_thumbnail_filename_for_subtopic( - self, expected_error_substring, thumbnail_filename): + self, + expected_error_substring: str, + thumbnail_filename: str + ) -> None: """Checks that subtopic passes validation for thumbnail filename.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): topic_domain.Subtopic.require_valid_thumbnail_filename( thumbnail_filename) - def test_valid_topic_id(self): - self._assert_valid_topic_id('Topic id should be a string', 10) + def test_valid_topic_id(self) -> None: self._assert_valid_topic_id('Topic id abc is invalid', 'abc') - def test_thumbnail_filename_validation_for_topic(self): - self._assert_valid_thumbnail_filename_for_topic( - 'Expected thumbnail filename to be a string, received 10', 10) + def test_valid_name_topic(self) -> None: + self._assert_valid_name_for_topic( + 'Name field should not be empty', '') + self._assert_valid_name_for_topic( + 'Topic name should be at most 39 characters, received ' + 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa') + + def test_thumbnail_filename_validation_for_topic(self) -> None: self._assert_valid_thumbnail_filename_for_topic( 'Thumbnail filename should not start with a dot.', '.name') self._assert_valid_thumbnail_filename_for_topic( @@ -491,11 +475,12 @@ def test_thumbnail_filename_validation_for_topic(self): 
self._assert_valid_thumbnail_filename_for_topic( 'Expected a filename ending in svg, received name.jpg', 'name.jpg') - def test_subtopic_strict_validation(self): + def test_subtopic_strict_validation(self) -> None: self.topic.thumbnail_filename = 'filename.svg' self.topic.thumbnail_bg_color = ( constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]) self.topic.subtopics[0].skill_ids = [] + self.topic.skill_ids_for_diagnostic_test = [] self._assert_strict_validation_error( 'Subtopic with title Title does not have any skills linked') @@ -503,36 +488,30 @@ def test_subtopic_strict_validation(self): self._assert_strict_validation_error( 'Topic should have at least 1 subtopic.') - def test_subtopic_title_validation(self): - self.topic.subtopics[0].title = 1 - self._assert_validation_error('Expected subtopic title to be a string') - + def test_subtopic_title_validation(self) -> None: self.topic.subtopics[0].title = ( 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefgh' 'ijklmnopqrstuvwxyz') self._assert_validation_error( 'Expected subtopic title to be less than 64 characters') - def test_story_id_validation(self): - self.topic.canonical_story_references = [ - topic_domain.StoryReference(123, True) - ] - self._assert_validation_error('Expected story id to be a string') + def test_subtopic_url_fragment_validation(self) -> None: + self.topic.subtopics[0].url_fragment = 'a' * 26 + self._assert_validation_error( + 'Expected subtopic url fragment to be less ' + 'than or equal to %d characters' % + android_validation_constants.MAX_CHARS_IN_SUBTOPIC_URL_FRAGMENT) - def test_story_is_published_validation(self): - self.topic.canonical_story_references = [ - topic_domain.StoryReference('story_id', 'published') - ] + self.topic.subtopics[0].url_fragment = '' self._assert_validation_error( - 'Expected story_is_published to be a boolean') + 'Expected subtopic url fragment to be non ' + 'empty') - def test_subtopic_id_validation(self): - self.topic.subtopics[0].id = 'invalid_id' - 
self._assert_validation_error('Expected subtopic id to be an int') + self.topic.subtopics[0].url_fragment = 'invalidFragment' + self._assert_validation_error( + 'Invalid url fragment: %s' % self.topic.subtopics[0].url_fragment) - def test_thumbnail_filename_validation_for_subtopic(self): - self._assert_valid_thumbnail_filename_for_subtopic( - 'Expected thumbnail filename to be a string, received 10', 10) + def test_thumbnail_filename_validation_for_subtopic(self) -> None: self._assert_valid_thumbnail_filename_for_subtopic( 'Thumbnail filename should not start with a dot.', '.name') self._assert_valid_thumbnail_filename_for_subtopic( @@ -546,19 +525,21 @@ def test_thumbnail_filename_validation_for_subtopic(self): self._assert_valid_thumbnail_filename_for_subtopic( 'Expected a filename ending in svg, received name.jpg', 'name.jpg') - def test_topic_thumbnail_filename_in_strict_mode(self): + def test_topic_thumbnail_filename_in_strict_mode(self) -> None: self.topic.thumbnail_bg_color = None - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected thumbnail filename to be a string, received None.'): self.topic.validate(strict=True) - def test_topic_thumbnail_bg_validation(self): + def test_topic_thumbnail_bg_validation(self) -> None: self.topic.thumbnail_bg_color = '#FFFFFF' self._assert_validation_error( 'Topic thumbnail background color #FFFFFF is not supported.') - def test_topic_thumbnail_filename_or_thumbnail_bg_color_is_none(self): + def test_topic_thumbnail_filename_or_thumbnail_bg_color_is_none( + self + ) -> None: self.topic.thumbnail_bg_color = '#C6DCDA' self.topic.thumbnail_filename = None self._assert_validation_error( @@ -568,12 +549,14 @@ def test_topic_thumbnail_filename_or_thumbnail_bg_color_is_none(self): self._assert_validation_error( 'Topic thumbnail background color is not specified.') - def test_subtopic_thumbnail_bg_validation(self): + def test_subtopic_thumbnail_bg_validation(self) -> None: 
self.topic.subtopics[0].thumbnail_bg_color = '#CACACA' self._assert_validation_error( 'Subtopic thumbnail background color #CACACA is not supported.') - def test_subtopic_thumbnail_filename_or_thumbnail_bg_color_is_none(self): + def test_subtopic_thumbnail_filename_or_thumbnail_bg_color_is_none( + self + ) -> None: self.topic.subtopics[0].thumbnail_bg_color = '#FFFFFF' self.topic.subtopics[0].thumbnail_filename = None self._assert_validation_error( @@ -583,88 +566,64 @@ def test_subtopic_thumbnail_filename_or_thumbnail_bg_color_is_none(self): self._assert_validation_error( 'Subtopic thumbnail background color is not specified.') - def test_subtopic_thumbnail_size_in_bytes_validation(self): + def test_subtopic_thumbnail_size_in_bytes_validation(self) -> None: self.topic.subtopics[0].thumbnail_size_in_bytes = 0 self._assert_validation_error( 'Subtopic thumbnail size in bytes cannot be zero.') - def test_topic_practice_tab_is_displayed_validation(self): - self.topic.practice_tab_is_displayed = 0 - self._assert_validation_error( - 'Practice tab is displayed property should be a boolean.' 
- 'Received 0.') - - def test_subtopic_skill_ids_validation(self): - self.topic.subtopics[0].skill_ids = 'abc' - self._assert_validation_error('Expected skill ids to be a list') + def test_subtopic_skill_ids_validation(self) -> None: self.topic.subtopics[0].skill_ids = ['skill_id', 'skill_id'] self._assert_validation_error( 'Expected all skill ids to be distinct.') - self.topic.subtopics[0].skill_ids = [1, 2] - self._assert_validation_error('Expected each skill id to be a string') - def test_subtopics_validation(self): - self.topic.subtopics = 'abc' - self._assert_validation_error('Expected subtopics to be a list') - - def test_name_validation(self): - self.topic.name = 1 - self._assert_validation_error('Name should be a string') + def test_name_validation(self) -> None: self.topic.name = '' self._assert_validation_error('Name field should not be empty') self.topic.name = 'Very long and therefore invalid topic name' self._assert_validation_error( 'Topic name should be at most 39 characters') - def test_validation_fails_with_invalid_url_fragment(self): - self.topic.url_fragment = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Topic URL Fragment field must be a string. Received 0.'): - self.topic.validate() + def test_validation_fails_with_story_is_published_set_to_non_bool_value( + self + ) -> None: + self.topic.canonical_story_references = [ + topic_domain.StoryReference.create_default_story_reference( + 'story_id') + ] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + # Here, a bool value is expected but for test purpose we're assigning it + # a string type. Thus to avoid MyPy error, we added an ignore here. 
+ self.topic.canonical_story_references[0].story_is_published = 'no' # type: ignore[assignment] + self._assert_validation_error( + 'story_is_published value should be boolean type') - def test_validation_fails_with_empty_url_fragment(self): + def test_validation_fails_with_empty_url_fragment(self) -> None: self.topic.url_fragment = '' validation_message = 'Topic URL Fragment field should not be empty.' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, validation_message): self.topic.validate() - def test_validation_fails_with_lengthy_url_fragment(self): + def test_validation_fails_with_lengthy_url_fragment(self) -> None: self.topic.url_fragment = 'a' * 25 url_fragment_char_limit = constants.MAX_CHARS_IN_TOPIC_URL_FRAGMENT validation_message = ( 'Topic URL Fragment field should not exceed %d characters, ' 'received %s.' % ( url_fragment_char_limit, self.topic.url_fragment)) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, validation_message): self.topic.validate() - def test_subtopic_schema_version_type_validation(self): - self.topic.subtopic_schema_version = 'invalid_version' - self._assert_validation_error( - 'Expected subtopic schema version to be an integer') - - def test_story_reference_schema_version_type_validation(self): - self.topic.story_reference_schema_version = 'invalid_version' - self._assert_validation_error( - 'Expected story reference schema version to be an integer') - - def test_subtopic_schema_version_validation(self): + def test_subtopic_schema_version_validation(self) -> None: self.topic.subtopic_schema_version = 0 self._assert_validation_error( 'Expected subtopic schema version to be %s' % (feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION)) - def test_subtopic_type_validation(self): - self.topic.subtopics = ['subtopic'] - self._assert_validation_error( - 'Expected each subtopic to be a Subtopic object') - - def test_description_validation(self): - self.topic.description = 1 - 
self._assert_validation_error('Expected description to be a string') + def test_description_validation(self) -> None: self.topic.description = ( 'Lorem ipsum dolor sit amet, consectetuer ' 'adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. ' @@ -675,22 +634,17 @@ def test_description_validation(self): self._assert_validation_error( 'Topic description should be at most 240 characters.') - def test_next_subtopic_id_validation(self): - self.topic.next_subtopic_id = '1' - self._assert_validation_error('Expected next_subtopic_id to be an int') + def test_next_subtopic_id_validation(self) -> None: self.topic.next_subtopic_id = 1 self._assert_validation_error( 'The id for subtopic 1 is greater than or equal to ' 'next_subtopic_id 1') - def test_language_code_validation(self): - self.topic.language_code = 0 - self._assert_validation_error('Expected language code to be a string') - + def test_language_code_validation(self) -> None: self.topic.language_code = 'xz' self._assert_validation_error('Invalid language code') - def test_canonical_story_references_validation(self): + def test_canonical_story_references_validation(self) -> None: self.topic.canonical_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id'), @@ -701,11 +655,8 @@ def test_canonical_story_references_validation(self): ] self._assert_validation_error( 'Expected all canonical story ids to be distinct.') - self.topic.canonical_story_references = 'story_id' - self._assert_validation_error( - 'Expected canonical story references to be a list') - def test_additional_story_references_validation(self): + def test_additional_story_references_validation(self) -> None: self.topic.additional_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id'), @@ -716,11 +667,8 @@ def test_additional_story_references_validation(self): ] self._assert_validation_error( 'Expected all additional story ids to be distinct.') - 
self.topic.additional_story_references = 'story_id' - self._assert_validation_error( - 'Expected additional story references to be a list') - def test_additional_canonical_story_intersection_validation(self): + def test_additional_canonical_story_intersection_validation(self) -> None: self.topic.additional_story_references = [ topic_domain.StoryReference.create_default_story_reference( 'story_id'), @@ -737,34 +685,29 @@ def test_additional_canonical_story_intersection_validation(self): 'Expected additional story ids list and canonical story ' 'ids list to be mutually exclusive.') - def test_uncategorized_skill_ids_validation(self): - self.topic.uncategorized_skill_ids = 'uncategorized_skill_id' - self._assert_validation_error( - 'Expected uncategorized skill ids to be a list') - - def test_add_uncategorized_skill_id(self): + def test_add_uncategorized_skill_id(self) -> None: self.topic.subtopics.append( topic_domain.Subtopic( - 'id_2', 'Title2', ['skill_id_2'], 'image.svg', + 1, 'Title2', ['skill_id_2'], 'image.svg', constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-title-two')) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The skill id skill_id_1 already exists in subtopic with id 1'): self.topic.add_uncategorized_skill_id('skill_id_1') self.topic.add_uncategorized_skill_id('skill_id_3') self.assertEqual(self.topic.uncategorized_skill_ids, ['skill_id_3']) - def test_remove_uncategorized_skill_id(self): + def test_remove_uncategorized_skill_id(self) -> None: self.topic.uncategorized_skill_ids = ['skill_id_5'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The skill id skill_id_3 is not present in the topic'): self.topic.remove_uncategorized_skill_id('skill_id_3') self.topic.remove_uncategorized_skill_id('skill_id_5') self.assertEqual(self.topic.uncategorized_skill_ids, []) - def test_move_skill_id_to_subtopic(self): + def test_move_skill_id_to_subtopic(self) -> None: 
self.topic.uncategorized_skill_ids = ['skill_id_1'] self.topic.subtopics[0].skill_ids = ['skill_id_2'] self.topic.move_skill_id_to_subtopic(None, 1, 'skill_id_1') @@ -774,157 +717,79 @@ def test_move_skill_id_to_subtopic(self): self.topic.uncategorized_skill_ids = ['skill_id_1'] self.topic.subtopics[0].skill_ids = ['skill_id_2'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Skill id skill_id_3 is not an uncategorized skill id'): - self.topic.move_skill_id_to_subtopic(None, 'id_1', 'skill_id_3') + self.topic.move_skill_id_to_subtopic(None, 1, 'skill_id_3') - def test_get_subtopic_index(self): - self.assertIsNone(self.topic.get_subtopic_index(2)) + def test_get_subtopic_index(self) -> None: self.assertEqual(self.topic.get_subtopic_index(1), 0) - def test_to_dict(self): - user_ids = [self.user_id_a, self.user_id_b] - topic_rights = topic_domain.TopicRights(self.topic_id, user_ids, False) - expected_dict = { - 'topic_id': self.topic_id, - 'manager_names': ['A', 'B'], - 'topic_is_published': False - } - - self.assertEqual(expected_dict, topic_rights.to_dict()) - - def test_is_manager(self): + def test_is_manager(self) -> None: user_ids = [self.user_id_a, self.user_id_b] + assert user_ids[0] is not None + assert user_ids[1] is not None topic_rights = topic_domain.TopicRights(self.topic_id, user_ids, False) self.assertTrue(topic_rights.is_manager(self.user_id_a)) self.assertTrue(topic_rights.is_manager(self.user_id_b)) self.assertFalse(topic_rights.is_manager('fakeuser')) - def test_cannot_create_topic_rights_change_class_with_invalid_cmd(self): - with self.assertRaisesRegexp( + def test_cannot_create_topic_rights_change_class_with_invalid_cmd( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Command invalid cmd is not allowed'): topic_domain.TopicRightsChange({ 'cmd': 'invalid cmd' }) def test_cannot_create_topic_rights_change_class_with_invalid_changelist( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with 
self.assertRaisesRegex( Exception, 'Missing cmd key in change dict'): topic_domain.TopicRightsChange({}) - def test_create_new_topic_rights_change_class(self): + def test_create_new_topic_rights_change_class(self) -> None: topic_rights = topic_domain.TopicRightsChange({ 'cmd': 'create_new' }) self.assertEqual(topic_rights.to_dict(), {'cmd': 'create_new'}) - def test_update_language_code(self): + def test_update_language_code(self) -> None: self.assertEqual(self.topic.language_code, 'en') self.topic.update_language_code('bn') self.assertEqual(self.topic.language_code, 'bn') - def test_update_abbreviated_name(self): + def test_update_abbreviated_name(self) -> None: self.assertEqual(self.topic.abbreviated_name, 'Name') self.topic.update_abbreviated_name('abbrev') self.assertEqual(self.topic.abbreviated_name, 'abbrev') - def test_update_thumbnail_filename(self): - self.assertEqual(self.topic.thumbnail_filename, None) - # Test exception when thumbnail is not found on filesystem. - with self.assertRaisesRegexp( - Exception, - 'The thumbnail img.svg for topic with id %s does not exist' - ' in the filesystem.' % (self.topic_id)): - self.topic.update_thumbnail_filename('img.svg') - - # Save the dummy image to the filesystem to be used as thumbnail. - with python_utils.open_file( - os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), - 'rb', encoding=None) as f: - raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_TOPIC, self.topic.id)) - fs.commit( - '%s/img.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, - mimetype='image/svg+xml') - - # Test successful update of thumbnail present in the filesystem. 
- self.topic.update_thumbnail_filename('img.svg') - self.assertEqual(self.topic.thumbnail_filename, 'img.svg') - self.assertEqual(self.topic.thumbnail_size_in_bytes, len(raw_image)) - - def test_update_thumbnail_bg_color(self): + def test_update_thumbnail_bg_color(self) -> None: self.assertEqual(self.topic.thumbnail_bg_color, None) self.topic.update_thumbnail_bg_color('#C6DCDA') self.assertEqual(self.topic.thumbnail_bg_color, '#C6DCDA') def test_cannot_add_uncategorized_skill_with_existing_uncategorized_skill( - self): + self + ) -> None: self.assertEqual(self.topic.uncategorized_skill_ids, []) self.topic.uncategorized_skill_ids = ['skill_id1'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The skill id skill_id1 is already an uncategorized skill.'): self.topic.add_uncategorized_skill_id('skill_id1') - def test_cannot_delete_subtopic_with_invalid_subtopic_id(self): - with self.assertRaisesRegexp( - Exception, 'A subtopic with id invalid_id doesn\'t exist.'): - self.topic.delete_subtopic('invalid_id') - - def test_cannot_update_subtopic_title_with_invalid_subtopic_id(self): - with self.assertRaisesRegexp( - Exception, 'The subtopic with id invalid_id does not exist.'): - self.topic.update_subtopic_title('invalid_id', 'new title') - - def test_update_subtopic_title(self): + def test_update_subtopic_title(self) -> None: self.assertEqual(len(self.topic.subtopics), 1) self.assertEqual(self.topic.subtopics[0].title, 'Title') self.topic.update_subtopic_title(1, 'new title') self.assertEqual(self.topic.subtopics[0].title, 'new title') - def test_update_subtopic_thumbnail_filename(self): - self.assertEqual(len(self.topic.subtopics), 1) - self.assertEqual( - self.topic.subtopics[0].thumbnail_filename, 'image.svg') - self.assertEqual( - self.topic.subtopics[0].thumbnail_size_in_bytes, 21131) - - # Test Exception when the thumbnail is not found in filesystem. 
- with self.assertRaisesRegexp( - Exception, 'The thumbnail %s for subtopic with topic_id %s does' - ' not exist in the filesystem.' % ( - 'new_image.svg', self.topic_id)): - self.topic.update_subtopic_thumbnail_filename(1, 'new_image.svg') - - # Test successful update of thumbnail_filename when the thumbnail - # is found in the filesystem. - with python_utils.open_file( - os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', - encoding=None) as f: - raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_TOPIC, self.topic_id)) - fs.commit( - 'thumbnail/new_image.svg', raw_image, mimetype='image/svg+xml') - self.topic.update_subtopic_thumbnail_filename(1, 'new_image.svg') - self.assertEqual( - self.topic.subtopics[0].thumbnail_filename, 'new_image.svg') - self.assertEqual( - self.topic.subtopics[0].thumbnail_size_in_bytes, len(raw_image)) - - with self.assertRaisesRegexp( - Exception, 'The subtopic with id invalid_id does not exist.'): - self.topic.update_subtopic_thumbnail_filename( - 'invalid_id', 'new title') - - def test_update_subtopic_url_fragment(self): + def test_update_subtopic_url_fragment(self) -> None: self.assertEqual(len(self.topic.subtopics), 1) self.assertEqual( self.topic.subtopics[0].url_fragment, 'dummy-subtopic-url') @@ -932,11 +797,7 @@ def test_update_subtopic_url_fragment(self): self.assertEqual( self.topic.subtopics[0].url_fragment, 'new-subtopic-url') - with self.assertRaisesRegexp( - Exception, 'The subtopic with id invalid_id does not exist.'): - self.topic.update_subtopic_url_fragment('invalid_id', 'new-url') - - def test_update_subtopic_thumbnail_bg_color(self): + def test_update_subtopic_thumbnail_bg_color(self) -> None: self.assertEqual(len(self.topic.subtopics), 1) self.topic.subtopics[0].thumbnail_bg_color = None self.assertEqual( @@ -945,24 +806,7 @@ def test_update_subtopic_thumbnail_bg_color(self): self.assertEqual( self.topic.subtopics[0].thumbnail_bg_color, '#FFFFFF') - 
with self.assertRaisesRegexp( - Exception, 'The subtopic with id invalid_id does not exist.'): - self.topic.update_subtopic_thumbnail_bg_color( - 'invalid_id', '#FFFFFF') - - def test_cannot_remove_skill_id_from_subtopic_with_invalid_subtopic_id( - self): - with self.assertRaisesRegexp( - Exception, 'The subtopic with id invalid_id does not exist.'): - self.topic.remove_skill_id_from_subtopic('invalid_id', 'skill_id1') - - def test_cannot_move_skill_id_to_subtopic_with_invalid_subtopic_id(self): - with self.assertRaisesRegexp( - Exception, 'The subtopic with id old_subtopic_id does not exist.'): - self.topic.move_skill_id_to_subtopic( - 'old_subtopic_id', 'new_subtopic_id', 'skill_id1') - - def test_cannot_move_existing_skill_to_subtopic(self): + def test_cannot_move_existing_skill_to_subtopic(self) -> None: self.topic.subtopics = [ topic_domain.Subtopic( 1, 'Title', ['skill_id_1'], 'image.svg', @@ -972,12 +816,543 @@ def test_cannot_move_existing_skill_to_subtopic(self): 2, 'Another title', ['skill_id_1'], 'image.svg', constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-two')] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Skill id skill_id_1 is already present in the target subtopic'): self.topic.move_skill_id_to_subtopic(1, 2, 'skill_id_1') - def test_topic_export_import_returns_original_object(self): + def test_skill_id_not_present_old_subtopic(self) -> None: + self.topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-one'), + topic_domain.Subtopic( + 2, 'Another title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-two')] + with self.assertRaisesRegex( + Exception, + 'Skill id skill_not_exist is not present in the given old subtopic' + ): + self.topic.move_skill_id_to_subtopic(1, 2, 'skill_not_exist') + + def 
test_validate_topic_bad_story_reference(self) -> None: + self.topic.canonical_story_references = [ + topic_domain.StoryReference.create_default_story_reference( + 'story_id'), + topic_domain.StoryReference.create_default_story_reference( + 'story_id_1') + ] + self.topic.additional_story_references = [ + topic_domain.StoryReference.create_default_story_reference( + 'story_id_2#'), + topic_domain.StoryReference.create_default_story_reference( + 'story_id_3') + ] + with self.assertRaisesRegex( + utils.ValidationError, + 'Invalid story ID: story_id_2#' + ): + self.topic.validate() + + def test_story_ref_to_dict(self) -> None: + test_story_dict = { + 'story_id': 'story_id_1', + 'story_is_published': False + } + story_ref_obj = ( + topic_domain.StoryReference. + create_default_story_reference('story_id_1') + ) + story_ref_dict = story_ref_obj.to_dict() + self.assertDictEqual(test_story_dict, story_ref_dict) + + def test_story_ref_from_dict(self) -> None: + test_story_dict = topic_domain.StoryReference( + 'story_id_1', False + ).to_dict() + test_story_obj = topic_domain.StoryReference.from_dict(test_story_dict) + self.assertEqual(test_story_obj.story_id, 'story_id_1') + self.assertEqual(test_story_obj.story_is_published, False) + + def test_create_default_subtopic(self) -> None: + subtopic_id = 1 + subtopic_title = 'subtopic_title' + url_frag = 'url_frag' + subtopic_obj = topic_domain.Subtopic.create_default_subtopic( + subtopic_id, + subtopic_title, + url_frag + ) + self.assertEqual(subtopic_id, subtopic_obj.id) + self.assertEqual(subtopic_title, subtopic_obj.title) + self.assertEqual(url_frag, subtopic_obj.url_fragment) + + def test_remove_skill_id_not_present_exception(self) -> None: + skill_id = 'skill_id_123' + topic = self.topic + with self.assertRaisesRegex( + Exception, + 'Skill id %s is not present in the old subtopic' % skill_id + ): + topic.remove_skill_id_from_subtopic(1, skill_id) + + def test_update_subtopic_thumbnail(self) -> None: + """Tests that when we 
update the subtopic thumbail size + and filename that those attributes of the object come + back with the updated values. + """ + self.topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-one' + ), + topic_domain.Subtopic( + 2, 'Another title', ['skill_id_2'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-two' + ) + ] + new_filename = 'new_filename.svg' + new_filesize = 12345 + subtopic_index = self.topic.get_subtopic_index(1) + self.assertNotEqual( + new_filename, + self.topic.subtopics[subtopic_index].thumbnail_filename + ) + self.assertNotEqual( + new_filesize, + self.topic.subtopics[subtopic_index].thumbnail_size_in_bytes + ) + self.topic.update_subtopic_thumbnail_filename_and_size( + 1, new_filename, new_filesize + ) + self.assertEqual( + new_filename, + self.topic.subtopics[subtopic_index].thumbnail_filename + ) + self.assertEqual( + new_filesize, + self.topic.subtopics[subtopic_index].thumbnail_size_in_bytes + ) + + def test_delete_subtopic(self) -> None: + """Tests that when we delete a subtopic, its skill_id gets moved to + uncategorized, that subtopic doesn't exist on the topic and that + there are the correct number of subtopics on the topic. 
+ """ + subtopic_id_to_delete = 1 + skill_id_moved = 'skill_id_1' + self.topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-one'), + topic_domain.Subtopic( + 2, 'Another title', ['skill_id_2'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-two')] + self.assertNotEqual(1, len(self.topic.subtopics)) + self.assertNotEqual( + [skill_id_moved], + self.topic.uncategorized_skill_ids + ) + self.topic.delete_subtopic(subtopic_id_to_delete) + self.assertEqual(1, len(self.topic.subtopics)) + self.assertEqual([skill_id_moved], self.topic.uncategorized_skill_ids) + with self.assertRaisesRegex( + Exception, + 'The subtopic with id %s does not exist.' % subtopic_id_to_delete + ): + self.topic.get_subtopic_index(1) + + def test_move_skill_id_from_subtopic_to_subtopic(self) -> None: + """Checks that move_skill_id_to_subtopic works when moving a skill_id + from an existing subtopic to a new subtopic returns the expected + updated values for skill_ids associated with each subtopic. 
+ """ + expected_subtopic1_skills: list[str] = [] + expected_subtopic2_skills = ['skill_id_2', 'skill_id_1'] + self.topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-one'), + topic_domain.Subtopic( + 2, 'Another title', ['skill_id_2'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-two')] + self.assertNotEqual( + self.topic.subtopics[0].skill_ids, + expected_subtopic1_skills + ) + self.assertNotEqual( + self.topic.subtopics[1].skill_ids, + expected_subtopic2_skills + ) + self.topic.move_skill_id_to_subtopic(1, 2, 'skill_id_1') + self.assertEqual( + self.topic.subtopics[0].skill_ids, + expected_subtopic1_skills + ) + self.assertEqual( + self.topic.subtopics[1].skill_ids, + expected_subtopic2_skills + ) + + def test_move_skill_id_from_uncategorized_to_subtopic(self) -> None: + """Checks that move_skill_id_to_subtopic works when moving a skill_id + from an existing subtopic to a new subtopic returns the expected + updated values for skill_ids associated with each subtopic. 
+ """ + expected_subtopic_skills = ['skill_id_2', 'skill_id_3'] + expected_uncategorized_skills: list[str] = [] + self.topic.uncategorized_skill_ids = ['skill_id_3'] + self.topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-one'), + topic_domain.Subtopic( + 2, 'Another title', ['skill_id_2'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-two')] + self.assertNotEqual( + self.topic.subtopics[1].skill_ids, + expected_subtopic_skills + ) + self.assertNotEqual( + self.topic.uncategorized_skill_ids, + expected_uncategorized_skills + ) + self.topic.move_skill_id_to_subtopic(None, 2, 'skill_id_3') + self.assertEqual( + self.topic.subtopics[1].skill_ids, + expected_subtopic_skills + ) + self.assertEqual( + self.topic.uncategorized_skill_ids, + expected_uncategorized_skills + ) + + def test_add_subtopic(self) -> None: + """Checkts that if next_subtopic_id isn't correct + an exception is raised. Also checks for the sub topic + getting added to the topic. 
+ """ + incorrect_new_subtopic_id = 3 + correct_new_subtopic_id = 2 + expected_subtopic_id = self.topic.next_subtopic_id + with self.assertRaisesRegex( + Exception, + 'The given new subtopic id %s is not equal to the expected next ' + 'subtopic id: %s' % ( + incorrect_new_subtopic_id, + expected_subtopic_id + ) + ): + self.topic.add_subtopic( + incorrect_new_subtopic_id, + 'subtopic_3', + 'url_frag' + ) + self.topic.add_subtopic( + correct_new_subtopic_id, + 'subtopic_title', + 'url_frag' + ) + self.assertEqual(2, len(self.topic.subtopics)) + + def test_update_practice_tab_is_displayed(self) -> None: + self.assertFalse(self.topic.practice_tab_is_displayed) + self.topic.update_practice_tab_is_displayed(True) + self.assertTrue(self.topic.practice_tab_is_displayed) + + def test_update_page_title_fragment_for_web(self) -> None: + updated_frag = 'updated fragment' + self.assertNotEqual( + self.topic.page_title_fragment_for_web, + updated_frag + ) + self.topic.update_page_title_fragment_for_web(updated_frag) + self.assertEqual(self.topic.page_title_fragment_for_web, updated_frag) + + def test_update_meta_tag_content(self) -> None: + updated_meta_tag = 'updated meta tag' + self.assertNotEqual(self.topic.meta_tag_content, updated_meta_tag) + self.topic.update_meta_tag_content(updated_meta_tag) + self.assertEqual(self.topic.meta_tag_content, updated_meta_tag) + + def test_update_description(self) -> None: + updated_desc = 'updated description' + self.assertNotEqual(self.topic.description, updated_desc) + self.topic.update_description(updated_desc) + self.assertEqual(self.topic.description, updated_desc) + + def test_update_thumbnail_file_and_size(self) -> None: + updated_file_name = 'file_name.svg' + updated_size = 1234 + self.assertNotEqual(self.topic.thumbnail_filename, updated_file_name) + self.assertNotEqual(self.topic.thumbnail_size_in_bytes, updated_size) + self.topic.update_thumbnail_filename_and_size( + updated_file_name, + updated_size + ) + 
self.assertEqual(self.topic.thumbnail_filename, updated_file_name) + self.assertEqual(self.topic.thumbnail_size_in_bytes, updated_size) + + def test_update_url_fragment(self) -> None: + url_frag = 'url fragment' + self.assertNotEqual(self.topic.url_fragment, url_frag) + self.topic.update_url_fragment(url_frag) + self.assertEqual(self.topic.url_fragment, url_frag) + + def test_update_name(self) -> None: + updated_name = 'updated name' + self.assertNotEqual(self.topic.name, updated_name) + self.topic.update_name(updated_name) + self.assertEqual(self.topic.name, updated_name) + + def test_update_name_bytes(self) -> None: + updated_name = b'updated name' + with self.assertRaisesRegex( + utils.ValidationError, + 'Name should be a string.' + ): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. + self.topic.update_name(updated_name) # type: ignore[arg-type] + + @classmethod + def _schema_update_vers_dict( + cls, + current_schema: int, + topic: topic_domain.Topic + ) -> topic_domain.VersionedSubtopicsDict: + """Sets up the VersionendSubtopicsDict for the schema update tests.""" + topic.update_subtopic_title(1, 'abcdefghijklmnopqrstuvwxyz') + subtopic_dict = topic.subtopics[topic.get_subtopic_index(1)].to_dict() + vers_subtopic_dict = topic_domain.VersionedSubtopicsDict( + { + 'schema_version': current_schema, + 'subtopics': [subtopic_dict] + } + ) + topic.update_subtopics_from_model( + vers_subtopic_dict, + current_schema, + topic.id + ) + return vers_subtopic_dict + + def test_subtopic_schema_v1_to_v2(self) -> None: + current_schema = 1 + vers_subtopic_dict = TopicDomainUnitTests._schema_update_vers_dict( + current_schema, + self.topic + ) + self.assertEqual( + vers_subtopic_dict['subtopics'][0]['thumbnail_filename'], + None + ) + self.assertEqual( + vers_subtopic_dict['subtopics'][0]['thumbnail_bg_color'], + None + ) + 
self.assertEqual( + vers_subtopic_dict['schema_version'], + current_schema + 1 + ) + + def test_subtopic_schema_v2_to_v3(self) -> None: + expected_frag = 'abcdefghijklmnopqrstuvwxy' + current_schema = 2 + vers_subtopic_dict = TopicDomainUnitTests._schema_update_vers_dict( + current_schema, + self.topic + ) + self.assertEqual( + vers_subtopic_dict['subtopics'][0]['url_fragment'], + expected_frag + ) + self.assertEqual( + vers_subtopic_dict['schema_version'], + current_schema + 1 + ) + + def test_subtopic_schema_v3_to_v4(self) -> None: + current_schema = 3 + self.topic.thumbnail_size_in_bytes = 12345 + vers_subtopic_dict = TopicDomainUnitTests._schema_update_vers_dict( + current_schema, + self.topic + ) + self.assertEqual( + vers_subtopic_dict['subtopics'][0]['thumbnail_size_in_bytes'], + None + ) + + class MockTopicObject(topic_domain.Topic): + """Mocks Topic domain object.""" + + @classmethod + def _convert_story_reference_v1_dict_to_v2_dict( + cls, story_reference: topic_domain.StoryReferenceDict + ) -> topic_domain.StoryReferenceDict: + """Converts v1 story reference dict to v2.""" + return story_reference + + def test_story_schema_update(self) -> None: + story_id = 'story_id' + story_published = True + schema_version = 1 + story_ref_dict = topic_domain.StoryReference( + story_id, + story_published + ).to_dict() + vers_story_ref_dict = topic_domain.VersionedStoryReferencesDict( + { + 'schema_version': 1, + 'story_references': [story_ref_dict] + } + ) + swap_topic_object = self.swap( + topic_domain, + 'Topic', + self.MockTopicObject + ) + current_schema_version_swap = self.swap( + feconf, 'CURRENT_STORY_REFERENCE_SCHEMA_VERSION', 2) + with swap_topic_object, current_schema_version_swap: + topic_domain.Topic.update_story_references_from_model( + vers_story_ref_dict, + schema_version + ) + self.assertEqual( + vers_story_ref_dict['schema_version'], + 2 + ) + + def test_is_valid_topic_id(self) -> None: + """This test is needed for complete branch coverage. 
+ We need to go from the if statement and directly exit + the method. + """ + topic_id = 'abcdefghijkl' + try: + topic_domain.Topic.require_valid_topic_id(topic_id) + except utils.ValidationError: + self.fail('This test should pass and not raise an exception') + + def test_invalid_topic_id(self) -> None: + topic_id = 'a' + with self.assertRaisesRegex( + utils.ValidationError, + 'Topic id %s is invalid' % topic_id + ): + topic_domain.Topic.require_valid_topic_id(topic_id) + + def _setup_stories(self, topic: topic_domain.Topic) -> None: + """This setups up stories for various story tests.""" + topic.canonical_story_references = [ + topic_domain.StoryReference.create_default_story_reference( + 'story_id'), + topic_domain.StoryReference.create_default_story_reference( + 'story_id_1'), + topic_domain.StoryReference.create_default_story_reference( + 'story_id_2') + ] + topic.additional_story_references = [ + topic_domain.StoryReference.create_default_story_reference( + 'story_id_10'), + topic_domain.StoryReference.create_default_story_reference( + 'story_id_11'), + topic_domain.StoryReference.create_default_story_reference( + 'story_id_12') + ] + + def test_publish_story(self) -> None: + topic = self.topic + self._setup_stories(topic) + topic.publish_story('story_id') + self.assertEqual( + topic.canonical_story_references[0].story_is_published, + True + ) + topic.publish_story('story_id_10') + self.assertEqual( + topic.additional_story_references[0].story_is_published, + True + ) + + def test_publish_story_not_exist(self) -> None: + topic = self.topic + self._setup_stories(topic) + with self.assertRaisesRegex( + Exception, + 'Story with given id doesn\'t exist in the topic' + ): + topic.publish_story('story_id_110') + + def test_unpublish_story(self) -> None: + topic = self.topic + self._setup_stories(topic) + topic.publish_story('story_id_11') + topic.unpublish_story('story_id_11') + topic.publish_story('story_id') + topic.unpublish_story('story_id') + self.assertEqual( 
+ topic.additional_story_references[0].story_is_published, + False + ) + self.assertEqual( + topic.canonical_story_references[1].story_is_published, + False + ) + + def test_validate_same_subtopic_url(self) -> None: + self.topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-one'), + topic_domain.Subtopic( + 1, 'Another title', ['skill_id_2'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-two')] + self.topic.subtopics[0].url_fragment = 'abc' + self.topic.subtopics[1].url_fragment = 'abc' + with self.assertRaisesRegex( + utils.ValidationError, + 'Subtopic url fragments are not unique across ' + 'subtopics in the topic' + ): + self.topic.validate() + + def test_validate_no_story_references(self) -> None: + """This is needed for branch coverage when there are no + story references and validate is run on a topic. + """ + self.topic.canonical_story_references = [] + self.topic.additional_story_references = [] + try: + self.topic.validate() + except Exception: + self.fail('There are no story references for topic') + + def test_unpublish_story_not_exist(self) -> None: + topic = self.topic + self._setup_stories(topic) + with self.assertRaisesRegex( + Exception, + 'Story with given id doesn\'t exist in the topic' + ): + topic.unpublish_story('story_id_110') + + def test_topic_export_import_returns_original_object(self) -> None: """Checks that to_dict and from_dict preserves all the data within a Topic during export and import. 
""" @@ -985,7 +1360,7 @@ def test_topic_export_import_returns_original_object(self): topic_from_dict = topic_domain.Topic.from_dict(topic_dict) self.assertEqual(topic_from_dict.to_dict(), topic_dict) - def test_serialize_and_deserialize_returns_unchanged_topic(self): + def test_serialize_and_deserialize_returns_unchanged_topic(self) -> None: """Checks that serializing and then deserializing a default topic works as intended by leaving the topic unchanged. """ @@ -994,21 +1369,96 @@ def test_serialize_and_deserialize_returns_unchanged_topic(self): topic_domain.Topic.deserialize( self.topic.serialize()).to_dict()) + def test_serialize_with_created_on_last_updated_set(self) -> None: + """Checks that serializing and then deserializing a default topic + works as intended by leaving the topic unchanged. Added values + for self.topic.created_on and last_updated. + """ + self.topic.created_on = datetime.datetime.now() + self.topic.last_updated = datetime.datetime.now() + self.assertEqual( + self.topic.to_dict(), + topic_domain.Topic.deserialize( + self.topic.serialize()).to_dict()) + + def test_skill_ids_for_diagnostic_test_update(self) -> None: + """Checks the update method for the skill_ids_for_diagnostic_test field + for a topic. + """ + self.topic.subtopics[0].skill_ids = [] + self.assertEqual( + self.topic.skill_ids_for_diagnostic_test, ['skill_id_1']) + self.topic.update_skill_ids_for_diagnostic_test([]) + self.assertEqual(self.topic.skill_ids_for_diagnostic_test, []) + + def test_skill_ids_for_diagnostic_test_validation(self) -> None: + """Checks the validation of skill_ids_for_diagnostic_test field + for a topic. 
+ """ + self.topic.update_skill_ids_for_diagnostic_test(['test_skill_id']) + error_msg = ( + 'The skill_ids {\'test_skill_id\'} are selected for the ' + 'diagnostic test but they are not associated with the topic.') + self._assert_validation_error(error_msg) + + def test_min_skill_ids_for_diagnostic_test_validation(self) -> None: + """Validates empty skill_ids_for_diagnostic_test field must raise + exception. + """ + self.topic.thumbnail_filename = 'filename.svg' + self.topic.thumbnail_bg_color = ( + constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]) + self.topic.skill_ids_for_diagnostic_test = [] + error_msg = ( + 'The skill_ids_for_diagnostic_test field should not be empty.') + self._assert_strict_validation_error(error_msg) + + def test_max_skill_ids_for_diagnostic_test_validation(self) -> None: + """Validates maximum length for the skill_ids_for_diagnostic_test field + for a topic. + """ + skill_ids = ['skill_1', 'skill_2', 'skill_3', 'skill_4'] + self.topic.subtopics[0].skill_ids = skill_ids + self.topic.skill_ids_for_diagnostic_test = skill_ids + error_msg = ( + 'The skill_ids_for_diagnostic_test field should contain at most 3 ' + 'skill_ids.') + self._assert_validation_error(error_msg) + + def test_removing_uncatgorized_skill_removes_diagnostic_test_skill_if_any( + self + ) -> None: + """Validates the skill id removal from uncategorized skills must also + remove from the diagnostic tests if any. 
+ """ + self.assertEqual(self.topic.uncategorized_skill_ids, []) + + self.topic.remove_skill_id_from_subtopic(1, 'skill_id_1') + self.assertEqual( + self.topic.skill_ids_for_diagnostic_test, ['skill_id_1']) + self.assertEqual(self.topic.uncategorized_skill_ids, ['skill_id_1']) + self.assertEqual( + self.topic.skill_ids_for_diagnostic_test, ['skill_id_1']) + + self.topic.remove_uncategorized_skill_id('skill_id_1') + self.assertEqual(self.topic.uncategorized_skill_ids, []) + self.assertEqual(self.topic.skill_ids_for_diagnostic_test, []) + class TopicChangeTests(test_utils.GenericTestBase): - def test_topic_change_object_with_missing_cmd(self): - with self.assertRaisesRegexp( + def test_topic_change_object_with_missing_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Missing cmd key in change dict'): topic_domain.TopicChange({'invalid': 'data'}) - def test_topic_change_object_with_invalid_cmd(self): - with self.assertRaisesRegexp( + def test_topic_change_object_with_invalid_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Command invalid is not allowed'): topic_domain.TopicChange({'cmd': 'invalid'}) - def test_topic_change_object_with_missing_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_topic_change_object_with_missing_attribute_in_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following required attributes are missing: ' 'new_value, old_value')): @@ -1017,19 +1467,20 @@ def test_topic_change_object_with_missing_attribute_in_cmd(self): 'property_name': 'name', }) - def test_topic_change_object_with_extra_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_topic_change_object_with_extra_attribute_in_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following extra attributes are present: invalid')): topic_domain.TopicChange({ 'cmd': 'add_subtopic', 'title': 'title', 'subtopic_id': 'subtopic_id', + 'url_fragment': 
'url-fragment', 'invalid': 'invalid' }) - def test_topic_change_object_with_invalid_topic_property(self): - with self.assertRaisesRegexp( + def test_topic_change_object_with_invalid_topic_property(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd update_topic_property: ' 'invalid is not allowed')): @@ -1040,8 +1491,8 @@ def test_topic_change_object_with_invalid_topic_property(self): 'new_value': 'new_value', }) - def test_topic_change_object_with_invalid_subtopic_property(self): - with self.assertRaisesRegexp( + def test_topic_change_object_with_invalid_subtopic_property(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd update_subtopic_property: ' 'invalid is not allowed')): @@ -1053,8 +1504,10 @@ def test_topic_change_object_with_invalid_subtopic_property(self): 'new_value': 'new_value', }) - def test_topic_change_object_with_invalid_subtopic_page_property(self): - with self.assertRaisesRegexp( + def test_topic_change_object_with_invalid_subtopic_page_property( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for property_name in cmd update_subtopic_page_property: ' 'invalid is not allowed')): @@ -1066,18 +1519,20 @@ def test_topic_change_object_with_invalid_subtopic_page_property(self): 'new_value': 'new_value', }) - def test_topic_change_object_with_add_subtopic(self): + def test_topic_change_object_with_add_subtopic(self) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'add_subtopic', 'subtopic_id': 'subtopic_id', - 'title': 'title' + 'title': 'title', + 'url_fragment': 'url-fragment' }) self.assertEqual(topic_change_object.cmd, 'add_subtopic') self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id') self.assertEqual(topic_change_object.title, 'title') + self.assertEqual(topic_change_object.url_fragment, 'url-fragment') - def test_topic_change_object_with_delete_subtopic(self): + def 
test_topic_change_object_with_delete_subtopic(self) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'delete_subtopic', 'subtopic_id': 'subtopic_id' @@ -1086,7 +1541,7 @@ def test_topic_change_object_with_delete_subtopic(self): self.assertEqual(topic_change_object.cmd, 'delete_subtopic') self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id') - def test_topic_change_object_with_add_uncategorized_skill_id(self): + def test_topic_change_object_with_add_uncategorized_skill_id(self) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'add_uncategorized_skill_id', 'new_uncategorized_skill_id': 'new_uncategorized_skill_id' @@ -1097,7 +1552,9 @@ def test_topic_change_object_with_add_uncategorized_skill_id(self): topic_change_object.new_uncategorized_skill_id, 'new_uncategorized_skill_id') - def test_topic_change_object_with_remove_uncategorized_skill_id(self): + def test_topic_change_object_with_remove_uncategorized_skill_id( + self + ) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'remove_uncategorized_skill_id', 'uncategorized_skill_id': 'uncategorized_skill_id' @@ -1109,7 +1566,7 @@ def test_topic_change_object_with_remove_uncategorized_skill_id(self): topic_change_object.uncategorized_skill_id, 'uncategorized_skill_id') - def test_topic_change_object_with_move_skill_id_to_subtopic(self): + def test_topic_change_object_with_move_skill_id_to_subtopic(self) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'move_skill_id_to_subtopic', 'skill_id': 'skill_id', @@ -1122,7 +1579,9 @@ def test_topic_change_object_with_move_skill_id_to_subtopic(self): self.assertEqual(topic_change_object.old_subtopic_id, 'old_subtopic_id') self.assertEqual(topic_change_object.new_subtopic_id, 'new_subtopic_id') - def test_topic_change_object_with_remove_skill_id_from_subtopic(self): + def test_topic_change_object_with_remove_skill_id_from_subtopic( + self + ) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 
'remove_skill_id_from_subtopic', 'skill_id': 'skill_id', @@ -1134,7 +1593,7 @@ def test_topic_change_object_with_remove_skill_id_from_subtopic(self): self.assertEqual(topic_change_object.skill_id, 'skill_id') self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id') - def test_topic_change_object_with_update_subtopic_property(self): + def test_topic_change_object_with_update_subtopic_property(self) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'update_subtopic_property', 'subtopic_id': 'subtopic_id', @@ -1149,7 +1608,9 @@ def test_topic_change_object_with_update_subtopic_property(self): self.assertEqual(topic_change_object.new_value, 'new_value') self.assertEqual(topic_change_object.old_value, 'old_value') - def test_topic_change_object_with_update_subtopic_page_property(self): + def test_topic_change_object_with_update_subtopic_page_property( + self + ) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'update_subtopic_page_property', 'subtopic_id': 'subtopic_id', @@ -1166,7 +1627,7 @@ def test_topic_change_object_with_update_subtopic_page_property(self): self.assertEqual(topic_change_object.new_value, 'new_value') self.assertEqual(topic_change_object.old_value, 'old_value') - def test_topic_change_object_with_update_topic_property(self): + def test_topic_change_object_with_update_topic_property(self) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'update_topic_property', 'property_name': 'name', @@ -1179,7 +1640,7 @@ def test_topic_change_object_with_update_topic_property(self): self.assertEqual(topic_change_object.new_value, 'new_value') self.assertEqual(topic_change_object.old_value, 'old_value') - def test_topic_change_object_with_create_new(self): + def test_topic_change_object_with_create_new(self) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'create_new', 'name': 'name', @@ -1189,7 +1650,8 @@ def test_topic_change_object_with_create_new(self): 
self.assertEqual(topic_change_object.name, 'name') def test_topic_change_object_with_migrate_subtopic_schema_to_latest_version( - self): + self + ) -> None: topic_change_object = topic_domain.TopicChange({ 'cmd': 'migrate_subtopic_schema_to_latest_version', 'from_version': 'from_version', @@ -1202,7 +1664,7 @@ def test_topic_change_object_with_migrate_subtopic_schema_to_latest_version( self.assertEqual(topic_change_object.from_version, 'from_version') self.assertEqual(topic_change_object.to_version, 'to_version') - def test_to_dict(self): + def test_to_dict(self) -> None: topic_change_dict = { 'cmd': 'create_new', 'name': 'name' @@ -1213,18 +1675,20 @@ def test_to_dict(self): class TopicRightsChangeTests(test_utils.GenericTestBase): - def test_topic_rights_change_object_with_missing_cmd(self): - with self.assertRaisesRegexp( + def test_topic_rights_change_object_with_missing_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Missing cmd key in change dict'): topic_domain.TopicRightsChange({'invalid': 'data'}) - def test_topic_change_rights_object_with_invalid_cmd(self): - with self.assertRaisesRegexp( + def test_topic_change_rights_object_with_invalid_cmd(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Command invalid is not allowed'): topic_domain.TopicRightsChange({'cmd': 'invalid'}) - def test_topic_rights_change_object_with_missing_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_topic_rights_change_object_with_missing_attribute_in_cmd( + self + ) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'The following required attributes are missing: ' 'new_role, old_role')): @@ -1233,8 +1697,10 @@ def test_topic_rights_change_object_with_missing_attribute_in_cmd(self): 'assignee_id': 'assignee_id', }) - def test_topic_rights_change_object_with_extra_attribute_in_cmd(self): - with self.assertRaisesRegexp( + def test_topic_rights_change_object_with_extra_attribute_in_cmd( + self + ) -> None: 
+ with self.assertRaisesRegex( utils.ValidationError, ( 'The following extra attributes are present: invalid')): topic_domain.TopicRightsChange({ @@ -1242,8 +1708,8 @@ def test_topic_rights_change_object_with_extra_attribute_in_cmd(self): 'invalid': 'invalid' }) - def test_topic_rights_change_object_with_invalid_role(self): - with self.assertRaisesRegexp( + def test_topic_rights_change_object_with_invalid_role(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, ( 'Value for old_role in cmd change_role: ' 'invalid is not allowed')): @@ -1254,14 +1720,14 @@ def test_topic_rights_change_object_with_invalid_role(self): 'new_role': topic_domain.ROLE_MANAGER }) - def test_topic_rights_change_object_with_create_new(self): + def test_topic_rights_change_object_with_create_new(self) -> None: topic_rights_change_object = topic_domain.TopicRightsChange({ 'cmd': 'create_new' }) self.assertEqual(topic_rights_change_object.cmd, 'create_new') - def test_topic_rights_change_object_with_change_role(self): + def test_topic_rights_change_object_with_change_role(self) -> None: topic_rights_change_object = topic_domain.TopicRightsChange({ 'cmd': 'change_role', 'assignee_id': 'assignee_id', @@ -1276,21 +1742,21 @@ def test_topic_rights_change_object_with_change_role(self): self.assertEqual( topic_rights_change_object.new_role, topic_domain.ROLE_MANAGER) - def test_topic_rights_change_object_with_publish_topic(self): + def test_topic_rights_change_object_with_publish_topic(self) -> None: topic_rights_change_object = topic_domain.TopicRightsChange({ 'cmd': 'publish_topic' }) self.assertEqual(topic_rights_change_object.cmd, 'publish_topic') - def test_topic_rights_change_object_with_unpublish_topic(self): + def test_topic_rights_change_object_with_unpublish_topic(self) -> None: topic_rights_change_object = topic_domain.TopicRightsChange({ 'cmd': 'unpublish_topic' }) self.assertEqual(topic_rights_change_object.cmd, 'unpublish_topic') - def test_to_dict(self): + def 
test_to_dict(self) -> None: topic_rights_change_dict = { 'cmd': 'change_role', 'assignee_id': 'assignee_id', @@ -1305,8 +1771,8 @@ def test_to_dict(self): class TopicSummaryTests(test_utils.GenericTestBase): - def setUp(self): - super(TopicSummaryTests, self).setUp() + def setUp(self) -> None: + super().setUp() current_time = datetime.datetime.utcnow() time_in_millisecs = utils.get_time_in_millisecs(current_time) self.topic_summary_dict = { @@ -1333,169 +1799,123 @@ def setUp(self): 1, 1, 1, 1, 1, 1, 1, 'image.svg', '#C6DCDA', 'url-frag', current_time, current_time) - def _assert_validation_error(self, expected_error_substring): + # Here we use MyPy ignore because we override the definition of the function + # from the parent class, but that is fine as _assert_validation_error is + # supposed to be customizable and thus we add an ignore. + def _assert_validation_error( # type: ignore[override] + self, + expected_error_substring: str + ) -> None: """Checks that the topic summary passes validation. Args: expected_error_substring: str. String that should be a substring of the expected error message. 
""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, expected_error_substring): self.topic_summary.validate() - def test_topic_summary_gets_created(self): + def test_topic_summary_gets_created(self) -> None: self.assertEqual( self.topic_summary.to_dict(), self.topic_summary_dict) - def test_validation_passes_with_valid_properties(self): + def test_validation_passes_with_valid_properties(self) -> None: self.topic_summary.validate() - def test_validation_fails_with_invalid_name(self): - self.topic_summary.name = 0 - self._assert_validation_error('Name should be a string.') - - def test_thumbnail_filename_validation(self): - self.topic_summary.thumbnail_filename = [] - self._assert_validation_error( - 'Expected thumbnail filename to be a string') - - def test_thumbnail_bg_validation(self): + def test_thumbnail_bg_validation(self) -> None: self.topic_summary.thumbnail_bg_color = '#FFFFFF' self._assert_validation_error( 'Topic thumbnail background color #FFFFFF is not supported.') - def test_thumbnail_filename_or_thumbnail_bg_color_is_none(self): + def test_thumbnail_filename_or_thumbnail_bg_color_is_none(self) -> None: self.topic_summary.thumbnail_bg_color = '#C6DCDA' + self.topic_summary.thumbnail_filename = None self._assert_validation_error( 'Topic thumbnail image is not provided.') + self.topic_summary.thumbnail_bg_color = None self.topic_summary.thumbnail_filename = 'test.svg' self._assert_validation_error( 'Topic thumbnail background color is not specified.') - def test_validation_fails_with_empty_name(self): + def test_validation_fails_with_empty_name(self) -> None: self.topic_summary.name = '' self._assert_validation_error('Name field should not be empty') - def test_validation_fails_with_invalid_url_fragment(self): - self.topic_summary.url_fragment = 0 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Topic URL Fragment field must be a string. 
Received 0.'): - self.topic_summary.validate() - - def test_validation_fails_with_empty_url_fragment(self): + def test_validation_fails_with_empty_url_fragment(self) -> None: self.topic_summary.url_fragment = '' validation_message = 'Topic URL Fragment field should not be empty.' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, validation_message): self.topic_summary.validate() - def test_validation_fails_with_lenghty_url_fragment(self): + def test_validation_fails_with_lenghty_url_fragment(self) -> None: self.topic_summary.url_fragment = 'a' * 25 url_fragment_char_limit = constants.MAX_CHARS_IN_TOPIC_URL_FRAGMENT validation_message = ( 'Topic URL Fragment field should not exceed %d characters, ' 'received %s.' % ( url_fragment_char_limit, self.topic_summary.url_fragment)) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, validation_message): self.topic_summary.validate() - def test_validation_fails_with_invalid_description(self): - self.topic_summary.description = 3 - self._assert_validation_error( - 'Expected description to be a string, received 3') - - def test_validation_fails_with_invalid_canonical_name(self): - self.topic_summary.canonical_name = 0 - self._assert_validation_error('Canonical name should be a string.') - - def test_validation_fails_with_empty_canonical_name(self): + def test_validation_fails_with_empty_canonical_name(self) -> None: self.topic_summary.canonical_name = '' self._assert_validation_error( 'Canonical name field should not be empty') - def test_validation_fails_with_invalid_language_code(self): - self.topic_summary.language_code = 0 - self._assert_validation_error( - 'Expected language code to be a string, received 0') - - def test_validation_fails_with_unallowed_language_code(self): + def test_validation_fails_with_unallowed_language_code(self) -> None: self.topic_summary.language_code = 'invalid' self._assert_validation_error('Invalid language code: invalid') - 
def test_validation_fails_with_invalid_canonical_story_count(self): - self.topic_summary.canonical_story_count = '10' - self._assert_validation_error( - 'Expected canonical story count to be an integer, received \'10\'') - - def test_validation_fails_with_negative_canonical_story_count(self): + def test_validation_fails_with_negative_canonical_story_count(self) -> None: self.topic_summary.canonical_story_count = -1 self._assert_validation_error( 'Expected canonical_story_count to be non-negative, ' 'received \'-1\'') - def test_validation_fails_with_invalid_additional_story_count(self): - self.topic_summary.additional_story_count = '10' - self._assert_validation_error( - 'Expected additional story count to be an integer, received \'10\'') - - def test_validation_fails_with_negative_additional_story_count(self): + def test_validation_fails_with_negative_additional_story_count( + self + ) -> None: self.topic_summary.additional_story_count = -1 self._assert_validation_error( 'Expected additional_story_count to be non-negative, ' 'received \'-1\'') - def test_validation_fails_with_invalid_uncategorized_skill_count(self): - self.topic_summary.uncategorized_skill_count = '10' - self._assert_validation_error( - 'Expected uncategorized skill count to be an integer, ' - 'received \'10\'') - - def test_validation_fails_with_negative_uncategorized_skill_count(self): + def test_validation_fails_with_negative_uncategorized_skill_count( + self + ) -> None: self.topic_summary.uncategorized_skill_count = -1 self._assert_validation_error( 'Expected uncategorized_skill_count to be non-negative, ' 'received \'-1\'') - def test_validation_fails_with_invalid_total_skill_count(self): - self.topic_summary.total_skill_count = '10' - self._assert_validation_error( - 'Expected total skill count to be an integer, received \'10\'') - - def test_validation_fails_with_negative_total_skill_count(self): + def test_validation_fails_with_negative_total_skill_count(self) -> None: 
self.topic_summary.total_skill_count = -1 self._assert_validation_error( 'Expected total_skill_count to be non-negative, received \'-1\'') - def test_validation_fails_with_invalid_total_skill_count_value(self): + def test_validation_fails_with_invalid_total_skill_count_value( + self + ) -> None: self.topic_summary.total_skill_count = 5 self.topic_summary.uncategorized_skill_count = 10 self._assert_validation_error( 'Expected total_skill_count to be greater than or equal to ' 'uncategorized_skill_count 10, received \'5\'') - def test_validation_fails_with_invalid_total_published_node_count(self): - self.topic_summary.total_published_node_count = '10' - self._assert_validation_error( - 'Expected total published node count to be an integer, ' - 'received \'10\'') - - def test_validation_fails_with_negative_total_published_node_count(self): + def test_validation_fails_with_negative_total_published_node_count( + self + ) -> None: self.topic_summary.total_published_node_count = -1 self._assert_validation_error( 'Expected total_published_node_count to be non-negative, ' 'received \'-1\'') - def test_validation_fails_with_invalid_subtopic_count(self): - self.topic_summary.subtopic_count = '10' - self._assert_validation_error( - 'Expected subtopic count to be an integer, received \'10\'') - - def test_validation_fails_with_negative_subtopic_count(self): + def test_validation_fails_with_negative_subtopic_count(self) -> None: self.topic_summary.subtopic_count = -1 self._assert_validation_error( 'Expected subtopic_count to be non-negative, received \'-1\'') @@ -1503,25 +1923,16 @@ def test_validation_fails_with_negative_subtopic_count(self): class TopicRightsTests(test_utils.GenericTestBase): - def setUp(self): - super(TopicRightsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup('a@example.com', 'A') self.signup('b@example.com', 'B') self.user_id_a = self.get_user_id_from_email('a@example.com') self.user_id_b = 
self.get_user_id_from_email('b@example.com') - self.topic_summary_dict = { - 'topic_id': 'topic_id', - 'manager_names': ['A'], - 'topic_is_published': False, - } self.topic_summary = topic_domain.TopicRights( 'topic_id', [self.user_id_a], False) - def test_topic_summary_gets_created(self): - self.assertEqual( - self.topic_summary.to_dict(), self.topic_summary_dict) - - def test_is_manager(self): + def test_is_manager(self) -> None: self.assertTrue(self.topic_summary.is_manager(self.user_id_a)) self.assertFalse(self.topic_summary.is_manager(self.user_id_b)) diff --git a/core/domain/topic_fetchers.py b/core/domain/topic_fetchers.py index 48c57d658ffe..71aa12e1d5dd 100644 --- a/core/domain/topic_fetchers.py +++ b/core/domain/topic_fetchers.py @@ -24,15 +24,24 @@ from core import utils from core.domain import caching_services from core.domain import classroom_services +from core.domain import story_domain from core.domain import story_fetchers from core.domain import topic_domain from core.platform import models -(skill_models, topic_models,) = models.Registry.import_models([ - models.NAMES.skill, models.NAMES.topic]) +from typing import ( + Dict, List, Literal, Optional, Sequence, Set, TypedDict, overload) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import topic_models -def _migrate_subtopics_to_latest_schema(versioned_subtopics, topic_id): +(topic_models,) = models.Registry.import_models([models.Names.TOPIC]) + + +def _migrate_subtopics_to_latest_schema( + versioned_subtopics: topic_domain.VersionedSubtopicsDict, topic_id: str +) -> None: """Holds the responsibility of performing a step-by-step, sequential update of the subtopics structure based on the schema version of the input subtopics dictionary. 
If the current subtopics schema changes, a @@ -64,7 +73,9 @@ def _migrate_subtopics_to_latest_schema(versioned_subtopics, topic_id): subtopic_schema_version += 1 -def _migrate_story_references_to_latest_schema(versioned_story_references): +def _migrate_story_references_to_latest_schema( + versioned_story_references: topic_domain.VersionedStoryReferencesDict +) -> None: """Holds the responsibility of performing a step-by-step, sequential update of the story reference structure based on the schema version of the input story reference dictionary. If the current story reference schema changes, a @@ -97,7 +108,9 @@ def _migrate_story_references_to_latest_schema(versioned_story_references): story_reference_schema_version += 1 -def get_topic_from_model(topic_model): +def get_topic_from_model( + topic_model: topic_models.TopicModel +) -> topic_domain.Topic: """Returns a topic domain object given a topic model loaded from the datastore. @@ -109,15 +122,19 @@ def get_topic_from_model(topic_model): topic. A Topic domain object corresponding to the given topic model. 
""" - versioned_subtopics = { + versioned_subtopics: topic_domain.VersionedSubtopicsDict = { 'schema_version': topic_model.subtopic_schema_version, 'subtopics': copy.deepcopy(topic_model.subtopics) } - versioned_canonical_story_references = { + versioned_canonical_story_references: ( + topic_domain.VersionedStoryReferencesDict + ) = { 'schema_version': topic_model.story_reference_schema_version, 'story_references': topic_model.canonical_story_references } - versioned_additional_story_references = { + versioned_additional_story_references: ( + topic_domain.VersionedStoryReferencesDict + ) = { 'schema_version': topic_model.story_reference_schema_version, 'story_references': topic_model.additional_story_references } @@ -156,11 +173,46 @@ def get_topic_from_model(topic_model): topic_model.language_code, topic_model.version, feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION, topic_model.meta_tag_content, topic_model.practice_tab_is_displayed, - topic_model.page_title_fragment_for_web, topic_model.created_on, + topic_model.page_title_fragment_for_web, + topic_model.skill_ids_for_diagnostic_test, topic_model.created_on, topic_model.last_updated) -def get_topic_by_id(topic_id, strict=True, version=None): +@overload +def get_topic_by_id( + topic_id: str +) -> topic_domain.Topic: ... + + +@overload +def get_topic_by_id( + topic_id: str, + *, + version: Optional[int] = None +) -> topic_domain.Topic: ... + + +@overload +def get_topic_by_id( + topic_id: str, + *, + strict: Literal[True], + version: Optional[int] = None +) -> topic_domain.Topic: ... + + +@overload +def get_topic_by_id( + topic_id: str, + *, + strict: Literal[False], + version: Optional[int] = None +) -> Optional[topic_domain.Topic]: ... + + +def get_topic_by_id( + topic_id: str, strict: bool = True, version: Optional[int] = None +) -> Optional[topic_domain.Topic]: """Returns a domain object representing a topic. Args: @@ -174,7 +226,7 @@ def get_topic_by_id(topic_id, strict=True, version=None): Topic or None. 
The domain object representing a topic with the given id, or None if it does not exist. """ - sub_namespace = str(version) if version else None + sub_namespace: Optional[str] = str(version) if version else None cached_topic = caching_services.get_multi( caching_services.CACHE_NAMESPACE_TOPIC, sub_namespace, @@ -196,42 +248,102 @@ def get_topic_by_id(topic_id, strict=True, version=None): return None -def get_topics_by_ids(topic_ids): +@overload +def get_topics_by_ids( + topic_ids: List[str], *, strict: Literal[True] +) -> List[topic_domain.Topic]: ... + + +@overload +def get_topics_by_ids( + topic_ids: List[str] +) -> List[Optional[topic_domain.Topic]]: ... + + +@overload +def get_topics_by_ids( + topic_ids: List[str], *, strict: Literal[False] +) -> List[Optional[topic_domain.Topic]]: ... + + +def get_topics_by_ids( + topic_ids: List[str], strict: bool = False +) -> Sequence[Optional[topic_domain.Topic]]: """Returns a list of topics matching the IDs provided. Args: topic_ids: list(str). List of IDs to get topics for. + strict: bool. Whether to fail noisily if no topic model exists + with a given ID exists in the datastore. Returns: list(Topic|None). The list of topics corresponding to given ids (with None in place of topic ids corresponding to deleted topics). + + Raises: + Exception. No topic model exists for the given topic_id. 
""" - all_topic_models = topic_models.TopicModel.get_multi(topic_ids) - topics = [ - get_topic_from_model(topic_model) if topic_model is not None else None - for topic_model in all_topic_models] + all_topic_models: List[Optional[topic_models.TopicModel]] = ( + topic_models.TopicModel.get_multi(topic_ids) + ) + topics: List[Optional[topic_domain.Topic]] = [] + for index, topic_model in enumerate(all_topic_models): + if topic_model is None: + if strict: + raise Exception( + 'No topic model exists for the topic_id: %s' + % topic_ids[index] + ) + topics.append(topic_model) + if topic_model is not None: + topics.append(get_topic_from_model(topic_model)) return topics -def get_topic_by_name(topic_name): +@overload +def get_topic_by_name( + topic_name: str, *, strict: Literal[True] = ... +) -> topic_domain.Topic: ... + + +@overload +def get_topic_by_name( + topic_name: str, *, strict: Literal[False] = ... +) -> Optional[topic_domain.Topic]: ... + + +def get_topic_by_name( + topic_name: str, strict: bool = False +) -> Optional[topic_domain.Topic]: """Returns a domain object representing a topic. Args: topic_name: str. The name of the topic. + strict: bool. Whether to fail noisily if no Topic exists for + the given topic name. Returns: Topic or None. The domain object representing a topic with the given id, or None if it does not exist. + + Raises: + Exception. No Topic exists for the given topic name. 
""" - topic_model = topic_models.TopicModel.get_by_name(topic_name) + topic_model: Optional[topic_models.TopicModel] = ( + topic_models.TopicModel.get_by_name(topic_name)) if topic_model is None: + if strict: + raise Exception( + 'No Topic exists for the given topic name: %s' % topic_name + ) return None - topic = get_topic_from_model(topic_model) - return topic + return get_topic_from_model(topic_model) -def get_topic_by_url_fragment(url_fragment): +def get_topic_by_url_fragment( + url_fragment: str +) -> Optional[topic_domain.Topic]: """Returns a domain object representing a topic. Args: @@ -241,28 +353,47 @@ def get_topic_by_url_fragment(url_fragment): Topic or None. The domain object representing a topic with the given id, or None if it does not exist. """ - topic_model = ( + topic_model: Optional[topic_models.TopicModel] = ( topic_models.TopicModel.get_by_url_fragment(url_fragment)) if topic_model is None: return None - topic = get_topic_from_model(topic_model) - return topic + return get_topic_from_model(topic_model) -def get_all_topics(): +def get_all_topics() -> List[topic_domain.Topic]: """Returns all the topics present in the datastore. Returns: list(Topic). The list of topics present in the datastore. """ backend_topic_models = topic_models.TopicModel.get_all() - topics = [ + topics: List[topic_domain.Topic] = [ get_topic_from_model(topic) for topic in backend_topic_models] return topics -def get_topic_rights(topic_id, strict=True): +@overload +def get_topic_rights( + topic_id: str +) -> topic_domain.TopicRights: ... + + +@overload +def get_topic_rights( + topic_id: str, *, strict: Literal[True] +) -> topic_domain.TopicRights: ... + + +@overload +def get_topic_rights( + topic_id: str, *, strict: Literal[False] +) -> Optional[topic_domain.TopicRights]: ... + + +def get_topic_rights( + topic_id: str, strict: bool = True +) -> Optional[topic_domain.TopicRights]: """Retrieves the rights object for the given topic. 
Args: @@ -271,14 +402,16 @@ def get_topic_rights(topic_id, strict=True): exists in the datastore. Returns: - TopicRights. The rights object associated with the given topic. + TopicRights or None. The rights object associated with the given topic, + or None if it does not exist. Raises: - EntityNotFoundError. The topic with ID topic_id was not + EntityNotFoundError. The TopicRights with ID topic_id was not found in the datastore. """ - model = topic_models.TopicRightsModel.get(topic_id, strict=strict) + model: Optional[topic_models.TopicRightsModel] = ( + topic_models.TopicRightsModel.get(topic_id, strict=strict)) if model is None: return None @@ -286,7 +419,9 @@ def get_topic_rights(topic_id, strict=True): return get_topic_rights_from_model(model) -def get_topic_rights_from_model(topic_rights_model): +def get_topic_rights_from_model( + topic_rights_model: topic_models.TopicRightsModel +) -> topic_domain.TopicRights: """Constructs a TopicRights object from the given topic rights model. Args: @@ -304,7 +439,7 @@ def get_topic_rights_from_model(topic_rights_model): ) -def get_all_topic_summaries(): +def get_all_topic_summaries() -> List[topic_domain.TopicSummary]: """Returns the summaries of all topics present in the datastore. Returns: @@ -312,13 +447,15 @@ def get_all_topic_summaries(): datastore. """ topic_summaries_models = topic_models.TopicSummaryModel.get_all() - topic_summaries = [ + topic_summaries: List[topic_domain.TopicSummary] = [ get_topic_summary_from_model(summary) for summary in topic_summaries_models] return topic_summaries -def get_multi_topic_summaries(topic_ids): +def get_multi_topic_summaries( + topic_ids: List[str] +) -> List[Optional[topic_domain.TopicSummary]]: """Returns the summaries of all topics whose topic ids are passed in. Args: @@ -326,8 +463,8 @@ def get_multi_topic_summaries(topic_ids): returned. Returns: - list(TopicSummary). The list of summaries of all given topics present in - the datastore. + list(TopicSummary) or None. 
The list of summaries of all given topics + present in the datastore, or None if it does not exist. """ topic_summaries_models = topic_models.TopicSummaryModel.get_multi(topic_ids) topic_summaries = [ @@ -336,21 +473,44 @@ def get_multi_topic_summaries(topic_ids): return topic_summaries -def get_all_skill_ids_assigned_to_some_topic(): +def get_published_topic_summaries() -> List[topic_domain.TopicSummary]: + """Returns the summaries of all published topics present in the datastore. + + Returns: + list(TopicSummary). The list of summaries of all published topics + present in the datastore. + """ + topic_id_to_topic_rights = get_all_topic_rights() + published_topic_ids = [ + topic_id + for topic_id, topic_rights in topic_id_to_topic_rights.items() + if topic_rights.topic_is_published] + topic_summaries_list = [ + topic_summary for topic_summary in get_multi_topic_summaries( + published_topic_ids + ) if topic_summary is not None + ] + return topic_summaries_list + + +def get_all_skill_ids_assigned_to_some_topic() -> Set[str]: """Returns the ids of all the skills that are linked to some topics. Returns: set([str]). The ids of all the skills linked to some topic. """ - skill_ids = set([]) + skill_ids: Set[str] = set() all_topic_models = topic_models.TopicModel.get_all() - all_topics = [get_topic_from_model(topic) for topic in all_topic_models] + all_topics: List[topic_domain.Topic] = [ + get_topic_from_model(topic) for topic in all_topic_models] for topic in all_topics: skill_ids.update(topic.get_all_skill_ids()) return skill_ids -def get_topic_summary_from_model(topic_summary_model): +def get_topic_summary_from_model( + topic_summary_model: topic_models.TopicSummaryModel +) -> topic_domain.TopicSummary: """Returns a domain object for an Oppia topic summary given a topic summary model. 
@@ -381,28 +541,50 @@ def get_topic_summary_from_model(topic_summary_model): ) -def get_topic_summary_by_id(topic_id, strict=True): +@overload +def get_topic_summary_by_id( + topic_id: str +) -> topic_domain.TopicSummary: ... + + +@overload +def get_topic_summary_by_id( + topic_id: str, *, strict: Literal[True] +) -> topic_domain.TopicSummary: ... + + +@overload +def get_topic_summary_by_id( + topic_id: str, *, strict: Literal[False] +) -> Optional[topic_domain.TopicSummary]: ... + + +def get_topic_summary_by_id( + topic_id: str, strict: bool = True +) -> Optional[topic_domain.TopicSummary]: """Returns a domain object representing a topic summary. Args: topic_id: str. ID of the topic summary. strict: bool. Whether to fail noisily if no topic summary with the given - id exists in the datastore. + id exist in the datastore. Returns: TopicSummary or None. The topic summary domain object corresponding to a topic with the given topic_id, if it exists, or else None. """ - topic_summary_model = topic_models.TopicSummaryModel.get( - topic_id, strict=strict) + topic_summary_model: Optional[topic_models.TopicSummaryModel] = ( + topic_models.TopicSummaryModel.get( + topic_id, strict=strict)) if topic_summary_model: - topic_summary = get_topic_summary_from_model(topic_summary_model) + topic_summary: topic_domain.TopicSummary = ( + get_topic_summary_from_model(topic_summary_model)) return topic_summary else: return None -def get_new_topic_id(): +def get_new_topic_id() -> str: """Returns a new topic id. Returns: @@ -411,25 +593,61 @@ def get_new_topic_id(): return topic_models.TopicModel.get_new_id('') -def get_multi_topic_rights(topic_ids): +@overload +def get_multi_topic_rights( + topic_ids: List[str], *, strict: Literal[True] +) -> List[topic_domain.TopicRights]: ... + + +@overload +def get_multi_topic_rights( + topic_ids: List[str] +) -> List[Optional[topic_domain.TopicRights]]: ... 
+ + +@overload +def get_multi_topic_rights( + topic_ids: List[str], *, strict: Literal[False] +) -> List[Optional[topic_domain.TopicRights]]: ... + + +def get_multi_topic_rights( + topic_ids: List[str], strict: bool = False +) -> Sequence[Optional[topic_domain.TopicRights]]: """Returns the rights of all topics whose topic ids are passed in. Args: topic_ids: list(str). The IDs of topics for which rights are to be returned. + strict: bool. Whether to fail noisily if no TopicRights exists for + the given topic id. Returns: - list(TopicRights). The list of rights of all given topics present in - the datastore. + Sequence[Optional[TopicRights]]. The list of rights of all given topics + present in the datastore. + + Raises: + Exception. No topic_rights exists for the given topic_id. """ - topic_rights_models = topic_models.TopicRightsModel.get_multi(topic_ids) - topic_rights = [ - get_topic_rights_from_model(rights) if rights else None - for rights in topic_rights_models] + topic_rights_models: List[Optional[topic_models.TopicRightsModel]] = ( + topic_models.TopicRightsModel.get_multi(topic_ids)) + topic_rights: List[Optional[topic_domain.TopicRights]] = [] + for index, rights in enumerate(topic_rights_models): + if rights is None: + if strict: + raise Exception( + 'No topic_rights exists for the given topic_id: %s' % + topic_ids[index] + ) + topic_rights.append(rights) + else: + topic_rights.append( + get_topic_rights_from_model(rights) + ) return topic_rights -def get_topic_rights_with_user(user_id): +def get_topic_rights_with_user(user_id: str) -> List[topic_domain.TopicRights]: """Retrieves the rights object for all topics assigned to given user. Args: @@ -439,14 +657,15 @@ def get_topic_rights_with_user(user_id): list(TopicRights). The rights objects associated with the topics assigned to given user. 
""" - topic_rights_models = topic_models.TopicRightsModel.get_by_user(user_id) + topic_rights_models: Sequence[topic_models.TopicRightsModel] = ( + topic_models.TopicRightsModel.get_by_user(user_id)) return [ get_topic_rights_from_model(model) for model in topic_rights_models if model is not None] -def get_all_topic_rights(): +def get_all_topic_rights() -> Dict[str, topic_domain.TopicRights]: """Returns the rights object of all topics present in the datastore. Returns: @@ -454,14 +673,33 @@ def get_all_topic_rights(): keyed by topic id. """ topic_rights_models = topic_models.TopicRightsModel.get_all() - topic_rights = {} + topic_rights: Dict[str, topic_domain.TopicRights] = {} for model in topic_rights_models: - rights = get_topic_rights_from_model(model) + rights: topic_domain.TopicRights = get_topic_rights_from_model(model) topic_rights[rights.id] = rights return topic_rights -def get_canonical_story_dicts(user_id, topic): +class CannonicalStoryDict(TypedDict): + """Dictionary that represents cannonical stories.""" + + id: str + title: str + description: str + node_titles: List[str] + thumbnail_bg_color: Optional[str] + thumbnail_filename: Optional[str] + url_fragment: str + topic_url_fragment: str + classroom_url_fragment: str + story_is_published: bool + completed_node_titles: List[str] + all_node_dicts: List[story_domain.StoryNodeDict] + + +def get_canonical_story_dicts( + user_id: str, topic: topic_domain.Topic +) -> List[CannonicalStoryDict]: """Returns a list of canonical story dicts in the topic. Args: @@ -471,9 +709,9 @@ def get_canonical_story_dicts(user_id, topic): Returns: list(dict). A list of canonical story dicts in the given topic. 
""" - canonical_story_ids = topic.get_canonical_story_ids( + canonical_story_ids: List[str] = topic.get_canonical_story_ids( include_only_published=True) - canonical_story_summaries = [ + canonical_story_summaries: List[story_domain.StorySummary] = [ story_fetchers.get_story_summary_by_id( canonical_story_id) for canonical_story_id in canonical_story_ids] @@ -487,7 +725,17 @@ def get_canonical_story_dicts(user_id, topic): pending_node_titles = [node.title for node in pending_nodes] completed_node_titles = utils.compute_list_difference( story_summary.node_titles, pending_node_titles) - story_summary_dict = story_summary.to_human_readable_dict() + # Here we use MyPy ignore because the return type of + # 'to_human_readable_dict()' method is HumanReadableStorySummaryDict + # which do not contain topic_url_fragment, story_is_published and + # other keys. To overcome this missing keys issue, we have defined + # a CannonicalStoryDict and assigned it to the `story_summary_dict`. + # So, due to this a conflict in type assignment is raised which cause + # MyPy to throw `Incompatible types in assignment` error. Thus, to + # avoid the error, we used ignore here. 
+ story_summary_dict: CannonicalStoryDict = ( + story_summary.to_human_readable_dict() # type: ignore[assignment] + ) story_summary_dict['topic_url_fragment'] = topic.url_fragment story_summary_dict['classroom_url_fragment'] = ( classroom_services.get_classroom_url_fragment_for_topic_id( diff --git a/core/domain/topic_fetchers_test.py b/core/domain/topic_fetchers_test.py index 09f818887015..434d0c7479a5 100644 --- a/core/domain/topic_fetchers_test.py +++ b/core/domain/topic_fetchers_test.py @@ -26,27 +26,45 @@ from core.platform import models from core.tests import test_utils -(topic_models,) = models.Registry.import_models([models.NAMES.topic]) +from typing import List, Optional + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import topic_models + +(topic_models,) = models.Registry.import_models([models.Names.TOPIC]) + + +class MockTopicObject(topic_domain.Topic): + """Mocks Topic domain object.""" + + @classmethod + def _convert_story_reference_v1_dict_to_v2_dict( + cls, story_reference: topic_domain.StoryReferenceDict + ) -> topic_domain.StoryReferenceDict: + """Converts v1 story reference dict to v2.""" + return story_reference class TopicFetchersUnitTests(test_utils.GenericTestBase): """Tests for topic fetchers.""" - user_id = 'user_id' - story_id_1 = 'story_1' - story_id_2 = 'story_2' - story_id_3 = 'story_3' - subtopic_id = 1 - skill_id_1 = 'skill_1' - skill_id_2 = 'skill_2' + user_id: str = 'user_id' + story_id_1: str = 'story_1' + story_id_2: str = 'story_2' + story_id_3: str = 'story_3' + subtopic_id: int = 1 + skill_id_1: str = 'skill_1' + skill_id_2: str = 'skill_2' - def setUp(self): - super(TopicFetchersUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.TOPIC_ID = topic_fetchers.get_new_topic_id() changelist = [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title', - 'subtopic_id': 1 + 'subtopic_id': 1, + 'url_fragment': 'sample-fragment' })] self.save_new_topic( self.TOPIC_ID, 
self.user_id, name='Name', @@ -74,7 +92,9 @@ def setUp(self): topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Added a subtopic') - self.topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) + self.topic: Optional[topic_domain.Topic] = ( + topic_fetchers.get_topic_by_id(self.TOPIC_ID) + ) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.set_topic_managers( [user_services.get_username(self.user_id_a)], self.TOPIC_ID) @@ -83,17 +103,115 @@ def setUp(self): self.user_admin = user_services.get_user_actions_info( self.user_id_admin) - def test_get_topic_from_model(self): - topic_model = topic_models.TopicModel.get(self.TOPIC_ID) - topic = topic_fetchers.get_topic_from_model(topic_model) + def test_get_topic_from_model(self) -> None: + topic_model: Optional[topic_models.TopicModel] = ( + topic_models.TopicModel.get(self.TOPIC_ID) + ) + # Ruling out the possibility of None for mypy type checking. + assert topic_model is not None + topic: topic_domain.Topic = ( + topic_fetchers.get_topic_from_model(topic_model) + ) + # Ruling out the possibility of None for mypy type checking. + assert self.topic is not None self.assertEqual(topic.to_dict(), self.topic.to_dict()) - def test_get_all_topics(self): + def test_get_topic_by_name(self) -> None: + topic: Optional[topic_domain.Topic] = ( + topic_fetchers.get_topic_by_name('Name') + ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic is not None + self.assertEqual(topic.name, 'Name') + + def test_raises_error_if_wrong_name_is_used_to_get_topic_by_name( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'No Topic exists for the given topic name: wrong_topic_name' + ): + topic_fetchers.get_topic_by_name('wrong_topic_name', strict=True) + + def test_get_topic_rights_is_none(self) -> None: + fake_topic_id = topic_fetchers.get_new_topic_id() + fake_topic: Optional[topic_domain.TopicRights] = ( + topic_fetchers.get_topic_rights(fake_topic_id, strict=False) + ) + self.assertIsNone(fake_topic) + + def test_get_topic_by_url_fragment(self) -> None: + topic: Optional[topic_domain.Topic] = ( + topic_fetchers.get_topic_by_url_fragment('name-one') + ) + # Ruling out the possibility of None for mypy type checking. + assert topic is not None + self.assertEqual(topic.url_fragment, 'name-one') + + def test_get_all_topic_rights(self) -> None: + topic_rights = topic_fetchers.get_all_topic_rights() + topic_id_list = [self.TOPIC_ID] + for topic_key in topic_rights: + self.assertIn(topic_key, topic_id_list) + + def test_get_canonical_story_dicts(self) -> None: + self.save_new_story(self.story_id_2, self.user_id, self.TOPIC_ID) + topic_services.publish_story( + self.TOPIC_ID, self.story_id_1, self.user_id_admin) + topic_services.publish_story( + self.TOPIC_ID, self.story_id_2, self.user_id_admin) + topic: Optional[topic_domain.Topic] = ( + topic_fetchers.get_topic_by_id(self.TOPIC_ID) + ) + + # Ruling out the possibility of None for mypy type checking. 
+ assert topic is not None + canonical_dict_list: List[topic_fetchers.CannonicalStoryDict] = ( + topic_fetchers.get_canonical_story_dicts(self.user_id_admin, topic) + ) + + self.assertEqual(len(canonical_dict_list), 2) + + story_dict_1: topic_fetchers.CannonicalStoryDict = { + 'id': 'story_1', + 'title': 'Title', + 'description': 'Description', + 'node_titles': [], + 'thumbnail_bg_color': None, + 'thumbnail_filename': None, + 'url_fragment': 'title', + 'topic_url_fragment': 'name-one', + 'classroom_url_fragment': 'staging', + 'story_is_published': True, + 'completed_node_titles': [], 'all_node_dicts': []} + + story_dict_2: topic_fetchers.CannonicalStoryDict = { + 'id': 'story_2', + 'title': 'Title', + 'description': 'Description', + 'node_titles': [], + 'thumbnail_bg_color': None, + 'thumbnail_filename': None, + 'url_fragment': 'title', + 'topic_url_fragment': 'name-one', + 'classroom_url_fragment': 'staging', + 'story_is_published': True, + 'completed_node_titles': [], 'all_node_dicts': []} + + story_dict_list = [story_dict_1, story_dict_2] + for canonical_story_dict in canonical_dict_list: + self.assertIn(canonical_story_dict, story_dict_list) + + def test_get_all_topics(self) -> None: topics = topic_fetchers.get_all_topics() self.assertEqual(len(topics), 1) + # Ruling out the possibility of None for mypy type checking. 
+ assert self.topic is not None self.assertEqual(topics[0].id, self.topic.id) - def test_cannot_get_topic_from_model_with_invalid_schema_version(self): + def test_cannot_get_topic_from_model_with_invalid_schema_version( + self + ) -> None: topic_services.create_new_topic_rights('topic_id', self.user_id_a) commit_cmd = topic_domain.TopicChange({ 'cmd': topic_domain.CMD_CREATE_NEW, @@ -115,13 +233,14 @@ def test_cannot_get_topic_from_model_with_invalid_schema_version(self): language_code='en', subtopics=[subtopic_dict], subtopic_schema_version=0, - story_reference_schema_version=0 + story_reference_schema_version=0, + page_title_fragment_for_web='fragm' ) commit_cmd_dicts = [commit_cmd.to_dict()] model.commit( self.user_id_a, 'topic model created', commit_cmd_dicts) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d subtopic schemas at ' 'present.' % feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION): @@ -139,24 +258,72 @@ def test_cannot_get_topic_from_model_with_invalid_schema_version(self): language_code='en', subtopics=[subtopic_dict], subtopic_schema_version=1, - story_reference_schema_version=0 + story_reference_schema_version=0, + page_title_fragment_for_web='fragm' ) commit_cmd_dicts = [commit_cmd.to_dict()] model.commit( self.user_id_a, 'topic model created', commit_cmd_dicts) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d story reference schemas at ' 'present.' 
% feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION): topic_fetchers.get_topic_from_model(model) - def test_get_topic_by_id(self): + def test_topic_model_migration_to_higher_version(self) -> None: + topic_services.create_new_topic_rights('topic_id', self.user_id_a) + commit_cmd = topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_CREATE_NEW, + 'name': 'name' + }) + subtopic_v1_dict = { + 'id': 1, + 'title': 'subtopic_title', + 'skill_ids': [] + } + model = topic_models.TopicModel( + id='topic_id', + name='name 2', + description='description 2', + abbreviated_name='abbrev', + url_fragment='name-three', + canonical_name='canonical_name_2', + next_subtopic_id=1, + language_code='en', + subtopics=[subtopic_v1_dict], + subtopic_schema_version=1, + story_reference_schema_version=1, + page_title_fragment_for_web='fragment' + ) + commit_cmd_dicts = [commit_cmd.to_dict()] + model.commit( + self.user_id_a, 'topic model created', commit_cmd_dicts) + swap_topic_object = self.swap(topic_domain, 'Topic', MockTopicObject) + current_story_refrence_schema_version_swap = self.swap( + feconf, 'CURRENT_STORY_REFERENCE_SCHEMA_VERSION', 2) + with swap_topic_object, current_story_refrence_schema_version_swap: + topic: topic_domain.Topic = ( + topic_fetchers.get_topic_from_model(model)) + self.assertEqual(topic.story_reference_schema_version, 2) + + def test_get_topic_by_id(self) -> None: + # Ruling out the possibility of None for mypy type checking. + assert self.topic is not None expected_topic = self.topic.to_dict() - topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) + topic: Optional[topic_domain.Topic] = ( + topic_fetchers.get_topic_by_id(self.TOPIC_ID) + ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic is not None self.assertEqual(topic.to_dict(), expected_topic) + fake_topic_id = topic_fetchers.get_new_topic_id() + fake_topic: Optional[topic_domain.Topic] = ( + topic_fetchers.get_topic_by_id(fake_topic_id, strict=False) + ) + self.assertIsNone(fake_topic) - def test_get_topic_by_version(self): + def test_get_topic_by_version(self) -> None: topic_id = topic_fetchers.get_new_topic_id() self.save_new_topic( topic_id, self.user_id, name='topic name', @@ -174,39 +341,73 @@ def test_get_topic_by_version(self): topic_services.update_topic_and_subtopic_pages( self.user_id, topic_id, changelist, 'Change language code') - topic_v0 = topic_fetchers.get_topic_by_id(topic_id, version=0) - topic_v1 = topic_fetchers.get_topic_by_id(topic_id, version=1) + topic_v0: Optional[topic_domain.Topic] = ( + topic_fetchers.get_topic_by_id(topic_id, version=0) + ) + topic_v1: Optional[topic_domain.Topic] = ( + topic_fetchers.get_topic_by_id(topic_id, version=1) + ) + # Ruling out the possibility of None for mypy type checking. + assert topic_v0 is not None + assert topic_v1 is not None self.assertEqual(topic_v1.language_code, 'en') self.assertEqual(topic_v0.language_code, 'bn') - def test_get_topics_by_id(self): + def test_get_topics_by_id(self) -> None: + # Ruling out the possibility of None for mypy type checking. + assert self.topic is not None expected_topic = self.topic.to_dict() - topics = topic_fetchers.get_topics_by_ids([self.TOPIC_ID]) + topics: List[Optional[topic_domain.Topic]] = ( + topic_fetchers.get_topics_by_ids([self.TOPIC_ID]) + ) + # Ruling out the possibility of None for mypy type checking. + assert topics[0] is not None self.assertEqual(topics[0].to_dict(), expected_topic) self.assertEqual(len(topics), 1) - topics = topic_fetchers.get_topics_by_ids([self.TOPIC_ID, 'topic']) + topics = ( + topic_fetchers.get_topics_by_ids([self.TOPIC_ID, 'topic']) + ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topics[0] is not None self.assertEqual(topics[0].to_dict(), expected_topic) self.assertIsNone(topics[1]) self.assertEqual(len(topics), 2) - def test_get_all_topic_rights_of_user(self): - topic_rights = topic_fetchers.get_topic_rights_with_user(self.user_id_a) + def test_raises_error_if_topics_fetched_with_invalid_ids_and_strict( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'No topic model exists for the topic_id: invalid_id' + ): + topic_fetchers.get_topics_by_ids(['invalid_id'], strict=True) + + def test_get_all_topic_rights_of_user(self) -> None: + topic_rights: List[topic_domain.TopicRights] = ( + topic_fetchers.get_topic_rights_with_user(self.user_id_a) + ) self.assertEqual(len(topic_rights), 1) self.assertEqual(topic_rights[0].id, self.TOPIC_ID) self.assertEqual(topic_rights[0].manager_ids, [self.user_id_a]) - def test_commit_log_entry(self): - topic_commit_log_entry = ( - topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 1) + def test_commit_log_entry(self) -> None: + topic_commit_log_entry: ( + Optional[topic_models.TopicCommitLogEntryModel] + ) = ( + topic_models.TopicCommitLogEntryModel.get_commit( + self.TOPIC_ID, 1 + ) ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'create') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id) - def test_get_all_summaries(self): + def test_get_all_summaries(self) -> None: topic_summaries = topic_fetchers.get_all_topic_summaries() self.assertEqual(len(topic_summaries), 1) @@ -217,10 +418,15 @@ def test_get_all_summaries(self): self.assertEqual(topic_summaries[0].uncategorized_skill_count, 2) self.assertEqual(topic_summaries[0].subtopic_count, 1) - def test_get_multi_summaries(self): - topic_summaries = topic_fetchers.get_multi_topic_summaries([ - self.TOPIC_ID, 'invalid_id']) + def test_get_multi_summaries(self) -> None: + topic_summaries: List[Optional[topic_domain.TopicSummary]] = ( + topic_fetchers.get_multi_topic_summaries([ + self.TOPIC_ID, 'invalid_id' + ]) + ) + # Ruling out the possibility of None for mypy type checking. + assert topic_summaries[0] is not None self.assertEqual(len(topic_summaries), 2) self.assertEqual(topic_summaries[0].name, 'Name') self.assertEqual(topic_summaries[0].description, 'Description') @@ -231,7 +437,42 @@ def test_get_multi_summaries(self): self.assertEqual(topic_summaries[0].subtopic_count, 1) self.assertIsNone(topic_summaries[1]) - def test_get_all_skill_ids_assigned_to_some_topic(self): + def test_get_published_summaries(self) -> None: + # Unpublished topics should not be returned. + topic_summaries = topic_fetchers.get_published_topic_summaries() + self.assertEqual(len(topic_summaries), 0) + old_value: List[str] = [] + # Publish the topic. 
+ changelist = [topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, + 'old_subtopic_id': None, + 'new_subtopic_id': self.subtopic_id, + 'skill_id': self.skill_id_1 + }), topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': old_value, + 'new_value': [self.skill_id_1] + })] + topic_services.update_topic_and_subtopic_pages( + self.user_id_admin, self.TOPIC_ID, changelist, + 'Updated subtopic skill ids.') + topic_services.publish_topic(self.TOPIC_ID, self.user_id_admin) + + topic_summaries = topic_fetchers.get_published_topic_summaries() + + self.assertEqual(len(topic_summaries), 1) + # Ruling out the possibility of None for mypy type checking. + assert topic_summaries[0] is not None + self.assertEqual(topic_summaries[0].name, 'Name') + self.assertEqual(topic_summaries[0].canonical_story_count, 0) + self.assertEqual(topic_summaries[0].additional_story_count, 0) + self.assertEqual(topic_summaries[0].total_skill_count, 2) + self.assertEqual(topic_summaries[0].uncategorized_skill_count, 1) + self.assertEqual(topic_summaries[0].subtopic_count, 1) + + def test_get_all_skill_ids_assigned_to_some_topic(self) -> None: change_list = [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, 'old_subtopic_id': None, @@ -252,10 +493,15 @@ def test_get_all_skill_ids_assigned_to_some_topic(self): topic_fetchers.get_all_skill_ids_assigned_to_some_topic(), {self.skill_id_1, self.skill_id_2, 'skill_3'}) - def test_get_topic_summary_from_model(self): - topic_summary_model = topic_models.TopicSummaryModel.get(self.TOPIC_ID) - topic_summary = topic_fetchers.get_topic_summary_from_model( - topic_summary_model) + def test_get_topic_summary_from_model(self) -> None: + topic_summary_model: Optional[topic_models.TopicSummaryModel] = ( + topic_models.TopicSummaryModel.get(self.TOPIC_ID) + ) + # Ruling out the possibility of 
None for mypy type checking. + assert topic_summary_model is not None + topic_summary: topic_domain.TopicSummary = ( + topic_fetchers.get_topic_summary_from_model( + topic_summary_model)) self.assertEqual(topic_summary.id, self.TOPIC_ID) self.assertEqual(topic_summary.name, 'Name') @@ -268,8 +514,12 @@ def test_get_topic_summary_from_model(self): self.assertEqual(topic_summary.thumbnail_filename, 'topic.svg') self.assertEqual(topic_summary.thumbnail_bg_color, '#C6DCDA') - def test_get_topic_summary_by_id(self): - topic_summary = topic_fetchers.get_topic_summary_by_id(self.TOPIC_ID) + def test_get_topic_summary_by_id(self) -> None: + topic_summary: Optional[topic_domain.TopicSummary] = ( + topic_fetchers.get_topic_summary_by_id(self.TOPIC_ID) + ) + # Ruling out the possibility of None for mypy type checking. + assert topic_summary is not None self.assertEqual(topic_summary.id, self.TOPIC_ID) self.assertEqual(topic_summary.name, 'Name') @@ -281,18 +531,40 @@ def test_get_topic_summary_by_id(self): self.assertEqual(topic_summary.thumbnail_filename, 'topic.svg') self.assertEqual(topic_summary.thumbnail_bg_color, '#C6DCDA') - def test_get_new_topic_id(self): + fake_topic_id = topic_fetchers.get_new_topic_id() + fake_topic: Optional[topic_domain.TopicSummary] = ( + topic_fetchers.get_topic_summary_by_id( + fake_topic_id, strict=False + ) + ) + self.assertIsNone(fake_topic) + + def test_get_new_topic_id(self) -> None: new_topic_id = topic_fetchers.get_new_topic_id() self.assertEqual(len(new_topic_id), 12) self.assertEqual(topic_models.TopicModel.get_by_id(new_topic_id), None) - def test_get_multi_rights(self): - topic_rights = topic_fetchers.get_multi_topic_rights([ - self.TOPIC_ID, 'invalid_id']) + def test_get_multi_rights(self) -> None: + topic_rights: List[Optional[topic_domain.TopicRights]] = ( + topic_fetchers.get_multi_topic_rights([ + self.TOPIC_ID, 'invalid_id' + ]) + ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_rights[0] is not None self.assertEqual(len(topic_rights), 2) self.assertEqual(topic_rights[0].id, self.TOPIC_ID) self.assertEqual(topic_rights[0].manager_ids, [self.user_id_a]) self.assertFalse(topic_rights[0].topic_is_published) self.assertIsNone(topic_rights[1]) + + def test_raises_error_if_wrong_topic_rights_fetched_strictly(self) -> None: + with self.assertRaisesRegex( + Exception, + 'No topic_rights exists for the given topic_id: invalid_topic_id' + ): + topic_fetchers.get_multi_topic_rights( + ['invalid_topic_id'], strict=True + ) diff --git a/core/domain/topic_services.py b/core/domain/topic_services.py index b59893834785..c70de51bf91e 100644 --- a/core/domain/topic_services.py +++ b/core/domain/topic_services.py @@ -23,12 +23,16 @@ from core import feconf from core import utils +from core.constants import constants from core.domain import caching_services +from core.domain import change_domain from core.domain import feedback_services +from core.domain import fs_services from core.domain import opportunity_services from core.domain import rights_domain from core.domain import role_services from core.domain import state_domain +from core.domain import story_domain from core.domain import story_fetchers from core.domain import story_services from core.domain import subtopic_page_domain @@ -36,14 +40,25 @@ from core.domain import suggestion_services from core.domain import topic_domain from core.domain import topic_fetchers +from core.domain import user_domain from core.domain import user_services from core.platform import models -(topic_models,) = models.Registry.import_models([models.NAMES.topic]) -datastore_services = models.Registry.import_datastore_services() +from typing import Dict, List, Optional, Sequence, Tuple, cast +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import topic_models -def _create_topic(committer_id, topic, commit_message, commit_cmds): +(topic_models,) = 
models.Registry.import_models([models.Names.TOPIC]) + + +def _create_topic( + committer_id: str, + topic: topic_domain.Topic, + commit_message: str, + commit_cmds: List[topic_domain.TopicChange] +) -> None: """Creates a new topic, and ensures that rights for a new topic are saved first. @@ -87,7 +102,8 @@ def _create_topic(committer_id, topic, commit_message, commit_cmds): subtopics=[subtopic.to_dict() for subtopic in topic.subtopics], meta_tag_content=topic.meta_tag_content, practice_tab_is_displayed=topic.practice_tab_is_displayed, - page_title_fragment_for_web=topic.page_title_fragment_for_web + page_title_fragment_for_web=topic.page_title_fragment_for_web, + skill_ids_for_diagnostic_test=topic.skill_ids_for_diagnostic_test ) commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds] model.commit(committer_id, commit_message, commit_cmd_dicts) @@ -95,7 +111,7 @@ def _create_topic(committer_id, topic, commit_message, commit_cmds): generate_topic_summary(topic.id) -def does_topic_with_name_exist(topic_name): +def does_topic_with_name_exist(topic_name: str) -> bool: """Checks if the topic with provided name exists. Args: @@ -113,7 +129,7 @@ def does_topic_with_name_exist(topic_name): return existing_topic is not None -def does_topic_with_url_fragment_exist(url_fragment): +def does_topic_with_url_fragment_exist(url_fragment: str) -> bool: """Checks if topic with provided url fragment exists. Args: @@ -132,7 +148,7 @@ def does_topic_with_url_fragment_exist(url_fragment): return existing_topic is not None -def save_new_topic(committer_id, topic): +def save_new_topic(committer_id: str, topic: topic_domain.Topic) -> None: """Saves a new topic. 
Args: @@ -148,7 +164,15 @@ def save_new_topic(committer_id, topic): })]) -def apply_change_list(topic_id, change_list): +def apply_change_list( + topic_id: str, change_list: Sequence[change_domain.BaseChange] +) -> Tuple[ + topic_domain.Topic, + Dict[str, subtopic_page_domain.SubtopicPage], + List[int], + List[int], + Dict[str, List[subtopic_page_domain.SubtopicPageChange]] +]: """Applies a changelist to a topic and returns the result. The incoming changelist should not have simultaneuous creations and deletion of subtopics. @@ -159,7 +183,7 @@ def apply_change_list(topic_id, change_list): topic. Raises: - Exception. The incoming changelist had simultaneuous creation and + Exception. The incoming changelist had simultaneous creation and deletion of subtopics. Returns: @@ -171,117 +195,324 @@ def apply_change_list(topic_id, change_list): applied to modified subtopic pages. """ topic = topic_fetchers.get_topic_by_id(topic_id) - newly_created_subtopic_ids = [] - existing_subtopic_page_ids_to_be_modified = [] - deleted_subtopic_ids = [] - modified_subtopic_pages_list = [] - modified_subtopic_pages = {} - modified_subtopic_change_cmds = collections.defaultdict(list) + newly_created_subtopic_ids: List[int] = [] + existing_subtopic_page_ids_to_be_modified: List[int] = [] + deleted_subtopic_ids: List[int] = [] + modified_subtopic_pages_list: List[ + Optional[subtopic_page_domain.SubtopicPage] + ] = [] + modified_subtopic_pages: Dict[str, subtopic_page_domain.SubtopicPage] = {} + modified_subtopic_change_cmds: Dict[ + str, List[subtopic_page_domain.SubtopicPageChange] + ] = collections.defaultdict(list) for change in change_list: if (change.cmd == subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY): - if change.subtopic_id < topic.next_subtopic_id: + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. 
+ update_subtopic_page_property_cmd = cast( + subtopic_page_domain.UpdateSubtopicPagePropertyCmd, + change + ) + if ( + update_subtopic_page_property_cmd.subtopic_id < + topic.next_subtopic_id + ): existing_subtopic_page_ids_to_be_modified.append( - change.subtopic_id) + update_subtopic_page_property_cmd.subtopic_id) subtopic_page_id = ( subtopic_page_domain.SubtopicPage.get_subtopic_page_id( - topic_id, change.subtopic_id)) + topic_id, update_subtopic_page_property_cmd.subtopic_id + ) + ) modified_subtopic_change_cmds[subtopic_page_id].append( - change) + update_subtopic_page_property_cmd) modified_subtopic_pages_list = ( subtopic_page_services.get_subtopic_pages_with_ids( topic_id, existing_subtopic_page_ids_to_be_modified)) for subtopic_page in modified_subtopic_pages_list: + # Ruling out the possibility of None for mypy type checking. + assert subtopic_page is not None modified_subtopic_pages[subtopic_page.id] = subtopic_page try: for change in change_list: if change.cmd == topic_domain.CMD_ADD_SUBTOPIC: - topic.add_subtopic(change.subtopic_id, change.title) + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. 
+ add_subtopic_cmd = cast( + topic_domain.AddSubtopicCmd, change + ) + topic.add_subtopic( + add_subtopic_cmd.subtopic_id, + add_subtopic_cmd.title, + add_subtopic_cmd.url_fragment + ) subtopic_page_id = ( subtopic_page_domain.SubtopicPage.get_subtopic_page_id( - topic_id, change.subtopic_id)) + topic_id, add_subtopic_cmd.subtopic_id)) modified_subtopic_pages[subtopic_page_id] = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( # pylint: disable=line-too-long - change.subtopic_id, topic_id) + add_subtopic_cmd.subtopic_id, topic_id) ) modified_subtopic_change_cmds[subtopic_page_id].append( subtopic_page_domain.SubtopicPageChange({ 'cmd': 'create_new', 'topic_id': topic_id, - 'subtopic_id': change.subtopic_id + 'subtopic_id': add_subtopic_cmd.subtopic_id })) - newly_created_subtopic_ids.append(change.subtopic_id) + newly_created_subtopic_ids.append(add_subtopic_cmd.subtopic_id) elif change.cmd == topic_domain.CMD_DELETE_SUBTOPIC: - topic.delete_subtopic(change.subtopic_id) - if change.subtopic_id in newly_created_subtopic_ids: + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + delete_subtopic_cmd = cast( + topic_domain.DeleteSubtopicCmd, change + ) + topic.delete_subtopic(delete_subtopic_cmd.subtopic_id) + if ( + delete_subtopic_cmd.subtopic_id in + newly_created_subtopic_ids + ): raise Exception( 'The incoming changelist had simultaneous' ' creation and deletion of subtopics.') - deleted_subtopic_ids.append(change.subtopic_id) + deleted_subtopic_ids.append(delete_subtopic_cmd.subtopic_id) elif change.cmd == topic_domain.CMD_ADD_CANONICAL_STORY: - topic.add_canonical_story(change.story_id) + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. 
+ add_canonical_story_cmd = cast( + topic_domain.AddCanonicalStoryCmd, change + ) + topic.add_canonical_story(add_canonical_story_cmd.story_id) elif change.cmd == topic_domain.CMD_DELETE_CANONICAL_STORY: - topic.delete_canonical_story(change.story_id) + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + delete_canonical_story_cmd = cast( + topic_domain.DeleteCanonicalStoryCmd, change + ) + topic.delete_canonical_story( + delete_canonical_story_cmd.story_id + ) elif change.cmd == topic_domain.CMD_REARRANGE_CANONICAL_STORY: + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + rearrange_canonical_story_cmd = cast( + topic_domain.RearrangeCanonicalStoryCmd, change + ) topic.rearrange_canonical_story( - change.from_index, change.to_index) + rearrange_canonical_story_cmd.from_index, + rearrange_canonical_story_cmd.to_index + ) elif change.cmd == topic_domain.CMD_ADD_ADDITIONAL_STORY: - topic.add_additional_story(change.story_id) + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + add_additional_story_cmd = cast( + topic_domain.AddAdditionalStoryCmd, change + ) + topic.add_additional_story(add_additional_story_cmd.story_id) elif change.cmd == topic_domain.CMD_DELETE_ADDITIONAL_STORY: - topic.delete_additional_story(change.story_id) + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + delete_additional_story_cmd = cast( + topic_domain.DeleteAdditionalStoryCmd, change + ) + topic.delete_additional_story( + delete_additional_story_cmd.story_id + ) elif change.cmd == topic_domain.CMD_ADD_UNCATEGORIZED_SKILL_ID: + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. 
+ add_uncategorized_skill_id_cmd = cast( + topic_domain.AddUncategorizedSkillIdCmd, + change + ) topic.add_uncategorized_skill_id( - change.new_uncategorized_skill_id) + add_uncategorized_skill_id_cmd.new_uncategorized_skill_id + ) elif change.cmd == topic_domain.CMD_REMOVE_UNCATEGORIZED_SKILL_ID: + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + remove_uncategorized_skill_id_cmd = cast( + topic_domain.RemoveUncategorizedSkillIdCmd, + change + ) topic.remove_uncategorized_skill_id( - change.uncategorized_skill_id) + remove_uncategorized_skill_id_cmd.uncategorized_skill_id + ) elif change.cmd == topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC: + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + move_skill_id_to_subtopic_cmd = cast( + topic_domain.MoveSkillIdToSubtopicCmd, + change + ) topic.move_skill_id_to_subtopic( - change.old_subtopic_id, change.new_subtopic_id, - change.skill_id) + move_skill_id_to_subtopic_cmd.old_subtopic_id, + move_skill_id_to_subtopic_cmd.new_subtopic_id, + move_skill_id_to_subtopic_cmd.skill_id + ) elif change.cmd == topic_domain.CMD_REARRANGE_SKILL_IN_SUBTOPIC: + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + rearrange_skill_in_subtopic_cmd = cast( + topic_domain.RearrangeSkillInSubtopicCmd, change + ) topic.rearrange_skill_in_subtopic( - change.subtopic_id, change.from_index, change.to_index) + rearrange_skill_in_subtopic_cmd.subtopic_id, + rearrange_skill_in_subtopic_cmd.from_index, + rearrange_skill_in_subtopic_cmd.to_index + ) elif change.cmd == topic_domain.CMD_REARRANGE_SUBTOPIC: - topic.rearrange_subtopic(change.from_index, change.to_index) + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. 
+ rearrange_subtopic_cmd = cast( + topic_domain.RearrangeSubtopicCmd, change + ) + topic.rearrange_subtopic( + rearrange_subtopic_cmd.from_index, + rearrange_subtopic_cmd.to_index + ) elif change.cmd == topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC: + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + remove_skill_id_from_subtopic_cmd = cast( + topic_domain.RemoveSkillIdFromSubtopicCmd, change + ) topic.remove_skill_id_from_subtopic( - change.subtopic_id, change.skill_id) + remove_skill_id_from_subtopic_cmd.subtopic_id, + remove_skill_id_from_subtopic_cmd.skill_id + ) elif change.cmd == topic_domain.CMD_UPDATE_TOPIC_PROPERTY: if (change.property_name == topic_domain.TOPIC_PROPERTY_NAME): - topic.update_name(change.new_value) + # Here we use cast because this 'if' condition forces + # change to have type UpdateTopicPropertyNameCmd. + update_topic_name_cmd = cast( + topic_domain.UpdateTopicPropertyNameCmd, + change + ) + topic.update_name(update_topic_name_cmd.new_value) elif (change.property_name == topic_domain.TOPIC_PROPERTY_ABBREVIATED_NAME): - topic.update_abbreviated_name(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type UpdateTopicPropertyAbbreviatedNameCmd. + update_abbreviated_name_cmd = cast( + topic_domain.UpdateTopicPropertyAbbreviatedNameCmd, + change + ) + topic.update_abbreviated_name( + update_abbreviated_name_cmd.new_value + ) elif (change.property_name == topic_domain.TOPIC_PROPERTY_URL_FRAGMENT): - topic.update_url_fragment(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type UpdateTopicPropertyUrlFragmentCmd. 
+ update_url_fragment_cmd = cast( + topic_domain.UpdateTopicPropertyUrlFragmentCmd, + change + ) + topic.update_url_fragment(update_url_fragment_cmd.new_value) elif (change.property_name == topic_domain.TOPIC_PROPERTY_DESCRIPTION): - topic.update_description(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type UpdateTopicPropertyDescriptionCmd. + update_topic_description_cmd = cast( + topic_domain.UpdateTopicPropertyDescriptionCmd, + change + ) + topic.update_description( + update_topic_description_cmd.new_value + ) elif (change.property_name == topic_domain.TOPIC_PROPERTY_LANGUAGE_CODE): - topic.update_language_code(change.new_value) + # Here we use cast because this 'elif' condition forces + # change to have type UpdateTopicPropertyLanguageCodeCmd. + update_topic_language_code_cmd = cast( + topic_domain.UpdateTopicPropertyLanguageCodeCmd, + change + ) + topic.update_language_code( + update_topic_language_code_cmd.new_value + ) elif (change.property_name == topic_domain.TOPIC_PROPERTY_THUMBNAIL_FILENAME): - topic.update_thumbnail_filename(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateTopicPropertyThumbnailFilenameCmd. + update_topic_thumbnail_filename_cmd = cast( + topic_domain.UpdateTopicPropertyThumbnailFilenameCmd, + change + ) + update_thumbnail_filename( + topic, update_topic_thumbnail_filename_cmd.new_value + ) elif (change.property_name == topic_domain.TOPIC_PROPERTY_THUMBNAIL_BG_COLOR): - topic.update_thumbnail_bg_color(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateTopicPropertyThumbnailBGColorCmd. 
+ update_topic_thumbnail_bg_color_cmd = cast( + topic_domain.UpdateTopicPropertyThumbnailBGColorCmd, + change + ) + topic.update_thumbnail_bg_color( + update_topic_thumbnail_bg_color_cmd.new_value + ) elif (change.property_name == topic_domain.TOPIC_PROPERTY_META_TAG_CONTENT): - topic.update_meta_tag_content(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateTopicPropertyMetaTagContentCmd. + update_topic_meta_tag_content_cmd = cast( + topic_domain.UpdateTopicPropertyMetaTagContentCmd, + change + ) + topic.update_meta_tag_content( + update_topic_meta_tag_content_cmd.new_value + ) elif (change.property_name == topic_domain.TOPIC_PROPERTY_PRACTICE_TAB_IS_DISPLAYED): - topic.update_practice_tab_is_displayed(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateTopicPropertyPracticeTabIsDisplayedCmd. + update_practice_tab_is_displayed_cmd = cast( + topic_domain.UpdateTopicPropertyPracticeTabIsDisplayedCmd, # pylint: disable=line-too-long + change + ) + topic.update_practice_tab_is_displayed( + update_practice_tab_is_displayed_cmd.new_value + ) elif (change.property_name == topic_domain.TOPIC_PROPERTY_PAGE_TITLE_FRAGMENT_FOR_WEB): - topic.update_page_title_fragment_for_web(change.new_value) + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateTopicPropertyTitleFragmentForWebCmd. + update_title_fragment_for_web_cmd = cast( + topic_domain.UpdateTopicPropertyTitleFragmentForWebCmd, + change + ) + topic.update_page_title_fragment_for_web( + update_title_fragment_for_web_cmd.new_value + ) + elif (change.property_name == + topic_domain + .TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateTopicPropertySkillIdsForDiagnosticTestCmd. 
+ update_skill_ids_for_diagnostic_test_cmd = cast( + topic_domain.UpdateTopicPropertySkillIdsForDiagnosticTestCmd, # pylint: disable=line-too-long + change + ) + topic.update_skill_ids_for_diagnostic_test( + update_skill_ids_for_diagnostic_test_cmd.new_value + ) elif (change.cmd == subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY): + # Ruling out the possibility of any other type for mypy + # type checking. + assert isinstance(change.subtopic_id, int) subtopic_page_id = ( subtopic_page_domain.SubtopicPage.get_subtopic_page_id( topic_id, change.subtopic_id)) @@ -294,8 +525,15 @@ def apply_change_list(topic_id, change_list): if (change.property_name == subtopic_page_domain. SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML): + # Here we use cast because this 'if' + # condition forces change to have type + # UpdateSubtopicPagePropertyPageContentsHtmlCmd. + update_subtopic_page_contents_html_cmd = cast( + subtopic_page_domain.UpdateSubtopicPagePropertyPageContentsHtmlCmd, # pylint: disable=line-too-long + change + ) page_contents = state_domain.SubtitledHtml.from_dict( - change.new_value) + update_subtopic_page_contents_html_cmd.new_value) page_contents.validate() modified_subtopic_pages[ subtopic_page_id].update_page_contents_html( @@ -304,27 +542,50 @@ def apply_change_list(topic_id, change_list): elif (change.property_name == subtopic_page_domain. SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO): + # Here we use cast because this 'elif' + # condition forces change to have type + # UpdateSubtopicPagePropertyPageContentsAudioCmd. 
+ update_subtopic_page_contents_audio_cmd = cast( + subtopic_page_domain.UpdateSubtopicPagePropertyPageContentsAudioCmd, # pylint: disable=line-too-long + change + ) modified_subtopic_pages[ subtopic_page_id].update_page_contents_audio( state_domain.RecordedVoiceovers.from_dict( - change.new_value)) + update_subtopic_page_contents_audio_cmd.new_value + ) + ) elif change.cmd == topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY: - if (change.property_name == + # Here we use cast because we are narrowing down the type from + # TopicChange to a specific change command. + update_subtopic_property_cmd = cast( + topic_domain.UpdateSubtopicPropertyCmd, + change + ) + if (update_subtopic_property_cmd.property_name == topic_domain.SUBTOPIC_PROPERTY_TITLE): topic.update_subtopic_title( - change.subtopic_id, change.new_value) - if (change.property_name == + update_subtopic_property_cmd.subtopic_id, + update_subtopic_property_cmd.new_value + ) + if (update_subtopic_property_cmd.property_name == topic_domain.SUBTOPIC_PROPERTY_THUMBNAIL_FILENAME): - topic.update_subtopic_thumbnail_filename( - change.subtopic_id, change.new_value) - if (change.property_name == + update_subtopic_thumbnail_filename( + topic, update_subtopic_property_cmd.subtopic_id, + update_subtopic_property_cmd.new_value + ) + if (update_subtopic_property_cmd.property_name == topic_domain.SUBTOPIC_PROPERTY_THUMBNAIL_BG_COLOR): topic.update_subtopic_thumbnail_bg_color( - change.subtopic_id, change.new_value) - if (change.property_name == + update_subtopic_property_cmd.subtopic_id, + update_subtopic_property_cmd.new_value + ) + if (update_subtopic_property_cmd.property_name == topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT): topic.update_subtopic_url_fragment( - change.subtopic_id, change.new_value) + update_subtopic_property_cmd.subtopic_id, + update_subtopic_property_cmd.new_value + ) elif ( change.cmd == @@ -346,7 +607,12 @@ def apply_change_list(topic_id, change_list): raise e -def _save_topic(committer_id, topic, 
commit_message, change_list): +def _save_topic( + committer_id: str, + topic: topic_domain.Topic, + commit_message: Optional[str], + change_list: Sequence[change_domain.BaseChange] +) -> None: """Validates a topic and commits it to persistent storage. If successful, increments the version number of the incoming topic domain object by 1. @@ -354,7 +620,8 @@ def _save_topic(committer_id, topic, commit_message, change_list): Args: committer_id: str. ID of the given committer. topic: Topic. The topic domain object to be saved. - commit_message: str. The commit message. + commit_message: str|None. The commit description message, for + unpublished topics, it may be equal to None. change_list: list(TopicChange). List of changes applied to a topic. Raises: @@ -366,10 +633,10 @@ def _save_topic(committer_id, topic, commit_message, change_list): raise Exception( 'Unexpected error: received an invalid change list when trying to ' 'save topic %s: %s' % (topic.id, change_list)) - topic_rights = topic_fetchers.get_topic_rights(topic.id, strict=False) + topic_rights = topic_fetchers.get_topic_rights(topic.id, strict=True) topic.validate(strict=topic_rights.topic_is_published) - topic_model = topic_models.TopicModel.get(topic.id, strict=False) + topic_model = topic_models.TopicModel.get(topic.id, strict=True) # Topic model cannot be None as topic is passed as parameter here and that # is only possible if a topic model with that topic id exists. Also this is @@ -380,45 +647,26 @@ def _save_topic(committer_id, topic, commit_message, change_list): 'Unexpected error: trying to update version %s of topic ' 'from version %s. Please reload the page and try again.' % (topic_model.version, topic.version)) - elif topic.version < topic_model.version: + if topic.version < topic_model.version: raise Exception( 'Trying to update version %s of topic from version %s, ' 'which is too old. Please reload the page and try again.' 
% (topic_model.version, topic.version)) - topic_model.description = topic.description - topic_model.name = topic.name - topic_model.canonical_name = topic.canonical_name - topic_model.abbreviated_name = topic.abbreviated_name - topic_model.url_fragment = topic.url_fragment - topic_model.thumbnail_bg_color = topic.thumbnail_bg_color - topic_model.thumbnail_filename = topic.thumbnail_filename - topic_model.thumbnail_size_in_bytes = topic.thumbnail_size_in_bytes - topic_model.canonical_story_references = [ - reference.to_dict() for reference in topic.canonical_story_references - ] - topic_model.additional_story_references = [ - reference.to_dict() for reference in topic.additional_story_references - ] - topic_model.uncategorized_skill_ids = topic.uncategorized_skill_ids - topic_model.subtopics = [subtopic.to_dict() for subtopic in topic.subtopics] - topic_model.subtopic_schema_version = topic.subtopic_schema_version - topic_model.story_reference_schema_version = ( - topic.story_reference_schema_version) - topic_model.next_subtopic_id = topic.next_subtopic_id - topic_model.language_code = topic.language_code - topic_model.meta_tag_content = topic.meta_tag_content - topic_model.practice_tab_is_displayed = topic.practice_tab_is_displayed - topic_model.page_title_fragment_for_web = topic.page_title_fragment_for_web + topic_model_to_commit = populate_topic_model_fields(topic_model, topic) change_dicts = [change.to_dict() for change in change_list] - topic_model.commit(committer_id, commit_message, change_dicts) + topic_model_to_commit.commit(committer_id, commit_message, change_dicts) caching_services.delete_multi( caching_services.CACHE_NAMESPACE_TOPIC, None, [topic.id]) topic.version += 1 def update_topic_and_subtopic_pages( - committer_id, topic_id, change_list, commit_message): + committer_id: str, + topic_id: str, + change_list: Sequence[change_domain.BaseChange], + commit_message: Optional[str] +) -> None: """Updates a topic and its subtopic pages. Commits changes. 
Args: @@ -433,7 +681,7 @@ def update_topic_and_subtopic_pages( Raises: ValueError. Current user does not have enough rights to edit a topic. """ - topic_rights = topic_fetchers.get_topic_rights(topic_id, strict=False) + topic_rights = topic_fetchers.get_topic_rights(topic_id, strict=True) if topic_rights.topic_is_published and not commit_message: raise ValueError( 'Expected a commit message, received none.') @@ -484,7 +732,11 @@ def update_topic_and_subtopic_pages( updated_topic.id, updated_topic.name) -def delete_uncategorized_skill(user_id, topic_id, uncategorized_skill_id): +def delete_uncategorized_skill( + user_id: str, + topic_id: str, + uncategorized_skill_id: str +) -> None: """Removes skill with given id from the topic. Args: @@ -502,7 +754,11 @@ def delete_uncategorized_skill(user_id, topic_id, uncategorized_skill_id): 'Removed %s from uncategorized skill ids' % uncategorized_skill_id) -def add_uncategorized_skill(user_id, topic_id, uncategorized_skill_id): +def add_uncategorized_skill( + user_id: str, + topic_id: str, + uncategorized_skill_id: str +) -> None: """Adds a skill with given id to the topic. Args: @@ -520,7 +776,11 @@ def add_uncategorized_skill(user_id, topic_id, uncategorized_skill_id): 'Added %s to uncategorized skill ids' % uncategorized_skill_id) -def publish_story(topic_id, story_id, committer_id): +def publish_story( + topic_id: str, + story_id: str, + committer_id: str +) -> None: """Marks the given story as published. Args: @@ -533,7 +793,9 @@ def publish_story(topic_id, story_id, committer_id): Exception. The story is already published. Exception. The user does not have enough rights to publish the story. """ - def _are_nodes_valid_for_publishing(story_nodes): + def _are_nodes_valid_for_publishing( + story_nodes: List[story_domain.StoryNode] + ) -> None: """Validates the story nodes before publishing. 
Args: @@ -553,9 +815,7 @@ def _are_nodes_valid_for_publishing(story_nodes): story_services.validate_explorations_for_story( exploration_id_list, True) - topic = topic_fetchers.get_topic_by_id(topic_id, strict=None) - if topic is None: - raise Exception('A topic with the given ID doesn\'t exist') + topic = topic_fetchers.get_topic_by_id(topic_id, strict=True) user = user_services.get_user_actions_info(committer_id) if role_services.ACTION_CHANGE_STORY_STATUS not in user.actions: raise Exception( @@ -584,7 +844,9 @@ def _are_nodes_valid_for_publishing(story_nodes): story_id, linked_exp_ids) -def unpublish_story(topic_id, story_id, committer_id): +def unpublish_story( + topic_id: str, story_id: str, committer_id: str +) -> None: """Marks the given story as unpublished. Args: @@ -601,7 +863,7 @@ def unpublish_story(topic_id, story_id, committer_id): if role_services.ACTION_CHANGE_STORY_STATUS not in user.actions: raise Exception( 'The user does not have enough rights to unpublish the story.') - topic = topic_fetchers.get_topic_by_id(topic_id, strict=None) + topic = topic_fetchers.get_topic_by_id(topic_id, strict=False) if topic is None: raise Exception('A topic with the given ID doesn\'t exist') story = story_fetchers.get_story_by_id(story_id, strict=False) @@ -624,7 +886,9 @@ def unpublish_story(topic_id, story_id, committer_id): suggestion_services.auto_reject_translation_suggestions_for_exp_ids(exp_ids) -def delete_canonical_story(user_id, topic_id, story_id): +def delete_canonical_story( + user_id: str, topic_id: str, story_id: str +) -> None: """Removes story with given id from the topic. 
NOTE TO DEVELOPERS: Presently, this function only removes story_reference @@ -644,7 +908,9 @@ def delete_canonical_story(user_id, topic_id, story_id): 'Removed %s from canonical story ids' % story_id) -def add_canonical_story(user_id, topic_id, story_id): +def add_canonical_story( + user_id: str, topic_id: str, story_id: str +) -> None: """Adds a story to the canonical story reference list of a topic. Args: @@ -661,7 +927,9 @@ def add_canonical_story(user_id, topic_id, story_id): 'Added %s to canonical story ids' % story_id) -def delete_additional_story(user_id, topic_id, story_id): +def delete_additional_story( + user_id: str, topic_id: str, story_id: str +) -> None: """Removes story with given id from the topic. NOTE TO DEVELOPERS: Presently, this function only removes story_reference @@ -681,7 +949,9 @@ def delete_additional_story(user_id, topic_id, story_id): 'Removed %s from additional story ids' % story_id) -def add_additional_story(user_id, topic_id, story_id): +def add_additional_story( + user_id: str, topic_id: str, story_id: str +) -> None: """Adds a story to the additional story reference list of a topic. Args: @@ -698,7 +968,9 @@ def add_additional_story(user_id, topic_id, story_id): 'Added %s to additional story ids' % story_id) -def delete_topic(committer_id, topic_id, force_deletion=False): +def delete_topic( + committer_id: str, topic_id: str, force_deletion: bool = False +) -> None: """Deletes the topic with the given topic_id. Args: @@ -749,7 +1021,7 @@ def delete_topic(committer_id, topic_id, force_deletion=False): .delete_exploration_opportunities_corresponding_to_topic(topic_id)) -def delete_topic_summary(topic_id): +def delete_topic_summary(topic_id: str) -> None: """Delete a topic summary model. 
Args: @@ -761,7 +1033,12 @@ def delete_topic_summary(topic_id): def update_story_and_topic_summary( - committer_id, story_id, change_list, commit_message, topic_id): + committer_id: str, + story_id: str, + change_list: List[story_domain.StoryChange], + commit_message: str, + topic_id: str +) -> None: """Updates a story. Commits changes. Then generates a new topic summary. @@ -771,7 +1048,7 @@ def update_story_and_topic_summary( story_id: str. The story id. change_list: list(StoryChange). These changes are applied in sequence to produce the resulting story. - commit_message: str or None. A description of changes made to the + commit_message: str. A description of changes made to the story. topic_id: str. The id of the topic to which the story is belongs. """ @@ -783,7 +1060,7 @@ def update_story_and_topic_summary( generate_topic_summary(topic_id) -def generate_topic_summary(topic_id): +def generate_topic_summary(topic_id: str) -> None: """Creates and stores a summary of the given topic. Args: @@ -794,7 +1071,9 @@ def generate_topic_summary(topic_id): save_topic_summary(topic_summary) -def compute_summary_of_topic(topic): +def compute_summary_of_topic( + topic: topic_domain.Topic +) -> topic_domain.TopicSummary: """Create a TopicSummary domain object for a given Topic domain object and return it. @@ -803,6 +1082,9 @@ def compute_summary_of_topic(topic): Returns: TopicSummary. The computed summary for the given topic. + + Raises: + Exception. No data available for when the topic was last updated. """ canonical_story_count = 0 additional_story_count = 0 @@ -826,6 +1108,10 @@ def compute_summary_of_topic(topic): for subtopic in topic.subtopics: total_skill_count = total_skill_count + len(subtopic.skill_ids) + if topic.created_on is None or topic.last_updated is None: + raise Exception( + 'No data available for when the topic was last updated.' 
+ ) topic_summary = topic_domain.TopicSummary( topic.id, topic.name, topic.canonical_name, topic.language_code, topic.description, topic.version, topic_model_canonical_story_count, @@ -839,48 +1125,23 @@ def compute_summary_of_topic(topic): return topic_summary -def save_topic_summary(topic_summary): +def save_topic_summary(topic_summary: topic_domain.TopicSummary) -> None: """Save a topic summary domain object as a TopicSummaryModel entity in the datastore. Args: - topic_summary: TopicSummaryModel. The topic summary object to be saved + topic_summary: TopicSummary. The topic summary object to be saved in the datastore. """ - topic_summary_dict = { - 'name': topic_summary.name, - 'description': topic_summary.description, - 'canonical_name': topic_summary.canonical_name, - 'language_code': topic_summary.language_code, - 'version': topic_summary.version, - 'additional_story_count': topic_summary.additional_story_count, - 'canonical_story_count': topic_summary.canonical_story_count, - 'uncategorized_skill_count': topic_summary.uncategorized_skill_count, - 'subtopic_count': topic_summary.subtopic_count, - 'total_skill_count': topic_summary.total_skill_count, - 'total_published_node_count': - topic_summary.total_published_node_count, - 'thumbnail_filename': topic_summary.thumbnail_filename, - 'thumbnail_bg_color': topic_summary.thumbnail_bg_color, - 'topic_model_last_updated': topic_summary.topic_model_last_updated, - 'topic_model_created_on': topic_summary.topic_model_created_on, - 'url_fragment': topic_summary.url_fragment - } - - topic_summary_model = ( + existing_topic_summary_model = ( topic_models.TopicSummaryModel.get_by_id(topic_summary.id)) - if topic_summary_model is not None: - topic_summary_model.populate(**topic_summary_dict) - topic_summary_model.update_timestamps() - topic_summary_model.put() - else: - topic_summary_dict['id'] = topic_summary.id - model = topic_models.TopicSummaryModel(**topic_summary_dict) - model.update_timestamps() - model.put() + 
topic_summary_model = populate_topic_summary_model_fields( + existing_topic_summary_model, topic_summary) + topic_summary_model.update_timestamps() + topic_summary_model.put() -def publish_topic(topic_id, committer_id): +def publish_topic(topic_id: str, committer_id: str) -> None: """Marks the given topic as published. Args: @@ -912,7 +1173,7 @@ def publish_topic(topic_id, committer_id): topic_rights, committer_id, 'Published the topic', commit_cmds) -def unpublish_topic(topic_id, committer_id): +def unpublish_topic(topic_id: str, committer_id: str) -> None: """Marks the given topic as unpublished. Args: @@ -942,7 +1203,12 @@ def unpublish_topic(topic_id, committer_id): topic_rights, committer_id, 'Unpublished the topic', commit_cmds) -def save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds): +def save_topic_rights( + topic_rights: topic_domain.TopicRights, + committer_id: str, + commit_message: str, + commit_cmds: List[topic_domain.TopicRightsChange] +) -> None: """Saves a TopicRights domain object to the datastore. Args: @@ -954,7 +1220,7 @@ def save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds): what kind of commit was done. """ - model = topic_models.TopicRightsModel.get(topic_rights.id, strict=False) + model = topic_models.TopicRightsModel.get(topic_rights.id, strict=True) model.manager_ids = topic_rights.manager_ids model.topic_is_published = topic_rights.topic_is_published @@ -962,7 +1228,9 @@ def save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds): model.commit(committer_id, commit_message, commit_cmd_dicts) -def create_new_topic_rights(topic_id, committer_id): +def create_new_topic_rights( + topic_id: str, committer_id: str +) -> None: """Creates a new topic rights object and saves it to the datastore. 
    Args:
@@ -979,7 +1247,7 @@ def create_new_topic_rights(topic_id, committer_id):
     ).commit(committer_id, 'Created new topic rights', commit_cmds)
 
 
-def filter_published_topic_ids(topic_ids):
+def filter_published_topic_ids(topic_ids: List[str]) -> List[str]:
     """Given list of topic IDs, returns the IDs of all topics that are
     published in that list.
 
@@ -1001,7 +1269,10 @@ def filter_published_topic_ids(topic_ids):
     return published_topic_ids
 
 
-def check_can_edit_topic(user, topic_rights):
+def check_can_edit_topic(
+    user: user_domain.UserActionsInfo,
+    topic_rights: Optional[topic_domain.TopicRights]
+) -> bool:
     """Checks whether the user can edit the given topic.
 
     Args:
@@ -1018,13 +1289,15 @@ def check_can_edit_topic(user, topic_rights):
         return True
     if role_services.ACTION_EDIT_OWNED_TOPIC not in user.actions:
         return False
-    if topic_rights.is_manager(user.user_id):
+    if user.user_id and topic_rights.is_manager(user.user_id):
         return True
 
     return False
 
 
-def deassign_user_from_all_topics(committer, user_id):
+def deassign_user_from_all_topics(
+    committer: user_domain.UserActionsInfo, user_id: str
+) -> None:
     """Deassigns given user from all topics assigned to them.
 
     Args:
@@ -1034,8 +1307,14 @@ def deassign_user_from_all_topics(committer, user_id):
 
     Raises:
         Exception. The committer does not have rights to modify a role.
+        Exception. Guest users are not allowed to deassign users from
+            all topics.
     """
     topic_rights_list = topic_fetchers.get_topic_rights_with_user(user_id)
+    if committer.user_id is None:
+        raise Exception(
+            'Guest users are not allowed to deassign users from all topics.'
+        )
     for topic_rights in topic_rights_list:
         topic_rights.manager_ids.remove(user_id)
         commit_cmds = [topic_domain.TopicRightsChange({
@@ -1044,10 +1323,18 @@ def deassign_user_from_all_topics(committer, user_id):
         })]
         save_topic_rights(
             topic_rights, committer.user_id,
-            'Removed all assigned topics from %s' % (user_id), commit_cmds)
+            'Removed all assigned topics from %s' % (
+                user_services.get_username(user_id)
+            ),
+            commit_cmds
+        )
 
 
-def deassign_manager_role_from_topic(committer, user_id, topic_id):
+def deassign_manager_role_from_topic(
+    committer: user_domain.UserActionsInfo,
+    user_id: str,
+    topic_id: str
+) -> None:
     """Deassigns given user from all topics assigned to them.
 
     Args:
@@ -1058,7 +1345,13 @@ def deassign_manager_role_from_topic(committer, user_id, topic_id):
 
     Raises:
         Exception. The committer does not have rights to modify a role.
+        Exception. Guest users are not allowed to deassign manager role
+            from topic.
     """
+    if committer.user_id is None:
+        raise Exception(
+            'Guest users are not allowed to deassign manager role from topic.'
+        )
     topic_rights = topic_fetchers.get_topic_rights(topic_id)
     if user_id not in topic_rights.manager_ids:
         raise Exception('User does not have manager rights in topic.')
@@ -1069,11 +1362,21 @@ def deassign_manager_role_from_topic(committer, user_id, topic_id):
         'removed_user_id': user_id
     })]
     save_topic_rights(
-        topic_rights, committer.user_id,
-        'Removed all assigned topics from %s' % (user_id), commit_cmds)
+        topic_rights,
+        committer.user_id,
+        'Removed all assigned topics from %s' % (
+            user_services.get_username(user_id)
+        ),
+        commit_cmds
+    )
 
 
-def assign_role(
+def assign_role(
+    committer: user_domain.UserActionsInfo,
+    assignee: user_domain.UserActionsInfo,
+    new_role: str,
+    topic_id: str
+) -> None:
     """Assigns a new role to the user.
 
     Args:
@@ -1090,8 +1393,14 @@ def assign_role(committer, assignee, new_role, topic_id):
         Exception.
The assignee is already a manager for the topic. Exception. The assignee doesn't have enough rights to become a manager. Exception. The role is invalid. + Exception. Guest user is not allowed to assign roles to a user. + Exception. The role of the Guest user cannot be changed. """ committer_id = committer.user_id + if committer_id is None: + raise Exception( + 'Guest user is not allowed to assign roles to a user.' + ) topic_rights = topic_fetchers.get_topic_rights(topic_id) if (role_services.ACTION_MODIFY_CORE_ROLES_FOR_ANY_ACTIVITY not in committer.actions): @@ -1102,6 +1411,10 @@ def assign_role(committer, assignee, new_role, topic_id): raise Exception( 'UnauthorizedUserException: Could not assign new role.') + if assignee.user_id is None: + raise Exception( + 'Cannot change the role of the Guest user.' + ) assignee_username = user_services.get_username(assignee.user_id) if role_services.ACTION_EDIT_OWNED_TOPIC not in assignee.actions: raise Exception( @@ -1135,7 +1448,7 @@ def assign_role(committer, assignee, new_role, topic_id): save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds) -def get_story_titles_in_topic(topic): +def get_story_titles_in_topic(topic: topic_domain.Topic) -> List[str]: """Returns titles of the stories present in the topic. Args: @@ -1149,3 +1462,227 @@ def get_story_titles_in_topic(topic): stories = story_fetchers.get_stories_by_ids(story_ids) story_titles = [story.title for story in stories if story is not None] return story_titles + + +def update_thumbnail_filename( + topic: topic_domain.Topic, new_thumbnail_filename: str +) -> None: + """Updates the thumbnail filename and file size in a topic object. + + Args: + topic: topic_domain.Topic. The topic domain object whose thumbnail + is to be updated. + new_thumbnail_filename: str. The updated thumbnail filename + for the topic. + + Raises: + Exception. The thumbnail does not exist for expected topic in + the filesystem. 
+ """ + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_TOPIC, topic.id) + filepath = '%s/%s' % ( + constants.ASSET_TYPE_THUMBNAIL, new_thumbnail_filename) + if fs.isfile(filepath): + thumbnail_size_in_bytes = len(fs.get(filepath)) + topic.update_thumbnail_filename_and_size( + new_thumbnail_filename, thumbnail_size_in_bytes) + else: + raise Exception( + 'The thumbnail %s for topic with id %s does not exist' + ' in the filesystem.' % (new_thumbnail_filename, topic.id)) + + +def update_subtopic_thumbnail_filename( + topic: topic_domain.Topic, + subtopic_id: int, + new_thumbnail_filename: str +) -> None: + """Updates the thumbnail filename and file size in a subtopic. + + Args: + topic: topic_domain.Topic. The topic domain object containing + the subtopic whose thumbnail is to be updated. + subtopic_id: int. The id of the subtopic to edit. + new_thumbnail_filename: str. The new thumbnail filename for the + subtopic. + + Raises: + Exception. The thumbnail does not exist for expected topic in + the filesystem. + """ + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_TOPIC, topic.id) + filepath = '%s/%s' % ( + constants.ASSET_TYPE_THUMBNAIL, new_thumbnail_filename) + if fs.isfile(filepath): + thumbnail_size_in_bytes = len(fs.get(filepath)) + topic.update_subtopic_thumbnail_filename_and_size( + subtopic_id, new_thumbnail_filename, thumbnail_size_in_bytes) + else: + raise Exception( + 'The thumbnail %s for subtopic with topic_id %s does not exist' + ' in the filesystem.' % (new_thumbnail_filename, topic.id)) + + +def get_topic_id_to_diagnostic_test_skill_ids( + topic_ids: List[str] +) -> Dict[str, List[str]]: + """Returns a dict with topic ID as key and a list of diagnostic test + skill IDs as value. + + Args: + topic_ids: List(str). A list of topic IDs. + + Raises: + Exception. The topic models for some of the given topic IDs do not + exist. + + Returns: + dict(str, list(str)). A dict with topic ID as key and a list of + diagnostic test skill IDs as value. 
+ """ + topic_id_to_diagnostic_test_skill_ids = {} + topics = topic_fetchers.get_topics_by_ids(topic_ids) + + for topic in topics: + if topic is None: + continue + topic_id_to_diagnostic_test_skill_ids[topic.id] = ( + topic.skill_ids_for_diagnostic_test) + + correct_topic_ids = list(topic_id_to_diagnostic_test_skill_ids.keys()) + # The topic IDs for which topic models do not exist are referred to as + # incorrect topic IDs. + incorrect_topic_ids = [ + topic_id for topic_id in topic_ids if topic_id not in correct_topic_ids + ] + if incorrect_topic_ids: + error_msg = ( + 'No corresponding topic models exist for these topic IDs: %s.' + % (', '.join(incorrect_topic_ids)) + ) + raise Exception(error_msg) + return topic_id_to_diagnostic_test_skill_ids + + +def populate_topic_model_fields( + topic_model: topic_models.TopicModel, + topic: topic_domain.Topic +) -> topic_models.TopicModel: + """Populate topic model with the data from topic object. + + Args: + topic_model: TopicModel. The model to populate. + topic: Topic. The topic domain object which should be used to + populate the model. + + Returns: + TopicModel. Populated model. 
+ """ + topic_model.description = topic.description + topic_model.name = topic.name + topic_model.canonical_name = topic.canonical_name + topic_model.abbreviated_name = topic.abbreviated_name + topic_model.url_fragment = topic.url_fragment + topic_model.thumbnail_bg_color = topic.thumbnail_bg_color + topic_model.thumbnail_filename = topic.thumbnail_filename + topic_model.thumbnail_size_in_bytes = topic.thumbnail_size_in_bytes + topic_model.canonical_story_references = [ + reference.to_dict() for reference in topic.canonical_story_references + ] + topic_model.additional_story_references = [ + reference.to_dict() for reference in topic.additional_story_references + ] + topic_model.uncategorized_skill_ids = topic.uncategorized_skill_ids + topic_model.subtopics = [subtopic.to_dict() for subtopic in topic.subtopics] + topic_model.subtopic_schema_version = topic.subtopic_schema_version + topic_model.story_reference_schema_version = ( + topic.story_reference_schema_version) + topic_model.next_subtopic_id = topic.next_subtopic_id + topic_model.language_code = topic.language_code + topic_model.meta_tag_content = topic.meta_tag_content + topic_model.practice_tab_is_displayed = topic.practice_tab_is_displayed + topic_model.page_title_fragment_for_web = topic.page_title_fragment_for_web + topic_model.skill_ids_for_diagnostic_test = ( + topic.skill_ids_for_diagnostic_test) + return topic_model + + +def populate_topic_summary_model_fields( + topic_summary_model: topic_models.TopicSummaryModel, + topic_summary: topic_domain.TopicSummary +) -> topic_models.TopicSummaryModel: + """Populate topic summary model with the data from topic summary object. + + Args: + topic_summary_model: TopicSummaryModel. The model to populate. + topic_summary: TopicSummary. The topic summary domain object which + should be used to populate the model. + + Returns: + TopicSummaryModel. Populated model. 
+ """ + topic_summary_dict = { + 'name': topic_summary.name, + 'description': topic_summary.description, + 'canonical_name': topic_summary.canonical_name, + 'language_code': topic_summary.language_code, + 'version': topic_summary.version, + 'additional_story_count': topic_summary.additional_story_count, + 'canonical_story_count': topic_summary.canonical_story_count, + 'uncategorized_skill_count': topic_summary.uncategorized_skill_count, + 'subtopic_count': topic_summary.subtopic_count, + 'total_skill_count': topic_summary.total_skill_count, + 'total_published_node_count': + topic_summary.total_published_node_count, + 'thumbnail_filename': topic_summary.thumbnail_filename, + 'thumbnail_bg_color': topic_summary.thumbnail_bg_color, + 'topic_model_last_updated': topic_summary.topic_model_last_updated, + 'topic_model_created_on': topic_summary.topic_model_created_on, + 'url_fragment': topic_summary.url_fragment + } + + if topic_summary_model is not None: + topic_summary_model.populate(**topic_summary_dict) + else: + topic_summary_dict['id'] = topic_summary.id + topic_summary_model = topic_models.TopicSummaryModel( + **topic_summary_dict) + + return topic_summary_model + + +def get_topic_id_to_topic_name_dict(topic_ids: List[str]) -> Dict[str, str]: + """Returns a dict with topic ID as key and topic name as value, for all + given topic IDs. + + Args: + topic_ids: List(str). A list of topic IDs. + + Raises: + Exception. The topic models for some of the given topic IDs do not + exist. + + Returns: + dict(str, str). A dict with topic ID as key and topic name as value. + """ + topic_id_to_topic_name = {} + topics = topic_fetchers.get_topics_by_ids(topic_ids) + + for topic in topics: + if topic is None: + continue + topic_id_to_topic_name[topic.id] = topic.name + + correct_topic_ids = list(topic_id_to_topic_name.keys()) + # The topic IDs for which topic models do not exist are referred to as + # incorrect topic IDs. 
+ incorrect_topic_ids = [ + topic_id for topic_id in topic_ids if topic_id not in correct_topic_ids + ] + if incorrect_topic_ids: + error_msg = ( + 'No corresponding topic models exist for these topic IDs: %s.' + % (', '.join(incorrect_topic_ids)) + ) + raise Exception(error_msg) + return topic_id_to_topic_name diff --git a/core/domain/topic_services_test.py b/core/domain/topic_services_test.py index 497f6ef8b98a..9f2b0c5e9644 100644 --- a/core/domain/topic_services_test.py +++ b/core/domain/topic_services_test.py @@ -21,10 +21,10 @@ import os from core import feconf -from core import python_utils +from core import utils from core.constants import constants from core.domain import exp_services -from core.domain import fs_domain +from core.domain import fs_services from core.domain import question_domain from core.domain import rights_manager from core.domain import story_domain @@ -35,36 +35,43 @@ from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services +from core.domain import translation_domain from core.domain import user_services from core.platform import models from core.tests import test_utils -( - topic_models, suggestion_models -) = models.Registry.import_models([ - models.NAMES.topic, models.NAMES.suggestion +from typing import Dict, List, Optional, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import topic_models + +(topic_models,) = models.Registry.import_models([ + models.Names.TOPIC ]) class TopicServicesUnitTests(test_utils.GenericTestBase): """Tests for topic services.""" - user_id = 'user_id' - story_id_1 = 'story_1' - story_id_2 = 'story_2' - story_id_3 = 'story_3' - subtopic_id = 1 - skill_id_1 = 'skill_1' - skill_id_2 = 'skill_2' - skill_id_3 = 'skill_3' - - def setUp(self): - super(TopicServicesUnitTests, self).setUp() + user_id: str = 'user_id' + story_id_1: str = 'story_1' + story_id_2: str = 'story_2' + story_id_3: str = 'story_3' + subtopic_id: int = 1 + 
skill_id_1: str = 'skill_1' + skill_id_2: str = 'skill_2' + skill_id_3: str = 'skill_3' + + def setUp(self) -> None: + self.test_list: List[str] = [] + super().setUp() self.TOPIC_ID = topic_fetchers.get_new_topic_id() changelist = [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title', - 'subtopic_id': 1 + 'subtopic_id': 1, + 'url_fragment': 'fragment-one' })] self.save_new_topic( self.TOPIC_ID, self.user_id, name='Name', @@ -108,7 +115,51 @@ def setUp(self): self.user_admin = user_services.get_user_actions_info( self.user_id_admin) - def test_compute_summary(self): + def test_raises_error_if_guest_user_trying_to_deassign_roles_from_topic( + self + ) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'Guest users are not allowed to deassing users from all topics.' + ): + topic_services.deassign_user_from_all_topics(guest_user, 'user_id') + + with self.assertRaisesRegex( + Exception, + 'Guest users are not allowed to deassing manager role from topic.' 
+ ): + topic_services.deassign_manager_role_from_topic( + guest_user, 'user_id', 'topic_id' + ) + + def test_get_story_titles_in_topic(self) -> None: + story_titles = topic_services.get_story_titles_in_topic( + self.topic) + self.assertEqual(len(story_titles), 2) + self.assertIn('Title', story_titles) + self.assertIn('Title 2', story_titles) + + def test_update_story_and_topic_summary(self) -> None: + change_list = [ + story_domain.StoryChange( + { + 'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY, + 'property_name': story_domain.STORY_PROPERTY_TITLE, + 'old_value': 'Title', + 'new_value': 'New Title' + } + ) + ] + topic_services.update_story_and_topic_summary( + self.user_id, self.story_id_1, change_list, + 'Updated story title', self.TOPIC_ID + ) + story_titles = topic_services.get_story_titles_in_topic( + self.topic) + self.assertIn('New Title', story_titles) + + def test_compute_summary(self) -> None: topic_summary = topic_services.compute_summary_of_topic(self.topic) self.assertEqual(topic_summary.id, self.TOPIC_ID) @@ -123,12 +174,25 @@ def test_compute_summary(self): self.assertEqual(topic_summary.thumbnail_filename, 'topic.svg') self.assertEqual(topic_summary.thumbnail_bg_color, '#C6DCDA') - def test_get_topic_from_model(self): + def test_raises_error_while_computing_topic_summary_with_invalid_data( + self + ) -> None: + test_topic = self.topic + test_topic.created_on = None + with self.assertRaisesRegex( + Exception, + 'No data available for when the topic was last updated.' 
+ ): + topic_services.compute_summary_of_topic(self.topic) + + def test_get_topic_from_model(self) -> None: topic_model = topic_models.TopicModel.get(self.TOPIC_ID) topic = topic_fetchers.get_topic_from_model(topic_model) self.assertEqual(topic.to_dict(), self.topic.to_dict()) - def test_cannot_get_topic_from_model_with_invalid_schema_version(self): + def test_cannot_get_topic_from_model_with_invalid_schema_version( + self + ) -> None: topic_services.create_new_topic_rights('topic_id', self.user_id_a) commit_cmd = topic_domain.TopicChange({ 'cmd': topic_domain.CMD_CREATE_NEW, @@ -150,13 +214,14 @@ def test_cannot_get_topic_from_model_with_invalid_schema_version(self): language_code='en', subtopics=[subtopic_dict], subtopic_schema_version=0, - story_reference_schema_version=0 + story_reference_schema_version=0, + page_title_fragment_for_web='fragm' ) commit_cmd_dicts = [commit_cmd.to_dict()] model.commit( self.user_id_a, 'topic model created', commit_cmd_dicts) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d subtopic schemas at ' 'present.' % feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION): @@ -174,20 +239,23 @@ def test_cannot_get_topic_from_model_with_invalid_schema_version(self): language_code='en', subtopics=[subtopic_dict], subtopic_schema_version=1, - story_reference_schema_version=0 + story_reference_schema_version=0, + page_title_fragment_for_web='fragm' ) commit_cmd_dicts = [commit_cmd.to_dict()] model.commit( self.user_id_a, 'topic model created', commit_cmd_dicts) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d story reference schemas at ' 'present.' 
% feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION): topic_fetchers.get_topic_from_model(model) - def test_cannot_create_topic_change_class_with_invalid_changelist(self): - with self.assertRaisesRegexp( + def test_cannot_create_topic_change_class_with_invalid_changelist( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Missing cmd key in change dict'): topic_domain.TopicChange({ 'invalid_cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, @@ -196,8 +264,8 @@ def test_cannot_create_topic_change_class_with_invalid_changelist(self): 'new_value': 'New Description' }) - def test_cannot_rearrange_story_with_missing_index_values(self): - with self.assertRaisesRegexp( + def test_cannot_rearrange_story_with_missing_index_values(self) -> None: + with self.assertRaisesRegex( Exception, ( 'The following required attributes are missing: ' 'from_index, to_index')): @@ -205,8 +273,8 @@ def test_cannot_rearrange_story_with_missing_index_values(self): 'cmd': topic_domain.CMD_REARRANGE_CANONICAL_STORY, }) - def test_cannot_rearrange_story_with_missing_from_index_value(self): - with self.assertRaisesRegexp( + def test_cannot_rearrange_story_with_missing_from_index_value(self) -> None: + with self.assertRaisesRegex( Exception, ( 'The following required attributes are missing: ' 'from_index')): @@ -215,8 +283,8 @@ def test_cannot_rearrange_story_with_missing_from_index_value(self): 'to_index': 1 }) - def test_cannot_rearrange_story_with_missing_to_index_value(self): - with self.assertRaisesRegexp( + def test_cannot_rearrange_story_with_missing_to_index_value(self) -> None: + with self.assertRaisesRegex( Exception, ( 'The following required attributes are missing: to_index')): topic_domain.TopicChange({ @@ -224,7 +292,7 @@ def test_cannot_rearrange_story_with_missing_to_index_value(self): 'from_index': 1 }) - def test_rearrange_canonical_stories_in_topic(self): + def test_rearrange_canonical_stories_in_topic(self) -> None: story_id_new = 'story_id_new' 
topic_services.add_canonical_story( self.user_id_admin, self.TOPIC_ID, 'story_id_new') @@ -259,6 +327,8 @@ def test_rearrange_canonical_stories_in_topic(self): topic_commit_log_entry = ( topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 4) ) + # Ruling out the possibility of None for mypy type checking. + assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'edit') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin) @@ -266,7 +336,7 @@ def test_rearrange_canonical_stories_in_topic(self): topic_commit_log_entry.commit_message, 'Rearranged canonical story on index 2 to index 0.') - def test_rearrange_skill_in_subtopic(self): + def test_rearrange_skill_in_subtopic(self) -> None: topic_services.add_uncategorized_skill( self.user_id_admin, self.TOPIC_ID, self.skill_id_3) changelist = [topic_domain.TopicChange({ @@ -316,6 +386,8 @@ def test_rearrange_skill_in_subtopic(self): topic_commit_log_entry = ( topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 5) ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'edit') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin) @@ -323,11 +395,12 @@ def test_rearrange_skill_in_subtopic(self): topic_commit_log_entry.commit_message, 'Rearranged skill from index 2 to index 0 for subtopic with id 1.') - def test_rearrange_subtopic(self): + def test_rearrange_subtopic(self) -> None: changelist = [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title2', - 'subtopic_id': 2 + 'subtopic_id': 2, + 'url_fragment': 'fragment-two' }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY, 'property_name': topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT, @@ -337,7 +410,8 @@ def test_rearrange_subtopic(self): }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title3', - 'subtopic_id': 3 + 'subtopic_id': 3, + 'url_fragment': 'fragment-three' }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY, 'property_name': topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT, @@ -375,6 +449,8 @@ def test_rearrange_subtopic(self): topic_commit_log_entry = ( topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 4) ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'edit') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin) @@ -382,8 +458,8 @@ def test_rearrange_subtopic(self): topic_commit_log_entry.commit_message, 'Rearranged subtopic from index 2 to index 0.') - def test_cannot_update_topic_property_with_invalid_changelist(self): - with self.assertRaisesRegexp( + def test_cannot_update_topic_property_with_invalid_changelist(self) -> None: + with self.assertRaisesRegex( Exception, ( 'Value for property_name in cmd update_topic_property: ' 'invalid property is not allowed')): @@ -394,8 +470,10 @@ def test_cannot_update_topic_property_with_invalid_changelist(self): 'new_value': 'New Description' }) - def test_cannot_update_subtopic_property_with_invalid_changelist(self): - with self.assertRaisesRegexp( + def test_cannot_update_subtopic_property_with_invalid_changelist( + self + ) -> None: + with self.assertRaisesRegex( Exception, ( 'The following required attributes are ' 'missing: subtopic_id')): @@ -406,20 +484,18 @@ def test_cannot_update_subtopic_property_with_invalid_changelist(self): 'new_value': 'New Description' }) - def test_update_subtopic_property(self): + def test_update_subtopic_property(self) -> None: topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) self.assertEqual(len(topic.subtopics), 1) self.assertEqual(topic.subtopics[0].title, 'Title') # Store a dummy image in filesystem. 
- with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_TOPIC, self.TOPIC_ID)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_TOPIC, self.TOPIC_ID) fs.commit( '%s/image.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, mimetype='image/svg+xml') @@ -455,8 +531,8 @@ def test_update_subtopic_property(self): topic.subtopics[0].thumbnail_bg_color, constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0]) - def test_cannot_create_topic_change_class_with_invalid_cmd(self): - with self.assertRaisesRegexp( + def test_cannot_create_topic_change_class_with_invalid_cmd(self) -> None: + with self.assertRaisesRegex( Exception, 'Command invalid cmd is not allowed'): topic_domain.TopicChange({ 'cmd': 'invalid cmd', @@ -466,7 +542,7 @@ def test_cannot_create_topic_change_class_with_invalid_cmd(self): 'new_value': 'New Description' }) - def test_publish_and_unpublish_story(self): + def test_publish_and_unpublish_story(self) -> None: topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) self.assertEqual( topic.canonical_story_references[0].story_is_published, False) @@ -478,6 +554,8 @@ def test_publish_and_unpublish_story(self): self.TOPIC_ID, self.story_id_3, self.user_id_admin) topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) topic_summary = topic_fetchers.get_topic_summary_by_id(self.TOPIC_ID) + # Ruling out the possibility of None for mypy type checking. + assert topic_summary is not None self.assertEqual( topic.canonical_story_references[0].story_is_published, True) self.assertEqual( @@ -491,6 +569,8 @@ def test_publish_and_unpublish_story(self): self.TOPIC_ID, self.story_id_3, self.user_id_admin) topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) topic_summary = topic_fetchers.get_topic_summary_by_id(self.TOPIC_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_summary is not None self.assertEqual( topic.canonical_story_references[0].story_is_published, False) self.assertEqual( @@ -498,35 +578,41 @@ def test_publish_and_unpublish_story(self): self.assertEqual(topic_summary.canonical_story_count, 0) self.assertEqual(topic_summary.additional_story_count, 0) - def test_invalid_publish_and_unpublish_story(self): - with self.assertRaisesRegexp( - Exception, 'A topic with the given ID doesn\'t exist'): + def test_invalid_publish_and_unpublish_story(self) -> None: + with self.assertRaisesRegex( + Exception, + 'Entity for class TopicModel with id invalid_topic not found' + ): topic_services.publish_story( 'invalid_topic', 'story_id_new', self.user_id_admin) - with self.assertRaisesRegexp( - Exception, 'A topic with the given ID doesn\'t exist'): + with self.assertRaisesRegex( + Exception, + 'A topic with the given ID doesn\'t exist' + ): topic_services.unpublish_story( 'invalid_topic', 'story_id_new', self.user_id_admin) - with self.assertRaisesRegexp( - Exception, 'The user does not have enough rights to publish the ' - 'story.'): + with self.assertRaisesRegex( + Exception, + 'The user does not have enough rights to publish the story.' + ): topic_services.publish_story( self.TOPIC_ID, self.story_id_3, self.user_id_b) - with self.assertRaisesRegexp( - Exception, 'The user does not have enough rights to unpublish the ' - 'story.'): + with self.assertRaisesRegex( + Exception, + 'The user does not have enough rights to unpublish the story.' 
+ ): topic_services.unpublish_story( self.TOPIC_ID, self.story_id_3, self.user_id_b) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'A story with the given ID doesn\'t exist'): topic_services.publish_story( self.TOPIC_ID, 'invalid_story', self.user_id_admin) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'A story with the given ID doesn\'t exist'): topic_services.unpublish_story( self.TOPIC_ID, 'invalid_story', self.user_id_admin) @@ -538,12 +624,12 @@ def test_invalid_publish_and_unpublish_story(self): title='Title 2', description='Description 2' ) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Story with given id doesn\'t exist in the topic'): topic_services.publish_story( self.TOPIC_ID, 'story_10', self.user_id_admin) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Story with given id doesn\'t exist in the topic'): topic_services.unpublish_story( self.TOPIC_ID, 'story_10', self.user_id_admin) @@ -569,7 +655,7 @@ def test_invalid_publish_and_unpublish_story(self): self.user_id_admin, 'story_id_new', changelist, 'Added node.') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Story node with id node_1 does not contain an ' 'exploration id.'): topic_services.publish_story( @@ -596,7 +682,7 @@ def test_invalid_publish_and_unpublish_story(self): self.user_admin = user_services.get_user_actions_info( self.user_id_admin) rights_manager.unpublish_exploration(self.user_admin, 'exp_id') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Exploration with ID exp_id is not public. Please ' 'publish explorations before adding them to a story.'): topic_services.publish_story( @@ -605,21 +691,19 @@ def test_invalid_publish_and_unpublish_story(self): # Throws error if exploration doesn't exist. 
exp_services.delete_exploration(self.user_id_admin, 'exp_id') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected story to only reference valid explorations, ' 'but found a reference to an invalid exploration with ID: exp_id'): topic_services.publish_story( self.TOPIC_ID, 'story_id_new', self.user_id_admin) - def test_update_topic(self): + def test_update_topic(self) -> None: # Save a dummy image on filesystem, to be used as thumbnail. - with python_utils.open_file( + with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb', encoding=None) as f: raw_image = f.read() - fs = fs_domain.AbstractFileSystem( - fs_domain.GcsFileSystem( - feconf.ENTITY_TYPE_TOPIC, self.TOPIC_ID)) + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_TOPIC, self.TOPIC_ID) fs.commit( '%s/thumbnail.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, mimetype='image/svg+xml') @@ -667,12 +751,20 @@ def test_update_topic(self): topic_domain.TOPIC_PROPERTY_PAGE_TITLE_FRAGMENT_FOR_WEB), 'old_value': '', 'new_value': 'topic page title' + }), topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': ['test_skill_id'], + 'new_value': self.test_list })] topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Updated Description.') topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) topic_summary = topic_fetchers.get_topic_summary_by_id(self.TOPIC_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_summary is not None self.assertEqual(topic.description, 'New Description') self.assertEqual(topic.abbreviated_name, 'short-name') self.assertEqual(topic.url_fragment, 'url-name') @@ -683,6 +775,7 @@ def test_update_topic(self): self.assertEqual(topic.practice_tab_is_displayed, True) self.assertEqual(topic.meta_tag_content, 'topic meta tag content') self.assertEqual(topic.page_title_fragment_for_web, 'topic page title') + self.assertEqual(topic.skill_ids_for_diagnostic_test, []) self.assertEqual(topic_summary.version, 3) self.assertEqual(topic_summary.thumbnail_filename, 'thumbnail.svg') self.assertEqual(topic_summary.thumbnail_bg_color, '#C6DCDA') @@ -694,7 +787,7 @@ def test_update_topic(self): 'old_value': '', 'new_value': 'dummy_thumbnail.svg' })] - with self.assertRaisesRegexp(Exception, ( + with self.assertRaisesRegex(Exception, ( 'The thumbnail dummy_thumbnail.svg for topic with id ' '%s does not exist in the filesystem.' % self.TOPIC_ID)): topic_services.update_topic_and_subtopic_pages( @@ -712,19 +805,25 @@ def test_update_topic(self): self.user_id_a, self.TOPIC_ID, changelist, 'Updated Name.') topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) topic_summary = topic_fetchers.get_topic_summary_by_id(self.TOPIC_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_summary is not None self.assertEqual(topic.name, 'New Name') self.assertEqual(topic.canonical_name, 'new name') self.assertEqual(topic.version, 4) self.assertEqual(topic_summary.name, 'New Name') self.assertEqual(topic_summary.version, 4) - def test_update_topic_and_subtopic_page(self): - changelist = [topic_domain.TopicChange({ + def test_update_topic_and_subtopic_page(self) -> None: + changelist: List[Union[ + topic_domain.TopicChange, + subtopic_page_domain.SubtopicPageChange + ]] = [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title3', - 'subtopic_id': 3 + 'subtopic_id': 3, + 'url_fragment': 'fragment-three' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The given new subtopic id 3 is not equal to ' 'the expected next subtopic id: 2'): topic_services.update_topic_and_subtopic_pages( @@ -743,14 +842,15 @@ def test_update_topic_and_subtopic_page(self): topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title2', - 'subtopic_id': 2 + 'subtopic_id': 2, + 'url_fragment': 'fragment-two' }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_DELETE_SUBTOPIC, 'subtopic_id': 2 }) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The incoming changelist had simultaneous' ' creation and deletion of subtopics.'): topic_services.update_topic_and_subtopic_pages( @@ -784,7 +884,8 @@ def test_update_topic_and_subtopic_page(self): topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title2', - 'subtopic_id': 2 + 'subtopic_id': 2, + 'url_fragment': 'fragment-two' }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_DELETE_SUBTOPIC, @@ -834,8 +935,7 @@ def test_update_topic_and_subtopic_page(self): 'old_subtopic_id': None, 'new_subtopic_id': 2, 'skill_id': self.skill_id_1 - }) - ] + })] topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Added and removed a subtopic.') @@ -853,6 +953,8 @@ 
def test_update_topic_and_subtopic_page(self): # Validate the newly created subtopic page. subtopic_page = subtopic_page_services.get_subtopic_page_by_id( self.TOPIC_ID, 2, strict=False) + # Ruling out the possibility of None for mypy type checking. + assert subtopic_page is not None self.assertEqual( subtopic_page.page_contents.subtitled_html.html, '

    New Value

    ') @@ -875,12 +977,14 @@ def test_update_topic_and_subtopic_page(self): topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title3', - 'subtopic_id': 3 + 'subtopic_id': 3, + 'url_fragment': 'fragment-three' }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title4', - 'subtopic_id': 4 + 'subtopic_id': 4, + 'url_fragment': 'fragment-four' }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_DELETE_SUBTOPIC, @@ -901,7 +1005,7 @@ def test_update_topic_and_subtopic_page(self): } }), ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The subtopic with id 2 doesn\'t exist'): topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, @@ -924,7 +1028,7 @@ def test_update_topic_and_subtopic_page(self): self.TOPIC_ID, 2, strict=False) self.assertIsNotNone(subtopic_page) - def test_update_topic_schema(self): + def test_update_topic_schema(self) -> None: orig_topic_dict = ( topic_fetchers.get_topic_by_id(self.TOPIC_ID).to_dict()) @@ -947,7 +1051,7 @@ def test_update_topic_schema(self): del new_topic_dict['version'] self.assertEqual(orig_topic_dict, new_topic_dict) - def test_add_uncategorized_skill(self): + def test_add_uncategorized_skill(self) -> None: topic_services.add_uncategorized_skill( self.user_id_admin, self.TOPIC_ID, 'skill_id_3') topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) @@ -957,6 +1061,8 @@ def test_add_uncategorized_skill(self): topic_commit_log_entry = ( topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3) ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'edit') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin) @@ -964,7 +1070,7 @@ def test_add_uncategorized_skill(self): topic_commit_log_entry.commit_message, 'Added skill_id_3 to uncategorized skill ids') - def test_delete_uncategorized_skill(self): + def test_delete_uncategorized_skill(self) -> None: topic_services.delete_uncategorized_skill( self.user_id_admin, self.TOPIC_ID, self.skill_id_1) topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) @@ -972,6 +1078,8 @@ def test_delete_uncategorized_skill(self): topic_commit_log_entry = ( topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3) ) + # Ruling out the possibility of None for mypy type checking. + assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'edit') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin) @@ -979,7 +1087,7 @@ def test_delete_uncategorized_skill(self): topic_commit_log_entry.commit_message, 'Removed %s from uncategorized skill ids' % self.skill_id_1) - def test_delete_canonical_story(self): + def test_delete_canonical_story(self) -> None: topic_services.delete_canonical_story( self.user_id_admin, self.TOPIC_ID, self.story_id_1) @@ -990,6 +1098,8 @@ def test_delete_canonical_story(self): topic_commit_log_entry = ( topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3) ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'edit') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin) @@ -997,7 +1107,7 @@ def test_delete_canonical_story(self): topic_commit_log_entry.commit_message, 'Removed %s from canonical story ids' % self.story_id_1) - def test_add_canonical_story(self): + def test_add_canonical_story(self) -> None: topic_services.add_canonical_story( self.user_id_admin, self.TOPIC_ID, 'story_id') topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) @@ -1008,6 +1118,8 @@ def test_add_canonical_story(self): topic_commit_log_entry = ( topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3) ) + # Ruling out the possibility of None for mypy type checking. + assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'edit') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin) @@ -1015,7 +1127,7 @@ def test_add_canonical_story(self): topic_commit_log_entry.commit_message, 'Added %s to canonical story ids' % 'story_id') - def test_delete_additional_story(self): + def test_delete_additional_story(self) -> None: topic_services.delete_additional_story( self.user_id_admin, self.TOPIC_ID, self.story_id_3) topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) @@ -1024,6 +1136,8 @@ def test_delete_additional_story(self): topic_commit_log_entry = ( topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3) ) + # Ruling out the possibility of None for mypy type checking. 
+ assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'edit') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin) @@ -1031,7 +1145,7 @@ def test_delete_additional_story(self): topic_commit_log_entry.commit_message, 'Removed %s from additional story ids' % self.story_id_3) - def test_add_additional_story(self): + def test_add_additional_story(self) -> None: topic_services.add_additional_story( self.user_id_admin, self.TOPIC_ID, 'story_id_4') topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) @@ -1042,6 +1156,8 @@ def test_add_additional_story(self): topic_commit_log_entry = ( topic_models.TopicCommitLogEntryModel.get_commit(self.TOPIC_ID, 3) ) + # Ruling out the possibility of None for mypy type checking. + assert topic_commit_log_entry is not None self.assertEqual(topic_commit_log_entry.commit_type, 'edit') self.assertEqual(topic_commit_log_entry.topic_id, self.TOPIC_ID) self.assertEqual(topic_commit_log_entry.user_id, self.user_id_admin) @@ -1049,13 +1165,15 @@ def test_add_additional_story(self): topic_commit_log_entry.commit_message, 'Added story_id_4 to additional story ids') - def test_delete_topic(self): + def test_delete_topic(self) -> None: # Add suggestion for the topic to test if it is deleted too. 
+ content_id_generator = translation_domain.ContentIdGenerator() question = self.save_new_question( 'question_id', self.user_id_admin, - self._create_valid_question_data('dest'), - [self.skill_id_1]) + self._create_valid_question_data('dest', content_id_generator), + [self.skill_id_1], + content_id_generator.next_content_id_index) suggestion = suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_ADD_QUESTION, feconf.ENTITY_TYPE_TOPIC, @@ -1083,28 +1201,34 @@ def test_delete_topic(self): subtopic_page_services.get_subtopic_page_by_id( self.TOPIC_ID, 1, strict=False)) self.assertIsNone( - suggestion_services.get_suggestion_by_id(suggestion.suggestion_id)) + suggestion_services.get_suggestion_by_id( + suggestion.suggestion_id, strict=False + ) + ) - def test_delete_subtopic_with_skill_ids(self): + def test_delete_subtopic_with_skill_ids(self) -> None: changelist = [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_DELETE_SUBTOPIC, 'subtopic_id': self.subtopic_id })] subtopic_page = subtopic_page_services.get_subtopic_page_by_id( - self.TOPIC_ID, 1, strict=False) + self.TOPIC_ID, 1, strict=True) self.assertEqual(subtopic_page.id, self.TOPIC_ID + '-1') topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Removed 1 subtopic.') - subtopic_page = subtopic_page_services.get_subtopic_page_by_id( - self.TOPIC_ID, 1, strict=False) - self.assertIsNone(subtopic_page) + subtopic_page_with_none = ( + subtopic_page_services.get_subtopic_page_by_id( + self.TOPIC_ID, 1, strict=False + ) + ) + self.assertIsNone(subtopic_page_with_none) topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) self.assertEqual( topic.uncategorized_skill_ids, [self.skill_id_1, self.skill_id_2]) self.assertEqual(topic.subtopics, []) - def test_update_subtopic_skill_ids(self): + def test_update_subtopic_skill_ids(self) -> None: # Adds a subtopic and moves skill id from one to another. 
changelist = [ topic_domain.TopicChange({ @@ -1122,7 +1246,8 @@ def test_update_subtopic_skill_ids(self): topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title2', - 'subtopic_id': 2 + 'subtopic_id': 2, + 'url_fragment': 'fragment-two' }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, @@ -1136,8 +1261,7 @@ def test_update_subtopic_skill_ids(self): 'new_value': 'new-subtopic', 'old_value': '', 'subtopic_id': 2 - }) - ] + })] topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Updated subtopic skill ids.') @@ -1161,7 +1285,7 @@ def test_update_subtopic_skill_ids(self): 'skill_id': self.skill_id_2 }) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Skill id %s is not present in the given old subtopic' % self.skill_id_2): @@ -1178,7 +1302,7 @@ def test_update_subtopic_skill_ids(self): 'skill_id': 'skill_10' }) ] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Skill id skill_10 is not an uncategorized skill id'): topic_services.update_topic_and_subtopic_pages( @@ -1192,7 +1316,7 @@ def test_update_subtopic_skill_ids(self): 'new_subtopic_id': None, 'skill_id': self.skill_id_1 })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The subtopic with id None does not exist.'): topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, @@ -1209,8 +1333,7 @@ def test_update_subtopic_skill_ids(self): 'cmd': topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC, 'subtopic_id': self.subtopic_id, 'skill_id': self.skill_id_1 - }) - ] + })] topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Updated subtopic skill ids.') @@ -1227,29 +1350,37 @@ def test_update_subtopic_skill_ids(self): 'subtopic_id': self.subtopic_id, 'skill_id': 'skill_10' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Skill id 
skill_10 is not present in the old subtopic'): topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Updated subtopic skill ids.') - def test_admin_can_manage_topic(self): + def test_admin_can_manage_topic(self) -> None: topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertTrue(topic_services.check_can_edit_topic( self.user_admin, topic_rights)) - def test_filter_published_topic_ids(self): + def test_filter_published_topic_ids(self) -> None: published_topic_ids = topic_services.filter_published_topic_ids([ self.TOPIC_ID, 'invalid_id']) self.assertEqual(len(published_topic_ids), 0) - changelist = [topic_domain.TopicChange({ - 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, - 'old_subtopic_id': None, - 'new_subtopic_id': self.subtopic_id, - 'skill_id': 'skill_1' - })] + changelist = [ + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, + 'old_subtopic_id': None, + 'new_subtopic_id': self.subtopic_id, + 'skill_id': 'skill_1' + }), + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': self.test_list, + 'new_value': ['skill_1'] + })] topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Updated subtopic skill ids.') @@ -1259,21 +1390,29 @@ def test_filter_published_topic_ids(self): self.assertEqual(len(published_topic_ids), 1) self.assertEqual(published_topic_ids[0], self.TOPIC_ID) - def test_publish_and_unpublish_topic(self): + def test_publish_and_unpublish_topic(self) -> None: topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertFalse(topic_rights.topic_is_published) - changelist = [topic_domain.TopicChange({ - 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, - 'old_subtopic_id': None, - 'new_subtopic_id': self.subtopic_id, - 'skill_id': 'skill_1' - })] + changelist = [ + 
topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, + 'old_subtopic_id': None, + 'new_subtopic_id': self.subtopic_id, + 'skill_id': 'skill_1' + }), + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': self.test_list, + 'new_value': ['skill_1'] + })] topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Updated subtopic skill ids.') topic_services.publish_topic(self.TOPIC_ID, self.user_id_admin) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The user does not have enough rights to unpublish the topic.'): topic_services.unpublish_topic(self.TOPIC_ID, self.user_id_a) @@ -1285,12 +1424,12 @@ def test_publish_and_unpublish_topic(self): topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertFalse(topic_rights.topic_is_published) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The user does not have enough rights to publish the topic.'): topic_services.publish_topic(self.TOPIC_ID, self.user_id_a) - def test_create_new_topic_rights(self): + def test_create_new_topic_rights(self) -> None: topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertTrue(topic_services.check_can_edit_topic( @@ -1298,7 +1437,7 @@ def test_create_new_topic_rights(self): self.assertFalse(topic_services.check_can_edit_topic( self.user_b, topic_rights)) - def test_non_admin_cannot_assign_roles(self): + def test_non_admin_cannot_assign_roles(self) -> None: self.signup('x@example.com', 'X') self.signup('y@example.com', 'Y') @@ -1307,7 +1446,7 @@ def test_non_admin_cannot_assign_roles(self): user_x = user_services.get_user_actions_info(user_id_x) user_y = user_services.get_user_actions_info(user_id_y) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'UnauthorizedUserException: Could not assign new 
role.'): topic_services.assign_role( @@ -1319,16 +1458,38 @@ def test_non_admin_cannot_assign_roles(self): self.assertFalse(topic_services.check_can_edit_topic( user_y, topic_rights)) - def test_role_cannot_be_assigned_to_non_topic_manager(self): - with self.assertRaisesRegexp( + def test_guest_user_cannot_assign_roles(self) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'Guest user is not allowed to assign roles to a user.' + ): + topic_services.assign_role( + guest_user, self.user_b, + topic_domain.ROLE_MANAGER, self.TOPIC_ID) + + def test_roles_of_guest_user_cannot_be_changed_until_guest_is_logged_in( + self + ) -> None: + guest_user = user_services.get_user_actions_info(None) + with self.assertRaisesRegex( + Exception, + 'Cannot change the role of the Guest user.' + ): + topic_services.assign_role( + self.user_admin, guest_user, + topic_domain.ROLE_MANAGER, self.TOPIC_ID) + + def test_role_cannot_be_assigned_to_non_topic_manager(self) -> None: + with self.assertRaisesRegex( Exception, 'The assignee doesn\'t have enough rights to become a manager.'): topic_services.assign_role( self.user_admin, self.user_b, topic_domain.ROLE_MANAGER, self.TOPIC_ID) - def test_manager_cannot_assign_roles(self): - with self.assertRaisesRegexp( + def test_manager_cannot_assign_roles(self) -> None: + with self.assertRaisesRegex( Exception, 'UnauthorizedUserException: Could not assign new role.'): topic_services.assign_role( @@ -1341,8 +1502,8 @@ def test_manager_cannot_assign_roles(self): self.assertFalse(topic_services.check_can_edit_topic( self.user_b, topic_rights)) - def test_cannot_save_new_topic_with_existing_name(self): - with self.assertRaisesRegexp( + def test_cannot_save_new_topic_with_existing_name(self) -> None: + with self.assertRaisesRegex( Exception, 'Topic with name \'Name\' already exists'): self.save_new_topic( 'topic_2', self.user_id, name='Name', @@ -1350,12 +1511,15 @@ def 
test_cannot_save_new_topic_with_existing_name(self): canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=1) - def test_does_not_update_subtopic_url_fragment_if_it_already_exists(self): + def test_does_not_update_subtopic_url_fragment_if_it_already_exists( + self + ) -> None: topic_id = topic_fetchers.get_new_topic_id() changelist = [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title', - 'subtopic_id': 1 + 'subtopic_id': 1, + 'url_fragment': 'fragment-one' }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY, 'property_name': topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT, @@ -1365,7 +1529,8 @@ def test_does_not_update_subtopic_url_fragment_if_it_already_exists(self): }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title', - 'subtopic_id': 2 + 'subtopic_id': 2, + 'url_fragment': 'fragment-two' }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY, 'property_name': topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT, @@ -1378,14 +1543,16 @@ def test_does_not_update_subtopic_url_fragment_if_it_already_exists(self): description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=1, url_fragment='frag-dup-subtopic') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Subtopic url fragments are not unique across subtopics ' 'in the topic'): topic_services.update_topic_and_subtopic_pages( self.user_id, topic_id, changelist, 'Update url fragment') - def test_does_not_create_topic_url_fragment_if_it_already_exists(self): + def test_does_not_create_topic_url_fragment_if_it_already_exists( + self + ) -> None: topic_id_1 = topic_fetchers.get_new_topic_id() topic_id_2 = topic_fetchers.get_new_topic_id() self.save_new_topic( @@ -1393,7 +1560,7 @@ def test_does_not_create_topic_url_fragment_if_it_already_exists(self): 
description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=1, url_fragment='topic-frag-one') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Topic with URL Fragment \'topic-frag-one\' already exists'): self.save_new_topic( @@ -1403,7 +1570,7 @@ def test_does_not_create_topic_url_fragment_if_it_already_exists(self): subtopics=[], next_subtopic_id=1, url_fragment='topic-frag-one') - def test_does_not_update_topic_if_url_fragment_already_exists(self): + def test_does_not_update_topic_if_url_fragment_already_exists(self) -> None: topic_id_1 = topic_fetchers.get_new_topic_id() topic_id_2 = topic_fetchers.get_new_topic_id() changelist = [topic_domain.TopicChange({ @@ -1422,13 +1589,13 @@ def test_does_not_update_topic_if_url_fragment_already_exists(self): description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=1, url_fragment='topic-frag-two') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Topic with URL Fragment \'topic-frag-one\' already exists'): topic_services.update_topic_and_subtopic_pages( self.user_id, topic_id_2, changelist, 'Update url fragment') - def test_does_not_update_topic_if_name_already_exists(self): + def test_does_not_update_topic_if_name_already_exists(self) -> None: topic_id_1 = topic_fetchers.get_new_topic_id() topic_id_2 = topic_fetchers.get_new_topic_id() changelist = [topic_domain.TopicChange({ @@ -1447,13 +1614,13 @@ def test_does_not_update_topic_if_name_already_exists(self): description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=1, url_fragment='topic-frag-two') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Topic with name \'topic 1\' already exists'): topic_services.update_topic_and_subtopic_pages( self.user_id, topic_id_2, 
changelist, 'Update name') - def test_does_not_create_topic_if_name_is_non_string(self): + def test_does_not_create_topic_if_name_is_non_string(self) -> None: topic_id = topic_fetchers.get_new_topic_id() changelist = [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, @@ -1466,22 +1633,30 @@ def test_does_not_create_topic_if_name_is_non_string(self): description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=1, url_fragment='topic-frag') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Name should be a string.'): topic_services.update_topic_and_subtopic_pages( self.user_id, topic_id, changelist, 'Update topic name') - def test_url_fragment_existence_fails_for_non_string_url_fragment(self): - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_url_fragment_existence_fails_for_non_string_url_fragment( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Topic URL fragment should be a string.'): - topic_services.does_topic_with_url_fragment_exist(123) + topic_services.does_topic_with_url_fragment_exist(123) # type: ignore[arg-type] - def test_name_existence_fails_for_non_string_name(self): - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_name_existence_fails_for_non_string_name(self) -> None: + with self.assertRaisesRegex( Exception, 'Name should be a string.'): - topic_services.does_topic_with_name_exist(123) + topic_services.does_topic_with_name_exist(123) # type: ignore[arg-type] - def test_update_topic_language_code(self): + def test_update_topic_language_code(self) -> None: topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) self.assertEqual(topic.language_code, 'en') @@ -1497,8 +1672,10 @@ def test_update_topic_language_code(self): topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) self.assertEqual(topic.language_code, 'bn') - def test_cannot_update_topic_and_subtopic_pages_with_empty_changelist(self): - with self.assertRaisesRegexp( + def test_cannot_update_topic_and_subtopic_pages_with_empty_changelist( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Unexpected error: received an invalid change list when trying to ' 'save topic'): @@ -1506,7 +1683,8 @@ def test_cannot_update_topic_and_subtopic_pages_with_empty_changelist(self): self.user_id, self.TOPIC_ID, [], 'commit message') def test_cannot_update_topic_and_subtopic_pages_with_mismatch_of_versions( - self): + self + ) -> None: topic_model = topic_models.TopicModel.get(self.TOPIC_ID) topic_model.version = 0 topic_model.commit(self.user_id, 'changed version', []) @@ -1518,7 +1696,7 @@ def test_cannot_update_topic_and_subtopic_pages_with_mismatch_of_versions( 'new_value': 'bn' })] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Unexpected error: trying to update version 1 of topic ' 'from version 2. Please reload the page and try again.'): @@ -1529,7 +1707,7 @@ def test_cannot_update_topic_and_subtopic_pages_with_mismatch_of_versions( topic_model.version = 100 topic_model.commit(self.user_id, 'changed version', []) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Trying to update version 101 of topic from version 2, ' 'which is too old. 
Please reload the page and try again.'): @@ -1537,38 +1715,55 @@ def test_cannot_update_topic_and_subtopic_pages_with_mismatch_of_versions( self.user_id, self.TOPIC_ID, changelist, 'change language_code') def test_cannot_update_topic_and_subtopic_pages_with_empty_commit_message( - self): - changelist = [topic_domain.TopicChange({ - 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, - 'old_subtopic_id': None, - 'new_subtopic_id': self.subtopic_id, - 'skill_id': 'skill_1' - })] + self + ) -> None: + changelist = [ + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, + 'old_subtopic_id': None, + 'new_subtopic_id': self.subtopic_id, + 'skill_id': 'skill_1' + }), + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': self.test_list, + 'new_value': ['skill_1'] + })] # Test can have an empty commit message when not published. topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, None) topic_services.publish_topic(self.TOPIC_ID, self.user_id_admin) # Test must have a commit message when published. 
- with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected a commit message, received none.'): topic_services.update_topic_and_subtopic_pages( self.user_id, self.TOPIC_ID, [], None) - def test_cannot_publish_topic_with_no_topic_rights(self): - with self.assertRaisesRegexp( + def test_cannot_publish_topic_with_no_topic_rights(self) -> None: + with self.assertRaisesRegex( Exception, 'The given topic does not exist'): topic_services.publish_topic('invalid_topic_id', self.user_id_admin) - def test_cannot_publish_a_published_topic(self): + def test_cannot_publish_a_published_topic(self) -> None: topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertFalse(topic_rights.topic_is_published) - changelist = [topic_domain.TopicChange({ - 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, - 'old_subtopic_id': None, - 'new_subtopic_id': self.subtopic_id, - 'skill_id': 'skill_1' - })] + changelist = [ + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, + 'old_subtopic_id': None, + 'new_subtopic_id': self.subtopic_id, + 'skill_id': 'skill_1' + }), + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': self.test_list, + 'new_value': ['skill_1'] + })] topic_services.update_topic_and_subtopic_pages( self.user_id_admin, self.TOPIC_ID, changelist, 'Updated subtopic skill ids.') @@ -1576,33 +1771,33 @@ def test_cannot_publish_a_published_topic(self): topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertTrue(topic_rights.topic_is_published) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The topic is already published.'): topic_services.publish_topic(self.TOPIC_ID, self.user_id_admin) - def test_cannot_unpublish_topic_with_no_topic_rights(self): - with self.assertRaisesRegexp( + def test_cannot_unpublish_topic_with_no_topic_rights(self) -> None: + 
with self.assertRaisesRegex( Exception, 'The given topic does not exist'): topic_services.unpublish_topic( 'invalid_topic_id', self.user_id_admin) - def test_cannot_unpublish_an_unpublished_topic(self): + def test_cannot_unpublish_an_unpublished_topic(self) -> None: topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertFalse(topic_rights.topic_is_published) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'The topic is already unpublished.'): topic_services.unpublish_topic(self.TOPIC_ID, self.user_id_admin) - def test_cannot_edit_topic_with_no_topic_rights(self): + def test_cannot_edit_topic_with_no_topic_rights(self) -> None: self.assertFalse(topic_services.check_can_edit_topic(self.user_a, None)) - def test_cannot_assign_role_with_invalid_role(self): - with self.assertRaisesRegexp(Exception, 'Invalid role'): + def test_cannot_assign_role_with_invalid_role(self) -> None: + with self.assertRaisesRegex(Exception, 'Invalid role'): topic_services.assign_role( self.user_admin, self.user_a, 'invalid_role', self.TOPIC_ID) - def test_deassign_user_from_all_topics(self): + def test_deassign_user_from_all_topics(self) -> None: self.save_new_topic( 'topic_2', self.user_id, name='Name 2', abbreviated_name='name-two', url_fragment='name-six', @@ -1627,8 +1822,8 @@ def test_deassign_user_from_all_topics(self): topic_rights = topic_fetchers.get_topic_rights_with_user(self.user_id_a) self.assertEqual(len(topic_rights), 0) - def test_reassigning_manager_role_to_same_user(self): - with self.assertRaisesRegexp( + def test_reassigning_manager_role_to_same_user(self) -> None: + with self.assertRaisesRegex( Exception, 'This user already is a manager for this topic'): topic_services.assign_role( self.user_admin, self.user_a, @@ -1640,7 +1835,7 @@ def test_reassigning_manager_role_to_same_user(self): self.assertFalse(topic_services.check_can_edit_topic( self.user_b, topic_rights)) - def test_assigning_none_role(self): + def 
test_assigning_none_role(self) -> None: topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertTrue(topic_services.check_can_edit_topic( @@ -1665,7 +1860,7 @@ def test_assigning_none_role(self): self.assertFalse(topic_services.check_can_edit_topic( self.user_b, topic_rights)) - def test_deassigning_manager_role(self): + def test_deassigning_manager_role(self) -> None: topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertTrue(topic_services.check_can_edit_topic( @@ -1681,16 +1876,353 @@ def test_deassigning_manager_role(self): self.assertFalse(topic_services.check_can_edit_topic( self.user_b, topic_rights)) - def test_deassigning_an_unassigned_user_from_topic_raise_exception(self): + def test_deassigning_an_unassigned_user_from_topic_raise_exception( + self + ) -> None: topic_rights = topic_fetchers.get_topic_rights(self.TOPIC_ID) self.assertFalse(topic_services.check_can_edit_topic( self.user_b, topic_rights)) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'User does not have manager rights in topic.'): topic_services.deassign_manager_role_from_topic( self.user_admin, self.user_id_b, self.TOPIC_ID) + def test_update_thumbnail_filename(self) -> None: + self.assertEqual(self.topic.thumbnail_filename, 'topic.svg') + # Test exception when thumbnail is not found on filesystem. + with self.assertRaisesRegex( + Exception, + 'The thumbnail img.svg for topic with id %s does not exist' + ' in the filesystem.' % (self.TOPIC_ID) + ): + topic_services.update_thumbnail_filename(self.topic, 'img.svg') + + # Save the dummy image to the filesystem to be used as thumbnail. 
+ with utils.open_file( + os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), + 'rb', + encoding=None + ) as f: + raw_image = f.read() + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_TOPIC, self.TOPIC_ID) + fs.commit( + '%s/img.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image, + mimetype='image/svg+xml') + # Test successful update of thumbnail present in the filesystem. + topic_services.update_thumbnail_filename(self.topic, 'img.svg') + self.assertEqual(self.topic.thumbnail_filename, 'img.svg') + self.assertEqual(self.topic.thumbnail_size_in_bytes, len(raw_image)) + + def test_update_subtopic_thumbnail_filename(self) -> None: + self.assertEqual(len(self.topic.subtopics), 1) + self.assertEqual( + self.topic.subtopics[0].thumbnail_filename, None) + + # Test Exception when the thumbnail is not found in filesystem. + with self.assertRaisesRegex( + Exception, + 'The thumbnail %s for subtopic with topic_id %s does not exist ' + 'in the filesystem.' % ('new_image.svg', self.TOPIC_ID) + ): + topic_services.update_subtopic_thumbnail_filename( + self.topic, 1, 'new_image.svg') + + # Test successful update of thumbnail_filename when the thumbnail + # is found in the filesystem. 
+ with utils.open_file( + os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), + 'rb', + encoding=None + ) as f: + raw_image = f.read() + fs = fs_services.GcsFileSystem(feconf.ENTITY_TYPE_TOPIC, self.TOPIC_ID) + fs.commit( + 'thumbnail/new_image.svg', raw_image, mimetype='image/svg+xml') + topic_services.update_subtopic_thumbnail_filename( + self.topic, 1, 'new_image.svg') + self.assertEqual( + self.topic.subtopics[0].thumbnail_filename, 'new_image.svg') + self.assertEqual( + self.topic.subtopics[0].thumbnail_size_in_bytes, len(raw_image)) + + def test_get_topic_id_to_diagnostic_test_skill_ids(self) -> None: + fractions_id = topic_fetchers.get_new_topic_id() + self.save_new_topic( + fractions_id, self.user_id, name='Fractions', + url_fragment='fractions', description='Description of fraction', + canonical_story_ids=[self.story_id_1, self.story_id_2], + additional_story_ids=[self.story_id_3], + uncategorized_skill_ids=[self.skill_id_1, self.skill_id_2], + subtopics=[], next_subtopic_id=1) + old_value: List[str] = [] + changelist = [ + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': old_value, + 'new_value': [self.skill_id_1] + })] + topic_services.update_topic_and_subtopic_pages( + self.user_id_admin, fractions_id, changelist, + 'Adds diagnostic test.') + + additions_id = topic_fetchers.get_new_topic_id() + self.save_new_topic( + additions_id, self.user_id, name='Additions', + url_fragment='additions', description='Description of addition.', + canonical_story_ids=[self.story_id_1, self.story_id_2], + additional_story_ids=[self.story_id_3], + uncategorized_skill_ids=[self.skill_id_1, self.skill_id_2], + subtopics=[], next_subtopic_id=1) + changelist = [ + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': old_value, + 
'new_value': [self.skill_id_2] + })] + topic_services.update_topic_and_subtopic_pages( + self.user_id_admin, additions_id, changelist, + 'Adds diagnostic test.') + + expected_dict = { + fractions_id: [self.skill_id_1], + additions_id: [self.skill_id_2] + } + self.assertEqual( + topic_services.get_topic_id_to_diagnostic_test_skill_ids( + [fractions_id, additions_id]), expected_dict) + + error_msg = ( + 'No corresponding topic models exist for these topic IDs: %s.' + % (', '.join([''])) + ) + with self.assertRaisesRegex(Exception, error_msg): + topic_services.get_topic_id_to_diagnostic_test_skill_ids( + [additions_id, 'incorrect_topic_id']) + + def test_get_topic_id_to_topic_name_dict(self) -> None: + fractions_id = topic_fetchers.get_new_topic_id() + self.save_new_topic( + fractions_id, self.user_id, name='Fractions', + url_fragment='fractions', description='Description of fraction', + canonical_story_ids=[self.story_id_1, self.story_id_2], + additional_story_ids=[self.story_id_3], + uncategorized_skill_ids=[self.skill_id_1, self.skill_id_2], + subtopics=[], next_subtopic_id=1) + old_value: List[str] = [] + changelist = [ + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': old_value, + 'new_value': [self.skill_id_1] + })] + topic_services.update_topic_and_subtopic_pages( + self.user_id_admin, fractions_id, changelist, + 'Adds diagnostic test.') + + additions_id = topic_fetchers.get_new_topic_id() + self.save_new_topic( + additions_id, self.user_id, name='Additions', + url_fragment='additions', description='Description of addition.', + canonical_story_ids=[self.story_id_1, self.story_id_2], + additional_story_ids=[self.story_id_3], + uncategorized_skill_ids=[self.skill_id_1, self.skill_id_2], + subtopics=[], next_subtopic_id=1) + changelist = [ + topic_domain.TopicChange({ + 'cmd': topic_domain.CMD_UPDATE_TOPIC_PROPERTY, + 'property_name': ( + 
topic_domain.TOPIC_PROPERTY_SKILL_IDS_FOR_DIAGNOSTIC_TEST), + 'old_value': old_value, + 'new_value': [self.skill_id_2] + })] + topic_services.update_topic_and_subtopic_pages( + self.user_id_admin, additions_id, changelist, + 'Adds diagnostic test.') + + expected_dict = { + fractions_id: 'Fractions', + additions_id: 'Additions' + } + self.assertEqual( + topic_services.get_topic_id_to_topic_name_dict( + [fractions_id, additions_id]), expected_dict) + + error_msg = ( + 'No corresponding topic models exist for these topic IDs: %s.' + % (', '.join([''])) + ) + with self.assertRaisesRegex(Exception, error_msg): + topic_services.get_topic_id_to_topic_name_dict( + [additions_id, 'incorrect_topic_id']) + + def test_populate_topic_model_fields(self) -> None: + model = topic_models.TopicModel( + id='dummy_topic_id', + name='dummy_name', + abbreviated_name='dn', + url_fragment='name-one', + description='dummy description1', + canonical_name='dummy_canonical_name', + next_subtopic_id=1, + language_code='en', + subtopic_schema_version=1, + story_reference_schema_version=2, + page_title_fragment_for_web='fragm1' + ) + topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID) + populated_model = topic_services.populate_topic_model_fields( + model, topic) + self.assertEqual(populated_model.description, topic.description) + self.assertEqual(populated_model.name, topic.name) + self.assertEqual(populated_model.canonical_name, topic.canonical_name) + self.assertEqual( + populated_model.abbreviated_name, + topic.abbreviated_name + ) + self.assertEqual(populated_model.url_fragment, topic.url_fragment) + self.assertEqual( + populated_model.thumbnail_bg_color, + topic.thumbnail_bg_color + ) + self.assertEqual( + populated_model.thumbnail_filename, + topic.thumbnail_filename + ) + self.assertEqual( + populated_model.thumbnail_size_in_bytes, + topic.thumbnail_size_in_bytes + ) + for model_reference, topic_reference in zip( + populated_model.canonical_story_references, + 
topic.canonical_story_references + ): + self.assertEqual(model_reference, topic_reference.to_dict()) + + for model_reference, topic_reference in zip( + populated_model.additional_story_references, + topic.additional_story_references + ): + self.assertEqual(model_reference, topic_reference.to_dict()) + self.assertEqual( + populated_model.uncategorized_skill_ids, + topic.uncategorized_skill_ids + ) + for model_subtopic, topic_subtopic in zip( + populated_model.subtopics, + topic.subtopics): + self.assertEqual(model_subtopic, topic_subtopic.to_dict()) + self.assertEqual( + populated_model.subtopic_schema_version, + topic.subtopic_schema_version + ) + self.assertEqual( + populated_model.story_reference_schema_version, + topic.story_reference_schema_version + ) + self.assertEqual( + populated_model.next_subtopic_id, + topic.next_subtopic_id + ) + self.assertEqual(populated_model.language_code, topic.language_code) + self.assertEqual( + populated_model.meta_tag_content, + topic.meta_tag_content + ) + self.assertEqual( + populated_model.practice_tab_is_displayed, + topic.practice_tab_is_displayed + ) + self.assertEqual( + populated_model.page_title_fragment_for_web, + topic.page_title_fragment_for_web + ) + self.assertEqual( + populated_model.skill_ids_for_diagnostic_test, + topic.skill_ids_for_diagnostic_test) + + def test_populate_topic_summary_model_fields(self) -> None: + model = topic_models.TopicSummaryModel( + id=self.TOPIC_ID, + name='dummy topic summary', + canonical_name='dummy topic summary', + language_code='cs', + description=' dummy description', + url_fragment='/fragm', + canonical_story_count=0, + additional_story_count=0, + total_skill_count=0, + total_published_node_count=0, + uncategorized_skill_count=0, + subtopic_count=0, + version=1 + ) + topic_summary = topic_services.compute_summary_of_topic(self.topic) + populated_model = topic_services.populate_topic_summary_model_fields( + model, topic_summary) + self.assertEqual(populated_model.name, 
topic_summary.name) + self.assertEqual( + populated_model.description, + topic_summary.description + ) + self.assertEqual( + populated_model.canonical_name, + topic_summary.canonical_name + ) + self.assertEqual( + populated_model.language_code, + topic_summary.language_code + ) + self.assertEqual(populated_model.version, topic_summary.version) + self.assertEqual( + populated_model.additional_story_count, + topic_summary.additional_story_count + ) + self.assertEqual( + populated_model.canonical_story_count, + topic_summary.canonical_story_count + ) + self.assertEqual( + populated_model.uncategorized_skill_count, + topic_summary.uncategorized_skill_count + ) + self.assertEqual( + populated_model.subtopic_count, + topic_summary.subtopic_count) + self.assertEqual( + populated_model.total_skill_count, + topic_summary.total_skill_count + ) + self.assertEqual( + populated_model.total_published_node_count, + topic_summary.total_published_node_count + ) + self.assertEqual( + populated_model.thumbnail_filename, + topic_summary.thumbnail_filename + ) + self.assertEqual( + populated_model.thumbnail_bg_color, + topic_summary.thumbnail_bg_color + ) + self.assertEqual( + populated_model.topic_model_last_updated, + topic_summary.topic_model_last_updated + ) + self.assertEqual( + populated_model.topic_model_created_on, + topic_summary.topic_model_created_on) + self.assertEqual( + populated_model.url_fragment, + topic_summary.url_fragment + ) + # TODO(#7009): Remove this mock class and the SubtopicMigrationTests class # once the actual functions for subtopic migrations are implemented. 
@@ -1698,14 +2230,16 @@ class MockTopicObject(topic_domain.Topic): """Mocks Topic domain object.""" @classmethod - def _convert_story_reference_v1_dict_to_v2_dict(cls, story_reference): + def _convert_story_reference_v1_dict_to_v2_dict( + cls, story_reference: topic_domain.StoryReferenceDict + ) -> topic_domain.StoryReferenceDict: """Converts v1 story reference dict to v2.""" return story_reference class SubtopicMigrationTests(test_utils.GenericTestBase): - def test_migrate_subtopic_to_latest_schema(self): + def test_migrate_subtopic_to_latest_schema(self) -> None: topic_services.create_new_topic_rights('topic_id', 'user_id_admin') commit_cmd = topic_domain.TopicChange({ 'cmd': topic_domain.CMD_CREATE_NEW, @@ -1716,7 +2250,7 @@ def test_migrate_subtopic_to_latest_schema(self): 'title': 'subtopic_title', 'skill_ids': [] } - subtopic_v4_dict = { + subtopic_v4_dict: Dict[str, Union[str, int, Optional[List[str]]]] = { 'id': 1, 'thumbnail_filename': None, 'thumbnail_bg_color': None, @@ -1736,7 +2270,8 @@ def test_migrate_subtopic_to_latest_schema(self): language_code='en', subtopics=[subtopic_v1_dict], subtopic_schema_version=1, - story_reference_schema_version=1 + story_reference_schema_version=1, + page_title_fragment_for_web='fragm' ) commit_cmd_dicts = [commit_cmd.to_dict()] model.commit( @@ -1760,7 +2295,7 @@ def test_migrate_subtopic_to_latest_schema(self): class StoryReferenceMigrationTests(test_utils.GenericTestBase): - def test_migrate_story_reference_to_latest_schema(self): + def test_migrate_story_reference_to_latest_schema(self) -> None: topic_services.create_new_topic_rights('topic_id', 'user_id_admin') commit_cmd = topic_domain.TopicChange({ 'cmd': topic_domain.CMD_CREATE_NEW, @@ -1782,7 +2317,8 @@ def test_migrate_story_reference_to_latest_schema(self): subtopics=[], subtopic_schema_version=1, story_reference_schema_version=1, - canonical_story_references=[story_reference_dict] + canonical_story_references=[story_reference_dict], + 
page_title_fragment_for_web='fragm' ) commit_cmd_dicts = [commit_cmd.to_dict()] model.commit( diff --git a/core/domain/translatable_object_registry.py b/core/domain/translatable_object_registry.py index 089b380de6c3..1b312a6e6b77 100644 --- a/core/domain/translatable_object_registry.py +++ b/core/domain/translatable_object_registry.py @@ -20,15 +20,33 @@ from extensions.objects.models import objects +from typing import Dict, List, Literal, Type, Union, overload + +TranslatableObjectNames = Literal[ + 'TranslatableHtml', + 'TranslatableUnicodeString', + 'TranslatableSetOfUnicodeString', + 'TranslatableSetOfNormalizedString', +] + +TranslatableObjectClasses = Union[ + Type[objects.TranslatableHtml], + Type[objects.TranslatableUnicodeString], + Type[objects.TranslatableSetOfUnicodeString], + Type[objects.TranslatableSetOfNormalizedString], +] + class Registry: """Registry of all translatable objects.""" # Dict mapping object class names to their classes. - _translatable_objects_dict = {} + _translatable_objects_dict: Dict[ + TranslatableObjectNames, TranslatableObjectClasses + ] = {} @classmethod - def _refresh_registry(cls): + def _refresh_registry(cls) -> None: """Refreshes the registry by adding new translatable object classes to the registry. """ @@ -50,7 +68,7 @@ def _refresh_registry(cls): cls._translatable_objects_dict[clazz.__name__] = clazz @classmethod - def get_all_class_names(cls): + def get_all_class_names(cls) -> List[TranslatableObjectNames]: """Gets a list of all translatable object class names. Returns: @@ -59,8 +77,34 @@ def get_all_class_names(cls): cls._refresh_registry() return sorted(cls._translatable_objects_dict.keys()) + @overload + @classmethod + def get_object_class( + cls, obj_type: Literal['TranslatableHtml'] + ) -> Type[objects.TranslatableHtml]: ... + + @overload + @classmethod + def get_object_class( + cls, obj_type: Literal['TranslatableUnicodeString'] + ) -> Type[objects.TranslatableUnicodeString]: ... 
+ + @overload + @classmethod + def get_object_class( + cls, obj_type: Literal['TranslatableSetOfUnicodeString'] + ) -> Type[objects.TranslatableSetOfUnicodeString]: ... + + @overload + @classmethod + def get_object_class( + cls, obj_type: Literal['TranslatableSetOfNormalizedString'] + ) -> Type[objects.TranslatableSetOfNormalizedString]: ... + @classmethod - def get_object_class(cls, obj_type): + def get_object_class( + cls, obj_type: TranslatableObjectNames + ) -> TranslatableObjectClasses: """Gets a translatable object class by its type. Refreshes once if the class is not found; subsequently, throws an diff --git a/core/domain/translatable_object_registry_test.py b/core/domain/translatable_object_registry_test.py index fd271d659aa6..62bec9706694 100644 --- a/core/domain/translatable_object_registry_test.py +++ b/core/domain/translatable_object_registry_test.py @@ -24,44 +24,59 @@ class TranslatableObjectRegistryUnitTests(test_utils.GenericTestBase): """Test the Registry class in translatable_object_registry.""" - def test_get_object_class_method(self): + def test_get_object_class_method(self) -> None: """Tests the normal behavior of get_object_class().""" retrieved_class = ( translatable_object_registry.Registry.get_object_class( 'TranslatableHtml')) self.assertEqual(retrieved_class.__name__, 'TranslatableHtml') - def test_nontranslatable_class_is_not_gettable(self): + def test_nontranslatable_class_is_not_gettable(self) -> None: """Tests that trying to retrieve a non-translatable class raises an error. """ - with self.assertRaisesRegexp( - TypeError, 'not a valid translatable object class'): - translatable_object_registry.Registry.get_object_class( + with self.assertRaisesRegex( + TypeError, 'not a valid translatable object class' + ): + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. 
+ translatable_object_registry.Registry.get_object_class( # type: ignore[call-overload] 'Int') - def test_fake_class_is_not_gettable(self): + def test_fake_class_is_not_gettable(self) -> None: """Tests that trying to retrieve a fake class raises an error.""" - with self.assertRaisesRegexp( + with self.assertRaisesRegex( TypeError, 'not a valid translatable object class'): - translatable_object_registry.Registry.get_object_class('FakeClass') + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. + translatable_object_registry.Registry.get_object_class( # type: ignore[call-overload] + 'FakeClass') - def test_base_objects_are_not_gettable(self): + def test_base_objects_are_not_gettable(self) -> None: """Tests that the base objects exist but are not included in the registry. """ assert getattr(objects, 'BaseObject') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( TypeError, 'not a valid translatable object class'): - translatable_object_registry.Registry.get_object_class('BaseObject') + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. + translatable_object_registry.Registry.get_object_class( # type: ignore[call-overload] + 'BaseObject') assert getattr(objects, 'BaseTranslatableObject') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( TypeError, 'not a valid translatable object class'): - translatable_object_registry.Registry.get_object_class( + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. 
+ translatable_object_registry.Registry.get_object_class( # type: ignore[call-overload] 'BaseTranslatableObject') - def test_get_translatable_object_classes(self): + def test_get_translatable_object_classes(self) -> None: """Tests the normal behavior of get_translatable_object_classes().""" class_names_to_classes = ( translatable_object_registry.Registry.get_all_class_names()) diff --git a/core/domain/translation_domain.py b/core/domain/translation_domain.py index 87540629cb07..e4576d973390 100644 --- a/core/domain/translation_domain.py +++ b/core/domain/translation_domain.py @@ -18,9 +18,679 @@ from __future__ import annotations +import enum + +from core import feconf from core import utils +from core.constants import constants + +from typing import Dict, List, Optional, Union +from typing_extensions import Final, TypedDict + +from core.domain import html_cleaner # pylint: disable=invalid-import-from # isort:skip +from core.domain import translatable_object_registry # pylint: disable=invalid-import-from # isort:skip + + +class ContentType(enum.Enum): + """Represents all possible content types in the State.""" + + CONTENT = 'content' + INTERACTION = 'interaction' + DEFAULT_OUTCOME = 'default_outcome' + CUSTOMIZATION_ARG = 'ca' + RULE = 'rule' + FEEDBACK = 'feedback' + HINT = 'hint' + SOLUTION = 'solution' + + +class TranslatableContentFormat(enum.Enum): + """Represents all possible data types for any translatable content.""" + + HTML = 'html' + UNICODE_STRING = 'unicode' + SET_OF_NORMALIZED_STRING = 'set_of_normalized_string' + SET_OF_UNICODE_STRING = 'set_of_unicode_string' + + @classmethod + def is_data_format_list(cls, data_format: str) -> bool: + """Checks whether the content of translation with given format is of + a list type. + + Args: + data_format: str. The format of the translation. + + Returns: + bool. Whether the content of translation is a list. 
+ """ + return data_format in ( + cls.SET_OF_NORMALIZED_STRING.value, + cls.SET_OF_UNICODE_STRING.value + ) + + +class TranslatableContentDict(TypedDict): + """Dictionary representing TranslatableContent object.""" + + content_id: str + content_value: feconf.ContentValueType + content_type: str + content_format: str + interaction_id: Optional[str] + rule_type: Optional[str] + + +class TranslatableContent: + """TranslatableContent represents a content of a translatable object which + can be translated into multiple languages. + """ + + def __init__( + self, + content_id: str, + content_type: ContentType, + content_format: TranslatableContentFormat, + content_value: feconf.ContentValueType, + interaction_id: Optional[str] = None, + rule_type: Optional[str] = None + ) -> None: + """Constructs an TranslatableContent domain object. + + Args: + content_id: str. The id of the corresponding translatable content + value. + content_type: TranslatableContentFormat. The type of the + corresponding content value. + content_format: TranslatableContentFormat. The format of the + content. + content_value: ContentValueType. The content value which can be + translated. + interaction_id: str|None. The ID of the interaction in which the + content is used. + rule_type: str|None. The rule type of the answer group in which the + content is used. + """ + self.content_id = content_id + self.content_type = content_type + self.content_format = content_format + self.content_value = content_value + self.interaction_id = interaction_id + self.rule_type = rule_type + + def to_dict(self) -> TranslatableContentDict: + """Returns the dict representation of TranslatableContent object. + + Returns: + TranslatableContentDict. The dict representation of + TranslatableContent. 
+ """ + return { + 'content_id': self.content_id, + 'content_type': self.content_type.value, + 'content_format': self.content_format.value, + 'content_value': self.content_value, + 'interaction_id': self.interaction_id, + 'rule_type': self.rule_type + } + + def is_data_format_list(self) -> bool: + """Checks whether the content is of a list type. + + Returns: + bool. Whether the content is a list. + """ + return TranslatableContentFormat.is_data_format_list( + self.content_format.value) + + +class TranslatedContent: + """Class representing a translation of translatable content. For example, + if translatable content 'A' is translated into 'B' in a language other than + English, then 'B' is a TranslatedContent instance that represents this + class. + A (TranslatableContent) -----(translation)-----> B (TranslatedContent). + + Args: + content_value: ContentValueType. Represents translation of translatable + content. + content_format: TranslatableContentFormat. The format of the content. + needs_update: bool. Whether the translation needs an update or not. + """ + + def __init__( + self, + content_value: feconf.ContentValueType, + content_format: TranslatableContentFormat, + needs_update: bool + ) -> None: + """Constructor for the TranslatedContent object. + + Args: + content_value: ContentValueType. The content value which can be + translated. + content_format: TranslatableContentFormat. The format of the + content. + needs_update: bool. Whether the translated content needs update. + """ + self.content_value = content_value + self.content_format = content_format + self.needs_update = needs_update + + def to_dict(self) -> feconf.TranslatedContentDict: + """Returns the dict representation of TranslatedContent object. + + Returns: + TranslatedContentDict. A dict, mapping content_value and + needs_update of a TranslatableContent instance to + corresponding keys 'content_value' and 'needs_update'. 
+ """ + return { + 'content_value': self.content_value, + 'content_format': self.content_format.value, + 'needs_update': self.needs_update + } + + @classmethod + def from_dict( + cls, + translated_content_dict: feconf.TranslatedContentDict + ) -> TranslatedContent: + """Returns the TranslatedContent object.""" + return cls( + translated_content_dict['content_value'], + TranslatableContentFormat( + translated_content_dict['content_format']), + translated_content_dict['needs_update'] + ) + + +class TranslatableContentsCollection: + """A class to collect all TranslatableContents from a translatable object + and map them with their corresponding content-ids. + """ + + content_id_to_translatable_content: Dict[str, TranslatableContent] + + def __init__(self) -> None: + """Constructs a TranslatableContentsCollection object.""" + self.content_id_to_translatable_content = {} + + def add_translatable_field( + self, + content_id: str, + content_type: ContentType, + content_format: TranslatableContentFormat, + content_value: feconf.ContentValueType, + interaction_id: Optional[str] = None, + rule_type: Optional[str] = None + ) -> None: + """Adds translatable field parameter to + 'content_id_to_translatable_content' dict. + + Args: + content_id: str. The id of the corresponding translatable content + value. + content_type: TranslatableContentFormat. The type of the + corresponding content value. + content_format: TranslatableContentFormat. The format of the + content. + content_value: ContentValueType. The content value which can be + translated. + interaction_id: str|None. The ID of the interaction in which the + content is used. + rule_type: str|None. The rule type of the answer group in which the + content is used. + + Raises: + Exception. The content_id_to_translatable_content dict already + contains the content_id. + """ + if content_id in self.content_id_to_translatable_content: + raise Exception( + 'Content_id %s already exists in the ' + 'TranslatableContentsCollection.' 
% content_id) + + self.content_id_to_translatable_content[content_id] = ( + TranslatableContent( + content_id, + content_type, + content_format, + content_value, + interaction_id, + rule_type) + ) + + def add_fields_from_translatable_object( + self, + translatable_object: BaseTranslatableObject, + **kwargs: Optional[str] + ) -> None: + """Adds translatable fields from a translatable object parameter to + 'content_id_to_translatable_content' dict. + + NOTE: The functions take the entire translatable object as a param, as + the process to fetch translatable collections from different objects + are the same, and keeping this logic in one place will help avoid + duplicate patterns in the callsite. It will also help the callsite + look cleaner. + + Args: + translatable_object: BaseTranslatableObject. An instance of + BaseTranslatableObject class. + **kwargs: *. The keyword args for registring translatable object. + """ + self.content_id_to_translatable_content.update( + translatable_object.get_translatable_contents_collection(**kwargs) + .content_id_to_translatable_content) + + +class BaseTranslatableObject: + """Base class for all translatable objects which contain translatable + fields/objects. For example, a State is a translatable object in + Exploration, a Hint is a translatable object in State, and a hint_content + is a translatable field in Hint. So Exploration, State, and Hint all should + be child classes of BaseTranslatableObject. + """ + + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> TranslatableContentsCollection: + """Get all translatable fields in a translatable object. + + Raises: + NotImplementedError. The derived child class must implement the + necessary logic to get all translatable fields in a + translatable object. + """ + raise NotImplementedError('Must be implemented in subclasses.') + + def get_translatable_content_ids(self) -> List[str]: + """Get all translatable content's Ids. + + Returns: + list(str). 
A list of translatable content's Id. + """ + content_collection = ( + self.get_translatable_contents_collection()) + + return list( + content_collection.content_id_to_translatable_content.keys()) + + def get_all_contents_which_need_translations( + self, + entity_translation: Union[EntityTranslation, None] = None + ) -> Dict[str, TranslatableContent]: + """Returns a list of TranslatableContent instances which need new or + updated translations. + + Args: + entity_translation: EntityTranslation. An object storing the + existing translations of an entity. + + Returns: + list(TranslatableContent). Returns a list of TranslatableContent. + """ + if entity_translation is None: + entity_translation = EntityTranslation.create_empty( + entity_type=feconf.TranslatableEntityType.EXPLORATION, + entity_id='', + language_code='') + + translatable_content_list = ( + self.get_translatable_contents_collection() + .content_id_to_translatable_content.values()) + + content_id_to_translatable_content = {} + + for translatable_content in translatable_content_list: + content_value = translatable_content.content_value + if translatable_content.content_type == ( + ContentType.CUSTOMIZATION_ARG + ) and translatable_content.content_format == ( + TranslatableContentFormat.HTML + ): + assert isinstance(content_value, str) + content_value = html_cleaner.strip_html_tags(content_value) + + if content_value == '': + continue + + if isinstance(content_value, str) and content_value.isnumeric(): + continue + + if ( + translatable_content.content_id not in + entity_translation.translations + ): + content_id_to_translatable_content[ + translatable_content.content_id] = translatable_content + elif ( + entity_translation.translations[ + translatable_content.content_id].needs_update + ): + content_id_to_translatable_content[ + translatable_content.content_id] = translatable_content + + return content_id_to_translatable_content + + def get_translation_count( + self, entity_translation: EntityTranslation + ) 
-> int: + """Returs the number of updated translations avialable. + + Args: + entity_translation: EntityTranslation. The translation object + containing translations. + + Returns: + int. The number of translatable contnet for which translations are + available in the given translation object. + """ + count = 0 + for content_id in self.get_all_contents_which_need_translations(): + if not content_id in entity_translation.translations: + continue + + if not entity_translation.translations[content_id].needs_update: + count += 1 + + return count + + def are_translations_displayable( + self, entity_translation: EntityTranslation + ) -> bool: + """Whether the given EntityTranslation in the given lanaguage is + displayable. + + A language's translations are ready to be displayed if there are less + than five missing or update-needed translations. In addition, all + rule-related translations must be present. + + Args: + entity_translation: EntityTranslation. An object storing the + existing translations of an entity. + + Returns: + list(TranslatableContent). Returns a list of TranslatableContent. + """ + content_id_to_translatable_content = ( + self.get_translatable_contents_collection() + .content_id_to_translatable_content + ) + for content_id, translatable_content in ( + content_id_to_translatable_content.items() + ): + if ( + translatable_content.content_type == ContentType.RULE and + not content_id in entity_translation.translations + ): + # Rule-related translations cannot be missing. 
+ return False + + translatable_content_count = self.get_content_count() + translated_content_count = self.get_translation_count( + entity_translation) + + translations_missing_count = ( + translatable_content_count - translated_content_count) + return translations_missing_count < ( + feconf.MIN_ALLOWED_MISSING_OR_UPDATE_NEEDED_WRITTEN_TRANSLATIONS) + + def get_content_count(self) -> int: + """Returns the total number of distinct content fields available in the + exploration which are user facing and can be translated into + different languages. + + (The content field includes state content, feedback, hints, solutions.) + + Returns: + int. The total number of distinct content fields available inside + the exploration. + """ + return len(self.get_all_contents_which_need_translations()) + + def get_all_html_content_strings(self) -> List[str]: + """Gets all html content strings used in the object. + + Returns: + list(str). The list of html content strings. + """ + html_list = [] + content_collection = self.get_translatable_contents_collection() + translatable_contents = ( + content_collection.content_id_to_translatable_content.values()) + for translatable_content in translatable_contents: + if translatable_content.content_format == ( + TranslatableContentFormat.HTML + ): + # Ruling out the possibility of any other type for MyPy type + # checking because content_value for rules can be a list of + # strings. + assert isinstance(translatable_content.content_value, str) + html_list.append(translatable_content.content_value) + + return html_list + + def validate_translatable_contents( + self, next_content_id_index: int + ) -> None: + """Validates the content Ids of the translatable contents. + + Args: + next_content_id_index: int. The index for generating the Id + for a content. 
+ """ + content_id_to_translatable_content = ( + self.get_translatable_contents_collection() + .content_id_to_translatable_content) + + for content_id in content_id_to_translatable_content.keys(): + content_id_suffix = content_id.split('_')[-1] + + if ( + content_id_suffix.isdigit() and + int(content_id_suffix) > next_content_id_index + ): + raise utils.ValidationError( + 'Expected all content id indexes to be less than the "next ' + 'content id index(%s)", but received content id %s' % ( + next_content_id_index, content_id) + ) + + +class EntityTranslationDict(TypedDict): + """Dictionary representing the EntityTranslation object.""" -from typing import Dict + entity_id: str + entity_type: str + entity_version: int + language_code: str + translations: Dict[str, feconf.TranslatedContentDict] + + +class EntityTranslation: + """A domain object to store all translations for a given versioned-entity + in a given language. + + NOTE: This domain object corresponds to EntityTranslationsModel in the + storage layer. + + Args: + entity_id: str. The id of the corresponding entity. + entity_type: TranslatableEntityType. The type + of the corresponding entity. + entity_version: str. The version of the corresponding entity. + language_code: str. The language code for the corresponding entity. + translations: dict(str, TranslatedContent). A dict representing + content-id as keys and TranslatedContent instance as values. + """ + + def __init__( + self, + entity_id: str, + entity_type: feconf.TranslatableEntityType, + entity_version: int, + language_code: str, + translations: Dict[str, TranslatedContent] + ): + """Constructs an TranslatableContent domain object. + + Args: + entity_id: str. The ID of the entity. + entity_type: TranslatableEntityType. The type of the entity. + entity_version: int. The version of the entity. + language_code: str. The langauge code for the translated contents + language. + translations: dict(str, TranslatedContent). 
The translations dict + containing content_id as key and TranslatedContent as value. + """ + self.entity_id = entity_id + self.entity_type = entity_type.value + self.entity_version = entity_version + self.language_code = language_code + self.translations = translations + + def to_dict(self) -> EntityTranslationDict: + """Returns the dict representation of the EntityTranslation object. + + Returns: + EntityTranslationDict. The dict representation of the + EntityTranslation object. + """ + translations_dict = { + content_id: translated_content.to_dict() + for content_id, translated_content in self.translations.items() + } + + return { + 'entity_id': self.entity_id, + 'entity_type': self.entity_type, + 'entity_version': self.entity_version, + 'language_code': self.language_code, + 'translations': translations_dict + } + + @classmethod + def from_dict( + cls, entity_translation_dict: EntityTranslationDict + ) -> EntityTranslation: + """Creates the EntityTranslation from the given dict. + + Args: + entity_translation_dict: EntityTranslationDict. The dict + representation of the EntityTranslation object. + + Returns: + EntityTranslation. The EntityTranslation object created using the + given dict. 
+ """ + translations_dict = entity_translation_dict['translations'] + content_id_to_translated_content = {} + for content_id, translated_content in translations_dict.items(): + content_id_to_translated_content[content_id] = ( + TranslatedContent.from_dict(translated_content)) + + return cls( + entity_translation_dict['entity_id'], + feconf.TranslatableEntityType( + entity_translation_dict['entity_type']), + entity_translation_dict['entity_version'], + entity_translation_dict['language_code'], + content_id_to_translated_content + ) + + def validate(self) -> None: + """Validates the EntityTranslation object.""" + if not isinstance(self.entity_type, str): + raise utils.ValidationError( + 'entity_type must be a string, recieved %r' % self.entity_type) + if not isinstance(self.entity_id, str): + raise utils.ValidationError( + 'entity_id must be a string, recieved %r' % self.entity_id) + if not isinstance(self.entity_version, int): + raise utils.ValidationError( + 'entity_version must be an int, recieved %r' % + self.entity_version) + if not isinstance(self.language_code, str): + raise utils.ValidationError( + 'language_code must be a string, recieved %r' % + self.language_code) + + for content_id, translated_content in self.translations.items(): + if not isinstance(content_id, str): + raise utils.ValidationError( + 'content_id must be a string, recieved %r' % content_id) + if not isinstance(translated_content.needs_update, bool): + raise utils.ValidationError( + 'needs_update must be a bool, recieved %r' % + translated_content.needs_update) + + def add_translation( + self, + content_id: str, + content_value: feconf.ContentValueType, + content_format: TranslatableContentFormat, + needs_update: bool + ) -> None: + """Adds new TranslatedContent in the object. + + Args: + content_id: str. The ID of the content. + content_value: ContentValueType. The translation content. + content_format: TranslatableContentFormat. The format of the + content. + needs_update: bool. 
Whether the translation needs update. + """ + self.translations[content_id] = TranslatedContent( + content_value, content_format, needs_update) + + def get_translation_count(self) -> int: + """Returs the number of updated translations avialable.""" + return len([ + translated_content + for translated_content in self.translations.values() + if not translated_content.needs_update + ]) + + def remove_translations(self, content_ids: List[str]) -> None: + """Remove translations for the given list of content Ids. + + Args: + content_ids: list(str). The list of content Ids for removing + translations. + """ + for content_id in content_ids: + if content_id in self.translations: + del self.translations[content_id] + + def mark_translations_needs_update(self, content_ids: List[str]) -> None: + """Marks translation needs update for the given list of content Ids. + + Args: + content_ids: list(str). The list of content Ids for to mark their + translation needs update. + """ + for content_id in content_ids: + if content_id in self.translations: + self.translations[content_id].needs_update = True + + @classmethod + def create_empty( + cls, + entity_type: feconf.TranslatableEntityType, + entity_id: str, + language_code: str, + entity_version: int = 0 + ) -> EntityTranslation: + """Creates a new and empty EntityTranslation object.""" + return cls( + entity_id=entity_id, + entity_type=entity_type, + entity_version=entity_version, + language_code=language_code, + translations={} + ) class MachineTranslation: @@ -98,3 +768,300 @@ def to_dict(self) -> Dict[str, str]: 'source_text': self.source_text, 'translated_text': self.translated_text } + + +# WrittenTrasnlation and WrittenTranslations class is still used in topic and +# subtopic entity and will be removed from here as well once Topic and subtopic +# will also support translation feature. 
+ + +class WrittenTranslationDict(TypedDict): + """Dictionary representing the WrittenTranslation object.""" + + data_format: str + translation: Union[str, List[str]] + needs_update: bool + + +class WrittenTranslation: + """Value object representing a written translation for a content. + + Here, "content" could mean a string or a list of strings. The latter arises, + for example, in the case where we are checking for equality of a learner's + answer against a given set of strings. In such cases, the number of strings + in the translation of the original object may not be the same as the number + of strings in the original object. + """ + + DATA_FORMAT_HTML: Final = 'html' + DATA_FORMAT_UNICODE_STRING: Final = 'unicode' + DATA_FORMAT_SET_OF_NORMALIZED_STRING: Final = 'set_of_normalized_string' + DATA_FORMAT_SET_OF_UNICODE_STRING: Final = 'set_of_unicode_string' + + DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE: Dict[ + str, translatable_object_registry.TranslatableObjectNames + ] = { + DATA_FORMAT_HTML: 'TranslatableHtml', + DATA_FORMAT_UNICODE_STRING: 'TranslatableUnicodeString', + DATA_FORMAT_SET_OF_NORMALIZED_STRING: ( + 'TranslatableSetOfNormalizedString'), + DATA_FORMAT_SET_OF_UNICODE_STRING: 'TranslatableSetOfUnicodeString', + } + + def __init__( + self, + data_format: str, + translation: Union[str, List[str]], + needs_update: bool + ) -> None: + """Initializes a WrittenTranslation domain object. + + Args: + data_format: str. One of the keys in + DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE. Indicates the + type of the field (html, unicode, etc.). + translation: str|list(str). A user-submitted string or list of + strings that matches the given data format. + needs_update: bool. Whether the translation is marked as needing + review. + """ + self.data_format = data_format + self.translation = translation + self.needs_update = needs_update + + def to_dict(self) -> WrittenTranslationDict: + """Returns a dict representing this WrittenTranslation domain object. + + Returns: + dict. 
A dict, mapping all fields of WrittenTranslation instance. + """ + return { + 'data_format': self.data_format, + 'translation': self.translation, + 'needs_update': self.needs_update, + } + + @classmethod + def from_dict( + cls, written_translation_dict: WrittenTranslationDict + ) -> WrittenTranslation: + """Return a WrittenTranslation domain object from a dict. + + Args: + written_translation_dict: dict. The dict representation of + WrittenTranslation object. + + Returns: + WrittenTranslation. The corresponding WrittenTranslation domain + object. + """ + return cls( + written_translation_dict['data_format'], + written_translation_dict['translation'], + written_translation_dict['needs_update']) + + def validate(self) -> None: + """Validates properties of the WrittenTranslation, normalizing the + translation if needed. + + Raises: + ValidationError. One or more attributes of the WrittenTranslation + are invalid. + """ + if self.data_format not in ( + self.DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE): + raise utils.ValidationError( + 'Invalid data_format: %s' % self.data_format) + + translatable_class_name = ( + self.DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE[self.data_format]) + translatable_obj_class = ( + translatable_object_registry.Registry.get_object_class( + translatable_class_name)) + self.translation = translatable_obj_class.normalize_value( + self.translation) + + if not isinstance(self.needs_update, bool): + raise utils.ValidationError( + 'Expected needs_update to be a bool, received %s' % + self.needs_update) + + +class WrittenTranslationsDict(TypedDict): + """Dictionary representing the WrittenTranslations object.""" + + translations_mapping: Dict[str, Dict[str, WrittenTranslationDict]] + + +class WrittenTranslations: + """Value object representing a content translations which stores + translated contents of all state contents (like hints, feedback etc.) in + different languages linked through their content_id. 
+ """ + + def __init__( + self, + translations_mapping: Dict[str, Dict[str, WrittenTranslation]] + ) -> None: + """Initializes a WrittenTranslations domain object. + + Args: + translations_mapping: dict. A dict mapping the content Ids + to the dicts which is the map of abbreviated code of the + languages to WrittenTranslation objects. + """ + self.translations_mapping = translations_mapping + + def to_dict(self) -> WrittenTranslationsDict: + """Returns a dict representing this WrittenTranslations domain object. + + Returns: + dict. A dict, mapping all fields of WrittenTranslations instance. + """ + translations_mapping: Dict[str, Dict[str, WrittenTranslationDict]] = {} + for (content_id, language_code_to_written_translation) in ( + self.translations_mapping.items()): + translations_mapping[content_id] = {} + for (language_code, written_translation) in ( + language_code_to_written_translation.items()): + translations_mapping[content_id][language_code] = ( + written_translation.to_dict()) + written_translations_dict: WrittenTranslationsDict = { + 'translations_mapping': translations_mapping + } + + return written_translations_dict + + @classmethod + def from_dict( + cls, written_translations_dict: WrittenTranslationsDict + ) -> WrittenTranslations: + """Returns a WrittenTranslations domain object from a dict. + + Args: + written_translations_dict: dict. The dict representation of + WrittenTranslations object. + + Returns: + WrittenTranslations. The corresponding WrittenTranslations domain + object. 
+ """ + translations_mapping: Dict[str, Dict[str, WrittenTranslation]] = {} + for (content_id, language_code_to_written_translation) in ( + written_translations_dict['translations_mapping'].items()): + translations_mapping[content_id] = {} + for (language_code, written_translation) in ( + language_code_to_written_translation.items()): + translations_mapping[content_id][language_code] = ( + WrittenTranslation.from_dict(written_translation)) + + return cls(translations_mapping) + + def validate(self, expected_content_id_list: Optional[List[str]]) -> None: + """Validates properties of the WrittenTranslations. + + Args: + expected_content_id_list: list(str)|None. A list of content id which + are expected to be inside they WrittenTranslations. + + Raises: + ValidationError. One or more attributes of the WrittenTranslations + are invalid. + """ + if expected_content_id_list is not None: + if not set(self.translations_mapping.keys()) == ( + set(expected_content_id_list)): + raise utils.ValidationError( + 'Expected state written_translations to match the listed ' + 'content ids %s, found %s' % ( + expected_content_id_list, + list(self.translations_mapping.keys())) + ) + + for (content_id, language_code_to_written_translation) in ( + self.translations_mapping.items()): + if not isinstance(content_id, str): + raise utils.ValidationError( + 'Expected content_id to be a string, received %s' + % content_id) + if not isinstance(language_code_to_written_translation, dict): + raise utils.ValidationError( + 'Expected content_id value to be a dict, received %s' + % language_code_to_written_translation) + for (language_code, written_translation) in ( + language_code_to_written_translation.items()): + if not isinstance(language_code, str): + raise utils.ValidationError( + 'Expected language_code to be a string, received %s' + % language_code) + # Currently, we assume written translations are used by the + # voice-artist to voiceover the translated text so written + # translations can 
be in supported audio/voiceover languages. + allowed_language_codes = [language['id'] for language in ( + constants.SUPPORTED_AUDIO_LANGUAGES)] + if language_code not in allowed_language_codes: + raise utils.ValidationError( + 'Invalid language_code: %s' % language_code) + + written_translation.validate() + + def add_content_id_for_translation(self, content_id: str) -> None: + """Adds a content id as a key for the translation into the + content_translation dict. + + Args: + content_id: str. The id representing a subtitled html. + + Raises: + Exception. The content id isn't a string. + """ + if not isinstance(content_id, str): + raise Exception( + 'Expected content_id to be a string, received %s' % content_id) + if content_id in self.translations_mapping: + raise Exception( + 'The content_id %s already exist.' % content_id) + + self.translations_mapping[content_id] = {} + + def delete_content_id_for_translation(self, content_id: str) -> None: + """Deletes a content id from the content_translation dict. + + Args: + content_id: str. The id representing a subtitled html. + + Raises: + Exception. The content id isn't a string. + """ + if not isinstance(content_id, str): + raise Exception( + 'Expected content_id to be a string, received %s' % content_id) + if content_id not in self.translations_mapping: + raise Exception( + 'The content_id %s does not exist.' % content_id) + + self.translations_mapping.pop(content_id, None) + + +class ContentIdGenerator: + """Class to generate the content-id for a translatable content based on the + next_content_id_index variable. 
+ """ + + def __init__(self, start_index: int = 0) -> None: + """Constructs an ContentIdGenerator object.""" + self.next_content_id_index = start_index + + def generate( + self, + content_type: ContentType, + extra_prefix: Optional[str] = None + ) -> str: + """Generates the new content-id from the next content id.""" + content_id = content_type.value + '_' + if extra_prefix: + content_id += extra_prefix + '_' + content_id += str(self.next_content_id_index) + + self.next_content_id_index += 1 + return content_id diff --git a/core/domain/translation_domain_test.py b/core/domain/translation_domain_test.py index b78d7ad1a118..fd97f2940706 100644 --- a/core/domain/translation_domain_test.py +++ b/core/domain/translation_domain_test.py @@ -18,10 +18,688 @@ from __future__ import annotations +import re + +from core import feconf from core import utils from core.domain import translation_domain from core.tests import test_utils +from typing import Optional + +from core.domain import translatable_object_registry # pylint: disable=invalid-import-from # isort:skip + + +class DummyTranslatableObjectWithTwoParams( + translation_domain.BaseTranslatableObject): + """A dummy translatable object with a translatable field and a + TranslatableObject as its properties. 
+ """ + + def __init__( + self, + param1: str, + param2: DummyTranslatableObjectWithSingleParam + ) -> None: + self.param1 = param1 + self.param2 = param2 + + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + translatable_contents_collection.add_translatable_field( + 'content_id_1', + translation_domain.ContentType.CONTENT, + translation_domain.TranslatableContentFormat.UNICODE_STRING, + self.param1) + translatable_contents_collection.add_fields_from_translatable_object( + self.param2) + return translatable_contents_collection + + +class DummyTranslatableObjectWithSingleParam( + translation_domain.BaseTranslatableObject): + """A dummy translatable object with a translatable field as its + properties. + """ + + def __init__( + self, + param3: str + ) -> None: + self.param3 = param3 + + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + translatable_contents_collection.add_translatable_field( + 'content_id_2', + translation_domain.ContentType.CONTENT, + translation_domain.TranslatableContentFormat.UNICODE_STRING, + self.param3) + return translatable_contents_collection + + +class DummyTranslatableObjectWithDuplicateContentIdForParams( + translation_domain.BaseTranslatableObject): + """A dummy translatable object with two translatable fields and on + registering with same content_id an error is raised. 
+ """ + + def __init__( + self, + param1: str, + param2: str + ) -> None: + self.param1 = param1 + self.param2 = param2 + + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + translatable_contents_collection.add_translatable_field( + 'content_id_2', + translation_domain.ContentType.CONTENT, + translation_domain.TranslatableContentFormat.UNICODE_STRING, + self.param1) + translatable_contents_collection.add_translatable_field( + 'content_id_2', + translation_domain.ContentType.CONTENT, + translation_domain.TranslatableContentFormat.UNICODE_STRING, + self.param2) + return translatable_contents_collection + + +class DummyTranslatableObjectWithoutRegisterMethod( + translation_domain.BaseTranslatableObject): + """A dummy translatable object without + get_translatable_contents_collection() method should raise an exception. + """ + + def __init__( + self, + param1: str, + param2: str + ) -> None: + self.param1 = param1 + self.param2 = param2 + + +class DummyTranslatableObjectWithFourParams( + translation_domain.BaseTranslatableObject): + """A dummy translatable object with four translatable fields as its + properties. 
+ """ + + def __init__( + self, + param1: str, + param2: str, + param3: str, + param4: str + ) -> None: + self.param1 = param1 + self.param2 = param2 + self.param3 = param3 + self.param4 = param4 + + def get_translatable_contents_collection( + self, + **kwargs: Optional[str] + ) -> translation_domain.TranslatableContentsCollection: + translatable_contents_collection = ( + translation_domain.TranslatableContentsCollection()) + + translatable_contents_collection.add_translatable_field( + 'content_id_1', + translation_domain.ContentType.CUSTOMIZATION_ARG, + translation_domain.TranslatableContentFormat.HTML, + self.param1) + translatable_contents_collection.add_translatable_field( + 'content_id_2', + translation_domain.ContentType.DEFAULT_OUTCOME, + translation_domain.TranslatableContentFormat.UNICODE_STRING, + self.param2) + translatable_contents_collection.add_translatable_field( + 'content_id_3', + translation_domain.ContentType.RULE, + translation_domain.TranslatableContentFormat.UNICODE_STRING, + self.param3) + translatable_contents_collection.add_translatable_field( + 'content_id_4', + translation_domain.ContentType.CONTENT, + translation_domain.TranslatableContentFormat.UNICODE_STRING, + self.param4) + return translatable_contents_collection + + +class BaseTranslatableObjectUnitTest(test_utils.GenericTestBase): + """Test class for BaseTranslatableObject.""" + + def setUp(self) -> None: + super().setUp() + self.translatable_object1 = DummyTranslatableObjectWithTwoParams( + 'My name is jhon.', DummyTranslatableObjectWithSingleParam( + 'My name is jack.')) + + def test_get_all_translatable_content_returns_correct_items(self) -> None: + expected_contents = [ + 'My name is jhon.', + 'My name is jack.' 
+ ] + translatable_contents = ( + self.translatable_object1.get_translatable_contents_collection() + .content_id_to_translatable_content.values()) + + self.assertItemsEqual(expected_contents, [ + translatable_content.content_value + for translatable_content in translatable_contents + ]) + + def test_unregistered_translatable_object_raises_exception(self) -> None: + translatable_object = DummyTranslatableObjectWithoutRegisterMethod( + 'My name is jack.', 'My name is jhon.') + + with self.assertRaisesRegex( + Exception, 'Must be implemented in subclasses.'): + translatable_object.get_translatable_contents_collection() + + def test_registering_duplicate_content_id_raises_exception(self) -> None: + translatable_object = ( + DummyTranslatableObjectWithDuplicateContentIdForParams( + 'My name is jack.', 'My name is jhon.') + ) + + with self.assertRaisesRegex( + Exception, + 'Content_id content_id_2 already exists in the ' + 'TranslatableContentsCollection.'): + translatable_object.get_translatable_contents_collection() + + def test_get_all_contents_which_need_translations(self) -> None: + translation_dict = { + 'content_id_3': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + True) + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict) + + translatable_object = DummyTranslatableObjectWithFourParams( + 'My name is jack.', 'My name is jhon.', 'My name is Nikhil.', '') + contents_which_need_translation = ( + translatable_object.get_all_contents_which_need_translations( + entity_translations).values()) + + expected_list_of_contents_which_need_translataion = [ + 'My name is jack.', + 'My name is jhon.', + 'My name is Nikhil.' 
+ ] + list_of_contents_which_need_translataion = [ + translatable_content.content_value + for translatable_content in contents_which_need_translation + ] + self.assertItemsEqual( + expected_list_of_contents_which_need_translataion, + list_of_contents_which_need_translataion) + + def test_get_translatable_content_ids(self) -> None: + translatable_object = DummyTranslatableObjectWithFourParams( + 'My name is jack.', 'My name is jhon.', 'My name is Nikhil.', '') + content_ids = ( + translatable_object.get_translatable_content_ids()) + + self.assertItemsEqual( + content_ids, + ['content_id_1', 'content_id_2', 'content_id_3', 'content_id_4'] + ) + + def test_get_all_contents_which_need_translations_with_digits( + self + ) -> None: + translation_dict = { + 'content_id_3': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + True) + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict) + + translatable_object = DummyTranslatableObjectWithFourParams( + '

    10000

    ', 'My name is jhon.', 'My name is Nikhil.', '') + contents_which_need_translation = ( + translatable_object.get_all_contents_which_need_translations( + entity_translations).values()) + + expected_list_of_contents_which_need_translataion = [ + 'My name is jhon.', + 'My name is Nikhil.' + ] + list_of_contents_which_need_translataion = [ + translatable_content.content_value + for translatable_content in contents_which_need_translation + ] + self.assertItemsEqual( + expected_list_of_contents_which_need_translataion, + list_of_contents_which_need_translataion) + + def test_are_translations_displayable_with_all_translations(self) -> None: + translation_dict = { + 'content_id_2': translation_domain.TranslatedContent( + 'Translation.', + translation_domain.TranslatableContentFormat.HTML, + True), + 'content_id_3': translation_domain.TranslatedContent( + 'Translation.', + translation_domain.TranslatableContentFormat.HTML, + True), + 'content_id_4': translation_domain.TranslatedContent( + 'Translation.', + translation_domain.TranslatableContentFormat.HTML, + True), + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict) + + translatable_object = DummyTranslatableObjectWithFourParams( + 'Content', 'My name is jhon.', 'My name is Nikhil.', '') + self.assertTrue( + translatable_object.are_translations_displayable( + entity_translations)) + + def test_are_translations_displayable_without_rule_translation( + self + ) -> None: + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'Translation.', + translation_domain.TranslatableContentFormat.HTML, + True), + 'content_id_2': translation_domain.TranslatedContent( + 'Translation.', + translation_domain.TranslatableContentFormat.HTML, + True), + 'content_id_4': translation_domain.TranslatedContent( + 'Translation.', + translation_domain.TranslatableContentFormat.HTML, + True), + } + entity_translations = 
translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict) + + translatable_object = DummyTranslatableObjectWithFourParams( + 'Content', 'My name is jhon.', 'My name is Nikhil.', 'Content') + self.assertFalse( + translatable_object.are_translations_displayable( + entity_translations)) + + def test_are_translations_displayable_without_min_translation( + self + ) -> None: + translation_dict = { + 'content_id_2': translation_domain.TranslatedContent( + 'Translation.', + translation_domain.TranslatableContentFormat.HTML, + True), + 'content_id_4': translation_domain.TranslatedContent( + 'Translation.', + translation_domain.TranslatableContentFormat.HTML, + True), + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict) + min_value_swap = self.swap( + feconf, + 'MIN_ALLOWED_MISSING_OR_UPDATE_NEEDED_WRITTEN_TRANSLATIONS', + 1) + translatable_object = DummyTranslatableObjectWithFourParams( + 'Content', 'My name is jhon.', 'My name is Nikhil.', 'Content') + with min_value_swap: + self.assertFalse( + translatable_object.are_translations_displayable( + entity_translations)) + + def test_get_content_count(self) -> None: + translatable_object = DummyTranslatableObjectWithFourParams( + 'My name is jack.', + 'My name is jhon.', + 'My name is Nikhil.', + 'Content' + ) + + self.assertEqual(translatable_object.get_content_count(), 4) + + def test_get_translation_count(self) -> None: + translatable_object = DummyTranslatableObjectWithFourParams( + 'My name is jack.', + 'My name is jhon.', + 'My name is Nikhil.', + 'Content' + ) + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'content_id_1 translation', + translation_domain.TranslatableContentFormat.HTML, + False), + 'content_id_2': translation_domain.TranslatedContent( + 'content_id_2 translation', + 
translation_domain.TranslatableContentFormat.HTML, + False), + 'content_id_3': translation_domain.TranslatedContent( + 'content_id_3 translation', + translation_domain.TranslatableContentFormat.HTML, + False), + 'non_exsting_id': translation_domain.TranslatedContent( + 'content_id_3 translation', + translation_domain.TranslatableContentFormat.HTML, + False), + } + + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict + ) + + self.assertEqual( + translatable_object.get_translation_count(entity_translations), + 3 + ) + + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'content_id_1 translation', + translation_domain.TranslatableContentFormat.HTML, + False), + 'content_id_2': translation_domain.TranslatedContent( + 'content_id_2 translation', + translation_domain.TranslatableContentFormat.HTML, + True), + 'content_id_3': translation_domain.TranslatedContent( + 'content_id_3 translation', + translation_domain.TranslatableContentFormat.HTML, + True), + } + + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict + ) + + self.assertEqual( + translatable_object.get_translation_count(entity_translations), + 1 + ) + + def test_get_all_html_content_strings(self) -> None: + translatable_object = DummyTranslatableObjectWithFourParams( + '

    HTML content

    ', 'My name is jhon.', 'My name is Nikhil.', '') + html_contents = translatable_object.get_all_html_content_strings() + + self.assertItemsEqual(html_contents, ['

    HTML content

    ']) + + def test_validate_translatable_contents_raise_error(self) -> None: + translatable_object = DummyTranslatableObjectWithFourParams( + '

    HTML content

    ', 'My name is jhon.', 'My name is Nikhil.', '') + + with self.assertRaisesRegex( + utils.ValidationError, + 'Expected all content id indexes to be less than' + ): + translatable_object.validate_translatable_contents(2) + + +class EntityTranslationsUnitTests(test_utils.GenericTestBase): + """Test class for EntityTranslation.""" + + def test_creation_of_object(self) -> None: + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False) + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict) + + self.assertEqual(entity_translations.entity_id, 'exp_id') + self.assertEqual(entity_translations.entity_type, 'exploration') + self.assertEqual(entity_translations.entity_version, 1) + self.assertEqual(entity_translations.language_code, 'en') + self.assertEqual( + entity_translations.translations['content_id_1'].content_value, + 'My name is Nikhil.') + self.assertEqual( + entity_translations.translations['content_id_1'].needs_update, + False) + + def test_validate_entity_type(self) -> None: + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False) + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict + ) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex( + utils.ValidationError, + 'entity_type must be a string' + ): + entity_translations.entity_type = 123 # type: ignore[assignment] + entity_translations.validate() + + def test_validate_entity_id(self) -> None: + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False) + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict + ) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + with self.assertRaisesRegex( + utils.ValidationError, + 'entity_id must be a string' + ): + entity_translations.entity_id = 123 # type: ignore[assignment] + entity_translations.validate() + + def test_validate_language_code(self) -> None: + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False) + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict + ) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex( + utils.ValidationError, + 'language_code must be a string' + ): + entity_translations.language_code = 123 # type: ignore[assignment] + entity_translations.validate() + + def test_validate_entity_version(self) -> None: + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False) + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict + ) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + with self.assertRaisesRegex( + utils.ValidationError, + 'entity_version must be an int' + ): + entity_translations.entity_version = '123' # type: ignore[assignment] + entity_translations.validate() + + def test_validate_content_id(self) -> None: + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False) + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict + ) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex( + utils.ValidationError, + 'content_id must be a string' + ): + entity_translations.translations[1] = ( # type: ignore[index] + translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + entity_translations.validate() + + def test_validate_needs_update(self) -> None: + translation_dict = { + 'content_id_1': translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False) + } + entity_translations = translation_domain.EntityTranslation( + 'exp_id', feconf.TranslatableEntityType.EXPLORATION, 1, 'en', + translation_dict + ) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + with self.assertRaisesRegex( + utils.ValidationError, + 'needs_update must be a bool' + ): + entity_translations.translations['content_id_1'].needs_update = 5 # type: ignore[assignment] + entity_translations.validate() + + +class TranslatableContentUnitTests(test_utils.GenericTestBase): + """Test class for TranslatableContent.""" + + def test_creation_of_object(self) -> None: + translatable_content = translation_domain.TranslatableContent( + 'content_id_1', + translation_domain.ContentType.CONTENT, + translation_domain.TranslatableContentFormat.HTML, + 'My name is Jhon.', + ) + + self.assertEqual(translatable_content.content_id, 'content_id_1') + self.assertEqual(translatable_content.content_value, 'My name is Jhon.') + + self.assertEqual( + translatable_content.content_format, + translation_domain.TranslatableContentFormat.HTML) + self.assertEqual( + translatable_content.content_type, + translation_domain.ContentType.CONTENT) + + def test_to_dict_method_of_translatable_content_class(self) -> None: + translatable_content_dict = { + 'content_id': 'content_id_1', + 'content_value': 'My name 
is Jhon.', + 'content_type': 'content', + 'content_format': 'html', + 'interaction_id': None, + 'rule_type': None + } + translatable_content = translation_domain.TranslatableContent( + 'content_id_1', + translation_domain.ContentType.CONTENT, + translation_domain.TranslatableContentFormat.HTML, + 'My name is Jhon.' + ) + + self.assertEqual( + translatable_content.to_dict(), + translatable_content_dict + ) + + +class TranslatedContentUnitTests(test_utils.GenericTestBase): + """Test class for TranslatedContent.""" + + def test_creation_of_object(self) -> None: + translated_content = translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False) + + self.assertEqual(translated_content.content_value, 'My name is Nikhil.') + self.assertEqual(translated_content.needs_update, False) + + def test_to_dict_method_of_translated_content_class(self) -> None: + translated_content = translation_domain.TranslatedContent( + 'My name is Nikhil.', + translation_domain.TranslatableContentFormat.HTML, + False) + translated_content_dict = { + 'content_value': 'My name is Nikhil.', + 'content_format': 'html', + 'needs_update': False + } + + self.assertEqual(translated_content.to_dict(), translated_content_dict) + class MachineTranslationTests(test_utils.GenericTestBase): """Tests for the MachineTranslation domain object.""" @@ -30,7 +708,7 @@ class MachineTranslationTests(test_utils.GenericTestBase): def setUp(self) -> None: """Setup for MachineTranslation domain object tests.""" - super(MachineTranslationTests, self).setUp() + super().setUp() self._init_translation() def _init_translation(self) -> None: @@ -43,7 +721,7 @@ def test_validate_with_invalid_source_language_code_raises(self) -> None: self.translation.source_language_code = 'ABC' expected_error_message = ( 'Invalid source language code: ABC') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, 
expected_error_message): self.translation.validate() @@ -51,7 +729,7 @@ def test_validate_with_invalid_target_language_code_raises(self) -> None: self.translation.target_language_code = 'ABC' expected_error_message = ( 'Invalid target language code: ABC') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, expected_error_message): self.translation.validate() @@ -63,7 +741,7 @@ def test_validate_with_same_source_target_language_codes_raises( expected_error_message = ( 'Expected source_language_code to be different from ' 'target_language_code: "en" = "en"') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( utils.ValidationError, expected_error_message): self.translation.validate() @@ -77,3 +755,302 @@ def test_to_dict(self) -> None: 'translated_text': 'hola mundo' } ) + + +class WrittenTranslationsDomainUnitTests(test_utils.GenericTestBase): + """Test methods operating on written transcripts.""" + + def test_data_formats_are_correct_and_complete(self) -> None: + translatable_class_names_in_data_formats = sorted( + translation_domain.WrittenTranslation. 
+ DATA_FORMAT_TO_TRANSLATABLE_OBJ_TYPE.values()) + self.assertEqual( + translatable_class_names_in_data_formats, + translatable_object_registry.Registry.get_all_class_names()) + + def test_from_and_to_dict_works_correctly(self) -> None: + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { + 'translations_mapping': { + 'content1': { + 'en': { + 'data_format': 'html', + 'translation': 'hello', + 'needs_update': True + }, + 'hi': { + 'data_format': 'html', + 'translation': 'Hey!', + 'needs_update': False + }, + 'fr': { + 'data_format': 'set_of_normalized_string', + 'translation': ['test1', 'test2'], + 'needs_update': False + }, + }, + 'feedback_1': { + 'hi': { + 'data_format': 'html', + 'translation': 'Testing!', + 'needs_update': False + }, + 'en': { + 'data_format': 'html', + 'translation': 'hello!', + 'needs_update': False + }, + 'fr': { + 'data_format': 'set_of_normalized_string', + 'translation': ['test1', 'test2'], + 'needs_update': False + } + } + } + } + + written_translations = translation_domain.WrittenTranslations.from_dict( + written_translations_dict) + written_translations.validate(['content1', 'feedback_1']) + self.assertEqual( + written_translations.to_dict(), written_translations_dict) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
+ def test_add_content_id_for_translation_with_invalid_content_id_raise_error( + self + ) -> None: + written_translations = ( + translation_domain.WrittenTranslations.from_dict({ + 'translations_mapping': {} + })) + invalid_content_id = 123 + with self.assertRaisesRegex( + Exception, 'Expected content_id to be a string, received 123'): + written_translations.add_content_id_for_translation( + invalid_content_id) # type: ignore[arg-type] + + def test_add_content_id_for_translation_with_existing_content_id_raise_error( # pylint: disable=line-too-long + self + ) -> None: + written_translations_dict: translation_domain.WrittenTranslationsDict = { + 'translations_mapping': { + 'feedback_1': { + 'en': { + 'data_format': 'html', + 'translation': 'hello!', + 'needs_update': False + } + } + } + } + + written_translations = translation_domain.WrittenTranslations.from_dict( + written_translations_dict) + existing_content_id = 'feedback_1' + with self.assertRaisesRegex( + Exception, 'The content_id feedback_1 already exist.'): + written_translations.add_content_id_for_translation( + existing_content_id) + + def test_delete_content_id_for_translations_deletes_content_id( + self + ) -> None: + old_written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { + 'translations_mapping': { + 'content': { + 'en': { + 'data_format': 'html', + 'translation': 'hello!', + 'needs_update': False + } + } + } + } + + written_translations = translation_domain.WrittenTranslations.from_dict( + old_written_translations_dict) + self.assertEqual( + len(written_translations.translations_mapping.keys()), 1) + + written_translations.delete_content_id_for_translation('content') + + self.assertEqual( + len(written_translations.translations_mapping.keys()), 0) + + def test_delete_content_id_for_translation_with_nonexisting_content_id_raise_error( # pylint: disable=line-too-long + self + ) -> None: + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { + 
'translations_mapping': { + 'content': {} + } + } + written_translations = translation_domain.WrittenTranslations.from_dict( + written_translations_dict) + nonexisting_content_id_to_delete = 'feedback_1' + with self.assertRaisesRegex( + Exception, 'The content_id feedback_1 does not exist.'): + written_translations.delete_content_id_for_translation( + nonexisting_content_id_to_delete) + + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. + def test_delete_content_id_for_translation_with_invalid_content_id_raise_error( # pylint: disable=line-too-long + self + ) -> None: + written_translations = ( + translation_domain.WrittenTranslations.from_dict({ + 'translations_mapping': {} + })) + invalid_content_id_to_delete = 123 + with self.assertRaisesRegex( + Exception, 'Expected content_id to be a string, '): + written_translations.delete_content_id_for_translation( + invalid_content_id_to_delete) # type: ignore[arg-type] + + def test_validation_with_invalid_content_id_raise_error(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { + 'translations_mapping': { + 123: {} # type: ignore[dict-item] + } + } + + written_translations = translation_domain.WrittenTranslations.from_dict( + written_translations_dict) + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex( + Exception, 'Expected content_id to be a string, '): + written_translations.validate([123]) # type: ignore[list-item] + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_dict_language_code_to_written_translation( + self + ) -> None: + written_translations = translation_domain.WrittenTranslations({ + 'en': [] # type: ignore[dict-item] + }) + + with self.assertRaisesRegex( + Exception, + re.escape('Expected content_id value to be a dict, received []')): + written_translations.validate(None) + + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_with_invalid_type_language_code_raise_error( + self + ) -> None: + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { + 'translations_mapping': { + 'content': { + 123: { # type: ignore[dict-item] + 'data_format': 'html', + 'translation': 'hello!', + 'needs_update': False + } + } + } + } + + written_translations = translation_domain.WrittenTranslations.from_dict( + written_translations_dict) + + with self.assertRaisesRegex( + Exception, 'Expected language_code to be a string, '): + written_translations.validate(['content']) + + def test_validation_with_unknown_language_code_raise_error(self) -> None: + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { + 'translations_mapping': { + 'content': { + 'ed': { + 'data_format': 'html', + 'translation': 'hello!', + 'needs_update': False + } + } + } + } + + written_translations = translation_domain.WrittenTranslations.from_dict( + written_translations_dict) + + with self.assertRaisesRegex(Exception, 'Invalid language_code: ed'): + 
written_translations.validate(['content']) + + def test_validation_with_invalid_content_id_list(self) -> None: + written_translations_dict: ( + translation_domain.WrittenTranslationsDict + ) = { + 'translations_mapping': { + 'content': { + 'en': { + 'data_format': 'html', + 'translation': '

    hello!

    ', + 'needs_update': False + } + } + } + } + + written_translations = translation_domain.WrittenTranslations.from_dict( + written_translations_dict) + + with self.assertRaisesRegex( + Exception, + re.escape( + 'Expected state written_translations to match the listed ' + 'content ids [\'invalid_content\']')): + written_translations.validate(['invalid_content']) + + def test_written_translation_validation(self) -> None: + """Test validation of translation script.""" + written_translation = translation_domain.WrittenTranslation( + 'html', 'Test.', True) + written_translation.validate() + + with self.assertRaisesRegex( + AssertionError, 'Expected unicode HTML string, received 30'): + with self.swap(written_translation, 'translation', 30): + written_translation.validate() + + with self.assertRaisesRegex( + utils.ValidationError, 'Expected needs_update to be a bool' + ): + with self.swap(written_translation, 'needs_update', 20): + written_translation.validate() + + with self.assertRaisesRegex( + utils.ValidationError, 'Invalid data_format' + ): + with self.swap(written_translation, 'data_format', 'int'): + written_translation.validate() + + with self.assertRaisesRegex( + utils.ValidationError, 'Invalid data_format' + ): + with self.swap(written_translation, 'data_format', 2): + written_translation.validate() diff --git a/core/domain/translation_fetchers.py b/core/domain/translation_fetchers.py index 9728a1cb748d..ee811e562f36 100644 --- a/core/domain/translation_fetchers.py +++ b/core/domain/translation_fetchers.py @@ -18,14 +18,24 @@ from __future__ import annotations +from core import feconf from core.domain import translation_domain from core.platform import models +from typing import List, Optional + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import translation_models + + (translation_models,) = models.Registry.import_models([ - models.NAMES.translation]) + models.Names.TRANSLATION]) -def get_translation_from_model(translation_model): +def 
get_translation_from_model( + translation_model: translation_models.MachineTranslationModel +) -> translation_domain.MachineTranslation: """Returns a MachineTranslation object given a MachineTranslationModel loaded from the datastore. @@ -45,7 +55,10 @@ def get_translation_from_model(translation_model): def get_machine_translation( - source_language_code, target_language_code, source_text): + source_language_code: str, + target_language_code: str, + source_text: str +) -> Optional[translation_domain.MachineTranslation]: """Gets MachineTranslation by language codes and source text. Returns None if no translation exists for the given parameters. @@ -68,3 +81,88 @@ def get_machine_translation( if translation_model is None: return None return get_translation_from_model(translation_model) + + +def _get_entity_translation_from_model( + entity_translation_model: translation_models.EntityTranslationsModel +) -> translation_domain.EntityTranslation: + """Returns the EntityTranslation domain object from its model representation + (EntityTranslationsModel). + + Args: + entity_translation_model: EntityTranslatioModel. An instance of + EntityTranslationsModel. + + Returns: + EntityTranslation. An instance of EntityTranslation object, created from + its model. + """ + entity_translation = translation_domain.EntityTranslation.from_dict({ + 'entity_id': entity_translation_model.entity_id, + 'entity_type': entity_translation_model.entity_type, + 'entity_version': entity_translation_model.entity_version, + 'language_code': entity_translation_model.language_code, + 'translations': entity_translation_model.translations + }) + return entity_translation + + +def get_all_entity_translations_for_entity( + entity_type: feconf.TranslatableEntityType, + entity_id: str, + entity_version: int +) -> List[translation_domain.EntityTranslation]: + """Returns a list of entity translation domain objects. + + Args: + entity_type: TranslatableEntityType. 
The type of the entity whose + translations are to be fetched. + entity_id: str. The ID of the entity whose translations are to be + fetched. + entity_version: int. The version of the entity whose translations + are to be fetched. + + Returns: + list(EnitityTranslation). A list of EntityTranslation domain objects. + """ + entity_translation_models = ( + translation_models.EntityTranslationsModel.get_all_for_entity( + entity_type, entity_id, entity_version) + ) + entity_translation_objects = [] + for model in entity_translation_models: + domain_object = _get_entity_translation_from_model(model) + entity_translation_objects.append(domain_object) + + return entity_translation_objects + + +def get_entity_translation( + entity_type: feconf.TranslatableEntityType, + entity_id: str, + entity_version: int, + language_code: str +) -> translation_domain.EntityTranslation: + """Returns a unique entity translation domain object. + + Args: + entity_type: TranslatableEntityType. The type of the entity. + entity_id: str. The ID of the entity. + entity_version: int. The version of the entity. + language_code: str. The language code for the entity. + + Returns: + EntityTranslation. An instance of Entitytranslations. 
+ """ + entity_translation_model = ( + translation_models.EntityTranslationsModel.get_model( + entity_type, entity_id, entity_version, language_code) + ) + + if entity_translation_model: + domain_object = _get_entity_translation_from_model( + entity_translation_model) + return domain_object + return translation_domain.EntityTranslation.create_empty( + entity_type, entity_id, language_code, entity_version=entity_version + ) diff --git a/core/domain/translation_fetchers_test.py b/core/domain/translation_fetchers_test.py index 4fd4bb3baf71..9ff7df87831b 100644 --- a/core/domain/translation_fetchers_test.py +++ b/core/domain/translation_fetchers_test.py @@ -18,24 +18,35 @@ from __future__ import annotations +from core import feconf from core.domain import translation_domain from core.domain import translation_fetchers from core.platform import models from core.tests import test_utils + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import translation_models + + (translation_models,) = models.Registry.import_models([ - models.NAMES.translation]) + models.Names.TRANSLATION]) -class TranslationFetchersTests(test_utils.GenericTestBase): +class MachineTranslationFetchersTests(test_utils.GenericTestBase): - def test_get_translation_from_model(self): + def test_get_translation_from_model(self) -> None: model_id = ( translation_models.MachineTranslationModel.create( 'en', 'es', 'hello world', 'hola mundo') ) + # Ruling out the possibility of None for mypy type checking. + assert model_id is not None model_instance = translation_models.MachineTranslationModel.get( model_id) + # Ruling out the possibility of None for mypy type checking. 
+ assert model_instance is not None self.assertEqual( translation_fetchers.get_translation_from_model( model_instance).to_dict(), @@ -43,16 +54,82 @@ def test_get_translation_from_model(self): 'en', 'es', 'hello world', 'hola mundo').to_dict() ) - def test_get_machine_translation_with_no_translation_returns_none(self): + def test_get_machine_translation_with_no_translation_returns_none( + self + ) -> None: translation = translation_fetchers.get_machine_translation( 'en', 'es', 'untranslated_text') self.assertIsNone(translation) def test_get_machine_translation_for_cached_translation_returns_from_cache( - self): + self + ) -> None: translation_models.MachineTranslationModel.create( 'en', 'es', 'hello world', 'hola mundo') translation = translation_fetchers.get_machine_translation( 'en', 'es', 'hello world' ) + # Ruling out the possibility of None for mypy type checking. + assert translation is not None self.assertEqual(translation.translated_text, 'hola mundo') + + +class EntityTranslationFetchersTests(test_utils.GenericTestBase): + + def test_get_all_entity_translation_objects_for_entity_returns_correclty( + self + ) -> None: + exp_id = 'exp1' + + entity_translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, exp_id, 5 + ) + ) + self.assertEqual(len(entity_translations), 0) + + language_codes = ['hi', 'bn'] + for language_code in language_codes: + translation_models.EntityTranslationsModel.create_new( + 'exploration', exp_id, 5, language_code, {} + ).put() + + entity_translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, exp_id, 5 + ) + ) + self.assertEqual(len(entity_translations), 2) + self.assertItemsEqual( + [ + entity_translation.language_code + for entity_translation in entity_translations + ], language_codes + ) + + def test_get_entity_translation_returns_correctly( + self + ) -> None: + exp_id = 'exp1' + 
translation_models.EntityTranslationsModel.create_new( + 'exploration', exp_id, 5, 'hi', {} + ).put() + + entity_translation = ( + translation_fetchers.get_entity_translation( + feconf.TranslatableEntityType.EXPLORATION, exp_id, 5, 'hi' + ) + ) + self.assertEqual(entity_translation.language_code, 'hi') + + def test_get_entity_translation_creates_empty_object( + self + ) -> None: + exp_id = 'exp1' + entity_translation = ( + translation_fetchers.get_entity_translation( + feconf.TranslatableEntityType.EXPLORATION, exp_id, 5, 'hi' + ) + ) + self.assertEqual(entity_translation.language_code, 'hi') + self.assertEqual(entity_translation.translations, {}) diff --git a/core/domain/translation_services.py b/core/domain/translation_services.py index 08fda6245f3d..a92b87c98a3f 100644 --- a/core/domain/translation_services.py +++ b/core/domain/translation_services.py @@ -18,19 +18,34 @@ from __future__ import annotations +import collections import logging +from core import feconf +from core.domain import exp_domain +from core.domain import translation_domain from core.domain import translation_fetchers from core.platform import models +from typing import Dict, List, Optional, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import translate_services + from mypy_imports import translation_models + + translate_services = models.Registry.import_translate_services() (translation_models,) = models.Registry.import_models([ - models.NAMES.translation]) + models.Names.TRANSLATION]) def get_and_cache_machine_translation( - source_language_code, target_language_code, source_text): + source_language_code: str, + target_language_code: str, + source_text: str +) -> Optional[str]: """Gets a machine translation of the source text for the given source and target languages. 
If no translation exists in the datastore for the given input, generates a machine translation using cloud_translate_services and @@ -74,3 +89,199 @@ def get_and_cache_machine_translation( ) return translated_text + + +def add_new_translation( + entity_type: feconf.TranslatableEntityType, + entity_id: str, + entity_version: int, + language_code: str, + content_id: str, + translated_content: translation_domain.TranslatedContent +) -> None: + """Adds new translated content for the entity in the EntityTranslation + model. + + Args: + entity_type: TranslatableEntityType. The type of the entity. + entity_id: str. The ID of the entity. + entity_version: int. The version of the entity. + language_code: str. The language code for the entity. + content_id: str. The Id of the content. + translated_content: TranslatedContent. The translated content object. + """ + entity_translation = translation_fetchers.get_entity_translation( + entity_type, entity_id, entity_version, language_code) + entity_translation.translations[content_id] = translated_content + entity_translation.validate() + + model = translation_models.EntityTranslationsModel.create_new( + entity_type.value, + entity_id, + entity_version, + language_code, + entity_translation.to_dict()['translations'] + ) + model.update_timestamps() + model.put() + + +def compute_translation_related_change( + updated_exploration: exp_domain.Exploration, + content_ids_corresponding_translations_to_remove: List[str], + content_ids_corresponding_translations_to_mark_needs_update: List[str], +) -> Tuple[List[translation_models.EntityTranslationsModel], Dict[str, int]]: + """Cretase new EntityTranslation models corresponding to translation related + changes. + + Args: + updated_exploration: Exploration. The updated exploration object. + content_ids_corresponding_translations_to_remove: List[str]. The list of + content Ids for translation removal. + content_ids_corresponding_translations_to_mark_needs_update: List[str]. 
+ The list of content Ids to mark translation needs update. + + Returns: + Tuple(list(EntityTranslationsModel), dict(str, int)). A tuple containing + list of new EntityTranslationsModel and a dict with count of translated + contents as value and the languages as key. + """ + old_translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + feconf.TranslatableEntityType.EXPLORATION, + updated_exploration.id, + updated_exploration.version - 1 + )) + + # Create new_translation_models with updated id and entity version. + new_translation_models = [] + translation_counts = {} + for entity_translation in old_translations: + entity_translation.remove_translations( + content_ids_corresponding_translations_to_remove) + entity_translation.mark_translations_needs_update( + content_ids_corresponding_translations_to_mark_needs_update) + + translation_counts[entity_translation.language_code] = ( + updated_exploration.get_translation_count(entity_translation)) + + new_translation_models.append( + translation_models.EntityTranslationsModel.create_new( + entity_translation.entity_type, + entity_translation.entity_id, + entity_translation.entity_version + 1, + entity_translation.language_code, + entity_translation.to_dict()['translations'] + ) + ) + + return new_translation_models, translation_counts + + +def get_languages_with_complete_translation( + exploration: exp_domain.Exploration +) -> List[str]: + """Returns a list of language codes in which the exploration translation + is 100%. + + Returns: + list(str). A list of language codes in which the translation for the + exploration is complete i.e, 100%. 
+ """ + content_count = exploration.get_content_count() + language_code_list = [] + for language_code, count in get_translation_counts( + feconf.TranslatableEntityType.EXPLORATION, + exploration.id, + exploration.version + ).items(): + if count == content_count: + language_code_list.append(language_code) + + return language_code_list + + +def get_displayable_translation_languages( + entity_type: feconf.TranslatableEntityType, + entity: exp_domain.Exploration +) -> List[str]: + """Returns a list of language codes in which the exploration translation + is 100%. + + Returns: + list(str). A list of language codes in which the translation for the + exploration is complete i.e, 100%. + """ + language_code_list = [] + entity_translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + entity_type, entity.id, entity.version)) + + for entity_translation in entity_translations: + if entity.are_translations_displayable(entity_translation): + language_code_list.append(entity_translation.language_code) + + return language_code_list + + +def get_translation_counts( + entity_type: feconf.TranslatableEntityType, + entity_id: str, + entity_version: int +) -> Dict[str, int]: + """Returns a dict representing the number of translations available in a + language for which there exists at least one translation in the + exploration. + + Returns: + dict(str, int). A dict with language code as a key and number of + translation available in that language as the value. 
+ """ + exploration_translation_counts: Dict[str, int] = collections.defaultdict( + int) + entity_translations = ( + translation_fetchers.get_all_entity_translations_for_entity( + entity_type, + entity_id, + entity_version) + ) + for entity_translation in entity_translations: + lang_code = entity_translation.language_code + translation_count_in_a_lang_code = ( + len(entity_translation.translations.keys())) + + exploration_translation_counts[lang_code] += ( + translation_count_in_a_lang_code) + + return dict(exploration_translation_counts) + + +def get_translatable_text( + exploration: exp_domain.Exploration, language_code: str +) -> Dict[str, Dict[str, translation_domain.TranslatableContent]]: + """Returns all the contents which needs translation in the given + language. + + Args: + exploration: Exploration. The Exploration object. + language_code: str. The language code in which translation is + required. + + Returns: + dict(str, list(TranslatableContent)). A dict with state names + as keys and a list of TranslatableContent as values. 
+ """ + entity_translations = ( + translation_fetchers.get_entity_translation( + feconf.TranslatableEntityType.EXPLORATION, + exploration.id, + exploration.version, + language_code) + ) + state_names_to_content_id_mapping = {} + for state_name, state in exploration.states.items(): + state_names_to_content_id_mapping[state_name] = ( + state.get_all_contents_which_need_translations( + entity_translations)) + + return state_names_to_content_id_mapping diff --git a/core/domain/translation_services_test.py b/core/domain/translation_services_test.py index c41fa5d2f083..7ef9ef2b0bb9 100644 --- a/core/domain/translation_services_test.py +++ b/core/domain/translation_services_test.py @@ -18,26 +18,38 @@ from __future__ import annotations +from core import feconf +from core.domain import exp_domain +from core.domain import translation_domain from core.domain import translation_fetchers from core.domain import translation_services from core.platform import models from core.tests import test_utils +from typing import Sequence + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import translate_services + from mypy_imports import translation_models + + translate_services = models.Registry.import_translate_services() (translation_models,) = models.Registry.import_models([ - models.NAMES.translation]) + models.Names.TRANSLATION]) class TranslationServiceTests(test_utils.GenericTestBase): - def setUp(self): - super(TranslationServiceTests, self).setUp() + def setUp(self) -> None: + super().setUp() translation_models.MachineTranslationModel.create( 'en', 'es', 'text to translate', 'texto para traducir') def test_get_machine_translation_with_same_source_and_target_language_code( - self): + self + ) -> None: translated_text = ( translation_services.get_and_cache_machine_translation( 'en', 'en', 'text to translate') @@ -48,7 +60,8 @@ def test_get_machine_translation_with_same_source_and_target_language_code( self.assertIsNone(translation) def 
test_machine_translation_with_non_allowlisted_language_returns_none( - self): + self + ) -> None: translated_text = ( translation_services.get_and_cache_machine_translation( 'en', 'hi', 'text to translate') @@ -72,7 +85,7 @@ def test_machine_translation_with_non_allowlisted_language_returns_none( ) ) - def test_get_machine_translation_checks_datastore_first(self): + def test_get_machine_translation_checks_datastore_first(self) -> None: with self.swap_to_always_raise( translate_services.CLIENT, 'translate', error=AssertionError ): @@ -83,7 +96,8 @@ def test_get_machine_translation_checks_datastore_first(self): ) def test_get_machine_translation_with_new_translation_saves_translation( - self): + self + ) -> None: translated_text = ( translation_services.get_and_cache_machine_translation( 'en', 'fr', 'hello world') @@ -92,4 +106,301 @@ def test_get_machine_translation_with_new_translation_saves_translation( translation = translation_fetchers.get_machine_translation( 'en', 'fr', 'hello world') self.assertIsNotNone(translation) + # Ruling out the possibility of None for mypy type checking. 
+ assert translation is not None self.assertEqual(translation.translated_text, 'Bonjour le monde') + + +class EntityTranslationServicesTest(test_utils.GenericTestBase): + """Test class for the entity translation services.""" + + def setUp(self) -> None: + super().setUp() + + self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + + self.EXP_ID = 'exp_id_123' + self.exp = self.save_new_valid_exploration(self.EXP_ID, self.owner_id) + + def test_add_new_translation_creats_new_model_if_needed(self) -> None: + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = translation_models.EntityTranslationsModel.get_all().fetch() + self.assertEqual(len(entity_translation_models), 0) + + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, + self.EXP_ID, + 5, + 'hi', + 'content_5', + translation_domain.TranslatedContent( + 'Translations in Hindi!', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + + entity_translation_models = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + self.assertEqual(len(entity_translation_models), 1) + self.assertEqual(entity_translation_models[0].entity_id, self.EXP_ID) + self.assertEqual(entity_translation_models[0].language_code, 'hi') + + def test_add_new_translation_adds_translations_to_existing_model( + self + ) -> None: + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, + self.EXP_ID, + 5, + 'hi', + 'content_5', + translation_domain.TranslatedContent( + 'Translations in Hindi!', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = translation_models.EntityTranslationsModel.get_all().fetch() + self.assertEqual(len(entity_translation_models), 1) + entity_translation_model = entity_translation_models[0] + 
self.assertEqual(entity_translation_model.entity_id, self.EXP_ID) + self.assertEqual(entity_translation_model.language_code, 'hi') + self.assertEqual( + list(entity_translation_model.translations), ['content_5']) + + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, + self.EXP_ID, + 5, + 'hi', + 'default_outcome_2', + translation_domain.TranslatedContent( + 'Translations in Hindi!', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + + entity_translation_models = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + self.assertEqual(len(entity_translation_models), 1) + entity_translation_model = entity_translation_models[0] + self.assertEqual(entity_translation_model.entity_id, self.EXP_ID) + self.assertEqual(entity_translation_model.language_code, 'hi') + self.assertEqual( + list(entity_translation_model.translations.keys()), + ['content_5', 'default_outcome_2'] + ) + + def test_compute_translation_related_change_removes_translations( + self + ) -> None: + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.EXP_ID, 5, 'hi', + 'content_5', translation_domain.TranslatedContent( + 'Translations in Hindi!', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.EXP_ID, 5, 'hi', + 'content_6', translation_domain.TranslatedContent( + 'Translations in Hindi!', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = translation_models.EntityTranslationsModel.get_all().fetch() + self.assertEqual(len(entity_translation_models), 1) + entity_translation_model = entity_translation_models[0] + self.assertEqual(entity_translation_model.entity_version, 5) + self.assertEqual( + list(entity_translation_model.translations.keys()), + ['content_5', 'content_6'] + ) + 
self.exp.version = 6 + + entity_translations, _ = ( + translation_services.compute_translation_related_change( + self.exp, ['content_5'], [] + ) + ) + + self.assertEqual(len(entity_translations), 1) + entity_translation = entity_translations[0] + self.assertEqual( + list(entity_translation.translations.keys()), + ['content_6'] + ) + + def test_compute_translation_related_change_mark_translation_needs_update( + self + ) -> None: + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.EXP_ID, 5, 'hi', + 'content_5', translation_domain.TranslatedContent( + 'Translations in Hindi!', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.EXP_ID, 5, 'hi', + 'content_6', translation_domain.TranslatedContent( + 'Translations in Hindi!', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = translation_models.EntityTranslationsModel.get_all().fetch() + self.assertEqual(len(entity_translation_models), 1) + entity_translation_model = entity_translation_models[0] + self.assertEqual(entity_translation_model.entity_version, 5) + self.assertEqual([ + t['needs_update'] + for t in entity_translation_model.translations.values() + ], [False, False]) + + self.exp.version = 6 + + entity_translation_models, _ = ( + translation_services.compute_translation_related_change( + self.exp, [], ['content_6'] + ) + ) + self.assertEqual(len(entity_translation_models), 1) + entity_translation = entity_translation_models[0] + self.assertItemsEqual( + [ + t['needs_update'] + for t in entity_translation.translations.values() + ], [False, True] + ) + + def test_get_displayable_translation_languages_returns_correct_items( + self + ) -> None: + expected_language_list = ['ak', 'bn', 'hi'] + for lang_code in expected_language_list: + 
translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, + self.EXP_ID, 5, lang_code, + 'content_0', translation_domain.TranslatedContent( + 'Translations in %s!' % lang_code, + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + exp = exp_domain.Exploration.create_default_exploration( + self.EXP_ID, 'exp title') + exp.version = 5 + + are_translations_displayable_swap = self.swap_to_always_return( + exp, 'are_translations_displayable', True) + with are_translations_displayable_swap: + observed_language_list = ( + translation_services.get_displayable_translation_languages( + feconf.TranslatableEntityType.EXPLORATION, + exp + ) + ) + self.assertItemsEqual(observed_language_list, expected_language_list) + + are_translations_displayable_swap = self.swap_to_always_return( + exp, 'are_translations_displayable', False) + with are_translations_displayable_swap: + observed_language_list = ( + translation_services.get_displayable_translation_languages( + feconf.TranslatableEntityType.EXPLORATION, + exp + ) + ) + self.assertItemsEqual(observed_language_list, []) + + def test_get_languages_with_complete_translation_returns_correct_lang( + self + ) -> None: + expected_language_list = ['ak', 'bn'] + for lang_code in expected_language_list: + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.EXP_ID, + 5, lang_code, 'content_0', + translation_domain.TranslatedContent( + 'Translations in %s!' % lang_code, + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.EXP_ID, 5, + lang_code, 'default_outcome_1', + translation_domain.TranslatedContent( + 'Translations in %s!' 
% lang_code, + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.EXP_ID, 5, 'sq', + 'content_0', translation_domain.TranslatedContent( + 'Translations in sq!', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + + exp = exp_domain.Exploration.create_default_exploration( + self.EXP_ID, 'exp title') + init_state = exp.states[exp.init_state_name] + init_state.content.html = 'Content for translation' + assert init_state.interaction.default_outcome is not None + init_state.interaction.default_outcome.feedback.html = 'Content' + exp.version = 5 + + observed_language_list = ( + translation_services.get_languages_with_complete_translation( + exp + ) + ) + self.assertItemsEqual(observed_language_list, expected_language_list) + + def test_get_translatable_text_returns_correct_dict(self) -> None: + exp = exp_domain.Exploration.create_default_exploration( + self.EXP_ID, 'exp title') + init_state = exp.states[exp.init_state_name] + init_state.content.html = 'Content for translation' + assert init_state.interaction.default_outcome is not None + init_state.interaction.default_outcome.feedback.html = 'Content' + exp.version = 5 + + translation_services.add_new_translation( + feconf.TranslatableEntityType.EXPLORATION, self.EXP_ID, 5, 'sq', + 'content_0', translation_domain.TranslatedContent( + 'Translations in sq!', + translation_domain.TranslatableContentFormat.HTML, + False + ) + ) + + observed_translatable_text = translation_services.get_translatable_text( + exp, 'sq') + self.assertEqual( + list(observed_translatable_text.keys()), + ['Introduction'] + ) + self.assertEqual( + list(observed_translatable_text['Introduction'].keys()), + ['default_outcome_1'] + ) diff --git a/core/domain/user_domain.py b/core/domain/user_domain.py index 30308642adde..02370beb1bf0 100644 --- a/core/domain/user_domain.py +++ b/core/domain/user_domain.py @@ -18,12 +18,48 @@ 
from __future__ import annotations +import datetime import re from core import feconf from core import utils from core.constants import constants +from typing import Dict, List, Optional, TypedDict + + +# TODO(#15105): Refactor UserSettings to limit the number of Optional +# fields used in UserSettingsDict. +class UserSettingsDict(TypedDict): + """Dictionary representing the UserSettings object.""" + + email: str + roles: List[str] + banned: bool + has_viewed_lesson_info_modal_once: bool + username: Optional[str] + normalized_username: Optional[str] + last_agreed_to_terms: Optional[datetime.datetime] + last_started_state_editor_tutorial: Optional[datetime.datetime] + last_started_state_translation_tutorial: Optional[datetime.datetime] + last_logged_in: Optional[datetime.datetime] + last_created_an_exploration: Optional[datetime.datetime] + last_edited_an_exploration: Optional[datetime.datetime] + profile_picture_data_url: Optional[str] + default_dashboard: str + creator_dashboard_display_pref: str + user_bio: str + subject_interests: List[str] + first_contribution_msec: Optional[float] + preferred_language_codes: List[str] + preferred_site_language_code: Optional[str] + preferred_audio_language_code: Optional[str] + preferred_translation_language_code: Optional[str] + pin: Optional[str] + display_alias: Optional[str] + deleted: bool + created_on: Optional[datetime.datetime] + class UserSettings: """Value object representing a user's settings. @@ -32,6 +68,9 @@ class UserSettings: user_id: str. The unique ID of the user. email: str. The user email. roles: list(str). Roles of the user. + has_viewed_lesson_info_modal_once: bool. Flag to check whether + the user has viewed lesson info modal once which shows the progress + of the user through exploration checkpoints. username: str or None. Identifiable username to display in the UI. last_agreed_to_terms: datetime.datetime or None. When the user last agreed to the terms of the site. 
@@ -46,7 +85,7 @@ class UserSettings: last edited an exploration. profile_picture_data_url: str or None. User uploaded profile picture as a dataURI string. - default_dashboard: str or None. The default dashboard of the user. + default_dashboard: str. The default dashboard of the user. user_bio: str. User-specified biography. subject_interests: list(str) or None. Subject interests specified by the user. @@ -56,6 +95,9 @@ class UserSettings: preferences specified by the user. preferred_site_language_code: str or None. System language preference. preferred_audio_language_code: str or None. Audio language preference. + preferred_translation_language_code: str or None. Text Translation + language preference of the translator that persists on the + contributor dashboard. pin: str or None. The PIN of the user's profile for android. display_alias: str or None. Display name of a user who is logged into the Android app. None when the request is coming from web @@ -63,17 +105,39 @@ class UserSettings: """ def __init__( - self, user_id, email, roles, banned, username=None, - last_agreed_to_terms=None, last_started_state_editor_tutorial=None, - last_started_state_translation_tutorial=None, last_logged_in=None, - last_created_an_exploration=None, last_edited_an_exploration=None, - profile_picture_data_url=None, default_dashboard=None, - creator_dashboard_display_pref=( - constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['CARD']), - user_bio='', subject_interests=None, first_contribution_msec=None, - preferred_language_codes=None, preferred_site_language_code=None, - preferred_audio_language_code=None, pin=None, display_alias=None, - deleted=False, created_on=None): + self, + user_id: str, + email: str, + roles: List[str], + banned: bool, + has_viewed_lesson_info_modal_once: bool, + username: Optional[str] = None, + last_agreed_to_terms: Optional[datetime.datetime] = None, + last_started_state_editor_tutorial: ( + Optional[datetime.datetime]) = None, + 
last_started_state_translation_tutorial: ( + Optional[datetime.datetime]) = None, + last_logged_in: Optional[datetime.datetime]=None, + last_created_an_exploration: ( + Optional[datetime.datetime]) = None, + last_edited_an_exploration: ( + Optional[datetime.datetime]) = None, + profile_picture_data_url: Optional[str]=None, + default_dashboard: str = constants.DASHBOARD_TYPE_LEARNER, + creator_dashboard_display_pref: str = ( + constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['CARD']), + user_bio: str = '', + subject_interests: Optional[List[str]] = None, + first_contribution_msec: Optional[float] = None, + preferred_language_codes: Optional[List[str]] = None, + preferred_site_language_code: Optional[str] = None, + preferred_audio_language_code: Optional[str] = None, + preferred_translation_language_code: Optional[str] = None, + pin: Optional[str] = None, + display_alias: Optional[str] = None, + deleted: bool = False, + created_on: Optional[datetime.datetime] = None + ) -> None: """Constructs a UserSettings domain object. Args: @@ -81,6 +145,9 @@ def __init__( email: str. The user email. roles: list(str). Roles of the user. banned: bool. Whether the uses is banned. + has_viewed_lesson_info_modal_once: bool. Flag to check whether + the user has viewed lesson info modal once which shows the + progress of the user through exploration checkpoints. username: str or None. Identifiable username to display in the UI. last_agreed_to_terms: datetime.datetime or None. When the user last agreed to the terms of the site. @@ -96,7 +163,7 @@ def __init__( user last edited an exploration. profile_picture_data_url: str or None. User uploaded profile picture as a dataURI string. - default_dashboard: str|None. The default dashboard of the user. + default_dashboard: str. The default dashboard of the user. creator_dashboard_display_pref: str. The creator dashboard of the user. user_bio: str. User-specified biography. @@ -110,6 +177,9 @@ def __init__( preference. 
preferred_audio_language_code: str or None. Default language used for audio translations preference. + preferred_translation_language_code: str or None. Text Translation + language preference of the translator that persists on the + contributor dashboard. pin: str or None. The PIN of the user's profile for android. display_alias: str or None. Display name of a user who is logged into the Android app. None when the request is coming from @@ -141,13 +211,17 @@ def __init__( preferred_language_codes if preferred_language_codes else []) self.preferred_site_language_code = preferred_site_language_code self.preferred_audio_language_code = preferred_audio_language_code + self.preferred_translation_language_code = ( + preferred_translation_language_code) self.pin = pin self.display_alias = display_alias self.banned = banned self.deleted = deleted self.created_on = created_on + self.has_viewed_lesson_info_modal_once = ( + has_viewed_lesson_info_modal_once) - def validate(self): + def validate(self) -> None: """Checks that the user_id, email, roles, banned, pin and display_alias fields of this UserSettings domain object are valid. @@ -212,8 +286,11 @@ def validate(self): 'Expected PIN to be a string, received %s' % self.pin ) - elif (len(self.pin) != feconf.FULL_USER_PIN_LENGTH and - len(self.pin) != feconf.PROFILE_USER_PIN_LENGTH): + + if ( + len(self.pin) != feconf.FULL_USER_PIN_LENGTH and + len(self.pin) != feconf.PROFILE_USER_PIN_LENGTH + ): raise utils.ValidationError( 'User PIN can only be of length %s or %s' % ( @@ -221,12 +298,12 @@ def validate(self): feconf.PROFILE_USER_PIN_LENGTH ) ) - else: - for character in self.pin: - if character < '0' or character > '9': - raise utils.ValidationError( - 'Only numeric characters are allowed in PIN.' - ) + + for character in self.pin: + if character < '0' or character > '9': + raise utils.ValidationError( + 'Only numeric characters are allowed in PIN.' 
+ ) if (self.display_alias is not None and not isinstance(self.display_alias, str)): @@ -256,7 +333,28 @@ def validate(self): '%s is not a valid value for the dashboard display ' 'preferences.' % (self.creator_dashboard_display_pref)) - def populate_from_modifiable_user_data(self, modifiable_user_data): + def record_user_edited_an_exploration(self) -> None: + """Updates last_edited_an_exploration to the current datetime for the + user. + """ + self.last_edited_an_exploration = datetime.datetime.utcnow() + + def update_first_contribution_msec( + self, first_contribution_msec: float + ) -> None: + """Updates first_contribution_msec of user with given user_id + if it is set to None. + + Args: + first_contribution_msec: float. New time to set in milliseconds + representing user's first contribution to Oppia. + """ + if self.first_contribution_msec is None: + self.first_contribution_msec = first_contribution_msec + + def populate_from_modifiable_user_data( + self, modifiable_user_data: ModifiableUserData + ) -> None: """Populate the UserSettings domain object using the user data in modifiable_user_data. @@ -281,13 +379,15 @@ def populate_from_modifiable_user_data(self, modifiable_user_data): modifiable_user_data.preferred_site_language_code) self.preferred_audio_language_code = ( modifiable_user_data.preferred_audio_language_code) + self.preferred_translation_language_code = ( + modifiable_user_data.preferred_translation_language_code) self.pin = modifiable_user_data.pin - def to_dict(self): + def to_dict(self) -> UserSettingsDict: """Convert the UserSettings domain instance into a dictionary form with its keys as the attributes of this class. - Rerurns: + Returns: dict. A dictionary containing the UserSettings class information in a dictionary form. 
""" @@ -319,14 +419,18 @@ def to_dict(self): self.preferred_site_language_code), 'preferred_audio_language_code': ( self.preferred_audio_language_code), + 'preferred_translation_language_code': ( + self.preferred_translation_language_code), 'pin': self.pin, 'display_alias': self.display_alias, 'deleted': self.deleted, - 'created_on': self.created_on + 'created_on': self.created_on, + 'has_viewed_lesson_info_modal_once': ( + self.has_viewed_lesson_info_modal_once) } @property - def truncated_email(self): + def truncated_email(self) -> str: """Returns truncated email by replacing last two characters before @ with period. @@ -346,7 +450,7 @@ def truncated_email(self): return '%s%s' % (first_part, last_part) @property - def normalized_username(self): + def normalized_username(self) -> Optional[str]: """Returns username in lowercase or None if it does not exist. Returns: @@ -354,10 +458,13 @@ def normalized_username(self): the normalized version of the username. Otherwise, returns None. """ - return self.normalize_username(self.username) + if self.username: + return self.normalize_username(self.username) + else: + return None @classmethod - def normalize_username(cls, username): + def normalize_username(cls, username: str) -> str: """Returns the normalized version of the given username, or None if the passed-in 'username' is None. @@ -365,14 +472,13 @@ def normalize_username(cls, username): username: str. Identifiable username to display in the UI. Returns: - str or None. The normalized version of the given username, - or None if the passed-in username is None. + str. The normalized version of the given username. """ - return username.lower() if username else None + return username.lower() @classmethod - def require_valid_username(cls, username: str): + def require_valid_username(cls, username: str) -> None: """Checks if the given username is valid or not. 
Args: @@ -388,29 +494,29 @@ def require_valid_username(cls, username: str): """ if not username: raise utils.ValidationError('Empty username supplied.') - elif len(username) > constants.MAX_USERNAME_LENGTH: + if len(username) > constants.MAX_USERNAME_LENGTH: raise utils.ValidationError( 'A username can have at most %s characters.' % constants.MAX_USERNAME_LENGTH) - elif not re.match(feconf.ALPHANUMERIC_REGEX, username): + if not re.match(feconf.ALPHANUMERIC_REGEX, username): raise utils.ValidationError( 'Usernames can only have alphanumeric characters.') - else: - # Disallow usernames that contain the system usernames or the - # strings "admin" or "oppia". - reserved_usernames = set(feconf.SYSTEM_USERS.values()) | set([ - 'admin', 'oppia']) - for reserved_username in reserved_usernames: - if reserved_username in username.lower().strip(): - raise utils.ValidationError( - 'This username is not available.') - def mark_banned(self): + # Disallow usernames that contain the system usernames or the + # strings "admin" or "oppia". + reserved_usernames = ( + set(feconf.SYSTEM_USERS.values()) | {'admin', 'oppia'} + ) + for reserved_username in reserved_usernames: + if reserved_username in username.lower().strip(): + raise utils.ValidationError('This username is not available.') + + def mark_banned(self) -> None: """Marks a user banned.""" self.banned = True self.roles = [] - def unmark_banned(self, default_role): + def unmark_banned(self, default_role: str) -> None: """Unmarks ban for a banned user. Args: @@ -420,22 +526,35 @@ def unmark_banned(self, default_role): self.banned = False self.roles = [default_role] + def mark_lesson_info_modal_viewed(self) -> None: + """Sets has_viewed_lesson_info_modal_once to true which shows + the user has viewed their progress through exploration in the lesson + info modal at least once in their lifetime journey. 
+ """ + self.has_viewed_lesson_info_modal_once = True + class UserActionsInfo: """A class representing information of user actions. Attributes: - user_id: str. The unique ID of the user. + user_id: str|None. The unique ID of the user, or None if the user + is not logged in. roles: list(str). The roles of the user. actions: list(str). A list of actions accessible to the role. """ - def __init__(self, user_id, roles, actions): + def __init__( + self, + user_id: Optional[str], + roles: List[str], + actions: List[str] + ) -> None: self._user_id = user_id self._roles = roles self._actions = actions @property - def user_id(self): + def user_id(self) -> Optional[str]: """Returns the unique ID of the user. Returns: @@ -444,7 +563,7 @@ def user_id(self): return self._user_id @property - def roles(self): + def roles(self) -> List[str]: """Returns the roles of user. Returns: @@ -453,7 +572,7 @@ def roles(self): return self._roles @property - def actions(self): + def actions(self) -> List[str]: """Returns list of actions accessible to a user. Returns: @@ -474,7 +593,11 @@ class UserContributions: """ def __init__( - self, user_id, created_exploration_ids, edited_exploration_ids): + self, + user_id: str, + created_exploration_ids: List[str], + edited_exploration_ids: List[str] + ) -> None: """Constructs a UserContributions domain object. Args: @@ -488,12 +611,13 @@ def __init__( self.created_exploration_ids = created_exploration_ids self.edited_exploration_ids = edited_exploration_ids - def validate(self): + def validate(self) -> None: """Checks that user_id, created_exploration_ids and edited_exploration_ids fields of this UserContributions domain object are valid. Raises: + ValidationError. No user id specified. ValidationError. The user_id is not str. ValidationError. The created_exploration_ids is not a list. ValidationError. 
The exploration_id in created_exploration_ids @@ -530,6 +654,29 @@ def validate(self): 'to be a string, received %s' % ( exploration_id)) + def add_created_exploration_id(self, exploration_id: str) -> None: + """Adds an exploration_id to list of created explorations. + + Args: + exploration_id: str. The exploration id. + """ + if exploration_id not in self.created_exploration_ids: + self.created_exploration_ids.append(exploration_id) + self.created_exploration_ids.sort() + + def add_edited_exploration_id( + self, + exploration_id: str + ) -> None: + """Adds an exploration_id to list of edited explorations. + + Args: + exploration_id: str. The exploration id. + """ + if exploration_id not in self.edited_exploration_ids: + self.edited_exploration_ids.append(exploration_id) + self.edited_exploration_ids.sort() + class UserGlobalPrefs: """Domain object for user global email preferences. @@ -546,9 +693,12 @@ class UserGlobalPrefs: """ def __init__( - self, can_receive_email_updates, can_receive_editor_role_email, - can_receive_feedback_message_email, - can_receive_subscription_email): + self, + can_receive_email_updates: bool, + can_receive_editor_role_email: bool, + can_receive_feedback_message_email: bool, + can_receive_subscription_email: bool + ) -> None: """Constructs a UserGlobalPrefs domain object. 
Args: @@ -568,7 +718,7 @@ def __init__( self.can_receive_subscription_email = can_receive_subscription_email @classmethod - def create_default_prefs(cls): + def create_default_prefs(cls) -> UserGlobalPrefs: """Returns UserGlobalPrefs with default attributes.""" return cls( feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE, @@ -577,6 +727,13 @@ def create_default_prefs(cls): feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE) +class UserExplorationPrefsDict(TypedDict): + """Dictionary representing the UserExplorationPrefs object.""" + + mute_feedback_notifications: bool + mute_suggestion_notifications: bool + + class UserExplorationPrefs: """Domain object for user exploration email preferences. @@ -588,7 +745,10 @@ class UserExplorationPrefs: """ def __init__( - self, mute_feedback_notifications, mute_suggestion_notifications): + self, + mute_feedback_notifications: bool, + mute_suggestion_notifications: bool + ) -> None: """Constructs a UserExplorationPrefs domain object. Args: @@ -601,13 +761,13 @@ def __init__( self.mute_suggestion_notifications = mute_suggestion_notifications @classmethod - def create_default_prefs(cls): + def create_default_prefs(cls) -> UserExplorationPrefs: """Returns UserExplorationPrefs with default attributes.""" return cls( feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE, feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE) - def to_dict(self): + def to_dict(self) -> UserExplorationPrefsDict: """Return dictionary representation of UserExplorationPrefs. 
Returns: @@ -627,8 +787,13 @@ class ExpUserLastPlaythrough: """Domain object for an exploration last playthrough model.""" def __init__( - self, user_id, exploration_id, last_played_exp_version, - last_updated, last_played_state_name): + self, + user_id: str, + exploration_id: str, + last_played_exp_version: int, + last_updated: datetime.datetime, + last_played_state_name: str + ) -> None: self.id = '%s.%s' % (user_id, exploration_id) self.user_id = user_id self.exploration_id = exploration_id @@ -637,7 +802,10 @@ def __init__( self.last_played_state_name = last_played_state_name def update_last_played_information( - self, last_played_exp_version, last_played_state_name): + self, + last_played_exp_version: int, + last_played_state_name: str + ) -> None: """Updates the last playthrough information of the user. Args: @@ -654,8 +822,14 @@ class IncompleteActivities: """Domain object for the incomplete activities model.""" def __init__( - self, user_id, exploration_ids, collection_ids, story_ids, - partially_learnt_topic_ids, partially_mastered_topic_id=None): + self, + user_id: str, + exploration_ids: List[str], + collection_ids: List[str], + story_ids: List[str], + partially_learnt_topic_ids: List[str], + partially_mastered_topic_id: Optional[str] = None + ) -> None: self.id = user_id self.exploration_ids = exploration_ids self.collection_ids = collection_ids @@ -663,7 +837,7 @@ def __init__( self.partially_learnt_topic_ids = partially_learnt_topic_ids self.partially_mastered_topic_id = partially_mastered_topic_id - def add_exploration_id(self, exploration_id): + def add_exploration_id(self, exploration_id: str) -> None: """Adds the exploration id to the list of incomplete exploration ids. 
Args: @@ -672,7 +846,7 @@ def add_exploration_id(self, exploration_id): """ self.exploration_ids.append(exploration_id) - def remove_exploration_id(self, exploration_id): + def remove_exploration_id(self, exploration_id: str) -> None: """Removes the exploration id from the list of incomplete exploration ids. @@ -682,7 +856,7 @@ def remove_exploration_id(self, exploration_id): """ self.exploration_ids.remove(exploration_id) - def add_collection_id(self, collection_id): + def add_collection_id(self, collection_id: str) -> None: """Adds the collection id to the list of incomplete collection ids. Args: @@ -691,7 +865,7 @@ def add_collection_id(self, collection_id): """ self.collection_ids.append(collection_id) - def remove_collection_id(self, collection_id): + def remove_collection_id(self, collection_id: str) -> None: """Removes the collection id from the list of incomplete collection ids. @@ -701,7 +875,7 @@ def remove_collection_id(self, collection_id): """ self.collection_ids.remove(collection_id) - def add_story_id(self, story_id): + def add_story_id(self, story_id: str) -> None: """Adds the story id to the list of incomplete story ids. Args: @@ -710,7 +884,7 @@ def add_story_id(self, story_id): """ self.story_ids.append(story_id) - def remove_story_id(self, story_id): + def remove_story_id(self, story_id: str) -> None: """Removes the story id from the list of incomplete story ids. @@ -720,7 +894,9 @@ def remove_story_id(self, story_id): """ self.story_ids.remove(story_id) - def add_partially_learnt_topic_id(self, partially_learnt_topic_id): + def add_partially_learnt_topic_id( + self, partially_learnt_topic_id: str + ) -> None: """Adds the topic id to the list of partially learnt topic ids. 
Args: @@ -729,7 +905,9 @@ def add_partially_learnt_topic_id(self, partially_learnt_topic_id): """ self.partially_learnt_topic_ids.append(partially_learnt_topic_id) - def remove_partially_learnt_topic_id(self, partially_learnt_topic_id): + def remove_partially_learnt_topic_id( + self, partially_learnt_topic_id: str + ) -> None: """Removes the topic id from the list of partially learnt topic ids. @@ -744,8 +922,14 @@ class CompletedActivities: """Domain object for the activities completed by learner model.""" def __init__( - self, user_id, exploration_ids, collection_ids, story_ids, - learnt_topic_ids, mastered_topic_ids=None): + self, + user_id: str, + exploration_ids: List[str], + collection_ids: List[str], + story_ids: List[str], + learnt_topic_ids: List[str], + mastered_topic_ids: Optional[List[str]] = None + ) -> None: self.id = user_id self.exploration_ids = exploration_ids self.collection_ids = collection_ids @@ -753,7 +937,7 @@ def __init__( self.learnt_topic_ids = learnt_topic_ids self.mastered_topic_ids = mastered_topic_ids - def add_exploration_id(self, exploration_id): + def add_exploration_id(self, exploration_id: str) -> None: """Adds the exploration id to the list of completed exploration ids. Args: @@ -762,7 +946,7 @@ def add_exploration_id(self, exploration_id): """ self.exploration_ids.append(exploration_id) - def remove_exploration_id(self, exploration_id): + def remove_exploration_id(self, exploration_id: str) -> None: """Removes the exploration id from the list of completed exploration ids. @@ -772,7 +956,7 @@ def remove_exploration_id(self, exploration_id): """ self.exploration_ids.remove(exploration_id) - def add_collection_id(self, collection_id): + def add_collection_id(self, collection_id: str) -> None: """Adds the collection id to the list of completed collection ids. 
Args: @@ -781,7 +965,7 @@ def add_collection_id(self, collection_id): """ self.collection_ids.append(collection_id) - def remove_collection_id(self, collection_id): + def remove_collection_id(self, collection_id: str) -> None: """Removes the collection id from the list of completed collection ids. @@ -791,7 +975,7 @@ def remove_collection_id(self, collection_id): """ self.collection_ids.remove(collection_id) - def add_story_id(self, story_id): + def add_story_id(self, story_id: str) -> None: """Adds the story id to the list of completed story ids. Args: @@ -800,7 +984,7 @@ def add_story_id(self, story_id): """ self.story_ids.append(story_id) - def remove_story_id(self, story_id): + def remove_story_id(self, story_id: str) -> None: """Removes the story id from the list of completed story ids. @@ -810,7 +994,7 @@ def remove_story_id(self, story_id): """ self.story_ids.remove(story_id) - def add_learnt_topic_id(self, learnt_topic_id): + def add_learnt_topic_id(self, learnt_topic_id: str) -> None: """Adds the topic id to the list of learnt topic ids. Args: @@ -819,7 +1003,7 @@ def add_learnt_topic_id(self, learnt_topic_id): """ self.learnt_topic_ids.append(learnt_topic_id) - def remove_learnt_topic_id(self, learnt_topic_id): + def remove_learnt_topic_id(self, learnt_topic_id: str) -> None: """Removes the topic id from the list of learnt topic ids. 
@@ -830,17 +1014,27 @@ def remove_learnt_topic_id(self, learnt_topic_id): self.learnt_topic_ids.remove(learnt_topic_id) +class LearnerGoalsDict(TypedDict): + """Dictionary representing the LearnerGoals object.""" + + topic_ids_to_learn: List[str] + topic_ids_to_master: List[str] + + class LearnerGoals: """Domain object for the learner goals model.""" def __init__( - self, user_id, topic_ids_to_learn, - topic_ids_to_master): + self, + user_id: str, + topic_ids_to_learn: List[str], + topic_ids_to_master: List[str] + ) -> None: self.id = user_id self.topic_ids_to_learn = topic_ids_to_learn self.topic_ids_to_master = topic_ids_to_master - def add_topic_id_to_learn(self, topic_id): + def add_topic_id_to_learn(self, topic_id: str) -> None: """Adds the topic id to 'topic IDs to learn' list. Args: @@ -848,14 +1042,14 @@ def add_topic_id_to_learn(self, topic_id): """ self.topic_ids_to_learn.append(topic_id) - def remove_topic_id_from_learn(self, topic_id): + def remove_topic_id_from_learn(self, topic_id: str) -> None: """Removes the topic id from the 'topic IDs to learn' list. topic_id: str. The id of the topic to be removed. """ self.topic_ids_to_learn.remove(topic_id) - def to_dict(self): + def to_dict(self) -> LearnerGoalsDict: """Return dictionary representation of LearnerGoals. Returns: @@ -872,13 +1066,18 @@ class LearnerPlaylist: """Domain object for the learner playlist model.""" def __init__( - self, user_id, exploration_ids, collection_ids): + self, + user_id: str, + exploration_ids: List[str], + collection_ids: List[str] + ) -> None: self.id = user_id self.exploration_ids = exploration_ids self.collection_ids = collection_ids def insert_exploration_id_at_given_position( - self, exploration_id, position_to_be_inserted): + self, exploration_id: str, position_to_be_inserted: int + ) -> None: """Inserts the given exploration id at the given position. 
Args: @@ -890,7 +1089,7 @@ def insert_exploration_id_at_given_position( self.exploration_ids.insert( position_to_be_inserted, exploration_id) - def add_exploration_id_to_list(self, exploration_id): + def add_exploration_id_to_list(self, exploration_id: str) -> None: """Inserts the exploration id at the end of the list. Args: @@ -900,7 +1099,8 @@ def add_exploration_id_to_list(self, exploration_id): self.exploration_ids.append(exploration_id) def insert_collection_id_at_given_position( - self, collection_id, position_to_be_inserted): + self, collection_id: str, position_to_be_inserted: int + ) -> None: """Inserts the given collection id at the given position. Args: @@ -911,7 +1111,7 @@ def insert_collection_id_at_given_position( """ self.collection_ids.insert(position_to_be_inserted, collection_id) - def add_collection_id_to_list(self, collection_id): + def add_collection_id_to_list(self, collection_id: str) -> None: """Inserts the collection id at the end of the list. Args: @@ -920,14 +1120,14 @@ def add_collection_id_to_list(self, collection_id): """ self.collection_ids.append(collection_id) - def remove_exploration_id(self, exploration_id): + def remove_exploration_id(self, exploration_id: str) -> None: """Removes the exploration id from the learner playlist. exploration_id: str. The id of the exploration to be removed. """ self.exploration_ids.remove(exploration_id) - def remove_collection_id(self, collection_id): + def remove_collection_id(self, collection_id: str) -> None: """Removes the collection id from the learner playlist. collection_id: str. The id of the collection to be removed. 
@@ -938,13 +1138,19 @@ def remove_collection_id(self, collection_id): class UserContributionProficiency: """Domain object for UserContributionProficiencyModel.""" - def __init__(self, user_id, score_category, score, onboarding_email_sent): + def __init__( + self, + user_id: str, + score_category: str, + score: int, + onboarding_email_sent: bool + ) -> None: self.user_id = user_id self.score_category = score_category self.score = score self.onboarding_email_sent = onboarding_email_sent - def increment_score(self, increment_by): + def increment_score(self, increment_by: int) -> None: """Increments the score of the user in the category by the given amount. In the first version of the scoring system, the increment_by quantity @@ -957,7 +1163,7 @@ def increment_score(self, increment_by): """ self.score += increment_by - def can_user_review_category(self): + def can_user_review_category(self) -> bool: """Checks if user can review suggestions in category score_category. If the user has score above the minimum required score, then the user is allowed to review. 
@@ -968,7 +1174,7 @@ def can_user_review_category(self): """ return self.score >= feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW - def mark_onboarding_email_as_sent(self): + def mark_onboarding_email_as_sent(self) -> None: """Marks the email as sent.""" self.onboarding_email_sent = True @@ -977,9 +1183,13 @@ class UserContributionRights: """Domain object for the UserContributionRightsModel.""" def __init__( - self, user_id, can_review_translation_for_language_codes, - can_review_voiceover_for_language_codes, can_review_questions, - can_submit_questions): + self, + user_id: str, + can_review_translation_for_language_codes: List[str], + can_review_voiceover_for_language_codes: List[str], + can_review_questions: bool, + can_submit_questions: bool + ): self.id = user_id self.can_review_translation_for_language_codes = ( can_review_translation_for_language_codes) @@ -988,18 +1198,21 @@ def __init__( self.can_review_questions = can_review_questions self.can_submit_questions = can_submit_questions - def can_review_at_least_one_item(self): + def can_review_at_least_one_item(self) -> bool: """Checks whether user has rights to review at least one item. Returns: boolean. Whether user has rights to review at east one item. """ - return ( + # Note that 'can_review_translation_for_language_codes' and + # 'can_review_voiceover_for_language_codes' are List[str], so we need + # the bool cast to ensure that the return value is boolean. + return bool( self.can_review_translation_for_language_codes or self.can_review_voiceover_for_language_codes or self.can_review_questions) - def validate(self): + def validate(self) -> None: """Validates different attributes of the class.""" if not isinstance(self.can_review_translation_for_language_codes, list): raise utils.ValidationError( @@ -1044,27 +1257,63 @@ def validate(self): 'found: %s' % type(self.can_submit_questions)) +# TODO(#15106): Refactor ModifiableUserData to limit the number of Optional +# fields used in ModifiableUserDataDict. 
+class ModifiableUserDataDict(TypedDict): + """Dictionary representing the ModifiableUserData object.""" + + display_alias: str + pin: Optional[str] + preferred_language_codes: List[str] + preferred_site_language_code: Optional[str] + preferred_audio_language_code: Optional[str] + preferred_translation_language_code: Optional[str] + user_id: Optional[str] + + +class RawUserDataDict(TypedDict): + """Type for the argument raw_user_data_dict.""" + + schema_version: int + display_alias: str + pin: Optional[str] + preferred_language_codes: List[str] + preferred_site_language_code: Optional[str] + preferred_audio_language_code: Optional[str] + preferred_translation_language_code: Optional[str] + user_id: Optional[str] + + class ModifiableUserData: """Domain object to represent the new values in a UserSettingsModel change submitted by the Android client. """ def __init__( - self, display_alias, pin, preferred_language_codes, - preferred_site_language_code, preferred_audio_language_code, - user_id=None): + self, + display_alias: str, + pin: Optional[str], + preferred_language_codes: List[str], + preferred_site_language_code: Optional[str], + preferred_audio_language_code: Optional[str], + preferred_translation_language_code: Optional[str], + user_id: Optional[str] = None + ) -> None: """Constructs a ModifiableUserData domain object. Args: display_alias: str. Display alias of the user shown on Android. pin: str or None. PIN of the user used for PIN based authentication on Android. None if it hasn't been set till now. - preferred_language_codes: list(str) or None. Exploration language + preferred_language_codes: list(str). Exploration language preferences specified by the user. preferred_site_language_code: str or None. System language preference. preferred_audio_language_code: str or None. Audio language preference. + preferred_translation_language_code: str or None. Text Translation + language preference of the translator that persists on the + contributor dashboard. 
user_id: str or None. User ID of the user whose data is being updated. None if request did not have a user_id for the user yet and expects the backend to create a new user entry for it. @@ -1074,12 +1323,16 @@ def __init__( self.preferred_language_codes = preferred_language_codes self.preferred_site_language_code = preferred_site_language_code self.preferred_audio_language_code = preferred_audio_language_code + self.preferred_translation_language_code = ( + preferred_translation_language_code) # The user_id is not intended to be a modifiable attribute, it is just # needed to identify the object. self.user_id = user_id @classmethod - def from_dict(cls, modifiable_user_data_dict): + def from_dict( + cls, modifiable_user_data_dict: ModifiableUserDataDict + ) -> ModifiableUserData: """Return a ModifiableUserData domain object from a dict. Args: @@ -1096,13 +1349,16 @@ def from_dict(cls, modifiable_user_data_dict): modifiable_user_data_dict['preferred_language_codes'], modifiable_user_data_dict['preferred_site_language_code'], modifiable_user_data_dict['preferred_audio_language_code'], + modifiable_user_data_dict['preferred_translation_language_code'], modifiable_user_data_dict['user_id'], ) CURRENT_SCHEMA_VERSION = 1 @classmethod - def from_raw_dict(cls, raw_user_data_dict): + def from_raw_dict( + cls, raw_user_data_dict: RawUserDataDict + ) -> ModifiableUserData: """Converts the raw_user_data_dict into a ModifiableUserData domain object by converting it according to the latest schema format. @@ -1113,8 +1369,13 @@ def from_raw_dict(cls, raw_user_data_dict): Returns: ModifiableUserData. The domain object representing the user data dict transformed according to the latest schema version. + + Raises: + Exception. No schema version specified. + Exception. Schema version is not of type int. + Exception. Invalid schema version. 
""" - data_schema_version = raw_user_data_dict.get('schema_version') + data_schema_version = raw_user_data_dict['schema_version'] if data_schema_version is None: raise Exception( @@ -1125,9 +1386,9 @@ def from_raw_dict(cls, raw_user_data_dict): 'received %s' % type(data_schema_version) ) if ( - not isinstance(data_schema_version, int) or - data_schema_version < 1 or - data_schema_version > cls.CURRENT_SCHEMA_VERSION + not isinstance(data_schema_version, int) or + data_schema_version < 1 or + data_schema_version > cls.CURRENT_SCHEMA_VERSION ): raise Exception( 'Invalid version %s received. At present we can only process v1' @@ -1136,3 +1397,265 @@ def from_raw_dict(cls, raw_user_data_dict): ) return cls.from_dict(raw_user_data_dict) + + +class ExplorationUserDataDict(TypedDict): + """Dictionary representing the ExplorationUserData object.""" + + rating: Optional[int] + rated_on: Optional[datetime.datetime] + draft_change_list: Optional[List[Dict[str, str]]] + draft_change_list_last_updated: Optional[datetime.datetime] + draft_change_list_exp_version: Optional[int] + draft_change_list_id: int + mute_suggestion_notifications: bool + mute_feedback_notifications: bool + furthest_reached_checkpoint_exp_version: Optional[int] + furthest_reached_checkpoint_state_name: Optional[str] + most_recently_reached_checkpoint_exp_version: Optional[int] + most_recently_reached_checkpoint_state_name: Optional[str] + + +class ExplorationUserData: + """Value object representing a user's exploration data. + + Attributes: + user_id: str. The user id. + exploration_id: str. The exploration id. + rating: int or None. The rating (1-5) the user assigned to the + exploration. + rated_on: datetime or None. When the most recent rating was awarded, + or None if not rated. + draft_change_list: list(dict) or None. List of uncommitted changes made + by the user to the exploration. + draft_change_list_last_updated: datetime or None. Timestamp of when the + change list was last updated. 
+ draft_change_list_exp_version: int or None. The exploration version + that this change list applied to. + draft_change_list_id: int. The version of the draft change list which + was last saved by the user. + mute_suggestion_notifications: bool. The user's preference for + receiving suggestion emails for this exploration. + mute_feedback_notifications: bool. The user's preference for receiving + feedback emails for this exploration. + furthest_reached_checkpoint_exp_version: int or None. The exploration + version of furthest reached checkpoint. + furthest_reached_checkpoint_state_name: str or None. The state name + of the furthest reached checkpoint. + most_recently_reached_checkpoint_exp_version: int or None. The + exploration version of the most recently reached checkpoint. + most_recently_reached_checkpoint_state_name: str or None. The state + name of the most recently reached checkpoint. + """ + + def __init__( + self, + user_id: str, + exploration_id: str, + rating: Optional[int] = None, + rated_on: Optional[datetime.datetime] = None, + draft_change_list: Optional[List[Dict[str, str]]] = None, + draft_change_list_last_updated: Optional[datetime.datetime] = None, + draft_change_list_exp_version: Optional[int] = None, + draft_change_list_id: int = 0, + mute_suggestion_notifications: bool = ( + feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), + mute_feedback_notifications: bool = ( + feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE), + furthest_reached_checkpoint_exp_version: Optional[int] = None, + furthest_reached_checkpoint_state_name: Optional[str] = None, + most_recently_reached_checkpoint_exp_version: Optional[int] = None, + most_recently_reached_checkpoint_state_name: Optional[str] = None + ) -> None: + """Constructs a ExplorationUserData domain object. + + Attributes: + user_id: str. The user id. + exploration_id: str. The exploration id. + rating: int or None. The rating (1-5) the user assigned to the + exploration. 
+ rated_on: datetime or None. When the most recent rating was + awarded, or None if not rated. + draft_change_list: list(dict) or None. List of uncommitted + changes made by the user to the exploration. + draft_change_list_last_updated: datetime or None. Timestamp of + when the change list was last updated. + draft_change_list_exp_version: int or None. The exploration + version that this change list applied to. + draft_change_list_id: int. The version of the draft change list + which was last saved by the user. + mute_suggestion_notifications: bool. The user's preference for + receiving suggestion emails for this exploration. + mute_feedback_notifications: bool. The user's preference for + receiving feedback emails for this exploration. + furthest_reached_checkpoint_exp_version: int or None. The + exploration version of furthest reached checkpoint. + furthest_reached_checkpoint_state_name: str or None. The + state name of the furthest reached checkpoint. + most_recently_reached_checkpoint_exp_version: int or None. The + exploration version of the most recently reached + checkpoint. + most_recently_reached_checkpoint_state_name: str or None. The + state name of the most recently reached checkpoint. 
+ """ + self.user_id = user_id + self.exploration_id = exploration_id + self.rating = rating + self.rated_on = rated_on + self.draft_change_list = draft_change_list + self.draft_change_list_last_updated = draft_change_list_last_updated + self.draft_change_list_exp_version = draft_change_list_exp_version + self.draft_change_list_id = draft_change_list_id + self.mute_suggestion_notifications = mute_suggestion_notifications + self.mute_feedback_notifications = mute_feedback_notifications + self.furthest_reached_checkpoint_exp_version = ( + furthest_reached_checkpoint_exp_version) + self.furthest_reached_checkpoint_state_name = ( + furthest_reached_checkpoint_state_name) + self.most_recently_reached_checkpoint_exp_version = ( + most_recently_reached_checkpoint_exp_version) + self.most_recently_reached_checkpoint_state_name = ( + most_recently_reached_checkpoint_state_name) + + def to_dict(self) -> ExplorationUserDataDict: + """Convert the ExplorationUserData domain instance into a dictionary + form with its keys as the attributes of this class. + + Returns: + dict. A dictionary containing the UserSettings class information + in a dictionary form. 
+ """ + + return { + 'rating': self.rating, + 'rated_on': self.rated_on, + 'draft_change_list': self.draft_change_list, + 'draft_change_list_last_updated': ( + self.draft_change_list_last_updated), + 'draft_change_list_exp_version': self.draft_change_list_exp_version, + 'draft_change_list_id': self.draft_change_list_id, + 'mute_suggestion_notifications': self.mute_suggestion_notifications, + 'mute_feedback_notifications': self.mute_feedback_notifications, + 'furthest_reached_checkpoint_exp_version': ( + self.furthest_reached_checkpoint_exp_version), + 'furthest_reached_checkpoint_state_name': ( + self.furthest_reached_checkpoint_state_name), + 'most_recently_reached_checkpoint_exp_version': ( + self.most_recently_reached_checkpoint_exp_version), + 'most_recently_reached_checkpoint_state_name': ( + self.most_recently_reached_checkpoint_state_name) + } + + +class LearnerGroupsUserDict(TypedDict): + """Dictionary for LearnerGroupsUser domain object.""" + + user_id: str + invited_to_learner_groups_ids: List[str] + learner_groups_user_details: List[LearnerGroupUserDetailsDict] + learner_groups_user_details_schema_version: int + + +class LearnerGroupUserDetailsDict(TypedDict): + """Dictionary for user details of a particular learner group.""" + + group_id: str + progress_sharing_is_turned_on: bool + + +class LearnerGroupUserDetails: + """Domain object for user details of a particular learner group.""" + + def __init__( + self, + group_id: str, + progress_sharing_is_turned_on: bool + ) -> None: + """Constructs a LearnerGroupUserDetails domain object. + + Attributes: + group_id: str. The id of the learner group. + progress_sharing_is_turned_on: bool. Whether progress sharing is + turned on for the learner group. 
+ """ + self.group_id = group_id + self.progress_sharing_is_turned_on = progress_sharing_is_turned_on + + def to_dict(self) -> LearnerGroupUserDetailsDict: + """Convert the LearnerGroupUserDetails domain instance into a + dictionary form with its keys as the attributes of this class. + + Returns: + dict. A dictionary containing the LearnerGroupUserDetails class + information in a dictionary form. + """ + return { + 'group_id': self.group_id, + 'progress_sharing_is_turned_on': self.progress_sharing_is_turned_on + } + + +class LearnerGroupsUser: + """Domain object for learner groups user.""" + + def __init__( + self, + user_id: str, + invited_to_learner_groups_ids: List[str], + learner_groups_user_details: List[LearnerGroupUserDetails], + learner_groups_user_details_schema_version: int + ) -> None: + """Constructs a LearnerGroupsUser domain object. + + Attributes: + user_id: str. The user id. + invited_to_learner_groups_ids: list(str). List of learner group ids + that the user has been invited to join as learner. + learner_groups_user_details: + list(LearnerGroupUserDetails). List of user details of + all learner groups that the user is learner of. + learner_groups_user_details_schema_version: int. The version + of the learner groups user details schema blob. + """ + self.user_id = user_id + self.invited_to_learner_groups_ids = invited_to_learner_groups_ids + self.learner_groups_user_details = learner_groups_user_details + self.learner_groups_user_details_schema_version = ( + learner_groups_user_details_schema_version) + + def to_dict(self) -> LearnerGroupsUserDict: + """Convert the LearnerGroupsUser domain instance into a dictionary + form with its keys as the attributes of this class. + + Returns: + dict. A dictionary containing the LearnerGroupsUser class + information in a dictionary form. 
+ """ + learner_groups_user_details_dict = [ + learner_group_details.to_dict() + for learner_group_details in self.learner_groups_user_details + ] + + return { + 'user_id': self.user_id, + 'invited_to_learner_groups_ids': + self.invited_to_learner_groups_ids, + 'learner_groups_user_details': learner_groups_user_details_dict, + 'learner_groups_user_details_schema_version': ( + self.learner_groups_user_details_schema_version) + } + + def validate(self) -> None: + """Validates the LearnerGroupsUser domain object. + + Raises: + ValidationError. One or more attributes of the LearnerGroupsUser + are invalid. + """ + for learner_group_details in self.learner_groups_user_details: + if learner_group_details.group_id in ( + self.invited_to_learner_groups_ids): + raise utils.ValidationError( + 'Learner cannot be invited to join learner group ' + '%s since they are already its learner.' % ( + learner_group_details.group_id)) diff --git a/core/domain/user_domain_test.py b/core/domain/user_domain_test.py index 58257149bbbe..1d40d0375cb8 100644 --- a/core/domain/user_domain_test.py +++ b/core/domain/user_domain_test.py @@ -18,6 +18,7 @@ from __future__ import annotations +import datetime import logging from core import feconf @@ -29,7 +30,27 @@ from core.platform import models from core.tests import test_utils -user_models, = models.Registry.import_models([models.NAMES.user]) +from typing import List, Optional, TypedDict + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +user_models, = models.Registry.import_models([models.Names.USER]) + + +class MockModifiableUserDataDict(TypedDict): + """Dictionary representing the MockModifiableUserData object.""" + + display_alias: str + schema_version: int + pin: Optional[str] + preferred_language_codes: List[str] + preferred_site_language_code: Optional[str] + preferred_audio_language_code: Optional[str] + preferred_translation_language_code: Optional[str] + user_id: Optional[str] + fake_field: 
Optional[str] # This mock class will not be needed once the schema version is >=2 for the @@ -42,33 +63,52 @@ class to create a new version of the schema for testing migration of old """ def __init__( - self, display_alias, pin, preferred_language_codes, - preferred_site_language_code, preferred_audio_language_code, - user_id=None, fake_field=None): - super(MockModifiableUserData, self).__init__( - display_alias, pin, preferred_language_codes, - preferred_site_language_code, preferred_audio_language_code, - user_id=None) + self, + display_alias: str, + pin: Optional[str], + preferred_language_codes: List[str], + preferred_site_language_code: Optional[str], + preferred_audio_language_code: Optional[str], + preferred_translation_language_code: Optional[str], + user_id: Optional[str]=None, + fake_field: Optional[str]=None + ) -> None: + super().__init__( + display_alias, + pin, + preferred_language_codes, + preferred_site_language_code, + preferred_audio_language_code, + preferred_translation_language_code, + user_id=None + ) self.fake_field = fake_field CURRENT_SCHEMA_VERSION = 2 # Overriding method to add a new attribute added names 'fake_field'. + # Here we use MyPy ignore because the signature of this method + # doesn't match with user_domain.ModifiableUserData.from_dict(). @classmethod - def from_dict(cls, modifiable_user_data_dict): + def from_dict( # type: ignore[override] + cls, modifiable_user_data_dict: MockModifiableUserDataDict + ) -> MockModifiableUserData: return MockModifiableUserData( modifiable_user_data_dict['display_alias'], modifiable_user_data_dict['pin'], modifiable_user_data_dict['preferred_language_codes'], modifiable_user_data_dict['preferred_site_language_code'], modifiable_user_data_dict['preferred_audio_language_code'], + modifiable_user_data_dict['preferred_translation_language_code'], modifiable_user_data_dict['user_id'], modifiable_user_data_dict['fake_field'] ) # Adding a new method to convert v1 schema data dict to v2. 
@classmethod - def _convert_v1_dict_to_v2_dict(cls, user_data_dict): + def _convert_v1_dict_to_v2_dict( + cls, user_data_dict: MockModifiableUserDataDict + ) -> MockModifiableUserDataDict: """Mock function to convert v1 dict to v2.""" user_data_dict['schema_version'] = 2 user_data_dict['fake_field'] = 'default_value' @@ -76,8 +116,12 @@ def _convert_v1_dict_to_v2_dict(cls, user_data_dict): # Overiding method to first convert raw user data dict to latest version # then returning a ModifiableUserData domain object. + # Here we use MyPy ignore because the signature of this method + # doesn't match with user_domain.ModifiableUserData.from_raw_dict(). @classmethod - def from_raw_dict(cls, raw_user_data_dict): + def from_raw_dict( # type: ignore[override] + cls, raw_user_data_dict: MockModifiableUserDataDict + ) -> MockModifiableUserData: data_schema_version = raw_user_data_dict.get('schema_version') user_data_dict = raw_user_data_dict @@ -90,192 +134,260 @@ def from_raw_dict(cls, raw_user_data_dict): class UserSettingsTests(test_utils.GenericTestBase): - def setUp(self): - super(UserSettingsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.owner = user_services.get_user_actions_info(self.owner_id) - self.user_settings = user_services.get_user_settings(self.owner_id) + user_settings = user_services.get_user_settings(self.owner_id) + self.user_settings = user_settings self.user_settings.validate() self.assertEqual(self.owner.roles, [feconf.ROLE_ID_FULL_USER]) - user_data_dict = { + user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': 'user_id', } self.modifiable_user_data = ( 
user_domain.ModifiableUserData.from_raw_dict(user_data_dict)) - new_user_data_dict = { + new_user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias_3', 'pin': None, 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': None, } self.modifiable_new_user_data = ( user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict)) - def test_validate_non_str_user_id_raises_exception(self): - self.user_settings.user_id = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_str_user_id_raises_exception(self) -> None: + self.user_settings.user_id = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected user_id to be a string' ): self.user_settings.validate() - def test_validate_wrong_format_user_id_raises_exception(self): + def test_validate_wrong_format_user_id_raises_exception( + self + ) -> None: self.user_settings.user_id = 'uid_%sA' % ('a' * 31) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The user ID is in a wrong format.' ): self.user_settings.validate() self.user_settings.user_id = 'uid_%s' % ('a' * 31) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The user ID is in a wrong format.' ): self.user_settings.validate() self.user_settings.user_id = 'a' * 36 - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'The user ID is in a wrong format.' 
): self.user_settings.validate() - def test_validate_invalid_banned_value_type_raises_exception(self): - self.user_settings.banned = 123 - with self.assertRaisesRegexp( + def test_validate_invalid_banned_value_type_raises_exception(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.user_settings.banned = 123 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected banned to be a bool'): self.user_settings.validate() - self.user_settings.banned = '123' - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.user_settings.banned = '123' # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected banned to be a bool'): self.user_settings.validate() - def test_validate_invalid_roles_value_type_raises_exception(self): - self.user_settings.roles = 123 - with self.assertRaisesRegexp( + def test_validate_invalid_roles_value_type_raises_exception(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + self.user_settings.roles = 123 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected roles to be a list'): self.user_settings.validate() - self.user_settings.roles = True - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ self.user_settings.roles = True # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected roles to be a list'): self.user_settings.validate() - def test_validate_banned_user_with_roles_raises_exception(self): + def test_validate_banned_user_with_roles_raises_exception( + self + ) -> None: self.user_settings.roles = ['FULL_USER'] self.user_settings.banned = True - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected roles for banned user to be empty'): self.user_settings.validate() - def test_validate_roles_with_duplicate_value_raise_exception(self): + def test_validate_roles_with_duplicate_value_raise_exception( + self + ) -> None: self.user_settings.roles = ['FULL_USER', 'FULL_USER', 'TOPIC_MANAGER'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Roles contains duplicate values:'): self.user_settings.validate() - def test_validate_roles_without_any_default_role_raise_exception(self): + def test_validate_roles_without_any_default_role_raise_exception( + self + ) -> None: self.user_settings.roles = ['TOPIC_MANAGER'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected roles to contains one default role.'): self.user_settings.validate() - def test_validate_non_str_pin_id(self): - self.user_settings.pin = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_non_str_pin_id(self) -> None: + self.user_settings.pin = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected PIN to be a string' ): self.user_settings.validate() - def test_validate_invalid_length_pin_raises_error(self): + def test_validate_invalid_length_pin_raises_error(self) -> None: invalid_pin_values_list = ['1', '12', '1234', '123@#6', 'ABCa', '1!#a'] error_msg = ( 'User PIN can only be of length %s or %s' % (feconf.FULL_USER_PIN_LENGTH, feconf.PROFILE_USER_PIN_LENGTH) ) for pin in invalid_pin_values_list: - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, error_msg ): self.user_settings.pin = pin self.user_settings.validate() - def test_validate_valid_length_with_numeric_char_pin_works_fine(self): + def test_validate_valid_length_with_numeric_char_pin_works_fine( + self + ) -> None: valid_pin_values_list = ['123', '12345', '764', '42343'] for pin in valid_pin_values_list: self.user_settings.pin = pin self.user_settings.validate() - def test_validate_valid_length_pin_with_non_numeric_char_raises_error(self): + def test_validate_valid_length_pin_with_non_numeric_char_raises_error( + self + ) -> None: valid_pin_values_list = ['AbC', '123A}', '1!2', 'AB!', '[123]'] error_msg = 'Only numeric characters are allowed in PIN' for pin in valid_pin_values_list: - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, error_msg ): self.user_settings.pin = pin self.user_settings.validate() - def test_validate_empty_user_id_raises_exception(self): + def test_validate_empty_user_id_raises_exception(self) -> None: self.user_settings.user_id = '' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'No user id specified.' 
): self.user_settings.validate() - def test_validate_non_str_role_raises_exception(self): - self.user_settings.roles = [0] - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_str_role_raises_exception(self) -> None: + self.user_settings.roles = [0] # type: ignore[list-item] + with self.assertRaisesRegex( utils.ValidationError, 'Expected roles to be a string' ): self.user_settings.validate() - def test_validate_invalid_role_name_raises_exception(self): + def test_validate_invalid_role_name_raises_exception(self) -> None: self.user_settings.roles = ['invalid_role'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Role invalid_role does not exist.'): self.user_settings.validate() - def test_validate_non_str_display_alias_raises_error(self): - self.user_settings.display_alias = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_str_display_alias_raises_error(self) -> None: + self.user_settings.display_alias = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected display_alias to be a string,' ' received %s' % self.user_settings.display_alias): self.user_settings.validate() - def test_validate_non_str_creator_dashboard_display_pref_raises_error(self): - self.user_settings.creator_dashboard_display_pref = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_non_str_creator_dashboard_display_pref_raises_error( + self + ) -> None: + self.user_settings.creator_dashboard_display_pref = 0 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected dashboard display preference to be a string' ): self.user_settings.validate() - def test_validate_invalid_creator_dashboard_display_pref_raises_error(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validation_none__email_raises_error(self) -> None: + self.user_settings.email = None # type: ignore[assignment] + with self.assertRaisesRegex( + utils.ValidationError, 'Expected email to be a string,' + ' received %s' % self.user_settings.email): + self.user_settings.validate() + + def test_validation_wrong_email_raises_error(self) -> None: + invalid_emails_list = [ + 'testemail.com', '@testemail.com', 'testemail.com@'] + for email in invalid_emails_list: + self.user_settings.email = email + with self.assertRaisesRegex( + utils.ValidationError, 'Invalid email address: %s' % email + ): + self.user_settings.validate() + + def test_validate_invalid_creator_dashboard_display_pref_raises_error( + self + ) -> None: self.user_settings.creator_dashboard_display_pref = ( 'invalid_creator_dashboard_display_pref') - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'invalid_creator_dashboard_display_pref is not a valid ' 'value for the dashboard display preferences.' 
): self.user_settings.validate() - def test_validate_empty_display_alias_for_profiles_raises_error(self): + def test_validate_empty_display_alias_for_profiles_raises_error( + self + ) -> None: self.modifiable_user_data.user_id = self.owner_id self.modifiable_user_data.pin = '12345' self.modifiable_user_data.display_alias = 'temp_name' @@ -284,48 +396,57 @@ def test_validate_empty_display_alias_for_profiles_raises_error(self): auth_id = self.get_auth_id_from_email(self.OWNER_EMAIL) profile_pin = '123' error_msg = 'Expected display_alias to be a string, received' - with self.assertRaisesRegexp(utils.ValidationError, error_msg): + with self.assertRaisesRegex(utils.ValidationError, error_msg): self.modifiable_new_user_data.display_alias = '' self.modifiable_new_user_data.pin = profile_pin user_services.create_new_profiles( auth_id, self.OWNER_EMAIL, [self.modifiable_new_user_data] ) - def test_has_not_fully_registered_for_guest_user_is_false(self): - self.assertFalse(user_services.has_fully_registered_account(None)) + def test_has_not_fully_registered_for_guest_user_is_false( + self + ) -> None: + self.assertFalse(user_services.has_fully_registered_account( + 'non_existing_user' + )) - def test_create_new_user_with_existing_auth_id_raises_error(self): + def test_create_new_user_with_existing_auth_id_raises_error(self) -> None: user_id = self.user_settings.user_id user_auth_id = auth_services.get_auth_id_from_user_id(user_id) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'User %s already exists for auth_id %s.' % (user_id, user_auth_id) ): + # Ruling out the possibility of None for mypy type checking. + assert user_auth_id is not None user_services.create_new_user(user_auth_id, self.OWNER_EMAIL) - def test_cannot_set_existing_username(self): - with self.assertRaisesRegexp( + def test_cannot_set_existing_username(self) -> None: + with self.assertRaisesRegex( utils.ValidationError, 'Sorry, the username \"%s\" is already taken! 
Please pick ' 'a different one.' % self.OWNER_USERNAME ): user_services.set_username(self.owner_id, self.OWNER_USERNAME) - def test_cannot_add_user_role_with_invalid_role(self): - with self.assertRaisesRegexp( + def test_cannot_add_user_role_with_invalid_role(self) -> None: + with self.assertRaisesRegex( Exception, 'Role invalid_role does not exist.' ): user_services.add_user_role(self.owner_id, 'invalid_role') - def test_cannot_get_human_readable_user_ids_with_invalid_user_ids(self): + def test_cannot_get_human_readable_user_ids_with_invalid_user_ids( + self + ) -> None: observed_log_messages = [] - def _mock_logging_function(msg, *args): + # Here, args can take any non-keyword argument. + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.error().""" observed_log_messages.append(msg % args) logging_swap = self.swap(logging, 'error', _mock_logging_function) - assert_raises_user_not_found = self.assertRaisesRegexp( + assert_raises_user_not_found = self.assertRaisesRegex( Exception, 'User not found.') with logging_swap, assert_raises_user_not_found: @@ -338,7 +459,7 @@ def _mock_logging_function(msg, *args): '[\'invalid_user_id\']' ]) - def test_get_human_readable_user_ids(self): + def test_get_human_readable_user_ids(self) -> None: # Create an unregistered user who has no username. 
user_models.UserSettingsModel( id='unregistered_user_id', @@ -355,7 +476,8 @@ def test_get_human_readable_user_ids(self): self.assertEqual(user_ids, expected_user_ids) def test_get_human_readable_user_ids_with_nonexistent_id_non_strict_passes( - self): + self + ) -> None: user_id = user_services.create_new_user( 'auth_id', 'user@example.com').user_id user_services.set_username(user_id, 'username') @@ -367,7 +489,7 @@ def test_get_human_readable_user_ids_with_nonexistent_id_non_strict_passes( human_readable_user_ids, [user_services.LABEL_FOR_USER_BEING_DELETED]) - def test_created_on_gets_updated_correctly(self): + def test_created_on_gets_updated_correctly(self) -> None: # created_on should not be updated upon updating other attributes of # the user settings model. user_settings = user_services.create_new_user( @@ -386,86 +508,113 @@ def test_created_on_gets_updated_correctly(self): class UserContributionsTests(test_utils.GenericTestBase): - def setUp(self): - super(UserContributionsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.user_contributions = user_services.get_user_contributions( - self.owner_id) + self.owner_id, strict=True + ) self.user_contributions.validate() - def test_validate_non_str_user_id(self): - self.user_contributions.user_id = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_non_str_user_id(self) -> None: + self.user_contributions.user_id = 0 # type: ignore[assignment] + with self.assertRaisesRegex( Exception, 'Expected user_id to be a string'): self.user_contributions.validate() - def test_validate_user_id(self): + def test_validate_user_id(self) -> None: self.user_contributions.user_id = '' - with self.assertRaisesRegexp(Exception, 'No user id specified.'): + with self.assertRaisesRegex(Exception, 'No user id specified.'): self.user_contributions.validate() - def test_validate_non_list_created_exploration_ids(self): - self.user_contributions.created_exploration_ids = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_list_created_exploration_ids(self) -> None: + self.user_contributions.created_exploration_ids = 0 # type: ignore[assignment] + with self.assertRaisesRegex( Exception, 'Expected created_exploration_ids to be a list'): self.user_contributions.validate() - def test_validate_created_exploration_ids(self): - self.user_contributions.created_exploration_ids = [0] - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_created_exploration_ids(self) -> None: + self.user_contributions.created_exploration_ids = [0] # type: ignore[list-item] + with self.assertRaisesRegex( Exception, 'Expected exploration_id in created_exploration_ids ' 'to be a string'): self.user_contributions.validate() - def test_validate_non_list_edited_exploration_ids(self): - self.user_contributions.edited_exploration_ids = 0 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_validate_non_list_edited_exploration_ids(self) -> None: + self.user_contributions.edited_exploration_ids = 0 # type: ignore[assignment] + with self.assertRaisesRegex( Exception, 'Expected edited_exploration_ids to be a list'): self.user_contributions.validate() - def test_validate_edited_exploration_ids(self): - self.user_contributions.edited_exploration_ids = [0] - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_validate_edited_exploration_ids(self) -> None: + self.user_contributions.edited_exploration_ids = [0] # type: ignore[list-item] + with self.assertRaisesRegex( Exception, 'Expected exploration_id in edited_exploration_ids ' 'to be a string'): self.user_contributions.validate() - def test_cannot_create_user_contributions_with_migration_bot(self): - self.assertIsNone( - user_services.create_user_contributions( - feconf.MIGRATION_BOT_USER_ID, [], [])) - - def test_update_user_contributions(self): + def test_save_user_contributions(self) -> None: user_services.update_user_contributions(self.owner_id, ['e1'], ['e2']) + contributions = user_services.get_user_contributions( + self.owner_id, strict=True + ) - contributions = user_services.get_user_contributions(self.owner_id) self.assertEqual(contributions.user_id, self.owner_id) self.assertEqual(contributions.created_exploration_ids, ['e1']) self.assertEqual(contributions.edited_exploration_ids, ['e2']) - def test_cannot_create_user_contributions_with_existing_user_id(self): - with self.assertRaisesRegexp( - Exception, - 'User contributions model for user %s already exists.' 
- % self.owner_id): - user_services.create_user_contributions(self.owner_id, [], []) + contributions.add_created_exploration_id('e3') + contributions.add_edited_exploration_id('e4') + user_services.save_user_contributions(contributions) + + updated_contributions = user_services.get_user_contributions( + self.owner_id, strict=True + ) - def test_cannot_update_user_contributions_with_invalid_user_id(self): - with self.assertRaisesRegexp( + self.assertEqual(updated_contributions.user_id, self.owner_id) + self.assertEqual( + updated_contributions.created_exploration_ids, + ['e1', 'e3'] + ) + self.assertEqual( + updated_contributions.edited_exploration_ids, + ['e2', 'e4'] + ) + + def test_cannot_update_user_contributions_with_invalid_user_id( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'User contributions model for user invalid_user_id does not exist'): user_services.update_user_contributions('invalid_user_id', [], []) def test_cannot_update_dashboard_stats_log_with_invalid_schema_version( - self): + self + ) -> None: model = user_models.UserStatsModel.get_or_create(self.owner_id) model.schema_version = 0 model.update_timestamps() model.put() self.assertIsNone(user_services.get_user_impact_score(self.owner_id)) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d dashboard stats schemas at ' 'present.' 
% feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION): @@ -475,7 +624,7 @@ def test_cannot_update_dashboard_stats_log_with_invalid_schema_version( class UserGlobalPrefsTests(test_utils.GenericTestBase): """Test domain object for user global email preferences.""" - def test_initialization(self): + def test_initialization(self) -> None: """Testing init method.""" user_global_prefs = (user_domain.UserGlobalPrefs( True, False, True, False)) @@ -485,7 +634,7 @@ def test_initialization(self): self.assertTrue(user_global_prefs.can_receive_feedback_message_email) self.assertFalse(user_global_prefs.can_receive_subscription_email) - def test_create_default_prefs(self): + def test_create_default_prefs(self) -> None: """Testing create_default_prefs.""" default_user_global_prefs = ( user_domain.UserGlobalPrefs.create_default_prefs()) @@ -507,7 +656,7 @@ def test_create_default_prefs(self): class UserExplorationPrefsTests(test_utils.GenericTestBase): """Test domain object for user exploration email preferences.""" - def test_initialization(self): + def test_initialization(self) -> None: """Testing init method.""" user_exp_prefs = (user_domain.UserExplorationPrefs( False, True)) @@ -520,7 +669,7 @@ def test_initialization(self): self.assertFalse(mute_feedback_notifications) self.assertTrue(mute_suggestion_notifications) - def test_create_default_prefs(self): + def test_create_default_prefs(self) -> None: """Testing create_default_prefs.""" default_user_exp_prefs = ( user_domain.UserExplorationPrefs.create_default_prefs()) @@ -532,7 +681,7 @@ def test_create_default_prefs(self): default_user_exp_prefs.mute_suggestion_notifications, feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE) - def test_to_dict(self): + def test_to_dict(self) -> None: """Testing to_dict.""" user_exp_prefs = (user_domain.UserExplorationPrefs( False, True)) @@ -563,11 +712,11 @@ def test_to_dict(self): class ExpUserLastPlaythroughTests(test_utils.GenericTestBase): """Testing domain object for an exploration 
last playthrough model.""" - def test_initialization(self): + def test_initialization(self) -> None: """Testing init method.""" + current_time = datetime.datetime.utcnow() exp_last_playthrough = (user_domain.ExpUserLastPlaythrough( - 'user_id0', 'exp_id0', 0, 'last_updated', 'state0')) - + 'user_id0', 'exp_id0', 0, current_time, 'state0')) self.assertEqual( exp_last_playthrough.id, 'user_id0.exp_id0') self.assertEqual( @@ -577,14 +726,15 @@ def test_initialization(self): self.assertEqual( exp_last_playthrough.last_played_exp_version, 0) self.assertEqual( - exp_last_playthrough.last_updated, 'last_updated') + exp_last_playthrough.last_updated, current_time) self.assertEqual( exp_last_playthrough.last_played_state_name, 'state0') - def test_update_last_played_information(self): + def test_update_last_played_information(self) -> None: """Testing update_last_played_information.""" + current_time = datetime.datetime.utcnow() exp_last_playthrough = (user_domain.ExpUserLastPlaythrough( - 'user_id0', 'exp_id0', 0, 'last_updated', 'state0')) + 'user_id0', 'exp_id0', 0, current_time, 'state0')) self.assertEqual( exp_last_playthrough.last_played_exp_version, 0) @@ -602,7 +752,7 @@ def test_update_last_played_information(self): class IncompleteActivitiesTests(test_utils.GenericTestBase): """Testing domain object for incomplete activities model.""" - def test_initialization(self): + def test_initialization(self) -> None: """Testing init method.""" incomplete_activities = (user_domain.IncompleteActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -618,7 +768,7 @@ def test_initialization(self): self.assertListEqual( incomplete_activities.partially_learnt_topic_ids, ['topic_id0']) - def test_add_exploration_id(self): + def test_add_exploration_id(self) -> None: """Testing add_exploration_id.""" incomplete_activities = (user_domain.IncompleteActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -633,7 +783,7 @@ def test_add_exploration_id(self): 
incomplete_activities.exploration_ids, ['exp_id0', 'exp_id1']) - def test_remove_exploration_id(self): + def test_remove_exploration_id(self) -> None: """Testing remove_exploration_id.""" incomplete_activities = (user_domain.IncompleteActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -647,7 +797,7 @@ def test_remove_exploration_id(self): self.assertListEqual( incomplete_activities.exploration_ids, []) - def test_add_collection_id(self): + def test_add_collection_id(self) -> None: """Testing add_collection_id.""" incomplete_activities = (user_domain.IncompleteActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -662,7 +812,7 @@ def test_add_collection_id(self): incomplete_activities.collection_ids, ['collect_id0', 'collect_id1']) - def test_remove_collection_id(self): + def test_remove_collection_id(self) -> None: """Testing remove_collection_id.""" incomplete_activities = (user_domain.IncompleteActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -676,7 +826,7 @@ def test_remove_collection_id(self): self.assertListEqual( incomplete_activities.collection_ids, []) - def test_add_story_id(self): + def test_add_story_id(self) -> None: """Testing add_story_id.""" incomplete_activities = (user_domain.IncompleteActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -691,7 +841,7 @@ def test_add_story_id(self): incomplete_activities.story_ids, ['story_id0', 'story_id1']) - def test_remove_story_id(self): + def test_remove_story_id(self) -> None: """Testing remove_story_id.""" incomplete_activities = (user_domain.IncompleteActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -705,7 +855,7 @@ def test_remove_story_id(self): self.assertListEqual( incomplete_activities.story_ids, []) - def test_add_partially_learnt_topic_id(self): + def test_add_partially_learnt_topic_id(self) -> None: """Testing add_partially_learnt_topic_id.""" incomplete_activities = 
(user_domain.IncompleteActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -720,7 +870,7 @@ def test_add_partially_learnt_topic_id(self): incomplete_activities.partially_learnt_topic_ids, ['topic_id0', 'topic_id1']) - def test_remove_partially_learnt_topic_id(self): + def test_remove_partially_learnt_topic_id(self) -> None: """Testing remove_partially_learnt_topic_id.""" incomplete_activities = (user_domain.IncompleteActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -738,7 +888,7 @@ def test_remove_partially_learnt_topic_id(self): class CompletedActivitiesTests(test_utils.GenericTestBase): """Testing domain object for the activities completed.""" - def test_initialization(self): + def test_initialization(self) -> None: """Testing init method.""" completed_activities = (user_domain.CompletedActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -754,7 +904,7 @@ def test_initialization(self): self.assertListEqual( completed_activities.learnt_topic_ids, ['topic_id0']) - def test_add_exploration_id(self): + def test_add_exploration_id(self) -> None: """Testing add_exploration_id.""" completed_activities = (user_domain.CompletedActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -769,7 +919,7 @@ def test_add_exploration_id(self): completed_activities.exploration_ids, ['exp_id0', 'exp_id1']) - def test_remove_exploration_id(self): + def test_remove_exploration_id(self) -> None: """Testing remove_exploration_id.""" completed_activities = (user_domain.CompletedActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -783,7 +933,7 @@ def test_remove_exploration_id(self): self.assertListEqual( completed_activities.exploration_ids, []) - def test_add_collection_id(self): + def test_add_collection_id(self) -> None: """Testing add_collection_id.""" completed_activities = (user_domain.CompletedActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -798,7 +948,7 @@ def 
test_add_collection_id(self): completed_activities.collection_ids, ['collect_id0', 'collect_id1']) - def test_remove_collection_id(self): + def test_remove_collection_id(self) -> None: """Testing remove_collection_id.""" completed_activities = (user_domain.CompletedActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -812,7 +962,7 @@ def test_remove_collection_id(self): self.assertListEqual( completed_activities.collection_ids, []) - def test_add_story_id(self): + def test_add_story_id(self) -> None: """Testing add_story_id.""" completed_activities = (user_domain.CompletedActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -827,7 +977,7 @@ def test_add_story_id(self): completed_activities.story_ids, ['story_id0', 'story_id1']) - def test_remove_story_id(self): + def test_remove_story_id(self) -> None: """Testing remove_story_id.""" completed_activities = (user_domain.CompletedActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -841,7 +991,7 @@ def test_remove_story_id(self): self.assertListEqual( completed_activities.story_ids, []) - def test_add_learnt_topic_id(self): + def test_add_learnt_topic_id(self) -> None: """Testing add_learnt_topic_id.""" completed_activities = (user_domain.CompletedActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -856,7 +1006,7 @@ def test_add_learnt_topic_id(self): completed_activities.learnt_topic_ids, ['topic_id0', 'topic_id1']) - def test_remove_learnt_topic_id(self): + def test_remove_learnt_topic_id(self) -> None: """Testing remove_learnt_topic_id.""" completed_activities = (user_domain.CompletedActivities( 'user_id0', ['exp_id0'], ['collect_id0'], ['story_id0'], @@ -874,7 +1024,7 @@ def test_remove_learnt_topic_id(self): class LearnerGoalsTests(test_utils.GenericTestBase): """Testing domain object for learner goals model.""" - def test_initialization(self): + def test_initialization(self) -> None: """Testing init method.""" learner_goals = ( 
user_domain.LearnerGoals('user_id0', ['topic_id0'], [])) @@ -882,7 +1032,7 @@ def test_initialization(self): self.assertListEqual( learner_goals.topic_ids_to_learn, ['topic_id0']) - def test_add_topic_id_to_learn(self): + def test_add_topic_id_to_learn(self) -> None: """Testing add_topic_id_to_learn.""" learner_goals = ( user_domain.LearnerGoals('user_id0', ['topic_id0'], [])) @@ -895,7 +1045,7 @@ def test_add_topic_id_to_learn(self): self.assertListEqual( learner_goals.topic_ids_to_learn, ['topic_id0', 'topic_id1']) - def test_remove_topic_id_to_learn(self): + def test_remove_topic_id_to_learn(self) -> None: """Testing remove_topic_id_to_learn.""" learner_goals = ( user_domain.LearnerGoals('user_id0', ['topic_id0'], [])) @@ -912,7 +1062,7 @@ def test_remove_topic_id_to_learn(self): class LearnerPlaylistTests(test_utils.GenericTestBase): """Testing domain object for the learner playlist.""" - def test_initialization(self): + def test_initialization(self) -> None: """Testing init method.""" learner_playlist = (user_domain.LearnerPlaylist( 'user_id0', ['exp_id0'], ['collect_id0'])) @@ -923,7 +1073,7 @@ def test_initialization(self): self.assertListEqual( learner_playlist.collection_ids, ['collect_id0']) - def test_insert_exploration_id_at_given_position(self): + def test_insert_exploration_id_at_given_position(self) -> None: """Testing inserting the given exploration id at the given position.""" learner_playlist = (user_domain.LearnerPlaylist( 'user_id0', ['exp_id0'], ['collect_id0'])) @@ -940,7 +1090,7 @@ def test_insert_exploration_id_at_given_position(self): learner_playlist.exploration_ids, ['exp_id0', 'exp_id2', 'exp_id1']) - def test_add_exploration_id_to_list(self): + def test_add_exploration_id_to_list(self) -> None: """Testing add_exploration_id_to_list.""" learner_playlist = (user_domain.LearnerPlaylist( 'user_id0', ['exp_id0'], ['collect_id0'])) @@ -953,7 +1103,7 @@ def test_add_exploration_id_to_list(self): self.assertListEqual( 
learner_playlist.exploration_ids, ['exp_id0', 'exp_id1']) - def test_insert_collection_id_at_given_position(self): + def test_insert_collection_id_at_given_position(self) -> None: """Testing insert_exploration_id_at_given_position.""" learner_playlist = (user_domain.LearnerPlaylist( 'user_id0', ['exp_id0'], ['collect_id0'])) @@ -970,7 +1120,7 @@ def test_insert_collection_id_at_given_position(self): learner_playlist.collection_ids, ['collect_id0', 'collect_id2', 'collect_id1']) - def test_add_collection_id_list(self): + def test_add_collection_id_list(self) -> None: """Testing add_collection_id.""" learner_playlist = (user_domain.LearnerPlaylist( 'user_id0', ['exp_id0'], ['collect_id0'])) @@ -984,7 +1134,7 @@ def test_add_collection_id_list(self): learner_playlist.collection_ids, ['collect_id0', 'collect_id1']) - def test_remove_exploration_id(self): + def test_remove_exploration_id(self) -> None: """Testing remove_exploration_id.""" learner_playlist = (user_domain.LearnerPlaylist( 'user_id0', ['exp_id0'], ['collect_id0'])) @@ -997,7 +1147,7 @@ def test_remove_exploration_id(self): self.assertListEqual( learner_playlist.exploration_ids, []) - def test_remove_collection_id(self): + def test_remove_collection_id(self) -> None: """Testing remove_collection_id.""" learner_playlist = (user_domain.LearnerPlaylist( 'user_id0', ['exp_id0'], ['collect_id0'])) @@ -1014,12 +1164,12 @@ def test_remove_collection_id(self): class UserContributionProficiencyTests(test_utils.GenericTestBase): """Testing domain object for user contribution scoring model.""" - def setUp(self): - super(UserContributionProficiencyTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.user_proficiency = user_domain.UserContributionProficiency( 'user_id0', 'category0', 0, False) - def test_initialization(self): + def test_initialization(self) -> None: """Testing init method.""" self.assertEqual(self.user_proficiency.user_id, 'user_id0') self.assertEqual( @@ -1028,7 +1178,7 @@ def 
test_initialization(self): self.assertEqual( self.user_proficiency.onboarding_email_sent, False) - def test_increment_score(self): + def test_increment_score(self) -> None: self.assertEqual(self.user_proficiency.score, 0) self.user_proficiency.increment_score(4) @@ -1037,7 +1187,7 @@ def test_increment_score(self): self.user_proficiency.increment_score(-3) self.assertEqual(self.user_proficiency.score, 1) - def test_can_user_review_category(self): + def test_can_user_review_category(self) -> None: self.assertEqual(self.user_proficiency.score, 0) self.assertFalse(self.user_proficiency.can_user_review_category()) @@ -1046,7 +1196,7 @@ def test_can_user_review_category(self): self.assertTrue(self.user_proficiency.can_user_review_category()) - def test_mark_onboarding_email_as_sent(self): + def test_mark_onboarding_email_as_sent(self) -> None: self.assertFalse(self.user_proficiency.onboarding_email_sent) self.user_proficiency.mark_onboarding_email_as_sent() @@ -1057,12 +1207,12 @@ def test_mark_onboarding_email_as_sent(self): class UserContributionRightsTests(test_utils.GenericTestBase): """Testing UserContributionRights domain object.""" - def setUp(self): - super(UserContributionRightsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.user_contribution_rights = user_domain.UserContributionRights( 'user_id', ['hi'], [], True, False) - def test_initialization(self): + def test_initialization(self) -> None: """Testing init method.""" self.assertEqual(self.user_contribution_rights.id, 'user_id') @@ -1076,72 +1226,94 @@ def test_initialization(self): self.assertEqual( self.user_contribution_rights.can_review_questions, True) - def test_can_review_translation_for_language_codes_incorrect_type(self): - self.user_contribution_rights.can_review_translation_for_language_codes = 5 # pylint: disable=line-too-long - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the 
tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_can_review_translation_for_language_codes_incorrect_type( + self + ) -> None: + # To avoid pylint's line-too-long error, new variable is created here. + user_contribution_rights = self.user_contribution_rights + user_contribution_rights.can_review_translation_for_language_codes = 5 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected can_review_translation_for_language_codes to be a list'): self.user_contribution_rights.validate() - def test_can_review_voiceover_for_language_codes_incorrect_type(self): - self.user_contribution_rights.can_review_voiceover_for_language_codes = 5 # pylint: disable=line-too-long - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_can_review_voiceover_for_language_codes_incorrect_type( + self + ) -> None: + # To avoid pylint's line-too-long error, new variable is created here. 
+ user_contribution_rights = self.user_contribution_rights + user_contribution_rights.can_review_voiceover_for_language_codes = 5 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected can_review_voiceover_for_language_codes to be a list'): self.user_contribution_rights.validate() - def test_incorrect_language_code_for_voiceover_raise_error(self): + def test_incorrect_language_code_for_voiceover_raise_error(self) -> None: self.user_contribution_rights.can_review_voiceover_for_language_codes = [ # pylint: disable=line-too-long 'invalid_lang_code'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid language_code: invalid_lang_code'): self.user_contribution_rights.validate() - def test_incorrect_language_code_for_translation_raise_error(self): + def test_incorrect_language_code_for_translation_raise_error(self) -> None: self.user_contribution_rights.can_review_translation_for_language_codes = [ # pylint: disable=line-too-long 'invalid_lang_code'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid language_code: invalid_lang_code'): self.user_contribution_rights.validate() def test_can_review_voiceover_for_language_codes_with_duplicate_values( - self): + self + ) -> None: self.user_contribution_rights.can_review_voiceover_for_language_codes = [ # pylint: disable=line-too-long 'hi'] self.user_contribution_rights.validate() self.user_contribution_rights.can_review_voiceover_for_language_codes = [ # pylint: disable=line-too-long 'hi', 'hi'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected can_review_voiceover_for_language_codes list not to have ' 'duplicate values'): self.user_contribution_rights.validate() def test_can_review_translation_for_language_codes_with_duplicate_values( - self): + self + ) -> None: self.user_contribution_rights.can_review_translation_for_language_codes = [ # pylint: 
disable=line-too-long 'hi'] self.user_contribution_rights.validate() self.user_contribution_rights.can_review_translation_for_language_codes = [ # pylint: disable=line-too-long 'hi', 'hi'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected can_review_translation_for_language_codes list not to ' 'have duplicate values'): self.user_contribution_rights.validate() - def test_incorrect_type_for_can_review_questions_raise_error(self): - self.user_contribution_rights.can_review_questions = 5 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_incorrect_type_for_can_review_questions_raise_error(self) -> None: + self.user_contribution_rights.can_review_questions = 5 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected can_review_questions to be a boolean value'): self.user_contribution_rights.validate() - def test_incorrect_type_for_can_submit_questions_raise_error(self): - self.user_contribution_rights.can_submit_questions = 5 - with self.assertRaisesRegexp( + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_incorrect_type_for_can_submit_questions_raise_error(self) -> None: + self.user_contribution_rights.can_submit_questions = 5 # type: ignore[assignment] + with self.assertRaisesRegex( utils.ValidationError, 'Expected can_submit_questions to be a boolean value'): self.user_contribution_rights.validate() @@ -1150,15 +1322,17 @@ def test_incorrect_type_for_can_submit_questions_raise_error(self): class ModifiableUserDataTests(test_utils.GenericTestBase): """Testing domain object for modifiable user data.""" - def test_initialization_with_none_user_id_is_successful(self): + def test_initialization_with_none_user_id_is_successful(self) -> None: """Testing init method user id set None.""" - user_data_dict = { + user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias', 'pin': '123', - 'preferred_language_codes': 'preferred_language_codes', + 'preferred_language_codes': ['preferred_language_codes'], 'preferred_site_language_code': 'preferred_site_language_code', 'preferred_audio_language_code': 'preferred_audio_language_code', + 'preferred_translation_language_code': ( + 'preferred_translation_language_code'), 'user_id': None, } modifiable_user_data = ( @@ -1170,7 +1344,7 @@ def test_initialization_with_none_user_id_is_successful(self): self.assertEqual(modifiable_user_data.pin, '123') self.assertEqual( modifiable_user_data.preferred_language_codes, - 'preferred_language_codes' + ['preferred_language_codes'] ) self.assertEqual( modifiable_user_data.preferred_site_language_code, @@ -1180,17 +1354,23 @@ def test_initialization_with_none_user_id_is_successful(self): modifiable_user_data.preferred_audio_language_code, 'preferred_audio_language_code' ) + self.assertEqual( + modifiable_user_data.preferred_translation_language_code, + 'preferred_translation_language_code' + ) self.assertIsNone(modifiable_user_data.user_id) - def test_initialization_with_valid_user_id_is_successful(self): + def 
test_initialization_with_valid_user_id_is_successful(self) -> None: """Testing init method with a valid user id set.""" - user_data_dict = { + user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias', 'pin': '123', - 'preferred_language_codes': 'preferred_language_codes', + 'preferred_language_codes': ['preferred_language_codes'], 'preferred_site_language_code': 'preferred_site_language_code', 'preferred_audio_language_code': 'preferred_audio_language_code', + 'preferred_translation_language_code': ( + 'preferred_translation_language_code'), 'user_id': 'user_id', } modifiable_user_data = ( @@ -1202,7 +1382,7 @@ def test_initialization_with_valid_user_id_is_successful(self): self.assertEqual(modifiable_user_data.pin, '123') self.assertEqual( modifiable_user_data.preferred_language_codes, - 'preferred_language_codes' + ['preferred_language_codes'] ) self.assertEqual( modifiable_user_data.preferred_site_language_code, @@ -1212,30 +1392,45 @@ def test_initialization_with_valid_user_id_is_successful(self): modifiable_user_data.preferred_audio_language_code, 'preferred_audio_language_code' ) + self.assertEqual( + modifiable_user_data.preferred_translation_language_code, + 'preferred_translation_language_code' + ) self.assertEqual(modifiable_user_data.user_id, 'user_id') - def test_from_raw_dict_with_none_schema_version_raises_error(self): - user_data_dict = { - 'schema_version': None, + def test_from_raw_dict_with_none_schema_version_raises_error( + self + ) -> None: + # Here we use MyPy ignore because schema_version is expecting an int + # type but for test purposes we're assigning it with None. Thus to + # avoid MyPy error, ignore statement is added here. 
+ user_data_dict: user_domain.RawUserDataDict = { + 'schema_version': None, # type: ignore[typeddict-item] 'display_alias': 'display_alias', 'pin': '123', - 'preferred_language_codes': 'preferred_language_codes', + 'preferred_language_codes': ['preferred_language_codes'], 'preferred_site_language_code': 'preferred_site_language_code', 'preferred_audio_language_code': 'preferred_audio_language_code', + 'preferred_translation_language_code': ( + 'preferred_translation_language_code'), 'user_id': 'user_id', } error_msg = 'Invalid modifiable user data: no schema version specified.' - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_domain.ModifiableUserData.from_raw_dict(user_data_dict) - def test_from_raw_dict_with_invalid_schema_version_raises_error(self): - user_data_dict = { + def test_from_raw_dict_with_invalid_schema_version_raises_error( + self + ) -> None: + user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias', 'pin': '123', - 'preferred_language_codes': 'preferred_language_codes', + 'preferred_language_codes': ['preferred_language_codes'], 'preferred_site_language_code': 'preferred_site_language_code', 'preferred_audio_language_code': 'preferred_audio_language_code', + 'preferred_translation_language_code': ( + 'preferred_translation_language_code'), 'user_id': 'user_id', } current_version_plus_one = ( @@ -1246,41 +1441,48 @@ def test_from_raw_dict_with_invalid_schema_version_raises_error(self): for version in invalid_schema_versions: user_data_dict['schema_version'] = version error_msg = 'Invalid version %s received.' 
% version - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_domain.ModifiableUserData.from_raw_dict(user_data_dict) - def test_from_raw_dict_with_invalid_schema_version_type_raises_error(self): - user_data_dict = { + def test_from_raw_dict_with_invalid_schema_version_type_raises_error( + self + ) -> None: + user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias', 'pin': '123', - 'preferred_language_codes': 'preferred_language_codes', + 'preferred_language_codes': ['preferred_language_codes'], 'preferred_site_language_code': 'preferred_site_language_code', 'preferred_audio_language_code': 'preferred_audio_language_code', + 'preferred_translation_language_code': ( + 'preferred_translation_language_code'), 'user_id': 'user_id', } - invalid_schema_versions = ( - '', 'abc', '-1', '1', {}, [1], 1.0 + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + version = '-1' + user_data_dict['schema_version'] = version # type: ignore[arg-type] + error_msg = ( + 'Version has invalid type, expected int, ' + 'received %s' % type(version) ) - for version in invalid_schema_versions: - user_data_dict['schema_version'] = version - error_msg = ( - 'Version has invalid type, expected int, ' - 'received %s' % type(version) - ) - with self.assertRaisesRegexp(Exception, error_msg): - user_domain.ModifiableUserData.from_raw_dict(user_data_dict) + with self.assertRaisesRegex(Exception, error_msg): + user_domain.ModifiableUserData.from_raw_dict(user_data_dict) # This test should be modified to use the original class ModifiableUserData # itself when the CURRENT_SCHEMA_VERSION has been updated to 2 or higher. 
- def test_mock_modifiable_user_data_class_with_all_attributes_given(self): - user_data_dict = { + def test_mock_modifiable_user_data_class_with_all_attributes_given( + self + ) -> None: + user_data_dict: MockModifiableUserDataDict = { 'schema_version': 2, 'display_alias': 'name', 'pin': '123', 'preferred_language_codes': ['en', 'es'], 'preferred_site_language_code': 'es', 'preferred_audio_language_code': 'en', + 'preferred_translation_language_code': 'en', 'user_id': None, 'fake_field': 'set_value' } @@ -1294,20 +1496,26 @@ def test_mock_modifiable_user_data_class_with_all_attributes_given(self): modifiable_user_data.preferred_site_language_code, 'es') self.assertEqual( modifiable_user_data.preferred_audio_language_code, 'en') + self.assertEqual( + modifiable_user_data.preferred_translation_language_code, 'en') self.assertEqual(modifiable_user_data.fake_field, 'set_value') self.assertEqual(modifiable_user_data.user_id, None) # This test should be modified to use the original class ModifiableUserData # itself when the CURRENT_SCHEMA_VERSION has been updated to 2 or higher. 
- def test_mock_migration_from_old_version_to_new_works_correctly(self): - user_data_dict = { + def test_mock_migration_from_old_version_to_new_works_correctly( + self + ) -> None: + user_data_dict: MockModifiableUserDataDict = { 'schema_version': 1, 'display_alias': 'name', 'pin': '123', 'preferred_language_codes': ['en', 'es'], 'preferred_site_language_code': 'es', 'preferred_audio_language_code': 'en', - 'user_id': None + 'preferred_translation_language_code': 'en', + 'user_id': None, + 'fake_field': None } modifiable_user_data = ( MockModifiableUserData.from_raw_dict(user_data_dict)) @@ -1319,5 +1527,170 @@ def test_mock_migration_from_old_version_to_new_works_correctly(self): modifiable_user_data.preferred_site_language_code, 'es') self.assertEqual( modifiable_user_data.preferred_audio_language_code, 'en') + self.assertEqual( + modifiable_user_data.preferred_translation_language_code, 'en') self.assertEqual(modifiable_user_data.fake_field, 'default_value') self.assertEqual(modifiable_user_data.user_id, None) + + +class ExplorationUserDataTests(test_utils.GenericTestBase): + """Tests for ExplorationUserData domain object.""" + + def test_initialization(self) -> None: + exploration_user_data = user_domain.ExplorationUserData( + 'user1', 'exp1') + + expected_exploration_user_data_dict = { + 'rating': None, + 'rated_on': None, + 'draft_change_list': None, + 'draft_change_list_last_updated': None, + 'draft_change_list_exp_version': None, + 'draft_change_list_id': 0, + 'mute_suggestion_notifications': ( + feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), + 'mute_feedback_notifications': ( + feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE), + 'furthest_reached_checkpoint_exp_version': None, + 'furthest_reached_checkpoint_state_name': None, + 'most_recently_reached_checkpoint_state_name': None, + 'most_recently_reached_checkpoint_exp_version': None + } + + self.assertEqual(exploration_user_data.user_id, 'user1') + 
self.assertEqual(exploration_user_data.exploration_id, 'exp1') + self.assertEqual( + exploration_user_data.to_dict(), + expected_exploration_user_data_dict) + + def test_to_dict(self) -> None: + exploration_user_data = user_domain.ExplorationUserData( + 'user1', 'exp1', 4, + datetime.datetime(2022, 4, 1, 0, 0, 0, 0), None, + None, None, 0, False, False, 1, 'checkpoint2', 2, 'checkpoint1' + ) + expected_exploration_user_data_dict = { + 'rating': 4, + 'rated_on': datetime.datetime(2022, 4, 1, 0, 0, 0, 0), + 'draft_change_list': None, + 'draft_change_list_last_updated': None, + 'draft_change_list_exp_version': None, + 'draft_change_list_id': 0, + 'mute_suggestion_notifications': False, + 'mute_feedback_notifications': False, + 'furthest_reached_checkpoint_exp_version': 1, + 'furthest_reached_checkpoint_state_name': 'checkpoint2', + 'most_recently_reached_checkpoint_exp_version': 2, + 'most_recently_reached_checkpoint_state_name': 'checkpoint1' + } + + self.assertEqual(exploration_user_data.user_id, 'user1') + self.assertEqual(exploration_user_data.exploration_id, 'exp1') + self.assertEqual( + exploration_user_data.to_dict(), + expected_exploration_user_data_dict) + + +class LearnerGroupUserDetailsTests(test_utils.GenericTestBase): + """Tests for LearnerGroupUserDetails domain object.""" + + def test_initialization(self) -> None: + learner_group_user_details = ( + user_domain.LearnerGroupUserDetails( + 'group_id_1', True)) + + expected_learner_grp_user_details_dict = { + 'group_id': 'group_id_1', + 'progress_sharing_is_turned_on': True + } + + self.assertEqual( + learner_group_user_details.group_id, 'group_id_1') + self.assertEqual( + learner_group_user_details.progress_sharing_is_turned_on, True) + self.assertEqual( + learner_group_user_details.to_dict(), + expected_learner_grp_user_details_dict) + + def test_to_dict(self) -> None: + learner_group_user_details = ( + user_domain.LearnerGroupUserDetails( + 'group_id_1', True)) + expected_learner_grp_user_details_dict = 
{ + 'group_id': 'group_id_1', + 'progress_sharing_is_turned_on': True + } + + self.assertEqual( + learner_group_user_details.to_dict(), + expected_learner_grp_user_details_dict) + + +class LearnerGroupsUserTest(test_utils.GenericTestBase): + """Tests for LearnerGroupsUser domain object.""" + + def test_initialization(self) -> None: + learner_group_user_details = ( + user_domain.LearnerGroupUserDetails( + 'group_id_1', False)) + learner_group_user = user_domain.LearnerGroupsUser( + 'user1', ['group_id_2', 'group_id_3'], + [learner_group_user_details], 1) + + expected_learner_group_user_dict = { + 'user_id': 'user1', + 'invited_to_learner_groups_ids': ['group_id_2', 'group_id_3'], + 'learner_groups_user_details': [ + { + 'group_id': 'group_id_1', + 'progress_sharing_is_turned_on': False + } + ], + 'learner_groups_user_details_schema_version': 1 + } + + self.assertEqual(learner_group_user.user_id, 'user1') + self.assertEqual( + learner_group_user.invited_to_learner_groups_ids, + ['group_id_2', 'group_id_3']) + self.assertEqual( + learner_group_user.learner_groups_user_details, + [learner_group_user_details]) + self.assertEqual( + learner_group_user.learner_groups_user_details_schema_version, 1) + self.assertEqual( + learner_group_user.to_dict(), + expected_learner_group_user_dict) + + def test_to_dict(self) -> None: + learner_group_user_details = ( + user_domain.LearnerGroupUserDetails('group_id_1', False)) + learner_group_user = user_domain.LearnerGroupsUser( + 'user1', ['group_id_2', 'group_id_3'], + [learner_group_user_details], 1) + + expected_learner_group_user_dict = { + 'user_id': 'user1', + 'invited_to_learner_groups_ids': ['group_id_2', 'group_id_3'], + 'learner_groups_user_details': [ + { + 'group_id': 'group_id_1', + 'progress_sharing_is_turned_on': False + } + ], + 'learner_groups_user_details_schema_version': 1 + } + + self.assertEqual( + learner_group_user.to_dict(), + expected_learner_group_user_dict) + + def test_validation(self) -> None: + 
learner_group_user_details = ( + user_domain.LearnerGroupUserDetails('group_id_1', True)) + + self._assert_validation_error( + user_domain.LearnerGroupsUser( + 'user1', ['group_id_1'], [learner_group_user_details], 1), + 'Learner cannot be invited to join learner group group_id_1 since ' + 'they are already its learner.') diff --git a/core/domain/user_query_domain.py b/core/domain/user_query_domain.py index 4ce7b0b3a7f2..0a6e89e967bb 100644 --- a/core/domain/user_query_domain.py +++ b/core/domain/user_query_domain.py @@ -19,26 +19,43 @@ from __future__ import annotations import collections +import datetime from core import feconf from core import utils -from core.constants import constants -attribute_names = [ # pylint: disable=invalid-name - predicate['backend_attr'] for predicate in ( - constants.EMAIL_DASHBOARD_PREDICATE_DEFINITION)] +from typing import List, Optional -UserQueryParams = collections.namedtuple( # pylint: disable=invalid-name - 'UserQueryParams', attribute_names, defaults=(None,) * len(attribute_names)) + +UserQueryParams = collections.namedtuple( + 'UserQueryParams', + [ + 'inactive_in_last_n_days', + 'has_not_logged_in_for_n_days', + 'created_at_least_n_exps', + 'created_fewer_than_n_exps', + 'edited_at_least_n_exps', + 'edited_fewer_than_n_exps', + 'created_collection' + ], + defaults=(None, None, None, None, None, None, None) +) class UserQuery: """Domain object for the UserQueryModel.""" def __init__( - self, query_id, query_params, submitter_id, query_status, user_ids, - sent_email_model_id=None, created_on=None, deleted=False - ): + self, + query_id: str, + query_params: UserQueryParams, + submitter_id: str, + query_status: str, + user_ids: List[str], + sent_email_model_id: Optional[str] = None, + created_on: Optional[datetime.datetime] = None, + deleted: bool = False + ) -> None: """Create user query domain object. 
Args: @@ -62,68 +79,35 @@ def __init__( self.created_on = created_on self.deleted = deleted - def validate(self): + def validate(self) -> None: """Validates various properties of the UserQuery. Raises: - ValidationError. Expected ID to be a string. - ValidationError. Expected params to be of type UserQueryParams. - ValidationError. Expected objective to be a string. ValidationError. Expected submitter ID to be a valid user ID. - ValidationError. Expected status to be a string. ValidationError. Invalid status. - ValidationError. Expected user_ids to be a list. - ValidationError. Expected each user ID in user_ids to be a string. ValidationError. Expected user ID in user_ids to be a valid user ID. - ValidationError. Expected sent_email_model_id to be a string. """ - if not isinstance(self.id, str): - raise utils.ValidationError( - 'Expected ID to be a string, received %s' % self.id) - - if not isinstance(self.params, tuple): - raise utils.ValidationError( - 'Expected params to be of type tuple, received %s' - % type(self.params)) - - if not isinstance(self.submitter_id, str): - raise utils.ValidationError( - 'Expected submitter ID to be a string, received %s' % - self.submitter_id) if not utils.is_user_id_valid(self.submitter_id): raise utils.ValidationError( 'Expected submitter ID to be a valid user ID, received %s' % self.submitter_id) - if not isinstance(self.status, str): - raise utils.ValidationError( - 'Expected status to be a string, received %s' % self.status) if self.status not in feconf.ALLOWED_USER_QUERY_STATUSES: raise utils.ValidationError('Invalid status: %s' % self.status) - if not isinstance(self.user_ids, list): - raise utils.ValidationError( - 'Expected user_ids to be a list, received %s' % - type(self.user_ids)) for user_id in self.user_ids: - if not isinstance(user_id, str): - raise utils.ValidationError( - 'Expected each user ID in user_ids to be a string, ' - 'received %s' % user_id) - if not utils.is_user_id_valid(user_id): raise 
utils.ValidationError( 'Expected user ID in user_ids to be a valid user ID, ' 'received %s' % user_id) - if self.sent_email_model_id and not isinstance( - self.sent_email_model_id, str): - raise utils.ValidationError( - 'Expected sent_email_model_id to be a string, received %s' - % self.sent_email_model_id) - @classmethod - def create_default(cls, query_id, query_params, submitter_id): + def create_default( + cls, + query_id: str, + query_params: UserQueryParams, + submitter_id: str + ) -> UserQuery: """Create default user query. Args: @@ -139,7 +123,7 @@ def create_default(cls, query_id, query_params, submitter_id): feconf.USER_QUERY_STATUS_PROCESSING, [] ) - def archive(self, sent_email_model_id=None): + def archive(self, sent_email_model_id: Optional[str] = None) -> None: """Archive the query. Args: diff --git a/core/domain/user_query_domain_test.py b/core/domain/user_query_domain_test.py index 57da7a855f23..8a3237482ee9 100644 --- a/core/domain/user_query_domain_test.py +++ b/core/domain/user_query_domain_test.py @@ -22,15 +22,39 @@ from core import feconf from core import utils +from core.constants import constants from core.domain import user_query_domain from core.tests import test_utils +class UserQueryParamsAttributeTests(test_utils.GenericTestBase): + """Test for ensuring matching values for UserQueryParams attributes between + predefined and dynamically fetched fields from assets/constants.ts + """ + + def test_user_query_params_attributes_against_dynamic_data(self) -> None: + """Check to see if the list of attributes of UserQueryParams + is similar to the one we get during runtime from assets/constants.ts. 
+ """ + + attribute_names_predefined = list( + user_query_domain.UserQueryParams._fields) + attribute_names = [ + predicate['backend_attr'] for predicate + in constants.EMAIL_DASHBOARD_PREDICATE_DEFINITION + ] + + attribute_names_predefined.sort() + attribute_names.sort() + + self.assertEqual(attribute_names_predefined, attribute_names) + + class UserQueryTests(test_utils.GenericTestBase): """Test for the UserQuery.""" - def setUp(self): - super(UserQueryTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.user_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.user_query_params = user_query_domain.UserQueryParams( @@ -47,85 +71,33 @@ def setUp(self): ) self.user_query.validate() - def test_validate_query_with_invalid_type_id_raises(self): - self.user_query.id = 1 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected ID to be a string' - ): - self.user_query.validate() - - def test_validate_query_with_invalid_type_params_raises(self): - self.user_query.params = 1 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected params to be of type tuple' - ): - self.user_query.validate() - - def test_validate_query_with_invalid_type_submitter_id_raises( - self): - self.user_query.submitter_id = 1 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected submitter ID to be a string' - ): - self.user_query.validate() - def test_validate_query_with_invalid_user_id_submitter_id_raises( - self): + self + ) -> None: self.user_query.submitter_id = 'aaabbc' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected submitter ID to be a valid user ID' ): self.user_query.validate() - def test_validate_query_with_invalid_type_status_raises(self): - self.user_query.status = 1 - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected status to be a string' - ): - self.user_query.validate() - - def 
test_validate_query_with_invalid_status_raises(self): + def test_validate_query_with_invalid_status_raises(self) -> None: self.user_query.status = 'a' - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Invalid status: a' ): self.user_query.validate() - def test_validate_query_with_invalid_type_user_ids_raises(self): - self.user_query.user_ids = 'a' - with self.assertRaisesRegexp( - utils.ValidationError, 'Expected user_ids to be a list' - ): - self.user_query.validate() - - def test_validate_query_with_invalid_type_of_values_in_user_ids_raises( - self): - self.user_query.user_ids = [1] - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected each user ID in user_ids to be a string' - ): - self.user_query.validate() - - def test_validate_query_with_non_user_id_values_in_user_ids_raises(self): + def test_validate_query_with_non_user_id_values_in_user_ids_raises( + self + ) -> None: self.user_query.user_ids = ['aaa'] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'Expected user ID in user_ids to be a valid user ID' ): self.user_query.validate() - def test_validate_query_with_invalid_type_of_sent_email_model_id_raises( - self): - self.user_query.sent_email_model_id = 1 - with self.assertRaisesRegexp( - utils.ValidationError, - 'Expected sent_email_model_id to be a string' - ): - self.user_query.validate() - - def test_create_default_returns_correct_user_query(self): + def test_create_default_returns_correct_user_query(self) -> None: default_user_query = user_query_domain.UserQuery.create_default( 'id', self.user_query_params, self.user_id) self.assertEqual(default_user_query.params, self.user_query_params) @@ -134,7 +106,7 @@ def test_create_default_returns_correct_user_query(self): default_user_query.status, feconf.USER_QUERY_STATUS_PROCESSING) self.assertEqual(default_user_query.user_ids, []) - def test_archive_returns_correct_dict(self): + def 
test_archive_returns_correct_dict(self) -> None: self.user_query.archive(sent_email_model_id='sent_email_model_id') self.assertEqual( self.user_query.sent_email_model_id, 'sent_email_model_id') diff --git a/core/domain/user_query_services.py b/core/domain/user_query_services.py index 7b72eacaa020..c3d38374c9b0 100644 --- a/core/domain/user_query_services.py +++ b/core/domain/user_query_services.py @@ -23,10 +23,18 @@ from core.domain import user_query_domain from core.platform import models -(user_models,) = models.Registry.import_models([models.NAMES.user]) +from typing import Dict, List, Literal, Optional, Tuple, overload +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models -def _get_user_query_from_model(user_query_model): +(user_models,) = models.Registry.import_models([models.Names.USER]) + + +def _get_user_query_from_model( + user_query_model: user_models.UserQueryModel +) -> user_query_domain.UserQuery: """Transform user query model to domain object. Args: @@ -54,7 +62,21 @@ def _get_user_query_from_model(user_query_model): ) -def get_user_query(query_id, strict=False): +@overload +def get_user_query( + query_id: str, *, strict: Literal[True] = ... +) -> user_query_domain.UserQuery: ... + + +@overload +def get_user_query( + query_id: str, *, strict: Literal[False] = ... +) -> Optional[user_query_domain.UserQuery]: ... + + +def get_user_query( + query_id: str, strict: bool = False +) -> Optional[user_query_domain.UserQuery]: """Gets the user query with some ID. Args: @@ -62,7 +84,8 @@ def get_user_query(query_id, strict=False): strict: bool. Whether to raise an error if the user query doesn't exist. Returns: - UserQuery. The user query. + UserQuery|None. Returns the user query domain object. Can be None if + there is no user query model. 
""" user_query_model = user_models.UserQueryModel.get(query_id, strict=strict) return ( @@ -71,7 +94,9 @@ def get_user_query(query_id, strict=False): ) -def get_recent_user_queries(num_queries_to_fetch, cursor): +def get_recent_user_queries( + num_queries_to_fetch: int, cursor: Optional[str] +) -> Tuple[List[user_query_domain.UserQuery], Optional[str]]: """Get recent user queries. Args: @@ -93,7 +118,7 @@ def get_recent_user_queries(num_queries_to_fetch, cursor): ) -def _save_user_query(user_query): +def _save_user_query(user_query: user_query_domain.UserQuery) -> str: """Save the user query into the datastore. Args: @@ -127,7 +152,9 @@ def _save_user_query(user_query): return user_query_model.id -def save_new_user_query(submitter_id, query_params): +def save_new_user_query( + submitter_id: str, query_params: Dict[str, int] +) -> str: """Saves a new user query. Args: @@ -145,7 +172,7 @@ def save_new_user_query(submitter_id, query_params): return _save_user_query(user_query) -def archive_user_query(user_query_id): +def archive_user_query(user_query_id: str) -> None: """Delete the user query. Args: @@ -157,7 +184,12 @@ def archive_user_query(user_query_id): def send_email_to_qualified_users( - query_id, email_subject, email_body, email_intent, max_recipients): + query_id: str, + email_subject: str, + email_body: str, + email_intent: str, + max_recipients: Optional[int] +) -> None: """Send email to maximum 'max_recipients' qualified users. Args: @@ -165,7 +197,7 @@ def send_email_to_qualified_users( email_subject: str. Subject of the email to be sent. email_body: str. Body of the email to be sent. email_intent: str. Intent of the email. - max_recipients: int. Maximum number of recipients send emails to. + max_recipients: int|None. Maximum number of recipients send emails to. 
""" user_query = get_user_query(query_id, strict=True) recipient_ids = user_query.user_ids diff --git a/core/domain/user_query_services_test.py b/core/domain/user_query_services_test.py index f825e4776107..9abb865f76e2 100644 --- a/core/domain/user_query_services_test.py +++ b/core/domain/user_query_services_test.py @@ -24,8 +24,13 @@ from core.platform import models from core.tests import test_utils +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import email_models + from mypy_imports import user_models + (email_models, user_models) = models.Registry.import_models([ - models.NAMES.email, models.NAMES.user]) + models.Names.EMAIL, models.Names.USER]) class UserQueryServicesTests(test_utils.GenericTestBase): @@ -33,8 +38,8 @@ class UserQueryServicesTests(test_utils.GenericTestBase): USER_QUERY_1_ID = 'user_query_1_id' USER_QUERY_2_ID = 'user_query_2_id' - def setUp(self): - super(UserQueryServicesTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.admin_user_id = ( self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) @@ -62,9 +67,11 @@ def setUp(self): self.user_query_model_2.update_timestamps() self.user_query_model_2.put() - def test_get_user_query_returns_user_query(self): + def test_get_user_query_returns_user_query(self) -> None: user_query = user_query_services.get_user_query(self.USER_QUERY_1_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert user_query is not None self.assertEqual(self.user_query_model_1.id, user_query.id) self.assertEqual( self.user_query_model_1.inactive_in_last_n_days, @@ -97,7 +104,7 @@ def test_get_user_query_returns_user_query(self): self.assertEqual( self.user_query_model_1.deleted, user_query.deleted) - def test_get_recent_user_queries_returns_recent_user_queries(self): + def test_get_recent_user_queries_returns_recent_user_queries(self) -> None: user_queries, _ = user_query_services.get_recent_user_queries(5, None) self.assertEqual(self.user_query_model_1.id, user_queries[1].id) @@ -113,7 +120,7 @@ def test_get_recent_user_queries_returns_recent_user_queries(self): self.assertEqual( self.user_query_model_2.query_status, user_queries[0].status) - def test_save_new_query_model(self): + def test_save_new_query_model(self) -> None: query_param = { 'inactive_in_last_n_days': 10, 'created_at_least_n_exps': 5, @@ -124,6 +131,8 @@ def test_save_new_query_model(self): query_model = user_models.UserQueryModel.get(user_query_id) + # Ruling out the possibility of None for mypy type checking. + assert query_model is not None self.assertEqual(query_model.submitter_id, self.admin_user_id) self.assertEqual( query_model.inactive_in_last_n_days, @@ -138,9 +147,11 @@ def test_save_new_query_model(self): self.assertIsNone(query_model.edited_at_least_n_exps) self.assertIsNone(query_model.edited_fewer_than_n_exps) - def test_archive_user_query_archives_user_query(self): + def test_archive_user_query_archives_user_query(self) -> None: original_user_query = ( user_query_services.get_user_query(self.USER_QUERY_1_ID)) + # Ruling out the possibility of None for mypy type checking. 
+ assert original_user_query is not None user_query_services.archive_user_query(original_user_query.id) archived_user_query_model = ( @@ -150,7 +161,7 @@ def test_archive_user_query_archives_user_query(self): feconf.USER_QUERY_STATUS_ARCHIVED) self.assertTrue(archived_user_query_model.deleted) - def test_send_email_to_qualified_users(self): + def test_send_email_to_qualified_users(self) -> None: self.assertIsNone( user_models.UserBulkEmailsModel.get(self.new_user_id, strict=False)) self.assertIsNone( @@ -190,6 +201,8 @@ def test_send_email_to_qualified_users(self): new_user_bulk_email_model = user_models.UserBulkEmailsModel.get( self.new_user_id) + # Ruling out the possibility of None for mypy type checking. + assert new_user_bulk_email_model is not None self.assertIsNotNone( email_models.BulkEmailModel.get( new_user_bulk_email_model.sent_email_model_ids[0])) diff --git a/core/domain/user_services.py b/core/domain/user_services.py index 230f107f9edc..c38ffcc3b0ef 100644 --- a/core/domain/user_services.py +++ b/core/domain/user_services.py @@ -21,42 +21,68 @@ import datetime import hashlib import imghdr +import itertools import logging import re import urllib from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.domain import auth_domain from core.domain import auth_services +from core.domain import exp_fetchers from core.domain import role_services +from core.domain import state_domain from core.domain import user_domain from core.platform import models import requests -auth_models, user_models, audit_models, suggestion_models = ( - models.Registry.import_models( - [models.NAMES.auth, models.NAMES.user, models.NAMES.audit, - models.NAMES.suggestion])) +from typing import ( + Dict, Final, List, Literal, Optional, Sequence, TypedDict, overload) + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import audit_models + from mypy_imports import auth_models + from mypy_imports import 
bulk_email_services + from mypy_imports import suggestion_models + from mypy_imports import transaction_services + from mypy_imports import user_models + +(auth_models, user_models, audit_models, suggestion_models) = ( + models.Registry.import_models([ + models.Names.AUTH, + models.Names.USER, + models.Names.AUDIT, + models.Names.SUGGESTION + ]) +) bulk_email_services = models.Registry.import_bulk_email_services() transaction_services = models.Registry.import_transaction_services() # Size (in px) of the gravatar being retrieved. -GRAVATAR_SIZE_PX = 150 +GRAVATAR_SIZE_PX: Final = 150 # Data url for images/avatar/user_blue_72px.png. # Generated using utils.convert_png_to_data_url. -DEFAULT_IDENTICON_DATA_URL = ( +DEFAULT_IDENTICON_DATA_URL: Final = ( 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEwAAABMCAYAAADHl1ErAAAAAXNSR0IArs4c6QAADhtJREFUeAHtXHlwVdUZ/859jyxmIQESyCaglC0iAgkJIntrIpvKphSwY2ttxbFOp9R/cGGqdhykLaMVO2OtoyRSCEKNEpYKyBIVQ1iNkBhNMCtb8shiQpJ3b7/fTW7m5uUlecu9L4nTM5Pce8895zvf93vnnPud833fEdQLKXb5jsC6%2BuZERZbHKaSMYRbGKERxgpQQUkSIIigEbAmFavlfrUKiVhCVcFa%2BIJEvJOlCcNCAnNKMFQ0o58vEfPgmhS5Mn0ot8n2KIs8lIZJJUfy8almIJqbxhRDSIbJKe2s%2BXvWlV/RcrGwqYGGp20bI1LyaeVmjKMrodp4EycGBAy6MjgsrSxozqG7O5GgxcVREeEigNDAwwBpmsUiRKGu3y1caGltstQ3yjbOFV6sPnypXTuRXBReU2GLqGprHkUKSRlMIUcD3WyUakGbbt7JYyzf6agpgYfe9O8kui/U8nB7UhJIkUTljwrBTTz449mZKUlyCEBTnjTCKQiX7T5ScfGP3Rf9j5ysny7IyTKXHPwYP690WSXnZtvcXp71pw1ldQwELm59%2BlyzbX%2BbeNL%2Btscb4EYOyNz2ZWD99wtAFnGdxxoQBefbs85f3rHsjJyivuGo60wsATe51WZJkWW/LWnXGgDZUEoYAFr58x0B7beOLPHGv5XnFIpGoS0mKOfze%2Bpmj/f2smNR9lm42teQ/8vLRgv0nyuZwVwtm1Ows5BZLSMBz1RkrbnjLiNeAhaWmPWgn%2BxYeejwkRMu9idH7tm%2BYE8/z0EhvmfOmPs9/RQ9tOJx3IKc8lUixkqBKC1nW2vat3u0NXY8Bi1%2B%2Bw6%2BktnETD7%2BnwEB4iP/pL/5xf03U4IBZ3jBkdN2K641Hkn/7YWh17c1JoM3D9PW4kIB1eRkrmjxpyyPAeK4aLttbPuAhOIU5aHpm1cTMZ1ffuRT8eMKED%2BooL6Wd%2B2Bj%2BtnFUGeYyVzJYl3Kc9sld9t2W8Dw%2BWkTWuz2fdxQ9ACr9P3Jfy7%2BZuSw0HnuNtwb5Ysqaw4mPJb5k%2BYW%2BVZuv9xqsaRWZ60%2B7w4vbgEWnrJ1hp3kTO5ZYUPCAnK%2B3bYiitWDWHca7O2yrI6U3r5yR8U1W2
MiC2%2BzkLS4ev%2BaY67y1a749VQBYLUIZT/AGhUTduS7f68Y39/AgozgGbxDBsgCmSBbT/Jr710CDMMQPYvHf2DC2Mj9p95efA8TCNKI9MNrEGSALJAJskFGV%2BTocUhigrfbWz5jYtH4VdrAMksBdYVnI8vYJ/8q83hhmW0WEy23WKx39/Qh6LaHQXXA1xBgYc5isBL4/scCFoC3QCbIBhkhK2TGi65St4CpeharDvgaYoJnIv15GHaFQRBkg4w8p02BzF0VRH6XgEGDV5VS1rOgOvTHCb47wfXvIBtkhE4JmSG7/r3%2B3ilg6toQyx1OUEr7i56lF8zde8gIWVEPSz1g4IyGU8CwkMbaEMudNg3eWd0fXR5khcyQXcXAiYSdAMMWDY/ltVhIY23IdXr8kjqh21%2BzRKvMogUYAAtHQToBhv0sbNFg16GvLaQdmTfjGTJDdmCgYuHQSIfe07pTSqewn3V9z6qrvb1F48Crzx6xNTR4QXoE9tN4c2%2ByfufWqudC3VbmAYzNPwZrkf6dL%2B4LSm5Q9vkrVH79B6qs%2BoH8B1goatAtNCIqmOZOiabw4G5VJMNYREdhDD7ae6J0USsmtEwj3t7DYLCwK83f8WbbzauZP7/kq53SxiY7vfmfC5R24Fv6prTrDVEWgqbfEUlPLY2nlKkxGv%2BmXbFzG7H4/eE8g/tZyO92zbDSPoe1WncUgT14X4G189NimvjobnrhX6e6BQuo8DCho2crafnzB2n%2BMwe4PL5H5iVgACx4wEltli%2B1sXbA%2BGkNcmCwUN%2BY%2BI%2B3WOjZt3Lpl68cpQoefu6m4%2Bcqae7TWfTfk%2BXuVnWrvA4LFRtUVockjKxKc8sJmMJsWWsiON/U9eJvNmXTtk%2B%2BdYt5Z4WZX0p/bjYtmBbn7LURefaw%2BVuvwoQnBliTYCxu7WFskQb1WROjcvliKlibM/IMAQv8siD0643H6etiGx7NSBbYUlXCbRipgKnme859Ysl4jwwDrnKaV2SjDe%2B0tu9qnZ7KsQWch/YxVpt6KunZexieUVPDSIJjCC86k3lwyikJ0di%2BMS09/3au2iuMbuDr4mpKN2CIO%2BMLVnpgA4yAlVRX1ziV4fODrwOv2k2bDM4UVvEkXeaMJ0PyXn3/nCF0HIkAE2ADjICVpChiLArBMcSxsJHPmdmXjCTXiVZRRS19VVTdKd%2BIDA0bYCW1%2BWcRvGiMIN4Vjb1flHb1yrD8rM9LDKOlJ6RhA6ww6au%2BD3A50hcy%2Bt5sRRP8FpSYo8zqsBnDPax13oJ/ltEgafSqam5SU7NdezTtWsHrTzOShg2wYtWP3SQ5wZnNjMZA80Z9s1mkO9CtMakdDRtgJcGnFK3C869D6wY%2BRISp7loGUnROKtKkdtqxYawkzQGXdwNUN0nnrHiXGxxoJf40e0fEhdpRg29xoZT7RTRsgJV%2B8e0%2BJTdqJIwd4kZpz4pOGWN%2BG5Lq2s38wQHXMzZdq2XiAlllgP2%2BaH6yOX4xGjbAinejlVq0CG9l10T3rNT99wwnf96KMyvNuHMoDR0UaAr5dmwYK1YrhAoYXLtNaa2N6DAW5vFF6qLClGZeeHSyKXRBVMMGWLFaoUZYEPzgTWuxjfC6lROI/RgMb2bZ7JGUaOIcqWEDrDDp50MCBA0YLokDQRgx0p%2BdTezH4PDG88dxI8LotaeneU7AhZo6bPK5hwkVMERYuFDX6yLT2JDx99/fTVY2anibYiOCaPuGuayydDB%2BeUu2U30NG2AlCaFcRAmEo3QqaVLGynm30a6X5sHz2uMWksZH0pHXF9CIYeb/zho2CAqTgoMDvoTXCmJ3EI7isQRuVpw9KYqytyykhxk8qASuJoD84mNTKGvjveSLFQQwUeOaGCNE0Flqvs5o8b/9gZ8xwyMmj404NComZJyrzHtbLjTIjxZNv1X9C/S30pXqRr
LVdd4lh7EjOX4oPfHAOHrzD9Np9l1RZMHnygeJ45kOZXxaPJ6byr6WueotdfAjhI73rGdu2ZXnn5oY7QM2OjZxx8hw%2BvPjCepf2bUfqJz/Llc1qHpb1OBAiosMpoFB5i%2BtOnLV%2BoTgL9ypYYZ8bZ0tOd6QmuUNbCiFMoN9GPM0TCbeXYoZcgvhr48kOyLlVF6AESf1UwV7G88jBbC/ISqsjzDb62wAC9UmydhoAaz6b/tWcIgQul7ntI8woMNCxQZstQOGSFYeqQriDeGI0Ud47jU2gIEae8kmtlZsWllpB6zNO2UXZwcg3rDXOO0jDbdhEIDoXs1zB6y1A4YHhP3iiuBMOJXh3tfJzuZ/qBbfX65nR5UGqmto8TUL2OoqAgZoWMNEY6KTMhOa%2Bt4ehCDfmxjz8c4X5y3UChp5hVk/j63Vpwuu0zdlNVTIrkuFfC1hkOobO%2B//Qw8LD/an26JDaFRsKI2KCWU76kCaOi6CoHYYnZY9d/DjAzllC/lDmFWz75EFevqdFmGIkbbL9hREsiI40yg/11wGhxex9PlXV%2BjEhatUU99ZQdUzpr%2BH08n1mkb1L%2BfiVf0rGs5Lo2nxkXT3HUPZ0S7WawAhsxrFy6HPwKJDY/zQqYehAPey1%2BDgDxfsSxkPwZPYaTmU7S7BPWDXkWLafayYLlWaaidW2cASK5nBWzJzOD3AG5YebCgqw5dvP4PoXab1Oveu3znK5xQIOPW31DZchL/6M6vv2sn%2B68scK3b1jDlo%2B6Hv6G878ij/e1M3cbtiQc3HML4vKZbWrbyTpowe3G1Z7SVH7e7cmHZmGXePSmtI4FhnQfVOAQMBNfhdse/CwvzsO/cf6ykapKlZpq0HCmlzxlc%2B6U2akK5c2XJNf3x4At3D29hdJUTrTnz0wxlwOrEIy5Kugum7BAyEtaGJwKVrH63mrSDn0besEdNTmz9XJ%2B6uGOoL%2BbAr/OXJJIoM77jryx%2Bh0iGL0mSENnc1FDX%2BO6gVWqZ2RfQ9I5oLQgj75fxO/q%2BvpJ9TnXTxlevr6cPjlyj5iUx2bb%2BsZ7UesqlgsayQWf/S8b7bHobC3QWYrv3rZ%2BwuXuhIs88/Y4v8vfWz4BvrdoBpj4BBejWE2W4/yupTGMJ%2BD21O/emf3j1t2bTNrYD8PgWkv7/FflvUwE8uFFelMAg2i8Uy05UTBlwCTAWtLUieJ8XA2MiQIxXX6xNYI%2B6XC3Wep%2Br5xz/Jsszij1qDVREprp4s4DJgGmjaMQzcUA5bgaNkRTbH3GxSf5SEVMoxRBUMlrnHMIB//ArounxbjgZZuWWtSzlokmyGkwWv4Bm8QwZ1GLpxZgUYcquHaRLgQ6A/SobJ4IiGpeyc7RE9ja55V/aKEOID5s/3R8loQjkeVsTzwmmeF2oYuFlamT5xFeII/4qh3LMmgR/oWT4/rEgPhONxWEKifUJW4mWikfpyvr5nBbNIkUQeD8BU7lm9fxyWHgDHA9fYQlzHg/0w/6qjuZzqdKwvb/J9PveiAl4Hz%2BE5q%2B8duKYXHjHSjkf6sXkqWyEZK4QFLIQ51iihWrr2CJKCeE6fzm2pax8Grm8e6acHDffth0YSLdF9CCoZvFye55okRU7gIetV1AkPuRJZSCfZUdefezJMYf3v0MhOwHVzLKlQxAWSRJlQlDr%2BzrPcUjjbGwbyBB2mCKH62/K7KwywjWM8b5CQq%2BH9x%2B%2BCSVZiFKH8eI4ldQQOz4jJ/P/Bt86QcSFPPVqZA50Qu4NwFK7i3tHK7HEEJ5reOFr5fwkK97jkk8ywAAAAAElFTkSuQmCC') # pylint: disable=line-too-long -LABEL_FOR_USER_BEING_DELETED = '[User being deleted]' -USERNAME_FOR_USER_BEING_DELETED = 
'UserBeingDeleted' +LABEL_FOR_USER_BEING_DELETED: Final = '[User being deleted]' +USERNAME_FOR_USER_BEING_DELETED: Final = 'UserBeingDeleted' -def is_username_taken(username): +class DashboardStatsDict(TypedDict): + """Dictionary representing the dashboard stats dictionary.""" + + num_ratings: int + average_ratings: Optional[float] + total_plays: int + + +def is_username_taken(username: str) -> bool: """Returns whether the given username has already been taken. Args: @@ -69,7 +95,7 @@ def is_username_taken(username): user_domain.UserSettings.normalize_username(username)) -def get_email_from_user_id(user_id): +def get_email_from_user_id(user_id: str) -> str: """Gets the email from a given user_id. Args: @@ -85,25 +111,122 @@ def get_email_from_user_id(user_id): return user_settings.email -def get_user_id_from_username(username): +@overload +def get_user_id_from_username( + username: str, *, strict: Literal[True] +) -> str: ... + + +@overload +def get_user_id_from_username( + username: str +) -> Optional[str]: ... + + +@overload +def get_user_id_from_username( + username: str, *, strict: Literal[False] +) -> Optional[str]: ... + + +def get_user_id_from_username( + username: str, strict: bool = False +) -> Optional[str]: """Gets the user_id for a given username. Args: username: str. Identifiable username to display in the UI. + strict: bool. Whether to fail noisily if no UserSettingsModel with a + given username is found in the datastore. Returns: str or None. If the user with given username does not exist, return None. Otherwise return the user_id corresponding to given username. + + Raises: + Exception. No user_id found for the given username. 
""" user_model = user_models.UserSettingsModel.get_by_normalized_username( user_domain.UserSettings.normalize_username(username)) if user_model is None: + if strict: + raise Exception( + 'No user_id found for the given username: %s' % username + ) return None else: return user_model.id -def get_user_settings_from_username(username): +@overload +def get_multi_user_ids_from_usernames( + usernames: List[str], *, strict: Literal[True] +) -> List[str]: ... + + +@overload +def get_multi_user_ids_from_usernames( + usernames: List[str] +) -> List[Optional[str]]: ... + + +@overload +def get_multi_user_ids_from_usernames( + usernames: List[str], *, strict: Literal[False] +) -> List[Optional[str]]: ... + + +def get_multi_user_ids_from_usernames( + usernames: List[str], strict: bool = False +) -> Sequence[Optional[str]]: + """Gets the user_ids for a given list of usernames. + + Args: + usernames: list(str). Identifiable usernames to display in the UI. + strict: bool. Whether to fail noisily if no user_id with the given + useranme found. + + Returns: + list(str|None). Return the list of user ids corresponding to given + usernames. + + Raises: + Exception. No user_id found for the username. 
+ """ + if len(usernames) == 0: + return [] + + normalized_usernames = [ + user_domain.UserSettings.normalize_username(username) + for username in usernames + ] + + found_models: Sequence[user_models.UserSettingsModel] = ( + user_models.UserSettingsModel.query( + user_models.UserSettingsModel.normalized_username.IN( + normalized_usernames + ) + ).fetch() + ) + + username_to_user_id_map = { + model.normalized_username: model.id for model in found_models + } + user_ids = [] + for username in normalized_usernames: + user_id = username_to_user_id_map.get(username) + if strict and user_id is None: + raise Exception( + 'No user_id found for the username: %s' % username + ) + user_ids.append(user_id) + + return user_ids + + +def get_user_settings_from_username( + username: str +) -> Optional[user_domain.UserSettings]: """Gets the user settings for a given username. Args: @@ -121,7 +244,9 @@ def get_user_settings_from_username(username): return get_user_settings(user_model.id) -def get_user_settings_from_email(email): +def get_user_settings_from_email( + email: str +) -> Optional[user_domain.UserSettings]: """Gets the user settings for a given email. Args: @@ -138,7 +263,38 @@ def get_user_settings_from_email(email): return get_user_settings(user_model.id) -def get_users_settings(user_ids, strict=False, include_marked_deleted=False): +@overload +def get_users_settings( + user_ids: Sequence[Optional[str]], + *, + strict: Literal[True], + include_marked_deleted: bool = False +) -> Sequence[user_domain.UserSettings]: ... + + +@overload +def get_users_settings( + user_ids: Sequence[Optional[str]], + *, + strict: Literal[False], + include_marked_deleted: bool = False +) -> Sequence[Optional[user_domain.UserSettings]]: ... + + +@overload +def get_users_settings( + user_ids: Sequence[Optional[str]], + *, + strict: bool = ..., + include_marked_deleted: bool = False +) -> Sequence[Optional[user_domain.UserSettings]]: ... 
+ + +def get_users_settings( + user_ids: Sequence[Optional[str]], + strict: bool = False, + include_marked_deleted: bool = False +) -> Sequence[Optional[user_domain.UserSettings]]: """Gets domain objects representing the settings for the given user_ids. Args: @@ -161,11 +317,10 @@ def get_users_settings(user_ids, strict=False, include_marked_deleted=False): user_ids, include_deleted=include_marked_deleted) if strict: - for user_id, user_settings_model in python_utils.ZIP( - user_ids, user_settings_models): + for user_id, user_settings_model in zip(user_ids, user_settings_models): if user_settings_model is None: raise Exception('User with ID \'%s\' not found.' % user_id) - result = [] + result: List[Optional[user_domain.UserSettings]] = [] for i, model in enumerate(user_settings_models): if user_ids[i] == feconf.SYSTEM_COMMITTER_ID: result.append(user_domain.UserSettings( @@ -179,6 +334,7 @@ def get_users_settings(user_ids, strict=False, include_marked_deleted=False): ], banned=False, username='admin', + has_viewed_lesson_info_modal_once=False, last_agreed_to_terms=datetime.datetime.utcnow() )) else: @@ -191,7 +347,7 @@ def get_users_settings(user_ids, strict=False, include_marked_deleted=False): return result -def generate_initial_profile_picture(user_id): +def generate_initial_profile_picture(user_id: str) -> None: """Generates a profile picture for a new user and updates the user's settings in the datastore. @@ -203,7 +359,7 @@ def generate_initial_profile_picture(user_id): update_profile_picture_data_url(user_id, user_gravatar) -def get_gravatar_url(email): +def get_gravatar_url(email: str) -> str: """Returns the gravatar url for the specified email. 
Args: @@ -218,7 +374,7 @@ def get_gravatar_url(email): (hashlib.md5(email.encode('utf-8')).hexdigest(), GRAVATAR_SIZE_PX)) -def fetch_gravatar(email): +def fetch_gravatar(email: str) -> str: """Returns the gravatar corresponding to the user's email, or an identicon generated from the email if the gravatar doesn't exist. @@ -240,7 +396,8 @@ def fetch_gravatar(email): else: if response.ok: if imghdr.what(None, h=response.content) == 'png': - return utils.convert_png_binary_to_data_url(response.content) + return utils.convert_png_or_webp_binary_to_data_url( + response.content, 'png') else: logging.error( '[Status %s] Failed to fetch Gravatar from %s' % @@ -249,13 +406,33 @@ def fetch_gravatar(email): return DEFAULT_IDENTICON_DATA_URL -def get_user_settings(user_id, strict=False): +@overload +def get_user_settings( + user_id: str +) -> user_domain.UserSettings: ... + + +@overload +def get_user_settings( + user_id: str, *, strict: Literal[True] +) -> user_domain.UserSettings: ... + + +@overload +def get_user_settings( + user_id: str, *, strict: Literal[False] +) -> Optional[user_domain.UserSettings]: ... + + +def get_user_settings( + user_id: str, strict: bool = True +) -> Optional[user_domain.UserSettings]: """Return the user settings for a single user. Args: user_id: str. The unique ID of the user. strict: bool. Whether to fail noisily if no user with the given - id exists in the datastore. Defaults to False. + id exists in the datastore. Defaults to True. Returns: UserSettings or None. If the given user_id does not exist and strict @@ -273,7 +450,27 @@ def get_user_settings(user_id, strict=False): return user_settings -def get_user_settings_by_auth_id(auth_id, strict=False): +@overload +def get_user_settings_by_auth_id( + auth_id: str, *, strict: Literal[True] +) -> user_domain.UserSettings: ... + + +@overload +def get_user_settings_by_auth_id( + auth_id: str +) -> Optional[user_domain.UserSettings]: ... 
+ + +@overload +def get_user_settings_by_auth_id( + auth_id: str, *, strict: Literal[False] +) -> Optional[user_domain.UserSettings]: ... + + +def get_user_settings_by_auth_id( + auth_id: str, strict: bool = False +) -> Optional[user_domain.UserSettings]: """Return the user settings for a single user. Args: @@ -289,7 +486,9 @@ def get_user_settings_by_auth_id(auth_id, strict=False): Raises: Exception. The value of strict is True and given auth_id does not exist. """ - user_id = auth_services.get_user_id_from_auth_id(auth_id) + user_id = auth_services.get_user_id_from_auth_id( + auth_id, include_deleted=True + ) user_settings_model = ( None if user_id is None else user_models.UserSettingsModel.get_by_id(user_id)) @@ -302,7 +501,7 @@ def get_user_settings_by_auth_id(auth_id, strict=False): return None -def get_user_roles_from_id(user_id): +def get_user_roles_from_id(user_id: str) -> List[str]: """Returns roles of the user with given user_id. Args: @@ -317,7 +516,11 @@ def get_user_roles_from_id(user_id): return user_settings.roles -def _create_user_contribution_rights_from_model(user_contribution_rights_model): +def _create_user_contribution_rights_from_model( + user_contribution_rights_model: Optional[ + user_models.UserContributionRightsModel + ] +) -> user_domain.UserContributionRights: """Creates a UserContributionRights object from the given model. If the model is None, an empty UserContributionRights object is returned. @@ -347,7 +550,9 @@ def _create_user_contribution_rights_from_model(user_contribution_rights_model): return user_domain.UserContributionRights('', [], [], False, False) -def get_user_contribution_rights(user_id): +def get_user_contribution_rights( + user_id: str +) -> user_domain.UserContributionRights: """Returns the UserContributionRights domain object for the given user_id. 
Args: @@ -360,7 +565,9 @@ def get_user_contribution_rights(user_id): return get_users_contribution_rights([user_id])[0] -def get_users_contribution_rights(user_ids): +def get_users_contribution_rights( + user_ids: List[str] +) -> List[user_domain.UserContributionRights]: """Returns the UserContributionRights domain object for each user_id in user_ids. @@ -389,7 +596,7 @@ def get_users_contribution_rights(user_ids): return users_contribution_rights -def get_reviewer_user_ids_to_notify(): +def get_reviewer_user_ids_to_notify() -> List[str]: """Gets a list of the reviewer user_ids who want to be notified of Contributor Dashboard reviewer updates. @@ -413,7 +620,9 @@ def get_reviewer_user_ids_to_notify(): return reviewer_ids_to_notify -def get_all_reviewers_contribution_rights(): +def get_all_reviewers_contribution_rights() -> List[ + user_domain.UserContributionRights +]: """Returns a list of UserContributionRights objects corresponding to each UserContributionRightsModel. @@ -431,7 +640,9 @@ def get_all_reviewers_contribution_rights(): ] -def _save_user_contribution_rights(user_contribution_rights): +def _save_user_contribution_rights( + user_contribution_rights: user_domain.UserContributionRights +) -> None: """Saves the UserContributionRights object into the datastore. Args: @@ -455,7 +666,9 @@ def _save_user_contribution_rights(user_contribution_rights): user_contribution_rights.can_submit_questions)).put() -def _update_user_contribution_rights(user_contribution_rights): +def _update_user_contribution_rights( + user_contribution_rights: user_domain.UserContributionRights +) -> None: """Updates the users rights model if the updated object has review rights in at least one item else delete the existing model. 
@@ -471,7 +684,8 @@ def _update_user_contribution_rights(user_contribution_rights): @transaction_services.run_in_transaction_wrapper def _update_reviewer_counts_in_community_contribution_stats_transactional( - future_user_contribution_rights): + future_user_contribution_rights: user_domain.UserContributionRights +) -> None: """Updates the reviewer counts in the community contribution stats based on the given user contribution rights with the most up-to-date values. This method is intended to be called right before the new updates to the @@ -530,7 +744,8 @@ def _update_reviewer_counts_in_community_contribution_stats_transactional( def _update_reviewer_counts_in_community_contribution_stats( - user_contribution_rights): + user_contribution_rights: user_domain.UserContributionRights +) -> None: """Updates the reviewer counts in the community contribution stats based on the updates to the given user contribution rights. The GET and PUT is done in a transaction to avoid loss of updates that come in rapid @@ -544,7 +759,7 @@ def _update_reviewer_counts_in_community_contribution_stats( user_contribution_rights) -def get_usernames_by_role(role): +def get_usernames_by_role(role: str) -> List[str]: """Get usernames of all the users with given role ID. Args: @@ -557,7 +772,7 @@ def get_usernames_by_role(role): return [user.username for user in user_settings] -def get_user_ids_by_role(role): +def get_user_ids_by_role(role: str) -> List[str]: """Get user ids of all the users with given role ID. Args: @@ -570,21 +785,26 @@ def get_user_ids_by_role(role): return [user.id for user in user_settings] -def get_user_actions_info(user_id): +def get_user_actions_info( + user_id: Optional[str] +) -> user_domain.UserActionsInfo: """Gets user actions info for a user. Args: - user_id: str|None. The user ID of the user we want to get actions for. + user_id: str|None. The user ID of the user we want to get actions for, + or None if the user is not logged in. Returns: UserActionsInfo. 
User object with system committer user id. """ - roles = get_user_roles_from_id(user_id) + roles = ( + get_user_roles_from_id(user_id) if user_id else [feconf.ROLE_ID_GUEST] + ) actions = role_services.get_all_actions(roles) return user_domain.UserActionsInfo(user_id, roles, actions) -def get_system_user(): +def get_system_user() -> user_domain.UserActionsInfo: """Returns user object with system committer user id. Returns: @@ -593,11 +813,31 @@ def get_system_user(): return get_user_actions_info(feconf.SYSTEM_COMMITTER_ID) -def _save_user_settings(user_settings): +def save_user_settings(user_settings: user_domain.UserSettings) -> None: """Commits a user settings object to the datastore. Args: user_settings: UserSettings. The user setting domain object to be saved. + + Returns: + UserSettingsModel. The updated user settings model that was saved. + """ + user_model = convert_to_user_settings_model(user_settings) + user_model.update_timestamps() + user_model.put() + + +def convert_to_user_settings_model( + user_settings: user_domain.UserSettings +) -> user_models.UserSettingsModel: + """Converts a UserSettings domain object to a UserSettingsModel. + + Args: + user_settings: UserSettings. The user setting domain object to be + converted. + + Returns: + UserSettingsModel. The user settings model that was converted. """ user_settings.validate() @@ -612,11 +852,12 @@ def _save_user_settings(user_settings): user_settings_dict['id'] = user_settings.user_id user_model = user_models.UserSettingsModel(**user_settings_dict) - user_model.update_timestamps() - user_model.put() + return user_model -def _get_user_settings_from_model(user_settings_model): +def _get_user_settings_from_model( + user_settings_model: user_models.UserSettingsModel +) -> user_domain.UserSettings: """Transform user settings storage model to domain object. 
Args: @@ -656,14 +897,18 @@ def _get_user_settings_from_model(user_settings_model): user_settings_model.preferred_site_language_code), preferred_audio_language_code=( user_settings_model.preferred_audio_language_code), + preferred_translation_language_code=( + user_settings_model.preferred_translation_language_code), pin=user_settings_model.pin, display_alias=user_settings_model.display_alias, deleted=user_settings_model.deleted, - created_on=user_settings_model.created_on + created_on=user_settings_model.created_on, + has_viewed_lesson_info_modal_once=( + user_settings_model.has_viewed_lesson_info_modal_once) ) -def is_user_registered(user_id): +def is_user_registered(user_id: str) -> bool: """Checks if a user is registered with the given user_id. Args: @@ -672,13 +917,11 @@ def is_user_registered(user_id): Returns: bool. Whether a user with the given user_id is registered. """ - if user_id is None: - return False user_settings = user_models.UserSettingsModel.get(user_id, strict=False) return bool(user_settings) -def has_ever_registered(user_id): +def has_ever_registered(user_id: str) -> bool: """Checks if a user has ever been registered with given user_id. Args: @@ -691,7 +934,7 @@ def has_ever_registered(user_id): return bool(user_settings.username and user_settings.last_agreed_to_terms) -def has_fully_registered_account(user_id): +def has_fully_registered_account(user_id: str) -> bool: """Checks if a user has fully registered. Args: @@ -700,16 +943,22 @@ def has_fully_registered_account(user_id): Returns: bool. Whether a user with the given user_id has fully registered. 
""" - if user_id is None: + user_settings = get_user_settings(user_id, strict=False) + + if user_settings is None: return False - user_settings = get_user_settings(user_id, strict=True) - return user_settings.username and user_settings.last_agreed_to_terms and ( - user_settings.last_agreed_to_terms >= - feconf.REGISTRATION_PAGE_LAST_UPDATED_UTC) + return bool( + user_settings.username and user_settings.last_agreed_to_terms and ( + user_settings.last_agreed_to_terms >= + feconf.TERMS_PAGE_LAST_UPDATED_UTC + ) + ) -def get_all_profiles_auth_details_by_parent_user_id(parent_user_id): +def get_all_profiles_auth_details_by_parent_user_id( + parent_user_id: str +) -> List[auth_domain.UserAuthDetails]: """Gets domain objects representing the auth details for all profiles associated with the user having the given parent_user_id. @@ -736,7 +985,7 @@ def get_all_profiles_auth_details_by_parent_user_id(parent_user_id): ] -def create_new_user(auth_id, email): +def create_new_user(auth_id: str, email: str) -> user_domain.UserSettings: """Creates a new user and commits it to the datastore. Args: @@ -755,14 +1004,16 @@ def create_new_user(auth_id, email): user_settings.user_id, auth_id)) user_id = user_models.UserSettingsModel.get_new_id('') user_settings = user_domain.UserSettings( - user_id, email, [feconf.ROLE_ID_FULL_USER], False, + user_id, email, [feconf.ROLE_ID_FULL_USER], False, False, preferred_language_codes=[constants.DEFAULT_LANGUAGE_CODE]) _create_new_user_transactional(auth_id, user_settings) return user_settings @transaction_services.run_in_transaction_wrapper -def _create_new_user_transactional(auth_id, user_settings): +def _create_new_user_transactional( + auth_id: str, user_settings: user_domain.UserSettings +) -> None: """Save user models for new users as a transaction. Args: @@ -770,13 +1021,20 @@ def _create_new_user_transactional(auth_id, user_settings): user_settings: UserSettings. The user settings domain object corresponding to the newly created user. 
""" - _save_user_settings(user_settings) - create_user_contributions(user_settings.user_id, [], []) + save_user_settings(user_settings) + user_contributions = get_or_create_new_user_contributions( + user_settings.user_id + ) + save_user_contributions(user_contributions) auth_services.associate_auth_id_with_user_id( auth_domain.AuthIdUserIdPair(auth_id, user_settings.user_id)) -def create_new_profiles(auth_id, email, modifiable_user_data_list): +def create_new_profiles( + auth_id: str, + email: str, + modifiable_user_data_list: List[user_domain.ModifiableUserData] +) -> List[user_domain.UserSettings]: """Creates new profiles for the users specified in the modifiable_user_data_list and commits them to the datastore. @@ -810,7 +1068,7 @@ def create_new_profiles(auth_id, email, modifiable_user_data_list): raise Exception('User id cannot already exist for a new user.') user_id = user_models.UserSettingsModel.get_new_id() user_settings = user_domain.UserSettings( - user_id, email, [feconf.ROLE_ID_MOBILE_LEARNER], False, + user_id, email, [feconf.ROLE_ID_MOBILE_LEARNER], False, False, preferred_language_codes=[constants.DEFAULT_LANGUAGE_CODE], pin=modifiable_user_data.pin) user_settings.populate_from_modifiable_user_data(modifiable_user_data) @@ -829,7 +1087,10 @@ def create_new_profiles(auth_id, email, modifiable_user_data_list): @transaction_services.run_in_transaction_wrapper -def _create_new_profile_transactional(user_settings, user_auth_details): +def _create_new_profile_transactional( + user_settings: user_domain.UserSettings, + user_auth_details: auth_domain.UserAuthDetails +) -> None: """Save user models for new users as a transaction. Args: @@ -838,11 +1099,13 @@ def _create_new_profile_transactional(user_settings, user_auth_details): user_auth_details: UserAuthDetails. The user auth details domain object corresponding to the newly created list of users. 
""" - _save_user_settings(user_settings) + save_user_settings(user_settings) _save_user_auth_details(user_auth_details) -def update_multiple_users_data(modifiable_user_data_list): +def update_multiple_users_data( + modifiable_user_data_list: List[user_domain.ModifiableUserData] +) -> None: """Updates user settings and user auth model details for the users specified in the modifiable_user_data_list. @@ -857,22 +1120,26 @@ def update_multiple_users_data(modifiable_user_data_list): not found. """ user_ids = [user.user_id for user in modifiable_user_data_list] - user_settings_list = get_users_settings(user_ids) + user_settings_list_with_none = get_users_settings(user_ids, strict=False) + user_settings_list = [] user_auth_details_list = get_multiple_user_auth_details(user_ids) - for modifiable_user_data, user_settings in python_utils.ZIP( - modifiable_user_data_list, user_settings_list): + for modifiable_user_data, user_settings in zip( + modifiable_user_data_list, user_settings_list_with_none): user_id = modifiable_user_data.user_id if user_id is None: raise Exception('Missing user ID.') if not user_settings: raise Exception('User not found.') user_settings.populate_from_modifiable_user_data(modifiable_user_data) + user_settings_list.append(user_settings) _save_existing_users_settings(user_settings_list) _save_existing_users_auth_details(user_auth_details_list) -def _save_existing_users_settings(user_settings_list): +def _save_existing_users_settings( + user_settings_list: List[user_domain.UserSettings] +) -> None: """Commits a list of existing users' UserSettings objects to the datastore. Args: @@ -880,18 +1147,24 @@ def _save_existing_users_settings(user_settings_list): objects to be saved. 
""" user_ids = [user.user_id for user in user_settings_list] - user_settings_models = user_models.UserSettingsModel.get_multi( + user_settings_models_with_none = user_models.UserSettingsModel.get_multi( user_ids, include_deleted=True) - for user_model, user_settings in python_utils.ZIP( - user_settings_models, user_settings_list): + user_settings_models = [] + for user_model, user_settings in zip( + user_settings_models_with_none, user_settings_list): + # Ruling out the possibility of None for mypy type checking. + assert user_model is not None user_settings.validate() user_model.populate(**user_settings.to_dict()) + user_settings_models.append(user_model) user_models.UserSettingsModel.update_timestamps_multi(user_settings_models) user_models.UserSettingsModel.put_multi(user_settings_models) -def _save_existing_users_auth_details(user_auth_details_list): +def _save_existing_users_auth_details( + user_auth_details_list: List[auth_domain.UserAuthDetails] +) -> None: """Commits a list of existing users' UserAuthDetails objects to the datastore. @@ -900,17 +1173,26 @@ def _save_existing_users_auth_details(user_auth_details_list): UserAuthDetails objects to be saved. """ user_ids = [user.user_id for user in user_auth_details_list] - user_auth_models = auth_models.UserAuthDetailsModel.get_multi( + user_auth_models_with_none = auth_models.UserAuthDetailsModel.get_multi( user_ids, include_deleted=True) - for user_auth_details_model, user_auth_details in python_utils.ZIP( - user_auth_models, user_auth_details_list): + user_auth_models = [] + for user_auth_details_model, user_auth_details in zip( + user_auth_models_with_none, user_auth_details_list): + # Ruling out the possibility of None for mypy type checking. 
+ assert user_auth_details_model is not None user_auth_details.validate() user_auth_details_model.populate(**user_auth_details.to_dict()) - auth_models.UserAuthDetailsModel.update_timestamps_multi(user_auth_models) + user_auth_models.append(user_auth_details_model) + + auth_models.UserAuthDetailsModel.update_timestamps_multi( + user_auth_models + ) auth_models.UserAuthDetailsModel.put_multi(user_auth_models) -def _save_user_auth_details(user_auth_details): +def _save_user_auth_details( + user_auth_details: auth_domain.UserAuthDetails +) -> None: """Commits a user auth details object to the datastore. Args: @@ -935,7 +1217,9 @@ def _save_user_auth_details(user_auth_details): model.put() -def get_multiple_user_auth_details(user_ids): +def get_multiple_user_auth_details( + user_ids: List[Optional[str]] +) -> List[auth_domain.UserAuthDetails]: """Gets domain objects representing the auth details for the given user_ids. @@ -944,9 +1228,8 @@ def get_multiple_user_auth_details(user_ids): the user auth details. Returns: - list(UserAuthDetails|None). The UserAuthDetails domain objects - corresponding to the given user ids. If the given user_id does not - exist, the corresponding entry in the returned list is None. + list(UserAuthDetails). The UserAuthDetails domain objects + corresponding to the given user ids. """ user_settings_models = auth_models.UserAuthDetailsModel.get_multi(user_ids) return [ @@ -955,7 +1238,27 @@ def get_multiple_user_auth_details(user_ids): ] -def get_auth_details_by_user_id(user_id, strict=False): +@overload +def get_auth_details_by_user_id( + user_id: str, *, strict: Literal[True] +) -> auth_domain.UserAuthDetails: ... + + +@overload +def get_auth_details_by_user_id( + user_id: str +) -> Optional[auth_domain.UserAuthDetails]: ... + + +@overload +def get_auth_details_by_user_id( + user_id: str, *, strict: Literal[False] +) -> Optional[auth_domain.UserAuthDetails]: ... 
+ + +def get_auth_details_by_user_id( + user_id: str, strict: bool = False +) -> Optional[auth_domain.UserAuthDetails]: """Return the user auth details for a single user. Args: @@ -983,7 +1286,7 @@ def get_auth_details_by_user_id(user_id, strict=False): return None -def get_pseudonymous_username(pseudonymous_id): +def get_pseudonymous_username(pseudonymous_id: str) -> str: """Get the username from pseudonymous ID. Args: @@ -998,7 +1301,7 @@ def get_pseudonymous_username(pseudonymous_id): pseudonymous_id[-8].upper(), pseudonymous_id[-7:]) -def get_username(user_id): +def get_username(user_id: str) -> str: """Gets username corresponding to the given user_id. Args: @@ -1010,7 +1313,27 @@ def get_username(user_id): return get_usernames([user_id], strict=True)[0] -def get_usernames(user_ids, strict=False): +@overload +def get_usernames( + user_ids: List[str], *, strict: Literal[True] +) -> Sequence[str]: ... + + +@overload +def get_usernames( + user_ids: List[str] +) -> Sequence[Optional[str]]: ... + + +@overload +def get_usernames( + user_ids: List[str], *, strict: Literal[False] +) -> Sequence[Optional[str]]: ... + + +def get_usernames( + user_ids: List[str], strict: bool = False +) -> Sequence[Optional[str]]: """Gets usernames corresponding to the given user_ids. Args: @@ -1024,7 +1347,7 @@ def get_usernames(user_ids, strict=False): returned list is None. Can also return username of pseudonymized user or a temporary username of user that is being deleted. """ - usernames = [None] * len(user_ids) + usernames: List[Optional[str]] = [None] * len(user_ids) non_system_user_indices = [] non_system_user_ids = [] for index, user_id in enumerate(user_ids): @@ -1048,7 +1371,7 @@ def get_usernames(user_ids, strict=False): return usernames -def set_username(user_id, new_username): +def set_username(user_id: str, new_username: str) -> None: """Updates the username of the user with the given user_id. 
Args: @@ -1066,10 +1389,10 @@ def set_username(user_id, new_username): 'Sorry, the username \"%s\" is already taken! Please pick ' 'a different one.' % new_username) user_settings.username = new_username - _save_user_settings(user_settings) + save_user_settings(user_settings) -def record_agreement_to_terms(user_id): +def record_agreement_to_terms(user_id: str) -> None: """Records that the user with given user_id has agreed to the license terms. Args: @@ -1077,10 +1400,12 @@ def record_agreement_to_terms(user_id): """ user_settings = get_user_settings(user_id, strict=True) user_settings.last_agreed_to_terms = datetime.datetime.utcnow() - _save_user_settings(user_settings) + save_user_settings(user_settings) -def update_profile_picture_data_url(user_id, profile_picture_data_url): +def update_profile_picture_data_url( + user_id: str, profile_picture_data_url: str +) -> None: """Updates profile_picture_data_url of user with given user_id. Args: @@ -1089,10 +1414,10 @@ def update_profile_picture_data_url(user_id, profile_picture_data_url): """ user_settings = get_user_settings(user_id, strict=True) user_settings.profile_picture_data_url = profile_picture_data_url - _save_user_settings(user_settings) + save_user_settings(user_settings) -def update_user_bio(user_id, user_bio): +def update_user_bio(user_id: str, user_bio: str) -> None: """Updates user_bio of user with given user_id. Args: @@ -1101,10 +1426,12 @@ def update_user_bio(user_id, user_bio): """ user_settings = get_user_settings(user_id, strict=True) user_settings.user_bio = user_bio - _save_user_settings(user_settings) + save_user_settings(user_settings) -def update_user_default_dashboard(user_id, default_dashboard): +def update_user_default_dashboard( + user_id: str, default_dashboard: str +) -> None: """Updates the default dashboard of user with given user id. 
Args: @@ -1113,11 +1440,12 @@ def update_user_default_dashboard(user_id, default_dashboard): """ user_settings = get_user_settings(user_id, strict=True) user_settings.default_dashboard = default_dashboard - _save_user_settings(user_settings) + save_user_settings(user_settings) def update_user_creator_dashboard_display( - user_id, creator_dashboard_display_pref): + user_id: str, creator_dashboard_display_pref: str +) -> None: """Updates the creator dashboard preference of user with given user id. Args: @@ -1128,10 +1456,12 @@ def update_user_creator_dashboard_display( user_settings = get_user_settings(user_id, strict=True) user_settings.creator_dashboard_display_pref = ( creator_dashboard_display_pref) - _save_user_settings(user_settings) + save_user_settings(user_settings) -def update_subject_interests(user_id, subject_interests): +def update_subject_interests( + user_id: str, subject_interests: List[str] +) -> None: """Updates subject_interests of user with given user_id. Args: @@ -1140,18 +1470,18 @@ def update_subject_interests(user_id, subject_interests): """ if not isinstance(subject_interests, list): raise utils.ValidationError('Expected subject_interests to be a list.') - else: - for interest in subject_interests: - if not isinstance(interest, str): - raise utils.ValidationError( - 'Expected each subject interest to be a string.') - elif not interest: - raise utils.ValidationError( - 'Expected each subject interest to be non-empty.') - elif not re.match(constants.TAG_REGEX, interest): - raise utils.ValidationError( - 'Expected each subject interest to consist only of ' - 'lowercase alphabetic characters and spaces.') + + for interest in subject_interests: + if not isinstance(interest, str): + raise utils.ValidationError( + 'Expected each subject interest to be a string.') + if not interest: + raise utils.ValidationError( + 'Expected each subject interest to be non-empty.') + if not re.match(constants.TAG_REGEX, interest): + raise utils.ValidationError( + 
'Expected each subject interest to consist only of ' + 'lowercase alphabetic characters and spaces.') if len(set(subject_interests)) != len(subject_interests): raise utils.ValidationError( @@ -1159,38 +1489,12 @@ def update_subject_interests(user_id, subject_interests): user_settings = get_user_settings(user_id, strict=True) user_settings.subject_interests = subject_interests - _save_user_settings(user_settings) - + save_user_settings(user_settings) -def _update_first_contribution_msec(user_id, first_contribution_msec): - """Updates first_contribution_msec of user with given user_id. - Args: - user_id: str. The unique ID of the user. - first_contribution_msec: float. New time to set in milliseconds - representing user's first contribution to Oppia. - """ - user_settings = get_user_settings(user_id, strict=True) - user_settings.first_contribution_msec = first_contribution_msec - _save_user_settings(user_settings) - - -def update_first_contribution_msec_if_not_set(user_id, first_contribution_msec): - """Updates first_contribution_msec of user with given user_id - if it is set to None. - - Args: - user_id: str. The unique ID of the user. - first_contribution_msec: float. New time to set in milliseconds - representing user's first contribution to Oppia. - """ - user_settings = get_user_settings(user_id, strict=True) - if user_settings.first_contribution_msec is None: - _update_first_contribution_msec( - user_id, first_contribution_msec) - - -def update_preferred_language_codes(user_id, preferred_language_codes): +def update_preferred_language_codes( + user_id: str, preferred_language_codes: List[str] +) -> None: """Updates preferred_language_codes of user with given user_id. 
Args: @@ -1200,10 +1504,12 @@ def update_preferred_language_codes(user_id, preferred_language_codes): """ user_settings = get_user_settings(user_id, strict=True) user_settings.preferred_language_codes = preferred_language_codes - _save_user_settings(user_settings) + save_user_settings(user_settings) -def update_preferred_site_language_code(user_id, preferred_site_language_code): +def update_preferred_site_language_code( + user_id: str, preferred_site_language_code: str +) -> None: """Updates preferred_site_language_code of user with given user_id. Args: @@ -1214,11 +1520,12 @@ def update_preferred_site_language_code(user_id, preferred_site_language_code): user_settings = get_user_settings(user_id, strict=True) user_settings.preferred_site_language_code = ( preferred_site_language_code) - _save_user_settings(user_settings) + save_user_settings(user_settings) def update_preferred_audio_language_code( - user_id, preferred_audio_language_code): + user_id: str, preferred_audio_language_code: str +) -> None: """Updates preferred_audio_language_code of user with given user_id. Args: @@ -1229,10 +1536,27 @@ def update_preferred_audio_language_code( user_settings = get_user_settings(user_id, strict=True) user_settings.preferred_audio_language_code = ( preferred_audio_language_code) - _save_user_settings(user_settings) + save_user_settings(user_settings) + + +def update_preferred_translation_language_code( + user_id: str, preferred_translation_language_code: str +) -> None: + """Updates preferred_translation_language_code of user with + given user_id. + + Args: + user_id: str. The unique ID of the user. + preferred_translation_language_code: str. New text translation + language preference to set. 
+ """ + user_settings = get_user_settings(user_id, strict=True) + user_settings.preferred_translation_language_code = ( + preferred_translation_language_code) + save_user_settings(user_settings) -def add_user_role(user_id, role): +def add_user_role(user_id: str, role: str) -> None: """Updates the roles of the user with given user_id. Args: @@ -1247,16 +1571,15 @@ def add_user_role(user_id, role): raise Exception('The role of a Mobile Learner cannot be changed.') if role in feconf.ALLOWED_DEFAULT_USER_ROLES_ON_REGISTRATION: raise Exception('Adding a %s role is not allowed.' % role) - user_settings.roles.append(role) role_services.log_role_query( user_id, feconf.ROLE_ACTION_ADD, role=role, username=user_settings.username) - _save_user_settings(user_settings) + save_user_settings(user_settings) -def remove_user_role(user_id, role): +def remove_user_role(user_id: str, role: str) -> None: """Updates the roles of the user with given user_id. Args: @@ -1278,10 +1601,10 @@ def remove_user_role(user_id, role): user_id, feconf.ROLE_ACTION_REMOVE, role=role, username=user_settings.username) - _save_user_settings(user_settings) + save_user_settings(user_settings) -def mark_user_for_deletion(user_id): +def mark_user_for_deletion(user_id: str) -> None: """Set the 'deleted' property of the user with given user_id to True. Args: @@ -1289,7 +1612,7 @@ def mark_user_for_deletion(user_id): """ user_settings = get_user_settings(user_id, strict=True) user_settings.deleted = True - _save_user_settings(user_settings) + save_user_settings(user_settings) user_auth_details = auth_services.get_user_auth_details_from_model( auth_models.UserAuthDetailsModel.get(user_id)) user_auth_details.deleted = True @@ -1297,7 +1620,7 @@ def mark_user_for_deletion(user_id): auth_services.mark_user_for_deletion(user_id) -def save_deleted_username(normalized_username): +def save_deleted_username(normalized_username: str) -> None: """Save the username of deleted user. 
Args: @@ -1313,7 +1636,9 @@ def save_deleted_username(normalized_username): deleted_user_model.put() -def get_human_readable_user_ids(user_ids, strict=True): +def get_human_readable_user_ids( + user_ids: List[str], strict: bool = True +) -> List[str]: """Converts the given ids to usernames, or truncated email addresses. Requires all users to be known. @@ -1352,7 +1677,7 @@ def get_human_readable_user_ids(user_ids, strict=True): return usernames -def record_user_started_state_editor_tutorial(user_id): +def record_user_started_state_editor_tutorial(user_id: str) -> None: """Updates last_started_state_editor_tutorial to the current datetime for the user with given user_id. @@ -1362,10 +1687,10 @@ def record_user_started_state_editor_tutorial(user_id): user_settings = get_user_settings(user_id, strict=True) user_settings.last_started_state_editor_tutorial = ( datetime.datetime.utcnow()) - _save_user_settings(user_settings) + save_user_settings(user_settings) -def record_user_started_state_translation_tutorial(user_id): +def record_user_started_state_translation_tutorial(user_id: str) -> None: """Updates last_started_state_translation_tutorial to the current datetime for the user with given user_id. @@ -1375,10 +1700,10 @@ def record_user_started_state_translation_tutorial(user_id): user_settings = get_user_settings(user_id, strict=True) user_settings.last_started_state_translation_tutorial = ( datetime.datetime.utcnow()) - _save_user_settings(user_settings) + save_user_settings(user_settings) -def record_user_logged_in(user_id): +def record_user_logged_in(user_id: str) -> None: """Updates last_logged_in to the current datetime for the user with given user_id. 
@@ -1388,39 +1713,49 @@ def record_user_logged_in(user_id): user_settings = get_user_settings(user_id, strict=True) user_settings.last_logged_in = datetime.datetime.utcnow() - _save_user_settings(user_settings) + save_user_settings(user_settings) -def record_user_edited_an_exploration(user_id): - """Updates last_edited_an_exploration to the current datetime for +def record_user_created_an_exploration(user_id: str) -> None: + """Updates last_created_an_exploration to the current datetime for the user with given user_id. Args: user_id: str. The unique ID of the user. """ - user_settings = get_user_settings(user_id) - if user_settings: - user_settings.last_edited_an_exploration = datetime.datetime.utcnow() - _save_user_settings(user_settings) + user_settings = get_user_settings(user_id, strict=False) + if user_settings is not None: + user_settings.last_created_an_exploration = datetime.datetime.utcnow() + save_user_settings(user_settings) -def record_user_created_an_exploration(user_id): - """Updates last_created_an_exploration to the current datetime for - the user with given user_id. +def add_user_to_mailing_list(email: str, name: str, tag: str) -> bool: + """Adds user to the bulk email provider with the relevant tag and required + merge fields. Args: - user_id: str. The unique ID of the user. + email: str. Email of the user. + name: str. Name of the user. + tag: str. Tag for the mailing list. + + Returns: + bool. Whether the operation was successful or not. 
""" - user_settings = get_user_settings(user_id) - if user_settings: - user_settings.last_created_an_exploration = datetime.datetime.utcnow() - _save_user_settings(user_settings) + merge_fields = { + 'NAME': name + } + return bulk_email_services.add_or_update_user_status( + email, merge_fields, tag, can_receive_email_updates=True) def update_email_preferences( - user_id, can_receive_email_updates, can_receive_editor_role_email, - can_receive_feedback_email, can_receive_subscription_email, - bulk_email_db_already_updated=False): + user_id: str, + can_receive_email_updates: bool, + can_receive_editor_role_email: bool, + can_receive_feedback_email: bool, + can_receive_subscription_email: bool, + bulk_email_db_already_updated: bool = False +) -> bool: """Updates whether the user has chosen to receive email updates. If no UserEmailPreferencesModel exists for this user, a new one will @@ -1463,7 +1798,8 @@ def update_email_preferences( if not bulk_email_db_already_updated and feconf.CAN_SEND_EMAILS: user_creation_successful = ( bulk_email_services.add_or_update_user_status( - email, can_receive_email_updates)) + email, {}, 'Account', + can_receive_email_updates=can_receive_email_updates)) if not user_creation_successful: email_preferences_model.site_updates = False email_preferences_model.update_timestamps() @@ -1475,7 +1811,7 @@ def update_email_preferences( return False -def get_email_preferences(user_id): +def get_email_preferences(user_id: str) -> user_domain.UserGlobalPrefs: """Gives email preferences of user with given user_id. Args: @@ -1497,7 +1833,9 @@ def get_email_preferences(user_id): email_preferences_model.subscription_notifications) -def get_users_email_preferences(user_ids): +def get_users_email_preferences( + user_ids: List[str] +) -> List[user_domain.UserGlobalPrefs]: """Get email preferences for the list of users. 
Args: @@ -1527,8 +1865,11 @@ def get_users_email_preferences(user_ids): def set_email_preferences_for_exploration( - user_id, exploration_id, mute_feedback_notifications=None, - mute_suggestion_notifications=None): + user_id: str, + exploration_id: str, + mute_feedback_notifications: Optional[bool] = None, + mute_suggestion_notifications: Optional[bool] = None +) -> None: """Sets mute preferences for exploration with given exploration_id of user with given user_id. @@ -1558,7 +1899,9 @@ def set_email_preferences_for_exploration( exploration_user_model.put() -def get_email_preferences_for_exploration(user_id, exploration_id): +def get_email_preferences_for_exploration( + user_id: str, exploration_id: str +) -> user_domain.UserExplorationPrefs: """Gives mute preferences for exploration with given exploration_id of user with given user_id. @@ -1581,7 +1924,9 @@ def get_email_preferences_for_exploration(user_id, exploration_id): exploration_user_model.mute_suggestion_notifications) -def get_users_email_preferences_for_exploration(user_ids, exploration_id): +def get_users_email_preferences_for_exploration( + user_ids: List[str], exploration_id: str +) -> List[user_domain.UserExplorationPrefs]: """Gives mute preferences for exploration with given exploration_id of user with given user_id. @@ -1594,9 +1939,11 @@ def get_users_email_preferences_for_exploration(user_ids, exploration_id): list(UserExplorationPrefs). Representing whether the users has chosen to receive email updates for particular exploration. 
""" + user_id_exp_id_combinations = list( + itertools.product(user_ids, [exploration_id])) exploration_user_models = ( user_models.ExplorationUserDataModel.get_multi( - user_ids, exploration_id)) + user_id_exp_id_combinations)) result = [] for exploration_user_model in exploration_user_models: @@ -1611,7 +1958,27 @@ def get_users_email_preferences_for_exploration(user_ids, exploration_id): return result -def get_user_contributions(user_id, strict=False): +@overload +def get_user_contributions( + user_id: str, *, strict: Literal[True] +) -> user_domain.UserContributions: ... + + +@overload +def get_user_contributions( + user_id: str +) -> Optional[user_domain.UserContributions]: ... + + +@overload +def get_user_contributions( + user_id: str, *, strict: Literal[False] +) -> Optional[user_domain.UserContributions]: ... + + +def get_user_contributions( + user_id: str, strict: bool = False +) -> Optional[user_domain.UserContributions]: """Gets domain object representing the contributions for the given user_id. Args: @@ -1625,52 +1992,57 @@ def get_user_contributions(user_id, strict=False): object. """ model = user_models.UserContributionsModel.get(user_id, strict=strict) - if model is not None: - result = user_domain.UserContributions( - model.id, model.created_exploration_ids, - model.edited_exploration_ids) - else: - result = None + if model is None: + return None + + result = user_domain.UserContributions( + model.id, model.created_exploration_ids, + model.edited_exploration_ids) + return result -def create_user_contributions( - user_id, created_exploration_ids, edited_exploration_ids): - """Creates a new UserContributionsModel and returns the domain object. - Note: This does not create a contributions model if the user is - OppiaMigrationBot. +def get_or_create_new_user_contributions( + user_id: str +) -> user_domain.UserContributions: + """Gets domain object representing the contributions for the given user_id. + If the domain object does not exist, it is created. 
Args: user_id: str. The unique ID of the user. - created_exploration_ids: list(str). IDs of explorations that this - user has created. - edited_exploration_ids: list(str). IDs of explorations that this - user has edited. Returns: - UserContributions|None. The domain object representing the newly-created - UserContributionsModel. If the user id is for oppia migration bot, None - is returned. - - Raises: - Exception. The UserContributionsModel for the given user_id already - exists. + UserContributions. The UserContributions domain object corresponding to + the given user_id. """ - if user_id == feconf.MIGRATION_BOT_USER_ID: - return None user_contributions = get_user_contributions(user_id, strict=False) - if user_contributions: - raise Exception( - 'User contributions model for user %s already exists.' % user_id) - else: + if user_contributions is None: user_contributions = user_domain.UserContributions( - user_id, created_exploration_ids, edited_exploration_ids) - _save_user_contributions(user_contributions) + user_id, [], []) return user_contributions +def save_user_contributions( + user_contributions: user_domain.UserContributions +) -> None: + """Saves a user contributions object to the datastore. + + Args: + user_contributions: UserContributions. The user contributions object to + be saved. + """ + user_contributions_model = get_validated_user_contributions_model( + user_contributions + ) + user_contributions_model.update_timestamps() + user_contributions_model.put() + + def update_user_contributions( - user_id, created_exploration_ids, edited_exploration_ids): + user_id: str, + created_exploration_ids: List[str], + edited_exploration_ids: List[str] +) -> None: """Updates an existing UserContributionsModel with new calculated contributions. 
@@ -1693,62 +2065,35 @@ def update_user_contributions( user_contributions.created_exploration_ids = created_exploration_ids user_contributions.edited_exploration_ids = edited_exploration_ids - _save_user_contributions(user_contributions) - - -def add_created_exploration_id(user_id, exploration_id): - """Adds an exploration_id to a user_id's UserContributionsModel collection - of created explorations. - - Args: - user_id: str. The unique ID of the user. - exploration_id: str. The exploration id. - """ - user_contributions = get_user_contributions(user_id, strict=False) - - if not user_contributions: - create_user_contributions(user_id, [exploration_id], []) - elif exploration_id not in user_contributions.created_exploration_ids: - user_contributions.created_exploration_ids.append(exploration_id) - user_contributions.created_exploration_ids.sort() - _save_user_contributions(user_contributions) - - -def add_edited_exploration_id(user_id, exploration_id): - """Adds an exploration_id to a user_id's UserContributionsModel collection - of edited explorations. - - Args: - user_id: str. The unique ID of the user. - exploration_id: str. The exploration id. - """ - user_contributions = get_user_contributions(user_id, strict=False) - - if not user_contributions: - create_user_contributions(user_id, [], [exploration_id]) + get_validated_user_contributions_model(user_contributions).put() - elif exploration_id not in user_contributions.edited_exploration_ids: - user_contributions.edited_exploration_ids.append(exploration_id) - user_contributions.edited_exploration_ids.sort() - _save_user_contributions(user_contributions) +def get_validated_user_contributions_model( + user_contributions: user_domain.UserContributions +) -> user_models.UserContributionsModel: + """Constructs a valid UserContributionsModel from the given domain object. -def _save_user_contributions(user_contributions): - """Commits a user contributions object to the datastore. 
+ This function does not save anything to the datastore. Args: user_contributions: UserContributions. Value object representing a user's contributions. + + Returns: + UserContributionsModel. The UserContributionsModel object that was + updated. """ user_contributions.validate() - user_models.UserContributionsModel( + return user_models.UserContributionsModel( id=user_contributions.user_id, created_exploration_ids=user_contributions.created_exploration_ids, edited_exploration_ids=user_contributions.edited_exploration_ids, - ).put() + ) -def migrate_dashboard_stats_to_latest_schema(versioned_dashboard_stats): +def migrate_dashboard_stats_to_latest_schema( + versioned_dashboard_stats: user_models.UserStatsModel +) -> None: """Holds responsibility of updating the structure of dashboard stats. Args: @@ -1766,7 +2111,7 @@ def migrate_dashboard_stats_to_latest_schema(versioned_dashboard_stats): 'present.' % feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION) -def get_current_date_as_string(): +def get_current_date_as_string() -> str: """Gets the current date. Returns: @@ -1776,7 +2121,7 @@ def get_current_date_as_string(): feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT) -def parse_date_from_string(datetime_str): +def parse_date_from_string(datetime_str: str) -> Dict[str, int]: """Parses the given string, and returns the year, month and day of the date that it represents. @@ -1795,7 +2140,7 @@ def parse_date_from_string(datetime_str): } -def get_user_impact_score(user_id): +def get_user_impact_score(user_id: str) -> float: """Gets the user impact score for the given user_id. Args: @@ -1808,12 +2153,18 @@ def get_user_impact_score(user_id): model = user_models.UserStatsModel.get(user_id, strict=False) if model: - return model.impact_score + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. 
+ impact_score: float = model.impact_score + return impact_score else: return 0 -def get_weekly_dashboard_stats(user_id): +def get_weekly_dashboard_stats( + user_id: str +) -> List[Dict[str, DashboardStatsDict]]: """Gets weekly dashboard stats for a given user_id. Args: @@ -1849,12 +2200,20 @@ def get_weekly_dashboard_stats(user_id): """ model = user_models.UserStatsModel.get(user_id, strict=False) if model and model.weekly_creator_stats_list: - return model.weekly_creator_stats_list + # TODO(#15621): The explicit declaration of type for ndb properties + # should be removed. Currently, these ndb properties are annotated with + # Any return type. Once we have proper return type we can remove this. + weekly_creator_stats_list: List[ + Dict[str, DashboardStatsDict] + ] = model.weekly_creator_stats_list + return weekly_creator_stats_list else: - return None + return [] -def get_last_week_dashboard_stats(user_id): +def get_last_week_dashboard_stats( + user_id: str +) -> Optional[Dict[str, DashboardStatsDict]]: """Gets last week's dashboard stats for a given user_id. Args: @@ -1878,7 +2237,7 @@ def get_last_week_dashboard_stats(user_id): return None -def update_dashboard_stats_log(user_id): +def update_dashboard_stats_log(user_id: str) -> None: """Save statistics for creator dashboard of a user by appending to a list keyed by a datetime string. @@ -1902,7 +2261,7 @@ def update_dashboard_stats_log(user_id): model.put() -def is_moderator(user_id): +def is_moderator(user_id: str) -> bool: """Checks if a user with given user_id is a moderator. Args: @@ -1914,7 +2273,7 @@ def is_moderator(user_id): return feconf.ROLE_ID_MODERATOR in get_user_roles_from_id(user_id) -def is_curriculum_admin(user_id): +def is_curriculum_admin(user_id: str) -> bool: """Checks if a user with given user_id is an admin. 
Args: @@ -1926,7 +2285,7 @@ def is_curriculum_admin(user_id): return feconf.ROLE_ID_CURRICULUM_ADMIN in get_user_roles_from_id(user_id) -def is_topic_manager(user_id): +def is_topic_manager(user_id: str) -> bool: """Checks if a user with given user_id is a topic manager. Args: @@ -1938,7 +2297,9 @@ def is_topic_manager(user_id): return feconf.ROLE_ID_TOPIC_MANAGER in get_user_roles_from_id(user_id) -def can_review_translation_suggestions(user_id, language_code=None): +def can_review_translation_suggestions( + user_id: str, language_code: Optional[str] = None +) -> bool: """Returns whether the user can review translation suggestions in any language or in the given language. @@ -1963,32 +2324,7 @@ def can_review_translation_suggestions(user_id, language_code=None): return bool(reviewable_language_codes) -def can_review_voiceover_applications(user_id, language_code=None): - """Returns whether the user can review voiceover applications in any - language or in the given language. - - NOTE: If the language_code is provided then this method will check whether - the user can review voiceover in the given language code else it will - check whether the user can review in any language. - - Args: - user_id: str. The unique ID of the user. - language_code: str. The code of the language. - - Returns: - bool. Whether the user can review voiceover applications in any language - or in the given language. - """ - user_contribution_rights = get_user_contribution_rights(user_id) - reviewable_language_codes = ( - user_contribution_rights.can_review_voiceover_for_language_codes) - if language_code is not None: - return language_code in reviewable_language_codes - else: - return bool(reviewable_language_codes) - - -def can_review_question_suggestions(user_id): +def can_review_question_suggestions(user_id: str) -> bool: """Checks whether the user can review question suggestions. 
Args: @@ -2001,7 +2337,7 @@ def can_review_question_suggestions(user_id): return user_contribution_rights.can_review_questions -def can_submit_question_suggestions(user_id): +def can_submit_question_suggestions(user_id: str) -> bool: """Checks whether the user can submit question suggestions. Args: @@ -2014,7 +2350,9 @@ def can_submit_question_suggestions(user_id): return user_contribution_rights.can_submit_questions -def allow_user_to_review_translation_in_language(user_id, language_code): +def allow_user_to_review_translation_in_language( + user_id: str, language_code: str +) -> None: """Allows the user with the given user id to review translation in the given language_code. @@ -2027,14 +2365,16 @@ def allow_user_to_review_translation_in_language(user_id, language_code): user_contribution_rights = get_user_contribution_rights(user_id) allowed_language_codes = set( user_contribution_rights.can_review_translation_for_language_codes) - allowed_language_codes.add(language_code) + if language_code is not None: + allowed_language_codes.add(language_code) user_contribution_rights.can_review_translation_for_language_codes = ( sorted(list(allowed_language_codes))) _save_user_contribution_rights(user_contribution_rights) def remove_translation_review_rights_in_language( - user_id, language_code_to_remove): + user_id: str, language_code_to_remove: str +) -> None: """Removes the user's review rights to translation suggestions in the given language_code. @@ -2052,7 +2392,9 @@ def remove_translation_review_rights_in_language( _update_user_contribution_rights(user_contribution_rights) -def allow_user_to_review_voiceover_in_language(user_id, language_code): +def allow_user_to_review_voiceover_in_language( + user_id: str, language_code: str +) -> None: """Allows the user with the given user id to review voiceover applications in the given language_code. 
@@ -2071,7 +2413,9 @@ def allow_user_to_review_voiceover_in_language(user_id, language_code): _save_user_contribution_rights(user_contribution_rights) -def remove_voiceover_review_rights_in_language(user_id, language_code): +def remove_voiceover_review_rights_in_language( + user_id: str, language_code: str +) -> None: """Removes the user's review rights to voiceover applications in the given language_code. @@ -2087,7 +2431,7 @@ def remove_voiceover_review_rights_in_language(user_id, language_code): _update_user_contribution_rights(user_contribution_rights) -def allow_user_to_review_question(user_id): +def allow_user_to_review_question(user_id: str) -> None: """Allows the user with the given user id to review question suggestions. Args: @@ -2099,7 +2443,7 @@ def allow_user_to_review_question(user_id): _save_user_contribution_rights(user_contribution_rights) -def remove_question_review_rights(user_id): +def remove_question_review_rights(user_id: str) -> None: """Removes the user's review rights to question suggestions. Args: @@ -2111,7 +2455,7 @@ def remove_question_review_rights(user_id): _update_user_contribution_rights(user_contribution_rights) -def allow_user_to_submit_question(user_id): +def allow_user_to_submit_question(user_id: str) -> None: """Allows the user with the given user id to submit question suggestions. Args: @@ -2123,7 +2467,7 @@ def allow_user_to_submit_question(user_id): _save_user_contribution_rights(user_contribution_rights) -def remove_question_submit_rights(user_id): +def remove_question_submit_rights(user_id: str) -> None: """Removes the user's submit rights to question suggestions. Args: @@ -2135,7 +2479,7 @@ def remove_question_submit_rights(user_id): _update_user_contribution_rights(user_contribution_rights) -def remove_contribution_reviewer(user_id): +def remove_contribution_reviewer(user_id: str) -> None: """Deletes the UserContributionRightsModel corresponding to the given user_id. 
@@ -2156,7 +2500,9 @@ def remove_contribution_reviewer(user_id): user_contribution_rights_model.delete() -def get_contributor_usernames(category, language_code=None): +def get_contributor_usernames( + category: str, language_code: Optional[str] = None +) -> Sequence[str]: """Returns a list of usernames of users who has contribution rights of given category. @@ -2167,14 +2513,35 @@ def get_contributor_usernames(category, language_code=None): review category. Returns: - list(str). A list of usernames. + Sequence(str). A list of usernames. + + Raises: + Exception. The language code is not of None for question review + contribution. + Exception. Invalid category. + Exception. The language_code cannot be None if review category is + 'translation' or 'voiceover'. """ user_ids = [] + if ( + category in ( + constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION, + constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER + ) and language_code is None + ): + raise Exception( + 'The language_code cannot be None if review category is' + ' \'translation\' or \'voiceover\'.' + ) if category == constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION: + # Ruling out the possibility of None for mypy type checking. + assert language_code is not None user_ids = ( user_models.UserContributionRightsModel .get_translation_reviewer_user_ids(language_code)) elif category == constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER: + # Ruling out the possibility of None for mypy type checking. 
+ assert language_code is not None user_ids = ( user_models.UserContributionRightsModel .get_voiceover_reviewer_user_ids(language_code)) @@ -2192,10 +2559,15 @@ def get_contributor_usernames(category, language_code=None): else: raise Exception('Invalid category: %s' % category) - return get_usernames(user_ids) + usernames = get_usernames(user_ids, strict=True) + return usernames -def log_username_change(committer_id, old_username, new_username): +def log_username_change( + committer_id: str, + old_username: str, + new_username: str +) -> None: """Stores the query to role structure in UsernameChangeAuditModel. Args: @@ -2211,7 +2583,7 @@ def log_username_change(committer_id, old_username, new_username): new_username=new_username).put() -def create_login_url(return_url): +def create_login_url(return_url: str) -> str: """Creates a login url. Args: @@ -2223,7 +2595,7 @@ def create_login_url(return_url): return '/login?%s' % urllib.parse.urlencode({'return_url': return_url}) -def mark_user_banned(user_id): +def mark_user_banned(user_id: str) -> None: """Marks a user banned. Args: @@ -2231,10 +2603,10 @@ def mark_user_banned(user_id): """ user_settings = get_user_settings(user_id) user_settings.mark_banned() - _save_user_settings(user_settings) + save_user_settings(user_settings) -def unmark_user_banned(user_id): +def unmark_user_banned(user_id: str) -> None: """Unmarks a banned user. Args: @@ -2249,10 +2621,10 @@ def unmark_user_banned(user_id): feconf.ROLE_ID_MOBILE_LEARNER )) - _save_user_settings(user_settings) + save_user_settings(user_settings) -def get_dashboard_stats(user_id): +def get_dashboard_stats(user_id: str) -> DashboardStatsDict: """Returns the dashboard stats associated with the given user_id. 
Args: @@ -2282,3 +2654,329 @@ def get_dashboard_stats(user_id): 'num_ratings': num_ratings, 'average_ratings': average_ratings } + + +def get_checkpoints_in_order( + init_state_name: str, states: Dict[str, state_domain.State] +) -> List[str]: + """Returns the checkpoints of an exploration in sequential order by a + BFS traversal. + + Args: + init_state_name: str. The name of the first state of the exploration. + states: dict(state). All states of the exploration. + + Returns: + list(str). List of all checkpoints of the exploration in sequential + order. + + Raises: + Exception. States with a null destination can never be a checkpoint. + """ + queue = [init_state_name] + checkpoint_state_names = [] + visited_state_names = [] + while len(queue) > 0: + current_state_name = queue.pop() + if current_state_name not in visited_state_names: + visited_state_names.append(current_state_name) + current_state = states[current_state_name] + if ( + current_state.card_is_checkpoint and + current_state_name not in checkpoint_state_names + ): + checkpoint_state_names.append(current_state_name) + for answer_group in current_state.interaction.answer_groups: + if answer_group.outcome.dest is None: + raise Exception( + 'States with a null destination can never be a' + ' checkpoint.' + ) + queue.append(answer_group.outcome.dest) + + # Add the default outcome destination in the queue. + if current_state.interaction.default_outcome is not None: + if current_state.interaction.default_outcome.dest is None: + raise Exception( + 'States with a null destination can never be a' + ' checkpoint.' 
+ ) + queue.append(current_state.interaction.default_outcome.dest) + + return checkpoint_state_names + + +def get_most_distant_reached_checkpoint_in_current_exploration( + checkpoints_in_current_exploration: List[str], + checkpoints_in_older_exploration: List[str], + most_distant_reached_checkpoint_state_name_in_older_exploration: str +) -> Optional[str]: + """Returns the most distant reached checkpoint in current exploration after + comparing current exploration with older exploration. + + Args: + checkpoints_in_current_exploration: list(str). The checkpoints of + current exploration in sequential order. + checkpoints_in_older_exploration: list(str). The checkpoints + of older exploration in sequential order. + most_distant_reached_checkpoint_state_name_in_older_exploration: str. + The state name of the most distant reached checkpoint in the older + exploration. + + Returns: + str or None. The most distant checkpoint in current exploration or + None if most distant reached checkpoint of older exploration is not + present in current exploration. + """ + # Index of the most_distant_reached_checkpoint in the older exploration. + mdrc_index = ( + checkpoints_in_older_exploration.index( + most_distant_reached_checkpoint_state_name_in_older_exploration)) + + # Loop through checkpoints of furthest_reached_exploration backwards until + # a checkpoint is found that exists in current_exploration too. + while mdrc_index >= 0: + checkpoint_in_old_exp = checkpoints_in_older_exploration[mdrc_index] + if checkpoint_in_old_exp in checkpoints_in_current_exploration: + return checkpoint_in_old_exp + mdrc_index -= 1 + + return None + + +def update_learner_checkpoint_progress( + user_id: str, + exploration_id: str, + state_name: str, + exp_version: int +) -> None: + """Sets the furthest reached and most recently reached checkpoint in + an exploration by the user. + + Args: + user_id: str. The Id of the user. + exploration_id: str. The Id of the exploration. + state_name: str. 
The state name of the most recently reached checkpoint. + exp_version: int. The exploration version of the most recently reached + checkpoint. + """ + + exp_user_model = user_models.ExplorationUserDataModel.get( + user_id, exploration_id) + if exp_user_model is None: + exp_user_model = user_models.ExplorationUserDataModel.create( + user_id, exploration_id) + + current_exploration = exp_fetchers.get_exploration_by_id( + exploration_id, strict=True, version=exp_version + ) + + # If the exploration is being visited the first time. + if exp_user_model.furthest_reached_checkpoint_state_name is None: + exp_user_model.furthest_reached_checkpoint_exp_version = exp_version + exp_user_model.furthest_reached_checkpoint_state_name = state_name + elif exp_user_model.furthest_reached_checkpoint_exp_version < exp_version: + furthest_reached_checkpoint_exp = ( + exp_fetchers.get_exploration_by_id( + exploration_id, + strict=True, + version=exp_user_model.furthest_reached_checkpoint_exp_version + ) + ) + checkpoints_in_current_exp = get_checkpoints_in_order( + current_exploration.init_state_name, current_exploration.states) + checkpoints_in_older_exp = get_checkpoints_in_order( + furthest_reached_checkpoint_exp.init_state_name, + furthest_reached_checkpoint_exp.states) + + # Get the furthest reached checkpoint in current exploration. + furthest_reached_checkpoint_in_current_exp = ( + get_most_distant_reached_checkpoint_in_current_exploration( + checkpoints_in_current_exp, + checkpoints_in_older_exp, + exp_user_model.furthest_reached_checkpoint_state_name + ) + ) + + # If the furthest reached checkpoint doesn't exist in current + # exploration. + if furthest_reached_checkpoint_in_current_exp is None: + exp_user_model.furthest_reached_checkpoint_exp_version = ( + exp_version) + exp_user_model.furthest_reached_checkpoint_state_name = state_name + else: + # Index of the furthest reached checkpoint. 
+ frc_index = checkpoints_in_current_exp.index( + furthest_reached_checkpoint_in_current_exp) + # If furthest reached checkpoint is behind most recently + # reached checkpoint. + if frc_index <= checkpoints_in_current_exp.index(state_name): + exp_user_model.furthest_reached_checkpoint_exp_version = ( + exp_version) + exp_user_model.furthest_reached_checkpoint_state_name = ( + state_name) + + exp_user_model.most_recently_reached_checkpoint_exp_version = exp_version + exp_user_model.most_recently_reached_checkpoint_state_name = state_name + exp_user_model.update_timestamps() + exp_user_model.put() + + +def set_user_has_viewed_lesson_info_modal_once(user_id: str) -> None: + """Updates the user's settings once he has viewed the lesson info modal. + + Args: + user_id: str. The Id of the user. + """ + user_settings = get_user_settings(user_id) + user_settings.mark_lesson_info_modal_viewed() + save_user_settings(user_settings) + + +def clear_learner_checkpoint_progress( + user_id: str, exploration_id: str +) -> None: + """Clears learner's checkpoint progress through the exploration by + clearing the most recently reached checkpoint fields of the exploration. + + Args: + user_id: str. The Id of the user. + exploration_id: str. The Id of the exploration. + """ + exp_user_model = user_models.ExplorationUserDataModel.get( + user_id, exploration_id) + if exp_user_model is not None: + exp_user_model.most_recently_reached_checkpoint_exp_version = None + exp_user_model.most_recently_reached_checkpoint_state_name = None + exp_user_model.update_timestamps() + exp_user_model.put() + + +@overload +def sync_logged_in_learner_checkpoint_progress_with_current_exp_version( + user_id: str, exploration_id: str +) -> Optional[user_domain.ExplorationUserData]: ... + + +@overload +def sync_logged_in_learner_checkpoint_progress_with_current_exp_version( + user_id: str, exploration_id: str, *, strict: Literal[True] +) -> user_domain.ExplorationUserData: ... 
+ + +@overload +def sync_logged_in_learner_checkpoint_progress_with_current_exp_version( + user_id: str, exploration_id: str, *, strict: Literal[False] +) -> Optional[user_domain.ExplorationUserData]: ... + + +def sync_logged_in_learner_checkpoint_progress_with_current_exp_version( + user_id: str, exploration_id: str, strict: bool = False +) -> Optional[user_domain.ExplorationUserData]: + """Synchronizes the most recently reached checkpoint and the furthest + reached checkpoint with the latest exploration. + + Args: + user_id: str. The Id of the user. + exploration_id: str. The Id of the exploration. + strict: bool. Whether to fail noisily if no ExplorationUserDataModel + with the given user_id exists in the datastore. + + Returns: + ExplorationUserData. The domain object corresponding to the given user + and exploration. + + Raises: + Exception. No ExplorationUserDataModel found for the given user and + exploration ids. + """ + exp_user_model = user_models.ExplorationUserDataModel.get( + user_id, exploration_id) + + if exp_user_model is None: + if strict: + raise Exception( + 'No ExplorationUserDataModel found for the given user and ' + 'exploration ids: %s, %s' % (user_id, exploration_id) + ) + return None + + latest_exploration = exp_fetchers.get_exploration_by_id(exploration_id) + most_recently_interacted_exploration = ( + exp_fetchers.get_exploration_by_id( + exploration_id, + strict=True, + version=exp_user_model.most_recently_reached_checkpoint_exp_version + )) + furthest_reached_exploration = ( + exp_fetchers.get_exploration_by_id( + exploration_id, + strict=True, + version=exp_user_model.furthest_reached_checkpoint_exp_version + )) + + most_recently_reached_checkpoint_in_current_exploration = ( + get_most_distant_reached_checkpoint_in_current_exploration( + get_checkpoints_in_order( + latest_exploration.init_state_name, + latest_exploration.states), + get_checkpoints_in_order( + most_recently_interacted_exploration.init_state_name, + 
most_recently_interacted_exploration.states), + exp_user_model.most_recently_reached_checkpoint_state_name + ) + ) + + furthest_reached_checkpoint_in_current_exploration = ( + get_most_distant_reached_checkpoint_in_current_exploration( + get_checkpoints_in_order( + latest_exploration.init_state_name, + latest_exploration.states), + get_checkpoints_in_order( + furthest_reached_exploration.init_state_name, + furthest_reached_exploration.states), + exp_user_model.furthest_reached_checkpoint_state_name + ) + ) + + # If the most recently reached checkpoint doesn't exist in current + # exploration. + if ( + most_recently_reached_checkpoint_in_current_exploration != + exp_user_model.most_recently_reached_checkpoint_state_name + ): + exp_user_model.most_recently_reached_checkpoint_state_name = ( + most_recently_reached_checkpoint_in_current_exploration) + exp_user_model.most_recently_reached_checkpoint_exp_version = ( + latest_exploration.version) + exp_user_model.update_timestamps() + exp_user_model.put() + + # If the furthest reached checkpoint doesn't exist in current + # exploration. + if ( + furthest_reached_checkpoint_in_current_exploration != + exp_user_model.furthest_reached_checkpoint_state_name + ): + exp_user_model.furthest_reached_checkpoint_state_name = ( + furthest_reached_checkpoint_in_current_exploration) + exp_user_model.furthest_reached_checkpoint_exp_version = ( + latest_exploration.version) + exp_user_model.update_timestamps() + exp_user_model.put() + + return exp_fetchers.get_exploration_user_data(user_id, exploration_id) + + +def is_user_blog_post_author(user_id: str) -> bool: + """Checks whether user can write blog posts. + + Args: + user_id: str. The user id of the user. + + Returns: + bool. Whether the user can author blog posts. 
+ """ + user_settings = get_user_settings(user_id, strict=True) + author_roles = [feconf.ROLE_ID_BLOG_ADMIN, feconf.ROLE_ID_BLOG_POST_EDITOR] + return any(role in author_roles for role in user_settings.roles) diff --git a/core/domain/user_services_test.py b/core/domain/user_services_test.py index 52160b15f0b9..2bc24578be19 100644 --- a/core/domain/user_services_test.py +++ b/core/domain/user_services_test.py @@ -24,15 +24,16 @@ import re from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.domain import auth_services from core.domain import collection_services from core.domain import event_services from core.domain import exp_domain +from core.domain import exp_fetchers from core.domain import exp_services from core.domain import rights_manager +from core.domain import state_domain from core.domain import suggestion_services from core.domain import user_domain from core.domain import user_services @@ -41,32 +42,60 @@ import requests_mock -auth_models, user_models = ( - models.Registry.import_models([models.NAMES.auth, models.NAMES.user])) +from typing import Dict, Final, List + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import audit_models + from mypy_imports import auth_models + from mypy_imports import user_models + +datastore_services = models.Registry.import_datastore_services() +(auth_models, user_models, audit_models) = (models.Registry.import_models([ + models.Names.AUTH, + models.Names.USER, + models.Names.AUDIT +])) bulk_email_services = models.Registry.import_bulk_email_services() +def _get_change_list( + state_name: str, + property_name: str, + new_value: bool +) -> List[exp_domain.ExplorationChange]: + """Generates a change list for a single state change.""" + return [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, + 'state_name': state_name, + 'property_name': property_name, + 'new_value': new_value + })] + + class 
UserServicesUnitTests(test_utils.GenericTestBase): """Test the user services methods.""" - def setUp(self): - super(UserServicesUnitTests, self).setUp() - user_data_dict = { + def setUp(self) -> None: + super().setUp() + user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': 'user_id', } - new_user_data_dict = { + new_user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias3', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': None, } self.modifiable_user_data = ( @@ -74,10 +103,10 @@ def setUp(self): self.modifiable_new_user_data = ( user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict)) - def test_set_and_get_username(self): + def test_set_and_get_username(self) -> None: auth_id = 'someUser' username = 'username' - with self.assertRaisesRegexp(Exception, 'User not found.'): + with self.assertRaisesRegex(Exception, 'User not found.'): user_services.set_username(auth_id, username) user_settings = user_services.create_new_user( @@ -87,7 +116,27 @@ def test_set_and_get_username(self): self.assertEqual( username, user_services.get_username(user_settings.user_id)) - def test_get_username_for_system_user(self): + def test_set_username_to_existing_username_raises_error(self) -> None: + auth_ids = ['user1', 'user2'] + username = 'username1' + user_emails = ['user1@example.com', 'user2@example.com'] + user_ids = [] + + for i, auth_id in enumerate(auth_ids): + user_ids.append(user_services.create_new_user( + auth_id, + user_emails[i]).user_id) + + user_services.set_username(user_ids[0], username) + + 
error_msg = ( + 'Sorry, the username \"%s\" is already taken! Please pick ' + 'a different one.' % username) + + with self.assertRaisesRegex(utils.ValidationError, error_msg): + user_services.set_username(user_ids[1], username) + + def test_get_username_for_system_user(self) -> None: self.assertEqual( feconf.SYSTEM_COMMITTER_ID, user_services.get_username(feconf.SYSTEM_COMMITTER_ID)) @@ -95,7 +144,7 @@ def test_get_username_for_system_user(self): feconf.MIGRATION_BOT_USERNAME, user_services.get_username(feconf.MIGRATION_BOT_USER_ID)) - def test_get_username_for_pseudonymous_id(self): + def test_get_username_for_pseudonymous_id(self) -> None: self.assertEqual( 'User_Aaaaaaaa', user_services.get_username('pid_' + 'a' * 32)) @@ -103,31 +152,31 @@ def test_get_username_for_pseudonymous_id(self): 'User_Bbbbbbbb', user_services.get_username('pid_' + 'b' * 32)) - def test_get_usernames_for_pseudonymous_ids(self): + def test_get_usernames_for_pseudonymous_ids(self) -> None: # Handle usernames that exists. self.assertEqual( ['User_Aaaaaaaa', 'User_Bbbbbbbb'], user_services.get_usernames(['pid_' + 'a' * 32, 'pid_' + 'b' * 32])) - def test_get_usernames_empty_list(self): + def test_get_usernames_empty_list(self) -> None: # Return empty list when no user id passed. self.assertEqual([], user_services.get_usernames([])) - def test_get_usernames_system_admin(self): + def test_get_usernames_system_admin(self) -> None: # Check that system admin has correct username. self.assertEqual( [feconf.SYSTEM_COMMITTER_ID], user_services.get_usernames([feconf.SYSTEM_COMMITTER_ID])) - def test_get_username_for_nonexistent_user(self): - with self.assertRaisesRegexp( + def test_get_username_for_nonexistent_user(self) -> None: + with self.assertRaisesRegex( Exception, 'User with ID \'fakeUser\' not found.' 
): user_services.get_username('fakeUser') - def test_get_username_for_user_being_deleted(self): + def test_get_username_for_user_being_deleted(self) -> None: auth_id = 'someUser' username = 'newUsername' user_id = user_services.create_new_user( @@ -141,15 +190,15 @@ def test_get_username_for_user_being_deleted(self): user_services.get_username(user_id), user_services.USERNAME_FOR_USER_BEING_DELETED) - def test_get_username_none(self): + def test_get_username_none(self) -> None: user_id = user_services.create_new_user( 'fakeUser', 'user@example.com').user_id self.assertEqual(None, user_services.get_username(user_id)) - def test_is_username_taken_false(self): + def test_is_username_taken_false(self) -> None: self.assertFalse(user_services.is_username_taken('fakeUsername')) - def test_is_username_taken_true(self): + def test_is_username_taken_true(self) -> None: auth_id = 'someUser' username = 'newUsername' user_id = user_services.create_new_user( @@ -157,7 +206,7 @@ def test_is_username_taken_true(self): user_services.set_username(user_id, username) self.assertTrue(user_services.is_username_taken(username)) - def test_is_username_taken_different_case(self): + def test_is_username_taken_different_case(self) -> None: auth_id = 'someUser' username = 'camelCase' user_id = user_services.create_new_user( @@ -166,7 +215,8 @@ def test_is_username_taken_different_case(self): self.assertTrue(user_services.is_username_taken('CaMeLcAsE')) def test_is_username_taken_when_user_marked_as_deleted_has_same_username( - self): + self + ) -> None: auth_id = 'someUser' username = 'camelCase' user_id = user_services.create_new_user( @@ -175,14 +225,16 @@ def test_is_username_taken_when_user_marked_as_deleted_has_same_username( user_services.mark_user_for_deletion(user_id) self.assertTrue(user_services.is_username_taken(username)) - def test_is_username_taken_when_deleted_user_had_same_username(self): + def test_is_username_taken_when_deleted_user_had_same_username( + self + ) -> None: 
username = 'userName123' user_services.save_deleted_username( user_domain.UserSettings.normalize_username(username) ) self.assertTrue(user_services.is_username_taken(username)) - def test_set_invalid_usernames(self): + def test_set_invalid_usernames(self) -> None: auth_id = 'someUser' user_id = user_services.create_new_user( auth_id, 'user@example.com').user_id @@ -202,10 +254,12 @@ def test_set_invalid_usernames(self): ('oppiaXyz', 'This username is not available.'), ('abcOppiaXyz', 'This username is not available.')] for username, error_msg in bad_usernames_with_expected_error_message: - with self.assertRaisesRegexp(utils.ValidationError, error_msg): + with self.assertRaisesRegex(utils.ValidationError, error_msg): user_services.set_username(user_id, username) - def test_update_user_settings_for_invalid_display_alias_raises_error(self): + def test_update_user_settings_for_invalid_display_alias_raises_error( + self + ) -> None: auth_id = 'someUser' user_id = user_services.create_new_user( auth_id, 'user@example.com').user_id @@ -216,13 +270,18 @@ def test_update_user_settings_for_invalid_display_alias_raises_error(self): ] self.modifiable_new_user_data.user_id = user_id self.modifiable_new_user_data.pin = None + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
for display_alias, error_msg in bad_display_aliases_with_expected_error: - with self.assertRaisesRegexp(utils.ValidationError, error_msg): - self.modifiable_new_user_data.display_alias = display_alias + with self.assertRaisesRegex(utils.ValidationError, error_msg): + self.modifiable_new_user_data.display_alias = display_alias # type: ignore[assignment] user_services.update_multiple_users_data( [self.modifiable_new_user_data]) - def test_update_user_settings_valid_display_alias_set_successfully(self): + def test_update_user_settings_valid_display_alias_set_successfully( + self + ) -> None: auth_id = 'someUser' user_id = user_services.create_new_user( auth_id, 'user@example.com').user_id @@ -236,7 +295,7 @@ def test_update_user_settings_valid_display_alias_set_successfully(self): user_settings = user_services.get_user_settings(user_id) self.assertEqual(user_settings.display_alias, display_alias) - def test_create_new_user_with_invalid_emails_raises_exception(self): + def test_create_new_user_with_invalid_emails_raises_exception(self) -> None: bad_email_addresses_with_expected_error_message = [ ('@', 'Invalid email address: @'), ('@@', 'Invalid email address: @@'), @@ -251,14 +310,19 @@ def test_create_new_user_with_invalid_emails_raises_exception(self): ) ) ] + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
for email, error_msg in bad_email_addresses_with_expected_error_message: - with self.assertRaisesRegexp(utils.ValidationError, error_msg): - user_services.create_new_user('auth_id', email) + with self.assertRaisesRegex(utils.ValidationError, error_msg): + user_services.create_new_user('auth_id', email) # type: ignore[arg-type] - def test_create_new_user_with_invalid_email_creates_no_user_models(self): + def test_create_new_user_with_invalid_email_creates_no_user_models( + self + ) -> None: bad_email = '@' error_msg = 'Invalid email address: @' - with self.assertRaisesRegexp(utils.ValidationError, error_msg): + with self.assertRaisesRegex(utils.ValidationError, error_msg): user_services.create_new_user('auth_id', bad_email) tmp_admin_user_id = self.get_user_id_from_email(self.SUPER_ADMIN_EMAIL) user_ids_in_user_settings = [ @@ -271,7 +335,20 @@ def test_create_new_user_with_invalid_email_creates_no_user_models(self): self.assertEqual(user_ids_in_user_auth_details, [tmp_admin_user_id]) self.assertEqual(user_ids_in_user_contributions, [tmp_admin_user_id]) - def test_email_truncation(self): + def test_create_new_user_with_already_existing_auth_id_raises_error( + self + ) -> None: + auth_id = 'someUser' + email = 'user@example.com' + user_id = user_services.create_new_user(auth_id, email).user_id + + with self.assertRaisesRegex( + Exception, + 'User %s already exists for auth_id %s.' 
% (user_id, auth_id) + ): + user_services.create_new_user(auth_id, email) + + def test_email_truncation(self) -> None: email_addresses = [ ('a@b.c', '..@b.c'), ('ab@c.d', 'a..@c.d'), @@ -284,7 +361,7 @@ def test_email_truncation(self): str(ind), actual_email) self.assertEqual(user_settings.truncated_email, expected_email) - def test_get_user_id_from_username(self): + def test_get_user_id_from_username(self) -> None: auth_id = 'someUser' username = 'username' user_email = 'user@example.com' @@ -308,21 +385,145 @@ def test_get_user_id_from_username(self): self.assertIsNone( user_services.get_user_id_from_username('fakeUsername')) - def test_get_user_settings_by_auth_id_returns_user_settings(self): + # Raises error for usernames which don't exist, if + # 'get_user_id_from_username' called with strict. + with self.assertRaisesRegex( + Exception, + 'No user_id found for the given username: fakeUsername' + ): + user_services.get_user_id_from_username( + 'fakeUsername', strict=True + ) + + def test_get_multi_user_ids_from_usernames(self) -> None: + auth_id1 = 'someUser1' + username1 = 'username1' + user_email1 = 'user1@example.com' + auth_id2 = 'someUser2' + username2 = 'username2' + user_email2 = 'user2@example.com' + + # Create user 1. + user_settings = user_services.create_new_user(auth_id1, user_email1) + user_id1 = user_settings.user_id + user_services.set_username(user_id1, username1) + self.assertEqual(user_services.get_username(user_id1), username1) + + # Create user 2. + user_settings = user_services.create_new_user(auth_id2, user_email2) + user_id2 = user_settings.user_id + user_services.set_username(user_id2, username2) + self.assertEqual(user_services.get_username(user_id2), username2) + + # Handle usernames that exist. + self.assertEqual( + user_services.get_multi_user_ids_from_usernames( + [username1, username2]), [user_id1, user_id2]) + + # Handle usernames in the same equivalence class correctly. 
+ self.assertEqual( + user_services.get_multi_user_ids_from_usernames( + ['USERNAME1', 'USERNAME2']), [user_id1, user_id2]) + + # Return None for usernames which don't exist. + self.assertEqual( + user_services.get_multi_user_ids_from_usernames( + ['fakeUsername1', 'fakeUsername2', 'fakeUsername3', + 'fakeUsername4', 'fakeUsername5', 'fakeUsername6', + 'fakeUsername7', 'fakeUsername8', 'fakeUsername9'] + ), [None, None, None, None, None, None, None, None, None] + ) + self.assertEqual( + user_services.get_multi_user_ids_from_usernames( + ['fakeUsername1', 'USERNAME1', 'fakeUsername3', + 'fakeUsername4', 'fakeUsername5', 'fakeUsername6', + 'fakeUsername7', username2, 'fakeUsername9'] + ), [None, user_id1, None, None, None, None, None, user_id2, None] + ) + + # Return empty list if empty list is passed in as arguments. + self.assertEqual( + user_services.get_multi_user_ids_from_usernames([]), [] + ) + + with self.assertRaisesRegex( + Exception, + 'No user_id found for the username: fakeusername1' + ): + user_services.get_multi_user_ids_from_usernames( + ['fakeUsername1', 'USERNAME1', 'fakeUsername3', + 'fakeUsername4', 'fakeUsername5', 'fakeUsername6', + 'fakeUsername7', username2, 'fakeUsername9'], + strict=True + ) + + def test_get_user_settings_from_username_returns_user_settings( + self + ) -> None: + auth_id = 'someUser' + username = 'username' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + user_services.set_username(user_id, username) + user_settings_model = user_models.UserSettingsModel.get_by_id(user_id) + user_settings = user_services.get_user_settings_from_username(username) + + # Ruling out the possibility of None for mypy type checking. 
+ assert user_settings is not None + self.assertEqual( + user_settings_model.id, user_settings.user_id) + self.assertEqual( + user_email, user_settings.email) + + def test_get_user_settings_from_username_for_no_username_is_none( + self + ) -> None: + self.assertIsNone( + user_services.get_user_settings_from_username('fakeUsername')) + + def test_get_user_settings_from_email_returns_user_settings(self) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + user_settings_model = user_models.UserSettingsModel.get_by_id(user_id) + user_settings = user_services.get_user_settings_from_email(user_email) + + # Ruling out the possibility of None for mypy type checking. + assert user_settings is not None + self.assertEqual( + user_settings_model.id, user_settings.user_id) + self.assertEqual( + user_email, user_settings.email) + + def test_get_user_settings_from_email_for_nonexistent_email_is_none( + self + ) -> None: + self.assertIsNone( + user_services.get_user_settings_from_email('fakeEmail@example.com')) + + def test_get_user_settings_by_auth_id_returns_user_settings(self) -> None: auth_id = 'auth_id' email = 'user@example.com' user_id = 'user_id' user_id = user_services.create_new_user(auth_id, email).user_id user_settings_model = user_models.UserSettingsModel.get_by_id(user_id) user_settings = user_services.get_user_settings_by_auth_id(auth_id) + # Ruling out the possibility of None for mypy type checking. 
+ assert user_settings is not None self.assertEqual(user_settings_model.id, user_settings.user_id) self.assertEqual(user_settings_model.email, user_settings.email) - def test_get_user_settings_by_auth_id_for_nonexistent_auth_id_is_none(self): + def test_get_user_settings_by_auth_id_for_nonexistent_auth_id_is_none( + self + ) -> None: self.assertIsNone( user_services.get_user_settings_by_auth_id('auth_id_x')) - def test_get_user_settings_by_auth_id_strict_returns_user_settings(self): + def test_get_user_settings_by_auth_id_strict_returns_user_settings( + self + ) -> None: auth_id = 'auth_id' email = 'user@example.com' user_id = user_services.create_new_user(auth_id, email).user_id @@ -334,18 +535,65 @@ def test_get_user_settings_by_auth_id_strict_returns_user_settings(self): self.assertEqual(user_settings_model.email, user_settings.email) def test_get_user_settings_by_auth_id_strict_for_missing_auth_id_is_none( - self): - with self.assertRaisesRegexp(Exception, 'User not found.'): - user_services.get_user_settings_by_auth_id('auth_id_x', strict=True) + self + ) -> None: + with self.assertRaisesRegex(Exception, 'User not found.'): + user_services.get_user_settings_by_auth_id( + 'auth_id_x', + strict=True + ) + + def test_get_users_setting_retrieves_settings_for_system_user(self) -> None: + user_id = feconf.SYSTEM_COMMITTER_ID + user_ids = [user_id] + + roles = [ + feconf.ROLE_ID_FULL_USER, + feconf.ROLE_ID_CURRICULUM_ADMIN, + feconf.ROLE_ID_MODERATOR, + feconf.ROLE_ID_VOICEOVER_ADMIN + ] + + less_than_time = datetime.datetime.utcnow() + + users_settings = user_services.get_users_settings(user_ids) + self.assertEqual(len(users_settings), 1) + admin_settings = users_settings[0] + + greater_than_time = datetime.datetime.utcnow() + + # Ruling out the possibility of None for mypy type checking. 
+ assert admin_settings is not None + self.assertEqual(admin_settings.user_id, user_id) + self.assertEqual(admin_settings.email, feconf.SYSTEM_EMAIL_ADDRESS) + self.assertEqual(admin_settings.roles, roles) + self.assertFalse(admin_settings.banned) + self.assertEqual(admin_settings.username, 'admin') + self.assertGreater( + admin_settings.last_agreed_to_terms, + less_than_time + ) + self.assertLess( + admin_settings.last_agreed_to_terms, + greater_than_time + ) + + def test_get_users_setting_for_empty_user_ids_returns_empty_list( + self + ) -> None: + user_ids: List[str] = [] + users_settings = user_services.get_users_settings(user_ids) - def test_fetch_gravatar_success(self): + self.assertEqual(len(users_settings), 0) + + def test_fetch_gravatar_success(self) -> None: user_email = 'user@example.com' gravatar_url = user_services.get_gravatar_url(user_email) expected_gravatar_filepath = os.path.join( self.get_static_asset_filepath(), 'assets', 'images', 'avatar', 'gravatar_example.png') - with python_utils.open_file( + with utils.open_file( expected_gravatar_filepath, 'rb', encoding=None) as f: expected_gravatar = f.read() @@ -356,11 +604,11 @@ def test_fetch_gravatar_success(self): self.assertEqual( gravatar, utils.convert_png_to_data_url(expected_gravatar_filepath)) - def test_fetch_gravatar_failure_404(self): + def test_fetch_gravatar_failure_404(self) -> None: user_email = 'user@example.com' gravatar_url = user_services.get_gravatar_url(user_email) - error_messages = [] + error_messages: List[str] = [] logging_mocker = self.swap(logging, 'error', error_messages.append) with logging_mocker, requests_mock.Mocker() as requests_mocker: @@ -372,11 +620,11 @@ def test_fetch_gravatar_failure_404(self): ['[Status 404] Failed to fetch Gravatar from %s' % gravatar_url]) self.assertEqual(gravatar, user_services.DEFAULT_IDENTICON_DATA_URL) - def test_fetch_gravatar_failure_exception(self): + def test_fetch_gravatar_failure_exception(self) -> None: user_email = 
'user@example.com' gravatar_url = user_services.get_gravatar_url(user_email) - error_messages = [] + error_messages: List[str] = [] logging_mocker = self.swap(logging, 'exception', error_messages.append) with logging_mocker, requests_mock.Mocker() as requests_mocker: @@ -387,7 +635,7 @@ def test_fetch_gravatar_failure_exception(self): error_messages, ['Failed to fetch Gravatar from %s' % gravatar_url]) self.assertEqual(gravatar, user_services.DEFAULT_IDENTICON_DATA_URL) - def test_default_identicon_data_url(self): + def test_default_identicon_data_url(self) -> None: identicon_filepath = os.path.join( self.get_static_asset_filepath(), 'assets', 'images', 'avatar', 'user_blue_72px.png') @@ -395,7 +643,56 @@ def test_default_identicon_data_url(self): self.assertEqual( identicon_data_url, user_services.DEFAULT_IDENTICON_DATA_URL) - def test_set_and_get_user_email_preferences(self): + def test_get_users_email_preferences(self) -> None: + auth_id = 'someUser' + username = 'username' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + user_services.set_username(user_id, username) + email_prefs = user_services.get_users_email_preferences([user_id]) + self.assertEqual(len(email_prefs), 1) + user_email_prefs = email_prefs[0] + + self.assertEqual( + user_email_prefs.can_receive_email_updates, + feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE) + + self.assertEqual( + user_email_prefs.can_receive_editor_role_email, + feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE) + + self.assertEqual( + user_email_prefs.can_receive_feedback_message_email, + feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE) + + self.assertEqual( + user_email_prefs.can_receive_subscription_email, + feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE) + + def test_add_user_to_mailing_list(self) -> None: + def _mock_add_or_update_user_status( + unused_email: str, + merge_fields: Dict[str, str], + unused_tag: str, + *, + can_receive_email_updates: bool + ) -> bool: + """Mocks 
bulk_email_services.add_or_update_user_status().""" + self.assertDictEqual(merge_fields, { + 'NAME': 'Name' + }) + return can_receive_email_updates + + fn_swap = self.swap( + bulk_email_services, 'add_or_update_user_status', + _mock_add_or_update_user_status) + with fn_swap: + self.assertTrue( + user_services.add_user_to_mailing_list( + 'email@example.com', 'Name', 'Android')) + + def test_set_and_get_user_email_preferences(self) -> None: auth_id = 'someUser' username = 'username' user_email = 'user@example.com' @@ -416,7 +713,7 @@ def test_set_and_get_user_email_preferences(self): feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE) observed_log_messages = [] - def _mock_logging_function(msg, *args): + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.info().""" observed_log_messages.append(msg % args) @@ -435,9 +732,15 @@ def _mock_logging_function(msg, *args): 'preference in the service provider\'s db to False. Cannot access ' 'API, since this is a dev environment.' 
% user_email]) - def _mock_add_or_update_user_status(_email, _can_receive_updates): + def _mock_add_or_update_user_status( + unused_email: str, + unused_merge_fields: Dict[str, str], + unused_tag: str, + *, + can_receive_email_updates: bool + ) -> bool: """Mocks bulk_email_services.add_or_update_user_status().""" - return False + return not can_receive_email_updates send_mail_swap = self.swap(feconf, 'CAN_SEND_EMAILS', True) bulk_email_swap = self.swap( @@ -476,7 +779,7 @@ def _mock_add_or_update_user_status(_email, _can_receive_updates): self.assertFalse(email_preferences.can_receive_feedback_message_email) self.assertFalse(email_preferences.can_receive_subscription_email) - def test_get_and_set_user_email_preferences_with_error(self): + def test_get_and_set_user_email_preferences_with_error(self) -> None: auth_id = 'someUser' username = 'username' user_email = 'user@example.com' @@ -491,7 +794,9 @@ def test_get_and_set_user_email_preferences_with_error(self): email_preferences = user_services.get_email_preferences(user_id) self.assertFalse(email_preferences.can_receive_email_updates) - def _mock_add_or_update_user_status(_email, _can_receive_updates): + def _mock_add_or_update_user_status( + _email: str, _can_receive_updates: bool + ) -> None: """Mocks bulk_email_services.add_or_update_user_status(). Raises: @@ -522,7 +827,7 @@ def _mock_add_or_update_user_status(_email, _can_receive_updates): email_preferences = user_services.get_email_preferences(user_id) self.assertTrue(email_preferences.can_receive_email_updates) - def test_set_and_get_user_email_preferences_for_exploration(self): + def test_set_and_get_user_email_preferences_for_exploration(self) -> None: auth_id = 'someUser' exploration_id = 'someExploration' username = 'username' @@ -535,7 +840,7 @@ def test_set_and_get_user_email_preferences_for_exploration(self): # of mute_feedback_notifications and mute_suggestion_notifications # should match the default values. 
exploration_user_model = ( - user_services.user_models.ExplorationUserDataModel.get( + user_models.ExplorationUserDataModel.get( user_id, exploration_id)) self.assertIsNone(exploration_user_model) email_preferences = user_services.get_email_preferences_for_exploration( @@ -587,7 +892,41 @@ def test_set_and_get_user_email_preferences_for_exploration(self): self.assertTrue(email_preferences.mute_feedback_notifications) self.assertTrue(email_preferences.mute_suggestion_notifications) - def test_get_usernames_by_role(self): + def test_get_users_email_preferences_for_exploration(self) -> None: + auth_ids = ['someUser1', 'someUser2'] + exploration_ids = ['someExploration1', 'someExploration2'] + usernames = ['username1', 'username2'] + emails = ['user1@example.com', 'user2@example.com'] + user_ids = [] + + for i, auth_id in enumerate(auth_ids): + user_id = user_services.create_new_user(auth_id, emails[i]).user_id + user_ids.append(user_id) + user_services.set_username(user_id, usernames[i]) + + user_services.set_email_preferences_for_exploration( + user_ids[1], + exploration_ids[1], + mute_feedback_notifications=True, + mute_suggestion_notifications=True) + + exp_prefs = user_services.get_users_email_preferences_for_exploration( + user_ids, + exploration_ids[1] + ) + + self.assertEqual( + exp_prefs[0].mute_feedback_notifications, + feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE) + + self.assertEqual( + exp_prefs[0].mute_suggestion_notifications, + feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE) + + self.assertTrue(exp_prefs[1].mute_feedback_notifications) + self.assertTrue(exp_prefs[1].mute_suggestion_notifications) + + def test_get_usernames_by_role(self) -> None: auth_ids = ['test1', 'test2', 'test3', 'test4'] usernames = ['name1', 'name2', 'name3', 'name4'] user_emails = [ @@ -595,8 +934,7 @@ def test_get_usernames_by_role(self): 'test3@email.com', 'test4@email.com'] user_ids = [] - for auth_id, email, name in python_utils.ZIP( - auth_ids, 
user_emails, usernames): + for auth_id, email, name in zip(auth_ids, user_emails, usernames): user_id = user_services.create_new_user(auth_id, email).user_id user_ids.append(user_id) user_services.set_username(user_id, name) @@ -615,7 +953,7 @@ def test_get_usernames_by_role(self): feconf.ROLE_ID_TOPIC_MANAGER)), set(['name3', 'name4'])) - def test_get_user_ids_by_role(self): + def test_get_user_ids_by_role(self) -> None: auth_ids = ['test1', 'test2', 'test3', 'test4'] usernames = ['name1', 'name2', 'name3', 'name4'] user_emails = [ @@ -623,8 +961,7 @@ def test_get_user_ids_by_role(self): 'test3@email.com', 'test4@email.com'] user_ids = [] - for uid, email, name in python_utils.ZIP( - auth_ids, user_emails, usernames): + for uid, email, name in zip(auth_ids, user_emails, usernames): user_id = user_services.create_new_user(uid, email).user_id user_ids.append(user_id) user_services.set_username(user_id, name) @@ -645,7 +982,162 @@ def test_get_user_ids_by_role(self): feconf.ROLE_ID_CURRICULUM_ADMIN)), set([user_ids[2], user_ids[3]])) - def test_update_user_creator_dashboard_display(self): + def test_get_system_user_returns_system_user_action_info(self) -> None: + system_user_action = user_services.get_system_user() + expected_actions = set([ + 'MANAGE_TOPIC_RIGHTS', 'EDIT_ANY_PUBLIC_ACTIVITY', + 'DELETE_ANY_SKILL', 'PUBLISH_OWNED_SKILL', 'DELETE_TOPIC', + 'EDIT_OWNED_TOPIC', 'CREATE_NEW_TOPIC', 'ACCESS_MODERATOR_PAGE', + 'RATE_ANY_PUBLIC_EXPLORATION', 'DELETE_ANY_PUBLIC_ACTIVITY', + 'MANAGE_ACCOUNT', 'MODIFY_CORE_ROLES_FOR_OWNED_ACTIVITY', + 'CREATE_EXPLORATION', 'UNPUBLISH_ANY_PUBLIC_ACTIVITY', + 'CHANGE_TOPIC_STATUS', 'SEND_MODERATOR_EMAILS', 'FLAG_EXPLORATION', + 'ACCESS_CREATOR_DASHBOARD', 'EDIT_ANY_TOPIC', + 'ACCEPT_ANY_SUGGESTION', 'PUBLISH_OWNED_ACTIVITY', + 'PLAY_ANY_PUBLIC_ACTIVITY', + 'EDIT_ANY_SUBTOPIC_PAGE', 'VISIT_ANY_QUESTION_EDITOR_PAGE', + 'ACCESS_LEARNER_DASHBOARD', + 'EDIT_ANY_ACTIVITY', 'VISIT_ANY_TOPIC_EDITOR_PAGE', + 'SUGGEST_CHANGES', 
'DELETE_OWNED_PRIVATE_ACTIVITY', + 'EDIT_OWNED_ACTIVITY', 'EDIT_SKILL_DESCRIPTION', + 'DELETE_ANY_ACTIVITY', 'SUBSCRIBE_TO_USERS', + 'PLAY_ANY_PRIVATE_ACTIVITY', 'MANAGE_QUESTION_SKILL_STATUS', + 'MODIFY_CORE_ROLES_FOR_ANY_ACTIVITY', + 'ACCESS_TOPICS_AND_SKILLS_DASHBOARD', 'EDIT_SKILL', + 'DELETE_ANY_QUESTION', 'EDIT_ANY_STORY', 'PUBLISH_ANY_ACTIVITY', + 'EDIT_ANY_QUESTION', 'CREATE_NEW_SKILL', 'CHANGE_STORY_STATUS', + 'CAN_MANAGE_VOICE_ARTIST', 'ACCESS_LEARNER_GROUPS']) + expected_roles = set( + ['EXPLORATION_EDITOR', 'ADMIN', 'MODERATOR', + 'VOICEOVER_ADMIN']) + + self.assertEqual(set(system_user_action.actions), expected_actions) + self.assertEqual(set(system_user_action.roles), expected_roles) + self.assertEqual(system_user_action.user_id, 'admin') + + def test_update_user_bio(self) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + user_bio = 'new bio' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + pre_update_user_settings = user_services.get_user_settings(user_id) + self.assertNotEqual(pre_update_user_settings.user_bio, user_bio) + + user_services.update_user_bio(user_id, user_bio) + user_settings = user_services.get_user_settings(user_id) + + self.assertEqual(user_bio, user_settings.user_bio) + + def test_update_preferred_language_codes(self) -> None: + language_codes = ['es'] + + user_id = user_services.create_new_user( + 'someUser', + 'user@example.com').user_id + user_settings = user_services.get_user_settings(user_id) + + self.assertNotEqual( + language_codes, + user_settings.preferred_language_codes + ) + + user_services.update_preferred_language_codes( + user_id, language_codes) + user_settings = user_services.get_user_settings(user_id) + + self.assertEqual( + language_codes, + user_settings.preferred_language_codes + ) + + def test_update_preferred_site_language_code(self) -> None: + preferred_site_language_code = 'es' + + user_id = user_services.create_new_user( + 'someUser', + 'user@example.com').user_id 
+ user_settings = user_services.get_user_settings(user_id) + + self.assertNotEqual( + 'es', + user_settings.preferred_site_language_code + ) + + user_services.update_preferred_site_language_code( + user_id, preferred_site_language_code) + user_settings = user_services.get_user_settings(user_id) + + self.assertEqual( + preferred_site_language_code, + user_settings.preferred_site_language_code + ) + + def test_update_preferred_audio_language_code(self) -> None: + audio_code = 'es' + + user_id = user_services.create_new_user( + 'someUser', + 'user@example.com').user_id + user_settings = user_services.get_user_settings(user_id) + + self.assertNotEqual( + 'es', + user_settings.preferred_audio_language_code + ) + user_services.update_preferred_audio_language_code( + user_id, audio_code) + user_settings = user_services.get_user_settings(user_id) + + self.assertEqual( + audio_code, + user_settings.preferred_audio_language_code + ) + + def test_update_preferred_translation_language_code(self) -> None: + language_code = 'es' + + user_id = user_services.create_new_user( + 'someUser', 'user@example.com').user_id + user_settings = user_services.get_user_settings(user_id) + + self.assertNotEqual( + user_settings.preferred_translation_language_code, 'es') + + user_services.update_preferred_translation_language_code( + user_id, language_code) + user_settings = user_services.get_user_settings(user_id) + + self.assertEqual( + language_code, user_settings.preferred_translation_language_code) + + def test_remove_user_role(self) -> None: + user_id = user_services.create_new_user( + 'someUser', + 'user@example.com').user_id + user_settings_model = user_models.UserSettingsModel.get_by_id(user_id) + user_services.add_user_role(user_id, feconf.ROLE_ID_BLOG_POST_EDITOR) + user_settings = user_services.get_user_settings(user_id) + + user_services.remove_user_role(user_id, feconf.ROLE_ID_BLOG_POST_EDITOR) + + self.assertEqual( + user_settings_model.roles, + user_settings.roles + ) + + def 
test_remove_user_role_for_default_role_raises_error(self) -> None: + user_id = user_services.create_new_user( + 'someUser', + 'user@example.com').user_id + + with self.assertRaisesRegex( + Exception, + 'Removing a default role is not allowed.' + ): + user_services.remove_user_role(user_id, feconf.ROLE_ID_FULL_USER) + + def test_update_user_creator_dashboard_display(self) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -665,7 +1157,7 @@ def test_update_user_creator_dashboard_display(self): user_setting.creator_dashboard_display_pref, constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['LIST']) - def test_add_user_role(self): + def test_add_user_role(self) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -683,7 +1175,7 @@ def test_add_user_role(self): user_services.get_user_roles_from_id(user_id), [ feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_COLLECTION_EDITOR]) - def test_adding_other_roles_to_full_user_updates_roles(self): + def test_adding_other_roles_to_full_user_updates_roles(self) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -737,7 +1229,7 @@ def test_adding_other_roles_to_full_user_updates_roles(self): feconf.ROLE_ID_CURRICULUM_ADMIN]) self.assertFalse(user_settings_model.banned) - def test_profile_user_settings_have_correct_roles(self): + def test_profile_user_settings_have_correct_roles(self) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -749,13 +1241,14 @@ def test_profile_user_settings_have_correct_roles(self): user_settings_model.update_timestamps() user_settings_model.put() - profile_user_data_dict = { + profile_user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias3', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 
'user_id': None, } modifiable_user_data = user_domain.ModifiableUserData.from_raw_dict( @@ -769,14 +1262,16 @@ def test_profile_user_settings_have_correct_roles(self): profile_user_settings_model.roles, [feconf.ROLE_ID_MOBILE_LEARNER]) self.assertFalse(profile_user_settings_model.banned) - def test_get_all_profiles_auth_details_non_existent_id_raises_error(self): + def test_get_all_profiles_auth_details_non_existent_id_raises_error( + self + ) -> None: non_existent_user_id = 'id_x' error_msg = 'Parent user not found.' - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_services.get_all_profiles_auth_details_by_parent_user_id( non_existent_user_id) - def test_add_user_role_to_mobile_learner_raises_exception(self): + def test_add_user_role_to_mobile_learner_raises_exception(self) -> None: auth_id = 'test_id' user_email = 'test@email.com' user_pin = '12345' @@ -802,11 +1297,11 @@ def test_add_user_role_to_mobile_learner_raises_exception(self): user_services.get_user_roles_from_id(profile_user_id), [feconf.ROLE_ID_MOBILE_LEARNER]) error_msg = 'The role of a Mobile Learner cannot be changed.' - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_services.add_user_role( profile_user_id, feconf.ROLE_ID_FULL_USER) - def test_add_full_user_role_to_learner_raises_exception(self): + def test_add_full_user_role_to_learner_raises_exception(self) -> None: auth_id = 'test_id' user_email = 'test@email.com' @@ -816,11 +1311,43 @@ def test_add_full_user_role_to_learner_raises_exception(self): [feconf.ROLE_ID_FULL_USER]) error_msg = 'Adding a %s role is not allowed.' 
% ( feconf.ROLE_ID_MOBILE_LEARNER) - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_services.add_user_role( user_id, feconf.ROLE_ID_MOBILE_LEARNER) - def test_removing_role_from_mobile_learner_user_raises_exception(self): + def test_is_user_blog_post_author_returns_true_for_authors(self) -> None: + # When user is a blog admin. + self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME) + blog_admin_id = ( + self.get_user_id_from_email(self.BLOG_ADMIN_EMAIL)) + # Precheck before adding blog admin role. + self.assertFalse(user_services.is_user_blog_post_author(blog_admin_id)) + + self.add_user_role( + self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN) + + self.assertTrue(user_services.is_user_blog_post_author(blog_admin_id)) + + # When user is a blog editor. + self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME) + blog_editor_id = ( + self.get_user_id_from_email(self.BLOG_EDITOR_EMAIL)) + # Precheck before adding blog editor role. + self.assertFalse(user_services.is_user_blog_post_author(blog_editor_id)) + + self.add_user_role( + self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR) + + self.assertTrue(user_services.is_user_blog_post_author(blog_editor_id)) + + # Assigning multiple roles to blog editor. + self.add_user_role( + self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_RELEASE_COORDINATOR) + self.assertTrue(user_services.is_user_blog_post_author(blog_editor_id)) + + def test_removing_role_from_mobile_learner_user_raises_exception( + self + ) -> None: auth_id = 'test_id' user_email = 'test@email.com' user_pin = '12345' @@ -846,11 +1373,11 @@ def test_removing_role_from_mobile_learner_user_raises_exception(self): user_services.get_user_roles_from_id(profile_user_id), [feconf.ROLE_ID_MOBILE_LEARNER]) error_msg = 'The role of a Mobile Learner cannot be changed.' 
- with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_services.remove_user_role( profile_user_id, feconf.ROLE_ID_TOPIC_MANAGER) - def test_removing_default_user_role_raises_exception(self): + def test_removing_default_user_role_raises_exception(self) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -863,10 +1390,44 @@ def test_removing_default_user_role_raises_exception(self): self.assertFalse(user_settings_model.banned) error_msg = 'Removing a default role is not allowed.' - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_services.remove_user_role(user_id, feconf.ROLE_ID_FULL_USER) - def test_mark_user_banned(self): + def test_is_user_registered_for_existing_user_id_returns_true(self) -> None: + auth_id = 'test_id' + user_email = 'test@email.com' + user_id = user_services.create_new_user(auth_id, user_email).user_id + + self.assertTrue(user_services.is_user_registered(user_id)) + + def test_is_user_registered_for_non_user_id_returns_false(self) -> None: + user_id = 'just_random_id' + self.assertFalse(user_services.is_user_registered(user_id)) + + def test_has_fully_registered_account_for_properly_registered_user( + self + ) -> None: + """checks whether the user with user_id has created their username and + has agreed to terms. 
+ """ + + auth_id = 'test_id' + username = 'testname' + user_email = 'test@email.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + user_services.set_username(user_id, username) + user_services.record_agreement_to_terms(user_id) + + self.assertTrue(user_services.has_fully_registered_account(user_id)) + + def test_has_fully_registered_account_for_none_user_id_returns_false( + self + ) -> None: + user_id = 'non_existing_user' + self.assertFalse(user_services.has_fully_registered_account(user_id)) + + def test_mark_user_banned(self) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -881,7 +1442,7 @@ def test_mark_user_banned(self): user_settings_model = user_models.UserSettingsModel.get_by_id(user_id) self.assertTrue(user_settings_model.banned) - def test_unmark_banned_user(self): + def test_unmark_banned_user(self) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -897,7 +1458,9 @@ def test_unmark_banned_user(self): user_settings_model = user_models.UserSettingsModel.get_by_id(user_id) self.assertFalse(user_settings_model.banned) - def test_create_new_user_creates_a_new_user_auth_details_entry(self): + def test_create_new_user_creates_a_new_user_auth_details_entry( + self + ) -> None: new_auth_id = 'new_auth_id' new_email = 'new@example.com' @@ -909,12 +1472,16 @@ def test_create_new_user_creates_a_new_user_auth_details_entry(self): self.assertEqual( auth_services.get_auth_id_from_user_id(user_id), new_auth_id) - def test_get_auth_details_by_user_id_for_existing_user_works_fine(self): + def test_get_auth_details_by_user_id_for_existing_user_works_fine( + self + ) -> None: auth_id = 'new_auth_id' email = 'new@example.com' user_id = user_services.create_new_user(auth_id, email).user_id user_auth_details_model = auth_models.UserAuthDetailsModel.get(user_id) user_auth_details = user_services.get_auth_details_by_user_id(user_id) + # Ruling out the possibility of None for mypy 
type checking. + assert user_auth_details is not None self.assertEqual( user_auth_details.user_id, user_auth_details_model.id) self.assertEqual( @@ -923,24 +1490,32 @@ def test_get_auth_details_by_user_id_for_existing_user_works_fine(self): user_auth_details.parent_user_id, user_auth_details_model.parent_user_id) - def test_get_auth_details_by_user_id_non_existing_user_returns_none(self): + def test_get_auth_details_by_user_id_non_existing_user_returns_none( + self + ) -> None: non_existent_user_id = 'id_x' self.assertIsNone( user_services.get_auth_details_by_user_id(non_existent_user_id)) - def test_get_auth_details_by_user_id_strict_non_existing_user_error(self): + def test_get_auth_details_by_user_id_strict_non_existing_user_error( + self + ) -> None: non_existent_user_id = 'id_x' error_msg = 'User not found' - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_services.get_auth_details_by_user_id( non_existent_user_id, strict=True) - def test_get_auth_details_by_auth_id_non_existing_user_returns_none(self): + def test_get_auth_details_by_auth_id_non_existing_user_returns_none( + self + ) -> None: non_existent_user_id = 'id_x' self.assertIsNone( user_services.get_auth_details_by_user_id(non_existent_user_id)) - def test_create_new_profile_with_parent_user_pin_set_is_success(self): + def test_create_new_profile_with_parent_user_pin_set_is_success( + self + ) -> None: auth_id = 'auth_id' email = 'new@example.com' display_alias = 'display_alias' @@ -966,20 +1541,24 @@ def test_create_new_profile_with_parent_user_pin_set_is_success(self): self.assertEqual(user_auth_details_models[0].parent_user_id, user_id) self.assertIsNone(user_auth_details_models[0].gae_id) - def test_create_new_profile_with_parent_user_pin_not_set_raises_error(self): + def test_create_new_profile_with_parent_user_pin_not_set_raises_error( + self + ) -> None: auth_id = 'auth_id' email = 'new@example.com' display_alias = 'display_alias' 
profile_pin = '123' user_services.create_new_user(auth_id, email) error_msg = 'Pin must be set for a full user before creating a profile.' - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): self.modifiable_new_user_data.display_alias = display_alias self.modifiable_new_user_data.pin = profile_pin user_services.create_new_profiles( auth_id, email, [self.modifiable_new_user_data]) - def test_create_multiple_new_profiles_for_same_user_works_correctly(self): + def test_create_multiple_new_profiles_for_same_user_works_correctly( + self + ) -> None: auth_id = 'auth_id' email = 'new@example.com' display_alias = 'display_alias' @@ -994,13 +1573,14 @@ def test_create_multiple_new_profiles_for_same_user_works_correctly(self): user_services.update_multiple_users_data([self.modifiable_user_data]) self.modifiable_new_user_data.display_alias = display_alias_2 self.modifiable_new_user_data.pin = profile_pin - new_user_data_dict_2 = { + new_user_data_dict_2: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': display_alias_3, 'pin': None, 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': None, } modifiable_new_user_data_2 = ( @@ -1038,16 +1618,18 @@ def test_create_multiple_new_profiles_for_same_user_works_correctly(self): self.assertItemsEqual( user_auth_details_models, expected_user_auth_output) - user_settings_models = [ - { + user_settings_models = [] + for model in user_models.UserSettingsModel.get_multi( + [profile_1_id, profile_2_id] + ): + # Ruling out the possibility of None for mypy type checking. 
+ assert model is not None + user_settings_models.append({ 'id': model.id, 'display_alias': model.display_alias, 'pin': model.pin, 'roles': model.roles - } for model in - user_models.UserSettingsModel.get_multi( - [profile_1_id, profile_2_id]) - ] + }) expected_user_settings_output = [ { @@ -1066,13 +1648,15 @@ def test_create_multiple_new_profiles_for_same_user_works_correctly(self): self.assertItemsEqual( user_settings_models, expected_user_settings_output) - def test_create_new_profile_with_nonexistent_user_raises_error(self): + def test_create_new_profile_with_nonexistent_user_raises_error( + self + ) -> None: non_existent_auth_id = 'auth_id_x' non_existent_email = 'x@example.com' profile_pin = '123' display_alias = 'display_alias' error_msg = 'User not found.' - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): self.modifiable_new_user_data.display_alias = display_alias self.modifiable_new_user_data.pin = profile_pin user_services.create_new_profiles( @@ -1080,7 +1664,9 @@ def test_create_new_profile_with_nonexistent_user_raises_error(self): [self.modifiable_new_user_data] ) - def test_create_new_profile_modifiable_user_with_user_id_raises_error(self): + def test_create_new_profile_modifiable_user_with_user_id_raises_error( + self + ) -> None: auth_id = 'auth_id' email = 'new@example.com' display_alias = 'display_alias' @@ -1093,7 +1679,7 @@ def test_create_new_profile_modifiable_user_with_user_id_raises_error(self): self.modifiable_user_data.display_alias = display_alias user_services.update_multiple_users_data([self.modifiable_user_data]) error_msg = 'User id cannot already exist for a new user.' 
- with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): self.modifiable_new_user_data.display_alias = display_alias_2 self.modifiable_new_user_data.pin = profile_pin self.modifiable_new_user_data.user_id = 'user_id' @@ -1101,7 +1687,9 @@ def test_create_new_profile_modifiable_user_with_user_id_raises_error(self): auth_id, email, [self.modifiable_new_user_data] ) - def test_update_users_modifiable_object_user_id_not_set_raises_error(self): + def test_update_users_modifiable_object_user_id_not_set_raises_error( + self + ) -> None: auth_id = 'auth_id' email = 'new@example.com' display_alias = 'display_alias2' @@ -1112,11 +1700,13 @@ def test_update_users_modifiable_object_user_id_not_set_raises_error(self): self.modifiable_user_data.display_alias = display_alias error_msg = 'Missing user ID.' - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_services.update_multiple_users_data( [self.modifiable_user_data]) - def test_update_users_for_user_with_non_existent_id_raises_error(self): + def test_update_users_for_user_with_non_existent_id_raises_error( + self + ) -> None: auth_id = 'auth_id' non_existent_user_id = 'id_x' email = 'new@example.com' @@ -1128,11 +1718,11 @@ def test_update_users_for_user_with_non_existent_id_raises_error(self): self.modifiable_user_data.display_alias = display_alias error_msg = 'User not found.' - with self.assertRaisesRegexp(Exception, error_msg): + with self.assertRaisesRegex(Exception, error_msg): user_services.update_multiple_users_data( [self.modifiable_user_data]) - def test_update_users_data_for_multiple_users_works_correctly(self): + def test_update_users_data_for_multiple_users_works_correctly(self) -> None: # Preparing for the test. 
auth_id = 'auth_id' email = 'new@example.com' @@ -1148,13 +1738,14 @@ def test_update_users_data_for_multiple_users_works_correctly(self): user_services.update_multiple_users_data([self.modifiable_user_data]) self.modifiable_new_user_data.display_alias = display_alias_2 self.modifiable_new_user_data.pin = profile_pin - new_user_data_dict_2 = { + new_user_data_dict_2: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': display_alias_3, 'pin': None, 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': None, } modifiable_new_user_data_2 = ( @@ -1177,14 +1768,17 @@ def test_update_users_data_for_multiple_users_works_correctly(self): [self.modifiable_new_user_data, modifiable_new_user_data_2]) # Post-checking. - user_auth_details_models = [ - { + user_auth_details_models = [] + for model in auth_models.UserAuthDetailsModel.get_multi( + profile_user_ids + ): + # Ruling out the possibility of None for mypy type checking. + assert model is not None + user_auth_details_models.append({ 'id': model.id, 'auth_id': model.gae_id, 'parent_user_id': model.parent_user_id - } for model in - auth_models.UserAuthDetailsModel.get_multi(profile_user_ids) - ] + }) expected_auth_details_output = [ { @@ -1201,14 +1795,17 @@ def test_update_users_data_for_multiple_users_works_correctly(self): self.assertItemsEqual( expected_auth_details_output, user_auth_details_models) - user_settings_models = [ - { - 'id': model.id, - 'display_alias': model.display_alias, - 'pin': model.pin - } for model in - user_models.UserSettingsModel.get_multi(profile_user_ids) - ] + user_settings_models = [] + for model_setting_model in user_models.UserSettingsModel.get_multi( + profile_user_ids + ): + # Ruling out the possibility of None for mypy type checking. 
+ assert model_setting_model is not None + user_settings_models.append({ + 'id': model_setting_model.id, + 'display_alias': model_setting_model.display_alias, + 'pin': model_setting_model.pin + }) expected_user_settings_output = [ { @@ -1225,7 +1822,9 @@ def test_update_users_data_for_multiple_users_works_correctly(self): self.assertItemsEqual( expected_user_settings_output, user_settings_models) - def test_mark_user_for_deletion_deletes_user_settings(self): + def test_mark_user_for_deletion_marks_user_settings_as_deleted( + self + ) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -1234,14 +1833,20 @@ def test_mark_user_for_deletion_deletes_user_settings(self): user_services.set_username(user_id, username) user_settings = user_services.get_user_settings_by_auth_id(auth_id) + # Ruling out the possibility of None for mypy type checking. + assert user_settings is not None self.assertFalse(user_settings.deleted) user_services.mark_user_for_deletion(user_id) user_settings = user_services.get_user_settings_by_auth_id(auth_id) - self.assertIsNone(user_settings) + # Ruling out the possibility of None for mypy type checking. 
+ assert user_settings is not None + self.assertTrue(user_settings.deleted) - def test_mark_user_for_deletion_deletes_user_auth_details_entry(self): + def test_mark_user_for_deletion_deletes_user_auth_details_entry( + self + ) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -1257,7 +1862,9 @@ def test_mark_user_for_deletion_deletes_user_auth_details_entry(self): user_auth_details = auth_models.UserAuthDetailsModel.get_by_id(user_id) self.assertTrue(user_auth_details.deleted) - def test_mark_user_for_deletion_deletes_user_identifiers_entry(self): + def test_mark_user_for_deletion_deletes_user_identifiers_entry( + self + ) -> None: auth_id = 'test_id' username = 'testname' user_email = 'test@email.com' @@ -1271,7 +1878,7 @@ def test_mark_user_for_deletion_deletes_user_identifiers_entry(self): self.assertIsNone(auth_services.get_auth_id_from_user_id(user_id)) - def test_get_current_date_as_string(self): + def test_get_current_date_as_string(self) -> None: custom_datetimes = [ datetime.date(2011, 1, 1), datetime.date(2012, 2, 28) @@ -1291,7 +1898,7 @@ def test_get_current_date_as_string(self): self.assertEqual(datetime_strings[0], '2011-01-01') self.assertEqual(datetime_strings[1], '2012-02-28') - def test_parse_date_from_string(self): + def test_parse_date_from_string(self) -> None: self.assertEqual( user_services.parse_date_from_string('2016-06-30'), {'year': 2016, 'month': 6, 'day': 30}) @@ -1299,14 +1906,14 @@ def test_parse_date_from_string(self): user_services.parse_date_from_string('2016-07-05'), {'year': 2016, 'month': 7, 'day': 5}) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( ValueError, 'time data \'2016-13-01\' does not match format \'%Y-%m-%d\''): user_services.parse_date_from_string('2016-13-01') - with self.assertRaisesRegexp(ValueError, 'unconverted data remains: 2'): + with self.assertRaisesRegex(ValueError, 'unconverted data remains: 2'): user_services.parse_date_from_string('2016-03-32') - def 
test_record_user_started_state_translation_tutorial(self): + def test_record_user_started_state_translation_tutorial(self) -> None: # Testing of the user translation tutorial firsttime state storage. auth_id = 'someUser' username = 'username' @@ -1321,6 +1928,706 @@ def test_record_user_started_state_translation_tutorial(self): self.assertTrue( user_settings.last_started_state_translation_tutorial is not None) + def test_get_human_readable_user_ids(self) -> None: + auth_ids = ['regular_user', 'user_being_deleted', 'no_username_user'] + user_emails = [ + 'reuglar_user@example.com', + 'user_being_deleted@example.com', + 'no_username_user@example.com'] + user_ids = [] + + for i, auth_id in enumerate(auth_ids): + user_ids.append(user_services.create_new_user( + auth_id, user_emails[i]).user_id) + + user_services.set_username(user_ids[0], 'regularUsername') + user_services.mark_user_for_deletion(user_ids[1]) + + user_settings_for_no_username = user_services.get_user_settings( + user_ids[2]) + + usernames = [ + 'regularUsername', + user_services.LABEL_FOR_USER_BEING_DELETED, + ( + '[Awaiting user registration: %s]' % + user_settings_for_no_username.truncated_email + ) + ] + + self.assertEqual( + usernames, + user_services.get_human_readable_user_ids(user_ids) + ) + + def test_get_human_readable_user_ids_for_no_user_raises_error( + self + ) -> None: + with self.assertRaisesRegex(Exception, 'User not found.'): + user_services.get_human_readable_user_ids(['unregistered_id']) + + def test_record_user_started_state_editor_tutorial(self) -> None: + user_id = user_services.create_new_user( + 'someUser', + 'user@example.com').user_id + user_services.record_user_started_state_editor_tutorial(user_id) + user_settings = user_services.get_user_settings(user_id) + prev_started_state = user_settings.last_started_state_editor_tutorial + + self.assertEqual( + user_settings.last_started_state_editor_tutorial, + prev_started_state + ) + + 
user_services.record_user_started_state_editor_tutorial(user_id) + user_settings = user_services.get_user_settings(user_id) + + self.assertGreaterEqual( + user_settings.last_started_state_editor_tutorial, + prev_started_state + ) + + def test_create_user_contributions(self) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + created_exp_ids = ['exp1', 'exp2', 'exp3'] + edited_exp_ids = ['exp2', 'exp3', 'exp4'] + + user_id = user_services.create_new_user(auth_id, user_email).user_id + + pre_add_contributions = user_services.get_user_contributions( + user_id, strict=True + ) + + self.assertEqual( + [], + pre_add_contributions.created_exploration_ids) + + self.assertEqual( + [], + pre_add_contributions.edited_exploration_ids) + + for created_exp_id in created_exp_ids: + pre_add_contributions.add_created_exploration_id( + created_exp_id + ) + for edited_exp_id in edited_exp_ids: + pre_add_contributions.add_edited_exploration_id( + edited_exp_id + ) + user_services.save_user_contributions(pre_add_contributions) + + contributions = user_services.get_user_contributions( + user_id, strict=True + ) + + self.assertEqual( + ['exp1', 'exp2', 'exp3'], + contributions.created_exploration_ids) + + self.assertEqual( + ['exp2', 'exp3', 'exp4'], + contributions.edited_exploration_ids) + + def test_update_user_contributions(self) -> None: + created_exp_ids = ['exp1', 'exp2', 'exp3'] + edited_exp_ids = ['exp2', 'exp3', 'exp4'] + + user_id = user_services.create_new_user( + 'someUser', + 'user@example.com').user_id + pre_add_contributions = user_services.get_user_contributions( + user_id, strict=True + ) + self.assertEqual( + [], + pre_add_contributions.created_exploration_ids) + self.assertEqual( + [], + pre_add_contributions.edited_exploration_ids) + + user_services.update_user_contributions( + user_id, + created_exp_ids, + edited_exp_ids) + contributions = user_services.get_user_contributions( + user_id, strict=True + ) + self.assertEqual( + ['exp1', 'exp2', 'exp3'], 
+ contributions.created_exploration_ids) + self.assertEqual( + ['exp2', 'exp3', 'exp4'], + contributions.edited_exploration_ids) + + def test_update_user_contributions_for_invalid_user_raises_error( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'User contributions model for user %s does not exist.' + % 'non_existent_user_id' + ): + user_services.update_user_contributions( + 'non_existent_user_id', + ['exp1', 'exp2', 'exp3'], + ['exp2', 'exp3', 'exp4']) + + def test_add_created_exploration_id(self) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + contributions = user_services.get_user_contributions( + user_id, strict=True + ) + self.assertNotIn('exp1', contributions.created_exploration_ids) + + contributions.add_created_exploration_id('exp1') + user_services.save_user_contributions(contributions) + contributions = user_services.get_user_contributions( + user_id, strict=True + ) + self.assertIn('exp1', contributions.created_exploration_ids) + + def test_add_edited_exploration_id(self) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + contributions = user_services.get_user_contributions( + user_id, strict=True + ) + self.assertNotIn('exp1', contributions.edited_exploration_ids) + + contributions.add_edited_exploration_id('exp1') + user_services.save_user_contributions(contributions) + contributions = user_services.get_user_contributions( + user_id, strict=True + ) + self.assertIn('exp1', contributions.edited_exploration_ids) + + def test_is_moderator(self) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + self.assertFalse(user_services.is_moderator(user_id)) + + user_services.add_user_role(user_id, feconf.ROLE_ID_MODERATOR) + self.assertTrue(user_services.is_moderator(user_id)) + 
+ def test_is_curriculum_admin(self) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + self.assertFalse(user_services.is_curriculum_admin(user_id)) + + user_services.add_user_role(user_id, feconf.ROLE_ID_CURRICULUM_ADMIN) + self.assertTrue(user_services.is_curriculum_admin(user_id)) + + def test_is_topic_manager(self) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + self.assertFalse(user_services.is_topic_manager(user_id)) + + user_services.add_user_role(user_id, feconf.ROLE_ID_TOPIC_MANAGER) + self.assertTrue(user_services.is_topic_manager(user_id)) + + def test_create_login_url(self) -> None: + return_url = 'sample_url' + expected_url = '/login?return_url=sample_url' + login_url = user_services.create_login_url(return_url) + + self.assertEqual(expected_url, login_url) + + def test_set_user_has_viewed_lesson_info_modal_once(self) -> None: + auth_id = 'test_id' + username = 'testname' + user_email = 'test@email.com' + user_id = user_services.create_new_user(auth_id, user_email).user_id + user_services.set_username(user_id, username) + + user_settings_model = user_models.UserSettingsModel.get_by_id(user_id) + self.assertFalse( + user_settings_model.has_viewed_lesson_info_modal_once) + + user_services.set_user_has_viewed_lesson_info_modal_once(user_id) + + user_settings_model = user_models.UserSettingsModel.get_by_id(user_id) + self.assertTrue( + user_settings_model.has_viewed_lesson_info_modal_once) + + def test_log_username_change(self) -> None: + committer_id = 'someUser' + + all_models_before_update = ( + audit_models.UsernameChangeAuditModel.get_all()) + self.assertEqual(all_models_before_update.count(), 0) + + user_services.log_username_change( + committer_id, 'oldUsername', 'newUsername') + + all_models_after_update = ( + audit_models.UsernameChangeAuditModel.get_all()) + 
self.assertEqual(all_models_after_update.count(), 1) + + user_audit_model = all_models_after_update.get() + # Ruling out the possibility of None for mypy type checking. + assert user_audit_model is not None + self.assertEqual(user_audit_model.committer_id, committer_id) + self.assertEqual(user_audit_model.old_username, 'oldUsername') + self.assertEqual(user_audit_model.new_username, 'newUsername') + + def test_raises_error_if_none_destination_is_provided_for_checkpoint( + self + ) -> None: + state = state_domain.State.create_default_state( + 'state_1', 'content_0', 'default_outcome_1') + state_answer_group: List[state_domain.AnswerGroup] = [ + state_domain.AnswerGroup( + state_domain.Outcome( + None, None, state_domain.SubtitledHtml( + 'feedback_2', '

    state outcome html

    '), + False, [], None, None), + [ + state_domain.RuleSpec( + 'Equals', { + 'x': { + 'contentId': 'rule_input_3', + 'normalizedStrSet': ['Test rule spec.'] + }}) + ], + [], + None + ) + ] + state.update_interaction_id('TextInput') + state.update_interaction_answer_groups(state_answer_group) + states = {'Introduction': state} + + with self.assertRaisesRegex( + Exception, + 'States with a null destination can never be a checkpoint.' + ): + user_services.get_checkpoints_in_order('Introduction', states) + + state_answer_group = [ + state_domain.AnswerGroup( + state_domain.Outcome( + 'destination', None, state_domain.SubtitledHtml( + 'feedback_4', '

    state outcome html

    '), + False, [], None, None), + [ + state_domain.RuleSpec( + 'Equals', { + 'x': { + 'contentId': 'rule_input_5', + 'normalizedStrSet': ['Test rule spec.'] + }}) + ], + [], + None + ) + ] + state.update_interaction_answer_groups(state_answer_group) + # Ruling out the possibility of None for mypy type checking. + assert state.interaction.default_outcome is not None + state.interaction.default_outcome.dest = None + + with self.assertRaisesRegex( + Exception, + 'States with a null destination can never be a checkpoint' + ): + user_services.get_checkpoints_in_order('Introduction', states) + + def test_raises_error_if_sync_logged_in_learner_checkpoint_with_invalid_id( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'No ExplorationUserDataModel found for the given user and ' + 'exploration ids: invalid_user_id, exp_1' + ): + user_services.sync_logged_in_learner_checkpoint_progress_with_current_exp_version( # pylint: disable=line-too-long + 'invalid_user_id', 'exp_1', strict=True + ) + + +class UserCheckpointProgressUpdateTests(test_utils.GenericTestBase): + """Tests whether user checkpoint progress is updated correctly""" + + EXP_ID: Final = 'exp_id0' + + SAMPLE_EXPLORATION_YAML: Final = ( +""" +author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Category +correctness_feedback_enabled: false +edits_allowed: true +init_state_name: Introduction +language_code: en +objective: '' +param_changes: [] +param_specs: {} +schema_version: 47 +states: + Introduction: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: New state + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + contentId: rule_input_3 + normalizedStrSet: + - InputString + rule_type: Equals + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + catchMisspellings: + value: false + default_outcome: + dest: Introduction + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: + - hint_content: + content_id: hint_1 + html:

    hint one,

    + id: TextInput + solution: + answer_is_exclusive: false + correct_answer: helloworld! + explanation: + content_id: solution + html:

    hello_world is a string

    + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_placeholder_2: {} + content: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: introduction_state.mp3 + needs_update: false + default_outcome: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: unknown_answer_feedback.mp3 + needs_update: false + feedback_1: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: correct_answer_feedback.mp3 + needs_update: false + hint_1: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: answer_hint.mp3 + needs_update: false + rule_input_3: {} + solution: + en: + duration_secs: 0.0 + file_size_bytes: 99999 + filename: answer_solution.mp3 + needs_update: false + solicit_answer_details: false + card_is_checkpoint: true + written_translations: + translations_mapping: + ca_placeholder_2: {} + content: {} + default_outcome: {} + feedback_1: {} + hint_1: {} + rule_input_3: {} + solution: {} + New state: + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: [] + confirmed_unclassified_answers: [] + customization_args: + placeholder: + value: + content_id: ca_placeholder_2 + unicode_str: '' + rows: + value: 1 + default_outcome: + dest: New state + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: TextInput + solution: null + linked_skill_id: null + next_content_id_index: 0 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + content: {} + default_outcome: {} + ca_placeholder_2: {} + solicit_answer_details: false + card_is_checkpoint: false + written_translations: + translations_mapping: + content: {} + default_outcome: {} +states_schema_version: 42 +tags: [] +title: Title +""") + + def setUp(self) -> None: + super().setUp() + self.signup(self.OWNER_EMAIL, 
self.OWNER_USERNAME) + self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) + self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) + self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL) + + exp_services.save_new_exploration_from_yaml_and_assets( + self.owner_id, self.SAMPLE_EXPLORATION_YAML, self.EXP_ID, []) + self.exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID) + + def test_user_checkpoint_progress_is_updated_correctly(self) -> None: + self.login(self.VIEWER_EMAIL) + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + self.assertIsNone(exp_user_data) + + # First checkpoint reached. + user_services.update_learner_checkpoint_progress( + self.viewer_id, self.EXP_ID, 'Introduction', 1) + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, 1) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, + 'Introduction') + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, 1) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, + 'Introduction') + + # Make 'New state' a checkpoint. + # Now version of the exploration becomes 2. + change_list = _get_change_list( + 'New state', + exp_domain.STATE_PROPERTY_CARD_IS_CHECKPOINT, + True) + exp_services.update_exploration( + self.owner_id, self.EXP_ID, change_list, '') + + # Second checkpoint reached. + user_services.update_learner_checkpoint_progress( + self.viewer_id, self.EXP_ID, 'New state', 2) + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_user_data is not None + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, 2) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, + 'New state') + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, 2) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, + 'New state') + + # Restart the exploration. + user_services.clear_learner_checkpoint_progress( + self.viewer_id, self.EXP_ID) + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, 2) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, 'New state') + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, None) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, None) + + # Unmark 'New state' as a checkpoint. + # Now version of the exploration becomes 3. + change_list = _get_change_list( + 'New state', + exp_domain.STATE_PROPERTY_CARD_IS_CHECKPOINT, + False) + exp_services.update_exploration( + self.owner_id, self.EXP_ID, change_list, '') + + # First checkpoint reached again. + # Since the previously furthest reached checkpoint 'New state' doesn't + # exist in the current exploration, the first checkpoint behind + # 'New state' that exists in current exploration ('Introduction' + # state in this case) becomes the new furthest reached checkpoint. + user_services.update_learner_checkpoint_progress( + self.viewer_id, self.EXP_ID, 'Introduction', 3) + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_user_data is not None + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, 3) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, + 'Introduction') + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, 3) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, + 'Introduction') + + # Change state name of 'Introduction' state. + # Now version of exploration becomes 4. + exp_services.update_exploration( + self.owner_id, self.EXP_ID, + [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Introduction', + 'new_state_name': 'Intro', + })], 'Change state name' + ) + + # First checkpoint reached again. + user_services.update_learner_checkpoint_progress( + self.viewer_id, self.EXP_ID, 'Intro', 4) + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, 4) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, 'Intro') + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, 4) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, 'Intro') + + self.logout() + + def test_sync_logged_in_learner_checkpoint_progress_with_current_exp_version( # pylint: disable=line-too-long + self + ) -> None: + self.login(self.VIEWER_EMAIL) + exp_user_data = ( + user_services.sync_logged_in_learner_checkpoint_progress_with_current_exp_version( # pylint: disable=line-too-long + self.viewer_id, self.EXP_ID)) + self.assertIsNone(exp_user_data) + + # First checkpoint reached. 
+ user_services.update_learner_checkpoint_progress( + self.viewer_id, self.EXP_ID, 'Introduction', 1) + exp_user_data = exp_fetchers.get_exploration_user_data( + self.viewer_id, self.EXP_ID) + # Ruling out the possibility of None for mypy type checking. + assert exp_user_data is not None + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, 1) + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_state_name, + 'Introduction') + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, 1) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_state_name, + 'Introduction') + + # Change state name of 'Introduction' state. + # Now version of exploration becomes 2. + exp_services.update_exploration( + self.owner_id, self.EXP_ID, + [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Introduction', + 'new_state_name': 'Intro', + })], 'Change state name' + ) + + # This method is called when exploration data is fetched since now + # latest exploration version > most recently interacted exploration + # version. + # Working - First the furthest reached checkpoint ('Introduction' in + # this case) is searched in current exploration. It will not be found + # since its state name is changed to 'Intro'. It will then search for + # an checkpoint that had been reached in older exploration and also + # exists in current exploration. If such checkpoint is not found, + # furthest reached checkpoint is set to None. Similar workflow is + # carried out for most recently reached checkpoint. + exp_user_data = ( + user_services.sync_logged_in_learner_checkpoint_progress_with_current_exp_version( # pylint: disable=line-too-long + self.viewer_id, self.EXP_ID + ) + ) + # Ruling out the possibility of None for mypy type checking. 
+ assert exp_user_data is not None + self.assertEqual( + exp_user_data.furthest_reached_checkpoint_exp_version, 2) + self.assertIsNone( + exp_user_data.furthest_reached_checkpoint_state_name) + self.assertEqual( + exp_user_data.most_recently_reached_checkpoint_exp_version, 2) + self.assertIsNone( + exp_user_data.most_recently_reached_checkpoint_state_name) + class UpdateContributionMsecTests(test_utils.GenericTestBase): """Test whether contribution date changes with publication of @@ -1328,14 +2635,14 @@ class UpdateContributionMsecTests(test_utils.GenericTestBase): exploration/collection. """ - EXP_ID = 'test_exp' - COL_ID = 'test_col' - COLLECTION_TITLE = 'title' - COLLECTION_CATEGORY = 'category' - COLLECTION_OBJECTIVE = 'objective' + EXP_ID: Final = 'test_exp' + COL_ID: Final = 'test_col' + COLLECTION_TITLE: Final = 'title' + COLLECTION_CATEGORY: Final = 'category' + COLLECTION_OBJECTIVE: Final = 'objective' - def setUp(self): - super(UpdateContributionMsecTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) @@ -1353,7 +2660,7 @@ def setUp(self): self.admin = user_services.get_user_actions_info(self.admin_id) self.owner = user_services.get_user_actions_info(self.owner_id) - def test_contribution_msec_updates_on_published_explorations(self): + def test_contribution_msec_updates_on_published_explorations(self) -> None: exploration = self.save_new_valid_exploration( self.EXP_ID, self.admin_id, end_state_name='End') init_state_name = exploration.init_state_name @@ -1393,7 +2700,9 @@ def test_contribution_msec_updates_on_published_explorations(self): self.assertIsNotNone(user_services.get_user_settings( self.editor_id).first_contribution_msec) - def test_contribution_msec_does_not_update_until_exp_is_published(self): + def test_contribution_msec_does_not_update_until_exp_is_published( + self + ) -> None: 
exploration = self.save_new_valid_exploration( self.EXP_ID, self.admin_id, end_state_name='End') init_state_name = exploration.init_state_name @@ -1450,7 +2759,9 @@ def test_contribution_msec_does_not_update_until_exp_is_published(self): self.assertIsNotNone(user_services.get_user_settings( self.editor_id).first_contribution_msec) - def test_contribution_msec_does_not_change_if_no_contribution_to_exp(self): + def test_contribution_msec_does_not_change_if_no_contribution_to_exp( + self + ) -> None: self.save_new_valid_exploration( self.EXP_ID, self.admin_id, end_state_name='End') rights_manager.assign_role_for_exploration( @@ -1465,7 +2776,7 @@ def test_contribution_msec_does_not_change_if_no_contribution_to_exp(self): self.assertIsNone(user_services.get_user_settings( self.editor_id).first_contribution_msec) - def test_contribution_msec_does_not_change_if_exp_unpublished(self): + def test_contribution_msec_does_not_change_if_exp_unpublished(self) -> None: self.save_new_valid_exploration( self.EXP_ID, self.owner_id, end_state_name='End') @@ -1478,7 +2789,7 @@ def test_contribution_msec_does_not_change_if_exp_unpublished(self): self.assertIsNotNone(user_services.get_user_settings( self.owner_id).first_contribution_msec) - def test_contribution_msec_updates_on_published_collections(self): + def test_contribution_msec_updates_on_published_collections(self) -> None: self.save_new_valid_collection( self.COL_ID, self.admin_id, title=self.COLLECTION_TITLE, category=self.COLLECTION_CATEGORY, @@ -1511,7 +2822,8 @@ def test_contribution_msec_updates_on_published_collections(self): self.editor_id).first_contribution_msec) def test_contribution_msec_does_not_update_until_collection_is_published( - self): + self + ) -> None: self.save_new_valid_collection( self.COL_ID, self.admin_id, title=self.COLLECTION_TITLE, category=self.COLLECTION_CATEGORY, @@ -1557,7 +2869,8 @@ def test_contribution_msec_does_not_update_until_collection_is_published( 
self.editor_id).first_contribution_msec) def test_contribution_msec_does_not_change_if_no_contribution_to_collection( - self): + self + ) -> None: self.save_new_valid_collection( self.COL_ID, self.admin_id, title=self.COLLECTION_TITLE, category=self.COLLECTION_CATEGORY, @@ -1575,7 +2888,9 @@ def test_contribution_msec_does_not_change_if_no_contribution_to_collection( self.assertIsNone(user_services.get_user_settings( self.editor_id).first_contribution_msec) - def test_contribution_msec_does_not_change_if_collection_unpublished(self): + def test_contribution_msec_does_not_change_if_collection_unpublished( + self + ) -> None: self.save_new_valid_collection( self.COL_ID, self.owner_id, title=self.COLLECTION_TITLE, category=self.COLLECTION_CATEGORY, @@ -1596,23 +2911,21 @@ class UserDashboardStatsTests(test_utils.GenericTestBase): are registered. """ - OWNER_EMAIL = 'owner@example.com' - OWNER_USERNAME = 'owner' - EXP_ID = 'exp1' + EXP_ID: Final = 'exp1' - USER_SESSION_ID = 'session1' + USER_SESSION_ID: Final = 'session1' - CURRENT_DATE_AS_STRING = user_services.get_current_date_as_string() + CURRENT_DATE_AS_STRING: Final = user_services.get_current_date_as_string() - def setUp(self): - super(UserDashboardStatsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) - def mock_get_current_date_as_string(self): + def mock_get_current_date_as_string(self) -> str: return self.CURRENT_DATE_AS_STRING - def test_get_user_dashboard_stats(self): + def test_get_user_dashboard_stats(self) -> None: exploration = self.save_new_valid_exploration( self.EXP_ID, self.owner_id, end_state_name='End') init_state_name = exploration.init_state_name @@ -1634,7 +2947,7 @@ def test_get_user_dashboard_stats(self): 'average_ratings': None }) - def test_get_weekly_dashboard_stats_when_stats_model_is_none(self): + def 
test_get_weekly_dashboard_stats_when_stats_model_is_none(self) -> None: exploration = self.save_new_valid_exploration( self.EXP_ID, self.owner_id, end_state_name='End') init_state_name = exploration.init_state_name @@ -1642,7 +2955,7 @@ def test_get_weekly_dashboard_stats_when_stats_model_is_none(self): self.EXP_ID, 1, init_state_name, self.USER_SESSION_ID, {}, feconf.PLAY_TYPE_NORMAL) self.assertEqual( - user_services.get_weekly_dashboard_stats(self.owner_id), None) + user_services.get_weekly_dashboard_stats(self.owner_id), []) self.assertEqual( user_services.get_last_week_dashboard_stats(self.owner_id), None) @@ -1660,7 +2973,7 @@ def test_get_weekly_dashboard_stats_when_stats_model_is_none(self): } }]) - def test_get_weekly_dashboard_stats(self): + def test_get_weekly_dashboard_stats(self) -> None: exploration = self.save_new_valid_exploration( self.EXP_ID, self.owner_id, end_state_name='End') init_state_name = exploration.init_state_name @@ -1676,14 +2989,14 @@ def test_get_weekly_dashboard_stats(self): }) self.assertEqual( - user_services.get_weekly_dashboard_stats(self.owner_id), None) + user_services.get_weekly_dashboard_stats(self.owner_id), []) self.assertEqual( user_services.get_last_week_dashboard_stats(self.owner_id), None) self.process_and_flush_pending_tasks() self.assertEqual( - user_services.get_weekly_dashboard_stats(self.owner_id), None) + user_services.get_weekly_dashboard_stats(self.owner_id), []) self.assertEqual( user_services.get_last_week_dashboard_stats(self.owner_id), None) @@ -1710,12 +3023,92 @@ def test_get_weekly_dashboard_stats(self): } }) + def test_migrate_dashboard_stats_to_latest_schema_raises_error( + self + ) -> None: + user_id = 'id_x' + user_stats_model = user_models.UserStatsModel.get_or_create(user_id) + user_stats_model.schema_version = 2 + error_msg = ( + 'Sorry, we can only process v1-v%d dashboard stats schemas at ' + 'present.' 
% feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION) + with self.assertRaisesRegex(Exception, error_msg): + user_services.migrate_dashboard_stats_to_latest_schema( + user_stats_model) + + def test_get_user_impact_score_with_no_user_stats_model_returns_zero( + self + ) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + impact_score = user_services.get_user_impact_score(user_id) + + self.assertEqual(0, impact_score) + + def test_get_user_impact_score(self) -> None: + expected_impact_score = 3 + with self.swap( + user_models.UserStatsModel, 'impact_score', + expected_impact_score + ): + impact_score_for_user_with_no_activity = ( + user_services.get_user_impact_score(self.owner_id)) + self.assertEqual(impact_score_for_user_with_no_activity, 0) + + exploration = self.save_new_valid_exploration( + self.EXP_ID, self.owner_id, end_state_name='End') + init_state_name = exploration.init_state_name + event_services.StartExplorationEventHandler.record( + self.EXP_ID, 1, init_state_name, self.USER_SESSION_ID, {}, + feconf.PLAY_TYPE_NORMAL) + event_services.StatsEventsHandler.record( + self.EXP_ID, 1, { + 'num_starts': 1, + 'num_actual_starts': 0, + 'num_completions': 0, + 'state_stats_mapping': {} + }) + + model = user_models.UserStatsModel.get_or_create(self.owner_id) + self.assertEqual(model.impact_score, expected_impact_score) + + impact_score_for_user_with_some_learner_activity = ( + user_services.get_user_impact_score(self.owner_id)) + self.assertEqual( + impact_score_for_user_with_some_learner_activity, + expected_impact_score) + + def test_get_dashboard_stats_for_user_with_no_stats_model(self) -> None: + fake_user_id = 'id_x' + stats = user_services.get_dashboard_stats(fake_user_id) + + self.assertEqual( + stats, + { + 'total_plays': 0, + 'num_ratings': 0, + 'average_ratings': None + }) + + def test_update_dashboard_stats_log_with_invalid_schema_version( + self + ) -> None: + with 
self.swap(user_models.UserStatsModel, 'schema_version', 5): + with self.assertRaisesRegex( + Exception, + 'Sorry, we can only process v1-v%d dashboard stats schemas at' + ' present.' % (feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION) + ): + user_services.update_dashboard_stats_log(self.owner_id) + class SubjectInterestsUnitTests(test_utils.GenericTestBase): """Test the update_subject_interests method.""" - def setUp(self): - super(SubjectInterestsUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.auth_id = 'someUser' self.username = 'username' self.user_email = 'user@example.com' @@ -1724,37 +3117,43 @@ def setUp(self): self.auth_id, self.user_email).user_id user_services.set_username(self.user_id, self.username) - def test_invalid_subject_interests_are_not_accepted(self): - with self.assertRaisesRegexp(utils.ValidationError, 'to be a list'): - user_services.update_subject_interests(self.user_id, 'not a list') + def test_invalid_subject_interests_are_not_accepted(self) -> None: + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + with self.assertRaisesRegex(utils.ValidationError, 'to be a list'): + user_services.update_subject_interests(self.user_id, 'not a list') # type: ignore[arg-type] - with self.assertRaisesRegexp(utils.ValidationError, 'to be a string'): - user_services.update_subject_interests(self.user_id, [1, 2, 3]) + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex(utils.ValidationError, 'to be a string'): + user_services.update_subject_interests(self.user_id, [1, 2, 3]) # type: ignore[list-item] - with self.assertRaisesRegexp(utils.ValidationError, 'to be non-empty'): + with self.assertRaisesRegex(utils.ValidationError, 'to be non-empty'): user_services.update_subject_interests(self.user_id, ['', 'ab']) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'to consist only of lowercase alphabetic characters and spaces' ): user_services.update_subject_interests(self.user_id, ['!']) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'to consist only of lowercase alphabetic characters and spaces' ): user_services.update_subject_interests( self.user_id, ['has-hyphens']) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( utils.ValidationError, 'to consist only of lowercase alphabetic characters and spaces' ): user_services.update_subject_interests( self.user_id, ['HasCapitalLetters']) - with self.assertRaisesRegexp(utils.ValidationError, 'to be distinct'): + with self.assertRaisesRegex(utils.ValidationError, 'to be distinct'): user_services.update_subject_interests(self.user_id, ['a', 'a']) # The following cases are all valid. @@ -1768,14 +3167,14 @@ class LastLoginIntegrationTests(test_utils.GenericTestBase): correctly. """ - def setUp(self): + def setUp(self) -> None: """Create exploration with two versions.""" - super(LastLoginIntegrationTests, self).setUp() + super().setUp() self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL) - def test_legacy_user(self): + def test_legacy_user(self) -> None: """Test the case of a user who existed in the system before the last-login check was introduced. 
""" @@ -1800,7 +3199,9 @@ def test_legacy_user(self): user_services.get_user_settings(self.viewer_id).last_logged_in) self.logout() - def test_last_logged_in_only_updated_if_enough_time_has_elapsed(self): + def test_last_logged_in_only_updated_if_enough_time_has_elapsed( + self + ) -> None: # The last logged-in time has already been set when the user # registered. previous_last_logged_in_datetime = ( @@ -1833,11 +3234,11 @@ class LastExplorationEditedIntegrationTests(test_utils.GenericTestBase): exploration updates correctly. """ - EXP_ID = 'exp' + EXP_ID: Final = 'exp' - def setUp(self): + def setUp(self) -> None: """Create users for creating and editing exploration.""" - super(LastExplorationEditedIntegrationTests, self).setUp() + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) @@ -1846,7 +3247,7 @@ def setUp(self): self.save_new_valid_exploration( self.EXP_ID, self.owner_id, end_state_name='End') - def test_legacy_user(self): + def test_legacy_user(self) -> None: """Test the case of a user who are editing exploration for first time after the last edited time check was introduced. """ @@ -1863,21 +3264,26 @@ def test_legacy_user(self): editor_settings = user_services.get_user_settings(self.editor_id) self.assertIsNotNone(editor_settings.last_edited_an_exploration) - def test_last_exp_edit_time_gets_updated(self): + def test_last_exp_edit_time_gets_updated(self) -> None: exp_services.update_exploration( self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({ 'cmd': 'edit_exploration_property', 'property_name': 'objective', 'new_value': 'the objective' - })], 'Test edit') + })], + 'Test edit' + ) # Decrease last exploration edited time by 13 hours. user_settings = user_services.get_user_settings(self.editor_id) + # Ruling out the possibility of None for mypy type checking. 
+ assert user_settings.last_edited_an_exploration is not None mocked_datetime_utcnow = ( user_settings.last_edited_an_exploration - datetime.timedelta(hours=13)) with self.mock_datetime_utcnow(mocked_datetime_utcnow): - user_services.record_user_edited_an_exploration(self.editor_id) + user_settings.record_user_edited_an_exploration() + user_services.save_user_settings(user_settings) editor_settings = user_services.get_user_settings(self.editor_id) previous_last_edited_an_exploration = ( @@ -1904,16 +3310,16 @@ class LastExplorationCreatedIntegrationTests(test_utils.GenericTestBase): updates correctly. """ - EXP_ID_A = 'exp_a' - EXP_ID_B = 'exp_b' + EXP_ID_A: Final = 'exp_a' + EXP_ID_B: Final = 'exp_b' - def setUp(self): + def setUp(self) -> None: """Create user for creating exploration.""" - super(LastExplorationCreatedIntegrationTests, self).setUp() + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) - def test_legacy_user(self): + def test_legacy_user(self) -> None: """Test the case of a user who are creating exploration for first time after the last edited time check was introduced. """ @@ -1926,12 +3332,14 @@ def test_legacy_user(self): owner_settings = user_services.get_user_settings(self.owner_id) self.assertIsNotNone(owner_settings.last_created_an_exploration) - def test_last_exp_edit_time_gets_updated(self): + def test_last_exp_edit_time_gets_updated(self) -> None: self.save_new_valid_exploration( self.EXP_ID_A, self.owner_id, end_state_name='End') # Decrease last exploration created time by 13 hours. user_settings = user_services.get_user_settings(self.owner_id) + # Ruling out the possibility of None for mypy type checking. 
+ assert user_settings.last_created_an_exploration is not None with self.mock_datetime_utcnow( user_settings.last_created_an_exploration - datetime.timedelta(hours=13)): @@ -1958,10 +3366,10 @@ class CommunityContributionStatsUnitTests(test_utils.GenericTestBase): stats. """ - REVIEWER_1_EMAIL = 'reviewer1@community.org' - REVIEWER_2_EMAIL = 'reviewer2@community.org' + REVIEWER_1_EMAIL: Final = 'reviewer1@community.org' + REVIEWER_2_EMAIL: Final = 'reviewer2@community.org' - def _assert_community_contribution_stats_is_in_default_state(self): + def _assert_community_contribution_stats_is_in_default_state(self) -> None: """Checks if the community contribution stats is in its default state. """ @@ -1984,9 +3392,8 @@ def _assert_community_contribution_stats_is_in_default_state(self): self.assertEqual( community_contribution_stats.question_suggestion_count, 0) - def setUp(self): - super( - CommunityContributionStatsUnitTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.REVIEWER_1_EMAIL, 'reviewer1') self.reviewer_1_id = self.get_user_id_from_email( @@ -1996,7 +3403,9 @@ def setUp(self): self.reviewer_2_id = self.get_user_id_from_email( self.REVIEWER_2_EMAIL) - def test_grant_reviewer_translation_reviewing_rights_increases_count(self): + def test_grant_reviewer_translation_reviewing_rights_increases_count( + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') @@ -2009,7 +3418,8 @@ def test_grant_reviewer_translation_reviewing_rights_increases_count(self): stats.translation_suggestion_counts_by_lang_code, {}) def test_grant_reviewer_translation_multi_reviewing_rights_increases_count( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2025,7 +3435,8 @@ def test_grant_reviewer_translation_multi_reviewing_rights_increases_count( 
stats.translation_suggestion_counts_by_lang_code, {}) def test_grant_reviewer_existing_translation_reviewing_rights_no_count_diff( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') # Assert that the translation reviewer count increased by one. @@ -2051,7 +3462,8 @@ def test_grant_reviewer_existing_translation_reviewing_rights_no_count_diff( stats.translation_suggestion_counts_by_lang_code, {}) def test_remove_all_reviewer_translation_reviewing_rights_decreases_count( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') # Assert that the translation reviewer count increased by one. @@ -2071,7 +3483,8 @@ def test_remove_all_reviewer_translation_reviewing_rights_decreases_count( self._assert_community_contribution_stats_is_in_default_state() def test_remove_some_reviewer_translation_reviewing_rights_decreases_count( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2098,7 +3511,9 @@ def test_remove_some_reviewer_translation_reviewing_rights_decreases_count( self.assertDictEqual( stats.translation_suggestion_counts_by_lang_code, {}) - def test_remove_translation_contribution_reviewer_decreases_count(self): + def test_remove_translation_contribution_reviewer_decreases_count( + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2118,7 +3533,9 @@ def test_remove_translation_contribution_reviewer_decreases_count(self): # contribution reviewer was removed. 
self._assert_community_contribution_stats_is_in_default_state() - def test_grant_reviewer_question_reviewing_rights_increases_count(self): + def test_grant_reviewer_question_reviewing_rights_increases_count( + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) stats = suggestion_services.get_community_contribution_stats() @@ -2129,7 +3546,8 @@ def test_grant_reviewer_question_reviewing_rights_increases_count(self): stats.translation_suggestion_counts_by_lang_code, {}) def test_grant_reviewer_existing_question_reviewing_rights_no_count_diff( - self): + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) # Assert that the question reviewer count increased by one. stats = suggestion_services.get_community_contribution_stats() @@ -2151,7 +3569,8 @@ def test_grant_reviewer_existing_question_reviewing_rights_no_count_diff( stats.translation_suggestion_counts_by_lang_code, {}) def test_remove_reviewer_question_reviewing_rights_decreases_count( - self): + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) # Assert that the question reviewer count increased by one. stats = suggestion_services.get_community_contribution_stats() @@ -2168,7 +3587,8 @@ def test_remove_reviewer_question_reviewing_rights_decreases_count( self._assert_community_contribution_stats_is_in_default_state() def test_remove_question_contribution_reviewer_decreases_count( - self): + self + ) -> None: user_services.allow_user_to_review_question(self.reviewer_1_id) # Assert that the question reviewer count increased by one. stats = suggestion_services.get_community_contribution_stats() @@ -2184,7 +3604,9 @@ def test_remove_question_contribution_reviewer_decreases_count( # contribution reviewer was removed. 
self._assert_community_contribution_stats_is_in_default_state() - def test_grant_reviewer_multiple_reviewing_rights_increases_counts(self): + def test_grant_reviewer_multiple_reviewing_rights_increases_counts( + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2200,7 +3622,8 @@ def test_grant_reviewer_multiple_reviewing_rights_increases_counts(self): stats.translation_suggestion_counts_by_lang_code, {}) def test_grant_multiple_reviewers_multi_reviewing_rights_increases_counts( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2222,7 +3645,8 @@ def test_grant_multiple_reviewers_multi_reviewing_rights_increases_counts( stats.translation_suggestion_counts_by_lang_code, {}) def test_remove_question_rights_from_multi_rights_reviewer_updates_count( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2249,7 +3673,8 @@ def test_remove_question_rights_from_multi_rights_reviewer_updates_count( stats.translation_suggestion_counts_by_lang_code, {}) def test_remove_translation_rights_from_multi_rights_reviewer_updates_count( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.reviewer_1_id, 'hi') user_services.allow_user_to_review_question(self.reviewer_1_id) @@ -2275,7 +3700,9 @@ def test_remove_translation_rights_from_multi_rights_reviewer_updates_count( self.assertDictEqual( stats.translation_suggestion_counts_by_lang_code, {}) - def test_remove_multi_rights_contribution_reviewer_decreases_counts(self): + def test_remove_multi_rights_contribution_reviewer_decreases_counts( + self + ) -> None: user_services.allow_user_to_review_translation_in_language( 
self.reviewer_1_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2295,7 +3722,9 @@ def test_remove_multi_rights_contribution_reviewer_decreases_counts(self): self._assert_community_contribution_stats_is_in_default_state() - def test_grant_reviewer_voiceover_reviewing_permissions_does_nothing(self): + def test_grant_reviewer_voiceover_reviewing_permissions_does_nothing( + self + ) -> None: # Granting reviewers voiceover reviewing permissions does not change the # counts because voiceover suggestions are currently not offered on the # Contributor Dashboard. @@ -2304,7 +3733,9 @@ def test_grant_reviewer_voiceover_reviewing_permissions_does_nothing(self): self._assert_community_contribution_stats_is_in_default_state() - def test_remove_reviewer_voiceover_reviewing_permissions_does_nothing(self): + def test_remove_reviewer_voiceover_reviewing_permissions_does_nothing( + self + ) -> None: # Removing reviewers voiceover reviewing permissions does not change the # counts because voiceover suggestions are currently not offered on the # Contributor Dashboard. 
@@ -2320,17 +3751,17 @@ def test_remove_reviewer_voiceover_reviewing_permissions_does_nothing(self): class UserContributionReviewRightsTests(test_utils.GenericTestBase): - TRANSLATOR_EMAIL = 'translator@community.org' - TRANSLATOR_USERNAME = 'translator' + TRANSLATOR_EMAIL: Final = 'translator@community.org' + TRANSLATOR_USERNAME: Final = 'translator' - VOICE_ARTIST_EMAIL = 'voiceartist@community.org' - VOICE_ARTIST_USERNAME = 'voiceartist' + QUESTION_REVIEWER_EMAIL: Final = 'question@community.org' + QUESTION_REVIEWER_USERNAME: Final = 'questionreviewer' - QUESTION_REVIEWER_EMAIL = 'question@community.org' - QUESTION_REVIEWER_USERNAME = 'questionreviewer' + QUESTION_SUBMITTER_EMAIL: Final = 'submitter@community.org' + QUESTION_SUBMITTER_USERNAME: Final = 'questionsubmitter' - def setUp(self): - super(UserContributionReviewRightsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.TRANSLATOR_EMAIL, self.TRANSLATOR_USERNAME) self.translator_id = self.get_user_id_from_email(self.TRANSLATOR_EMAIL) @@ -2343,7 +3774,14 @@ def setUp(self): self.question_reviewer_id = ( self.get_user_id_from_email(self.QUESTION_REVIEWER_EMAIL)) - def test_assign_user_review_translation_suggestion_in_language(self): + self.signup( + self.QUESTION_SUBMITTER_EMAIL, self.QUESTION_SUBMITTER_USERNAME) + self.question_submitter_id = ( + self.get_user_id_from_email(self.QUESTION_SUBMITTER_EMAIL)) + + def test_assign_user_review_translation_suggestion_in_language( + self + ) -> None: self.assertFalse( user_services.can_review_translation_suggestions( self.translator_id)) @@ -2355,7 +3793,9 @@ def test_assign_user_review_translation_suggestion_in_language(self): user_services.can_review_translation_suggestions( self.translator_id, language_code='hi')) - def test_translation_review_assignement_adds_language_in_sorted_order(self): + def test_translation_review_assignement_adds_language_in_sorted_order( + self + ) -> None: 
user_services.allow_user_to_review_translation_in_language( self.translator_id, 'hi') user_contribution_rights = user_services.get_user_contribution_rights( @@ -2372,19 +3812,9 @@ def test_translation_review_assignement_adds_language_in_sorted_order(self): user_contribution_rights.can_review_translation_for_language_codes, ['en', 'hi']) - def test_assign_user_review_voiceover_application_in_language(self): - self.assertFalse( - user_services.can_review_voiceover_applications( - self.voice_artist_id)) - - user_services.allow_user_to_review_voiceover_in_language( - self.voice_artist_id, 'hi') - - self.assertTrue( - user_services.can_review_voiceover_applications( - self.voice_artist_id, language_code='hi')) - - def test_voiceover_review_assignement_adds_language_in_sorted_order(self): + def test_voiceover_review_assignement_adds_language_in_sorted_order( + self + ) -> None: user_services.allow_user_to_review_voiceover_in_language( self.voice_artist_id, 'hi') user_contribution_rights = user_services.get_user_contribution_rights( @@ -2401,7 +3831,7 @@ def test_voiceover_review_assignement_adds_language_in_sorted_order(self): user_contribution_rights.can_review_voiceover_for_language_codes, ['en', 'hi']) - def test_assign_user_review_question_suggestion(self): + def test_assign_user_review_question_suggestion(self) -> None: self.assertFalse( user_services.can_review_question_suggestions(self.voice_artist_id)) @@ -2410,7 +3840,7 @@ def test_assign_user_review_question_suggestion(self): self.assertTrue( user_services.can_review_question_suggestions(self.voice_artist_id)) - def test_assign_user_submit_question_suggestion(self): + def test_assign_user_submit_question_suggestion(self) -> None: self.assertFalse( user_services.can_submit_question_suggestions(self.voice_artist_id)) @@ -2420,7 +3850,8 @@ def test_assign_user_submit_question_suggestion(self): user_services.can_submit_question_suggestions(self.voice_artist_id)) def 
test_get_users_contribution_rights_with_multiple_reviewer_user_ids( - self): + self + ) -> None: user_services.allow_user_to_review_question(self.question_reviewer_id) user_services.allow_user_to_review_translation_in_language( self.translator_id, 'hi') @@ -2440,7 +3871,8 @@ def test_get_users_contribution_rights_with_multiple_reviewer_user_ids( self.assertItemsEqual(reviewer_ids, expected_reviewer_ids) def test_get_users_contribution_rights_with_one_reviewer_user_id( - self): + self + ) -> None: user_services.allow_user_to_review_translation_in_language( self.translator_id, 'hi') user_services.allow_user_to_review_translation_in_language( @@ -2460,14 +3892,15 @@ def test_get_users_contribution_rights_with_one_reviewer_user_id( ) def test_get_users_contribution_rights_returns_empty_for_no_reviewers_ids( - self): + self + ) -> None: users_contribution_rights = ( user_services.get_users_contribution_rights([]) ) self.assertEqual(len(users_contribution_rights), 0) - def test_get_all_reviewers_contribution_rights(self): + def test_get_all_reviewers_contribution_rights(self) -> None: self.assertEqual( user_services.get_all_reviewers_contribution_rights(), []) @@ -2483,7 +3916,8 @@ def test_get_all_reviewers_contribution_rights(self): [self.voice_artist_id, self.translator_id]) def test_get_reviewer_user_ids_to_notify_when_reviewers_want_notifications( - self): + self + ) -> None: # Assert that there are no reviewers at the start. self.assertEqual( user_services.get_all_reviewers_contribution_rights(), []) @@ -2511,7 +3945,8 @@ def test_get_reviewer_user_ids_to_notify_when_reviewers_want_notifications( self.assertIn(self.translator_id, reviewer_ids_to_notify) def test_get_reviewer_user_ids_to_notify_when_reviewers_do_not_want_emails( - self): + self + ) -> None: # Assert that there are no reviewers at the start. 
self.assertEqual( user_services.get_all_reviewers_contribution_rights(), []) @@ -2537,7 +3972,8 @@ def test_get_reviewer_user_ids_to_notify_when_reviewers_do_not_want_emails( self.assertEqual(len(reviewer_ids_to_notify), 0) def test_get_reviewer_user_ids_to_notify_returns_empty_for_no_reviewers( - self): + self + ) -> None: # Assert that there are no reviewers. self.assertEqual( user_services.get_all_reviewers_contribution_rights(), []) @@ -2547,7 +3983,7 @@ def test_get_reviewer_user_ids_to_notify_returns_empty_for_no_reviewers( self.assertEqual(len(reviewer_ids_to_notify), 0) - def test_remove_translation_review_rights_in_language(self): + def test_remove_translation_review_rights_in_language(self) -> None: user_services.allow_user_to_review_translation_in_language( self.translator_id, 'hi') self.assertTrue( @@ -2560,20 +3996,7 @@ def test_remove_translation_review_rights_in_language(self): user_services.can_review_translation_suggestions( self.translator_id, language_code='hi')) - def test_remove_voiceover_review_rights_in_language(self): - user_services.allow_user_to_review_voiceover_in_language( - self.voice_artist_id, 'hi') - self.assertTrue( - user_services.can_review_voiceover_applications( - self.voice_artist_id, language_code='hi')) - user_services.remove_voiceover_review_rights_in_language( - self.voice_artist_id, 'hi') - - self.assertFalse( - user_services.can_review_voiceover_applications( - self.voice_artist_id, language_code='hi')) - - def test_remove_question_review_rights(self): + def test_remove_question_review_rights(self) -> None: user_services.allow_user_to_review_question(self.question_reviewer_id) self.assertTrue( user_services.can_review_question_suggestions( @@ -2584,35 +4007,7 @@ def test_remove_question_review_rights(self): user_services.can_review_question_suggestions( self.question_reviewer_id)) - def test_remove_contribution_reviewer(self): - user_services.allow_user_to_review_translation_in_language( - self.translator_id, 'hi') - 
user_services.allow_user_to_review_voiceover_in_language( - self.translator_id, 'hi') - user_services.allow_user_to_review_question(self.translator_id) - self.assertTrue( - user_services.can_review_translation_suggestions( - self.translator_id, language_code='hi')) - self.assertTrue( - user_services.can_review_voiceover_applications( - self.translator_id, language_code='hi')) - self.assertTrue( - user_services.can_review_question_suggestions( - self.translator_id)) - - user_services.remove_contribution_reviewer(self.translator_id) - - self.assertFalse( - user_services.can_review_translation_suggestions( - self.translator_id, language_code='hi')) - self.assertFalse( - user_services.can_review_voiceover_applications( - self.translator_id, language_code='hi')) - self.assertFalse( - user_services.can_review_question_suggestions( - self.translator_id)) - - def test_removal_of_all_review_rights_deletes_model(self): + def test_removal_of_all_review_rights_deletes_model(self) -> None: user_services.allow_user_to_review_translation_in_language( self.translator_id, 'hi') user_services.allow_user_to_review_question(self.translator_id) @@ -2631,15 +4026,26 @@ def test_removal_of_all_review_rights_deletes_model(self): self.assertTrue(right_model is None) def test_get_question_reviewer_usernames_with_lanaguge_code_raise_error( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Expected language_code to be None'): user_services.get_contributor_usernames( constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION, language_code='hi') + def test_raise_error_if_no_language_code_provided_with_translation_category( + self + ) -> None: + with self.assertRaisesRegex( + Exception, 'The language_code cannot be None'): + user_services.get_contributor_usernames( + constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION + ) + def test_get_contributor_usernames_in_voiceover_category_returns_correctly( - self): + self + ) -> None: usernames = 
user_services.get_contributor_usernames( constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER, language_code='hi') @@ -2655,8 +4061,65 @@ def test_get_contributor_usernames_in_voiceover_category_returns_correctly( self.assertEqual(usernames, [self.VOICE_ARTIST_USERNAME]) def test_get_contributor_usernames_with_invalid_category_raises( - self): - with self.assertRaisesRegexp( + self + ) -> None: + with self.assertRaisesRegex( Exception, 'Invalid category: invalid_category'): user_services.get_contributor_usernames( 'invalid_category', language_code='hi') + + def test_get_contributor_usernames_for_translation_returns_correctly( + self + ) -> None: + usernames = user_services.get_contributor_usernames( + constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION, + language_code='hi') + self.assertEqual(usernames, []) + + user_services.allow_user_to_review_translation_in_language( + self.translator_id, 'hi') + usernames = user_services.get_contributor_usernames( + constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION, + language_code='hi') + self.assertEqual(usernames, [self.TRANSLATOR_USERNAME]) + + def test_get_contributor_usernames_for_question_returns_correctly( + self + ) -> None: + usernames = user_services.get_contributor_usernames( + constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION) + self.assertEqual(usernames, []) + + user_services.allow_user_to_review_question(self.question_reviewer_id) + usernames = user_services.get_contributor_usernames( + constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION) + self.assertEqual(usernames, [self.QUESTION_REVIEWER_USERNAME]) + + def test_get_contributor_usernames_for_submit_returns_correctly( + self + ) -> None: + usernames = user_services.get_contributor_usernames( + constants.CONTRIBUTION_RIGHT_CATEGORY_SUBMIT_QUESTION) + self.assertEqual(usernames, []) + + user_services.allow_user_to_submit_question(self.question_submitter_id) + usernames = user_services.get_contributor_usernames( + 
constants.CONTRIBUTION_RIGHT_CATEGORY_SUBMIT_QUESTION) + self.assertEqual(usernames, [self.QUESTION_SUBMITTER_USERNAME]) + + def test_remove_question_submit_rights(self) -> None: + auth_id = 'someUser' + user_email = 'user@example.com' + + user_id = user_services.create_new_user(auth_id, user_email).user_id + user_services.allow_user_to_submit_question(user_id) + + pre_user_contribution_rights = ( + user_services.get_user_contribution_rights(user_id)) + self.assertTrue(pre_user_contribution_rights.can_submit_questions) + + user_services.remove_question_submit_rights(user_id) + + user_contribution_rights = ( + user_services.get_user_contribution_rights(user_id)) + self.assertFalse(user_contribution_rights.can_submit_questions) diff --git a/core/domain/value_generators_domain.py b/core/domain/value_generators_domain.py index 4e3102e0a3d6..9297e55ef48c 100644 --- a/core/domain/value_generators_domain.py +++ b/core/domain/value_generators_domain.py @@ -26,7 +26,7 @@ from core import feconf from core import utils -from typing import Any, Dict, Tuple, Type +from typing import Any, Dict, Type class BaseValueGenerator: @@ -53,7 +53,7 @@ def id(self) -> str: return self.__class__.__name__ @classmethod - def get_html_template(cls) -> bytes: + def get_html_template(cls) -> str: """Returns the HTML template for the class. Returns: @@ -61,16 +61,15 @@ def get_html_template(cls) -> bytes: """ return utils.get_file_contents(os.path.join( os.getcwd(), feconf.VALUE_GENERATORS_DIR, 'templates', - '%s.html' % cls.__name__)) + '%s.component.html' % cls.__name__)) - # Since child classes of BaseValueGenerator can use - # the 'generate_value' function with different types - # of arguments, 'args', 'kwargs' and return type - # are set to 'Any'. + # Here we use type Any because child classes of BaseValueGenerator can use + # the 'generate_value' function with different types of arguments, 'args', + # 'kwargs' and return type are set to 'Any'. 
def generate_value( self, - *args: Tuple[Any], - **kwargs: Dict[str, Any] + *args: Any, + **kwargs: Any ) -> Any: """Generates a new value, using the given customization args. diff --git a/core/domain/value_generators_domain_test.py b/core/domain/value_generators_domain_test.py index f07a6c646846..63a83105c557 100644 --- a/core/domain/value_generators_domain_test.py +++ b/core/domain/value_generators_domain_test.py @@ -24,11 +24,24 @@ from core.domain import value_generators_domain from core.tests import test_utils +from extensions.value_generators.models import generators class ValueGeneratorsUnitTests(test_utils.GenericTestBase): """Test the value generator registry.""" + def test_registry_generator_not_found(self) -> None: + """Tests that get_generator_class_by_id raises exception + when it isn't found. + """ + generator_id = 'aajfejaekj' + with self.assertRaisesRegex( + KeyError, generator_id + ): + value_generators_domain.Registry.get_generator_class_by_id( + generator_id + ) + def test_value_generator_registry(self) -> None: copier_id = 'Copier' @@ -42,20 +55,76 @@ def test_value_generator_registry(self) -> None: def test_generate_value_of_base_value_generator_raises_error(self) -> None: base_generator = value_generators_domain.BaseValueGenerator() - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, re.escape( 'generate_value() method has not yet been implemented')): base_generator.generate_value() + def test_registry_template_random_selector_contents(self) -> None: + contents_registry = ( + '\n' + '\n' + ) + class_object = value_generators_domain.Registry() + self.assertEqual( + contents_registry, + class_object.get_generator_class_by_id( + 'RandomSelector' + ).get_html_template() + ) + + def test_registry_template_copier_contents(self) -> None: + contents_registry = ( + '\n ' + '\n ' + '\n\n' + ) + class_object = value_generators_domain.Registry() + self.assertEqual( + contents_registry, + 
class_object.get_generator_class_by_id( + 'Copier' + ).get_html_template() + ) + + def test_get_value_generator_classes_not_subclass(self) -> None: + """Test that the value generator registry discovers all classes + correctly and excludes classes that are not subclasses of + BaseValueGenerator. + """ + + class MockCopier(): + """This is a dummy class for self.swap to test that the value + generator registry discovers all classes correctly and excludes + classes that are not subclasses of BaseValueGenerator. + We need to have a class in the returned list of value generators + that isn't a subclass of BaseValueGenerator to test. + """ + + pass + + module = importlib.import_module( + 'extensions.value_generators.models.generators' + ) + expected_generators = { + 'RandomSelector': type(generators.RandomSelector()) + } + with self.swap(module, 'Copier', MockCopier): + value_generators = ( + value_generators_domain.Registry.get_all_generator_classes() + ) + self.assertEqual(expected_generators, value_generators) + class ValueGeneratorNameTests(test_utils.GenericTestBase): def test_value_generator_names(self) -> None: """This function checks for duplicate value generators.""" - all_python_files = ( - self.get_all_python_files()) # type: ignore[no-untyped-call] + all_python_files = self.get_all_python_files() all_value_generators = [] for file_name in all_python_files: diff --git a/core/domain/visualization_registry.py b/core/domain/visualization_registry.py index 0315efbfa830..1bd9226f0c18 100644 --- a/core/domain/visualization_registry.py +++ b/core/domain/visualization_registry.py @@ -22,15 +22,17 @@ from extensions.visualizations import models +from typing import Dict, List, Type + class Registry: """Registry of all visualizations.""" # Dict mapping visualization class names to their classes. 
- visualizations_dict = {} + visualizations_dict: Dict[str, Type[models.BaseVisualization]] = {} @classmethod - def _refresh_registry(cls): + def _refresh_registry(cls) -> None: """Clears and adds new visualization instances to the registry.""" cls.visualizations_dict.clear() @@ -46,7 +48,9 @@ def _refresh_registry(cls): cls.visualizations_dict[clazz.__name__] = clazz @classmethod - def get_visualization_class(cls, visualization_id): + def get_visualization_class( + cls, visualization_id: str + ) -> Type[models.BaseVisualization]: """Gets a visualization class by its id (which is also its class name). The registry will refresh if the desired class is not found. If it's @@ -60,7 +64,7 @@ def get_visualization_class(cls, visualization_id): return cls.visualizations_dict[visualization_id] @classmethod - def get_all_visualization_ids(cls): + def get_all_visualization_ids(cls) -> List[str]: """Gets a visualization class by its id (which is also its class name). """ diff --git a/core/domain/visualization_registry_test.py b/core/domain/visualization_registry_test.py index d140250652ea..c7dcdbe1f4c6 100644 --- a/core/domain/visualization_registry_test.py +++ b/core/domain/visualization_registry_test.py @@ -29,31 +29,31 @@ class VisualizationRegistryUnitTests(test_utils.GenericTestBase): """Test for the visualization registry.""" - def test_visualization_registry(self): + def test_visualization_registry(self) -> None: """Sanity checks on the visualization registry.""" self.assertGreater( len(visualization_registry.Registry.get_all_visualization_ids()), 0) - def test_get_visualization_class_with_invalid_id_raises_error(self): - with self.assertRaisesRegexp( + def test_get_visualization_class_with_invalid_id_raises_error(self) -> None: + with self.assertRaisesRegex( TypeError, 'is not a valid visualization id.'): visualization_registry.Registry.get_visualization_class( 'invalid_visualization_id') - def test_visualization_class_with_invalid_option_names(self): + def 
test_visualization_class_with_invalid_option_names(self) -> None: sorted_tiles = visualization_registry.Registry.get_visualization_class( 'SortedTiles') sorted_tiles_instance = sorted_tiles('AnswerFrequencies', {}, True) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, re.escape( 'For visualization SortedTiles, expected option names ' '[\'header\', \'use_percentages\']; received names []')): sorted_tiles_instance.validate() - def test_visualization_class_with_invalid_option_value(self): + def test_visualization_class_with_invalid_option_value(self) -> None: sorted_tiles = visualization_registry.Registry.get_visualization_class( 'SortedTiles') option_names = { @@ -63,12 +63,16 @@ def test_visualization_class_with_invalid_option_value(self): sorted_tiles_instance = sorted_tiles( 'AnswerFrequencies', option_names, True) - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Expected bool, received invalid_value'): sorted_tiles_instance.validate() + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally test + # wrong inputs that we can normally catch by typing. 
def test_visualization_class_with_invalid_addressed_info_is_supported_value( - self): + self + ) -> None: sorted_tiles = visualization_registry.Registry.get_visualization_class( 'SortedTiles') option_names = { @@ -76,15 +80,15 @@ def test_visualization_class_with_invalid_addressed_info_is_supported_value( 'use_percentages': True } sorted_tiles_instance = sorted_tiles( - 'AnswerFrequencies', option_names, 'invalid_value') + 'AnswerFrequencies', option_names, 'invalid_value') # type: ignore[arg-type] - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'For visualization SortedTiles, expected a bool value for ' 'addressed_info_is_supported; received invalid_value'): sorted_tiles_instance.validate() - def test_get_all_visualization_ids(self): + def test_get_all_visualization_ids(self) -> None: visualization_ids = ( visualization_registry.Registry.get_all_visualization_ids()) expected_visualizations = ['FrequencyTable', 'ClickHexbins', @@ -96,7 +100,7 @@ def test_get_all_visualization_ids(self): class VisualizationsNameTests(test_utils.GenericTestBase): - def test_visualization_names(self): + def test_visualization_names(self) -> None: """This function checks for duplicate visualizations.""" all_python_files = self.get_all_python_files() diff --git a/core/domain/voiceover_services.py b/core/domain/voiceover_services.py deleted file mode 100644 index dcaaa3761622..000000000000 --- a/core/domain/voiceover_services.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright 2018 The Oppia Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS-IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Functions to perform actions related to voiceover application.""" - -from __future__ import annotations - -from core import feconf -from core.domain import email_manager -from core.domain import exp_fetchers -from core.domain import opportunity_services -from core.domain import rights_domain -from core.domain import rights_manager -from core.domain import suggestion_registry -from core.domain import user_services -from core.platform import models - -(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion]) - - -def _get_voiceover_application_class(target_type): - """Returns the voiceover application class for a given target type. - - Args: - target_type: str. The target type of the voiceover application. - - Returns: - class. The voiceover application class for the given target type. - - Raises: - Exception. The voiceover application target type is invalid. - """ - target_type_to_classes = ( - suggestion_registry.VOICEOVER_APPLICATION_TARGET_TYPE_TO_DOMAIN_CLASSES) - if target_type in target_type_to_classes: - return target_type_to_classes[target_type] - else: - raise Exception( - 'Invalid target type for voiceover application: %s' % target_type) - - -def _get_voiceover_application_model(voiceover_application): - """Returns the GeneralVoiceoverApplicationModel object for the give - voiceover application object. - - Args: - voiceover_application: BaseVoiceoverApplication. The voiceover - application object. - - Returns: - GeneralVoiceoverApplicationModel. The model object out of the given - application object. 
- """ - return suggestion_models.GeneralVoiceoverApplicationModel( - id=voiceover_application.voiceover_application_id, - target_type=voiceover_application.target_type, - target_id=voiceover_application.target_id, - status=voiceover_application.status, - author_id=voiceover_application.author_id, - final_reviewer_id=voiceover_application.final_reviewer_id, - language_code=voiceover_application.language_code, - filename=voiceover_application.filename, - content=voiceover_application.content, - rejection_message=voiceover_application.rejection_message) - - -def _get_voiceover_application_from_model(voiceover_application_model): - """Returns the BaseVoiceoverApplication object for the give - voiceover application model object. - - Args: - voiceover_application_model: GeneralVoiceoverApplicationModel. The - voiceover application model object. - - Returns: - BaseVoiceoverApplication. The domain object out of the given voiceover - application model object. - """ - voiceover_application_class = _get_voiceover_application_class( - voiceover_application_model.target_type) - return voiceover_application_class( - voiceover_application_model.id, - voiceover_application_model.target_id, - voiceover_application_model.status, - voiceover_application_model.author_id, - voiceover_application_model.final_reviewer_id, - voiceover_application_model.language_code, - voiceover_application_model.filename, - voiceover_application_model.content, - voiceover_application_model.rejection_message) - - -def _save_voiceover_applications(voiceover_applications): - """Saves a list of given voiceover application object in datastore. - - Args: - voiceover_applications: list(BaseVoiceoverApplication). The list of - voiceover application objects. 
- """ - voiceover_application_models = [] - for voiceover_application in voiceover_applications: - voiceover_application.validate() - voiceover_application_model = _get_voiceover_application_model( - voiceover_application) - voiceover_application_models.append(voiceover_application_model) - - suggestion_models.GeneralVoiceoverApplicationModel.update_timestamps_multi( - voiceover_application_models) - suggestion_models.GeneralVoiceoverApplicationModel.put_multi( - voiceover_application_models) - - -def get_voiceover_application_by_id(voiceover_application_id): - """Returns voiceover application model corresponding to give id. - - Args: - voiceover_application_id: str. The voiceover application id. - - Returns: - BaseVoiceoverApplication. The voiceover application object for the give - application id. - """ - voiceover_application_model = ( - suggestion_models.GeneralVoiceoverApplicationModel.get_by_id( - voiceover_application_id)) - return _get_voiceover_application_from_model(voiceover_application_model) - - -def get_reviewable_voiceover_applications(user_id): - """Returns a list of voiceover applications which the given user can review. - - Args: - user_id: str. The user ID of the reviewer. - - Returns: - list(BaseVoiceoverApplication). A list of voiceover application which - the given user can review. - """ - voiceover_application_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_reviewable_voiceover_applications(user_id)) - - return [ - _get_voiceover_application_from_model(model) for model in ( - voiceover_application_models)] - - -def get_user_submitted_voiceover_applications(user_id, status=None): - """Returns a list of voiceover application submitted by the given user which - are currently in the given status. - - Args: - user_id: str. The id of the user. - status: str|None. The status of the voiceover application. - - Returns: - BaseVoiceoverApplication). A list of voiceover application which are - submitted by the given user. 
- """ - voiceover_application_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_user_voiceover_applications(user_id, status)) - - return [ - _get_voiceover_application_from_model(model) for model in ( - voiceover_application_models)] - - -def accept_voiceover_application(voiceover_application_id, reviewer_id): - """Accept the voiceover application of given voiceover application id. - - Args: - voiceover_application_id: str. The id of the voiceover application which - need to be accepted. - reviewer_id: str. The user ID of the reviewer. - """ - voiceover_application = get_voiceover_application_by_id( - voiceover_application_id) - if reviewer_id == voiceover_application.author_id: - raise Exception( - 'Applicants are not allowed to review their own ' - 'voiceover application.') - - reviewer = user_services.get_user_actions_info(reviewer_id) - - voiceover_application.accept(reviewer_id) - - _save_voiceover_applications([voiceover_application]) - - if voiceover_application.target_type == feconf.ENTITY_TYPE_EXPLORATION: - rights_manager.assign_role_for_exploration( - reviewer, voiceover_application.target_id, - voiceover_application.author_id, rights_domain.ROLE_VOICE_ARTIST) - opportunity_services.update_exploration_voiceover_opportunities( - voiceover_application.target_id, - voiceover_application.language_code) - opportunities = ( - opportunity_services.get_exploration_opportunity_summaries_by_ids([ - voiceover_application.target_id])) - email_manager.send_accepted_voiceover_application_email( - voiceover_application.author_id, - opportunities[voiceover_application.target_id].chapter_title, - voiceover_application.language_code) - # TODO(#7969): Add notification to the user's dashboard for the accepted - # voiceover application. 
- - voiceover_application_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_voiceover_applications( - voiceover_application.target_type, voiceover_application.target_id, - voiceover_application.language_code)) - rejected_voiceover_applications = [] - for model in voiceover_application_models: - voiceover_application = _get_voiceover_application_from_model( - model) - if not voiceover_application.is_handled: - voiceover_application.reject( - reviewer_id, 'We have to reject your application as another ' - 'application for the same opportunity got accepted.') - rejected_voiceover_applications.append(voiceover_application) - - _save_voiceover_applications(rejected_voiceover_applications) - - -def reject_voiceover_application( - voiceover_application_id, reviewer_id, rejection_message): - """Rejects the voiceover application of given voiceover application id. - - Args: - voiceover_application_id: str. The is of the voiceover application which - need to be rejected. - reviewer_id: str. The user ID of the reviewer. - rejection_message: str. The plain text message submitted by the - reviewer while rejecting the application. 
- """ - voiceover_application = get_voiceover_application_by_id( - voiceover_application_id) - if reviewer_id == voiceover_application.author_id: - raise Exception( - 'Applicants are not allowed to review their own ' - 'voiceover application.') - - reviewer = user_services.get_user_actions_info(reviewer_id) - - voiceover_application.reject(reviewer.user_id, rejection_message) - _save_voiceover_applications([voiceover_application]) - - if voiceover_application.target_type == feconf.ENTITY_TYPE_EXPLORATION: - opportunities = ( - opportunity_services.get_exploration_opportunity_summaries_by_ids([ - voiceover_application.target_id])) - email_manager.send_rejected_voiceover_application_email( - voiceover_application.author_id, - opportunities[voiceover_application.target_id].chapter_title, - voiceover_application.language_code, rejection_message) - # TODO(#7969): Add notification to the user's dashboard for the accepted - # voiceover application. - - -def create_new_voiceover_application( - target_type, target_id, language_code, content, filename, author_id): - """Creates a new voiceover application withe the given data. - - Args: - target_type: str. The string representing the type of the target entity. - target_id: str. The ID of the target entity. - language_code: str. The language code for the voiceover application. - content: str. The html content which is voiceover in the - application. - filename: str. The filename of the voiceover audio. - author_id: str. The ID of the user who submitted the voiceover - application. 
- """ - voiceover_application_class = _get_voiceover_application_class(target_type) - voiceover_application_id = ( - suggestion_models.GeneralVoiceoverApplicationModel.get_new_id('')) - voiceover_application = voiceover_application_class( - voiceover_application_id, target_id, suggestion_models.STATUS_IN_REVIEW, - author_id, None, language_code, filename, content, None) - - _save_voiceover_applications([voiceover_application]) - - -def get_text_to_create_voiceover_application( - target_type, target_id, language_code): - """Returns a text to voiceover for a voiceover application. - - Args: - target_type: str. The string representing the type of the target entity. - target_id: str. The ID of the target entity. - language_code: str. The language code for the content. - - Returns: - str. The text which can be voiceover for a voiceover application. - """ - if target_type == feconf.ENTITY_TYPE_EXPLORATION: - exploration = exp_fetchers.get_exploration_by_id(target_id) - init_state_name = exploration.init_state_name - state = exploration.states[init_state_name] - if exploration.language_code == language_code: - return state.content.html - else: - return state.written_translations.get_translated_content( - state.content.content_id, language_code) - else: - raise Exception('Invalid target type: %s' % target_type) diff --git a/core/domain/voiceover_services_test.py b/core/domain/voiceover_services_test.py deleted file mode 100644 index 446533aa35e8..000000000000 --- a/core/domain/voiceover_services_test.py +++ /dev/null @@ -1,419 +0,0 @@ -# Copyright 2019 The Oppia Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS-IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for voiceover services.""" - -from __future__ import annotations - -from core import feconf -from core.constants import constants -from core.domain import exp_domain -from core.domain import exp_services -from core.domain import opportunity_services -from core.domain import question_services -from core.domain import rights_manager -from core.domain import story_domain -from core.domain import story_services -from core.domain import topic_domain -from core.domain import topic_services -from core.domain import user_services -from core.domain import voiceover_services -from core.platform import models -from core.tests import test_utils - -(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion]) - - -class VoiceoverApplicationServicesUnitTests(test_utils.GenericTestBase): - """Provides testing of the voiceover services.""" - - APPLICANT_USERNAME = 'applicant' - APPLICANT_EMAIL = 'applicant@example.com' - - def setUp(self): - super(VoiceoverApplicationServicesUnitTests, self).setUp() - self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) - self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) - self.signup(self.APPLICANT_EMAIL, self.APPLICANT_USERNAME) - - self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) - self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) - self.applicant_id = self.get_user_id_from_email(self.APPLICANT_EMAIL) - - self.applicant = user_services.get_user_actions_info(self.applicant_id) - - self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) - 
self.admin = user_services.get_user_actions_info(self.admin_id) - - self.TOPIC_ID = 'topic' - self.STORY_ID = 'story' - self.USER_ID = 'user' - self.SKILL_ID = 'skill' - self.QUESTION_ID = question_services.get_new_question_id() - explorations = [self.save_new_valid_exploration( - '%s' % i, - self.owner_id, - title='title %d' % i, - category='category%d' % i, - end_state_name='End State', - correctness_feedback_enabled=True - ) for i in range(2)] - - for exp in explorations: - self.publish_exploration(self.owner_id, exp.id) - - topic = topic_domain.Topic.create_default_topic( - self.TOPIC_ID, 'topic', 'abbrev', 'description') - topic.thumbnail_filename = 'thumbnail.svg' - topic.thumbnail_bg_color = '#C6DCDA' - topic.subtopics = [ - topic_domain.Subtopic( - 1, 'Title', ['skill_id_1'], 'image.svg', - constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, - 'dummy-subtopic-three')] - topic.next_subtopic_id = 2 - topic_services.save_new_topic(self.owner_id, topic) - topic_services.publish_topic(self.TOPIC_ID, self.admin_id) - - story = story_domain.Story.create_default_story( - self.STORY_ID, 'A story', 'Description', self.TOPIC_ID, - 'a-story') - story_services.save_new_story(self.owner_id, story) - topic_services.add_canonical_story( - self.owner_id, self.TOPIC_ID, self.STORY_ID) - topic_services.publish_story( - self.TOPIC_ID, self.STORY_ID, self.admin_id) - story_services.update_story( - self.owner_id, self.STORY_ID, [story_domain.StoryChange({ - 'cmd': 'add_story_node', - 'node_id': 'node_1', - 'title': 'Node1', - }), story_domain.StoryChange({ - 'cmd': 'update_story_node_property', - 'property_name': 'exploration_id', - 'node_id': 'node_1', - 'old_value': None, - 'new_value': '0' - })], 'Changes.') - - self.add_user_role( - self.CURRICULUM_ADMIN_USERNAME, feconf.ROLE_ID_VOICEOVER_ADMIN) - - def test_voiceover_application_creation(self): - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) 
- self.assertEqual(user_voiceover_applications, []) - - voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.applicant_id) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual(user_voiceover_applications[0].target_id, '0') - - def test_get_voiceover_application_from_model_with_invalid_type_raise_error( - self): - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_id', - target_type='exploration', - target_id='0', - status='review', - author_id='author_id', - final_reviewer_id=None, - language_code='en', - filename='filename.mp3', - content='

    content

    ', - rejection_message=None).put() - voiceover_application_model = ( - suggestion_models.GeneralVoiceoverApplicationModel.get_by_id( - 'application_id')) - voiceover_application_model.target_type = 'invalid_type' - voiceover_application_model.update_timestamps() - voiceover_application_model.put() - with self.assertRaisesRegexp( - Exception, - 'Invalid target type for voiceover application: invalid_type'): - voiceover_services.get_voiceover_application_by_id('application_id') - - def test_newly_created_voiceover_application_have_in_review_status(self): - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - self.assertEqual(user_voiceover_applications, []) - - voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.applicant_id) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_IN_REVIEW) - - def test_get_reviewable_voiceover_applications(self): - voiceover_applications = ( - voiceover_services.get_reviewable_voiceover_applications( - self.admin_id)) - self.assertEqual(voiceover_applications, []) - - voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.applicant_id) - - voiceover_applications = ( - voiceover_services.get_reviewable_voiceover_applications( - self.admin_id)) - self.assertEqual(len(voiceover_applications), 1) - self.assertEqual( - voiceover_applications[0].status, - suggestion_models.STATUS_IN_REVIEW) - - def test_accept_application_assigns_role_to_entity(self): - voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.applicant_id) - - 
user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_IN_REVIEW) - - voiceover_services.accept_voiceover_application( - user_voiceover_applications[0].voiceover_application_id, - self.admin_id) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id, status=suggestion_models.STATUS_ACCEPTED)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_ACCEPTED) - - exploration_rights = rights_manager.get_exploration_rights('0') - can_voiceover = rights_manager.check_can_voiceover_activity( - self.applicant, exploration_rights) - - self.assertTrue(can_voiceover) - - def test_accept_application_removes_exploration_voiceover_opportunity(self): - voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.applicant_id) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_IN_REVIEW) - - opportunities, _, more = ( - opportunity_services.get_voiceover_opportunities('en', None)) - self.assertEqual(len(opportunities), 1) - - voiceover_services.accept_voiceover_application( - user_voiceover_applications[0].voiceover_application_id, - self.admin_id) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id, status=suggestion_models.STATUS_ACCEPTED)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - 
suggestion_models.STATUS_ACCEPTED) - - opportunities, _, more = ( - opportunity_services.get_voiceover_opportunities('en', None)) - self.assertEqual(len(opportunities), 0) - self.assertFalse(more) - - def test_accept_application_removes_rejectes_other_similar_applications( - self): - voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.applicant_id) - - voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.owner_id) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_IN_REVIEW) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.owner_id)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_IN_REVIEW) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - voiceover_services.accept_voiceover_application( - user_voiceover_applications[0].voiceover_application_id, - self.admin_id) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id, status=suggestion_models.STATUS_ACCEPTED)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_ACCEPTED) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.owner_id)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_REJECTED) - self.assertEqual( - 
user_voiceover_applications[0].rejection_message, - 'We have to reject your application as another application for the ' - 'same opportunity got accepted.') - - def test_author_accepts_own_voiceover_application_raise_exception(self): - voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.applicant_id) - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - - with self.assertRaisesRegexp( - Exception, 'Applicants are not allowed to review their own ' - 'voiceover application.'): - voiceover_services.accept_voiceover_application( - user_voiceover_applications[0].voiceover_application_id, - self.applicant_id) - - def test_reject_voiceover_application(self): - voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.applicant_id) - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_IN_REVIEW) - - opportunities, _, _ = ( - opportunity_services.get_voiceover_opportunities('en', None)) - self.assertEqual(len(opportunities), 1) - - voiceover_services.reject_voiceover_application( - user_voiceover_applications[0].voiceover_application_id, - self.admin_id, 'Rejection message') - - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - self.assertEqual(len(user_voiceover_applications), 1) - self.assertEqual( - user_voiceover_applications[0].status, - suggestion_models.STATUS_REJECTED) - - opportunities, _, _ = ( - opportunity_services.get_voiceover_opportunities('en', None)) - self.assertEqual(len(opportunities), 1) - - def test_author_rejects_own_voiceover_application_raise_exception(self): - 
voiceover_services.create_new_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '', - 'audio_file.mp3', self.applicant_id) - user_voiceover_applications = ( - voiceover_services.get_user_submitted_voiceover_applications( - self.applicant_id)) - - with self.assertRaisesRegexp( - Exception, 'Applicants are not allowed to review their own ' - 'voiceover application.'): - voiceover_services.reject_voiceover_application( - user_voiceover_applications[0].voiceover_application_id, - self.applicant_id, 'Testing rejection') - - def test_get_text_to_create_voiceover_application(self): - exp_services.update_exploration( - self.owner_id, '0', [ - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'property_name': ( - exp_domain.STATE_PROPERTY_CONTENT), - 'state_name': 'Introduction', - 'new_value': { - 'content_id': 'content', - 'html': '

    The new content to voiceover

    ' - } - })], 'Adds new content to init state') - - content = voiceover_services.get_text_to_create_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'en') - self.assertEqual(content, '

    The new content to voiceover

    ') - - def test_get_text_to_create_voiceover_application_in_diff_language(self): - exp_services.update_exploration( - self.owner_id, '0', [ - exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, - 'property_name': ( - exp_domain.STATE_PROPERTY_CONTENT), - 'state_name': 'Introduction', - 'new_value': { - 'content_id': 'content', - 'html': '

    The new content to voiceover

    ' - } - }), exp_domain.ExplorationChange({ - 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, - 'state_name': 'Introduction', - 'content_id': 'content', - 'language_code': 'hi', - 'content_html': '

    The new content to voiceover

    ', - 'translation_html': '

    Translation in Hindi

    ', - 'data_format': 'html' - })], 'Adds new content to init state and its translation') - - content = voiceover_services.get_text_to_create_voiceover_application( - feconf.ENTITY_TYPE_EXPLORATION, '0', 'hi') - self.assertEqual(content, '

    Translation in Hindi

    ') - - def test_get_text_to_create_voiceover_application_for_invalid_type(self): - with self.assertRaisesRegexp( - Exception, 'Invalid target type: invalid_type'): - voiceover_services.get_text_to_create_voiceover_application( - 'invalid_type', '0', 'hi') diff --git a/core/domain/wipeout_domain.py b/core/domain/wipeout_domain.py index e8d05994e98e..624ec275c29d 100644 --- a/core/domain/wipeout_domain.py +++ b/core/domain/wipeout_domain.py @@ -16,8 +16,8 @@ from __future__ import annotations +from core import feconf from core import utils -from core.platform import models from typing import Dict, Optional @@ -91,6 +91,6 @@ def validate(self) -> None: contains wrong key. """ for key in self.pseudonymizable_entity_mappings.keys(): - if key not in [name.value for name in models.NAMES]: + if key not in [name.value for name in feconf.ValidModelNames]: raise utils.ValidationError( 'pseudonymizable_entity_mappings contain wrong key') diff --git a/core/domain/wipeout_domain_test.py b/core/domain/wipeout_domain_test.py index 24a91f5add6f..e3c092c0f832 100644 --- a/core/domain/wipeout_domain_test.py +++ b/core/domain/wipeout_domain_test.py @@ -27,10 +27,10 @@ class PendingDeletionRequestUnitTests(test_utils.GenericTestBase): """Tests for topic domain objects.""" def setUp(self) -> None: - super(PendingDeletionRequestUnitTests, self).setUp() + super().setUp() self.signup('a@example.com', 'A') self.signup('b@example.com', 'B') - self.user_id_a = self.get_user_id_from_email('a@example.com') # type: ignore[no-untyped-call] + self.user_id_a = self.get_user_id_from_email('a@example.com') def test_create_default_pending_deletion_request(self) -> None: """Tests the create_default_topic() function.""" @@ -53,7 +53,7 @@ def test_validate_fails_for_wrong_key_in_activity_mappings(self) -> None: pending_deletion_request.pseudonymizable_entity_mappings = { 'wrong_key': {} } - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( 
utils.ValidationError, 'pseudonymizable_entity_mappings contain wrong key' ): diff --git a/core/domain/wipeout_service.py b/core/domain/wipeout_service.py index 8b4d2397bdf6..a0e54ba7db04 100644 --- a/core/domain/wipeout_service.py +++ b/core/domain/wipeout_service.py @@ -22,7 +22,6 @@ import re from core import feconf -from core import python_utils from core import utils from core.domain import auth_services from core.domain import collection_services @@ -37,17 +36,43 @@ from core.domain import wipeout_domain from core.platform import models +from typing import Dict, Final, List, Optional, Sequence, Tuple, Type, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import app_feedback_report_models + from mypy_imports import base_models + from mypy_imports import blog_models + from mypy_imports import bulk_email_services + from mypy_imports import collection_models + from mypy_imports import config_models + from mypy_imports import datastore_services + from mypy_imports import exp_models + from mypy_imports import feedback_models + from mypy_imports import improvements_models + from mypy_imports import question_models + from mypy_imports import skill_models + from mypy_imports import story_models + from mypy_imports import subtopic_models + from mypy_imports import suggestion_models + from mypy_imports import topic_models + from mypy_imports import transaction_services + from mypy_imports import user_models + ( - app_feedback_report_models, base_models, blog_models, - collection_models, config_models, exp_models, feedback_models, - question_models, skill_models, story_models, subtopic_models, - suggestion_models, topic_models, user_models + app_feedback_report_models, base_models, + blog_models, collection_models, config_models, + exp_models, feedback_models, improvements_models, + question_models, skill_models, story_models, + subtopic_models, suggestion_models, topic_models, + user_models ) = models.Registry.import_models([ - 
models.NAMES.app_feedback_report, models.NAMES.base_model, - models.NAMES.blog, models.NAMES.collection, models.NAMES.config, - models.NAMES.exploration, models.NAMES.feedback, models.NAMES.question, - models.NAMES.skill, models.NAMES.story, models.NAMES.subtopic, - models.NAMES.suggestion, models.NAMES.topic, models.NAMES.user, + models.Names.APP_FEEDBACK_REPORT, models.Names.BASE_MODEL, + models.Names.BLOG, models.Names.COLLECTION, models.Names.CONFIG, + models.Names.EXPLORATION, models.Names.FEEDBACK, models.Names.IMPROVEMENTS, + models.Names.QUESTION, models.Names.SKILL, models.Names.STORY, + models.Names.SUBTOPIC, models.Names.SUGGESTION, models.Names.TOPIC, + models.Names.USER, ]) datastore_services = models.Registry.import_datastore_services() @@ -55,11 +80,15 @@ bulk_email_services = models.Registry.import_bulk_email_services() -WIPEOUT_LOGS_PREFIX = '[WIPEOUT]' -PERIOD_AFTER_WHICH_USERNAME_CANNOT_BE_REUSED = datetime.timedelta(weeks=1) +WIPEOUT_LOGS_PREFIX: Final = '[WIPEOUT]' +PERIOD_AFTER_WHICH_USERNAME_CANNOT_BE_REUSED: Final = ( + datetime.timedelta(weeks=1) +) -def get_pending_deletion_request(user_id): +def get_pending_deletion_request( + user_id: str +) -> wipeout_domain.PendingDeletionRequest: """Return the pending deletion request. Args: @@ -79,7 +108,7 @@ def get_pending_deletion_request(user_id): ) -def get_number_of_pending_deletion_requests(): +def get_number_of_pending_deletion_requests() -> int: """Get number of pending deletion request. Returns: @@ -88,7 +117,9 @@ def get_number_of_pending_deletion_requests(): return user_models.PendingDeletionRequestModel.query().count() -def save_pending_deletion_requests(pending_deletion_requests): +def save_pending_deletion_requests( + pending_deletion_requests: List[wipeout_domain.PendingDeletionRequest] +) -> None: """Save a list of pending deletion request domain objects as PendingDeletionRequestModel entities in the datastore. 
@@ -102,7 +133,7 @@ def save_pending_deletion_requests(pending_deletion_requests): user_ids, include_deleted=True) ) final_pending_deletion_request_models = [] - for deletion_request_model, deletion_request in python_utils.ZIP( + for deletion_request_model, deletion_request in zip( pending_deletion_request_models, pending_deletion_requests): deletion_request.validate() deletion_request_dict = { @@ -128,7 +159,7 @@ def save_pending_deletion_requests(pending_deletion_requests): final_pending_deletion_request_models) -def pre_delete_user(user_id): +def pre_delete_user(user_id: str) -> None: """Prepare user for the full deletion. 1. Mark all the activities that are private and solely owned by the user being deleted as deleted. @@ -141,6 +172,9 @@ def pre_delete_user(user_id): corresponds to a profile user then only that profile is deleted. For a full user, all of its associated profile users are deleted too. + + Raises: + Exception. No data available for when the user was created on. """ pending_deletion_requests = [] user_settings = user_services.get_user_settings(user_id, strict=True) @@ -150,7 +184,7 @@ def pre_delete_user(user_id): user_services.get_all_profiles_auth_details_by_parent_user_id(user_id) ] profile_users_settings_list = user_services.get_users_settings( - linked_profile_user_ids) + linked_profile_user_ids, strict=True) for profile_user_settings in profile_users_settings_list: profile_id = profile_user_settings.user_id user_services.mark_user_for_deletion(profile_id) @@ -173,11 +207,15 @@ def pre_delete_user(user_id): bulk_email_services.permanently_delete_user_from_list( user_settings.email) + user_services.mark_user_for_deletion(user_id) + date_now = datetime.datetime.utcnow() date_before_which_username_should_be_saved = ( date_now - PERIOD_AFTER_WHICH_USERNAME_CANNOT_BE_REUSED) - user_services.mark_user_for_deletion(user_id) - + if user_settings.created_on is None: + raise Exception( + 'No data available for when the user was created on.' 
+ ) normalized_long_term_username = ( user_settings.normalized_username if user_settings.created_on < date_before_which_username_should_be_saved @@ -194,12 +232,14 @@ def pre_delete_user(user_id): save_pending_deletion_requests(pending_deletion_requests) -def delete_users_pending_to_be_deleted(): +def delete_users_pending_to_be_deleted() -> None: """Taskqueue service method for deleting users that are pending to be deleted. Once these users are deleted, the job results will be mailed to the admin. """ - pending_deletion_request_models = ( + pending_deletion_request_models: Sequence[ + user_models.PendingDeletionRequestModel + ] = ( user_models.PendingDeletionRequestModel.query().fetch()) if len(pending_deletion_request_models) == 0: return @@ -225,7 +265,7 @@ def delete_users_pending_to_be_deleted(): email_manager.send_mail_to_admin(email_subject, email_message) -def check_completion_of_user_deletion(): +def check_completion_of_user_deletion() -> None: """Taskqueue service method for checking the completion of user deletion. It checks if all models do not contain the user ID of the deleted user in their fields. If any field contains the user ID of the deleted user, the @@ -235,7 +275,9 @@ def check_completion_of_user_deletion(): user, the final email announcing that the deletion was completed is sent, and the deletion request is deleted. """ - pending_deletion_request_models = ( + pending_deletion_request_models: Sequence[ + user_models.PendingDeletionRequestModel + ] = ( user_models.PendingDeletionRequestModel.query().fetch()) email_message = 'Results of the Completion of User Deletion Cron Job' @@ -260,7 +302,9 @@ def check_completion_of_user_deletion(): email_manager.send_mail_to_admin(email_subject, email_message) -def run_user_deletion(pending_deletion_request): +def run_user_deletion( + pending_deletion_request: wipeout_domain.PendingDeletionRequest +) -> str: """Run the user deletion. 
Args: @@ -280,7 +324,9 @@ def run_user_deletion(pending_deletion_request): return wipeout_domain.USER_DELETION_SUCCESS -def run_user_deletion_completion(pending_deletion_request): +def run_user_deletion_completion( + pending_deletion_request: wipeout_domain.PendingDeletionRequest +) -> str: """Run the user deletion verification. Args: @@ -321,20 +367,22 @@ def run_user_deletion_completion(pending_deletion_request): return wipeout_domain.USER_VERIFICATION_FAILURE -def _delete_models_with_delete_at_end_policy(user_id): +def _delete_models_with_delete_at_end_policy(user_id: str) -> None: """Delete auth and user models with deletion policy 'DELETE_AT_END'. Args: user_id: str. The unique ID of the user that is being deleted. """ for model_class in models.Registry.get_storage_model_classes( - [models.NAMES.auth, models.NAMES.user]): + [models.Names.AUTH, models.Names.USER]): policy = model_class.get_deletion_policy() if policy == base_models.DELETION_POLICY.DELETE_AT_END: model_class.apply_deletion_policy(user_id) -def delete_user(pending_deletion_request): +def delete_user( + pending_deletion_request: wipeout_domain.PendingDeletionRequest +) -> None: """Delete all the models for user specified in pending_deletion_request on the basis of the user role. 
@@ -347,80 +395,94 @@ def delete_user(pending_deletion_request): auth_services.delete_external_auth_associations(user_id) - _delete_models(user_id, models.NAMES.auth) - _delete_models(user_id, models.NAMES.user) + _delete_models(user_id, models.Names.AUTH) + _delete_models(user_id, models.Names.USER) _pseudonymize_config_models(pending_deletion_request) - _delete_models(user_id, models.NAMES.feedback) - _delete_models(user_id, models.NAMES.improvements) + _delete_models(user_id, models.Names.FEEDBACK) + _delete_models(user_id, models.Names.SUGGESTION) if feconf.ROLE_ID_MOBILE_LEARNER not in user_roles: remove_user_from_activities_with_associated_rights_models( pending_deletion_request.user_id) - _pseudonymize_app_feedback_report_models(pending_deletion_request) + _pseudonymize_one_model_class( + pending_deletion_request, + improvements_models.ExplorationStatsTaskEntryModel, + 'resolver_id', + models.Names.IMPROVEMENTS + ) + _pseudonymize_one_model_class( + pending_deletion_request, + app_feedback_report_models.AppFeedbackReportModel, + 'scrubbed_by', + models.Names.APP_FEEDBACK_REPORT + ) _pseudonymize_feedback_models(pending_deletion_request) - _pseudonymize_suggestion_models(pending_deletion_request) _pseudonymize_activity_models_without_associated_rights_models( pending_deletion_request, - models.NAMES.question, + models.Names.QUESTION, question_models.QuestionSnapshotMetadataModel, question_models.QuestionCommitLogEntryModel, 'question_id') _pseudonymize_activity_models_without_associated_rights_models( pending_deletion_request, - models.NAMES.skill, + models.Names.SKILL, skill_models.SkillSnapshotMetadataModel, skill_models.SkillCommitLogEntryModel, 'skill_id') _pseudonymize_activity_models_without_associated_rights_models( pending_deletion_request, - models.NAMES.story, + models.Names.STORY, story_models.StorySnapshotMetadataModel, story_models.StoryCommitLogEntryModel, 'story_id') _pseudonymize_activity_models_without_associated_rights_models( 
pending_deletion_request, - models.NAMES.subtopic, + models.Names.SUBTOPIC, subtopic_models.SubtopicPageSnapshotMetadataModel, subtopic_models.SubtopicPageCommitLogEntryModel, 'subtopic_page_id') _pseudonymize_activity_models_with_associated_rights_models( pending_deletion_request, - models.NAMES.exploration, + models.Names.EXPLORATION, exp_models.ExplorationSnapshotMetadataModel, exp_models.ExplorationRightsSnapshotMetadataModel, exp_models.ExplorationRightsSnapshotContentModel, exp_models.ExplorationCommitLogEntryModel, 'exploration_id', feconf.EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS, - ('owner_ids', 'editor_ids', 'voice_artist_ids', 'viewer_ids')) + ['owner_ids', 'editor_ids', 'voice_artist_ids', 'viewer_ids']) _remove_user_id_from_contributors_in_summary_models( user_id, exp_models.ExpSummaryModel) _pseudonymize_activity_models_with_associated_rights_models( pending_deletion_request, - models.NAMES.collection, + models.Names.COLLECTION, collection_models.CollectionSnapshotMetadataModel, collection_models.CollectionRightsSnapshotMetadataModel, collection_models.CollectionRightsSnapshotContentModel, collection_models.CollectionCommitLogEntryModel, 'collection_id', feconf.COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS, - ('owner_ids', 'editor_ids', 'voice_artist_ids', 'viewer_ids')) + ['owner_ids', 'editor_ids', 'voice_artist_ids', 'viewer_ids']) _remove_user_id_from_contributors_in_summary_models( user_id, collection_models.CollectionSummaryModel) _pseudonymize_activity_models_with_associated_rights_models( pending_deletion_request, - models.NAMES.topic, + models.Names.TOPIC, topic_models.TopicSnapshotMetadataModel, topic_models.TopicRightsSnapshotMetadataModel, topic_models.TopicRightsSnapshotContentModel, topic_models.TopicCommitLogEntryModel, 'topic_id', feconf.TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS, - ('manager_ids',)) + ['manager_ids']) _pseudonymize_blog_post_models(pending_deletion_request) - _delete_models(user_id, models.NAMES.email) + 
_pseudonymize_version_history_models(pending_deletion_request) + _delete_models(user_id, models.Names.EMAIL) + _delete_models(user_id, models.Names.LEARNER_GROUP) -def verify_user_deleted(user_id, include_delete_at_end_models=False): +def verify_user_deleted( + user_id: str, include_delete_at_end_models: bool = False +) -> bool: """Verify that all the models for user specified in pending_deletion_request are deleted. @@ -456,7 +518,9 @@ def verify_user_deleted(user_id, include_delete_at_end_models=False): return user_is_verified -def remove_user_from_activities_with_associated_rights_models(user_id): +def remove_user_from_activities_with_associated_rights_models( + user_id: str +) -> None: """Remove the user from exploration, collection, and topic models. Args: @@ -559,7 +623,9 @@ def remove_user_from_activities_with_associated_rights_models(user_id): user_services.get_system_user(), user_id) -def _generate_entity_to_pseudonymized_ids_mapping(entity_ids): +def _generate_entity_to_pseudonymized_ids_mapping( + entity_ids: List[str] +) -> Dict[str, str]: """Generate mapping from entity IDs to pseudonymous user IDs. Args: @@ -580,22 +646,21 @@ def _generate_entity_to_pseudonymized_ids_mapping(entity_ids): def _save_pseudonymizable_entity_mappings_to_same_pseudonym( - pending_deletion_request, entity_category, entity_ids): + pending_deletion_request: wipeout_domain.PendingDeletionRequest, + entity_category: models.Names, + entity_ids: List[str] +) -> None: """Generate mapping from entity IDs to a single pseudonymized user ID. Args: pending_deletion_request: PendingDeletionRequest. The pending deletion request object to which to save the entity mappings. - entity_category: models.NAMES. The category of the models that + entity_category: models.Names. The category of the models that contain the entity IDs. entity_ids: list(str). List of entity IDs for which to generate new pseudonymous user IDs. The IDs are of entities (e.g. 
models in config, collection, skill, or suggestion) that were modified in some way by the user who is currently being deleted. - - Returns: - dict(str, str). Mapping between the entity IDs and pseudonymous user - ID. """ if ( entity_category.value not in @@ -609,14 +674,17 @@ def _save_pseudonymizable_entity_mappings_to_same_pseudonym( def _save_pseudonymizable_entity_mappings_to_different_pseudonyms( - pending_deletion_request, entity_category, entity_ids): + pending_deletion_request: wipeout_domain.PendingDeletionRequest, + entity_category: models.Names, + entity_ids: List[str] +) -> None: """Save the entity mappings for some entity category into the pending deletion request. Args: pending_deletion_request: PendingDeletionRequest. The pending deletion request object to which to save the entity mappings. - entity_category: models.NAMES. The category of the models that + entity_category: models.Names. The category of the models that contain the entity IDs. entity_ids: list(str). The IDs for which to generate the mappings. """ @@ -631,12 +699,12 @@ def _save_pseudonymizable_entity_mappings_to_different_pseudonyms( save_pending_deletion_requests([pending_deletion_request]) -def _delete_models(user_id, module_name): +def _delete_models(user_id: str, module_name: models.Names) -> None: """Delete all the models from the given module, for a given user. Args: user_id: str. The id of the user to be deleted. - module_name: models.NAMES. The name of the module containing the models + module_name: models.Names. The name of the module containing the models that are being deleted. 
""" for model_class in models.Registry.get_storage_model_classes([module_name]): @@ -645,18 +713,27 @@ def _delete_models(user_id, module_name): model_class.apply_deletion_policy(user_id) +# The type of the argument 'commit_log_model_class' is Optional[...], because +# when we are dealing with config_model classes we pass None to this argument, +# because we do not have commit_log_model classes for config_model classes. def _collect_and_save_entity_ids_from_snapshots_and_commits( - pending_deletion_request, - activity_category, - snapshot_metadata_model_classes, - commit_log_model_class, - commit_log_model_field_name): + pending_deletion_request: wipeout_domain.PendingDeletionRequest, + activity_category: models.Names, + snapshot_metadata_model_classes: List[ + Type[base_models.BaseSnapshotMetadataModel] + ], + commit_log_model_class: Optional[Type[base_models.BaseCommitLogEntryModel]], + commit_log_model_field_name: Optional[str] +) -> Tuple[ + List[base_models.BaseSnapshotMetadataModel], + List[base_models.BaseCommitLogEntryModel] +]: """Collect and save the activity IDs that for the user with user_id. Args: pending_deletion_request: PendingDeletionRequest. The pending deletion request object for which to collect the entity IDs. - activity_category: models.NAMES. The category of the models that are + activity_category: models.Names. The category of the models that are that contain the entity IDs. snapshot_metadata_model_classes: list(class). The snapshot metadata model classes that contain the entity IDs. @@ -668,10 +745,14 @@ def _collect_and_save_entity_ids_from_snapshots_and_commits( Returns: (list(BaseSnapshotMetadataModel), list(BaseCommitLogEntryModel)). The tuple of snapshot metadata and commit log models. + + Raises: + Exception. Field name can only be None when commit log model class is + not provided. 
""" user_id = pending_deletion_request.user_id - snapshot_metadata_models = [] + snapshot_metadata_models: List[base_models.BaseSnapshotMetadataModel] = [] for snapshot_model_class in snapshot_metadata_model_classes: snapshot_metadata_models.extend(snapshot_model_class.query( datastore_services.any_of( @@ -684,11 +765,16 @@ def _collect_and_save_entity_ids_from_snapshots_and_commits( for model in snapshot_metadata_models) commit_log_ids = set() - commit_log_models = [] + commit_log_models: List[base_models.BaseCommitLogEntryModel] = [] if commit_log_model_class is not None: - commit_log_models = commit_log_model_class.query( + commit_log_models = list(commit_log_model_class.query( commit_log_model_class.user_id == user_id - ).fetch() + ).fetch()) + if commit_log_model_field_name is None: + raise Exception( + 'Field name can only be None when commit log model class is ' + 'not provided.' + ) commit_log_ids = set( getattr(model, commit_log_model_field_name) for model in commit_log_models) @@ -716,7 +802,9 @@ def _collect_and_save_entity_ids_from_snapshots_and_commits( return (snapshot_metadata_models, commit_log_models) -def _pseudonymize_config_models(pending_deletion_request): +def _pseudonymize_config_models( + pending_deletion_request: wipeout_domain.PendingDeletionRequest +) -> None: """Pseudonymize the config models for the user. 
Args: @@ -730,8 +818,8 @@ def _pseudonymize_config_models(pending_deletion_request): snapshot_metadata_models, _ = ( _collect_and_save_entity_ids_from_snapshots_and_commits( pending_deletion_request, - models.NAMES.config, - snapshot_model_classes, + models.Names.CONFIG, + list(snapshot_model_classes), None, None ) @@ -739,7 +827,9 @@ def _pseudonymize_config_models(pending_deletion_request): @transaction_services.run_in_transaction_wrapper def _pseudonymize_models_transactional( - activity_related_models, pseudonymized_id): + activity_related_models: List[base_models.BaseModel], + pseudonymized_id: str + ) -> None: """Pseudonymize user ID fields in the models. This function is run in a transaction, with the maximum number of @@ -762,7 +852,7 @@ def _pseudonymize_models_transactional( config_ids_to_pids = ( pending_deletion_request.pseudonymizable_entity_mappings[ - models.NAMES.config.value]) + models.Names.CONFIG.value]) for config_id, pseudonymized_id in config_ids_to_pids.items(): config_related_models = [ model for model in snapshot_metadata_models @@ -779,11 +869,12 @@ def _pseudonymize_models_transactional( def _pseudonymize_activity_models_without_associated_rights_models( - pending_deletion_request, - activity_category, - snapshot_model_class, - commit_log_model_class, - commit_log_model_field_name): + pending_deletion_request: wipeout_domain.PendingDeletionRequest, + activity_category: models.Names, + snapshot_model_class: Type[base_models.BaseSnapshotMetadataModel], + commit_log_model_class: Type[base_models.BaseCommitLogEntryModel], + commit_log_model_field_name: str +) -> None: """Collect the activity IDs that for the user with user_id. Verify that each snapshot has corresponding commit log. @@ -797,7 +888,7 @@ def _pseudonymize_activity_models_without_associated_rights_models( Args: pending_deletion_request: PendingDeletionRequest. The pending deletion request object for which to pseudonymize the models. - activity_category: models.NAMES. 
The category of the models that are + activity_category: models.Names. The category of the models that are being pseudonymized. snapshot_model_class: class. The metadata model class that is being pseudonymized. @@ -805,10 +896,6 @@ def _pseudonymize_activity_models_without_associated_rights_models( pseudonymized. commit_log_model_field_name: str. The name of the field holding the activity ID in the corresponding commit log model. - - Returns: - (list(BaseSnapshotMetadataModel), list(BaseCommitLogEntryModel)). - The tuple of snapshot metadata and commit log models. """ snapshot_metadata_models, commit_log_models = ( _collect_and_save_entity_ids_from_snapshots_and_commits( @@ -822,7 +909,9 @@ def _pseudonymize_activity_models_without_associated_rights_models( @transaction_services.run_in_transaction_wrapper def _pseudonymize_models_transactional( - activity_related_models, pseudonymized_id): + activity_related_models: List[base_models.BaseModel], + pseudonymized_id: str + ) -> None: """Pseudonymize user ID fields in the models. 
This function is run in a transaction, with the maximum number of @@ -847,19 +936,24 @@ def _pseudonymize_models_transactional( for commit_log_model in commit_log_models: commit_log_model.user_id = pseudonymized_id commit_log_model.update_timestamps() - datastore_services.put_multi(metadata_models + commit_log_models) + all_models: List[base_models.BaseModel] = [] + for metadata_model in metadata_models: + all_models.append(metadata_model) + for commit_log_model in commit_log_models: + all_models.append(commit_log_model) + datastore_services.put_multi(all_models) activity_ids_to_pids = ( pending_deletion_request.pseudonymizable_entity_mappings[ activity_category.value]) for activity_id, pseudonymized_id in activity_ids_to_pids.items(): - activity_related_models = [ + activity_related_models: List[base_models.BaseModel] = [ model for model in snapshot_metadata_models if model.get_unversioned_instance_id() == activity_id - ] + [ - model for model in commit_log_models - if getattr(model, commit_log_model_field_name) == activity_id ] + for model in commit_log_models: + if getattr(model, commit_log_model_field_name) == activity_id: + activity_related_models.append(model) for i in range( 0, len(activity_related_models), @@ -871,22 +965,27 @@ def _pseudonymize_models_transactional( def _pseudonymize_activity_models_with_associated_rights_models( - pending_deletion_request, - activity_category, - snapshot_metadata_model_class, - rights_snapshot_metadata_model_class, - rights_snapshot_content_model_class, - commit_log_model_class, - commit_log_model_field_name, - allowed_commands, - rights_user_id_fields): + pending_deletion_request: wipeout_domain.PendingDeletionRequest, + activity_category: models.Names, + snapshot_metadata_model_class: Type[base_models.BaseSnapshotMetadataModel], + rights_snapshot_metadata_model_class: Type[ + base_models.BaseSnapshotMetadataModel + ], + rights_snapshot_content_model_class: Type[ + base_models.BaseSnapshotContentModel + ], + 
commit_log_model_class: Type[base_models.BaseCommitLogEntryModel], + commit_log_model_field_name: str, + allowed_commands: List[feconf.ValidCmdDict], + rights_user_id_fields: List[str] +) -> None: """Pseudonymize the activity models with associated rights models for the user with user_id. Args: pending_deletion_request: PendingDeletionRequest. The pending deletion request object to be saved in the datastore. - activity_category: models.NAMES. The category of the models that are + activity_category: models.Names. The category of the models that are being pseudonymized. snapshot_metadata_model_class: CollectionSnapshotMetadataModel|ExplorationSnapshotMetadataModel. @@ -922,7 +1021,9 @@ def _pseudonymize_activity_models_with_associated_rights_models( @transaction_services.run_in_transaction_wrapper def _pseudonymize_models_transactional( - activity_related_models, pseudonymized_id): + activity_related_models: List[base_models.BaseModel], + pseudonymized_id: str + ) -> None: """Pseudonymize user ID fields in the models. 
This function is run in a transaction, with the maximum number of @@ -1014,12 +1115,17 @@ def _pseudonymize_models_transactional( for commit_log_model in commit_log_models: commit_log_model.user_id = pseudonymized_id commit_log_model.update_timestamps() - - datastore_services.put_multi( + all_models: List[base_models.BaseModel] = [] + for snapshot_metadata_model in ( snapshot_metadata_models + - rights_snapshot_metadata_models + - rights_snapshot_content_models + - commit_log_models) + rights_snapshot_metadata_models + ): + all_models.append(snapshot_metadata_model) + for snapshot_content_model in rights_snapshot_content_models: + all_models.append(snapshot_content_model) + for commit_log_model in commit_log_models: + all_models.append(commit_log_model) + datastore_services.put_multi(all_models) activity_ids_to_pids = ( pending_deletion_request.pseudonymizable_entity_mappings[ @@ -1039,14 +1145,18 @@ def _pseudonymize_models_transactional( ) ) - activity_related_models = ( - activity_related_snapshot_metadata_models + - activity_related_snapshot_content_models + - [ - model for model in commit_log_models - if getattr(model, commit_log_model_field_name) == activity_id - ] - ) + activity_related_models: List[base_models.BaseModel] = [ + model for model in commit_log_models + if getattr(model, commit_log_model_field_name) == activity_id + ] + for snapshot_content_model in activity_related_snapshot_content_models: + # Here, we assert that snapshot_content_model is never going to + # be a None value, because above we are fetching snapshot_models + # only for those ids that belong to already existing models. 
+ assert snapshot_content_model is not None + activity_related_models.append(snapshot_content_model) + for metadata_model in activity_related_snapshot_metadata_models: + activity_related_models.append(metadata_model) for i in range( 0, @@ -1060,7 +1170,12 @@ def _pseudonymize_models_transactional( def _remove_user_id_from_contributors_in_summary_models( - user_id, summary_model_class): + user_id: str, + summary_model_class: Union[ + Type[collection_models.CollectionSummaryModel], + Type[exp_models.ExpSummaryModel] + ] +) -> None: """Remove the user ID from contributor_ids and contributor_summary fields in relevant summary models. @@ -1069,12 +1184,19 @@ def _remove_user_id_from_contributors_in_summary_models( summary_model_class: CollectionSummaryModel|ExpSummaryModel. Class of the summary model from which should the user ID be removed. """ - related_summary_models = summary_model_class.query( + related_summary_models: Sequence[ + Union[ + collection_models.CollectionSummaryModel, + exp_models.ExpSummaryModel + ] + ] = summary_model_class.query( summary_model_class.contributor_ids == user_id ).fetch() @transaction_services.run_in_transaction_wrapper - def _remove_user_id_from_models_transactional(summary_models): + def _remove_user_id_from_models_transactional( + summary_models: List[base_models.BaseModel] + ) -> None: """Remove the user ID from contributor_ids and contributor_summary fields. @@ -1105,60 +1227,76 @@ def _remove_user_id_from_models_transactional(summary_models): i:i + feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION]) -def _pseudonymize_app_feedback_report_models(pending_deletion_request): - """Pseudonymize the app feedback report models for the user with user_id, - if they scrubbed a feedback report. If the user scrubs multiple reports, - they will be given the same pseudonym for each model entity. 
+def _pseudonymize_one_model_class( + pending_deletion_request: wipeout_domain.PendingDeletionRequest, + model_class: Type[base_models.BaseModel], + name_of_property_containing_user_ids: str, + module_name: models.Names +) -> None: + """Pseudonymize one model class for the user with the user_id associated + with the given pending deletion request. Args: pending_deletion_request: PendingDeletionRequest. The pending deletion - request object to be saved in the datastore. + request object. + model_class: class. The model class that contains the entity IDs. + name_of_property_containing_user_ids: str. The name of the property that + contains the user IDs. We fetch the models corresponding to the + user IDs stored in this property. + module_name: models.Names. The name of the module containing the models + that are being pseudonymized. """ - model_class = app_feedback_report_models.AppFeedbackReportModel user_id = pending_deletion_request.user_id - feedback_report_models = model_class.query( - model_class.scrubbed_by == user_id).fetch() - report_ids = set(model.id for model in feedback_report_models) + models_to_pseudonymize: Sequence[base_models.BaseModel] = model_class.query( + getattr(model_class, name_of_property_containing_user_ids) == user_id + ).fetch() + model_ids = set(model.id for model in models_to_pseudonymize) # Fill in any missing keys in the category's # pseudonymizable_entity_mappings, using the same pseudonym for each entity # so that a user will have the same pseudonymized ID for each entity # referencing them. 
- entity_category = models.NAMES.app_feedback_report _save_pseudonymizable_entity_mappings_to_same_pseudonym( - pending_deletion_request, entity_category, report_ids) + pending_deletion_request, module_name, list(model_ids)) + + report_ids_to_pids = ( + pending_deletion_request.pseudonymizable_entity_mappings[ + module_name.value]) @transaction_services.run_in_transaction_wrapper - def _pseudonymize_models_transactional(feedback_report_models): + def _pseudonymize_models_transactional( + models_to_pseudonymize: List[base_models.BaseModel] + ) -> None: """Pseudonymize user ID fields in the models. This function is run in a transaction, with the maximum number of feedback_report_models being MAX_NUMBER_OF_OPS_IN_TRANSACTION. Args: - feedback_report_models: list(FeedbackReportModel). The models with a - user ID in the 'scrubbed_by' field that we want to pseudonymize. + models_to_pseudonymize: list(BaseModel). The models that we want + to pseudonymize. """ - for report_model in feedback_report_models: - report_model.scrubbed_by = ( - report_ids_to_pids[report_model.id]) - model_class.update_timestamps_multi(feedback_report_models) - model_class.put_multi(feedback_report_models) - - report_ids_to_pids = ( - pending_deletion_request.pseudonymizable_entity_mappings[ - models.NAMES.app_feedback_report.value]) + for model in models_to_pseudonymize: + setattr( + model, + name_of_property_containing_user_ids, + report_ids_to_pids[model.id] + ) + model_class.update_timestamps_multi(models_to_pseudonymize) + model_class.put_multi(models_to_pseudonymize) for i in range( - 0, len(feedback_report_models), - feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION): + 0, len(models_to_pseudonymize), feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION + ): _pseudonymize_models_transactional( - feedback_report_models[ + models_to_pseudonymize[ i:i + feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION]) -def _pseudonymize_feedback_models(pending_deletion_request): +def _pseudonymize_feedback_models( + 
pending_deletion_request: wipeout_domain.PendingDeletionRequest +) -> None: """Pseudonymize the feedback models for the user with user_id. Args: @@ -1174,7 +1312,9 @@ def _pseudonymize_feedback_models(pending_deletion_request): # these models we generate a pseudonymous user ID and replace the user ID # with that pseudonymous user ID in all the models. feedback_thread_model_class = feedback_models.GeneralFeedbackThreadModel - feedback_thread_models = feedback_thread_model_class.query( + feedback_thread_models: Sequence[ + feedback_models.GeneralFeedbackThreadModel + ] = feedback_thread_model_class.query( datastore_services.any_of( feedback_thread_model_class.original_author_id == user_id, feedback_thread_model_class.last_nonempty_message_author_id == ( @@ -1183,13 +1323,17 @@ def _pseudonymize_feedback_models(pending_deletion_request): feedback_ids = set(model.id for model in feedback_thread_models) feedback_message_model_class = feedback_models.GeneralFeedbackMessageModel - feedback_message_models = feedback_message_model_class.query( + feedback_message_models: Sequence[ + feedback_models.GeneralFeedbackMessageModel + ] = feedback_message_model_class.query( feedback_message_model_class.author_id == user_id ).fetch() feedback_ids |= set(model.thread_id for model in feedback_message_models) suggestion_model_class = suggestion_models.GeneralSuggestionModel - general_suggestion_models = suggestion_model_class.query( + general_suggestion_models: Sequence[ + suggestion_models.GeneralSuggestionModel + ] = suggestion_model_class.query( datastore_services.any_of( suggestion_model_class.author_id == user_id, suggestion_model_class.final_reviewer_id == user_id @@ -1197,11 +1341,13 @@ def _pseudonymize_feedback_models(pending_deletion_request): feedback_ids |= set(model.id for model in general_suggestion_models) _save_pseudonymizable_entity_mappings_to_different_pseudonyms( - pending_deletion_request, models.NAMES.feedback, feedback_ids) + pending_deletion_request, 
models.Names.FEEDBACK, list(feedback_ids)) @transaction_services.run_in_transaction_wrapper def _pseudonymize_models_transactional( - feedback_related_models, pseudonymized_id): + feedback_related_models: List[base_models.BaseModel], + pseudonymized_id: str + ) -> None: """Pseudonymize user ID fields in the models. This function is run in a transaction, with the maximum number of @@ -1240,26 +1386,29 @@ def _pseudonymize_models_transactional( if general_suggestion_model.final_reviewer_id == user_id: general_suggestion_model.final_reviewer_id = pseudonymized_id general_suggestion_model.update_timestamps() - - datastore_services.put_multi( - feedback_thread_models + - feedback_message_models + - general_suggestion_models) + all_models: List[base_models.BaseModel] = [] + for feedback_thread_model in feedback_thread_models: + all_models.append(feedback_thread_model) + for feedback_message_model in feedback_message_models: + all_models.append(feedback_message_model) + for general_suggestion_model in general_suggestion_models: + all_models.append(general_suggestion_model) + datastore_services.put_multi(all_models) feedback_ids_to_pids = ( pending_deletion_request.pseudonymizable_entity_mappings[ - models.NAMES.feedback.value]) + models.Names.FEEDBACK.value]) for feedback_id, pseudonymized_id in feedback_ids_to_pids.items(): - feedback_related_models = [ + feedback_related_models: List[base_models.BaseModel] = [ model for model in feedback_thread_models if model.id == feedback_id - ] + [ - model for model in feedback_message_models - if model.thread_id == feedback_id - ] + [ - model for model in general_suggestion_models - if model.id == feedback_id ] + for feedback_model in feedback_message_models: + if feedback_model.thread_id == feedback_id: + feedback_related_models.append(feedback_model) + for suggestion_model in general_suggestion_models: + if suggestion_model.id == feedback_id: + feedback_related_models.append(suggestion_model) for i in range( 0, 
len(feedback_related_models), @@ -1270,71 +1419,9 @@ def _pseudonymize_models_transactional( pseudonymized_id) -def _pseudonymize_suggestion_models(pending_deletion_request): - """Pseudonymize the suggestion models for the user with user_id. - - Args: - pending_deletion_request: PendingDeletionRequest. The pending deletion - request object to be saved in the datastore. - """ - user_id = pending_deletion_request.user_id - - suggestion_models.TranslationContributionStatsModel.apply_deletion_policy( - user_id - ) - - voiceover_application_class = ( - suggestion_models.GeneralVoiceoverApplicationModel) - - voiceover_application_models = voiceover_application_class.query( - datastore_services.any_of( - voiceover_application_class.author_id == user_id, - voiceover_application_class.final_reviewer_id == user_id - )).fetch() - suggestion_ids = set(model.id for model in voiceover_application_models) - - _save_pseudonymizable_entity_mappings_to_different_pseudonyms( - pending_deletion_request, models.NAMES.suggestion, suggestion_ids) - - @transaction_services.run_in_transaction_wrapper - def _pseudonymize_models_transactional(voiceover_application_models): - """Pseudonymize user ID fields in the models. - - This function is run in a transaction, with the maximum number of - voiceover_application_models being MAX_NUMBER_OF_OPS_IN_TRANSACTION. - - Args: - voiceover_application_models: - list(GeneralVoiceoverApplicationModel). Models whose user IDs - should be pseudonymized. 
- """ - for voiceover_application_model in voiceover_application_models: - if voiceover_application_model.author_id == user_id: - voiceover_application_model.author_id = ( - suggestion_ids_to_pids[voiceover_application_model.id] - ) - if voiceover_application_model.final_reviewer_id == user_id: - voiceover_application_model.final_reviewer_id = ( - suggestion_ids_to_pids[voiceover_application_model.id] - ) - voiceover_application_class.update_timestamps_multi( - voiceover_application_models) - voiceover_application_class.put_multi(voiceover_application_models) - - suggestion_ids_to_pids = ( - pending_deletion_request.pseudonymizable_entity_mappings[ - models.NAMES.suggestion.value]) - for i in range( - 0, - len(voiceover_application_models), - feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION): - _pseudonymize_models_transactional( - voiceover_application_models[ - i:i + feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION] - ) - - -def _pseudonymize_blog_post_models(pending_deletion_request): +def _pseudonymize_blog_post_models( + pending_deletion_request: wipeout_domain.PendingDeletionRequest +) -> None: """Pseudonymizes the blog post models for the user with user_id. Also removes the user-id from the list of editor ids from the blog post rights model. @@ -1351,19 +1438,32 @@ def _pseudonymize_blog_post_models(pending_deletion_request): # we generate a pseudonymous user ID and replace the user ID # with that pseudonymous user ID in all the models. 
blog_post_model_class = blog_models.BlogPostModel - blog_post_models_list = blog_post_model_class.query( + blog_post_models_list: Sequence[ + blog_models.BlogPostModel + ] = blog_post_model_class.query( blog_post_model_class.author_id == user_id ).fetch() - blog_post_ids = {model.id for model in blog_post_models_list} + blog_related_model_ids = {model.id for model in blog_post_models_list} blog_post_summary_model_class = blog_models.BlogPostSummaryModel - blog_post_summary_models = blog_post_summary_model_class.query( + blog_post_summary_models: Sequence[ + blog_models.BlogPostSummaryModel + ] = blog_post_summary_model_class.query( blog_post_summary_model_class.author_id == user_id ).fetch() - blog_post_ids |= {model.id for model in blog_post_summary_models} + blog_related_model_ids |= {model.id for model in blog_post_summary_models} + + blog_author_details_model_class = blog_models.BlogAuthorDetailsModel + blog_author_details_model = blog_author_details_model_class.get_by_author( + user_id) + if blog_author_details_model is not None: + blog_related_model_ids |= {blog_author_details_model.id} _save_pseudonymizable_entity_mappings_to_different_pseudonyms( - pending_deletion_request, models.NAMES.blog, blog_post_ids) + pending_deletion_request, + models.Names.BLOG, + list(blog_related_model_ids) + ) # We want to remove the user ID from the list of editor ids on all the # blog post rights models related to the user. @@ -1371,7 +1471,9 @@ def _pseudonymize_blog_post_models(pending_deletion_request): @transaction_services.run_in_transaction_wrapper def _pseudonymize_models_transactional( - blog_posts_related_models, pseudonymized_id): + blog_posts_related_models: List[base_models.BaseModel], + pseudonymized_id: str + ) -> None: """Pseudonymize user ID fields in the models. This function is run in a transaction, with the maximum number of @@ -1383,33 +1485,62 @@ def _pseudonymize_models_transactional( pseudonymized_id: str. 
New pseudonymized user ID to be used for the models. """ - blog_post_models_list = [ + blog_post_models_list: List[ + Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel, + blog_models.BlogAuthorDetailsModel + ] + ] = [ model for model in blog_posts_related_models if isinstance(model, blog_post_model_class)] for blog_post_model in blog_post_models_list: if blog_post_model.author_id == user_id: blog_post_model.author_id = pseudonymized_id - blog_post_model.update_timestamps() + blog_post_model.update_timestamps() - blog_post_summary_models_list = [ + blog_post_summary_models_list: List[ + Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel, + blog_models.BlogAuthorDetailsModel + ] + ] = [ model for model in blog_posts_related_models if isinstance(model, blog_post_summary_model_class)] for blog_post_summary in blog_post_summary_models_list: if blog_post_summary.author_id == user_id: blog_post_summary.author_id = pseudonymized_id - blog_post_summary.update_timestamps() + blog_post_summary.update_timestamps() + all_models: List[ + Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel, + blog_models.BlogAuthorDetailsModel + ] + ] = blog_post_models_list + blog_post_summary_models_list - datastore_services.put_multi( - blog_post_models_list + blog_post_summary_models_list) + for model in blog_posts_related_models: + if isinstance(model, blog_author_details_model_class): + if model.author_id == user_id: + model.author_id = pseudonymized_id + model.update_timestamps() + all_models.append(model) + break + + datastore_services.put_multi(all_models) blog_post_ids_to_pids = ( pending_deletion_request.pseudonymizable_entity_mappings[ - models.NAMES.blog.value]) - for blog_post_id, pseudonymized_id in blog_post_ids_to_pids.items(): + models.Names.BLOG.value]) + for blog_related_ids, pseudonymized_id in blog_post_ids_to_pids.items(): blog_posts_related_models = [ model for model in itertools.chain( - blog_post_models_list, 
blog_post_summary_models) - if model.id == blog_post_id + blog_post_models_list, + blog_post_summary_models, + [blog_author_details_model] + ) + if model is not None and model.id == blog_related_ids ] transaction_slices = utils.grouper( blog_posts_related_models, @@ -1418,3 +1549,77 @@ def _pseudonymize_models_transactional( _pseudonymize_models_transactional( [m for m in transaction_slice if m is not None], pseudonymized_id) + + +def _pseudonymize_version_history_models( + pending_deletion_request: wipeout_domain.PendingDeletionRequest +) -> None: + """Pseudonymizes the version history models for the user with the given + user_id. + + Args: + pending_deletion_request: PendingDeletionRequest. The pending + deletion request object to be saved in the datastore. + """ + user_id = pending_deletion_request.user_id + + version_history_model_class = exp_models.ExplorationVersionHistoryModel + version_history_models: Sequence[ + exp_models.ExplorationVersionHistoryModel + ] = version_history_model_class.query( + user_id == version_history_model_class.committer_ids + ).fetch() + + @transaction_services.run_in_transaction_wrapper + def _pseudonymize_models_transactional( + version_history_models: List[exp_models.ExplorationVersionHistoryModel], + exp_ids_to_pids: Dict[str, str] + ) -> None: + """Pseudonymize user ID fields in the models. + + This function is run in a transaction, with the maximum number of + version_history_models being MAX_NUMBER_OF_OPS_IN_TRANSACTION. + + Args: + version_history_models: list(ExplorationVersionHistoryModel). Models + whose user IDs should be pseudonymized. + exp_ids_to_pids: dict(str, str). A mapping of exploration ids to + pseudonymous ids. + """ + for model in version_history_models: + # Pseudonymize user id from state_version_history. 
+ for state_name in model.state_version_history: + state_version_history = ( + model.state_version_history[state_name]) + if state_version_history['committer_id'] == user_id: + state_version_history['committer_id'] = ( + exp_ids_to_pids[model.exploration_id]) + + # Pseudonymize user id from metadata_last_edited_committer_id. + if model.metadata_last_edited_committer_id == user_id: + model.metadata_last_edited_committer_id = ( + exp_ids_to_pids[model.exploration_id]) + + # Pseudonymize user id from committer_ids. + for idx, committer_id in enumerate(model.committer_ids): + if committer_id == user_id: + model.committer_ids[idx] = ( + exp_ids_to_pids[model.exploration_id]) + + version_history_model_class.update_timestamps_multi( + version_history_models) + version_history_model_class.put_multi(version_history_models) + + exp_ids_to_pids = ( + pending_deletion_request.pseudonymizable_entity_mappings[ + models.Names.EXPLORATION.value]) + + for i in range( + 0, + len(version_history_models), + feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION + ): + _pseudonymize_models_transactional( + version_history_models[ + i:i + feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION], + exp_ids_to_pids) diff --git a/core/domain/wipeout_service_test.py b/core/domain/wipeout_service_test.py index 6bd1b33c3d11..6fef1a96b1f1 100644 --- a/core/domain/wipeout_service_test.py +++ b/core/domain/wipeout_service_test.py @@ -38,6 +38,7 @@ from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services +from core.domain import translation_domain from core.domain import user_domain from core.domain import user_services from core.domain import wipeout_domain @@ -45,19 +46,41 @@ from core.platform import models from core.tests import test_utils +from typing import Final, List, Sequence + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import app_feedback_report_models + from mypy_imports import auth_models + from mypy_imports import blog_models + from 
mypy_imports import collection_models + from mypy_imports import config_models + from mypy_imports import datastore_services + from mypy_imports import exp_models + from mypy_imports import feedback_models + from mypy_imports import improvements_models + from mypy_imports import learner_group_models + from mypy_imports import question_models + from mypy_imports import skill_models + from mypy_imports import story_models + from mypy_imports import subtopic_models + from mypy_imports import suggestion_models + from mypy_imports import topic_models + from mypy_imports import user_models + ( app_feedback_report_models, auth_models, blog_models, collection_models, config_models, email_models, exp_models, - feedback_models, improvements_models, question_models, skill_models, - story_models, subtopic_models, suggestion_models, topic_models, - user_models + feedback_models, improvements_models, learner_group_models, + question_models, skill_models, story_models, subtopic_models, + suggestion_models, topic_models, user_models ) = models.Registry.import_models([ - models.NAMES.app_feedback_report, models.NAMES.auth, models.NAMES.blog, - models.NAMES.collection, models.NAMES.config, models.NAMES.email, - models.NAMES.exploration, models.NAMES.feedback, models.NAMES.improvements, - models.NAMES.question, models.NAMES.skill, models.NAMES.story, - models.NAMES.subtopic, models.NAMES.suggestion, models.NAMES.topic, - models.NAMES.user + models.Names.APP_FEEDBACK_REPORT, models.Names.AUTH, models.Names.BLOG, + models.Names.COLLECTION, models.Names.CONFIG, models.Names.EMAIL, + models.Names.EXPLORATION, models.Names.FEEDBACK, models.Names.IMPROVEMENTS, + models.Names.LEARNER_GROUP, models.Names.QUESTION, models.Names.SKILL, + models.Names.STORY, models.Names.SUBTOPIC, models.Names.SUGGESTION, + models.Names.TOPIC, models.Names.USER ]) datastore_services = models.Registry.import_datastore_services() @@ -66,19 +89,19 @@ class WipeoutServiceHelpersTests(test_utils.GenericTestBase): 
"""Provides testing of the pre-deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' - def setUp(self): - super(WipeoutServiceHelpersTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) - def test_gets_pending_deletion_request(self): + def test_gets_pending_deletion_request(self) -> None: wipeout_service.save_pending_deletion_requests( [ wipeout_domain.PendingDeletionRequest.create_default( @@ -95,7 +118,8 @@ def test_gets_pending_deletion_request(self): pending_deletion_request.pseudonymizable_entity_mappings, {}) def test_get_number_of_pending_deletion_requests_returns_correct_number( - self): + self + ) -> None: number_of_pending_deletion_requests = ( wipeout_service.get_number_of_pending_deletion_requests()) self.assertEqual(number_of_pending_deletion_requests, 0) @@ -112,7 +136,7 @@ def test_get_number_of_pending_deletion_requests_returns_correct_number( wipeout_service.get_number_of_pending_deletion_requests()) self.assertEqual(number_of_pending_deletion_requests, 2) - def test_saves_pending_deletion_request_when_new(self): + def test_saves_pending_deletion_request_when_new(self) -> None: pending_deletion_request = ( wipeout_domain.PendingDeletionRequest.create_default( self.user_1_id, self.USER_1_EMAIL)) @@ -130,7 +154,7 @@ def test_saves_pending_deletion_request_when_new(self): self.assertEqual( pending_deletion_request_model.pseudonymizable_entity_mappings, {}) - def test_saves_pending_deletion_request_when_already_existing(self): + 
def test_saves_pending_deletion_request_when_already_existing(self) -> None: pending_deletion_request_model_old = ( user_models.PendingDeletionRequestModel( id=self.user_1_id, @@ -171,15 +195,15 @@ def test_saves_pending_deletion_request_when_already_existing(self): class WipeoutServicePreDeleteTests(test_utils.GenericTestBase): """Provides testing of the pre-deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - USER_3_EMAIL = 'other@email.com' - USER_3_USERNAME = 'username3' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + USER_3_EMAIL: Final = 'other@email.com' + USER_3_USERNAME: Final = 'username3' - def setUp(self): - super(WipeoutServicePreDeleteTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.add_user_role( @@ -192,22 +216,24 @@ def setUp(self): self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) - user_data_dict = { + user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': self.user_1_id, } - new_user_data_dict = { + new_user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias3', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': None, } 
self.modifiable_user_data = ( @@ -224,7 +250,7 @@ def setUp(self): [self.modifiable_new_user_data] )[0].user_id - def tearDown(self): + def tearDown(self) -> None: pending_deletion_request_models = ( user_models.PendingDeletionRequestModel.get_all()) for pending_deletion_request_model in pending_deletion_request_models: @@ -239,7 +265,7 @@ def tearDown(self): pending_deletion_request), wipeout_domain.USER_VERIFICATION_SUCCESS) - def test_pre_delete_user_email_subscriptions(self): + def test_pre_delete_user_email_subscriptions(self) -> None: email_preferences = user_services.get_email_preferences(self.user_1_id) self.assertEqual( email_preferences.can_receive_email_updates, @@ -254,8 +280,8 @@ def test_pre_delete_user_email_subscriptions(self): email_preferences.can_receive_subscription_email, feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE) - observed_log_messages = [] - def _mock_logging_function(msg, *args): + observed_log_messages: List[str] = [] + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.info().""" observed_log_messages.append(msg % args) @@ -274,22 +300,24 @@ def _mock_logging_function(msg, *args): self.assertFalse(email_preferences.can_receive_feedback_message_email) self.assertFalse(email_preferences.can_receive_subscription_email) - def test_pre_delete_profile_users_works_correctly(self): + def test_pre_delete_profile_users_works_correctly(self) -> None: user_settings = user_services.get_user_settings(self.profile_user_id) self.assertFalse(user_settings.deleted) self.assertFalse(user_settings.deleted) wipeout_service.pre_delete_user(self.profile_user_id) self.process_and_flush_pending_tasks() - user_settings = user_models.UserSettingsModel.get_by_id( + user_settings_model = user_models.UserSettingsModel.get_by_id( self.profile_user_id) - self.assertTrue(user_settings.deleted) + self.assertTrue(user_settings_model.deleted) user_auth_details = ( auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id)) 
self.assertTrue(user_auth_details.deleted) - def test_pre_delete_user_for_full_user_also_deletes_all_profiles(self): + def test_pre_delete_user_for_full_user_also_deletes_all_profiles( + self + ) -> None: user_settings = user_services.get_user_settings(self.user_1_id) self.assertFalse(user_settings.deleted) profile_user_settings = user_services.get_user_settings( @@ -302,19 +330,21 @@ def test_pre_delete_user_for_full_user_also_deletes_all_profiles(self): wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - user_settings = user_models.UserSettingsModel.get_by_id(self.user_1_id) - self.assertTrue(user_settings.deleted) + user_settings_model = user_models.UserSettingsModel.get_by_id( + self.user_1_id + ) + self.assertTrue(user_settings_model.deleted) user_auth_details = ( auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id)) self.assertTrue(user_auth_details.deleted) - profile_user_settings = user_models.UserSettingsModel.get_by_id( + profile_user_settings_model = user_models.UserSettingsModel.get_by_id( self.profile_user_id) - self.assertTrue(profile_user_settings.deleted) - profile_auth_details = ( + self.assertTrue(profile_user_settings_model.deleted) + profile_auth_details_model = ( auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id)) - self.assertTrue(profile_auth_details.deleted) + self.assertTrue(profile_auth_details_model.deleted) - def test_pre_delete_user_without_activities_works_correctly(self): + def test_pre_delete_user_without_activities_works_correctly(self) -> None: user_models.UserSubscriptionsModel( id=self.user_1_id, exploration_ids=[], collection_ids=[] ).put() @@ -327,15 +357,19 @@ def test_pre_delete_user_without_activities_works_correctly(self): wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - user_settings = user_models.UserSettingsModel.get_by_id(self.user_1_id) - self.assertTrue(user_settings.deleted) + user_settings_model = 
user_models.UserSettingsModel.get_by_id( + self.user_1_id + ) + self.assertTrue(user_settings_model.deleted) self.assertIsNone( auth_services.get_auth_id_from_user_id(self.user_1_id)) pending_deletion_model = ( user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id)) self.assertIsNotNone(pending_deletion_model) - def test_pre_delete_username_is_not_saved_for_user_younger_than_week(self): + def test_pre_delete_username_is_not_saved_for_user_younger_than_week( + self + ) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() @@ -344,7 +378,9 @@ def test_pre_delete_username_is_not_saved_for_user_younger_than_week(self): self.assertIsNone( pending_deletion_request.normalized_long_term_username) - def test_pre_delete_username_is_saved_for_user_older_than_week(self): + def test_pre_delete_username_is_saved_for_user_older_than_week( + self + ) -> None: date_10_days_ago = ( datetime.datetime.utcnow() - datetime.timedelta(days=10)) with self.mock_datetime_utcnow(date_10_days_ago): @@ -360,7 +396,7 @@ def test_pre_delete_username_is_saved_for_user_older_than_week(self): pending_deletion_request.normalized_long_term_username, self.USER_3_USERNAME) - def test_pre_delete_user_with_activities_multiple_owners(self): + def test_pre_delete_user_with_activities_multiple_owners(self) -> None: user_services.add_user_role( self.user_1_id, feconf.ROLE_ID_COLLECTION_EDITOR) self.save_new_valid_exploration('exp_id', self.user_1_id) @@ -384,7 +420,7 @@ def test_pre_delete_user_with_activities_multiple_owners(self): user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id)) self.assertIsNotNone(pending_deletion_model) - def test_pre_delete_user_collection_is_marked_deleted(self): + def test_pre_delete_user_collection_is_marked_deleted(self) -> None: self.save_new_valid_collection('col_id', self.user_1_id) collection_model = collection_models.CollectionModel.get_by_id('col_id') @@ -395,7 +431,7 @@ def 
test_pre_delete_user_collection_is_marked_deleted(self): self.assertIsNone(collection_models.CollectionModel.get_by_id('col_id')) - def test_pre_delete_user_exploration_is_marked_deleted(self): + def test_pre_delete_user_exploration_is_marked_deleted(self) -> None: self.save_new_valid_exploration('exp_id', self.user_1_id) exp_model = exp_models.ExplorationModel.get_by_id('exp_id') @@ -406,7 +442,7 @@ def test_pre_delete_user_exploration_is_marked_deleted(self): self.assertIsNone(exp_models.ExplorationModel.get_by_id('exp_id')) - def test_pre_delete_user_collection_ownership_is_released(self): + def test_pre_delete_user_collection_ownership_is_released(self) -> None: self.save_new_valid_collection('col_id', self.user_1_id) self.publish_collection(self.user_1_id, 'col_id') rights_manager.assign_role_for_collection( @@ -426,7 +462,7 @@ def test_pre_delete_user_collection_ownership_is_released(self): collection_models.CollectionSummaryModel.get_by_id('col_id')) self.assertTrue(collection_summary_model.community_owned) - def test_pre_delete_user_exploration_ownership_is_released(self): + def test_pre_delete_user_exploration_ownership_is_released(self) -> None: self.save_new_valid_exploration('exp_id', self.user_1_id) self.publish_exploration(self.user_1_id, 'exp_id') rights_manager.assign_role_for_exploration( @@ -446,7 +482,7 @@ def test_pre_delete_user_exploration_ownership_is_released(self): def test_pre_delete_user_exploration_ownership_is_released_with_voice_art( self - ): + ) -> None: self.save_new_valid_exploration('exp_id', self.user_1_id) self.publish_exploration(self.user_1_id, 'exp_id') rights_manager.assign_role_for_exploration( @@ -464,7 +500,7 @@ def test_pre_delete_user_exploration_ownership_is_released_with_voice_art( exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id') self.assertTrue(exp_summary_model.community_owned) - def test_pre_delete_user_collection_user_is_deassigned(self): + def 
test_pre_delete_user_collection_user_is_deassigned(self) -> None: self.save_new_valid_collection('col_id', self.user_1_id) rights_manager.assign_role_for_collection( user_services.get_system_user(), @@ -483,7 +519,7 @@ def test_pre_delete_user_collection_user_is_deassigned(self): collection_models.CollectionSummaryModel.get_by_id('col_id')) self.assertEqual(collection_summary_model.editor_ids, []) - def test_pre_delete_user_exploration_user_is_deassigned(self): + def test_pre_delete_user_exploration_user_is_deassigned(self) -> None: self.save_new_valid_exploration('exp_id', self.user_1_id) rights_manager.assign_role_for_exploration( user_services.get_system_user(), @@ -500,9 +536,9 @@ def test_pre_delete_user_exploration_user_is_deassigned(self): exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id') self.assertEqual(exp_summary_model.editor_ids, []) - def test_pre_delete_user_exp_user_with_voice_artist_role_is_deassigned( + def test_exp_user_with_voice_artist_role_is_deassigned_from_public_exp( self - ): + ) -> None: self.save_new_valid_exploration('exp_id', self.user_1_id) self.publish_exploration(self.user_1_id, 'exp_id') rights_manager.assign_role_for_exploration( @@ -521,7 +557,30 @@ def test_pre_delete_user_exp_user_with_voice_artist_role_is_deassigned( exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id') self.assertEqual(exp_summary_model.voice_artist_ids, []) - def test_pre_delete_user_user_is_deassigned_from_topics(self): + def test_exp_user_with_voice_artist_role_is_deassigned_from_private_exp( + self + ) -> None: + self.save_new_valid_exploration('exp_id', self.user_1_id) + self.publish_exploration(self.user_1_id, 'exp_id') + rights_manager.assign_role_for_exploration( + user_services.get_system_user(), + 'exp_id', + self.user_2_id, + feconf.ROLE_VOICE_ARTIST + ) + rights_manager.unpublish_exploration( + user_services.get_system_user(), 'exp_id') + + exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id') + 
self.assertEqual(exp_summary_model.voice_artist_ids, [self.user_2_id]) + + wipeout_service.pre_delete_user(self.user_2_id) + self.process_and_flush_pending_tasks() + + exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id') + self.assertEqual(exp_summary_model.voice_artist_ids, []) + + def test_pre_delete_user_user_is_deassigned_from_topics(self) -> None: self.save_new_topic('top_id', self.user_1_id) topic_services.assign_role( user_services.get_system_user(), @@ -538,17 +597,32 @@ def test_pre_delete_user_user_is_deassigned_from_topics(self): top_rights_model = topic_models.TopicRightsModel.get_by_id('top_id') self.assertEqual(top_rights_model.manager_ids, []) + def test_raises_error_if_created_on_is_unavailable(self) -> None: + user_settings = user_services.get_user_settings(self.user_1_id) + user_settings.created_on = None + + with self.swap_to_always_return( + user_services, + 'get_user_settings', + user_settings + ): + with self.assertRaisesRegex( + Exception, + 'No data available for when the user was created on.' 
+ ): + wipeout_service.pre_delete_user(self.user_1_id) + class WipeoutServiceRunFunctionsTests(test_utils.GenericTestBase): """Provides testing of the pre-deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' - def setUp(self): - super(WipeoutServiceRunFunctionsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) @@ -560,7 +634,7 @@ def setUp(self): self.topic_id = topic_fetchers.get_new_topic_id() subtopic_1 = topic_domain.Subtopic.create_default_subtopic( - 1, 'Subtopic Title 1') + 1, 'Subtopic Title 1', 'url-frag-one') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' @@ -578,20 +652,22 @@ def setUp(self): self.pending_deletion_request = ( wipeout_service.get_pending_deletion_request(self.user_1_id)) - def test_run_user_deletion_with_user_not_deleted(self): + def test_run_user_deletion_with_user_not_deleted(self) -> None: self.assertEqual( wipeout_service.run_user_deletion(self.pending_deletion_request), wipeout_domain.USER_DELETION_SUCCESS ) - def test_run_user_deletion_with_user_already_deleted(self): + def test_run_user_deletion_with_user_already_deleted(self) -> None: wipeout_service.run_user_deletion(self.pending_deletion_request) self.assertEqual( wipeout_service.run_user_deletion(self.pending_deletion_request), wipeout_domain.USER_DELETION_ALREADY_DONE ) - def test_run_user_deletion_completion_with_user_not_yet_deleted(self): + def test_run_user_deletion_completion_with_user_not_yet_deleted( + self + ) -> None: self.assertEqual( wipeout_service.run_user_deletion_completion( self.pending_deletion_request), @@ -602,7 +678,9 
@@ def test_run_user_deletion_completion_with_user_not_yet_deleted(self): self.assertIsNotNone( user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id)) - def test_run_user_deletion_completion_with_user_properly_deleted(self): + def test_run_user_deletion_completion_with_user_properly_deleted( + self + ) -> None: wipeout_service.run_user_deletion(self.pending_deletion_request) send_email_swap = self.swap_with_checks( @@ -638,7 +716,8 @@ def test_run_user_deletion_completion_with_user_properly_deleted(self): self.user_1_id)) def test_run_user_deletion_completion_user_wrongly_deleted_emails_enabled( - self): + self + ) -> None: wipeout_service.run_user_deletion(self.pending_deletion_request) user_models.CompletedActivitiesModel( @@ -671,7 +750,8 @@ def test_run_user_deletion_completion_user_wrongly_deleted_emails_enabled( user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id)) def test_run_user_deletion_completion_user_wrongly_deleted_emails_disabled( - self): + self + ) -> None: wipeout_service.run_user_deletion(self.pending_deletion_request) user_models.CompletedActivitiesModel( @@ -707,39 +787,45 @@ class WipeoutServiceDeleteAppFeedbackReportModelsTests( AppFeedbackReportModels with the deleted user. """ - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' # The timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC. - REPORT_SUBMITTED_TIMESTAMP_1 = datetime.datetime.fromtimestamp(1615151836) + REPORT_SUBMITTED_TIMESTAMP_1: Final = datetime.datetime.fromtimestamp( + 1615151836 + ) # The timestamp in sec since epoch for Mar 8 2021 10:7:16 UTC. 
- REPORT_SUBMITTED_TIMESTAMP_2 = datetime.datetime.fromtimestamp(1615199836) + REPORT_SUBMITTED_TIMESTAMP_2: Final = datetime.datetime.fromtimestamp( + 1615199836 + ) # The timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC. - TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836) + TICKET_CREATION_TIMESTAMP: Final = datetime.datetime.fromtimestamp( + 1616173836 + ) - PLATFORM_ANDROID = 'android' - REPORT_ID_1 = '%s.%s.%s' % ( + PLATFORM_ANDROID: Final = 'android' + REPORT_ID_1: Final = '%s.%s.%s' % ( PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP_1.second, 'randomInteger123') - REPORT_ID_2 = '%s.%s.%s' % ( + REPORT_ID_2: Final = '%s.%s.%s' % ( PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP_2.second, 'randomInteger321') - REPORT_ID_3 = '%s.%s.%s' % ( + REPORT_ID_3: Final = '%s.%s.%s' % ( PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP_2.second, 'differentInt') - TICKET_ID = '%s.%s.%s' % ( + TICKET_ID: Final = '%s.%s.%s' % ( 'random_hash', TICKET_CREATION_TIMESTAMP.second, '16CharString1234') - REPORT_TYPE_SUGGESTION = 'suggestion' - CATEGORY_OTHER = 'other' - PLATFORM_VERSION = '0.1-alpha-abcdef1234' - COUNTRY_LOCALE_CODE_INDIA = 'in' - ANDROID_DEVICE_MODEL = 'Pixel 4a' - ANDROID_SDK_VERSION = 28 - ENTRY_POINT_NAVIGATION_DRAWER = 'navigation_drawer' - TEXT_LANGUAGE_CODE_ENGLISH = 'en' - AUDIO_LANGUAGE_CODE_ENGLISH = 'en' - ANDROID_REPORT_INFO = { + REPORT_TYPE_SUGGESTION: Final = 'suggestion' + CATEGORY_OTHER: Final = 'other' + PLATFORM_VERSION: Final = '0.1-alpha-abcdef1234' + COUNTRY_LOCALE_CODE_INDIA: Final = 'in' + ANDROID_DEVICE_MODEL: Final = 'Pixel 4a' + ANDROID_SDK_VERSION: Final = 28 + ENTRY_POINT_NAVIGATION_DRAWER: Final = 'navigation_drawer' + TEXT_LANGUAGE_CODE_ENGLISH: Final = 'en' + AUDIO_LANGUAGE_CODE_ENGLISH: Final = 'en' + ANDROID_REPORT_INFO: Final = { 'user_feedback_other_text_input': 'add an admin', 'event_logs': ['event1', 'event2'], 'logcat_logs': ['logcat1', 'logcat2'], @@ -753,10 +839,10 @@ class 
WipeoutServiceDeleteAppFeedbackReportModelsTests( 'automatically_update_topics': False, 'is_curriculum_admin': False } - ANDROID_REPORT_INFO_SCHEMA_VERSION = 1 + ANDROID_REPORT_INFO_SCHEMA_VERSION: Final = 1 - def setUp(self): - super(WipeoutServiceDeleteAppFeedbackReportModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -832,13 +918,13 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def test_user_is_pseudonymized_from_report(self): + def test_user_is_pseudonymized_from_report(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) report_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id).pseudonymizable_entity_mappings[ - models.NAMES.app_feedback_report.value]) + models.Names.APP_FEEDBACK_REPORT.value]) # Verify the user is pseudonymized. 
report_model = ( @@ -848,13 +934,28 @@ def test_user_is_pseudonymized_from_report(self): report_model.scrubbed_by, report_mappings[self.REPORT_ID_1]) self.assertNotEqual(report_model.scrubbed_by, self.user_1_id) - def test_same_pseudonym_used_for_same_user(self): + def test_raises_error_when_field_name_is_not_provided_with_commit_model( + self + ) -> None: + with self.assertRaisesRegex( + Exception, + 'Field name can only be None when commit log model class' + ): + wipeout_service._collect_and_save_entity_ids_from_snapshots_and_commits( # pylint: disable=line-too-long, protected-access + wipeout_service.get_pending_deletion_request(self.user_1_id), + models.Names.QUESTION, + [question_models.QuestionSnapshotMetadataModel], + question_models.QuestionCommitLogEntryModel, + None + ) + + def test_same_pseudonym_used_for_same_user(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_2_id)) report_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id).pseudonymizable_entity_mappings[ - models.NAMES.app_feedback_report.value]) + models.Names.APP_FEEDBACK_REPORT.value]) # Verify the pseudonym is the same for all report instances. 
report_model_2 = ( @@ -873,7 +974,7 @@ def test_same_pseudonym_used_for_same_user(self): self.assertEqual( report_model_2.scrubbed_by, report_model_3.scrubbed_by) - def test_different_users_have_different_pseudonyms(self): + def test_different_users_have_different_pseudonyms(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) wipeout_service.delete_user( @@ -881,11 +982,11 @@ def test_different_users_have_different_pseudonyms(self): user_1_report_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id).pseudonymizable_entity_mappings[ - models.NAMES.app_feedback_report.value]) + models.Names.APP_FEEDBACK_REPORT.value]) user_2_report_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id).pseudonymizable_entity_mappings[ - models.NAMES.app_feedback_report.value]) + models.Names.APP_FEEDBACK_REPORT.value]) # Verify pseudonyms are different for different users. report_model_1 = ( @@ -911,36 +1012,42 @@ class WipeoutServiceVerifyDeleteAppFeedbackReportModelsTests( AppFeedbackReportModels with previous references to a deleted user. """ - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' # The timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC. - REPORT_SUBMITTED_TIMESTAMP_1 = datetime.datetime.fromtimestamp(1615151836) + REPORT_SUBMITTED_TIMESTAMP_1: Final = datetime.datetime.fromtimestamp( + 1615151836 + ) # The timestamp in sec since epoch for Mar 8 2021 10:7:16 UTC. - REPORT_SUBMITTED_TIMESTAMP_2 = datetime.datetime.fromtimestamp(1615199836) + REPORT_SUBMITTED_TIMESTAMP_2: Final = datetime.datetime.fromtimestamp( + 1615199836 + ) # The timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC. 
- TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836) + TICKET_CREATION_TIMESTAMP: Final = datetime.datetime.fromtimestamp( + 1616173836 + ) - PLATFORM_ANDROID = 'android' - REPORT_ID_1 = '%s.%s.%s' % ( + PLATFORM_ANDROID: Final = 'android' + REPORT_ID_1: Final = '%s.%s.%s' % ( PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP_1.second, 'randomInteger123') - REPORT_ID_2 = '%s.%s.%s' % ( + REPORT_ID_2: Final = '%s.%s.%s' % ( PLATFORM_ANDROID, REPORT_SUBMITTED_TIMESTAMP_2.second, 'randomInteger321') - TICKET_ID = '%s.%s.%s' % ( + TICKET_ID: Final = '%s.%s.%s' % ( 'random_hash', TICKET_CREATION_TIMESTAMP.second, '16CharString1234') - REPORT_TYPE_SUGGESTION = 'suggestion' - CATEGORY_OTHER = 'other' - PLATFORM_VERSION = '0.1-alpha-abcdef1234' - COUNTRY_LOCALE_CODE_INDIA = 'in' - ANDROID_DEVICE_MODEL = 'Pixel 4a' - ANDROID_SDK_VERSION = 28 - ENTRY_POINT_NAVIGATION_DRAWER = 'navigation_drawer' - TEXT_LANGUAGE_CODE_ENGLISH = 'en' - AUDIO_LANGUAGE_CODE_ENGLISH = 'en' - ANDROID_REPORT_INFO = { + REPORT_TYPE_SUGGESTION: Final = 'suggestion' + CATEGORY_OTHER: Final = 'other' + PLATFORM_VERSION: Final = '0.1-alpha-abcdef1234' + COUNTRY_LOCALE_CODE_INDIA: Final = 'in' + ANDROID_DEVICE_MODEL: Final = 'Pixel 4a' + ANDROID_SDK_VERSION: Final = 28 + ENTRY_POINT_NAVIGATION_DRAWER: Final = 'navigation_drawer' + TEXT_LANGUAGE_CODE_ENGLISH: Final = 'en' + AUDIO_LANGUAGE_CODE_ENGLISH: Final = 'en' + ANDROID_REPORT_INFO: Final = { 'user_feedback_other_text_input': 'add an admin', 'event_logs': ['event1', 'event2'], 'logcat_logs': ['logcat1', 'logcat2'], @@ -954,12 +1061,10 @@ class WipeoutServiceVerifyDeleteAppFeedbackReportModelsTests( 'automatically_update_topics': False, 'is_curriculum_admin': False } - ANDROID_REPORT_INFO_SCHEMA_VERSION = 1 + ANDROID_REPORT_INFO_SCHEMA_VERSION: Final = 1 - def setUp(self): - super( - WipeoutServiceVerifyDeleteAppFeedbackReportModelsTests, - self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, 
self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -1011,12 +1116,14 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - def test_verify_user_delete_when_user_is_deleted_returns_true(self): + def test_verify_user_delete_when_user_is_deleted_returns_true(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): + def test_verify_user_delete_when_user_is_not_deleted_returns_false( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) @@ -1054,15 +1161,15 @@ def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): class WipeoutServiceDeleteConfigModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - CONFIG_1_ID = 'config_1_id' - CONFIG_2_ID = 'config_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + CONFIG_1_ID: Final = 'config_1_id' + CONFIG_2_ID: Final = 'config_2_id' - def setUp(self): - super(WipeoutServiceDeleteConfigModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -1074,7 +1181,7 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - 
def test_one_config_property_is_pseudonymized(self): + def test_one_config_property_is_pseudonymized(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -1082,7 +1189,7 @@ def test_one_config_property_is_pseudonymized(self): config_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.config.value] + ).pseudonymizable_entity_mappings[models.Names.CONFIG.value] ) metadata_model = ( config_models.ConfigPropertySnapshotMetadataModel.get_by_id( @@ -1092,7 +1199,8 @@ def test_one_config_property_is_pseudonymized(self): metadata_model.committer_id, config_mappings[self.CONFIG_1_ID]) def test_one_config_property_when_the_deletion_is_repeated_is_pseudonymized( - self): + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -1114,12 +1222,12 @@ def test_one_config_property_when_the_deletion_is_repeated_is_pseudonymized( config_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.config.value] + ).pseudonymizable_entity_mappings[models.Names.CONFIG.value] ) self.assertEqual( metadata_model.committer_id, config_mappings[self.CONFIG_1_ID]) - def test_multiple_config_properties_are_pseudonymized(self): + def test_multiple_config_properties_are_pseudonymized(self) -> None: config_models.ConfigPropertyModel( id=self.CONFIG_2_ID, value='b' ).commit(self.user_1_id, [{'cmd': 'command'}]) @@ -1130,7 +1238,7 @@ def test_multiple_config_properties_are_pseudonymized(self): config_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.config.value] + ).pseudonymizable_entity_mappings[models.Names.CONFIG.value] ) metadata_model_1 = ( config_models.ConfigPropertySnapshotMetadataModel.get_by_id( @@ -1147,7 +1255,8 @@ def 
test_multiple_config_properties_are_pseudonymized(self): metadata_model_2.committer_id, config_mappings[self.CONFIG_2_ID]) def test_multiple_config_properties_with_multiple_users_are_pseudonymized( - self): + self + ) -> None: config_models.ConfigPropertyModel( id=self.CONFIG_2_ID, value='b' ).commit(self.user_2_id, [{'cmd': 'command'}]) @@ -1159,7 +1268,7 @@ def test_multiple_config_properties_with_multiple_users_are_pseudonymized( config_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.config.value] + ).pseudonymizable_entity_mappings[models.Names.CONFIG.value] ) metadata_model_1 = ( config_models.ConfigPropertySnapshotMetadataModel.get_by_id( @@ -1183,7 +1292,7 @@ def test_multiple_config_properties_with_multiple_users_are_pseudonymized( config_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.config.value] + ).pseudonymizable_entity_mappings[models.Names.CONFIG.value] ) metadata_model_3 = ( config_models.ConfigPropertySnapshotMetadataModel.get_by_id( @@ -1192,7 +1301,9 @@ def test_multiple_config_properties_with_multiple_users_are_pseudonymized( self.assertEqual( metadata_model_3.committer_id, config_mappings_2[self.CONFIG_2_ID]) - def test_one_config_property_with_multiple_users_is_pseudonymized(self): + def test_one_config_property_with_multiple_users_is_pseudonymized( + self + ) -> None: config_models.ConfigPropertyModel.get_by_id( self.CONFIG_1_ID ).commit(self.user_2_id, [{'cmd': 'command'}]) @@ -1204,7 +1315,7 @@ def test_one_config_property_with_multiple_users_is_pseudonymized(self): config_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.config.value] + ).pseudonymizable_entity_mappings[models.Names.CONFIG.value] ) metadata_model_1 = ( config_models.ConfigPropertySnapshotMetadataModel.get_by_id( @@ -1227,7 +1338,7 
@@ def test_one_config_property_with_multiple_users_is_pseudonymized(self): config_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.config.value] + ).pseudonymizable_entity_mappings[models.Names.CONFIG.value] ) metadata_model_3 = ( config_models.ConfigPropertySnapshotMetadataModel.get_by_id( @@ -1240,13 +1351,13 @@ def test_one_config_property_with_multiple_users_is_pseudonymized(self): class WipeoutServiceVerifyDeleteConfigModelsTests(test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - CONFIG_1_ID = 'config_1_id' - CONFIG_2_ID = 'config_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + CONFIG_1_ID: Final = 'config_1_id' + CONFIG_2_ID: Final = 'config_2_id' - def setUp(self): - super(WipeoutServiceVerifyDeleteConfigModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) config_model = config_models.ConfigPropertyModel( @@ -1260,12 +1371,16 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - def test_verify_user_delete_when_user_is_deleted_returns_true(self): + def test_verify_user_delete_when_user_is_deleted_returns_true( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): + def test_verify_user_delete_when_user_is_not_deleted_returns_false( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) @@ -1284,15 +1399,15 @@ def 
test_verify_user_delete_when_user_is_not_deleted_returns_false(self): class WipeoutServiceDeleteCollectionModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - COL_1_ID = 'col_1_id' - COL_2_ID = 'col_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + COL_1_ID: Final = 'col_1_id' + COL_2_ID: Final = 'col_2_id' - def setUp(self): - super(WipeoutServiceDeleteCollectionModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -1305,7 +1420,7 @@ def setUp(self): self.user_2_id, feconf.ROLE_OWNER) - def test_one_collection_snapshot_metadata_is_pseudonymized(self): + def test_one_collection_snapshot_metadata_is_pseudonymized(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1315,7 +1430,7 @@ def test_one_collection_snapshot_metadata_is_pseudonymized(self): collection_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.collection.value] + ).pseudonymizable_entity_mappings[models.Names.COLLECTION.value] ) metadata_model = ( collection_models.CollectionSnapshotMetadataModel.get_by_id( @@ -1347,7 +1462,7 @@ def test_one_collection_snapshot_metadata_is_pseudonymized(self): [collection_mappings[self.COL_1_ID]]) self.assertEqual(rights_metadata_model_2.commit_cmds_user_ids, []) - def test_one_collection_snapshot_content_is_pseudonymized(self): + def test_one_collection_snapshot_content_is_pseudonymized(self) -> None: 
wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1357,7 +1472,7 @@ def test_one_collection_snapshot_content_is_pseudonymized(self): collection_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.collection.value] + ).pseudonymizable_entity_mappings[models.Names.COLLECTION.value] ) rights_content_model_1 = ( collection_models.CollectionRightsSnapshotContentModel.get_by_id( @@ -1377,7 +1492,7 @@ def test_one_collection_snapshot_content_is_pseudonymized(self): self.user_2_id ]) - def test_one_collection_commit_log_is_pseudonymized(self): + def test_one_collection_commit_log_is_pseudonymized(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1387,7 +1502,7 @@ def test_one_collection_commit_log_is_pseudonymized(self): collection_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.collection.value] + ).pseudonymizable_entity_mappings[models.Names.COLLECTION.value] ) commit_log_model_1 = ( collection_models.CollectionCommitLogEntryModel.get_by_id( @@ -1404,7 +1519,9 @@ def test_one_collection_commit_log_is_pseudonymized(self): commit_log_model_2.user_id, collection_mappings[self.COL_1_ID]) - def test_one_collection_with_missing_snapshot_is_pseudonymized(self): + def test_one_collection_with_missing_snapshot_is_pseudonymized( + self + ) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() collection_models.CollectionCommitLogEntryModel( @@ -1437,7 +1554,7 @@ def test_one_collection_with_missing_snapshot_is_pseudonymized(self): collection_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.collection.value] + 
).pseudonymizable_entity_mappings[models.Names.COLLECTION.value] ) metadata_model = ( collection_models.CollectionSnapshotMetadataModel.get_by_id( @@ -1465,7 +1582,8 @@ def test_one_collection_with_missing_snapshot_is_pseudonymized(self): collection_mappings[self.COL_2_ID]) def test_one_collection_when_the_deletion_is_repeated_is_pseudonymized( - self): + self + ) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1490,7 +1608,7 @@ def test_one_collection_when_the_deletion_is_repeated_is_pseudonymized( collection_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.collection.value] + ).pseudonymizable_entity_mappings[models.Names.COLLECTION.value] ) metadata_model = ( collection_models.CollectionSnapshotMetadataModel.get_by_id( @@ -1508,7 +1626,7 @@ def test_one_collection_when_the_deletion_is_repeated_is_pseudonymized( commit_log_model.user_id, collection_mappings[self.COL_1_ID]) - def test_collection_user_is_removed_from_contributors(self): + def test_collection_user_is_removed_from_contributors(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1535,7 +1653,8 @@ def test_collection_user_is_removed_from_contributors(self): self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary) def test_col_user_is_removed_from_contributor_ids_when_missing_from_summary( - self): + self + ) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1562,7 +1681,8 @@ def test_col_user_is_removed_from_contributor_ids_when_missing_from_summary( self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary) def test_delete_exp_where_user_has_role_when_rights_model_marked_as_deleted( - self): + self + ) -> None: self.save_new_valid_collection(self.COL_2_ID, 
self.user_1_id) collection_services.delete_collection(self.user_1_id, self.COL_2_ID) @@ -1583,7 +1703,7 @@ def test_delete_exp_where_user_has_role_when_rights_model_marked_as_deleted( self.assertIsNone( collection_models.CollectionModel.get_by_id(self.COL_2_ID)) - def test_multiple_collections_are_pseudonymized(self): + def test_multiple_collections_are_pseudonymized(self) -> None: self.save_new_valid_collection(self.COL_2_ID, self.user_1_id) self.publish_collection(self.user_1_id, self.COL_2_ID) @@ -1595,7 +1715,7 @@ def test_multiple_collections_are_pseudonymized(self): collection_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.collection.value] + ).pseudonymizable_entity_mappings[models.Names.COLLECTION.value] ) metadata_model = ( collection_models.CollectionSnapshotMetadataModel.get_by_id( @@ -1635,13 +1755,13 @@ class WipeoutServiceVerifyDeleteCollectionModelsTests( test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - COL_1_ID = 'col_1_id' - COL_2_ID = 'col_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + COL_1_ID: Final = 'col_1_id' + COL_2_ID: Final = 'col_2_id' - def setUp(self): - super(WipeoutServiceVerifyDeleteCollectionModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.save_new_valid_collection(self.COL_1_ID, self.user_1_id) @@ -1651,12 +1771,16 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - def test_verify_user_delete_when_user_is_deleted_returns_true(self): + def test_verify_user_delete_when_user_is_deleted_returns_true( + self + ) -> None: wipeout_service.delete_user( 
wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): + def test_verify_user_delete_when_user_is_not_deleted_returns_false( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) @@ -1679,15 +1803,15 @@ def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): class WipeoutServiceDeleteExplorationModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' - def setUp(self): - super(WipeoutServiceDeleteExplorationModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -1700,7 +1824,7 @@ def setUp(self): self.user_2_id, feconf.ROLE_OWNER) - def test_one_exploration_snapshot_metadata_is_pseudonymized(self): + def test_one_exploration_snapshot_metadata_is_pseudonymized(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1710,7 +1834,7 @@ def test_one_exploration_snapshot_metadata_is_pseudonymized(self): exploration_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.exploration.value] + 
).pseudonymizable_entity_mappings[models.Names.EXPLORATION.value] ) metadata_model = ( exp_models.ExplorationSnapshotMetadataModel.get_by_id( @@ -1742,7 +1866,7 @@ def test_one_exploration_snapshot_metadata_is_pseudonymized(self): [exploration_mappings[self.EXP_1_ID]]) self.assertEqual(rights_metadata_model_2.commit_cmds_user_ids, []) - def test_one_exploration_snapshot_content_is_pseudonymized(self): + def test_one_exploration_snapshot_content_is_pseudonymized(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1752,7 +1876,7 @@ def test_one_exploration_snapshot_content_is_pseudonymized(self): exploration_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.exploration.value] + ).pseudonymizable_entity_mappings[models.Names.EXPLORATION.value] ) rights_content_model_1 = ( exp_models.ExplorationRightsSnapshotContentModel.get_by_id( @@ -1772,7 +1896,7 @@ def test_one_exploration_snapshot_content_is_pseudonymized(self): self.user_2_id ]) - def test_one_exploration_commit_log_is_pseudonymized(self): + def test_one_exploration_commit_log_is_pseudonymized(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1782,7 +1906,7 @@ def test_one_exploration_commit_log_is_pseudonymized(self): exploration_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.exploration.value] + ).pseudonymizable_entity_mappings[models.Names.EXPLORATION.value] ) commit_log_model_1 = ( exp_models.ExplorationCommitLogEntryModel.get_by_id( @@ -1798,7 +1922,9 @@ def test_one_exploration_commit_log_is_pseudonymized(self): self.assertEqual( commit_log_model_2.user_id, exploration_mappings[self.EXP_1_ID]) - def test_one_exploration_with_missing_snapshot_is_pseudonymized(self): + def 
test_one_exploration_with_missing_snapshot_is_pseudonymized( + self + ) -> None: exp_models.ExplorationCommitLogEntryModel( id='exploration-%s-1' % self.EXP_2_ID, exploration_id=self.EXP_2_ID, @@ -1831,7 +1957,7 @@ def test_one_exploration_with_missing_snapshot_is_pseudonymized(self): exploration_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.exploration.value] + ).pseudonymizable_entity_mappings[models.Names.EXPLORATION.value] ) metadata_model = ( exp_models.ExplorationSnapshotMetadataModel.get_by_id( @@ -1856,7 +1982,8 @@ def test_one_exploration_with_missing_snapshot_is_pseudonymized(self): commit_log_model_2.user_id, exploration_mappings[self.EXP_2_ID]) def test_one_exploration_when_the_deletion_is_repeated_is_pseudonymized( - self): + self + ) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1881,7 +2008,7 @@ def test_one_exploration_when_the_deletion_is_repeated_is_pseudonymized( exploration_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.exploration.value] + ).pseudonymizable_entity_mappings[models.Names.EXPLORATION.value] ) metadata_model = ( exp_models.ExplorationSnapshotMetadataModel.get_by_id( @@ -1897,7 +2024,7 @@ def test_one_exploration_when_the_deletion_is_repeated_is_pseudonymized( self.assertEqual( commit_log_model.user_id, exploration_mappings[self.EXP_1_ID]) - def test_exploration_user_is_removed_from_contributors(self): + def test_exploration_user_is_removed_from_contributors(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1922,7 +2049,8 @@ def test_exploration_user_is_removed_from_contributors(self): self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary) def 
test_exp_user_is_removed_from_contributor_ids_when_missing_from_summary( - self): + self + ) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -1947,7 +2075,8 @@ def test_exp_user_is_removed_from_contributor_ids_when_missing_from_summary( self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary) def test_delete_exp_where_user_has_role_when_rights_model_marked_as_deleted( - self): + self + ) -> None: self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id) exp_services.delete_exploration(self.user_1_id, self.EXP_2_ID) @@ -1968,7 +2097,7 @@ def test_delete_exp_where_user_has_role_when_rights_model_marked_as_deleted( self.assertIsNone( exp_models.ExplorationModel.get_by_id(self.EXP_2_ID)) - def test_multiple_explorations_are_pseudonymized(self): + def test_multiple_explorations_are_pseudonymized(self) -> None: self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id) self.publish_exploration(self.user_1_id, self.EXP_2_ID) @@ -1980,7 +2109,7 @@ def test_multiple_explorations_are_pseudonymized(self): exploration_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.exploration.value] + ).pseudonymizable_entity_mappings[models.Names.EXPLORATION.value] ) metadata_model = ( exp_models.ExplorationSnapshotMetadataModel.get_by_id( @@ -2016,13 +2145,13 @@ class WipeoutServiceVerifyDeleteExplorationModelsTests( test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' - def setUp(self): - super(WipeoutServiceVerifyDeleteExplorationModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() 
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.save_new_valid_exploration(self.EXP_1_ID, self.user_1_id) @@ -2032,12 +2161,14 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - def test_verify_user_delete_when_user_is_deleted_returns_true(self): + def test_verify_user_delete_when_user_is_deleted_returns_true(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): + def test_verify_user_delete_when_user_is_not_deleted_returns_false( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) @@ -2060,20 +2191,20 @@ def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): class WipeoutServiceDeleteFeedbackModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - FEEDBACK_1_ID = 'feedback_1_id' - FEEDBACK_2_ID = 'feedback_2_id' - MESSAGE_1_ID = 'message_1_id' - MESSAGE_2_ID = 'message_2_id' - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - NUMBER_OF_MODELS = 150 - - def setUp(self): - super(WipeoutServiceDeleteFeedbackModelsTests, self).setUp() + FEEDBACK_1_ID: Final = 'feedback_1_id' + FEEDBACK_2_ID: Final = 'feedback_2_id' + MESSAGE_1_ID: Final = 'message_1_id' + MESSAGE_2_ID: Final = 'message_2_id' + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + 
NUMBER_OF_MODELS: Final = 150 + + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -2112,7 +2243,7 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def test_one_feedback_is_pseudonymized(self): + def test_one_feedback_is_pseudonymized(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -2120,7 +2251,7 @@ def test_one_feedback_is_pseudonymized(self): feedback_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.feedback.value] + ).pseudonymizable_entity_mappings[models.Names.FEEDBACK.value] ) feedback_thread_model = ( feedback_models.GeneralFeedbackThreadModel.get_by_id( @@ -2139,7 +2270,9 @@ def test_one_feedback_is_pseudonymized(self): feedback_mappings[self.FEEDBACK_1_ID] ) - def test_one_feedback_when_the_deletion_is_repeated_is_pseudonymized(self): + def test_one_feedback_when_the_deletion_is_repeated_is_pseudonymized( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -2161,7 +2294,7 @@ def test_one_feedback_when_the_deletion_is_repeated_is_pseudonymized(self): feedback_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.feedback.value] + ).pseudonymizable_entity_mappings[models.Names.FEEDBACK.value] ) new_feedback_thread_model = ( feedback_models.GeneralFeedbackThreadModel.get_by_id( @@ -2172,7 +2305,7 @@ def test_one_feedback_when_the_deletion_is_repeated_is_pseudonymized(self): feedback_mappings[self.FEEDBACK_1_ID] ) - def test_multiple_feedbacks_are_pseudonymized(self): + def test_multiple_feedbacks_are_pseudonymized(self) -> None: feedback_thread_models = [] for i in 
range(self.NUMBER_OF_MODELS): feedback_thread_models.append( @@ -2201,8 +2334,8 @@ def test_multiple_feedbacks_are_pseudonymized(self): ) feedback_models.GeneralFeedbackMessageModel.update_timestamps_multi( feedback_message_models) - datastore_services.put_multi( - feedback_thread_models + feedback_message_models) + datastore_services.put_multi(feedback_message_models) + datastore_services.put_multi(feedback_thread_models) wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -2210,7 +2343,7 @@ def test_multiple_feedbacks_are_pseudonymized(self): feedback_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.feedback.value] + ).pseudonymizable_entity_mappings[models.Names.FEEDBACK.value] ) pseudonymized_feedback_thread_models = ( @@ -2219,6 +2352,8 @@ def test_multiple_feedbacks_are_pseudonymized(self): ) ) for feedback_thread_model in pseudonymized_feedback_thread_models: + # Ruling out the possibility of None for mypy type checking. + assert feedback_thread_model is not None self.assertEqual( feedback_thread_model.original_author_id, feedback_mappings[feedback_thread_model.id] @@ -2230,19 +2365,21 @@ def test_multiple_feedbacks_are_pseudonymized(self): ) ) for feedback_message_model in pseudonymized_feedback_message_models: + # Ruling out the possibility of None for mypy type checking. 
+ assert feedback_message_model is not None self.assertEqual( feedback_message_model.author_id, feedback_mappings[feedback_message_model.thread_id] ) - def test_one_feedback_with_multiple_users_is_pseudonymized(self): + def test_one_feedback_with_multiple_users_is_pseudonymized(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) feedback_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.feedback.value] + ).pseudonymizable_entity_mappings[models.Names.FEEDBACK.value] ) # Verify first user is pseudonymized. @@ -2267,7 +2404,7 @@ def test_one_feedback_with_multiple_users_is_pseudonymized(self): feedback_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.feedback.value] + ).pseudonymizable_entity_mappings[models.Names.FEEDBACK.value] ) # Verify second user is pseudonymized. @@ -2280,14 +2417,14 @@ def test_one_feedback_with_multiple_users_is_pseudonymized(self): class WipeoutServiceVerifyDeleteFeedbackModelsTests(test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - FEEDBACK_1_ID = 'feedback_1_id' - MESSAGE_1_ID = 'message_1_id' - EXP_1_ID = 'exp_1_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + FEEDBACK_1_ID: Final = 'feedback_1_id' + MESSAGE_1_ID: Final = 'message_1_id' + EXP_1_ID: Final = 'exp_1_id' - def setUp(self): - super(WipeoutServiceVerifyDeleteFeedbackModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) feedback_models.GeneralFeedbackThreadModel( @@ -2323,12 +2460,14 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_1_id) 
self.process_and_flush_pending_tasks() - def test_verify_user_delete_when_user_is_deleted_returns_true(self): + def test_verify_user_delete_when_user_is_deleted_returns_true(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): + def test_verify_user_delete_when_user_is_not_deleted_returns_false( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) @@ -2354,17 +2493,17 @@ def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): class WipeoutServiceDeleteImprovementsModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' - def setUp(self): - super(WipeoutServiceDeleteImprovementsModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.improvements_model_1_id = ( - improvements_models.TaskEntryModel.create( + improvements_models.ExplorationStatsTaskEntryModel.create( entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION, entity_id=self.EXP_1_ID, entity_version=1, @@ -2377,7 +2516,7 @@ def setUp(self): ) ) self.improvements_model_2_id = ( - improvements_models.TaskEntryModel.create( + improvements_models.ExplorationStatsTaskEntryModel.create( entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION, entity_id=self.EXP_2_ID, entity_version=1, @@ -2390,46 +2529,50 @@ def setUp(self): ) ) - def 
test_delete_user_is_successful(self): + def test_delete_user_is_successful(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() self.assertIsNotNone( - improvements_models.TaskEntryModel.get_by_id( + improvements_models.ExplorationStatsTaskEntryModel.get_by_id( self.improvements_model_1_id)) self.assertIsNotNone( - improvements_models.TaskEntryModel.get_by_id( + improvements_models.ExplorationStatsTaskEntryModel.get_by_id( self.improvements_model_2_id)) wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) - self.assertIsNone( - improvements_models.TaskEntryModel.get_by_id( + task_entry_model1 = ( + improvements_models.ExplorationStatsTaskEntryModel.get( self.improvements_model_1_id)) - self.assertIsNone( - improvements_models.TaskEntryModel.get_by_id( + task_entry_model2 = ( + improvements_models.ExplorationStatsTaskEntryModel.get( self.improvements_model_2_id)) + self.assertNotEqual(task_entry_model1.resolver_id, self.user_1_id) + self.assertEqual(task_entry_model1.resolver_id[:3], 'pid') + self.assertEqual( + task_entry_model1.resolver_id, task_entry_model2.resolver_id) class WipeoutServiceVerifyDeleteImprovementsModelsTests( test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' - EXP_3_ID = 'exp_3_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' + EXP_3_ID: Final = 'exp_3_id' - def setUp(self): - super(WipeoutServiceVerifyDeleteImprovementsModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) 
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) - improvements_models.TaskEntryModel.create( + improvements_models.ExplorationStatsTaskEntryModel.create( entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION, entity_id=self.EXP_1_ID, entity_version=1, @@ -2440,7 +2583,7 @@ def setUp(self): status=constants.TASK_STATUS_RESOLVED, resolver_id=self.user_1_id ) - improvements_models.TaskEntryModel.create( + improvements_models.ExplorationStatsTaskEntryModel.create( entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION, entity_id=self.EXP_2_ID, entity_version=1, @@ -2454,27 +2597,34 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - def test_verify_user_delete_when_user_is_deleted_returns_true(self): + def test_verify_user_delete_when_user_is_deleted_returns_true(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): + def test_verify_user_delete_when_user_is_not_deleted_returns_false( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - improvements_models.TaskEntryModel.create( - entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION, - entity_id=self.EXP_3_ID, - entity_version=1, - task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE, - target_type=constants.TASK_TARGET_TYPE_STATE, - target_id='State', - issue_description=None, - status=constants.TASK_STATUS_RESOLVED, - resolver_id=self.user_1_id + task_entry_id = ( + improvements_models.ExplorationStatsTaskEntryModel.generate_task_id( + constants.TASK_ENTITY_TYPE_EXPLORATION, + self.EXP_2_ID, + 1, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, + 'State' + ) ) + 
task_entry_model = ( + improvements_models.ExplorationStatsTaskEntryModel.get( + task_entry_id)) + task_entry_model.resolver_id = self.user_1_id + task_entry_model.update_timestamps() + task_entry_model.put() self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id)) @@ -2486,33 +2636,35 @@ def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): class WipeoutServiceDeleteQuestionModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - SKILL_1_ID = 'skill_1_id' - QUESTION_1_ID = 'question_1_id' - QUESTION_2_ID = 'question_2_id' - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + SKILL_1_ID: Final = 'skill_1_id' + QUESTION_1_ID: Final = 'question_1_id' + QUESTION_2_ID: Final = 'question_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' - def setUp(self): - super(WipeoutServiceDeleteQuestionModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) - self.set_curriculum_admins((self.USER_1_USERNAME, self.USER_2_USERNAME)) + self.set_curriculum_admins([self.USER_1_USERNAME, self.USER_2_USERNAME]) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) self.save_new_skill(self.SKILL_1_ID, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_1_ID, self.user_1_id, - self._create_valid_question_data('ABC'), - [self.SKILL_1_ID] + self._create_valid_question_data('ABC', content_id_generator), + [self.SKILL_1_ID], + content_id_generator.next_content_id_index ) wipeout_service.pre_delete_user(self.user_1_id) 
wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def test_one_question_is_pseudonymized(self): + def test_one_question_is_pseudonymized(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -2520,7 +2672,7 @@ def test_one_question_is_pseudonymized(self): question_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.question.value] + ).pseudonymizable_entity_mappings[models.Names.QUESTION.value] ) metadata_model = ( question_models.QuestionSnapshotMetadataModel.get_by_id( @@ -2535,7 +2687,7 @@ def test_one_question_is_pseudonymized(self): self.assertEqual( commit_log_model.user_id, question_mappings[self.QUESTION_1_ID]) - def test_one_question_with_missing_snapshot_is_pseudonymized(self): + def test_one_question_with_missing_snapshot_is_pseudonymized(self) -> None: question_models.QuestionCommitLogEntryModel( id='question-%s-1' % self.QUESTION_2_ID, question_id=self.QUESTION_2_ID, @@ -2561,7 +2713,7 @@ def test_one_question_with_missing_snapshot_is_pseudonymized(self): question_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.question.value] + ).pseudonymizable_entity_mappings[models.Names.QUESTION.value] ) metadata_model = ( question_models.QuestionSnapshotMetadataModel.get_by_id( @@ -2585,7 +2737,9 @@ def test_one_question_with_missing_snapshot_is_pseudonymized(self): self.assertEqual( commit_log_model_2.user_id, question_mappings[self.QUESTION_2_ID]) - def test_one_question_when_the_deletion_is_repeated_is_pseudonymized(self): + def test_one_question_when_the_deletion_is_repeated_is_pseudonymized( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -2608,7 +2762,7 @@ def test_one_question_when_the_deletion_is_repeated_is_pseudonymized(self): 
question_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.question.value] + ).pseudonymizable_entity_mappings[models.Names.QUESTION.value] ) metadata_model = ( question_models.QuestionSnapshotMetadataModel.get_by_id( @@ -2625,12 +2779,14 @@ def test_one_question_when_the_deletion_is_repeated_is_pseudonymized(self): self.assertEqual( commit_log_model.user_id, question_mappings[self.QUESTION_1_ID]) - def test_multiple_questions_are_pseudonymized(self): + def test_multiple_questions_are_pseudonymized(self) -> None: + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_2_ID, self.user_1_id, - self._create_valid_question_data('ABC'), - [self.SKILL_1_ID] + self._create_valid_question_data('ABC', content_id_generator), + [self.SKILL_1_ID], + content_id_generator.next_content_id_index ) wipeout_service.delete_user( @@ -2639,7 +2795,7 @@ def test_multiple_questions_are_pseudonymized(self): question_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.question.value] + ).pseudonymizable_entity_mappings[models.Names.QUESTION.value] ) metadata_model = ( question_models.QuestionSnapshotMetadataModel.get_by_id( @@ -2670,12 +2826,16 @@ def test_multiple_questions_are_pseudonymized(self): self.assertEqual( commit_log_model.user_id, question_mappings[self.QUESTION_2_ID]) - def test_multiple_questions_with_multiple_users_are_pseudonymized(self): + def test_multiple_questions_with_multiple_users_are_pseudonymized( + self + ) -> None: + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_2_ID, self.user_2_id, - self._create_valid_question_data('ABC'), - [self.SKILL_1_ID] + self._create_valid_question_data('ABC', content_id_generator), + [self.SKILL_1_ID], + content_id_generator.next_content_id_index ) wipeout_service.delete_user( @@ 
-2685,7 +2845,7 @@ def test_multiple_questions_with_multiple_users_are_pseudonymized(self): question_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.question.value] + ).pseudonymizable_entity_mappings[models.Names.QUESTION.value] ) metadata_model = ( question_models.QuestionSnapshotMetadataModel.get_by_id( @@ -2725,7 +2885,7 @@ def test_multiple_questions_with_multiple_users_are_pseudonymized(self): question_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.question.value] + ).pseudonymizable_entity_mappings[models.Names.QUESTION.value] ) metadata_model = ( question_models.QuestionSnapshotMetadataModel.get_by_id( @@ -2744,7 +2904,7 @@ def test_multiple_questions_with_multiple_users_are_pseudonymized(self): self.assertEqual( commit_log_model.user_id, question_mappings_2[self.QUESTION_2_ID]) - def test_one_question_with_multiple_users_is_pseudonymized(self): + def test_one_question_with_multiple_users_is_pseudonymized(self) -> None: question_services.update_question( self.user_2_id, self.QUESTION_1_ID, @@ -2765,7 +2925,7 @@ def test_one_question_with_multiple_users_is_pseudonymized(self): question_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.question.value] + ).pseudonymizable_entity_mappings[models.Names.QUESTION.value] ) metadata_model = ( question_models.QuestionSnapshotMetadataModel.get_by_id( @@ -2805,7 +2965,7 @@ def test_one_question_with_multiple_users_is_pseudonymized(self): question_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.question.value] + ).pseudonymizable_entity_mappings[models.Names.QUESTION.value] ) metadata_model = ( question_models.QuestionSnapshotMetadataModel.get_by_id( @@ -2828,44 +2988,48 @@ def 
test_one_question_with_multiple_users_is_pseudonymized(self): class WipeoutServiceVerifyDeleteQuestionModelsTests(test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - SKILL_1_ID = 'SKILL_1_ID' - QUESTION_1_ID = 'QUESTION_1_ID' - QUESTION_2_ID = 'QUESTION_2_ID' - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + SKILL_1_ID: Final = 'SKILL_1_ID' + QUESTION_1_ID: Final = 'QUESTION_1_ID' + QUESTION_2_ID: Final = 'QUESTION_2_ID' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' - def setUp(self): - super(WipeoutServiceVerifyDeleteQuestionModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) - self.set_curriculum_admins((self.USER_1_USERNAME, self.USER_2_USERNAME)) + self.set_curriculum_admins([self.USER_1_USERNAME, self.USER_2_USERNAME]) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) self.save_new_skill(self.SKILL_1_ID, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_1_ID, self.user_1_id, - self._create_valid_question_data('ABC'), - [self.SKILL_1_ID] + self._create_valid_question_data('ABC', content_id_generator), + [self.SKILL_1_ID], + content_id_generator.next_content_id_index ) + content_id_generator = translation_domain.ContentIdGenerator() self.save_new_question( self.QUESTION_2_ID, self.user_2_id, - self._create_valid_question_data('ABC'), - [self.SKILL_1_ID] + self._create_valid_question_data('ABC', content_id_generator), + [self.SKILL_1_ID], + content_id_generator.next_content_id_index ) wipeout_service.pre_delete_user(self.user_1_id) 
wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def test_verification_is_successful(self): + def test_verification_is_successful(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verification_when_deletion_failed_is_unsuccessful(self): + def test_verification_when_deletion_failed_is_unsuccessful(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_2_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id)) @@ -2887,18 +3051,18 @@ def test_verification_when_deletion_failed_is_unsuccessful(self): class WipeoutServiceDeleteSkillModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - SKILL_1_ID = 'skill_1_id' - SKILL_2_ID = 'skill_2_id' - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + SKILL_1_ID: Final = 'skill_1_id' + SKILL_2_ID: Final = 'skill_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' - def setUp(self): - super(WipeoutServiceDeleteSkillModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) - self.set_curriculum_admins((self.USER_1_USERNAME, self.USER_2_USERNAME)) + self.set_curriculum_admins([self.USER_1_USERNAME, self.USER_2_USERNAME]) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) self.save_new_skill(self.SKILL_1_ID, self.user_1_id) @@ -2906,7 +3070,7 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def 
test_one_skill_is_pseudonymized(self): + def test_one_skill_is_pseudonymized(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -2914,7 +3078,7 @@ def test_one_skill_is_pseudonymized(self): skill_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.skill.value] + ).pseudonymizable_entity_mappings[models.Names.SKILL.value] ) metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id( '%s-1' % self.SKILL_1_ID) @@ -2925,7 +3089,7 @@ def test_one_skill_is_pseudonymized(self): self.assertEqual( commit_log_model.user_id, skill_mappings[self.SKILL_1_ID]) - def test_one_skill_with_missing_snapshot_is_pseudonymized(self): + def test_one_skill_with_missing_snapshot_is_pseudonymized(self) -> None: skill_models.SkillCommitLogEntryModel( id='skill-%s-1' % self.SKILL_2_ID, skill_id=self.SKILL_2_ID, @@ -2951,7 +3115,7 @@ def test_one_skill_with_missing_snapshot_is_pseudonymized(self): skill_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.skill.value] + ).pseudonymizable_entity_mappings[models.Names.SKILL.value] ) metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id( '%s-1' % self.SKILL_1_ID) @@ -2966,7 +3130,9 @@ def test_one_skill_with_missing_snapshot_is_pseudonymized(self): self.assertEqual( commit_log_model_2.user_id, skill_mappings[self.SKILL_2_ID]) - def test_one_skill_when_the_deletion_is_repeated_is_pseudonymized(self): + def test_one_skill_when_the_deletion_is_repeated_is_pseudonymized( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -2986,7 +3152,7 @@ def test_one_skill_when_the_deletion_is_repeated_is_pseudonymized(self): skill_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.skill.value] + 
).pseudonymizable_entity_mappings[models.Names.SKILL.value] ) metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id( '%s-1' % self.SKILL_1_ID) @@ -2997,7 +3163,7 @@ def test_one_skill_when_the_deletion_is_repeated_is_pseudonymized(self): self.assertEqual( commit_log_model.user_id, skill_mappings[self.SKILL_1_ID]) - def test_multiple_skills_are_pseudonymized(self): + def test_multiple_skills_are_pseudonymized(self) -> None: self.save_new_skill(self.SKILL_2_ID, self.user_1_id) wipeout_service.delete_user( @@ -3006,7 +3172,7 @@ def test_multiple_skills_are_pseudonymized(self): skill_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.skill.value] + ).pseudonymizable_entity_mappings[models.Names.SKILL.value] ) metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id( '%s-1' % self.SKILL_1_ID) @@ -3025,7 +3191,9 @@ def test_multiple_skills_are_pseudonymized(self): self.assertEqual( commit_log_model.user_id, skill_mappings[self.SKILL_2_ID]) - def test_multiple_skills_with_multiple_users_are_pseudonymized(self): + def test_multiple_skills_with_multiple_users_are_pseudonymized( + self + ) -> None: self.save_new_skill(self.SKILL_2_ID, self.user_2_id) wipeout_service.delete_user( @@ -3035,7 +3203,7 @@ def test_multiple_skills_with_multiple_users_are_pseudonymized(self): skill_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.skill.value] + ).pseudonymizable_entity_mappings[models.Names.SKILL.value] ) metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id( '%s-1' % self.SKILL_1_ID) @@ -3061,7 +3229,7 @@ def test_multiple_skills_with_multiple_users_are_pseudonymized(self): skill_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.skill.value] + ).pseudonymizable_entity_mappings[models.Names.SKILL.value] ) 
metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id( '%s-1' % self.SKILL_2_ID) @@ -3072,7 +3240,7 @@ def test_multiple_skills_with_multiple_users_are_pseudonymized(self): self.assertEqual( commit_log_model.user_id, skill_mappings_2[self.SKILL_2_ID]) - def test_one_skill_with_multiple_users_is_pseudonymized(self): + def test_one_skill_with_multiple_users_is_pseudonymized(self) -> None: skill_services.update_skill( self.user_2_id, self.SKILL_1_ID, @@ -3092,7 +3260,7 @@ def test_one_skill_with_multiple_users_is_pseudonymized(self): skill_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.skill.value] + ).pseudonymizable_entity_mappings[models.Names.SKILL.value] ) metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id( '%s-1' % self.SKILL_1_ID) @@ -3118,7 +3286,7 @@ def test_one_skill_with_multiple_users_is_pseudonymized(self): skill_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.skill.value] + ).pseudonymizable_entity_mappings[models.Names.SKILL.value] ) metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id( '%s-2' % self.SKILL_1_ID) @@ -3133,18 +3301,18 @@ def test_one_skill_with_multiple_users_is_pseudonymized(self): class WipeoutServiceVerifyDeleteSkillModelsTests(test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - SKILL_1_ID = 'skill_1_id' - SKILL_2_ID = 'skill_2_id' - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + SKILL_1_ID: Final = 'skill_1_id' + SKILL_2_ID: Final = 'skill_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' - def setUp(self): - super(WipeoutServiceVerifyDeleteSkillModelsTests, self).setUp() + 
def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) - self.set_curriculum_admins((self.USER_1_USERNAME, self.USER_2_USERNAME)) + self.set_curriculum_admins([self.USER_1_USERNAME, self.USER_2_USERNAME]) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) self.save_new_skill(self.SKILL_1_ID, self.user_1_id) @@ -3153,12 +3321,12 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def test_verification_is_successful(self): + def test_verification_is_successful(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verification_when_deletion_failed_is_unsuccessful(self): + def test_verification_when_deletion_failed_is_unsuccessful(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_2_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id)) @@ -3185,16 +3353,16 @@ def test_verification_when_deletion_failed_is_unsuccessful(self): class WipeoutServiceDeleteStoryModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - TOPIC_1_ID = 'topic_1_id' - STORY_1_ID = 'story_1_id' - STORY_2_ID = 'story_2_id' - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + TOPIC_1_ID: Final = 'topic_1_id' + STORY_1_ID: Final = 'story_1_id' + STORY_2_ID: Final = 'story_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' - def setUp(self): - super(WipeoutServiceDeleteStoryModelsTests, self).setUp() + def setUp(self) -> None: + 
super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -3210,7 +3378,7 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def test_one_story_is_pseudonymized(self): + def test_one_story_is_pseudonymized(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -3218,7 +3386,7 @@ def test_one_story_is_pseudonymized(self): story_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.story.value] + ).pseudonymizable_entity_mappings[models.Names.STORY.value] ) metadata_model = story_models.StorySnapshotMetadataModel.get_by_id( '%s-1' % self.STORY_1_ID) @@ -3229,7 +3397,7 @@ def test_one_story_is_pseudonymized(self): self.assertEqual( commit_log_model.user_id, story_mappings[self.STORY_1_ID]) - def test_one_story_with_missing_snapshot_is_pseudonymized(self): + def test_one_story_with_missing_snapshot_is_pseudonymized(self) -> None: story_models.StoryCommitLogEntryModel( id='story-%s-1' % self.STORY_2_ID, story_id=self.STORY_2_ID, @@ -3255,7 +3423,7 @@ def test_one_story_with_missing_snapshot_is_pseudonymized(self): story_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.story.value] + ).pseudonymizable_entity_mappings[models.Names.STORY.value] ) metadata_model = story_models.StorySnapshotMetadataModel.get_by_id( '%s-1' % self.STORY_1_ID) @@ -3270,7 +3438,9 @@ def test_one_story_with_missing_snapshot_is_pseudonymized(self): self.assertEqual( commit_log_model_2.user_id, story_mappings[self.STORY_2_ID]) - def test_one_story_when_the_deletion_is_repeated_is_pseudonymized(self): + def test_one_story_when_the_deletion_is_repeated_is_pseudonymized( + self + ) -> None: 
wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -3290,7 +3460,7 @@ def test_one_story_when_the_deletion_is_repeated_is_pseudonymized(self): story_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.story.value] + ).pseudonymizable_entity_mappings[models.Names.STORY.value] ) metadata_model = story_models.StorySnapshotMetadataModel.get_by_id( '%s-1' % self.STORY_1_ID) @@ -3301,7 +3471,7 @@ def test_one_story_when_the_deletion_is_repeated_is_pseudonymized(self): self.assertEqual( commit_log_model.user_id, story_mappings[self.STORY_1_ID]) - def test_multiple_stories_are_pseudonymized(self): + def test_multiple_stories_are_pseudonymized(self) -> None: self.save_new_topic( self.TOPIC_1_ID, self.user_1_id, name='Topic 2', abbreviated_name='abbrev-two', url_fragment='frag-two') @@ -3313,7 +3483,7 @@ def test_multiple_stories_are_pseudonymized(self): story_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.story.value] + ).pseudonymizable_entity_mappings[models.Names.STORY.value] ) metadata_model = story_models.StorySnapshotMetadataModel.get_by_id( '%s-1' % self.STORY_1_ID) @@ -3332,7 +3502,9 @@ def test_multiple_stories_are_pseudonymized(self): self.assertEqual( commit_log_model.user_id, story_mappings[self.STORY_2_ID]) - def test_multiple_stories_with_multiple_users_are_pseudonymized(self): + def test_multiple_stories_with_multiple_users_are_pseudonymized( + self + ) -> None: self.save_new_topic( self.TOPIC_1_ID, self.user_2_id, name='Topic 2', abbreviated_name='abbrev-three', url_fragment='frag-three') @@ -3345,7 +3517,7 @@ def test_multiple_stories_with_multiple_users_are_pseudonymized(self): story_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.story.value] + 
).pseudonymizable_entity_mappings[models.Names.STORY.value] ) metadata_model = story_models.StorySnapshotMetadataModel.get_by_id( '%s-1' % self.STORY_1_ID) @@ -3371,7 +3543,7 @@ def test_multiple_stories_with_multiple_users_are_pseudonymized(self): story_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.story.value] + ).pseudonymizable_entity_mappings[models.Names.STORY.value] ) metadata_model = story_models.StorySnapshotMetadataModel.get_by_id( '%s-1' % self.STORY_2_ID) @@ -3382,7 +3554,7 @@ def test_multiple_stories_with_multiple_users_are_pseudonymized(self): self.assertEqual( commit_log_model.user_id, story_mappings_2[self.STORY_2_ID]) - def test_one_story_with_multiple_users_is_pseudonymized(self): + def test_one_story_with_multiple_users_is_pseudonymized(self) -> None: story_services.update_story( self.user_2_id, self.STORY_1_ID, @@ -3401,7 +3573,7 @@ def test_one_story_with_multiple_users_is_pseudonymized(self): story_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.story.value] + ).pseudonymizable_entity_mappings[models.Names.STORY.value] ) metadata_model = story_models.StorySnapshotMetadataModel.get_by_id( '%s-1' % self.STORY_1_ID) @@ -3427,7 +3599,7 @@ def test_one_story_with_multiple_users_is_pseudonymized(self): story_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.story.value] + ).pseudonymizable_entity_mappings[models.Names.STORY.value] ) metadata_model = story_models.StorySnapshotMetadataModel.get_by_id( '%s-2' % self.STORY_1_ID) @@ -3442,17 +3614,17 @@ def test_one_story_with_multiple_users_is_pseudonymized(self): class WipeoutServiceVerifyDeleteStoryModelsTests(test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - TOPIC_1_ID = 'topic_1_id' - TOPIC_2_ID = 
'topic_2_id' - STORY_1_ID = 'story_1_id' - STORY_2_ID = 'story_2_id' - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - - def setUp(self): - super(WipeoutServiceVerifyDeleteStoryModelsTests, self).setUp() + TOPIC_1_ID: Final = 'topic_1_id' + TOPIC_2_ID: Final = 'topic_2_id' + STORY_1_ID: Final = 'story_1_id' + STORY_2_ID: Final = 'story_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -3473,12 +3645,12 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def test_verification_is_successful(self): + def test_verification_is_successful(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verification_when_deletion_failed_is_unsuccessful(self): + def test_verification_when_deletion_failed_is_unsuccessful(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_2_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id)) @@ -3504,16 +3676,16 @@ def test_verification_when_deletion_failed_is_unsuccessful(self): class WipeoutServiceDeleteSubtopicModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - TOP_1_ID = 'top_1_id' - SUBTOP_1_ID = 'subtop_1_id' - SUBTOP_2_ID = 'subtop_2_id' + USER_1_EMAIL: Final = 'some@email.com' 
+ USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + TOP_1_ID: Final = 'top_1_id' + SUBTOP_1_ID: Final = 1 + SUBTOP_2_ID: Final = 2 - def setUp(self): - super(WipeoutServiceDeleteSubtopicModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -3525,7 +3697,7 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def test_one_subtopic_is_pseudonymized(self): + def test_one_subtopic_is_pseudonymized(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -3533,7 +3705,7 @@ def test_one_subtopic_is_pseudonymized(self): subtopic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.subtopic.value] + ).pseudonymizable_entity_mappings[models.Names.SUBTOPIC.value] ) metadata_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id( @@ -3548,10 +3720,10 @@ def test_one_subtopic_is_pseudonymized(self): commit_log_model.user_id, subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)]) - def test_one_subtopic_with_missing_snapshot_is_pseudonymized(self): + def test_one_subtopic_with_missing_snapshot_is_pseudonymized(self) -> None: subtopic_models.SubtopicPageCommitLogEntryModel( id='%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID), - subtopic_page_id=self.SUBTOP_2_ID, + subtopic_page_id=str(self.SUBTOP_2_ID), user_id=self.user_1_id, commit_type='create_new', commit_cmds=[{}], @@ -3575,7 +3747,7 @@ def test_one_subtopic_with_missing_snapshot_is_pseudonymized(self): subtopic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.subtopic.value] + 
).pseudonymizable_entity_mappings[models.Names.SUBTOPIC.value] ) metadata_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id( @@ -3590,7 +3762,9 @@ def test_one_subtopic_with_missing_snapshot_is_pseudonymized(self): commit_log_model.user_id, subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)]) - def test_one_subtopic_when_the_deletion_is_repeated_is_pseudonymized(self): + def test_one_subtopic_when_the_deletion_is_repeated_is_pseudonymized( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -3611,7 +3785,7 @@ def test_one_subtopic_when_the_deletion_is_repeated_is_pseudonymized(self): subtopic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.subtopic.value] + ).pseudonymizable_entity_mappings[models.Names.SUBTOPIC.value] ) metadata_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id( @@ -3626,7 +3800,7 @@ def test_one_subtopic_when_the_deletion_is_repeated_is_pseudonymized(self): commit_log_model.user_id, subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)]) - def test_multiple_subtopics_are_pseudonymized(self): + def test_multiple_subtopics_are_pseudonymized(self) -> None: self.save_new_subtopic(self.SUBTOP_2_ID, self.user_1_id, self.TOP_1_ID) wipeout_service.delete_user( @@ -3635,7 +3809,7 @@ def test_multiple_subtopics_are_pseudonymized(self): subtopic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.subtopic.value] + ).pseudonymizable_entity_mappings[models.Names.SUBTOPIC.value] ) metadata_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id( @@ -3662,7 +3836,9 @@ def test_multiple_subtopics_are_pseudonymized(self): commit_log_model.user_id, subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)]) - def 
test_multiple_subtopics_with_multiple_users_are_pseudonymized(self): + def test_multiple_subtopics_with_multiple_users_are_pseudonymized( + self + ) -> None: self.save_new_subtopic(self.SUBTOP_2_ID, self.user_2_id, self.TOP_1_ID) wipeout_service.delete_user( @@ -3672,7 +3848,7 @@ def test_multiple_subtopics_with_multiple_users_are_pseudonymized(self): subtopic_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.subtopic.value] + ).pseudonymizable_entity_mappings[models.Names.SUBTOPIC.value] ) metadata_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id( @@ -3704,7 +3880,7 @@ def test_multiple_subtopics_with_multiple_users_are_pseudonymized(self): subtopic_mappings_2 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.subtopic.value] + ).pseudonymizable_entity_mappings[models.Names.SUBTOPIC.value] ) metadata_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id( @@ -3719,7 +3895,7 @@ def test_multiple_subtopics_with_multiple_users_are_pseudonymized(self): commit_log_model.user_id, subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)]) - def test_one_subtopic_with_multiple_users_is_pseudonymized(self): + def test_one_subtopic_with_multiple_users_is_pseudonymized(self) -> None: subtopic_page_services.save_subtopic_page( self.user_2_id, self.subtopic_page, @@ -3745,7 +3921,7 @@ def test_one_subtopic_with_multiple_users_is_pseudonymized(self): subtopic_mappings_1 = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.subtopic.value] + ).pseudonymizable_entity_mappings[models.Names.SUBTOPIC.value] ) metadata_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id( @@ -3777,7 +3953,7 @@ def test_one_subtopic_with_multiple_users_is_pseudonymized(self): subtopic_mappings_2 = ( 
user_models.PendingDeletionRequestModel.get_by_id( self.user_2_id - ).pseudonymizable_entity_mappings[models.NAMES.subtopic.value] + ).pseudonymizable_entity_mappings[models.Names.SUBTOPIC.value] ) metadata_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id( @@ -3796,13 +3972,13 @@ def test_one_subtopic_with_multiple_users_is_pseudonymized(self): class WipeoutServiceVerifyDeleteSubtopicModelsTests(test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - TOP_1_ID = 'top_1_id' - SUBTOP_1_ID = 'subtop_1_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + TOP_1_ID: Final = 'top_1_id' + SUBTOP_1_ID: Final = 1 - def setUp(self): - super(WipeoutServiceVerifyDeleteSubtopicModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.save_new_topic(self.TOP_1_ID, self.user_1_id) @@ -3810,12 +3986,12 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - def test_verification_is_successful(self): + def test_verification_is_successful(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verification_when_deletion_failed_is_unsuccessful(self): + def test_verification_when_deletion_failed_is_unsuccessful(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) @@ -3838,44 +4014,23 @@ def test_verification_when_deletion_failed_is_unsuccessful(self): class WipeoutServiceDeleteSuggestionModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - 
USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - VOICEOVER_1_ID = 'voiceover_1_id' - VOICEOVER_2_ID = 'voiceover_2_id' - TRANSLATION_STATS_1_ID = 'translation_1_id' - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' - - def setUp(self): - super(WipeoutServiceDeleteSuggestionModelsTests, self).setUp() + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + VOICEOVER_1_ID: Final = 'voiceover_1_id' + VOICEOVER_2_ID: Final = 'voiceover_2_id' + TRANSLATION_STATS_1_ID: Final = 'translation_1_id' + QUESTION_STATS_1_ID = 'question_1_id' + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' + + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) - suggestion_models.GeneralVoiceoverApplicationModel( - id=self.VOICEOVER_1_ID, - target_type=feconf.ENTITY_TYPE_EXPLORATION, - target_id=self.EXP_1_ID, - language_code='en', - status=suggestion_models.STATUS_IN_REVIEW, - content='Text', - filename='filename.txt', - author_id=self.user_1_id, - final_reviewer_id=self.user_2_id, - ).put() - suggestion_models.GeneralVoiceoverApplicationModel( - id=self.VOICEOVER_2_ID, - target_type=feconf.ENTITY_TYPE_EXPLORATION, - target_id=self.EXP_2_ID, - language_code='en', - status=suggestion_models.STATUS_IN_REVIEW, - content='Text', - filename='filename.txt', - author_id=self.user_2_id, - final_reviewer_id=self.user_1_id, - ).put() suggestion_models.TranslationContributionStatsModel( id=self.TRANSLATION_STATS_1_ID, language_code='cs', @@ -3890,113 +4045,104 @@ def setUp(self): rejected_translation_word_count=6, contribution_dates=[] ).put() + 
suggestion_models.TranslationReviewStatsModel( + id=self.TRANSLATION_STATS_1_ID, + language_code='cs', + reviewer_user_id=self.user_1_id, + topic_id='topic', + reviewed_translations_count=1, + reviewed_translation_word_count=1, + accepted_translations_count=1, + accepted_translations_with_reviewer_edits_count=2, + accepted_translation_word_count=3, + first_contribution_date=( + datetime.date.fromtimestamp(1616173837)), + last_contribution_date=( + datetime.date.fromtimestamp(1616173837)) + ).put() + suggestion_models.QuestionContributionStatsModel( + id=self.QUESTION_STATS_1_ID, + contributor_user_id=self.user_1_id, + topic_id='topic', + submitted_questions_count=1, + accepted_questions_count=1, + accepted_questions_without_reviewer_edits_count=2, + first_contribution_date=( + datetime.date.fromtimestamp(1616173837)), + last_contribution_date=( + datetime.date.fromtimestamp(1616173837)) + ).put() + suggestion_models.QuestionReviewStatsModel( + id=self.QUESTION_STATS_1_ID, + reviewer_user_id=self.user_1_id, + topic_id='topic', + reviewed_questions_count=1, + accepted_questions_count=1, + accepted_questions_with_reviewer_edits_count=1, + first_contribution_date=( + datetime.date.fromtimestamp(1616173837)), + last_contribution_date=( + datetime.date.fromtimestamp(1616173837)) + ).put() wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - def test_voiceover_application_is_pseudonymized(self): + def test_translation_contribution_stats_are_deleted(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) - suggestion_mappings = ( - user_models.PendingDeletionRequestModel.get_by_id( - self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.suggestion.value] - ) - # Verify user is pseudonymized. 
- voiceover_application_model_1 = ( - suggestion_models.GeneralVoiceoverApplicationModel.get_by_id( - self.VOICEOVER_1_ID) - ) - self.assertEqual( - voiceover_application_model_1.author_id, - suggestion_mappings[self.VOICEOVER_1_ID] - ) - voiceover_application_model_2 = ( - suggestion_models.GeneralVoiceoverApplicationModel.get_by_id( - self.VOICEOVER_2_ID) - ) - self.assertEqual( - voiceover_application_model_2.final_reviewer_id, - suggestion_mappings[self.VOICEOVER_2_ID] - ) + self.assertIsNone( + suggestion_models.TranslationContributionStatsModel.get_by_id( + self.TRANSLATION_STATS_1_ID)) - def test_translation_contribution_stats_are_deleted(self): + def test_translation_review_stats_are_deleted(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertIsNone( - suggestion_models.TranslationContributionStatsModel.get_by_id( + suggestion_models.TranslationReviewStatsModel.get_by_id( self.TRANSLATION_STATS_1_ID)) + def test_question_contribution_stats_are_deleted(self) -> None: + wipeout_service.delete_user( + wipeout_service.get_pending_deletion_request(self.user_1_id)) + + self.assertIsNone( + suggestion_models.QuestionContributionStatsModel.get_by_id( + self.QUESTION_STATS_1_ID)) + + def test_question_review_stats_are_deleted(self) -> None: + wipeout_service.delete_user( + wipeout_service.get_pending_deletion_request(self.user_1_id)) + + self.assertIsNone( + suggestion_models.QuestionReviewStatsModel.get_by_id( + self.QUESTION_STATS_1_ID)) + class WipeoutServiceVerifyDeleteSuggestionModelsTests( test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - VOICEOVER_1_ID = 'voiceover_1_id' - VOICEOVER_2_ID = 'voiceover_2_id' - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' - - def setUp(self): - 
super(WipeoutServiceVerifyDeleteSuggestionModelsTests, self).setUp() + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + VOICEOVER_1_ID: Final = 'voiceover_1_id' + VOICEOVER_2_ID: Final = 'voiceover_2_id' + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' + + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) - suggestion_models.GeneralVoiceoverApplicationModel( - id=self.VOICEOVER_1_ID, - target_type=feconf.ENTITY_TYPE_EXPLORATION, - target_id=self.EXP_1_ID, - language_code='en', - status=suggestion_models.STATUS_IN_REVIEW, - content='Text', - filename='filename.txt', - author_id=self.user_1_id, - final_reviewer_id=self.user_2_id, - ).put() - suggestion_models.GeneralVoiceoverApplicationModel( - id=self.VOICEOVER_2_ID, - target_type=feconf.ENTITY_TYPE_EXPLORATION, - target_id=self.EXP_2_ID, - language_code='en', - status=suggestion_models.STATUS_IN_REVIEW, - content='Text', - filename='filename.txt', - author_id=self.user_2_id, - final_reviewer_id=self.user_1_id, - ).put() wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - def test_verify_user_delete_when_user_is_deleted_returns_true(self): - wipeout_service.delete_user( - wipeout_service.get_pending_deletion_request(self.user_1_id)) - self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - - def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): - wipeout_service.delete_user( - wipeout_service.get_pending_deletion_request(self.user_1_id)) - self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - - suggestion_models.GeneralVoiceoverApplicationModel( - id=self.VOICEOVER_1_ID, - 
target_type=feconf.ENTITY_TYPE_EXPLORATION, - target_id=self.EXP_1_ID, - language_code='en', - status=suggestion_models.STATUS_IN_REVIEW, - content='Text', - filename='filename.txt', - author_id=self.user_1_id, - final_reviewer_id=self.user_2_id, - ).put() - - self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id)) - + def test_verify_user_delete_when_user_is_deleted_returns_true(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) @@ -4005,15 +4151,15 @@ def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): class WipeoutServiceDeleteTopicModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - TOP_1_ID = 'top_1_id' - TOP_2_ID = 'top_2_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + TOP_1_ID: Final = 'top_1_id' + TOP_2_ID: Final = 'top_2_id' - def setUp(self): - super(WipeoutServiceDeleteTopicModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -4038,7 +4184,7 @@ def setUp(self): topic_domain.ROLE_MANAGER, self.TOP_1_ID) - def test_one_topic_snapshot_metadata_is_pseudonymized(self): + def test_one_topic_snapshot_metadata_is_pseudonymized(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -4048,7 +4194,7 @@ def test_one_topic_snapshot_metadata_is_pseudonymized(self): topic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( 
self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.topic.value] + ).pseudonymizable_entity_mappings[models.Names.TOPIC.value] ) metadata_model = ( topic_models.TopicSnapshotMetadataModel.get_by_id( @@ -4078,7 +4224,7 @@ def test_one_topic_snapshot_metadata_is_pseudonymized(self): rights_metadata_model_2.commit_cmds_user_ids, [topic_mappings[self.TOP_1_ID]]) - def test_one_topic_snapshot_content_is_pseudonymized(self): + def test_one_topic_snapshot_content_is_pseudonymized(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -4088,7 +4234,7 @@ def test_one_topic_snapshot_content_is_pseudonymized(self): topic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.topic.value] + ).pseudonymizable_entity_mappings[models.Names.TOPIC.value] ) rights_content_model_1 = ( topic_models.TopicRightsSnapshotContentModel.get_by_id( @@ -4107,7 +4253,7 @@ def test_one_topic_snapshot_content_is_pseudonymized(self): self.user_2_id ]) - def test_one_topic_commit_log_is_pseudonymized(self): + def test_one_topic_commit_log_is_pseudonymized(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -4117,7 +4263,7 @@ def test_one_topic_commit_log_is_pseudonymized(self): topic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.topic.value] + ).pseudonymizable_entity_mappings[models.Names.TOPIC.value] ) commit_log_model_1 = ( topic_models.TopicCommitLogEntryModel.get_by_id( @@ -4126,7 +4272,7 @@ def test_one_topic_commit_log_is_pseudonymized(self): self.assertEqual( commit_log_model_1.user_id, topic_mappings[self.TOP_1_ID]) - def test_one_topic_with_missing_snapshot_is_pseudonymized(self): + def test_one_topic_with_missing_snapshot_is_pseudonymized(self) -> None: 
topic_models.TopicCommitLogEntryModel( id='topic-%s-1' % self.TOP_2_ID, topic_id=self.TOP_2_ID, @@ -4158,7 +4304,7 @@ def test_one_topic_with_missing_snapshot_is_pseudonymized(self): topic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.topic.value] + ).pseudonymizable_entity_mappings[models.Names.TOPIC.value] ) metadata_model = ( topic_models.TopicSnapshotMetadataModel.get_by_id( @@ -4182,7 +4328,9 @@ def test_one_topic_with_missing_snapshot_is_pseudonymized(self): self.assertEqual( commit_log_model_2.user_id, topic_mappings[self.TOP_2_ID]) - def test_one_topic_when_the_deletion_is_repeated_is_pseudonymized(self): + def test_one_topic_when_the_deletion_is_repeated_is_pseudonymized( + self + ) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -4207,7 +4355,7 @@ def test_one_topic_when_the_deletion_is_repeated_is_pseudonymized(self): topic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.topic.value] + ).pseudonymizable_entity_mappings[models.Names.TOPIC.value] ) metadata_model = ( topic_models.TopicSnapshotMetadataModel.get_by_id( @@ -4223,7 +4371,7 @@ def test_one_topic_when_the_deletion_is_repeated_is_pseudonymized(self): self.assertEqual( commit_log_model.user_id, topic_mappings[self.TOP_1_ID]) - def test_multiple_topics_are_pseudonymized(self): + def test_multiple_topics_are_pseudonymized(self) -> None: self.save_new_topic( self.TOP_2_ID, self.user_1_id, @@ -4238,7 +4386,7 @@ def test_multiple_topics_are_pseudonymized(self): topic_mappings = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.topic.value] + ).pseudonymizable_entity_mappings[models.Names.TOPIC.value] ) metadata_model = ( topic_models.TopicSnapshotMetadataModel.get_by_id( @@ -4273,26 +4421,30 @@ def 
test_multiple_topics_are_pseudonymized(self): class WipeoutServiceVerifyDeleteTopicModelsTests(test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - TOP_1_ID = 'top_1_id' - TOP_2_ID = 'top_2_id' - SUBTOP_1_ID = 'subtop_1_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + TOP_1_ID: Final = 'top_1_id' + TOP_2_ID: Final = 'top_2_id' + SUBTOP_1_ID: Final = 'subtop_1_id' - def setUp(self): - super(WipeoutServiceVerifyDeleteTopicModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.save_new_topic(self.TOP_1_ID, self.user_1_id) wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() - def test_verify_user_delete_when_user_is_deleted_returns_true(self): + def test_verify_user_delete_when_user_is_deleted_returns_true( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) - def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): + def test_verify_user_delete_when_user_is_not_deleted_returns_false( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id)) @@ -4315,17 +4467,17 @@ def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): class WipeoutServiceDeleteUserModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - COLLECTION_1_ID = 'col_1_id' - COLLECTION_2_ID = 'col_2_id' - EXPLORATION_1_ID = 
'exp_1_id' - EXPLORATION_2_ID = 'exp_2_id' - - def setUp(self): - super(WipeoutServiceDeleteUserModelsTests, self).setUp() + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + COLLECTION_1_ID: Final = 'col_1_id' + COLLECTION_2_ID: Final = 'col_2_id' + EXPLORATION_1_ID: Final = 'exp_1_id' + EXPLORATION_2_ID: Final = 'exp_2_id' + + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -4345,22 +4497,24 @@ def setUp(self): id=self.user_2_id, exploration_ids=[], collection_ids=[] ).put() self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL) - user_data_dict = { + user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': self.user_1_id, } - new_user_data_dict = { + new_user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias3', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': None, } self.modifiable_user_data = ( @@ -4393,7 +4547,7 @@ def setUp(self): id=self.profile_user_id, exploration_ids=[], collection_ids=[] ).put() - def test_delete_user_for_profile_user_is_successful(self): + def test_delete_user_for_profile_user_is_successful(self) -> None: wipeout_service.pre_delete_user(self.profile_user_id) self.process_and_flush_pending_tasks() @@ -4432,7 +4586,9 @@ def test_delete_user_for_profile_user_is_successful(self): 
self.assertIsNone( user_models.LearnerGoalsModel.get_by_id(self.profile_user_id)) - def test_delete_user_for_full_user_and_its_profiles_is_successful(self): + def test_delete_user_for_full_user_and_its_profiles_is_successful( + self + ) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() @@ -4474,7 +4630,9 @@ def test_delete_user_for_full_user_and_its_profiles_is_successful(self): self.assertIsNone( user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id)) - def test_delete_user_with_collection_and_exploration_is_successful(self): + def test_delete_user_with_collection_and_exploration_is_successful( + self + ) -> None: self.save_new_valid_exploration( self.EXPLORATION_1_ID, self.user_1_id) @@ -4501,7 +4659,9 @@ def test_delete_user_with_collection_and_exploration_is_successful(self): self.assertIsNone( user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id)) - def test_delete_user_with_collections_and_explorations_is_successful(self): + def test_delete_user_with_collections_and_explorations_is_successful( + self + ) -> None: self.save_new_valid_exploration( self.EXPLORATION_1_ID, self.user_1_id) @@ -4548,7 +4708,8 @@ def test_delete_user_with_collections_and_explorations_is_successful(self): exp_models.ExplorationModel.get_by_id(self.EXPLORATION_2_ID)) def test_delete_user_with_collection_and_exploration_repeated_is_successful( - self): + self + ) -> None: self.save_new_valid_exploration( self.EXPLORATION_1_ID, self.user_1_id) @@ -4594,7 +4755,7 @@ def test_delete_user_with_collection_and_exploration_repeated_is_successful( self.assertIsNone( exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID)) - def test_delete_user_with_multiple_users_is_successful(self): + def test_delete_user_with_multiple_users_is_successful(self) -> None: wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() @@ -4623,7 +4784,9 @@ def test_delete_user_with_multiple_users_is_successful(self): 
self.assertIsNone( user_models.LearnerPlaylistModel.get_by_id(self.user_2_id)) - def test_after_deletion_user_and_its_profiles_cannot_do_anything(self): + def test_after_deletion_user_and_its_profiles_cannot_do_anything( + self + ) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -4631,13 +4794,17 @@ def test_after_deletion_user_and_its_profiles_cannot_do_anything(self): wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.profile_user_id)) - self.assertIsNone(user_services.get_user_settings(self.user_1_id)) - self.assertIsNone(user_services.get_user_settings(self.profile_user_id)) - with self.assertRaisesRegexp(Exception, 'User not found.'): + self.assertIsNone(user_services.get_user_settings( + self.user_1_id, strict=False + )) + self.assertIsNone(user_services.get_user_settings( + self.profile_user_id, strict=False + )) + with self.assertRaisesRegex(Exception, 'User not found.'): # Try to do some action with the deleted user. user_services.update_preferred_language_codes( self.user_1_id, ['en']) - with self.assertRaisesRegexp(Exception, 'User not found.'): + with self.assertRaisesRegex(Exception, 'User not found.'): # Try to do some action with the deleted user. 
user_services.update_preferred_language_codes( self.profile_user_id, ['en']) @@ -4646,34 +4813,36 @@ def test_after_deletion_user_and_its_profiles_cannot_do_anything(self): class WipeoutServiceVerifyDeleteUserModelsTests(test_utils.GenericTestBase): """Provides testing of the verification part of wipeout service.""" - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' - def setUp(self): - super(WipeoutServiceVerifyDeleteUserModelsTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL) - user_data_dict = { + user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': self.user_1_id, } - new_user_data_dict = { + new_user_data_dict: user_domain.RawUserDataDict = { 'schema_version': 1, 'display_alias': 'display_alias3', 'pin': '12345', 'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, + 'preferred_translation_language_code': None, 'user_id': None, } self.modifiable_user_data = ( @@ -4693,7 +4862,9 @@ def setUp(self): wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def 
test_verify_user_delete_when_profile_user_deleted_returns_true(self): + def test_verify_user_delete_when_profile_user_deleted_returns_true( + self + ) -> None: wipeout_service.pre_delete_user(self.profile_user_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -4701,7 +4872,7 @@ def test_verify_user_delete_when_profile_user_deleted_returns_true(self): self.assertTrue( wipeout_service.verify_user_deleted(self.profile_user_id)) - def test_verify_user_delete_when_user_is_deleted_returns_true(self): + def test_verify_user_delete_when_user_is_deleted_returns_true(self) -> None: wipeout_service.pre_delete_user(self.user_1_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -4713,7 +4884,9 @@ def test_verify_user_delete_when_user_is_deleted_returns_true(self): self.assertTrue( wipeout_service.verify_user_deleted(self.profile_user_id)) - def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): + def test_verify_user_delete_when_user_is_not_deleted_returns_false( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_2_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id)) @@ -4739,7 +4912,9 @@ def test_verify_user_delete_when_user_is_not_deleted_returns_false(self): wipeout_service.get_pending_deletion_request(self.user_2_id)) self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id)) - def test_verify_user_delete_when_profile_user_not_deleted_is_false(self): + def test_verify_user_delete_when_profile_user_not_deleted_is_false( + self + ) -> None: wipeout_service.pre_delete_user(self.profile_user_id) self.process_and_flush_pending_tasks() wipeout_service.delete_user( @@ -4771,7 +4946,8 @@ def test_verify_user_delete_when_profile_user_not_deleted_is_false(self): wipeout_service.verify_user_deleted(self.profile_user_id)) def test_verify_user_delete_when_external_auth_associations_are_not_deleted( - self): + self + ) -> None: 
self.assertFalse( auth_services.verify_external_auth_associations_are_deleted( self.user_1_id)) @@ -4791,21 +4967,21 @@ def test_verify_user_delete_when_external_auth_associations_are_not_deleted( class WipeoutServiceDeleteBlogPostModelsTests(test_utils.GenericTestBase): """Provides testing of the deletion part of wipeout service.""" - BLOG_1_ID = 'blog_1_id' - USER_1_EMAIL = 'some@email.com' - USER_1_USERNAME = 'username1' - USER_2_EMAIL = 'some-other@email.com' - USER_2_USERNAME = 'username2' - NUMBER_OF_MODELS = 150 - NONEXISTENT_USER_ID = 'id_x' - CONTENT = 'Dummy Content' - SUMMARY = 'Dummy Content' - TITLE = 'Dummy Title' - TAGS = ['tag1', 'tag2', 'tag3'] - THUMBNAIL = 'xyzabc' - - def setUp(self): - super(WipeoutServiceDeleteBlogPostModelsTests, self).setUp() + BLOG_1_ID: Final = 'blog_1_id' + USER_1_EMAIL: Final = 'some@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some-other@email.com' + USER_2_USERNAME: Final = 'username2' + NUMBER_OF_MODELS: Final = 150 + NONEXISTENT_USER_ID: Final = 'id_x' + CONTENT: Final = 'Dummy Content' + SUMMARY: Final = 'Dummy Content' + TITLE: Final = 'Dummy Title' + TAGS: Final = ['tag1', 'tag2', 'tag3'] + THUMBNAIL: Final = 'xyzabc' + + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) @@ -4843,11 +5019,19 @@ def setUp(self): self.blog_post_rights_model.update_timestamps() self.blog_post_rights_model.put() + blog_models.BlogAuthorDetailsModel.create( + author_id=self.user_1_id, + displayed_author_name='blog author', + author_bio='general bio' + ) + self.author_details_model = ( + blog_models.BlogAuthorDetailsModel.get_by_author(self.user_1_id)) + wipeout_service.pre_delete_user(self.user_1_id) wipeout_service.pre_delete_user(self.user_2_id) self.process_and_flush_pending_tasks() - def test_one_blog_post_model_is_pseudonymized(self): + def 
test_one_blog_post_model_is_pseudonymized(self) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -4855,7 +5039,7 @@ def test_one_blog_post_model_is_pseudonymized(self): pseudonymizable_user_id_mapping = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.blog.value] + ).pseudonymizable_entity_mappings[models.Names.BLOG.value] ) blog_post_model = ( blog_models.BlogPostModel.get_by_id( @@ -4874,6 +5058,17 @@ def test_one_blog_post_model_is_pseudonymized(self): pseudonymizable_user_id_mapping[self.BLOG_1_ID] ) + # Ruling out the possibility of None for mypy type checking. + assert self.author_details_model is not None + blog_author_model = blog_models.BlogAuthorDetailsModel.get_by_id( + self.author_details_model.id) + # Ruling out the possibility of None for mypy type checking. + assert blog_author_model is not None + self.assertEqual( + blog_author_model.author_id, + pseudonymizable_user_id_mapping[blog_author_model.id] + ) + # Verify that the user id is removed from the list of editor ids in # BlogPostRights model. 
blog_post_rights_model = ( @@ -4881,7 +5076,9 @@ def test_one_blog_post_model_is_pseudonymized(self): self.BLOG_1_ID)) self.assertTrue(self.user_1_id not in blog_post_rights_model.editor_ids) - def test_one_blog_when_the_deletion_is_repeated_is_pseudonymized(self): + def test_one_blog_when_the_deletion_is_repeated_is_pseudonymized( + self + ) -> None: wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -4912,7 +5109,7 @@ def test_one_blog_when_the_deletion_is_repeated_is_pseudonymized(self): pseudonymizable_user_id_mapping = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.blog.value] + ).pseudonymizable_entity_mappings[models.Names.BLOG.value] ) new_blog_post_model = ( blog_models.BlogPostModel.get_by_id( @@ -4923,6 +5120,18 @@ def test_one_blog_when_the_deletion_is_repeated_is_pseudonymized(self): pseudonymizable_user_id_mapping[self.BLOG_1_ID] ) + # Ruling out the possibility of None for mypy type checking. + assert self.author_details_model is not None + blog_author_model = blog_models.BlogAuthorDetailsModel.get_by_id( + self.author_details_model.id) + + # Ruling out the possibility of None for mypy type checking. + assert blog_author_model is not None + self.assertEqual( + blog_author_model.author_id, + pseudonymizable_user_id_mapping[blog_author_model.id] + ) + # Verify that the user id is removed from the list of editor ids in # BlogPostRights model. 
blog_post_rights_model = ( @@ -4930,7 +5139,7 @@ def test_one_blog_when_the_deletion_is_repeated_is_pseudonymized(self): self.BLOG_1_ID)) self.assertTrue(self.user_1_id not in blog_post_rights_model.editor_ids) - def test_multiple_blog_post_models_are_pseudonymized(self): + def test_multiple_blog_post_models_are_pseudonymized(self) -> None: blog_post_models_list = [] for i in range(self.NUMBER_OF_MODELS): blog_post_models_list.append( @@ -4976,9 +5185,9 @@ def test_multiple_blog_post_models_are_pseudonymized(self): blog_models.BlogPostRightsModel.update_timestamps_multi( blog_post_rights_models_list) - datastore_services.put_multi( - blog_post_models_list + blog_post_summary_models_list + - blog_post_rights_models_list) + datastore_services.put_multi(blog_post_models_list) + datastore_services.put_multi(blog_post_summary_models_list) + datastore_services.put_multi(blog_post_rights_models_list) wipeout_service.delete_user( wipeout_service.get_pending_deletion_request(self.user_1_id)) @@ -4986,7 +5195,7 @@ def test_multiple_blog_post_models_are_pseudonymized(self): pseudonymizable_user_id_mapping = ( user_models.PendingDeletionRequestModel.get_by_id( self.user_1_id - ).pseudonymizable_entity_mappings[models.NAMES.blog.value] + ).pseudonymizable_entity_mappings[models.Names.BLOG.value] ) pseudonymized_blog_post_models = ( @@ -4995,6 +5204,8 @@ def test_multiple_blog_post_models_are_pseudonymized(self): ) ) for blog_post_model in pseudonymized_blog_post_models: + # Ruling out the possibility of None for mypy type checking. + assert blog_post_model is not None self.assertEqual( blog_post_model.author_id, pseudonymizable_user_id_mapping[blog_post_model.id] @@ -5006,10 +5217,22 @@ def test_multiple_blog_post_models_are_pseudonymized(self): ) ) for blog_post_summary_model in pseudonymized_blog_post_summary_models: + # Ruling out the possibility of None for mypy type checking. 
+ assert blog_post_summary_model is not None self.assertEqual( blog_post_summary_model.author_id, pseudonymizable_user_id_mapping[blog_post_summary_model.id] ) + # Ruling out the possibility of None for mypy type checking. + assert self.author_details_model is not None + blog_author_model = blog_models.BlogAuthorDetailsModel.get_by_id( + self.author_details_model.id) + # Ruling out the possibility of None for mypy type checking. + assert blog_author_model is not None + self.assertEqual( + blog_author_model.author_id, + pseudonymizable_user_id_mapping[blog_author_model.id] + ) # Verify that user id is removed from the list of editor ids in all # BlogPostRights models. @@ -5019,19 +5242,192 @@ def test_multiple_blog_post_models_are_pseudonymized(self): ) ) for blog_post_rights_model in blog_post_rights_models: + # Ruling out the possibility of None for mypy type checking. + assert blog_post_rights_model is not None self.assertTrue( self.user_1_id not in blog_post_rights_model.editor_ids) +class WipeoutServiceDeletelLearnerGroupModelsTests(test_utils.GenericTestBase): + """Provides testing of the deletion part of wipeout service.""" + + USER_1_EMAIL: Final = 'some1@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'some2@email.com' + USER_2_USERNAME: Final = 'username2' + USER_3_EMAIL: Final = 'some3@email.com' + USER_3_USERNAME: Final = 'username3' + USER_4_EMAIL: Final = 'some4@email.com' + USER_4_USERNAME: Final = 'username4' + LEARNER_GROUP_ID_1: Final = 'group_id_1' + LEARNER_GROUP_ID_2: Final = 'group_id_2' + + def setUp(self) -> None: + super().setUp() + self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) + self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) + self.signup(self.USER_3_EMAIL, self.USER_3_USERNAME) + self.signup(self.USER_4_EMAIL, self.USER_4_USERNAME) + + self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) + self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) + self.user_3_id = 
self.get_user_id_from_email(self.USER_3_EMAIL) + self.user_4_id = self.get_user_id_from_email(self.USER_4_EMAIL) + + learner_group_models.LearnerGroupModel( + id=self.LEARNER_GROUP_ID_1, + title='title_1', + description='description_1', + facilitator_user_ids=[self.user_1_id, self.user_4_id], + learner_user_ids=[self.user_2_id], + invited_learner_user_ids=[self.user_3_id], + subtopic_page_ids=[], + story_ids=[] + ).put() + + learner_group_models.LearnerGroupModel( + id=self.LEARNER_GROUP_ID_2, + title='title_2', + description='description_2', + facilitator_user_ids=[self.user_1_id], + learner_user_ids=[self.user_2_id], + invited_learner_user_ids=[self.user_3_id], + subtopic_page_ids=[], + story_ids=[] + ).put() + + def test_delete_learner_is_successful(self) -> None: + wipeout_service.pre_delete_user(self.user_2_id) + self.process_and_flush_pending_tasks() + + learner_group_model_1 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_1)) + learner_group_model_2 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_2)) + + self.assertIsNotNone(learner_group_model_1) + self.assertIsNotNone(learner_group_model_2) + + self.assertTrue( + self.user_2_id in learner_group_model_1.learner_user_ids) + self.assertTrue( + self.user_2_id in learner_group_model_2.learner_user_ids) + + wipeout_service.delete_user( + wipeout_service.get_pending_deletion_request(self.user_2_id)) + + # Deleting a user should not delete the learner groups that the user + # is a learner of but only remove their user id from learner_user_ids + # field of the learner group models. 
+ learner_group_model_1 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_1)) + learner_group_model_2 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_2)) + + self.assertIsNotNone(learner_group_model_1) + self.assertIsNotNone(learner_group_model_2) + + self.assertTrue( + self.user_2_id not in learner_group_model_1.learner_user_ids) + self.assertTrue( + self.user_2_id not in learner_group_model_2.learner_user_ids) + + def test_delete_invited_user_is_successful(self) -> None: + wipeout_service.pre_delete_user(self.user_3_id) + self.process_and_flush_pending_tasks() + + learner_group_model_1 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_1)) + learner_group_model_2 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_2)) + + self.assertIsNotNone(learner_group_model_1) + self.assertIsNotNone(learner_group_model_2) + + self.assertTrue( + self.user_3_id in learner_group_model_1.invited_learner_user_ids) + self.assertTrue( + self.user_3_id in learner_group_model_2.invited_learner_user_ids) + + wipeout_service.delete_user( + wipeout_service.get_pending_deletion_request(self.user_3_id)) + + # Deleting a user should not delete the learner groups that the user + # has been invited to join as learner but only remove their user id + # from invited_learner_user_ids field of the learner group models. 
+ learner_group_model_1 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_1)) + learner_group_model_2 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_2)) + + self.assertIsNotNone(learner_group_model_1) + self.assertIsNotNone(learner_group_model_2) + + self.assertTrue( + self.user_3_id not in ( + learner_group_model_1.invited_learner_user_ids)) + self.assertTrue( + self.user_3_id not in ( + learner_group_model_2.invited_learner_user_ids)) + + def test_delete_facilitator_is_successful(self) -> None: + wipeout_service.pre_delete_user(self.user_1_id) + self.process_and_flush_pending_tasks() + + learner_group_model_1 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_1)) + learner_group_model_2 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_2)) + + self.assertIsNotNone(learner_group_model_1) + self.assertIsNotNone(learner_group_model_2) + + self.assertTrue( + self.user_1_id in learner_group_model_1.facilitator_user_ids) + self.assertTrue( + self.user_1_id in learner_group_model_2.facilitator_user_ids) + + wipeout_service.delete_user( + wipeout_service.get_pending_deletion_request(self.user_1_id)) + + learner_group_model_1 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_1)) + learner_group_model_2 = ( + learner_group_models.LearnerGroupModel.get_by_id( + self.LEARNER_GROUP_ID_2)) + + # Deleting a user should not delete the learner groups with more + than one facilitator including the current user but only remove their + user id from facilitator_user_ids field of the learner group models. + self.assertIsNotNone(learner_group_model_1) + self.assertTrue( + self.user_1_id not in learner_group_model_1.facilitator_user_ids) + + # Deleting a user should delete the learner groups with only the + current user being the facilitator. 
+ self.assertIsNone(learner_group_model_2) + + class PendingUserDeletionTaskServiceTests(test_utils.GenericTestBase): """Provides testing for the delete users pending to be deleted taskqueue service methods of wipeout service.""" - USER_1_EMAIL = 'a@example.com' - USER_1_USERNAME = 'a' + USER_1_EMAIL: Final = 'a@example.com' + USER_1_USERNAME: Final = 'a' - def setUp(self): - super(PendingUserDeletionTaskServiceTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) user_models.CompletedActivitiesModel( @@ -5050,9 +5446,11 @@ def setUp(self): ).put() wipeout_service.pre_delete_user(self.user_1_id) - self.email_subjects = [] - self.email_bodies = [] - def _mock_send_mail_to_admin(email_subject, email_body): + self.email_subjects: List[str] = [] + self.email_bodies: List[str] = [] + def _mock_send_mail_to_admin( + email_subject: str, email_body: str + ) -> None: """Mocks email_manager.send_mail_to_admin() as it's not possible to send mail with self.testapp_swap, i.e with the URLs defined in main_cron. 
@@ -5067,7 +5465,9 @@ def _mock_send_mail_to_admin(email_subject, email_body): self.cannot_send_email_swap = self.swap( feconf, 'CAN_SEND_EMAILS', False) - def test_repeated_deletion_is_successful_when_emails_enabled(self): + def test_repeated_deletion_is_successful_when_emails_enabled( + self + ) -> None: with self.send_mail_to_admin_swap, self.can_send_email_swap: wipeout_service.delete_users_pending_to_be_deleted() self.assertIn('SUCCESS', self.email_bodies[0]) @@ -5076,7 +5476,9 @@ def test_repeated_deletion_is_successful_when_emails_enabled(self): self.assertIn('ALREADY DONE', self.email_bodies[1]) self.assertIn(self.user_1_id, self.email_bodies[1]) - def test_repeated_deletion_is_successful_when_emails_disabled(self): + def test_repeated_deletion_is_successful_when_emails_disabled( + self + ) -> None: send_mail_to_admin_swap = self.swap_with_checks( email_manager, 'send_mail_to_admin', @@ -5090,8 +5492,12 @@ def test_repeated_deletion_is_successful_when_emails_disabled(self): wipeout_service.delete_users_pending_to_be_deleted() self.assertEqual(len(self.email_bodies), 0) - def test_no_email_is_sent_when_there_are_no_users_pending_deletion(self): - pending_deletion_request_models = ( + def test_no_email_is_sent_when_there_are_no_users_pending_deletion( + self + ) -> None: + pending_deletion_request_models: Sequence[ + user_models.PendingDeletionRequestModel + ] = ( user_models.PendingDeletionRequestModel.query().fetch()) for pending_deletion_request_model in pending_deletion_request_models: pending_deletion_request_model.delete() @@ -5100,7 +5506,7 @@ def test_no_email_is_sent_when_there_are_no_users_pending_deletion(self): wipeout_service.delete_users_pending_to_be_deleted() self.assertEqual(len(self.email_bodies), 0) - def test_regular_deletion_is_successful(self): + def test_regular_deletion_is_successful(self) -> None: with self.send_mail_to_admin_swap, self.can_send_email_swap: wipeout_service.delete_users_pending_to_be_deleted() self.assertIn('SUCCESS', 
self.email_bodies[0]) @@ -5128,11 +5534,11 @@ class CheckCompletionOfUserDeletionTaskServiceTests( service methods of wipeout service. """ - USER_1_EMAIL = 'a@example.com' - USER_1_USERNAME = 'a' + USER_1_EMAIL: Final = 'a@example.com' + USER_1_USERNAME: Final = 'a' - def setUp(self): - super(CheckCompletionOfUserDeletionTaskServiceTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) user_models.CompletedActivitiesModel( @@ -5151,9 +5557,11 @@ def setUp(self): ).put() wipeout_service.pre_delete_user(self.user_1_id) - self.email_subjects = [] - self.email_bodies = [] - def _mock_send_mail_to_admin(email_subject, email_body): + self.email_subjects: List[str] = [] + self.email_bodies: List[str] = [] + def _mock_send_mail_to_admin( + email_subject: str, email_body: str + ) -> None: """Mocks email_manager.send_mail_to_admin() as it's not possible to send mail with self.testapp_swap, i.e with the URLs defined in main_cron. 
@@ -5168,13 +5576,17 @@ def _mock_send_mail_to_admin(email_subject, email_body): self.cannot_send_email_swap = self.swap( feconf, 'CAN_SEND_EMAILS', False) - def test_verification_when_user_is_not_deleted_emails_enabled(self): + def test_verification_when_user_is_not_deleted_emails_enabled( + self + ) -> None: with self.send_mail_to_admin_swap, self.can_send_email_swap: wipeout_service.check_completion_of_user_deletion() self.assertIn('NOT DELETED', self.email_bodies[0]) self.assertIn(self.user_1_id, self.email_bodies[0]) - def test_verification_when_user_is_not_deleted_emails_disabled(self): + def test_verification_when_user_is_not_deleted_emails_disabled( + self + ) -> None: send_mail_to_admin_swap = self.swap_with_checks( email_manager, 'send_mail_to_admin', @@ -5186,7 +5598,7 @@ def test_verification_when_user_is_not_deleted_emails_disabled(self): wipeout_service.check_completion_of_user_deletion() self.assertEqual(len(self.email_bodies), 0) - def test_verification_when_user_is_deleted_is_successful(self): + def test_verification_when_user_is_deleted_is_successful(self) -> None: pending_deletion_request = ( wipeout_service.get_pending_deletion_request(self.user_1_id)) wipeout_service.delete_user(pending_deletion_request) @@ -5202,7 +5614,7 @@ def test_verification_when_user_is_deleted_is_successful(self): self.assertIsNone( user_models.UserSettingsModel.get_by_id(self.user_1_id)) - def test_verification_when_user_is_wrongly_deleted_fails(self): + def test_verification_when_user_is_wrongly_deleted_fails(self) -> None: pending_deletion_request = ( wipeout_service.get_pending_deletion_request(self.user_1_id)) wipeout_service.delete_user(pending_deletion_request) @@ -5219,3 +5631,84 @@ def test_verification_when_user_is_wrongly_deleted_fails(self): wipeout_service.check_completion_of_user_deletion() self.assertIn('FAILURE', self.email_bodies[-1]) self.assertIn(self.user_1_id, self.email_bodies[-1]) + + +class 
WipeoutServiceDeleteVersionHistoryModelsTests(test_utils.GenericTestBase): + """Provides testing of the deletion part of wipeout service.""" + + USER_1_EMAIL: Final = 'user1@email.com' + USER_1_USERNAME: Final = 'username1' + USER_2_EMAIL: Final = 'user2@email.com' + USER_2_USERNAME: Final = 'username2' + EXPLORATION_ID_0: Final = 'An_exploration_0_id' + EXPLORATION_ID_1: Final = 'An_exploration_1_id' + EXPLORATION_ID_2: Final = 'An_exploration_2_id' + VERSION_1: Final = 1 + VERSION_2: Final = 2 + VERSION_3: Final = 3 + + def setUp(self) -> None: + super().setUp() + self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) + self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) + self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL) + self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL) + self.version_history_model_class = ( + exp_models.ExplorationVersionHistoryModel) + self.save_new_valid_exploration(self.EXPLORATION_ID_0, self.user_1_id) + self.publish_exploration(self.user_1_id, self.EXPLORATION_ID_0) + self.save_new_valid_exploration(self.EXPLORATION_ID_1, self.user_1_id) + self.publish_exploration(self.user_1_id, self.EXPLORATION_ID_1) + self.save_new_valid_exploration(self.EXPLORATION_ID_2, self.user_2_id) + self.publish_exploration(self.user_2_id, self.EXPLORATION_ID_2) + + def test_one_version_history_model_is_pseudonymized(self) -> None: + wipeout_service.pre_delete_user(self.user_2_id) + self.process_and_flush_pending_tasks() + wipeout_service.delete_user( + wipeout_service.get_pending_deletion_request(self.user_2_id)) + + pseudonymizable_user_id_mapping = ( + user_models.PendingDeletionRequestModel.get_by_id( + self.user_2_id).pseudonymizable_entity_mappings[ + models.Names.EXPLORATION.value]) + pseudonymized_id = pseudonymizable_user_id_mapping[ + self.EXPLORATION_ID_2] + pseudonymized_model = exp_models.ExplorationVersionHistoryModel.get( + self.version_history_model_class.get_instance_id( + self.EXPLORATION_ID_2, self.VERSION_1)) + + 
self.assertNotIn( + self.user_2_id, pseudonymized_model.committer_ids) + self.assertIn( + pseudonymized_id, pseudonymized_model.committer_ids) + + def test_multiple_version_history_models_are_pseudonymized(self) -> None: + wipeout_service.pre_delete_user(self.user_1_id) + self.process_and_flush_pending_tasks() + wipeout_service.delete_user( + wipeout_service.get_pending_deletion_request(self.user_1_id)) + + pseudonymizable_user_id_mapping = ( + user_models.PendingDeletionRequestModel.get_by_id( + self.user_1_id).pseudonymizable_entity_mappings[ + models.Names.EXPLORATION.value]) + version_history_ids = [ + self.version_history_model_class.get_instance_id( + self.EXPLORATION_ID_0, self.VERSION_1), + self.version_history_model_class.get_instance_id( + self.EXPLORATION_ID_1, self.VERSION_1) + ] + pseudonymized_models = ( + exp_models.ExplorationVersionHistoryModel.get_multi( + version_history_ids)) + + for model in pseudonymized_models: + # Ruling out the possibility of None for mypy type checking. + assert model is not None + pseudonymized_id = pseudonymizable_user_id_mapping[ + model.exploration_id] + self.assertNotIn( + self.user_1_id, model.committer_ids) + self.assertIn( + pseudonymized_id, model.committer_ids) diff --git a/core/feconf.py b/core/feconf.py index 8b75595b934a..ae717b869f7a 100644 --- a/core/feconf.py +++ b/core/feconf.py @@ -20,14 +20,12 @@ import copy import datetime +import enum import os from core.constants import constants -from typing import Dict, List, Union - -CommandType = ( - Dict[str, Union[str, List[str], Dict[str, Union[str, List[str]]]]]) +from typing import Callable, Dict, Final, List, TypedDict, Union # The datastore model ID for the list of featured activity references. This # value should not be changed. 
@@ -38,19 +36,50 @@ POST_COMMIT_STATUS_PUBLIC = 'public' POST_COMMIT_STATUS_PRIVATE = 'private' + +class ValidCmdDict(TypedDict): + """Dictionary representing valid commands specs.""" + + name: str + required_attribute_names: List[str] + optional_attribute_names: List[str] + user_id_attribute_names: List[str] + allowed_values: Dict[str, List[str]] + deprecated_values: Dict[str, List[str]] + + +class RteTypeTextAngularDict(TypedDict): + """Dict representing RTE_TYPE_TEXTANGULAR Dictionary.""" + + ALLOWED_PARENT_LIST: Dict[str, List[str]] + ALLOWED_TAG_LIST: List[str] + + +# Supported object types for ParamSpec. +SUPPORTED_OBJ_TYPES = { + 'UnicodeString', +} + + # Whether to unconditionally log info messages. DEBUG = False -# When DEV_MODE is true check that we are running in development environment. -# The SERVER_SOFTWARE environment variable does not exist in Travis, hence the -# need for an explicit check. -if constants.DEV_MODE and os.getenv('SERVER_SOFTWARE'): - server_software = os.getenv('SERVER_SOFTWARE') - if ( - server_software and - not server_software.startswith(('Development', 'gunicorn')) - ): - raise Exception('DEV_MODE can\'t be true on production.') + +def check_dev_mode_is_true() -> None: + """When DEV_MODE is true check that we are running in development + environment. The SERVER_SOFTWARE environment variable does not exist + in Travis, hence the need for an explicit check. 
+ """ + if constants.DEV_MODE and os.getenv('SERVER_SOFTWARE'): + server_software = os.getenv('SERVER_SOFTWARE') + if ( + server_software and + not server_software.startswith(('Development', 'gunicorn')) + ): + raise Exception('DEV_MODE can\'t be true on production.') + + +check_dev_mode_is_true() CLASSIFIERS_DIR = os.path.join('extensions', 'classifiers') TESTS_DATA_DIR = os.path.join('core', 'tests', 'data') @@ -58,19 +87,13 @@ SAMPLE_COLLECTIONS_DIR = os.path.join('data', 'collections') CONTENT_VALIDATION_DIR = os.path.join('core', 'domain') -# backend_prod_files contain processed JS and HTML files that are served by -# Jinja, we are moving away from Jinja so this folder might not be needed later -# (#6964) -EXTENSIONS_DIR_PREFIX = ( - 'backend_prod_files' if not constants.DEV_MODE else '') +EXTENSIONS_DIR_PREFIX = ('build' if not constants.DEV_MODE else '') ACTIONS_DIR = ( os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'actions')) ISSUES_DIR = ( os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'issues')) INTERACTIONS_DIR = ( os.path.join('extensions', 'interactions')) -INTERACTIONS_LEGACY_SPECS_FILE_DIR = ( - os.path.join(INTERACTIONS_DIR, 'legacy_interaction_specs_by_state_version')) INTERACTIONS_SPECS_FILE_PATH = ( os.path.join(INTERACTIONS_DIR, 'interaction_specs.json')) RTE_EXTENSIONS_DIR = ( @@ -83,7 +106,11 @@ # Choose production templates folder when we are in production mode. 
FRONTEND_TEMPLATES_DIR = ( os.path.join('webpack_bundles') if constants.DEV_MODE else - os.path.join('backend_prod_files', 'webpack_bundles')) + os.path.join('build', 'webpack_bundles')) +# To know more about AOT visit https://angular.io/guide/glossary#aot +FRONTEND_AOT_DIR = ( + os.path.join('dist', 'oppia-angular') if constants.DEV_MODE else + os.path.join('dist', 'oppia-angular-prod')) DEPENDENCIES_TEMPLATES_DIR = ( os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'dependencies')) @@ -105,13 +132,56 @@ LEGACY_HTML_FIELD_TYPES_TO_RULE_SPECS_EXTENSIONS_MODULE_DIR = os.path.join( 'interactions', 'legacy_html_field_types_to_rule_specs_by_state_version') + +class ValidModelNames(enum.Enum): + """Enum for valid model names.""" + + ACTIVITY = 'activity' + APP_FEEDBACK_REPORT = 'app_feedback_report' + AUDIT = 'audit' + BASE_MODEL = 'base_model' + BEAM_JOB = 'beam_job' + BLOG = 'blog' + CLASSIFIER = 'classifier' + CLASSROOM = 'classroom' + COLLECTION = 'collection' + CONFIG = 'CONFIG' + EMAIL = 'email' + EXPLORATION = 'exploration' + FEEDBACK = 'feedback' + IMPROVEMENTS = 'improvements' + JOB = 'job' + LEARNER_GROUP = 'learner_group' + OPPORTUNITY = 'opportunity' + QUESTION = 'question' + RECOMMENDATIONS = 'recommendations' + SKILL = 'skill' + STATISTICS = 'statistics' + AUTH = 'auth' + STORY = 'story' + SUBTOPIC = 'subtopic' + SUGGESTION = 'suggestion' + TOPIC = 'topic' + TRANSLATION = 'translation' + USER = 'user' + + # A mapping of interaction ids to classifier properties. # TODO(#10217): As of now we support only one algorithm per interaction. # However, we do have the necessary storage infrastructure to support multiple # algorithms per interaction. Hence, whenever we find a secondary algorithm # candidate for any of the supported interactions, the logical functions to # support multiple algorithms need to be implemented. 
-INTERACTION_CLASSIFIER_MAPPING = { + + +class ClassifierDict(TypedDict): + """Representing INTERACTION_CLASSIFIER_MAPPING dict values.""" + + algorithm_id: str + algorithm_version: int + + +INTERACTION_CLASSIFIER_MAPPING: Dict[str, ClassifierDict] = { 'TextInput': { 'algorithm_id': 'TextClassifier', 'algorithm_version': 1 @@ -125,7 +195,7 @@ TRAINING_JOB_STATUS_NEW = 'NEW' TRAINING_JOB_STATUS_PENDING = 'PENDING' -ALLOWED_TRAINING_JOB_STATUSES = [ +ALLOWED_TRAINING_JOB_STATUSES: List[str] = [ TRAINING_JOB_STATUS_COMPLETE, TRAINING_JOB_STATUS_FAILED, TRAINING_JOB_STATUS_NEW, @@ -149,7 +219,13 @@ # The maximum number of characters allowed for userbio length. MAX_BIO_LENGTH_IN_CHARS = 2000 -ALLOWED_TRAINING_JOB_STATUS_CHANGES = { +MAX_CHARS_IN_BLOG_POST_URL = ( + constants.MAX_CHARS_IN_BLOG_POST_TITLE + + len('-') + + constants.BLOG_POST_ID_LENGTH +) + +ALLOWED_TRAINING_JOB_STATUS_CHANGES: Dict[str, List[str]] = { TRAINING_JOB_STATUS_COMPLETE: [], TRAINING_JOB_STATUS_NEW: [TRAINING_JOB_STATUS_PENDING], TRAINING_JOB_STATUS_PENDING: [TRAINING_JOB_STATUS_COMPLETE, @@ -177,7 +253,9 @@ ENTITY_TYPE_SKILL = 'skill' ENTITY_TYPE_STORY = 'story' ENTITY_TYPE_QUESTION = 'question' -ENTITY_TYPE_VOICEOVER_APPLICATION = 'voiceover_application' + +DIAGNOSTIC_TEST_QUESTION_TYPE_MAIN = 'main_question' +DIAGNOSTIC_TEST_QUESTION_TYPE_BACKUP = 'backup_question' IMAGE_CONTEXT_QUESTION_SUGGESTIONS = 'question_suggestions' IMAGE_CONTEXT_EXPLORATION_SUGGESTIONS = 'exploration_suggestions' @@ -186,7 +264,6 @@ MAX_TASK_MODELS_PER_HISTORY_PAGE = 10 PERIOD_TO_HARD_DELETE_MODELS_MARKED_AS_DELETED = datetime.timedelta(weeks=8) -PERIOD_TO_MARK_MODELS_AS_DELETED = datetime.timedelta(weeks=4) # The maximum number of activities allowed in the playlist of the learner. This # limit applies to both the explorations playlist and the collections playlist. @@ -209,7 +286,7 @@ # The maximum number of results to retrieve in a datastore query # for suggestions. 
-DEFAULT_SUGGESTION_QUERY_LIMIT = 100 +DEFAULT_SUGGESTION_QUERY_LIMIT = 1000 # The maximum number of results to retrieve in a datastore query # for top rated published explorations in /library page. @@ -261,7 +338,7 @@ # incompatible changes are made to the states blob schema in the data store, # this version number must be changed and the exploration migration job # executed. -CURRENT_STATE_SCHEMA_VERSION = 49 +CURRENT_STATE_SCHEMA_VERSION = 55 # The current version of the all collection blob schemas (such as the nodes # structure within the Collection domain object). If any backward-incompatible @@ -331,35 +408,22 @@ # Default name for the initial state of an exploration. DEFAULT_INIT_STATE_NAME = 'Introduction' -# Default content id for the state's content. -DEFAULT_NEW_STATE_CONTENT_ID = 'content' -# Default content id for the interaction's default outcome. -DEFAULT_OUTCOME_CONTENT_ID = 'default_outcome' # Default content id for the explanation in the concept card of a skill. DEFAULT_EXPLANATION_CONTENT_ID = 'explanation' # Content id assigned to rule inputs that do not match any interaction # customization argument choices. INVALID_CONTENT_ID = 'invalid_content_id' -# Default recorded_voiceovers dict for a default state template. -DEFAULT_RECORDED_VOICEOVERS: Dict[str, Dict[str, Dict[str, str]]] = { - 'voiceovers_mapping': { - 'content': {}, - 'default_outcome': {} - } -} -# Default written_translations dict for a default state template. -DEFAULT_WRITTEN_TRANSLATIONS: Dict[str, Dict[str, Dict[str, str]]] = { - 'translations_mapping': { - 'content': {}, - 'default_outcome': {} - } -} # The default content text for the initial state of an exploration. DEFAULT_INIT_STATE_CONTENT_STR = '' # Whether new explorations should have automatic text-to-speech enabled # by default. -DEFAULT_AUTO_TTS_ENABLED = True +DEFAULT_AUTO_TTS_ENABLED = False +# Whether new explorations should have correctness-feedback enabled +# by default. 
+DEFAULT_CORRECTNESS_FEEDBACK_ENABLED = True +# Default value for next_content_id_index in exploration/question. +DEFUALT_NEXT_CONTENT_ID_INDEX = 0 # Default title for a newly-minted collection. DEFAULT_COLLECTION_TITLE = '' @@ -454,27 +518,23 @@ def get_empty_ratings() -> Dict[str, int]: # Use GAE email service by default. EMAIL_SERVICE_PROVIDER = EMAIL_SERVICE_PROVIDER_MAILGUN # If the Mailgun email API is used, the "None" below should be replaced -# with the Mailgun API key. -MAILGUN_API_KEY = None -# If the Mailgun email API is used, the "None" below should be replaced # with the Mailgun domain name (ending with mailgun.org). MAILGUN_DOMAIN_NAME = None # Audience ID of the mailing list for Oppia in Mailchimp. MAILCHIMP_AUDIENCE_ID = None -# Mailchimp API Key. -MAILCHIMP_API_KEY = None # Mailchimp username. MAILCHIMP_USERNAME = None -# Mailchimp secret, used to authenticate webhook requests. -MAILCHIMP_WEBHOOK_SECRET = None +# Valid Mailchimp merge keys. +VALID_MAILCHIMP_FIELD_KEYS = ['NAME'] +# Valid Mailchimp tags. +VALID_MAILCHIMP_TAGS = ['Account', 'Android', 'Web'] ES_LOCALHOST_PORT = 9200 # NOTE TO RELEASE COORDINATORS: Replace this with the correct ElasticSearch # auth information during deployment. ES_CLOUD_ID = None ES_USERNAME = None -ES_PASSWORD = None # NOTE TO RELEASE COORDINATORS: Replace this with the correct Redis Host and # Port when switching to prod server. Keep this in sync with redis.conf in the @@ -500,7 +560,7 @@ def get_empty_ratings() -> Dict[str, int]: DATAFLOW_TEMP_LOCATION = 'gs://todo/todo' DATAFLOW_STAGING_LOCATION = 'gs://todo/todo' -OPPIA_VERSION = '3.1.4' +OPPIA_VERSION = '3.2.9' OPPIA_PYTHON_PACKAGE_PATH = './build/oppia-beam-job-%s.tar.gz' % OPPIA_VERSION # Committer id for system actions. 
The username for the system committer @@ -597,8 +657,10 @@ def get_empty_ratings() -> Dict[str, int]: EMAIL_INTENT_ADD_CONTRIBUTOR_DASHBOARD_REVIEWERS = ( 'add_contributor_dashboard_reviewers' ) -EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES = 'voiceover_application_updates' EMAIL_INTENT_ACCOUNT_DELETED = 'account_deleted' +EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS = ( + 'notify_contributor_dashboard_achievements' +) # Possible intents for email sent in bulk. BULK_EMAIL_INTENT_MARKETING = 'bulk_email_marketing' BULK_EMAIL_INTENT_IMPROVE_EXPLORATION = 'bulk_email_improve_exploration' @@ -612,19 +674,22 @@ def get_empty_ratings() -> Dict[str, int]: MESSAGE_TYPE_SUGGESTION = 'suggestion' MODERATOR_ACTION_UNPUBLISH_EXPLORATION = 'unpublish_exploration' -DEFAULT_SALUTATION_HTML_FN = ( +DEFAULT_SALUTATION_HTML_FN: Callable[[str], str] = ( lambda recipient_username: 'Hi %s,' % recipient_username) -DEFAULT_SIGNOFF_HTML_FN = ( +DEFAULT_SIGNOFF_HTML_FN: Callable[[str], str] = ( lambda sender_username: ( 'Thanks!
    %s (Oppia moderator)' % sender_username)) - -VALID_MODERATOR_ACTIONS = { +DEFAULT_EMAIL_SUBJECT_FN: Callable[[str], str] = ( + lambda exp_title: ( + 'Your Oppia exploration "%s" has been unpublished' % exp_title)) + +VALID_MODERATOR_ACTIONS: Dict[ + str, + Dict[str, Union[str, Callable[[str], str]]] +] = { MODERATOR_ACTION_UNPUBLISH_EXPLORATION: { 'email_config': 'unpublish_exploration_email_html_body', - 'email_subject_fn': ( - lambda exp_title: ( - 'Your Oppia exploration "%s" has been unpublished' % exp_title) - ), + 'email_subject_fn': DEFAULT_EMAIL_SUBJECT_FN, 'email_intent': 'unpublish_exploration', 'email_salutation_html_fn': DEFAULT_SALUTATION_HTML_FN, 'email_signoff_html_fn': DEFAULT_SIGNOFF_HTML_FN, @@ -632,7 +697,7 @@ def get_empty_ratings() -> Dict[str, int]: } # When the site terms were last updated, in UTC. -REGISTRATION_PAGE_LAST_UPDATED_UTC = datetime.datetime(2015, 10, 14, 2, 40, 0) +TERMS_PAGE_LAST_UPDATED_UTC = datetime.datetime(2020, 10, 19) # Format of string for dashboard statistics logs. # NOTE TO DEVELOPERS: This format should not be changed, since it is used in @@ -672,9 +737,13 @@ def get_empty_ratings() -> Dict[str, int]: # homepage. MAX_NUM_CARDS_TO_DISPLAY_ON_BLOG_HOMEPAGE = 10 +# The maximum number of blog post cards to be visible on each page in blog +# search results homepage. +MAX_NUM_CARDS_TO_DISPLAY_ON_BLOG_SEARCH_RESULTS_PAGE = 10 + # The maximum number of blog post cards to be visible on each page in author # specific blog post page. -MAX_NUM_CARDS_TO_DISPLAY_ON_AUTHOR_SPECIFIC_BLOG_POST_PAGE = 12 +MAX_NUM_CARDS_TO_DISPLAY_ON_BLOG_AUTHOR_PROFILE_PAGE = 12 # The maximum number of blog post cards to be visible as suggestions on the # blog post page. @@ -749,14 +818,16 @@ def get_empty_ratings() -> Dict[str, int]: DEMO_EXPLORATIONS = { u'0': 'welcome', u'1': 'multiples.yaml', - u'2': 'binary_search', + # Exploration with ID 2 was removed as it contained string values inside + # NumericInput interaction. 
u'3': 'root_linear_coefficient_theorem', u'4': 'three_balls', # TODO(bhenning): Replace demo exploration '5' with a new exploration # described in #1376. u'6': 'boot_verbs.yaml', u'7': 'hola.yaml', - u'8': 'adventure.yaml', + # Exploration with ID 8 was removed as it contained string values inside + # NumericInput interaction. u'9': 'pitch_perfect.yaml', u'10': 'test_interactions', u'11': 'modeling_graphs', @@ -807,8 +878,9 @@ def get_empty_ratings() -> Dict[str, int]: '%s/email/flagexplorationemailhandler' % TASKQUEUE_URL_PREFIX) TASK_URL_INSTANT_FEEDBACK_EMAILS = ( '%s/email/instantfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX) -TASK_URL_SUGGESTION_EMAILS = ( - '%s/email/suggestionemailhandler' % TASKQUEUE_URL_PREFIX) +TASK_URL_CONTRIBUTOR_DASHBOARD_ACHIEVEMENT_NOTIFICATION_EMAILS = ( + '%s/email/contributordashboardachievementnotificationemailhandler' % ( + TASKQUEUE_URL_PREFIX)) TASK_URL_DEFERRED = ( '%s/deferredtaskshandler' % TASKQUEUE_URL_PREFIX) @@ -817,14 +889,19 @@ def get_empty_ratings() -> Dict[str, int]: ADMIN_URL = '/admin' ADMIN_ROLE_HANDLER_URL = '/adminrolehandler' BLOG_ADMIN_PAGE_URL = '/blog-admin' +CLASSROOM_ADMIN_PAGE_URL = '/classroom-admin' BLOG_ADMIN_ROLE_HANDLER_URL = '/blogadminrolehandler' BLOG_DASHBOARD_DATA_URL = '/blogdashboardhandler/data' BLOG_DASHBOARD_URL = '/blog-dashboard' +DIAGNOSTIC_TEST_PLAYER_PAGE_URL = '/diagnostic-test-player' BLOG_EDITOR_DATA_URL_PREFIX = '/blogeditorhandler/data' BULK_EMAIL_WEBHOOK_ENDPOINT = '/bulk_email_webhook_endpoint' BLOG_HOMEPAGE_DATA_URL = '/blogdatahandler/data' BLOG_HOMEPAGE_URL = '/blog' -AUTHOR_SPECIFIC_BLOG_POST_PAGE_URL_PREFIX = '/blog/author' +BLOG_SEARCH_DATA_URL = '/blog/searchhandler/data' +BLOG_TITLE_HANDLER = '/blogtitlehandler/data' +BLOG_AUTHOR_PROFILE_PAGE_URL_PREFIX = '/blog/author' +BLOG_AUTHOR_PROFILE_PAGE_DATA_URL_PREFIX = '/blog/author/data' CLASSROOM_DATA_HANDLER = '/classroom_data_handler' COLLECTION_DATA_URL_PREFIX = '/collection_handler/data' 
COLLECTION_EDITOR_DATA_URL_PREFIX = '/collection_editor_handler/data' @@ -836,6 +913,9 @@ def get_empty_ratings() -> Dict[str, int]: COLLECTION_URL_PREFIX = '/collection' CONCEPT_CARD_DATA_URL_PREFIX = '/concept_card_handler' CONTRIBUTOR_DASHBOARD_URL = '/contributor-dashboard' +CONTRIBUTOR_STATS_SUMMARIES_URL = '/contributorstatssummaries' +CONTRIBUTOR_ALL_STATS_SUMMARIES_URL = '/contributorallstatssummaries' +CONTRIBUTOR_CERTIFICATE_URL = '/contributorcertificate' CONTRIBUTOR_DASHBOARD_ADMIN_URL = '/contributor-dashboard-admin' CONTRIBUTOR_OPPORTUNITIES_DATA_URL = '/opportunitiessummaryhandler' CREATOR_DASHBOARD_DATA_URL = '/creatordashboardhandler/data' @@ -849,6 +929,7 @@ def get_empty_ratings() -> Dict[str, int]: DASHBOARD_CREATE_MODE_URL = '%s?mode=create' % CREATOR_DASHBOARD_URL EDITOR_URL_PREFIX = '/create' EXPLORATION_DATA_PREFIX = '/createhandler/data' +EXPLORATION_IMAGE_UPLOAD_PREFIX = '/createhandler/imageupload' EXPLORATION_FEATURES_PREFIX = '/explorehandler/features' EXPLORATION_INIT_URL_PREFIX = '/explorehandler/init' EXPLORATION_LEARNER_ANSWER_DETAILS = ( @@ -878,6 +959,8 @@ def get_empty_ratings() -> Dict[str, int]: LEARNER_DASHBOARD_URL = '/learner-dashboard' LEARNER_DASHBOARD_TOPIC_AND_STORY_DATA_URL = ( '/learnerdashboardtopicsandstoriesprogresshandler/data') +LEARNER_COMPLETED_CHAPTERS_COUNT_DATA_URL = ( + '/learnercompletedchapterscounthandler/data') LEARNER_DASHBOARD_COLLECTION_DATA_URL = ( '/learnerdashboardcollectionsprogresshandler/data') LEARNER_DASHBOARD_EXPLORATION_DATA_URL = ( @@ -898,6 +981,7 @@ def get_empty_ratings() -> Dict[str, int]: LIBRARY_TOP_RATED_URL = '/community-library/top-rated' MACHINE_TRANSLATION_DATA_URL = '/machine_translated_state_texts_handler' MERGE_SKILLS_URL = '/merge_skills_handler' +METADATA_VERSION_HISTORY_URL_PREFIX = '/version_history_handler/metadata' NEW_COLLECTION_URL = '/collection_editor_handler/create_new' NEW_EXPLORATION_URL = '/contributehandler/create_new' NEW_QUESTION_URL = 
'/question_editor_handler/create_new' @@ -922,6 +1006,7 @@ def get_empty_ratings() -> Dict[str, int]: PENDING_ACCOUNT_DELETION_URL = '/pending-account-deletion' REVIEW_TEST_DATA_URL_PREFIX = '/review_test_handler/data' REVIEW_TEST_URL_PREFIX = '/review_test' +REVIEWABLE_OPPORTUNITIES_URL = '/getreviewableopportunitieshandler' ROBOTS_TXT_URL = '/robots.txt' SITE_LANGUAGE_DATA_URL = '/save_site_language' SIGNUP_DATA_URL = '/signuphandler/data' @@ -934,6 +1019,10 @@ def get_empty_ratings() -> Dict[str, int]: SKILL_MASTERY_DATA_URL = '/skill_mastery_handler/data' SKILL_RIGHTS_URL_PREFIX = '/skill_editor_handler/rights' SKILL_DESCRIPTION_HANDLER = '/skill_description_handler' +DIAGNOSTIC_TEST_SKILL_ASSIGNMENT_HANDLER = ( + '/diagnostic_test_skill_assignment_handler') +DIAGNOSTIC_TEST_QUESTIONS_HANDLER_URL = '/diagnostic_test_questions_handler_url' +STATE_VERSION_HISTORY_URL_PREFIX = '/version_history_handler/state' STORY_DATA_HANDLER = '/story_data_handler' STORY_EDITOR_URL_PREFIX = '/story_editor' STORY_EDITOR_DATA_URL_PREFIX = '/story_editor_handler/data' @@ -958,6 +1047,7 @@ def get_empty_ratings() -> Dict[str, int]: TOPIC_VIEWER_URL_PREFIX = ( '/learn//') TOPIC_DATA_HANDLER = '/topic_data_handler' +TOPIC_ID_TO_TOPIC_NAME = '/topic_id_to_topic_name_handler' TOPIC_EDITOR_DATA_URL_PREFIX = '/topic_editor_handler/data' TOPIC_EDITOR_URL_PREFIX = '/topic_editor' TOPIC_NAME_HANDLER = '/topic_name_handler' @@ -967,6 +1057,8 @@ def get_empty_ratings() -> Dict[str, int]: TOPIC_URL_FRAGMENT_HANDLER = '/topic_url_fragment_handler' TOPICS_AND_SKILLS_DASHBOARD_DATA_URL = '/topics_and_skills_dashboard/data' UNASSIGN_SKILL_DATA_HANDLER_URL = '/topics_and_skills_dashboard/unassign_skill' +TOPIC_ID_TO_DIAGNOSTIC_TEST_SKILL_IDS_HANDLER = ( + '/topic_id_to_diagnostic_test_skill_ids_handler') TOPICS_AND_SKILLS_DASHBOARD_URL = '/topics-and-skills-dashboard' UNSUBSCRIBE_URL_PREFIX = '/unsubscribehandler' UPLOAD_EXPLORATION_URL = '/contributehandler/upload' @@ -974,6 +1066,17 @@ def 
get_empty_ratings() -> Dict[str, int]: USER_PERMISSIONS_URL_PREFIX = '/createhandler/permissions' USERNAME_CHECK_DATA_URL = '/usernamehandler/data' VALIDATE_STORY_EXPLORATIONS_URL_PREFIX = '/validate_story_explorations' +FACILITATOR_DASHBOARD_HANDLER = '/facilitator_dashboard_handler' +FACILITATOR_DASHBOARD_PAGE_URL = '/facilitator-dashboard' +LEARNER_DASHBOARD_LEARNER_GROUPS_HANDLER = ( + '/learner_dashboard_learner_groups_handler') +CREATE_LEARNER_GROUP_PAGE_URL = '/create-learner-group' +EDIT_LEARNER_GROUP_PAGE_URL = '/edit-learner-group' +CLASSROOM_ADMIN_DATA_HANDLER_URL = '/classroom_admin_data_handler' +NEW_CLASSROOM_ID_HANDLER_URL = '/new_classroom_id_handler' +CLASSROOM_HANDLER_URL = '/classroom' +CLASSROOM_URL_FRAGMENT_HANDLER = '/classroom_url_fragment_handler' +CLASSROOM_ID_HANDLER_URL = '/classroom_id_handler' # Event types. EVENT_TYPE_ALL_STATS = 'all_stats' @@ -1130,8 +1233,8 @@ def get_empty_ratings() -> Dict[str, int]: ROLE_ACTION_VIEW_BY_USERNAME = 'view_by_username' ROLE_ACTION_VIEW_BY_ROLE = 'view_by_role' -USER_FILTER_CRITERION_ROLE = 'role' -USER_FILTER_CRITERION_USERNAME = 'username' +USER_FILTER_CRITERION_ROLE: Final = 'role' +USER_FILTER_CRITERION_USERNAME: Final = 'username' # Max questions allowed in a session of practice questions. QUESTION_BATCH_SIZE = 10 @@ -1143,7 +1246,7 @@ def get_empty_ratings() -> Dict[str, int]: RTE_FORMAT_CKEDITOR = 'ck-editor' # RTE content specifications according to the type of the editor. -RTE_CONTENT_SPEC = { +RTE_CONTENT_SPEC: Dict[str, RteTypeTextAngularDict] = { 'RTE_TYPE_TEXTANGULAR': { # Valid parent-child relation in TextAngular. 'ALLOWED_PARENT_LIST': { @@ -1303,49 +1406,62 @@ def get_empty_ratings() -> Dict[str, int]: constants.ACTIVITY_STATUS_PRIVATE, constants.ACTIVITY_STATUS_PUBLIC] # Commands allowed in CollectionRightsChange and ExplorationRightsChange. 
-COMMON_RIGHTS_ALLOWED_COMMANDS: List[CommandType] = [{ +COMMON_RIGHTS_ALLOWED_COMMANDS: List[ValidCmdDict] = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_CHANGE_ROLE, 'required_attribute_names': ['assignee_id', 'old_role', 'new_role'], 'optional_attribute_names': [], 'user_id_attribute_names': ['assignee_id'], 'allowed_values': { - 'new_role': ALLOWED_ACTIVITY_ROLES, 'old_role': ALLOWED_ACTIVITY_ROLES} + 'new_role': ALLOWED_ACTIVITY_ROLES, 'old_role': ALLOWED_ACTIVITY_ROLES + }, + 'deprecated_values': {} }, { 'name': CMD_REMOVE_ROLE, 'required_attribute_names': ['removed_user_id', 'old_role'], 'optional_attribute_names': [], 'user_id_attribute_names': ['removed_user_id'], - 'allowed_values': {'old_role': ALLOWED_ACTIVITY_ROLES} + 'allowed_values': {'old_role': ALLOWED_ACTIVITY_ROLES}, + 'deprecated_values': {} }, { 'name': CMD_CHANGE_PRIVATE_VIEWABILITY, 'required_attribute_names': [ 'old_viewable_if_private', 'new_viewable_if_private'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_RELEASE_OWNERSHIP, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_UPDATE_FIRST_PUBLISHED_MSEC, 'required_attribute_names': [ 'old_first_published_msec', 'new_first_published_msec'], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_DELETE_COMMIT, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }] 
-COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS: List[CommandType] = copy.deepcopy( +COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS: List[ValidCmdDict] = copy.deepcopy( COMMON_RIGHTS_ALLOWED_COMMANDS ) COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS.append({ @@ -1356,7 +1472,8 @@ def get_empty_ratings() -> Dict[str, int]: 'allowed_values': { 'old_status': ALLOWED_ACTIVITY_STATUS, 'new_status': ALLOWED_ACTIVITY_STATUS - } + }, + 'deprecated_values': {} }) EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS = copy.deepcopy( @@ -1388,11 +1505,13 @@ def get_empty_ratings() -> Dict[str, int]: ALLOWED_TOPIC_ROLES = [ROLE_NONE, ROLE_MANAGER] # Commands allowed in TopicRightsChange. -TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS: List[CommandType] = [{ +TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS: List[ValidCmdDict] = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_CHANGE_ROLE, 'required_attribute_names': ['assignee_id', 'new_role', 'old_role'], @@ -1400,27 +1519,36 @@ def get_empty_ratings() -> Dict[str, int]: 'user_id_attribute_names': ['assignee_id'], 'allowed_values': { 'new_role': ALLOWED_TOPIC_ROLES, 'old_role': ALLOWED_TOPIC_ROLES - } + }, + 'deprecated_values': {} }, { 'name': CMD_REMOVE_MANAGER_ROLE, 'required_attribute_names': ['removed_user_id'], 'optional_attribute_names': [], - 'user_id_attribute_names': ['removed_user_id'] + 'user_id_attribute_names': ['removed_user_id'], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_PUBLISH_TOPIC, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }, { 'name': CMD_UNPUBLISH_TOPIC, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 
'deprecated_values': {} }, { 'name': CMD_DELETE_COMMIT, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }] USER_ID_RANDOM_PART_LENGTH = 32 @@ -1452,9 +1580,19 @@ def get_empty_ratings() -> Dict[str, int]: } # Constants defining various suggestion types. -SUGGESTION_TYPE_EDIT_STATE_CONTENT = 'edit_exploration_state_content' -SUGGESTION_TYPE_TRANSLATE_CONTENT = 'translate_content' -SUGGESTION_TYPE_ADD_QUESTION = 'add_question' +SUGGESTION_TYPE_EDIT_STATE_CONTENT: Final = 'edit_exploration_state_content' +SUGGESTION_TYPE_TRANSLATE_CONTENT: Final = 'translate_content' +SUGGESTION_TYPE_ADD_QUESTION: Final = 'add_question' + +CONTRIBUTION_TYPE_TRANSLATION: Final = 'translation' +CONTRIBUTION_TYPE_QUESTION: Final = 'question' +CONTRIBUTION_SUBTYPE_ACCEPTANCE: Final = 'acceptance' +CONTRIBUTION_SUBTYPE_REVIEW: Final = 'review' +CONTRIBUTION_SUBTYPE_EDIT: Final = 'edit' +CONTRIBUTION_SUBTYPE_SUBMISSION: Final = 'submission' + +TRANSLATION_TEAM_LEAD = 'Anubhuti Varshney' +QUESTION_TEAM_LEAD = 'Jatin Kumar Jadoun' # Suggestion fields that can be queried. ALLOWED_SUGGESTION_QUERY_FIELDS = [ @@ -1483,8 +1621,50 @@ def get_empty_ratings() -> Dict[str, int]: SUGGESTION_TYPE_ADD_QUESTION ] +# The sort keys of submitted questions shown on the Contributor Dashboard. +SUGGESTIONS_SORT_KEYS = [constants.SUGGESTIONS_SORT_KEY_DATE] + # Prefix for all access validation handlers. # The naming scheme for access validation handlers is # '/access_validation_handler/' # example '/access_validation_handler/validate_access_to_splash_page'. ACCESS_VALIDATION_HANDLER_PREFIX = '/access_validation_handler' + +# The possible commit types. +COMMIT_TYPE_CREATE = 'create' +COMMIT_TYPE_REVERT = 'revert' +COMMIT_TYPE_EDIT = 'edit' +COMMIT_TYPE_DELETE = 'delete' + +# Interaction IDs of math related interactions. 
+MATH_INTERACTION_IDS = [ + 'NumericExpressionInput', 'AlgebraicExpressionInput', 'MathEquationInput'] + +# The task entry ID template used by the task entry model. +TASK_ENTRY_ID_TEMPLATE = '%s.%s.%d.%s.%s.%s' + +# The composite entity ID template used by the task entry model. +COMPOSITE_ENTITY_ID_TEMPLATE = '%s.%s.%d' + +# The data type for the translated or translatable content in any +# BaseTranslatableObject. +ContentValueType = Union[str, List[str]] + +MIN_ALLOWED_MISSING_OR_UPDATE_NEEDED_WRITTEN_TRANSLATIONS = 10 + + +class TranslatableEntityType(enum.Enum): + """Represents all possible entity types which support new translations + architecture. + """ + + EXPLORATION = 'exploration' + QUESTION = 'question' + + +class TranslatedContentDict(TypedDict): + """Dictionary representing TranslatedContent object.""" + + content_value: ContentValueType + needs_update: bool + content_format: str diff --git a/core/feconf_test.py b/core/feconf_test.py new file mode 100644 index 000000000000..ba8afb05e730 --- /dev/null +++ b/core/feconf_test.py @@ -0,0 +1,99 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for core/feconf.py.""" + +from __future__ import annotations + +import datetime +import os + +from core import feconf +from core.tests import test_utils + +import bs4 + + +class FeconfTests(test_utils.GenericTestBase): + """Unit tests for core/feconf.py.""" + + def test_dev_mode_in_production_throws_error(self) -> None: + def mock_getenv(env: str) -> str: + if env == 'SERVER_SOFTWARE': + return 'Production' + return 'Development' + + swap_getenv = self.swap(os, 'getenv', mock_getenv) + with swap_getenv, self.assertRaisesRegex( + Exception, 'DEV_MODE can\'t be true on production.'): + feconf.check_dev_mode_is_true() + + def test_dev_mode_in_development_passes_succcessfully(self) -> None: + def mock_getenv(*unused_args: str) -> str: + return 'Development' + + swap_getenv = self.swap(os, 'getenv', mock_getenv) + with swap_getenv: + feconf.check_dev_mode_is_true() + + def test_get_empty_ratings(self) -> None: + self.assertEqual( + feconf.get_empty_ratings(), + {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0}) + + def test_callable_variables_return_correctly(self) -> None: + recipient_username = 'Anshuman' + self.assertEqual( + feconf.DEFAULT_SALUTATION_HTML_FN(recipient_username), + 'Hi %s,' % recipient_username) + + sender_username = 'Ezio' + self.assertEqual( + feconf.DEFAULT_SIGNOFF_HTML_FN(sender_username), + 'Thanks!
    %s (Oppia moderator)' % sender_username) + + exploration_title = 'Test' + self.assertEqual( + feconf.DEFAULT_EMAIL_SUBJECT_FN(exploration_title), + 'Your Oppia exploration "Test" has been unpublished') + + self.assertEqual( + feconf.VALID_MODERATOR_ACTIONS[ + 'unpublish_exploration']['email_config'], + 'unpublish_exploration_email_html_body') + self.assertEqual( + feconf.VALID_MODERATOR_ACTIONS[ + 'unpublish_exploration']['email_intent'], + 'unpublish_exploration') + + def test_terms_page_last_updated_is_in_sync_with_terms_page(self) -> None: + with open( + 'core/templates/pages/terms-page/terms-page.component.html', + 'r', + encoding='utf-8' + ) as f: + terms_page_contents = f.read() + terms_page_parsed_html = bs4.BeautifulSoup( + terms_page_contents, 'html.parser') + max_date = max( + datetime.datetime.strptime( + element.get_text().split(':')[0], '%d %b %Y' + ) for element in terms_page_parsed_html.find( + 'ul', class_='e2e-test-changelog' + ).find_all('li') + ) + self.assertEqual( + feconf.TERMS_PAGE_LAST_UPDATED_UTC, max_date) diff --git a/core/handler_schema_constants.py b/core/handler_schema_constants.py index cc087c3231dd..c8554448d7f3 100644 --- a/core/handler_schema_constants.py +++ b/core/handler_schema_constants.py @@ -29,113 +29,28 @@ from __future__ import annotations HANDLER_CLASS_NAMES_WHICH_STILL_NEED_SCHEMAS = [ - 'AnswerSubmittedEventHandler', - 'AssetDevHandler', - 'AudioUploadHandler', - 'BulkEmailWebhookEndpoint', 'DeferredTasksHandler', - 'DeleteAccountPage', - 'EditableQuestionDataHandler', - 'EditableSkillDataHandler', - 'EditableStoryDataHandler', - 'EditableSubtopicPageDataHandler', - 'EditableTopicDataHandler', - 'ExplorationCompleteEventHandler', - 'ExplorationEmbedPage', - 'ExplorationMaybeLeaveHandler', - 'ExportAccountHandler', 'FeedbackThreadStatusChangeEmailHandler', - 'FetchSkillsHandler', 'FlagExplorationEmailHandler', - 'IncomingReplyEmailHandler', 'InstantFeedbackMessageEmailHandler', - 
'LearnerAnswerDetailsSubmissionHandler', - 'LearnerGoalsHandler', - 'LeaveForRefresherExpEventHandler', - 'MemoryCacheAdminHandler', - 'MergeSkillHandler', - 'NewSkillHandler', - 'NewTopicHandler', - 'NotificationHandler', - 'NotificationsDashboardHandler', - 'NotificationsDashboardPage', - 'NotificationsHandler', - 'OldNotificationsDashboardRedirectPage', - 'PendingAccountDeletionPage', - 'PreferenceHandler', 'PreferencesHandler', - 'ProfileHandler', - 'ProfilePage', - 'QuebstionsListHandler', - 'QuestionCountDataHandler', 'QuestionCreationHandler', - 'QuestionPlayerHandler', - 'QuestionSkillLinkHandler', - 'QuestionsListHandler', - 'ReaderFeedbackHandler', - 'RecentCommitsHandler', - 'RecommendationsHandler', - 'ResubmitSuggestionHandler', - 'ReviewableSuggestionsHandler', - 'SignupHandler', - 'SignupPage', - 'SiteLanguageHandler', - 'SkillDataHandler', - 'SkillDescriptionHandler', - 'SkillMasteryDataHandler', - 'SkillsDashboardPageDataHandler', - 'SolutionHitEventHandler', - 'StartedTranslationTutorialEventHandler', - 'StateCompleteEventHandler', - 'StateHitEventHandler', - 'StoryUrlFragmentHandler', - 'SubtopicPageDataHandler', - 'SubtopicViewerPage', - 'SuggestionEmailHandler', - 'SuggestionHandler', - 'SuggestionListHandler', - 'SuggestionToExplorationActionHandler', - 'SuggestionToSkillActionHandler', - 'SuggestionsProviderHandler', - 'TopicAssignmentsHandler', - 'TopicEditorPage', - 'TopicEditorStoryHandler', - 'TopicNameHandler', - 'TopicPageDataHandler', - 'TopicPublishHandler', - 'TopicPublishSendMailHandler', - 'TopicRightsHandler', - 'TopicUrlFragmentHandler', - 'TopicViewerPage', - 'TopicsAndSkillsDashboardPage', - 'TopicsAndSkillsDashboardPageDataHandler', 'UnsentFeedbackEmailHandler', - 'UpdateQuestionSuggestionHandler', - 'UpdateTranslationSuggestionHandler', - 'UrlHandler', - 'UserInfoHandler', - 'UserSubmittedSuggestionsHandler', - 'UsernameCheckHandler', - 'ValidateExplorationsHandler', - 'ValueGeneratorHandler', - 
'VoiceArtistManagementHandler', - 'OppiaMLVMHandler', - # Oppia Root page is the unified entry for page routes to the frontend. - # So, it should exempted from schema validation. - 'OppiaRootPage', - 'CsrfTokenHandler', - 'Error404Handler', - 'FrontendErrorHandler', - 'WarmupPage', - 'HomePageRedirectPage', - 'SplashRedirectPage' ] # These handlers do not require any schema validation. HANDLER_CLASS_NAMES_WHICH_DO_NOT_REQUIRE_SCHEMAS = [ + # Handler 'Error404Handler' is not supposed to be schema validated because + # this handler is defined to catch all 404 errors that will occur in any + # other handlers. + 'Error404Handler', 'SessionBeginHandler', 'SessionEndHandler', - 'SeedFirebaseHandler' + 'SeedFirebaseHandler', + # Oppia root page is the unified entry for page routes to the frontend. + # So, it should be exempted from schema validation. + 'OppiaRootPage', + 'OppiaLightweightRootPage' ] # HANDLER_CLASS_NAMES_WITH_NO_SCHEMA is addressed everywhere in the diff --git a/core/handler_schema_constants_test.py b/core/handler_schema_constants_test.py new file mode 100644 index 000000000000..fa136c392740 --- /dev/null +++ b/core/handler_schema_constants_test.py @@ -0,0 +1,33 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +"""Unit tests for handler_schema_constants.py""" + +from __future__ import annotations + +from core import handler_schema_constants # pylint: disable=unused-import +from core.tests import test_utils + + +class HandlerSchemaConstantsTests(test_utils.GenericTestBase): + + # At Oppia, we require all files to have an associated test file, since + # that's how the backend test coverage checks detect that there are Python + # files to cover in the first place. For files that don't have logic + # (like core/handler_schema_constants.py), a trivial test like the one + # here is sufficient. + def test_trivial(self) -> None: + pass diff --git a/core/jobs/base_jobs.py b/core/jobs/base_jobs.py index fe8c667ccc91..f903af89c550 100644 --- a/core/jobs/base_jobs.py +++ b/core/jobs/base_jobs.py @@ -60,7 +60,7 @@ import apache_beam as beam -from typing import Any, Dict, List, Tuple, Type, cast # isort: skip +from typing import Dict, List, Tuple, Type, cast # isort: skip class JobMetaclass(type): @@ -79,7 +79,7 @@ def __new__( cls: Type[JobMetaclass], name: str, bases: Tuple[type, ...], - namespace: Dict[str, Any] + namespace: Dict[str, str] ) -> JobMetaclass: """Creates a new job class with type `JobMetaclass`. @@ -102,6 +102,11 @@ def __new__( Returns: class. The new class instance. + + Raises: + TypeError. The given name is already in use. + TypeError. The given name must end with "Job". + TypeError. The class with the given name must inherit from JobBase. """ if name in cls._JOB_REGISTRY: collision = cls._JOB_REGISTRY[name] @@ -110,6 +115,10 @@ def __new__( job_cls = super(JobMetaclass, cls).__new__(cls, name, bases, namespace) + # Here we use cast because the return value of '__new__' method + # is 'type' but we want to return a more narrower type 'JobMetaclass'. + # So, to narrow down the type from 'type' to 'JobMetaclass', we used + # cast here. 
if name == 'JobBase': return cast(JobMetaclass, job_cls) @@ -123,6 +132,10 @@ def __new__( else: raise TypeError('%s must inherit from JobBase' % name) + # Here we use cast because the return value of '__new__' method + # is 'type' but we want to return a more narrower type 'JobMetaclass'. + # So, to narrow down the type from 'type' to 'JobMetaclass', we used + # cast here. return cast(JobMetaclass, job_cls) @classmethod @@ -153,6 +166,9 @@ def get_job_class_by_name(cls, job_name: str) -> Type[JobBase]: Returns: class. The class associated to the given job name. + + Raises: + ValueError. Given job name is not registered as a job. """ if job_name not in cls._JOB_REGISTRY: raise ValueError('%s is not registered as a job' % job_name) diff --git a/core/jobs/base_jobs_test.py b/core/jobs/base_jobs_test.py index edd9c5bc64d9..cb9310fa9dd7 100644 --- a/core/jobs/base_jobs_test.py +++ b/core/jobs/base_jobs_test.py @@ -42,7 +42,7 @@ class JobMetaclassTests(test_utils.TestBase): def tearDown(self) -> None: MockJobMetaclass.clear() - super(JobMetaclassTests, self).tearDown() + super().tearDown() def test_does_not_put_base_classes_in_registry(self) -> None: class FooJobBase(base_jobs.JobBase, metaclass=MockJobMetaclass): # pylint: disable=unused-variable @@ -52,9 +52,10 @@ class FooJobBase(base_jobs.JobBase, metaclass=MockJobMetaclass): # pylint: disab self.assertEqual(MockJobMetaclass.get_all_jobs(), []) self.assertEqual(MockJobMetaclass.get_all_job_names(), []) - self.assertRaisesRegexp( # type: ignore[no-untyped-call] - ValueError, 'FooJobBase is not registered as a job', - lambda: MockJobMetaclass.get_job_class_by_name('FooJobBase')) + with self.assertRaisesRegex( + ValueError, 'FooJobBase is not registered as a job' + ): + MockJobMetaclass.get_job_class_by_name('FooJobBase') def test_puts_non_base_classes_in_registry(self) -> None: class FooJob(base_jobs.JobBase, metaclass=MockJobMetaclass): @@ -70,14 +71,14 @@ def 
test_raises_type_error_for_jobs_with_duplicate_names(self) -> None: # NOTE: Creates a 'FooJob' programmatically. MockJobMetaclass('FooJob', (base_jobs.JobBase,), {}) - with self.assertRaisesRegexp(TypeError, 'name is already used'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(TypeError, 'name is already used'): class FooJob(base_jobs.JobBase, metaclass=MockJobMetaclass): # pylint: disable=unused-variable """Job class with duplicate name.""" pass def test_raises_type_error_if_job_base_not_subclassed(self) -> None: - with self.assertRaisesRegexp(TypeError, 'must inherit from JobBase'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(TypeError, 'must inherit from JobBase'): class FooJob(metaclass=MockJobMetaclass): # pylint: disable=unused-variable """Job class that does not inherit from JobBase.""" @@ -85,7 +86,7 @@ def __init__(self) -> None: pass def test_raises_type_error_if_job_name_not_suffixed_with_job(self) -> None: - with self.assertRaisesRegexp(TypeError, 'must end with "Job"'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(TypeError, 'must end with "Job"'): class FooBar(base_jobs.JobBase, metaclass=MockJobMetaclass): # pylint: disable=unused-variable """Job class that does not have a name ending with "Job".""" @@ -95,7 +96,8 @@ class FooBar(base_jobs.JobBase, metaclass=MockJobMetaclass): # pylint: disable=u class JobBaseTests(job_test_utils.PipelinedTestBase): def test_run_raises_not_implemented_error(self) -> None: - self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, - re.escape('Subclasses must implement the run() method'), - base_jobs.JobBase(self.pipeline).run) + re.escape('Subclasses must implement the run() method') + ): + base_jobs.JobBase(self.pipeline).run() diff --git a/core/jobs/batch_jobs/blog_post_search_indexing_jobs.py b/core/jobs/batch_jobs/blog_post_search_indexing_jobs.py new file mode 100644 index 000000000000..9b9440e7125b --- 
/dev/null +++ b/core/jobs/batch_jobs/blog_post_search_indexing_jobs.py @@ -0,0 +1,103 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Jobs that are run by CRON scheduler.""" + +from __future__ import annotations + +from core.domain import blog_domain +from core.domain import blog_services +from core.domain import search_services +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam +import result +from typing import Final, Iterable, List + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import blog_models + from mypy_imports import search_services as platform_search_services + +(blog_models,) = models.Registry.import_models([models.Names.BLOG]) + +platform_search_services = models.Registry.import_search_services() + + +class IndexBlogPostsInSearchJob(base_jobs.JobBase): + """Job that indexes the blog posts in Elastic Search.""" + + MAX_BATCH_SIZE: Final = 1000 + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of 'SUCCESS' or 'FAILURE' results from + the Elastic Search. + + Returns: + PCollection. A PCollection of 'SUCCESS' or 'FAILURE' results from + the Elastic Search. 
+ """ + return ( + self.pipeline + | 'Get all non-deleted models' >> ( + ndb_io.GetModels( + blog_models.BlogPostSummaryModel.get_all( + include_deleted=False + ) + )) + | 'Convert BlogPostSummaryModels to domain objects' >> beam.Map( + blog_services.get_blog_post_summary_from_model) + | 'Split models into batches' >> beam.transforms.util.BatchElements( + max_batch_size=self.MAX_BATCH_SIZE) + | 'Index batches of models' >> beam.ParDo( + IndexBlogPostSummaries()) + | 'Count the output' >> ( + job_result_transforms.ResultsToJobRunResults()) + ) + + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error +# (Class cannot subclass 'PTransform' (has type 'Any')), we added an +# ignore here. +class IndexBlogPostSummaries(beam.DoFn): # type: ignore[misc] + """DoFn to index blog post summaries.""" + + def process( + self, blog_post_summaries: List[blog_domain.BlogPostSummary] + ) -> Iterable[result.Result[None, Exception]]: + """Index blog post summaries and catch any errors. + + Args: + blog_post_summaries: list(BlogPostSummaries). List of Blog Post + Summary domain objects to be indexed. + + Yields: + JobRunResult. List containing one element, which is either SUCCESS, + or FAILURE. + """ + try: + search_services.index_blog_post_summaries( + blog_post_summaries) + for _ in blog_post_summaries: + yield result.Ok() + except platform_search_services.SearchException as e: + yield result.Err(e) diff --git a/core/jobs/batch_jobs/blog_post_search_indexing_jobs_test.py b/core/jobs/batch_jobs/blog_post_search_indexing_jobs_test.py new file mode 100644 index 000000000000..3768e6bc83dd --- /dev/null +++ b/core/jobs/batch_jobs/blog_post_search_indexing_jobs_test.py @@ -0,0 +1,247 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for jobs.batch_jobs.blog_post_search_indexing_jobs.""" + +from __future__ import annotations + +import datetime +import math + +from core import utils +from core.domain import search_services +from core.jobs import job_test_utils +from core.jobs.batch_jobs import blog_post_search_indexing_jobs +from core.jobs.types import job_run_result +from core.platform import models + +from typing import Dict, Final, List, Tuple, Type, Union + +MYPY = False +if MYPY: + from mypy_imports import blog_models + from mypy_imports import search_services as platform_search_services + +(blog_models,) = models.Registry.import_models([models.Names.BLOG]) + +platform_search_services = models.Registry.import_search_services() + +StatsType = List[Tuple[str, List[Dict[str, Union[bool, int, str]]]]] + + +class IndexBlogPostSummariesInSearchJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + blog_post_search_indexing_jobs.IndexBlogPostsInSearchJob + ] = blog_post_search_indexing_jobs.IndexBlogPostsInSearchJob + + USER_ID_1: Final = 'id_1' + USERNAME: Final = 'someUsername' + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_indexes_non_deleted_model(self) -> None: + blog_summary = self.create_model( + blog_models.BlogPostSummaryModel, + id='abcd', + author_id=self.USER_ID_1, + deleted=False, + title='title', + summary='blog_post_summary', + url_fragment='sample-url-fragment', + 
tags=['tag1', 'tag2'], + thumbnail_filename='xyzabc', + published_on=datetime.datetime.utcnow(), + ) + blog_summary.update_timestamps() + blog_summary.put() + + add_docs_to_index_swap = self.swap_with_checks( + platform_search_services, + 'add_documents_to_index', + lambda _, __: None, + expected_args=[ + ( + [{ + 'id': 'abcd', + 'title': 'title', + 'tags': ['tag1', 'tag2'], + 'rank': math.floor( + utils.get_time_in_millisecs( + blog_summary.published_on + )), + }], + search_services.SEARCH_INDEX_BLOG_POSTS) + ] + ) + + with add_docs_to_index_swap: + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('SUCCESS: 1') + ]) + + def test_indexes_non_deleted_models(self) -> None: + date_time_now = datetime.datetime.utcnow() + for i in range(5): + blog_summary = self.create_model( + blog_models.BlogPostSummaryModel, + id='abcd%s' % i, + author_id=self.USER_ID_1, + deleted=False, + title='title', + summary='blog_post_summary', + url_fragment='sample-url-fragment', + tags=['tag1', 'tag2'], + thumbnail_filename='xyzabc', + published_on=date_time_now, + ) + blog_summary.update_timestamps() + blog_summary.put() + + add_docs_to_index_swap = self.swap_with_checks( + platform_search_services, + 'add_documents_to_index', + lambda _, __: None, + expected_args=[ + ( + [{ + 'id': 'abcd%s' % i, + 'title': 'title', + 'tags': ['tag1', 'tag2'], + 'rank': math.floor( + utils.get_time_in_millisecs( + blog_summary.published_on + )), + }], + search_services.SEARCH_INDEX_BLOG_POSTS + ) for i in range(5) + ] + ) + + max_batch_size_swap = self.swap( + blog_post_search_indexing_jobs.IndexBlogPostsInSearchJob, + 'MAX_BATCH_SIZE', 1) + + with add_docs_to_index_swap, max_batch_size_swap: + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('SUCCESS: 5') + ]) + + def test_reports_failed_when_indexing_fails(self) -> None: + blog_summary = self.create_model( + blog_models.BlogPostSummaryModel, + id='abcd', + author_id=self.USER_ID_1, + deleted=False, + title='title', + 
summary='blog_post_summary', + url_fragment='sample-url-fragment', + tags=['tag1', 'tag2'], + thumbnail_filename='xyzabc', + published_on=datetime.datetime.utcnow(), + ) + blog_summary.update_timestamps() + blog_summary.put() + + def add_docs_to_index_mock( + unused_documents: Dict[str, Union[int, str, List[str]]], + unused_index_name: str + ) -> None: + raise platform_search_services.SearchException('search exception') + + add_docs_to_index_swap = self.swap_with_checks( + platform_search_services, + 'add_documents_to_index', + add_docs_to_index_mock, + expected_args=[ + ( + [{ + 'id': 'abcd', + 'title': 'title', + 'tags': ['tag1', 'tag2'], + 'rank': math.floor( + utils.get_time_in_millisecs( + blog_summary.published_on + )), + }], + search_services.SEARCH_INDEX_BLOG_POSTS + ) + ] + ) + + with add_docs_to_index_swap: + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stderr( + 'ERROR: "search exception": 1' + ) + ]) + + def test_skips_deleted_model(self) -> None: + blog_summary = self.create_model( + blog_models.BlogPostSummaryModel, + id='abcd', + author_id=self.USER_ID_1, + deleted=True, + title='title', + summary='blog_post_summary', + url_fragment='sample-url-fragment', + tags=['tag1', 'tag2'], + thumbnail_filename='xyzabc', + published_on=datetime.datetime.utcnow(), + ) + blog_summary.update_timestamps() + blog_summary.put() + + add_docs_to_index_swap = self.swap_with_checks( + platform_search_services, + 'add_documents_to_index', + lambda _, __: None, + called=False + ) + + with add_docs_to_index_swap: + self.assert_job_output_is_empty() + + def test_skips_draft_blog_post_model(self) -> None: + blog_summary = self.create_model( + blog_models.BlogPostSummaryModel, + id='abcd', + author_id=self.USER_ID_1, + deleted=False, + title='title', + summary='blog_post_summary', + url_fragment='sample-url-fragment', + tags=['tag1', 'tag2'], + thumbnail_filename='xyzabc', + published_on=None, + ) + blog_summary.update_timestamps() + blog_summary.put() + + 
add_docs_to_index_swap = self.swap_with_checks( + platform_search_services, + 'add_documents_to_index', + lambda _, __: None, + expected_args=[( + [], search_services.SEARCH_INDEX_BLOG_POSTS + )] + ) + + with add_docs_to_index_swap: + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('SUCCESS: 1') + ]) diff --git a/core/jobs/batch_jobs/blog_validation_jobs.py b/core/jobs/batch_jobs/blog_validation_jobs.py index 267b1bd61850..afe4178ad2a5 100644 --- a/core/jobs/batch_jobs/blog_validation_jobs.py +++ b/core/jobs/batch_jobs/blog_validation_jobs.py @@ -18,6 +18,8 @@ from __future__ import annotations +import datetime + from core.jobs import base_jobs from core.jobs import job_utils from core.jobs.io import ndb_io @@ -26,11 +28,13 @@ import apache_beam as beam +from typing import Union + MYPY = False if MYPY: # pragma: no cover from mypy_imports import blog_models -(blog_models,) = models.Registry.import_models([models.NAMES.blog]) +(blog_models,) = models.Registry.import_models([models.Names.BLOG]) class FindDuplicateBlogPostTitlesJob(base_jobs.JobBase): @@ -109,14 +113,50 @@ def run( ) -class GetModelsWithDuplicatePropertyValues(beam.PTransform): - """Helper class to retrive models with duplicate properties.""" +class FindDuplicateBlogAuthorDetailsModelForAuthorJob(base_jobs.JobBase): + """Validates that only one Blog Author Detail Models exists corresponding to + given author id. 
+ """ + + def run( + self + ) -> beam.PCollection[blog_validation_errors.DuplicateBlogAuthorModelError]: + return ( + self.pipeline + | 'Get every Blog Author Details Model' >> ( + ndb_io.GetModels(blog_models.BlogAuthorDetailsModel.query())) + | GetModelsWithDuplicatePropertyValues('author_id') + | 'Flatten models into a list of errors' >> beam.FlatMap( + lambda models: [ + blog_validation_errors.DuplicateBlogAuthorModelError(model) + for model in models + ]) + ) + + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error +# (Class cannot subclass 'PTransform' (has type 'Any')), we added an +# ignore here. +class GetModelsWithDuplicatePropertyValues(beam.PTransform): # type: ignore[misc] + """Helper class to retrieve models with duplicate properties.""" def __init__(self, property_name: str) -> None: - super(GetModelsWithDuplicatePropertyValues, self).__init__() + super().__init__() self.property_name = property_name - def expand(self, blog_model_pcoll): + def expand( + self, blog_model_pcoll: beam.PCollection[Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel, + blog_models.BlogAuthorDetailsModel + ]] + ) -> beam.PCollection[Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel, + blog_models.BlogAuthorDetailsModel + ]]: return ( blog_model_pcoll | 'Discard models with empty property value' >> ( @@ -128,10 +168,12 @@ def expand(self, blog_model_pcoll): | 'Discard %s key' % self.property_name >> ( beam.Values()) # pylint: disable=no-value-for-parameter | 'Discard models with unique %s' % self.property_name >> ( - beam.Filter(lambda models: len(models) > 1)) + beam.Filter(lambda models: len(list(models)) > 1)) ) - def get_property_value(self, model): + def get_property_value( + self, model: blog_models.BlogPostModel + ) -> Union[str, bool, datetime.datetime]: 
"""Returns value of the given property of model Args: @@ -140,4 +182,10 @@ def get_property_value(self, model): Returns: value. The value of the property of model. """ - return job_utils.get_model_property(model, self.property_name) + property_value: Union[ + str, bool, datetime.datetime + ] = job_utils.get_model_property(model, self.property_name) + # Here, we are narrowing down the type from Any to all the possible + # types of a BlogPostModel's property. + assert isinstance(property_value, (str, bool, datetime.datetime)) + return property_value diff --git a/core/jobs/batch_jobs/blog_validation_jobs_test.py b/core/jobs/batch_jobs/blog_validation_jobs_test.py index bd802121d841..711b764dc45e 100644 --- a/core/jobs/batch_jobs/blog_validation_jobs_test.py +++ b/core/jobs/batch_jobs/blog_validation_jobs_test.py @@ -23,33 +23,37 @@ from core.jobs.types import blog_validation_errors from core.platform import models +from typing import Type + MYPY = False if MYPY: # pragma: no cover from mypy_imports import blog_models -(blog_models,) = models.Registry.import_models([models.NAMES.blog]) +(blog_models,) = models.Registry.import_models([models.Names.BLOG]) class FindDuplicateBlogPostTitlesJobTests(job_test_utils.JobTestBase): - JOB_CLASS = blog_validation_jobs.FindDuplicateBlogPostTitlesJob + JOB_CLASS: Type[ + blog_validation_jobs.FindDuplicateBlogPostTitlesJob + ] = blog_validation_jobs.FindDuplicateBlogPostTitlesJob def test_run_with_same_titles_for_blog_posts(self) -> None: - blog_post_model_1 = self.create_model( # type: ignore[no-untyped-call] + blog_post_model_1 = self.create_model( blog_models.BlogPostModel, id='validblogid1', title='Sample Title', content='

    hello

    ,', author_id='user', url_fragment='url_fragment_1') - blog_post_model_2 = self.create_model( # type: ignore[no-untyped-call] + blog_post_model_2 = self.create_model( blog_models.BlogPostModel, id='validblogid2', title='Sample Title', content='

    hello tho

    ,', author_id='user', url_fragment='url_fragment_2') - blog_post_model_3 = self.create_model( # type: ignore[no-untyped-call] + blog_post_model_3 = self.create_model( blog_models.BlogPostModel, id='validblogid3', title='Sample Diff Title', @@ -57,7 +61,7 @@ def test_run_with_same_titles_for_blog_posts(self) -> None: author_id='user', url_fragment='url_fragment_2') - self.put_multi( # type: ignore[no-untyped-call] + self.put_multi( [ blog_post_model_1, blog_post_model_2, @@ -65,7 +69,7 @@ def test_run_with_same_titles_for_blog_posts(self) -> None: ] ) - self.assert_job_output_is( # type: ignore[no-untyped-call] + self.assert_job_output_is( [ blog_validation_errors.DuplicateBlogTitleError( blog_post_model_1 @@ -79,24 +83,26 @@ def test_run_with_same_titles_for_blog_posts(self) -> None: class FindDuplicateBlogPostSummaryTitlesJobTests(job_test_utils.JobTestBase): - JOB_CLASS = blog_validation_jobs.FindDuplicateBlogPostSummaryTitlesJob + JOB_CLASS: Type[ + blog_validation_jobs.FindDuplicateBlogPostSummaryTitlesJob + ] = blog_validation_jobs.FindDuplicateBlogPostSummaryTitlesJob def test_run_with_same_titles_for_blog_posts(self) -> None: - blog_post_summary_model_1 = self.create_model( # type: ignore[no-untyped-call] + blog_post_summary_model_1 = self.create_model( blog_models.BlogPostSummaryModel, id='validblogid1', title='Sample Title', summary='

    hello

    ,', author_id='user', url_fragment='url_fragment_1') - blog_post_summary_model_2 = self.create_model( # type: ignore[no-untyped-call] + blog_post_summary_model_2 = self.create_model( blog_models.BlogPostSummaryModel, id='validblogid2', title='Sample Title', summary='

    hello tho

    ,', author_id='user', url_fragment='url_fragment_2') - blog_post_summary_model_3 = self.create_model( # type: ignore[no-untyped-call] + blog_post_summary_model_3 = self.create_model( blog_models.BlogPostSummaryModel, id='validblogid3', title='Sample Diff Title', @@ -104,14 +110,14 @@ def test_run_with_same_titles_for_blog_posts(self) -> None: author_id='user', url_fragment='url_fragment_2') - self.put_multi( # type: ignore[no-untyped-call] + self.put_multi( [ blog_post_summary_model_1, blog_post_summary_model_2, blog_post_summary_model_3, ]) - self.assert_job_output_is( # type: ignore[no-untyped-call] + self.assert_job_output_is( [ blog_validation_errors.DuplicateBlogTitleError( blog_post_summary_model_1 @@ -124,24 +130,26 @@ def test_run_with_same_titles_for_blog_posts(self) -> None: class FindDuplicateBlogPostUrlsJobTests(job_test_utils.JobTestBase): - JOB_CLASS = blog_validation_jobs.FindDuplicateBlogPostUrlsJob + JOB_CLASS: Type[ + blog_validation_jobs.FindDuplicateBlogPostUrlsJob + ] = blog_validation_jobs.FindDuplicateBlogPostUrlsJob def test_run_with_same_url_for_blog_posts(self) -> None: - blog_post_model_1 = self.create_model( # type: ignore[no-untyped-call] + blog_post_model_1 = self.create_model( blog_models.BlogPostModel, id='validblogid1', title='Sample Title 1', content='

    hello

    ,', author_id='user', url_fragment='url_fragment') - blog_post_model_2 = self.create_model( # type: ignore[no-untyped-call] + blog_post_model_2 = self.create_model( blog_models.BlogPostModel, id='validblogid2', title='Sample Title 2', content='

    hello tho

    ,', author_id='user', url_fragment='url_fragment') - blog_post_model_3 = self.create_model( # type: ignore[no-untyped-call] + blog_post_model_3 = self.create_model( blog_models.BlogPostModel, id='validblogid3', title='Sample Diff Title', @@ -149,14 +157,14 @@ def test_run_with_same_url_for_blog_posts(self) -> None: author_id='user', url_fragment='diff_url_fragment') - self.put_multi( # type: ignore[no-untyped-call] + self.put_multi( [ blog_post_model_1, blog_post_model_2, blog_post_model_3, ]) - self.assert_job_output_is( # type: ignore[no-untyped-call] + self.assert_job_output_is( [ blog_validation_errors.DuplicateBlogUrlError( blog_post_model_1 @@ -169,24 +177,26 @@ def test_run_with_same_url_for_blog_posts(self) -> None: class FindDuplicateBlogPostSummaryUrlsJobTests(job_test_utils.JobTestBase): - JOB_CLASS = blog_validation_jobs.FindDuplicateBlogPostSummaryUrlsJob + JOB_CLASS: Type[ + blog_validation_jobs.FindDuplicateBlogPostSummaryUrlsJob + ] = blog_validation_jobs.FindDuplicateBlogPostSummaryUrlsJob def test_run_with_same_url_for_blog_posts(self) -> None: - blog_post_summary_model_1 = self.create_model( # type: ignore[no-untyped-call] + blog_post_summary_model_1 = self.create_model( blog_models.BlogPostSummaryModel, id='validblogid1', title='Sample Title 1', summary='

    hello

    ,', author_id='user', url_fragment='url_fragment') - blog_post_summary_model_2 = self.create_model( # type: ignore[no-untyped-call] + blog_post_summary_model_2 = self.create_model( blog_models.BlogPostSummaryModel, id='validblogid2', title='Sample Title 2', summary='

    hello tho

    ,', author_id='user', url_fragment='url_fragment') - blog_post_summary_model_3 = self.create_model( # type: ignore[no-untyped-call] + blog_post_summary_model_3 = self.create_model( blog_models.BlogPostSummaryModel, id='validblogid3', title='Sample Diff Title', @@ -194,7 +204,7 @@ def test_run_with_same_url_for_blog_posts(self) -> None: author_id='user', url_fragment='diff_url_fragment') - self.put_multi( # type: ignore[no-untyped-call] + self.put_multi( [ blog_post_summary_model_1, blog_post_summary_model_2, @@ -202,7 +212,7 @@ def test_run_with_same_url_for_blog_posts(self) -> None: ] ) - self.assert_job_output_is(# type: ignore[no-untyped-call] + self.assert_job_output_is( [ blog_validation_errors.DuplicateBlogUrlError( blog_post_summary_model_1 @@ -211,3 +221,50 @@ def test_run_with_same_url_for_blog_posts(self) -> None: blog_post_summary_model_2 ), ]) + + +class FindDuplicateBlogAuthorDetailsModelForAuthorJobTests( + job_test_utils.JobTestBase +): + + JOB_CLASS: Type[ + blog_validation_jobs.FindDuplicateBlogAuthorDetailsModelForAuthorJob + ] = blog_validation_jobs.FindDuplicateBlogAuthorDetailsModelForAuthorJob + + def test_run_with_same_author_id_for_blog_posts(self) -> None: + author_details_model_1 = self.create_model( + blog_models.BlogAuthorDetailsModel, + id='validblogid1', + displayed_author_name='user one', + author_id='user', + author_bio='') + author_details_model_2 = self.create_model( + blog_models.BlogAuthorDetailsModel, + id='validblogid2', + displayed_author_name='user two', + author_id='user', + author_bio='author general bio') + author_details_model_3 = self.create_model( + blog_models.BlogAuthorDetailsModel, + id='validblogid3', + displayed_author_name='user name', + author_id='diffUserId', + author_bio='some author bio') + + self.put_multi( + [ + author_details_model_1, + author_details_model_2, + author_details_model_3, + ] + ) + + self.assert_job_output_is( + [ + blog_validation_errors.DuplicateBlogAuthorModelError( + 
author_details_model_1 + ), + blog_validation_errors.DuplicateBlogAuthorModelError( + author_details_model_2 + ), + ]) diff --git a/core/jobs/batch_jobs/collection_info_jobs.py b/core/jobs/batch_jobs/collection_info_jobs.py new file mode 100644 index 000000000000..cef0d4a402c4 --- /dev/null +++ b/core/jobs/batch_jobs/collection_info_jobs.py @@ -0,0 +1,127 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Jobs that extract Collection models information.""" + +from __future__ import annotations + +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam + +from typing import Iterable, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import collection_models + from mypy_imports import feedback_models + from mypy_imports import user_models + +(collection_models, feedback_models, user_models) = ( + models.Registry.import_models([ + models.Names.COLLECTION, models.Names.FEEDBACK, models.Names.USER + ]) +) + + +class GetCollectionOwnersEmailsJob(base_jobs.JobBase): + """Job that extracts collection id and user email from datastore.""" + + @staticmethod + def _extract_user_and_collection_ids( + collection_rights_model: collection_models.CollectionRightsModel + ) -> Iterable[Tuple[str, str]]: + 
"""Extracts user id and collection id. + + Args: + collection_rights_model: datastore_services.Model. + The collection rights model to extract user id and + collection id from. + + Yields: + (str,str). Tuple containing user id and collection id. + """ + for user_id in collection_rights_model.owner_ids: + yield (user_id, collection_rights_model.id) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + + collection_pairs = ( + self.pipeline + | 'get collection models ' >> ndb_io.GetModels( + collection_models.CollectionRightsModel.get_all()) + | 'Flatten owner_ids and format' >> beam.FlatMap( + self._extract_user_and_collection_ids) + ) + + user_pairs = ( + self.pipeline + | 'Get all user settings models' >> ndb_io.GetModels( + user_models.UserSettingsModel.get_all()) + | 'Extract id and email' >> beam.Map( + lambda user_setting: ( + user_setting.id, user_setting.email)) + ) + + collection_ids_to_email_mapping = ( + (collection_pairs, user_pairs) + | 'Group by user_id' >> beam.CoGroupByKey() + | 'Drop user id' >> beam.Values() # pylint: disable=no-value-for-parameter + | 'Filter out results without any collection' >> beam.Filter( + lambda collection_ids_and_email: len( + collection_ids_and_email[0]) > 0 + ) + ) + + return ( + collection_ids_to_email_mapping + | 'Get final result' >> beam.MapTuple( + lambda collection, email: job_run_result.JobRunResult.as_stdout( + 'collection_ids: %s, email: %s' % (collection, email) + )) + ) + + +class MatchEntityTypeCollectionJob(base_jobs.JobBase): + """Job that match entity_type as collection.""" + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of 'SUCCESS' or 'FAILURE' results from + matching entity_type as collection. + + Returns: + PCollection. A PCollection of 'SUCCESS' or 'FAILURE' results from + matching entity_type as collection. 
+ """ + feedback_model_matched_as_collection = ( + self.pipeline + | 'Get all GeneralFeedbackThread models' >> ndb_io.GetModels( + feedback_models.GeneralFeedbackThreadModel.get_all()) + | 'Extract entity_type' >> beam.Map( + lambda feeback_model: feeback_model.entity_type) + | 'Match entity_type' >> beam.Filter( + lambda entity_type: entity_type == 'collection') + ) + + return ( + feedback_model_matched_as_collection + | 'Count the output' >> ( + job_result_transforms.CountObjectsToJobRunResult()) + ) diff --git a/core/jobs/batch_jobs/collection_info_jobs_test.py b/core/jobs/batch_jobs/collection_info_jobs_test.py new file mode 100644 index 000000000000..8d70b0e96b81 --- /dev/null +++ b/core/jobs/batch_jobs/collection_info_jobs_test.py @@ -0,0 +1,238 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for jobs.batch_jobs.collection_info_jobs.""" + +from __future__ import annotations + +from core import feconf +from core.constants import constants +from core.jobs import job_test_utils +from core.jobs.batch_jobs import collection_info_jobs +from core.jobs.types import job_run_result +from core.platform import models + +from typing import Final, Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import collection_models + from mypy_imports import feedback_models + from mypy_imports import user_models + +(collection_models, feedback_models, user_models) = ( + models.Registry.import_models([ + models.Names.COLLECTION, models.Names.FEEDBACK, models.Names.USER + ]) +) + + +class GetCollectionOwnersEmailsJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + collection_info_jobs.GetCollectionOwnersEmailsJob + ] = collection_info_jobs.GetCollectionOwnersEmailsJob + + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_3: Final = 'id_3' + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_counts_single_collection(self) -> None: + user = self.create_model( + user_models.UserSettingsModel, + id=self.USER_ID_1, + email='some@email.com', + roles=[feconf.ROLE_ID_COLLECTION_EDITOR] + ) + user.update_timestamps() + collection = self.create_model( + collection_models.CollectionRightsModel, + id='col_1', + owner_ids=[self.USER_ID_1], + editor_ids=[self.USER_ID_1], + voice_artist_ids=[self.USER_ID_1], + community_owned=False, + status=constants.ACTIVITY_STATUS_PUBLIC, + viewable_if_private=False, + first_published_msec=0.2 + ) + collection.update_timestamps() + self.put_multi([user, collection]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout=( + 'collection_ids: [\'col_1\'], email: [\'some@email.com\']')) + ]) + + def test_counts_multiple_collection(self) -> None: + user1 = self.create_model( + user_models.UserSettingsModel, + id=self.USER_ID_1, + 
email='some@email.com', + roles=[feconf.ROLE_ID_COLLECTION_EDITOR] + ) + user2 = self.create_model( + user_models.UserSettingsModel, + id=self.USER_ID_2, + email='some2@email.com', + roles=[feconf.ROLE_ID_COLLECTION_EDITOR] + ) + # Checking a user who has no collection. + user3 = self.create_model( + user_models.UserSettingsModel, + id=self.USER_ID_3, + email='some3@email.com', + roles=[feconf.ROLE_ID_COLLECTION_EDITOR] + ) + user1.update_timestamps() + user2.update_timestamps() + user3.update_timestamps() + collection1 = self.create_model( + collection_models.CollectionRightsModel, + id='col_1', + owner_ids=[self.USER_ID_1, self.USER_ID_2], + editor_ids=[self.USER_ID_1], + voice_artist_ids=[self.USER_ID_1], + community_owned=False, + status=constants.ACTIVITY_STATUS_PUBLIC, + viewable_if_private=False, + first_published_msec=0.2 + ) + collection1.update_timestamps() + collection2 = self.create_model( + collection_models.CollectionRightsModel, + id='col_2', + owner_ids=[self.USER_ID_2], + editor_ids=[self.USER_ID_1], + voice_artist_ids=[self.USER_ID_1], + community_owned=False, + status=constants.ACTIVITY_STATUS_PUBLIC, + viewable_if_private=False, + first_published_msec=0.2 + ) + collection2.update_timestamps() + self.put_multi([user1, user2, user3, collection1, collection2]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout=( + 'collection_ids: [\'col_1\'], email: ' + '[\'some@email.com\']')), + job_run_result.JobRunResult( + stdout=( + 'collection_ids: [\'col_1\', \'col_2\'], email: ' + '[\'some2@email.com\']') + ) + ]) + + +class MatchEntityTypeCollectionJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + collection_info_jobs.MatchEntityTypeCollectionJob + ] = collection_info_jobs.MatchEntityTypeCollectionJob + + USER_ID: Final = 'user_1' + ENTITY_ID: Final = 'col_id_1' + ENTITY_ID_1: Final = 'exp_id_1' + ENTITY_ID_2: Final = 'top_id_1' + + ENTITY_TYPE: Final = 'collection' + ENTITY_TYPE_1: Final = feconf.ENTITY_TYPE_EXPLORATION + 
ENTITY_TYPE_2: Final = feconf.ENTITY_TYPE_TOPIC + + STATUS: Final = 'open' + SUBJECT: Final = 'dummy subject' + HAS_SUGGESTION: Final = True + SUMMARY: Final = 'This is a great summary.' + MESSAGE_COUNT: Final = 0 + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_match_single_collection(self) -> None: + feedback_thread_model = self.create_model( + feedback_models.GeneralFeedbackThreadModel, + id='%s.%s.%s' % (self.ENTITY_TYPE, self.ENTITY_ID, 'random'), + entity_type=self.ENTITY_TYPE, + entity_id=self.ENTITY_ID, + original_author_id=self.USER_ID, + status=self.STATUS, + subject=self.SUBJECT, + has_suggestion=self.HAS_SUGGESTION, + summary=self.SUMMARY, + message_count=self.MESSAGE_COUNT + ) + feedback_thread_model.update_timestamps() + feedback_thread_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SUCCESS: 1') + ]) + + def test_match_multiple_collection(self) -> None: + feedback_thread_model = self.create_model( + feedback_models.GeneralFeedbackThreadModel, + id='%s.%s.%s' % (self.ENTITY_TYPE, self.ENTITY_ID, 'random'), + entity_type=self.ENTITY_TYPE, + entity_id=self.ENTITY_ID, + original_author_id=self.USER_ID, + status=self.STATUS, + subject=self.SUBJECT, + has_suggestion=self.HAS_SUGGESTION, + summary=self.SUMMARY, + message_count=self.MESSAGE_COUNT + ) + feedback_thread_model.update_timestamps() + feedback_thread_model.put() + + feedback_thread_model1 = self.create_model( + feedback_models.GeneralFeedbackThreadModel, + id='%s.%s.%s' % (self.ENTITY_TYPE_1, self.ENTITY_ID_1, 'random'), + entity_type=self.ENTITY_TYPE_1, + entity_id=self.ENTITY_ID_1, + original_author_id=self.USER_ID, + status=self.STATUS, + subject=self.SUBJECT, + has_suggestion=self.HAS_SUGGESTION, + summary=self.SUMMARY, + message_count=self.MESSAGE_COUNT + ) + feedback_thread_model1.update_timestamps() + feedback_thread_model1.put() + + feedback_thread_model2 = self.create_model( + 
feedback_models.GeneralFeedbackThreadModel, + id='%s.%s.%s' % (self.ENTITY_TYPE_2, self.ENTITY_ID_2, 'random'), + entity_type=self.ENTITY_TYPE_2, + entity_id=self.ENTITY_ID_2, + original_author_id=self.USER_ID, + status=self.STATUS, + subject=self.SUBJECT, + has_suggestion=self.HAS_SUGGESTION, + summary=self.SUMMARY, + message_count=self.MESSAGE_COUNT + ) + feedback_thread_model2.update_timestamps() + feedback_thread_model2.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SUCCESS: 1') + ]) diff --git a/core/jobs/batch_jobs/email_deletion_jobs.py b/core/jobs/batch_jobs/email_deletion_jobs.py new file mode 100644 index 000000000000..3e8660f1da1f --- /dev/null +++ b/core/jobs/batch_jobs/email_deletion_jobs.py @@ -0,0 +1,138 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Validation Jobs for blog models""" + +from __future__ import annotations + +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import email_models + from mypy_imports import feedback_models + from mypy_imports import user_models + +(email_models, feedback_models, user_models) = models.Registry.import_models([ + models.Names.EMAIL, models.Names.FEEDBACK, models.Names.USER +]) + + +class DeleteUnneededEmailRelatedModelsJob(base_jobs.JobBase): + """Job that deletes emails models that belonged to users that were deleted + as part of the wipeout process. + """ + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + deleted_user_ids_collection = ( + self.pipeline + | 'Get all deleted user models' >> ndb_io.GetModels( + user_models.DeletedUserModel.get_all()) + | 'Extract user IDs' >> beam.Map( + lambda deleted_user_model: deleted_user_model.id) + ) + deleted_user_ids = beam.pvalue.AsIter(deleted_user_ids_collection) + + sent_email_models_to_delete = ( + self.pipeline + | 'Get all sent email models' >> ndb_io.GetModels( + email_models.SentEmailModel.get_all()) + | 'Filter sent email models that belong to deleted users' >> ( + beam.Filter( + lambda model, ids: ( + model.sender_id in ids or model.recipient_id in ids), + ids=deleted_user_ids + )) + ) + sent_email_models_to_delete_result = ( + sent_email_models_to_delete + | 'Count sent email models to be deleted' >> ( + job_result_transforms.CountObjectsToJobRunResult('SENT EMAILS')) + ) + + bulk_email_models_to_delete = ( + self.pipeline + | 'Get all bulk email models' >> ndb_io.GetModels( + email_models.BulkEmailModel.get_all()) + | 'Filter bulk email models that belong to deleted users' >> ( + beam.Filter( + lambda model, ids: model.sender_id in ids, + 
ids=deleted_user_ids + )) + ) + bulk_email_models_to_delete_result = ( + bulk_email_models_to_delete + | 'Count bulk email models to be deleted' >> ( + job_result_transforms.CountObjectsToJobRunResult('BULK EMAILS')) + ) + + unsent_feedback_email_models_to_delete = ( + self.pipeline + | 'Get all unsent feedback models' >> ndb_io.GetModels( + feedback_models.UnsentFeedbackEmailModel.get_all()) + | 'Filter unsent feedback models that belong to deleted users' >> ( + beam.Filter( + lambda model, ids: model.id in ids, ids=deleted_user_ids)) + ) + unsent_feedback_email_models_to_delete_result = ( + unsent_feedback_email_models_to_delete + | 'Count unsent feedback email models to be deleted' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'FEEDBACK EMAILS')) + ) + + user_bulk_emails_models_to_delete = ( + self.pipeline + | 'Get all user bulk email models' >> ndb_io.GetModels( + user_models.UserBulkEmailsModel.get_all()) + | 'Filter user bulk email models that belong to deleted users' >> ( + beam.Filter( + lambda model, ids: model.id in ids, ids=deleted_user_ids)) + ) + user_bulk_emails_models_to_delete_result = ( + user_bulk_emails_models_to_delete + | 'Count user bulk email models to be deleted' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'USER BULK EMAILS')) + ) + + unused_models_deletion = ( + ( + sent_email_models_to_delete, + bulk_email_models_to_delete, + unsent_feedback_email_models_to_delete, + user_bulk_emails_models_to_delete + ) + | 'Merge models' >> beam.Flatten() + | 'Extract keys' >> beam.Map(lambda model: model.key) + | 'Delete models' >> ndb_io.DeleteModels() + ) + + return ( + ( + sent_email_models_to_delete_result, + bulk_email_models_to_delete_result, + unsent_feedback_email_models_to_delete_result, + user_bulk_emails_models_to_delete_result, + ) + | 'Merge results' >> beam.Flatten() + ) diff --git a/core/jobs/batch_jobs/email_deletion_jobs_test.py b/core/jobs/batch_jobs/email_deletion_jobs_test.py new file mode 100644 index 
000000000000..7e246b81b807 --- /dev/null +++ b/core/jobs/batch_jobs/email_deletion_jobs_test.py @@ -0,0 +1,179 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for jobs.blog_validation_jobs.""" + +from __future__ import annotations + +import datetime + +from core import feconf +from core.jobs import job_test_utils +from core.jobs.batch_jobs import email_deletion_jobs +from core.jobs.types import job_run_result +from core.platform import models + +from typing import Final, Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import email_models + from mypy_imports import feedback_models + from mypy_imports import user_models + +(email_models, feedback_models, user_models) = models.Registry.import_models([ + models.Names.EMAIL, models.Names.FEEDBACK, models.Names.USER +]) + + +class DeleteUnneededEmailRelatedModelsJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + email_deletion_jobs.DeleteUnneededEmailRelatedModelsJob + ] = email_deletion_jobs.DeleteUnneededEmailRelatedModelsJob + + USER_ID: Final = 'user_id' + DATETIME: Final = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d') + + def setUp(self) -> None: + super().setUp() + + deleted_user_model = self.create_model( + user_models.DeletedUserModel, id=self.USER_ID + ) + deleted_user_model.update_timestamps() + deleted_user_model.put() + + self.sent_email_model_with_sender = self.create_model( + 
email_models.SentEmailModel, + id='sent_email_id1', + sender_id=self.USER_ID, + sender_email='sender@email.com', + recipient_id='recipient_id', + recipient_email='recipient@email.com', + intent=feconf.EMAIL_INTENT_SIGNUP, + subject='subject', + html_body='html_body', + sent_datetime=self.DATETIME + ) + self.sent_email_model_with_recipient = self.create_model( + email_models.SentEmailModel, + id='sent_email_id2', + sender_id='sender_id', + sender_email='sender@email.com', + recipient_id=self.USER_ID, + recipient_email='recipient@email.com', + intent=feconf.EMAIL_INTENT_SIGNUP, + subject='subject', + html_body='html_body', + sent_datetime=self.DATETIME + ) + self.bulk_email_model = self.create_model( + email_models.BulkEmailModel, + id='bulk_email_id', + sender_id=self.USER_ID, + sender_email='sender@email.com', + intent=feconf.BULK_EMAIL_INTENT_MARKETING, + subject='subject', + html_body='html_body', + sent_datetime=self.DATETIME + ) + self.unsent_feedback_email_model = self.create_model( + feedback_models.UnsentFeedbackEmailModel, id=self.USER_ID + ) + self.user_bulk_emails_model = self.create_model( + user_models.UserBulkEmailsModel, id=self.USER_ID + ) + + def test_job_deletes_sent_email_model_with_user_as_sender(self) -> None: + self.sent_email_model_with_sender.update_timestamps() + self.sent_email_model_with_sender.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SENT EMAILS SUCCESS: 1') + ]) + + self.assertIsNone( + email_models.SentEmailModel.get('sent_email_id', strict=False)) + + def test_job_deletes_sent_email_model_with_user_as_recipient(self) -> None: + self.sent_email_model_with_recipient.update_timestamps() + self.sent_email_model_with_recipient.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SENT EMAILS SUCCESS: 1') + ]) + + self.assertIsNone( + email_models.SentEmailModel.get('sent_email_id', strict=False)) + + def test_job_deletes_bulk_email_model_with_user_as_sender(self) -> None: + 
self.bulk_email_model.update_timestamps() + self.bulk_email_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='BULK EMAILS SUCCESS: 1') + ]) + + self.assertIsNone( + email_models.BulkEmailModel.get('bulk_email_id', strict=False)) + + def test_job_deletes_unsent_feedback_email_model(self) -> None: + self.unsent_feedback_email_model.update_timestamps() + self.unsent_feedback_email_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='FEEDBACK EMAILS SUCCESS: 1') + ]) + + self.assertIsNone( + feedback_models.UnsentFeedbackEmailModel.get( + self.USER_ID, strict=False)) + + def test_job_deletes_bulk_email_model(self) -> None: + self.user_bulk_emails_model.update_timestamps() + self.user_bulk_emails_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='USER BULK EMAILS SUCCESS: 1') + ]) + + self.assertIsNone( + user_models.UserBulkEmailsModel.get(self.USER_ID, strict=False)) + + def test_job_deletes_multiple_models(self) -> None: + self.sent_email_model_with_sender.update_timestamps() + self.sent_email_model_with_recipient.update_timestamps() + self.bulk_email_model.update_timestamps() + self.unsent_feedback_email_model.update_timestamps() + self.user_bulk_emails_model.update_timestamps() + self.put_multi([ + self.sent_email_model_with_sender, + self.sent_email_model_with_recipient, + self.bulk_email_model, + self.unsent_feedback_email_model, + self.user_bulk_emails_model + ]) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SENT EMAILS SUCCESS: 2'), + job_run_result.JobRunResult(stdout='BULK EMAILS SUCCESS: 1'), + job_run_result.JobRunResult(stdout='FEEDBACK EMAILS SUCCESS: 1'), + job_run_result.JobRunResult(stdout='USER BULK EMAILS SUCCESS: 1'), + ]) + + self.assertIsNone( + user_models.UserBulkEmailsModel.get(self.USER_ID, strict=False)) diff --git a/core/jobs/batch_jobs/exp_migration_jobs.py b/core/jobs/batch_jobs/exp_migration_jobs.py new file mode 100644 
index 000000000000..532e0988e4a2 --- /dev/null +++ b/core/jobs/batch_jobs/exp_migration_jobs.py @@ -0,0 +1,748 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Jobs used for migrating the exploration models.""" + +from __future__ import annotations + +import logging + +from core import feconf +from core.constants import constants +from core.domain import exp_domain +from core.domain import exp_fetchers +from core.domain import exp_services +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.transforms import results_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam +import result +from typing import Iterable, Sequence, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + from mypy_imports import exp_models + +(base_models, exp_models) = ( + models.Registry.import_models( + [models.Names.BASE_MODEL, models.Names.EXPLORATION])) +datastore_services = models.Registry.import_datastore_services() + + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. 
Thus to avoid MyPy's error +# (Class cannot subclass 'PTransform' (has type 'Any')), we added an +# ignore here. +class MigrateExplorationModels(beam.PTransform): # type: ignore[misc] + """Transform that gets all Exploration models, performs migration + and filters any error results. + """ + + @staticmethod + def _migrate_exploration( + exp_model: exp_models.ExplorationModel, + exp_is_published: bool + ) -> result.Result[ + Tuple[str, exp_domain.Exploration], + Tuple[str, Exception] + ]: + """Migrates exploration and transform exploration model into + exploration object. + + Args: + exp_model: ExplorationModel. The exploration model to migrate. + exp_is_published: bool. Whether the exploration is published or not. + + Returns: + Result((str, Exploration), (str, Exception)). Result containing + tuple that consists of exploration ID and either Exploration object + or Exception. Exploration object is returned when the migration was + successful and Exception is returned otherwise. + """ + try: + exploration = exp_fetchers.get_exploration_from_model(exp_model) + exploration.validate(strict=exp_is_published) + + with datastore_services.get_ndb_context(): + if exp_services.get_story_id_linked_to_exploration( + exp_model.id) is not None: + exp_services.validate_exploration_for_story( + exploration, True) + + except Exception as e: + logging.exception(e) + return result.Err((exp_model.id, e)) + + return result.Ok((exp_model.id, exploration)) + + @staticmethod + def _generate_exploration_changes( + exp_id: str, exp_model: exp_models.ExplorationModel + ) -> Iterable[Tuple[str, exp_domain.ExplorationChange]]: + """Generates exploration change objects. The ExplorationChange object + is only generated when the exploration's states schema version is lower + than the latest schema version. + + Args: + exp_id: str. The ID of the exploration. + exp_model: ExplorationModel. The exploration for which to generate + the change objects. + + Yields: + (str, ExplorationChange). 
Tuple containing exploration ID and + ExplorationChange object. + """ + exp_states_version = exp_model.states_schema_version + if exp_states_version < feconf.CURRENT_STATE_SCHEMA_VERSION: + exp_change = exp_domain.ExplorationChange({ + 'cmd': ( + exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION), + 'from_version': str(exp_states_version), + 'to_version': str(feconf.CURRENT_STATE_SCHEMA_VERSION) + }) + yield (exp_id, exp_change) + + def expand( + self, pipeline: beam.Pipeline + ) -> Tuple[ + beam.PCollection[base_models.BaseModel], + beam.PCollection[job_run_result.JobRunResult] + ]: + """Migrate exploration objects and flush the input + in case of errors. + + Args: + pipeline: Pipeline. Input beam pipeline. + + Returns: + (PCollection, PCollection). Tuple containing + PCollection of models which should be put into the datastore and + a PCollection of results from the exploration migration. + """ + unmigrated_exploration_models = ( + pipeline + | 'Get all non-deleted exploration models' >> ( + ndb_io.GetModels( + exp_models.ExplorationModel.get_all( + include_deleted=False))) + # Pylint disable is needed because pylint is not able to correctly + # detect that the value is passed through the pipe. 
+ | 'Add exploration keys' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda exp_model: exp_model.id) + ) + + exp_publication_status = ( + pipeline + | 'Get all non-deleted exploration rights models' >> ( + ndb_io.GetModels(exp_models.ExplorationRightsModel.get_all())) + | 'Extract publication status' >> beam.Map( + lambda exp_rights: ( + exp_rights.id, + exp_rights.status == constants.ACTIVITY_STATUS_PUBLIC + ) + ) + ) + + all_migrated_exp_results = ( + ( + unmigrated_exploration_models, + exp_publication_status + ) + | 'Merge model and staus' >> beam.CoGroupByKey() + | 'Get rid of exp ID' >> beam.Values() # pylint: disable=no-value-for-parameter + | 'Transform and migrate model' >> beam.MapTuple( # pylint: disable=no-value-for-parameter + lambda exploration_models, status: self._migrate_exploration( + exploration_models[0], status[0])) + ) + + migrated_exp_job_run_results = ( + all_migrated_exp_results + | 'Generate results for migration' >> ( + job_result_transforms.ResultsToJobRunResults('EXP PROCESSED')) + ) + + filtered_migrated_exp = ( + all_migrated_exp_results + | 'Filter migration results' >> ( + results_transforms.DrainResultsOnError()) + ) + migrated_exp = ( + filtered_migrated_exp + | 'Unwrap ok' >> beam.Map( + lambda result_item: result_item.unwrap()) + ) + + exp_changes = ( + unmigrated_exploration_models + | 'Generate exploration changes' >> beam.FlatMapTuple( + self._generate_exploration_changes) + ) + + exp_objects_list = ( + { + 'exp_model': unmigrated_exploration_models, + 'exploration': migrated_exp, + 'exp_changes': exp_changes + } + | 'Merge objects' >> beam.CoGroupByKey() + | 'Get rid of ID' >> beam.Values() # pylint: disable=no-value-for-parameter + ) + + transformed_exp_objects_list = ( + exp_objects_list + | 'Remove unmigrated explorations' >> beam.Filter( + lambda x: ( + len(x['exp_changes']) > 0 and + len(x['exploration']) > 0 + )) + | 'Reorganize the exploration objects' >> beam.Map(lambda objects: { + 'exp_model': 
objects['exp_model'][0], + 'exploration': objects['exploration'][0], + 'exp_changes': objects['exp_changes'] + }) + ) + + exp_objects_list_job_run_results = ( + transformed_exp_objects_list + | 'Transform exp objects into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'EXP MIGRATED')) + ) + + already_migrated_job_run_results = ( + exp_objects_list + | 'Remove migrated explorations' >> beam.Filter( + lambda x: ( + len(x['exp_changes']) == 0 and len(x['exploration']) > 0 + )) + | 'Transform previously migrated exps into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'EXP PREVIOUSLY MIGRATED')) + ) + + job_run_results = ( + migrated_exp_job_run_results, + exp_objects_list_job_run_results, + already_migrated_job_run_results + ) | 'Flatten job run results' >> beam.Flatten() + + return ( + transformed_exp_objects_list, + job_run_results + ) + + +class MigrateExplorationJob(base_jobs.JobBase): + """Job that migrates Exploration models.""" + + @staticmethod + def _update_exploration( + exp_model: exp_models.ExplorationModel, + migrated_exp: exp_domain.Exploration, + exp_changes: Sequence[exp_domain.ExplorationChange] + ) -> result.Result[ + Tuple[base_models.BaseModel], + Tuple[str, Exception] + ]: + """Generates newly updated exploration models. + + Args: + exp_model: ExplorationModel. The exploration which should be + updated. + migrated_exp: Exploration. The migrated exploration domain + object. + exp_changes: Sequence(ExplorationChange). The exploration changes + to apply. + + Returns: + Sequence(BaseModel). Sequence of models which should be put into + the datastore. + """ + try: + updated_exp_model = ( + exp_services.populate_exp_model_fields( + exp_model, migrated_exp)) + + commit_message = ( + 'Update exploration states schema version to %d.' 
+ ) % ( + feconf.CURRENT_STATE_SCHEMA_VERSION + ) + models_to_put_values = [] + with datastore_services.get_ndb_context(): + models_to_put_values = ( + exp_services + .compute_models_to_put_when_saving_new_exp_version( + feconf.MIGRATION_BOT_USERNAME, + updated_exp_model.id, + exp_changes, + commit_message, + ) + ) + datastore_services.update_timestamps_multi( + list(models_to_put_values)) + except Exception as e: + logging.exception(e) + return result.Err((exp_model.id, e)) + + return result.Ok(models_to_put_values) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the exploration migration. + + Returns: + PCollection. A PCollection of results from the exploration + migration. + """ + + transformed_exp_objects_list, job_run_results = ( + self.pipeline + | 'Perform migration and filter migration results' >> ( + MigrateExplorationModels()) + ) + + exp_related_models_results = ( + transformed_exp_objects_list + | 'Generate exploration models to put' >> beam.Map( + lambda exp_objects: self._update_exploration( + exp_objects['exp_model'], + exp_objects['exploration'], + exp_objects['exp_changes'], + )) + ) + + exp_related_models_to_put = ( + exp_related_models_results + | 'Filter results with oks' >> beam.Filter( + lambda result_item: result_item.is_ok()) + | 'Unwrap models' >> beam.FlatMap( + lambda result_item: result_item.unwrap()) + ) + + exp_related_models_job_results = ( + exp_related_models_results + | 'Generate results for exp related models' >> ( + job_result_transforms.ResultsToJobRunResults( + 'EXP RELATED MODELS GENERATED')) + ) + unused_put_results = ( + exp_related_models_to_put + | 'Filter None models' >> beam.Filter(lambda x: x is not None) + | 'Put models into datastore' >> ndb_io.PutModels() + ) + + return ( + ( + job_run_results, + exp_related_models_job_results + ) + | beam.Flatten() + ) + + +class AuditExplorationMigrationJob(base_jobs.JobBase): + """Job that migrates Exploration 
models.""" + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the audit of exploration + migration. + + Returns: + PCollection. A PCollection of results from the exploration + migration. + """ + + unused_transformed_exp_objects_list, job_run_results = ( + self.pipeline + | 'Perform migration and filter migration results' >> ( + MigrateExplorationModels()) + ) + + return job_run_results + + +class RegenerateMissingExplorationStatsModelsJob(base_jobs.JobBase): + """Job that regenerates missing exploration stats models.""" + + @staticmethod + def _regenerate_stats_models( + exp_id: str, + unused_exp_model: exp_models.ExplorationModel + ) -> result.Result[ + Tuple[str, exp_domain.Exploration], + Tuple[str, Exception] + ]: + """Regenerates missing exploration stats models. + + Args: + exp_id: str. The ID of the exploration. + unused_exp_model: ExplorationModel. Exploration model. + + Returns: + Result((str, Exploration), (str, Exception)). Result containing + tuple that consists of exploration ID and either Exploration object + or Exception. Exploration object is returned when the regeneration + was successful and Exception is returned otherwise. + """ + results = None + try: + with datastore_services.get_ndb_context(): + results = ( + exp_services.regenerate_missing_stats_for_exploration( + exp_id + ) + ) + except Exception as e: + logging.exception(e) + return result.Err((exp_id, e)) + + return result.Ok((exp_id, results)) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the stats regeneration. + + Returns: + PCollection. A PCollection of results from the stats regeneration. 
    def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
        """Returns a PCollection of results from the stats regeneration.

        Returns:
            PCollection. A PCollection of results from the stats regeneration.
        """

        unmigrated_exploration_models = (
            self.pipeline
            | 'Get all non-deleted exploration models' >> (
                ndb_io.GetModels(
                    exp_models.ExplorationModel.get_all(
                        include_deleted=False)))
            # Pylint disable is needed because pylint is not able to correctly
            # detect that the value is passed through the pipe.
            | 'Add exploration keys' >> beam.WithKeys( # pylint: disable=no-value-for-parameter
                lambda exp_model: exp_model.id)
            # TODO(#15871): This filter should be removed after the explorations
            # are fixed and it is possible to migrate them.
            | 'Remove broken exploration' >> beam.Filter(
                lambda id_and_exp: id_and_exp[0] not in (
                    'umPkwp0L1M0-', '670bU6d9JGBh'))
        )

        # Each element is a Result: Ok((exp_id, stats)) on success,
        # Err((exp_id, exception)) on failure.
        regenerated_stats_results = (
            unmigrated_exploration_models
            | 'Transform and migrate model' >> beam.MapTuple( # pylint: disable=no-value-for-parameter
                self._regenerate_stats_models)
        )

        regenerated_stats_job_run_results = (
            regenerated_stats_results
            | 'Generate results for migration' >> (
                job_result_transforms.ResultsToJobRunResults('EXP PROCESSED'))
        )

        return regenerated_stats_job_run_results
+ """ + with datastore_services.get_ndb_context(): + latest_exploration = exp_fetchers.get_exploration_by_id( + exp_id, strict=False) + if latest_exploration is None: + return result.Err( + (exp_id, Exception('Exploration does not exist.')) + ) + + exploration_model = exp_models.ExplorationModel.get(exp_id) + if (exploration_model.states_schema_version != + feconf.CURRENT_STATE_SCHEMA_VERSION): + return result.Err( + ( + exp_id, + Exception('Exploration is not at latest schema version') + ) + ) + + try: + latest_exploration.validate() + except Exception: + return result.Err( + ( + exp_id, + Exception( + 'Exploration %s failed non-strict validation' + % exp_id + ) + ) + ) + + # Some (very) old explorations do not have a states schema version. + # These explorations have snapshots that were created before the + # states_schema_version system was introduced. We therefore set + # their states schema version to 0, since we now expect all + # snapshots to explicitly include this field. + if 'states_schema_version' not in exp_snapshot_model.content: + exp_snapshot_model.content['states_schema_version'] = 0 + + target_state_schema_version = feconf.CURRENT_STATE_SCHEMA_VERSION + current_state_schema_version = ( + exp_snapshot_model.content['states_schema_version'] + ) + if current_state_schema_version == target_state_schema_version: + return result.Err( + ( + exp_id, + Exception( + 'Snapshot is already at latest schema version' + ) + ) + ) + + versioned_exploration_states = ( + exp_domain.VersionedExplorationStatesDict( + states_schema_version=current_state_schema_version, + states=exp_snapshot_model.content['states'] + ) + ) + while ( # pragma: no branch + current_state_schema_version < target_state_schema_version + ): + try: + with datastore_services.get_ndb_context(): + exp_domain.Exploration.update_states_from_model( + versioned_exploration_states, + current_state_schema_version, + exp_id, + exploration_model.language_code) + current_state_schema_version += 1 + except 
Exception as e: + error_message = ( + 'Exploration snapshot %s failed migration to states ' + 'v%s: %s' % ( + exp_id, current_state_schema_version + 1, e)) + logging.exception(error_message) + return result.Err((exp_id, Exception(error_message))) + + if target_state_schema_version == current_state_schema_version: + return result.Ok((exp_id, 'SUCCESS')) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the audit of exploration + snapshot migration. + + Returns: + PCollection. A PCollection of results from the exploration + snapshot migration. + """ + unmigrated_exploration_models = ( + self.pipeline + | 'Get all exploration snapshot content models' >> ( + ndb_io.GetModels( + exp_models.ExplorationSnapshotContentModel.get_all( + include_deleted=False))) + # Pylint disable is needed because pylint is not able to correctly + # detect that the value is passed through the pipe. + | 'Add exploration keys' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda model: model.get_unversioned_instance_id()) + ) + + migrated_exp_results = ( + unmigrated_exploration_models + | 'Transform and migrate model' >> beam.MapTuple( # pylint: disable=no-value-for-parameter + self._migrate_exploration_snapshot_model) + ) + + migrated_exp_job_run_results = ( + migrated_exp_results + | 'Generate results for migration' >> ( + job_result_transforms.ResultsToJobRunResults('EXP PROCESSED')) + ) + + return migrated_exp_job_run_results + + +class ExpSnapshotsMigrationJob(base_jobs.JobBase): + """A reusable one-time job that may be used to migrate exploration schema + versions. This job will load all snapshots of all existing explorations + from the datastore and immediately store them back into the datastore. + The loading process of an exploration in exp_services automatically + performs schema updating. 
This job persists that conversion work, keeping + explorations up-to-date and improving the load time of new explorations. + + NOTE TO DEVELOPERS: Make sure to run ExpSnapshotsMigrationAuditJob before + running this job. + """ + + @staticmethod + def _migrate_exploration_snapshot_model( + exp_id: str, + exp_snapshot_model: exp_models.ExplorationSnapshotContentModel + ) -> result.Result[ + Tuple[str, Exception] + ]: + """Migrates exploration snapshot model and saves it in the datastore. + + Args: + exp_id: str. The ID of the exploration. + exp_snapshot_model: ExplorationSnapshotContentModel. The + snapshot model to migrate. + + Returns: + Result((str, Exploration), (str, Exception)). Result containing + tuple that consists of exploration ID and Exception if any. + """ + with datastore_services.get_ndb_context(): + latest_exploration = exp_fetchers.get_exploration_by_id( + exp_id, strict=False) + if latest_exploration is None: + return result.Err( + (exp_id, Exception('Exploration does not exist.')) + ) + + exploration_model = exp_models.ExplorationModel.get(exp_id) + if (exploration_model.states_schema_version != + feconf.CURRENT_STATE_SCHEMA_VERSION): + return result.Err( + ( + exp_id, + Exception('Exploration is not at latest schema version') + ) + ) + + try: + latest_exploration.validate() + except Exception: + return result.Err( + ( + exp_id, + Exception( + 'Exploration %s failed non-strict validation' % exp_id + ) + ) + ) + + # Some (very) old explorations do not have a states schema version. + # These explorations have snapshots that were created before the + # states_schema_version system was introduced. We therefore set their + # states schema version to 0, since we now expect all snapshots to + # explicitly include this field. 
+ if 'states_schema_version' not in exp_snapshot_model.content: + exp_snapshot_model.content['states_schema_version'] = 0 + + target_state_schema_version = feconf.CURRENT_STATE_SCHEMA_VERSION + current_state_schema_version = ( + exp_snapshot_model.content['states_schema_version'] + ) + if current_state_schema_version == target_state_schema_version: + return result.Err( + ( + exp_id, + Exception('Snapshot is already at latest schema version') + ) + ) + + versioned_exploration_states = ( + exp_domain.VersionedExplorationStatesDict( + states_schema_version=current_state_schema_version, + states=exp_snapshot_model.content['states'] + ) + ) + while current_state_schema_version < target_state_schema_version: + try: + with datastore_services.get_ndb_context(): + exp_domain.Exploration.update_states_from_model( + versioned_exploration_states, + current_state_schema_version, + exp_id, + exploration_model.language_code) + current_state_schema_version += 1 + except Exception as e: + error_message = ( + 'Exploration snapshot %s failed migration to states ' + 'v%s: %s' % ( + exp_id, current_state_schema_version + 1, e)) + logging.exception(error_message) + return result.Err((exp_id, Exception(error_message))) + + exp_snapshot_model.content['states'] = ( + versioned_exploration_states['states'] + ) + exp_snapshot_model.content['states_schema_version'] = ( + current_state_schema_version + ) + with datastore_services.get_ndb_context(): + exp_snapshot_model.update_timestamps(update_last_updated_time=False) + exp_snapshot_model.put() + + return result.Ok((exp_id, 'SUCCESS')) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the audit of exploration + snapshot migration. + + Returns: + PCollection. A PCollection of results from the exploration + snapshot migration. 
    def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
        """Returns a PCollection of results from the exploration snapshot
        migration. Unlike the audit job, the migrated snapshots are persisted
        to the datastore by _migrate_exploration_snapshot_model.

        Returns:
            PCollection. A PCollection of results from the exploration
            snapshot migration.
        """
        unmigrated_exploration_models = (
            self.pipeline
            | 'Get all exploration snapshot content models' >> (
                ndb_io.GetModels(
                    exp_models.ExplorationSnapshotContentModel.get_all(
                        include_deleted=False)))
            # Pylint disable is needed because pylint is not able to correctly
            # detect that the value is passed through the pipe.
            | 'Add exploration keys' >> beam.WithKeys( # pylint: disable=no-value-for-parameter
                lambda model: model.get_unversioned_instance_id())
        )

        # Each element is a Result: Ok((exp_id, 'SUCCESS')) when the snapshot
        # was migrated and saved, Err((exp_id, exception)) otherwise.
        migrated_exp_results = (
            unmigrated_exploration_models
            | 'Transform and migrate model' >> beam.MapTuple( # pylint: disable=no-value-for-parameter
                self._migrate_exploration_snapshot_model)
        )

        migrated_exp_job_run_results = (
            migrated_exp_results
            | 'Generate results for migration' >> (
                job_result_transforms.ResultsToJobRunResults('EXP PROCESSED'))
        )

        return migrated_exp_job_run_results
+ +"""Unit tests for jobs.batch_jobs.exp_migration_jobs.""" + +from __future__ import annotations + +from core import feconf +from core import utils +from core.constants import constants +from core.domain import caching_services +from core.domain import exp_domain +from core.domain import exp_fetchers +from core.domain import exp_services +from core.domain import opportunity_services +from core.domain import rights_domain +from core.domain import rights_manager +from core.domain import story_domain +from core.domain import story_services +from core.domain import topic_domain +from core.domain import topic_services +from core.domain import translation_domain +from core.domain import translation_services +from core.domain import user_services +from core.jobs import job_test_utils +from core.jobs.batch_jobs import exp_migration_jobs +from core.jobs.types import job_run_result +from core.platform import models +from core.tests import test_utils + +from typing import Sequence + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + from mypy_imports import opportunity_models + from mypy_imports import stats_models + from mypy_imports import translation_models + +( + exp_models, opportunity_models, + stats_models, translation_models +) = models.Registry.import_models([ + models.Names.EXPLORATION, models.Names.OPPORTUNITY, + models.Names.STATISTICS, models.Names.TRANSLATION +]) + + +EXP_V46_DICT = utils.dict_from_yaml( +""" +author_notes: '' +auto_tts_enabled: true +blurb: '' +category: Art +correctness_feedback_enabled: true +edits_allowed: true +init_state_name: (untitled state) +language_code: en +objective: Objective for the exploration... +param_changes: [] +param_specs: {} +schema_version: 46 +states: + (untitled state): + classifier_model_id: null + content: + content_id: content + html: '' + interaction: + answer_groups: + - outcome: + dest: END + feedback: + content_id: feedback_1 + html:

    Correct!

    + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + rule_specs: + - inputs: + x: + - -

    Choice 1

    + -

    Choice 2

    + rule_type: IsEqualToOrdering + - inputs: + x: + - -

    Choice 1

    + rule_type: IsEqualToOrderingWithOneItemAtIncorrectPosition + - inputs: + x:

    Choice 1

    + y: 1 + rule_type: HasElementXAtPositionY + - inputs: + x:

    Choice 1

    + y:

    Choice 2

    + rule_type: HasElementXBeforeElementY + tagged_skill_misconception_id: null + training_data: [] + confirmed_unclassified_answers: [] + customization_args: + allowMultipleItemsInSamePosition: + value: true + choices: + value: + - content_id: ca_choices_2 + html:

    Choice 1

    + - content_id: ca_choices_3 + html:

    Choice 2

    + default_outcome: + dest: (untitled state) + feedback: + content_id: default_outcome + html: '' + labelled_as_correct: false + missing_prerequisite_skill_id: null + param_changes: [] + refresher_exploration_id: null + hints: [] + id: DragAndDropSortInput + solution: null + linked_skill_id: null + next_content_id_index: 4 + param_changes: [] + recorded_voiceovers: + voiceovers_mapping: + ca_choices_2: {} + ca_choices_3: {} + content: {} + default_outcome: {} + feedback_1: {} + solicit_answer_details: false + written_translations: + translations_mapping: + ca_choices_2: {} + ca_choices_3: {} + content: {} + default_outcome: {} + feedback_1: {} + END: + classifier_model_id: null + content: + content_id: content + html:

    Congratulations, you have finished!

    def test_migrated_exp_is_not_migrated(self) -> None:
        # Explorations created through exp_services are already at the latest
        # states schema version, so the job should report them as previously
        # migrated and must not create a new commit.
        exploration = exp_domain.Exploration.create_default_exploration(
            self.NEW_EXP_ID, title=self.EXP_TITLE, category='category')
        exp_services.save_new_exploration(
            feconf.SYSTEM_COMMITTER_ID, exploration)

        self.assertEqual(
            exploration.states_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)

        self.assert_job_output_is([
            job_run_result.JobRunResult(
                stdout='EXP PREVIOUSLY MIGRATED SUCCESS: 1'),
            job_run_result.JobRunResult(
                stdout='EXP PROCESSED SUCCESS: 1')
        ])

        # The model version is unchanged, i.e. no migration commit was made.
        exp_model = exp_models.ExplorationModel.get(self.NEW_EXP_ID)
        self.assertEqual(exp_model.version, 1)
self.EXP_ID_ONE, [feconf.SYSTEM_COMMITTER_ID], + [], [], []) + commit_cmds = [{'cmd': rights_domain.CMD_CREATE_NEW}] + + exp_models.ExplorationRightsModel( + id=exploration_rights.id, + owner_ids=exploration_rights.owner_ids, + editor_ids=exploration_rights.editor_ids, + voice_artist_ids=exploration_rights.voice_artist_ids, + viewer_ids=exploration_rights.viewer_ids, + community_owned=exploration_rights.community_owned, + status=exploration_rights.status, + viewable_if_private=exploration_rights.viewable_if_private, + first_published_msec=exploration_rights.first_published_msec, + ).commit( + feconf.SYSTEM_COMMITTER_ID, 'Created new exploration', commit_cmds) + exp_model = self.create_model( + exp_models.ExplorationModel, + id=self.EXP_ID_ONE, + title='title', + category=' category', + init_state_name='Introduction', + states_schema_version=49) + exp_model.update_timestamps() + exp_model.commit( + feconf.SYSTEM_COMMITTER_ID, 'Create exploration', [{ + 'cmd': exp_domain.CMD_CREATE_NEW + }]) + # Save a valid unmigrated exploration. 
+ exp_model = exp_models.ExplorationModel( + id=self.EXP_ID_TWO, + category=EXP_V46_DICT['category'], + title=EXP_V46_DICT['title'], + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], + states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.EXP_ID_TWO, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit( + feconf.SYSTEM_COMMITTER_ID, + 'Created new exploration', + commit_cmds + ) + + self.assertEqual(exp_model.states_schema_version, 41) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_one\', ''ValidationError(' + '\'Names should not start or end with whitespace.\'))": 1' + ) + ), + job_run_result.JobRunResult(stdout='EXP PROCESSED SUCCESS: 1') + ]) + + migrated_exp_model = exp_models.ExplorationModel.get(self.EXP_ID_ONE) + self.assertEqual(migrated_exp_model.version, 1) + migrated_exp_model = exp_models.ExplorationModel.get(self.EXP_ID_TWO) + self.assertEqual(migrated_exp_model.version, 1) + + def create_story_linked_to_exploration(self) -> None: + """Creates a new story linked to the test exploration.""" + topic_id = 'topic_id_1' + story_id = 'story_id_1' + + topic = topic_domain.Topic.create_default_topic( + topic_id, 'topic', 'abbrev', 'description', 'fragment') + topic.thumbnail_filename = 'thumbnail.svg' + topic.thumbnail_bg_color = '#C6DCDA' + topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-url')] + topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = 
['skill_id_1'] + topic_services.save_new_topic(feconf.SYSTEM_COMMITTER_ID, topic) + topic_services.publish_topic(topic_id, feconf.SYSTEM_COMMITTER_ID) + + story = story_domain.Story.create_default_story( + story_id, 'A story title', 'description', topic_id, + 'story-one') + story_services.save_new_story(feconf.SYSTEM_COMMITTER_ID, story) + topic_services.add_canonical_story( + feconf.SYSTEM_COMMITTER_ID, topic_id, story_id) + topic_services.publish_story( + topic_id, story_id, feconf.SYSTEM_COMMITTER_ID) + change_list = [ + story_domain.StoryChange({ + 'cmd': story_domain.CMD_ADD_STORY_NODE, + 'node_id': story_domain.NODE_ID_PREFIX + '1', + 'title': 'Title 1' + }), + story_domain.StoryChange({ + 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, + 'property_name': ( + story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID), + 'node_id': story_domain.NODE_ID_PREFIX + '1', + 'old_value': None, + 'new_value': self.NEW_EXP_ID + }) + ] + story_services.update_story( + feconf.SYSTEM_COMMITTER_ID, story_id, change_list, + 'Added node.') + + def test_unmigrated_valid_published_exp_migrates(self) -> None: + exp_model = exp_models.ExplorationModel( + id=self.NEW_EXP_ID, + category=EXP_V46_DICT['category'], + title=EXP_V46_DICT['title'], + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], + states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.NEW_EXP_ID, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit(feconf.SYSTEM_COMMITTER_ID, '', []) + exp_summary_model = exp_models.ExpSummaryModel(**{ + 'id': self.NEW_EXP_ID, + 'title': exp_model.title, + 'category': 
exp_model.category, + 'objective': exp_model.objective, + 'language_code': exp_model.language_code, + 'tags': exp_model.tags, + 'ratings': None, + 'scaled_average_rating': 4.0, + 'exploration_model_last_updated': exp_model.last_updated, + 'exploration_model_created_on': exp_model.created_on, + 'first_published_msec': None, + 'status': constants.ACTIVITY_STATUS_PRIVATE, + 'community_owned': False, + 'owner_ids': [feconf.SYSTEM_COMMITTER_ID], + 'editor_ids': [], + 'voice_artist_ids': [], + 'viewer_ids': [], + 'contributor_ids': [], + 'contributors_summary': {}, + 'version': exp_model.version + }) + exp_summary_model.update_timestamps() + exp_summary_model.put() + + for lang_code in ['hi', 'bn']: + translation_models.EntityTranslationsModel.create_new( + feconf.TranslatableEntityType.EXPLORATION.value, + exp_model.id, + exp_model.version, + lang_code, + {} + ).put() + + all_translation_models: ( + Sequence[translation_models.EntityTranslationsModel]) = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(all_translation_models), 2) + + owner_action = user_services.get_user_actions_info( + feconf.SYSTEM_COMMITTER_ID) + exp_services.publish_exploration_and_update_user_profiles( + owner_action, self.NEW_EXP_ID) + opportunity_model = ( + opportunity_models.ExplorationOpportunitySummaryModel( + id=self.NEW_EXP_ID, + topic_id='topic_id1', + topic_name='topic', + story_id='story_id_1', + story_title='A story title', + chapter_title='Title 1', + content_count=20, + incomplete_translation_language_codes=['hi', 'ar'], + translation_counts={'hi': 1, 'ar': 2}, + language_codes_needing_voice_artists=['en'], + language_codes_with_assigned_voice_artists=[])) + opportunity_model.put() + + self.create_story_linked_to_exploration() + + self.assertEqual(exp_model.states_schema_version, 41) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='EXP MIGRATED SUCCESS: 1'), + job_run_result.JobRunResult(stdout='EXP PROCESSED SUCCESS: 1'), 
+ job_run_result.JobRunResult( + stdout='EXP RELATED MODELS GENERATED SUCCESS: 1') + ]) + + updated_opp_model = ( + opportunity_models.ExplorationOpportunitySummaryModel.get( + self.NEW_EXP_ID)) + updated_opp_summary = ( + opportunity_services + .get_exploration_opportunity_summary_from_model( + updated_opp_model)) + + expected_opp_summary_dict = { + 'id': 'exp_1', + 'topic_name': 'topic', + 'chapter_title': 'Title 1', + 'story_title': 'A story title', + 'content_count': 4, + 'translation_counts': { + 'hi': 0, + 'bn': 0 + }, + 'translation_in_review_counts': {}} + + self.assertEqual( + updated_opp_summary.to_dict(), expected_opp_summary_dict) + + all_translation_models = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(all_translation_models), 4) + self.assertItemsEqual( + [m.entity_version for m in all_translation_models], [1, 1, 2, 2]) + + def test_unmigrated_invalid_published_exp_raise_error(self) -> None: + exp_model = exp_models.ExplorationModel( + id=self.NEW_EXP_ID, + category=EXP_V46_DICT['category'], + title='', + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], + states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.NEW_EXP_ID, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit(feconf.SYSTEM_COMMITTER_ID, '', []) + exp_summary_model = exp_models.ExpSummaryModel(**{ + 'id': self.NEW_EXP_ID, + 'title': exp_model.title, + 'category': exp_model.category, + 'objective': exp_model.objective, + 'language_code': exp_model.language_code, + 'tags': exp_model.tags, + 'ratings': None, + 'scaled_average_rating': 4.0, 
+ 'exploration_model_last_updated': exp_model.last_updated, + 'exploration_model_created_on': exp_model.created_on, + 'first_published_msec': None, + 'status': constants.ACTIVITY_STATUS_PRIVATE, + 'community_owned': False, + 'owner_ids': [feconf.SYSTEM_COMMITTER_ID], + 'editor_ids': [], + 'voice_artist_ids': [], + 'viewer_ids': [], + 'contributor_ids': [], + 'contributors_summary': {}, + 'version': exp_model.version + }) + exp_summary_model.update_timestamps() + exp_summary_model.put() + + owner_action = user_services.get_user_actions_info( + feconf.SYSTEM_COMMITTER_ID) + exp_services.publish_exploration_and_update_user_profiles( + owner_action, self.NEW_EXP_ID) + opportunity_model = ( + opportunity_models.ExplorationOpportunitySummaryModel( + id=self.NEW_EXP_ID, + topic_id='topic_id1', + topic_name='topic', + story_id='story_id_1', + story_title='A story title', + chapter_title='Title 1', + content_count=20, + incomplete_translation_language_codes=['hi', 'ar'], + translation_counts={'hi': 1, 'ar': 2}, + language_codes_needing_voice_artists=['en'], + language_codes_with_assigned_voice_artists=[])) + opportunity_model.put() + + self.create_story_linked_to_exploration() + + self.assertEqual(exp_model.states_schema_version, 41) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', stderr=( + 'EXP PROCESSED ERROR: "(\'exp_1\', ValidationError("Please ' + 'fix the following issues before saving this exploration: ' + '1. A title must be specified (in the \'Settings\' tab). 
' + '"))": 1' + ) + ) + ]) + + def test_unmigrated_exp_with_invalid_related_data_raise_error(self) -> None: + exp_model = exp_models.ExplorationModel( + id=self.NEW_EXP_ID, + category=EXP_V46_DICT['category'], + title='A title', + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], + states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.NEW_EXP_ID, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit(feconf.SYSTEM_COMMITTER_ID, '', []) + exp_summary_model = exp_models.ExpSummaryModel(**{ + 'id': self.NEW_EXP_ID, + 'title': exp_model.title, + 'category': exp_model.category, + 'objective': exp_model.objective, + 'language_code': exp_model.language_code, + 'tags': exp_model.tags, + 'ratings': None, + 'scaled_average_rating': 4.0, + 'exploration_model_last_updated': exp_model.last_updated, + 'exploration_model_created_on': exp_model.created_on, + 'first_published_msec': None, + 'status': constants.ACTIVITY_STATUS_PRIVATE, + 'community_owned': False, + 'owner_ids': [feconf.SYSTEM_COMMITTER_ID], + 'editor_ids': [], + 'voice_artist_ids': [], + 'viewer_ids': [], + 'contributor_ids': [], + 'contributors_summary': {}, + 'version': exp_model.version + }) + exp_summary_model.update_timestamps() + exp_summary_model.put() + + owner_action = user_services.get_user_actions_info( + feconf.SYSTEM_COMMITTER_ID) + exp_services.publish_exploration_and_update_user_profiles( + owner_action, self.NEW_EXP_ID) + opportunity_model = ( + opportunity_models.ExplorationOpportunitySummaryModel( + id=self.NEW_EXP_ID, + topic_id='topic_id1', + topic_name='topic', + story_id='story_id_1', + 
story_title='A story title', + chapter_title='Title 1', + content_count=20, + incomplete_translation_language_codes=['hi', 'ar'], + translation_counts={'hi': 1, 'ar': 2}, + language_codes_needing_voice_artists=['en'], + language_codes_with_assigned_voice_artists=[])) + opportunity_model.put() + + self.create_story_linked_to_exploration() + + self.assertEqual(exp_model.states_schema_version, 41) + + with self.swap_to_always_raise( + translation_services, + 'compute_translation_related_change', + Exception('Error generating related models') + ): + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'EXP RELATED MODELS GENERATED ERROR: \"(' + '\'exp_1\', Exception(' + '\'Error generating related models\'' + '))\": 1' + ) + ), + job_run_result.JobRunResult(stdout='EXP PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult(stdout='EXP MIGRATED SUCCESS: 1') + + ]) + + +# Exploration migration backend tests with BEAM jobs involves creating and +# publishing the exploration. This requires a ElasticSearch stub for running +# while the backend tests run. JobTestBase does not initialize a +# ElasticSearch stub, so MigrateExplorationJobTests also inherits from +# GenericTestBase to successfully emulate the exploration publishing and +# verify the migration. 
+class AuditExplorationMigrationJobTests( + job_test_utils.JobTestBase, test_utils.GenericTestBase +): + + JOB_CLASS = exp_migration_jobs.AuditExplorationMigrationJob + + NEW_EXP_ID = 'exp_1' + EXP_TITLE = 'title' + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_migrated_exp_is_not_migrated(self) -> None: + exploration = exp_domain.Exploration.create_default_exploration( + self.NEW_EXP_ID, title=self.EXP_TITLE, category='category') + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + + self.assertEqual( + exploration.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='EXP PREVIOUSLY MIGRATED SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='EXP PROCESSED SUCCESS: 1') + ]) + + exp_model = exp_models.ExplorationModel.get(self.NEW_EXP_ID) + self.assertEqual(exp_model.version, 1) + + def test_broken_exp_is_not_migrated(self) -> None: + exploration_rights = rights_domain.ActivityRights( + self.NEW_EXP_ID, [feconf.SYSTEM_COMMITTER_ID], + [], [], []) + commit_cmds = [{'cmd': rights_domain.CMD_CREATE_NEW}] + exp_models.ExplorationRightsModel( + id=exploration_rights.id, + owner_ids=exploration_rights.owner_ids, + editor_ids=exploration_rights.editor_ids, + voice_artist_ids=exploration_rights.voice_artist_ids, + viewer_ids=exploration_rights.viewer_ids, + community_owned=exploration_rights.community_owned, + status=exploration_rights.status, + viewable_if_private=exploration_rights.viewable_if_private, + first_published_msec=exploration_rights.first_published_msec, + ).commit( + feconf.SYSTEM_COMMITTER_ID, 'Created new exploration', commit_cmds) + + exp_model = self.create_model( + exp_models.ExplorationModel, + id=self.NEW_EXP_ID, + title='title', + category=' category', + init_state_name='Introduction', + states_schema_version=49) + exp_model.update_timestamps() + exp_model.commit( + feconf.SYSTEM_COMMITTER_ID, 
'Create exploration', [{ + 'cmd': exp_domain.CMD_CREATE_NEW + }]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_1\', ''ValidationError(' + '\'Names should not start or end with whitespace.\'))": 1' + ) + ) + ]) + + migrated_exp_model = exp_models.ExplorationModel.get(self.NEW_EXP_ID) + self.assertEqual(migrated_exp_model.version, 1) + + def create_story_linked_to_exploration(self) -> None: + """Creates a new story linked to the test exploration.""" + topic_id = 'topic_id_1' + story_id = 'story_id_1' + + topic = topic_domain.Topic.create_default_topic( + topic_id, 'topic', 'abbrev', 'description', 'fragment') + topic.thumbnail_filename = 'thumbnail.svg' + topic.thumbnail_bg_color = '#C6DCDA' + topic.subtopics = [ + topic_domain.Subtopic( + 1, 'Title', ['skill_id_1'], 'image.svg', + constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, + 'dummy-subtopic-url')] + topic.next_subtopic_id = 2 + topic.skill_ids_for_diagnostic_test = ['skill_id_1'] + topic_services.save_new_topic(feconf.SYSTEM_COMMITTER_ID, topic) + topic_services.publish_topic(topic_id, feconf.SYSTEM_COMMITTER_ID) + + story = story_domain.Story.create_default_story( + story_id, 'A story title', 'description', topic_id, + 'story-one') + story_services.save_new_story(feconf.SYSTEM_COMMITTER_ID, story) + topic_services.add_canonical_story( + feconf.SYSTEM_COMMITTER_ID, topic_id, story_id) + topic_services.publish_story( + topic_id, story_id, feconf.SYSTEM_COMMITTER_ID) + change_list = [ + story_domain.StoryChange({ + 'cmd': story_domain.CMD_ADD_STORY_NODE, + 'node_id': story_domain.NODE_ID_PREFIX + '1', + 'title': 'Title 1' + }), + story_domain.StoryChange({ + 'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY, + 'property_name': ( + story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID), + 'node_id': story_domain.NODE_ID_PREFIX + '1', + 'old_value': None, + 'new_value': self.NEW_EXP_ID + }) + ] + story_services.update_story( + 
feconf.SYSTEM_COMMITTER_ID, story_id, change_list, + 'Added node.') + + def test_unmigrated_exp_is_migrated(self) -> None: + exp_model = exp_models.ExplorationModel( + id=self.NEW_EXP_ID, + category=EXP_V46_DICT['category'], + title=EXP_V46_DICT['title'], + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], + states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.NEW_EXP_ID, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit(feconf.SYSTEM_COMMITTER_ID, '', []) + exp_summary_model = exp_models.ExpSummaryModel(**{ + 'id': self.NEW_EXP_ID, + 'title': exp_model.title, + 'category': exp_model.category, + 'objective': exp_model.objective, + 'language_code': exp_model.language_code, + 'tags': exp_model.tags, + 'ratings': None, + 'scaled_average_rating': 4.0, + 'exploration_model_last_updated': exp_model.last_updated, + 'exploration_model_created_on': exp_model.created_on, + 'first_published_msec': None, + 'status': constants.ACTIVITY_STATUS_PRIVATE, + 'community_owned': False, + 'owner_ids': [feconf.SYSTEM_COMMITTER_ID], + 'editor_ids': [], + 'voice_artist_ids': [], + 'viewer_ids': [], + 'contributor_ids': [], + 'contributors_summary': {}, + 'version': exp_model.version + }) + exp_summary_model.update_timestamps() + exp_summary_model.put() + + translation_models.EntityTranslationsModel.create_new( + feconf.TranslatableEntityType.EXPLORATION.value, + exp_model.id, + exp_model.version, + 'hi', + {} + ).put() + + all_translation_models: ( + Sequence[translation_models.EntityTranslationsModel]) = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + 
self.assertEqual( + len(all_translation_models), 1) + + owner_action = user_services.get_user_actions_info( + feconf.SYSTEM_COMMITTER_ID) + exp_services.publish_exploration_and_update_user_profiles( + owner_action, self.NEW_EXP_ID) + opportunity_model = ( + opportunity_models.ExplorationOpportunitySummaryModel( + id=self.NEW_EXP_ID, + topic_id='topic_id1', + topic_name='topic', + story_id='story_id_1', + story_title='A story title', + chapter_title='Title 1', + content_count=20, + incomplete_translation_language_codes=['hi', 'ar'], + translation_counts={'hi': 1, 'ar': 2}, + language_codes_needing_voice_artists=['en'], + language_codes_with_assigned_voice_artists=[])) + opportunity_model.put() + + self.create_story_linked_to_exploration() + + exploration_model = exp_models.ExplorationModel.get(self.NEW_EXP_ID) + self.assertEqual(exploration_model.states_schema_version, 41) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='EXP PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='EXP MIGRATED SUCCESS: 1', stderr='') + ]) + + updated_opp_model = ( + opportunity_models.ExplorationOpportunitySummaryModel.get( + self.NEW_EXP_ID)) + updated_opp_summary = ( + opportunity_services + .get_exploration_opportunity_summary_from_model( + updated_opp_model)) + + expected_opp_summary_dict = { + 'id': 'exp_1', + 'topic_name': 'topic', + 'chapter_title': 'Title 1', + 'story_title': 'A story title', + 'content_count': 4, + 'translation_counts': { + 'hi': 0 + }, + 'translation_in_review_counts': {} + } + + self.assertEqual( + updated_opp_summary.to_dict(), expected_opp_summary_dict) + + def test_unmigrated_invalid_published_exp_raise_error(self) -> None: + exp_model = exp_models.ExplorationModel( + id=self.NEW_EXP_ID, + category=EXP_V46_DICT['category'], + title='', + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], 
+ states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.NEW_EXP_ID, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit(feconf.SYSTEM_COMMITTER_ID, '', []) + exp_summary_model = exp_models.ExpSummaryModel(**{ + 'id': self.NEW_EXP_ID, + 'title': exp_model.title, + 'category': exp_model.category, + 'objective': exp_model.objective, + 'language_code': exp_model.language_code, + 'tags': exp_model.tags, + 'ratings': None, + 'scaled_average_rating': 4.0, + 'exploration_model_last_updated': exp_model.last_updated, + 'exploration_model_created_on': exp_model.created_on, + 'first_published_msec': None, + 'status': constants.ACTIVITY_STATUS_PRIVATE, + 'community_owned': False, + 'owner_ids': [feconf.SYSTEM_COMMITTER_ID], + 'editor_ids': [], + 'voice_artist_ids': [], + 'viewer_ids': [], + 'contributor_ids': [], + 'contributors_summary': {}, + 'version': exp_model.version + }) + exp_summary_model.update_timestamps() + exp_summary_model.put() + + owner_action = user_services.get_user_actions_info( + feconf.SYSTEM_COMMITTER_ID) + exp_services.publish_exploration_and_update_user_profiles( + owner_action, self.NEW_EXP_ID) + opportunity_model = ( + opportunity_models.ExplorationOpportunitySummaryModel( + id=self.NEW_EXP_ID, + topic_id='topic_id1', + topic_name='topic', + story_id='story_id_1', + story_title='A story title', + chapter_title='Title 1', + content_count=20, + incomplete_translation_language_codes=['hi', 'ar'], + translation_counts={'hi': 1, 'ar': 2}, + language_codes_needing_voice_artists=['en'], + language_codes_with_assigned_voice_artists=[])) + opportunity_model.put() + + self.create_story_linked_to_exploration() + + self.assertEqual(exp_model.states_schema_version, 41) + + 
self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', stderr=( + 'EXP PROCESSED ERROR: "(\'exp_1\', ValidationError("Please ' + 'fix the following issues before saving this exploration: ' + '1. A title must be specified (in the \'Settings\' tab). ' + '"))": 1' + ) + ) + ]) + + +class RegenerateMissingExplorationStatsModelsJobTests( + job_test_utils.JobTestBase, + test_utils.GenericTestBase +): + """Tests for the RegenerateExplorationStatsJob.""" + + JOB_CLASS = exp_migration_jobs.RegenerateMissingExplorationStatsModelsJob + + NEW_EXP_ID = 'exp_1' + EXP_TITLE = 'title' + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_job_regenerates_missing_stats_models(self) -> None: + exp_id = 'ID1' + owner_id = 'owner_id' + self.save_new_default_exploration(exp_id, 'owner_id') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 1' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 2' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 3' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 4' + })], 'Changed title.') + exp_services.update_exploration( + owner_id, exp_id, [exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, + 'property_name': 'title', + 'new_value': 'New title 5' + })], 'Changed title.') + exp_stats_model_for_version_2 = ( + 
stats_models.ExplorationStatsModel.get_model(exp_id, 2) + ) + assert exp_stats_model_for_version_2 is not None + exp_stats_model_for_version_2.delete() + + exp_stats_model_for_version_4 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 4) + ) + assert exp_stats_model_for_version_4 is not None + exp_stats_model_for_version_4.delete() + + self.assertIsNone( + stats_models.ExplorationStatsModel.get_model(exp_id, 2) + ) + self.assertIsNone( + stats_models.ExplorationStatsModel.get_model(exp_id, 4) + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='EXP PROCESSED SUCCESS: 1') + ]) + + self.assertIsNotNone( + stats_models.ExplorationStatsModel.get_model(exp_id, 2) + ) + self.assertIsNotNone( + stats_models.ExplorationStatsModel.get_model(exp_id, 4) + ) + + def test_job_regenerates_missing_stats_models_when_no_models_exist( + self + ) -> None: + exp_id = 'ID1' + self.save_new_default_exploration(exp_id, 'owner_id') + exp_stats_model_for_version_1 = ( + stats_models.ExplorationStatsModel.get_model(exp_id, 1) + ) + assert exp_stats_model_for_version_1 is not None + exp_stats_model_for_version_1.delete() + + self.assertIsNone( + stats_models.ExplorationStatsModel.get_model(exp_id, 1) + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'ID1\', ' + 'Exception(\'No ExplorationStatsModels found\'))": 1' + ) + ) + ]) + + self.assertIsNotNone( + stats_models.ExplorationStatsModel.get_model(exp_id, 1) + ) + + +class ExpSnapshotsMigrationAuditJobTests( + job_test_utils.JobTestBase, + test_utils.GenericTestBase +): + """Tests for ExplorationMigrationAuditJob.""" + + JOB_CLASS = exp_migration_jobs.ExpSnapshotsMigrationAuditJob + ALBERT_EMAIL = 'albert@example.com' + ALBERT_NAME = 'albert' + + VALID_EXP_ID = 'exp_id0' + NEW_EXP_ID = 'exp_id1' + EXP_TITLE = 'title' + + def test_migration_audit_job_does_not_convert_up_to_date_exp(self) -> None: + """Tests that the snapshot migration audit job 
does not convert a + snapshot that is already the latest states schema version. + """ + # Create a new, default exploration whose snapshots should not be + # affected by the job. + exploration = exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + content_id_generator = translation_domain.ContentIdGenerator() + init_state = exploration.states[exploration.init_state_name] + self.set_interaction_for_state( + init_state, 'EndExploration', content_id_generator) + exploration.next_content_id_index = ( + content_id_generator.next_content_id_index) + init_state.update_interaction_default_outcome(None) + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + self.assertEqual( + exploration.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + # Start migration job on sample exploration. + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'Snapshot' + ' is already at latest schema version\'))": 1' + ) + ) + ]) + + def test_migration_audit_job_skips_deleted_explorations(self) -> None: + """Tests that the snapshot migration job skips deleted explorations + and does not attempt to migrate any of the snapshots. + """ + exploration = exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + content_id_generator = translation_domain.ContentIdGenerator() + init_state = exploration.states[exploration.init_state_name] + self.set_interaction_for_state( + init_state, 'EndExploration', content_id_generator) + exploration.next_content_id_index = ( + content_id_generator.next_content_id_index) + init_state.update_interaction_default_outcome(None) + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + + # Note: This creates a summary based on the upgraded model (which is + # fine). A summary is needed to delete the exploration. 
+ exp_services.regenerate_exploration_and_contributors_summaries( + self.VALID_EXP_ID) + + # Delete the exploration before migration occurs. + exp_services.delete_exploration( + feconf.SYSTEM_COMMITTER_ID, self.VALID_EXP_ID) + + # Ensure the exploration is deleted. + with self.assertRaisesRegex(Exception, 'Entity .* not found'): + exp_fetchers.get_exploration_by_id(self.VALID_EXP_ID) + + # Start migration job on sample exploration. + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', ' + 'Exception(\'Exploration does not exist.\'))": 2' + ) + ) + ]) + + # Ensure the exploration is still deleted. + with self.assertRaisesRegex(Exception, 'Entity .* not found'): + exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) + + def test_migration_job_audit_success(self) -> None: + """Test that the audit job runs correctly on snapshots that use a + previous state schema. + """ + exp_model = exp_models.ExplorationModel( + id=self.VALID_EXP_ID, + category=EXP_V46_DICT['category'], + title=EXP_V46_DICT['title'], + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], + states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.VALID_EXP_ID, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit(feconf.SYSTEM_COMMITTER_ID, '', []) + exp_summary_model = exp_models.ExpSummaryModel(**{ + 'id': self.VALID_EXP_ID, + 'title': exp_model.title, + 'category': exp_model.category, + 'objective': exp_model.objective, + 'language_code': exp_model.language_code, + 'tags': exp_model.tags, + 'ratings': None, + 
'scaled_average_rating': 4.0, + 'exploration_model_last_updated': exp_model.last_updated, + 'exploration_model_created_on': exp_model.created_on, + 'first_published_msec': None, + 'status': constants.ACTIVITY_STATUS_PRIVATE, + 'community_owned': False, + 'owner_ids': [feconf.SYSTEM_COMMITTER_ID], + 'editor_ids': [], + 'voice_artist_ids': [], + 'viewer_ids': [], + 'contributor_ids': [], + 'contributors_summary': {}, + 'version': exp_model.version + }) + exp_summary_model.update_timestamps() + exp_summary_model.put() + self.assertLess( + exp_model.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + # Bring the main exploration to the latest schema. + caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, + [self.VALID_EXP_ID]) + latest_schema_version = str(feconf.CURRENT_STATE_SCHEMA_VERSION) + migration_change_list = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION, + 'from_version': '46', + 'to_version': latest_schema_version + }) + ] + exp_services.update_exploration( + feconf.SYSTEM_COMMITTER_ID, + self.VALID_EXP_ID, + migration_change_list, + 'Ran Exploration Migration job.' + ) + exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID) + self.assertEqual( + exploration_model.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + # Start migration job on sample exploration. + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'Snapshot' + ' is already at latest schema version\'))": 1' + ) + ), + job_run_result.JobRunResult( + stdout='EXP PROCESSED SUCCESS: 1', + stderr='' + ) + ]) + + def test_migration_job_audit_failure(self) -> None: + """Test that the audit job catches any errors that would otherwise + occur during the migration. 
+ """ + exp_model = exp_models.ExplorationModel( + id=self.VALID_EXP_ID, + category=EXP_V46_DICT['category'], + title=EXP_V46_DICT['title'], + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], + states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.VALID_EXP_ID, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit(feconf.SYSTEM_COMMITTER_ID, '', []) + exp_summary_model = exp_models.ExpSummaryModel(**{ + 'id': self.VALID_EXP_ID, + 'title': exp_model.title, + 'category': exp_model.category, + 'objective': exp_model.objective, + 'language_code': exp_model.language_code, + 'tags': exp_model.tags, + 'ratings': None, + 'scaled_average_rating': 4.0, + 'exploration_model_last_updated': exp_model.last_updated, + 'exploration_model_created_on': exp_model.created_on, + 'first_published_msec': None, + 'status': constants.ACTIVITY_STATUS_PRIVATE, + 'community_owned': False, + 'owner_ids': [feconf.SYSTEM_COMMITTER_ID], + 'editor_ids': [], + 'voice_artist_ids': [], + 'viewer_ids': [], + 'contributor_ids': [], + 'contributors_summary': {}, + 'version': exp_model.version + }) + exp_summary_model.update_timestamps() + exp_summary_model.put() + + # Bring the main exploration to the latest schema. 
+ caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, + [self.VALID_EXP_ID]) + latest_schema_version = str(feconf.CURRENT_STATE_SCHEMA_VERSION) + migration_change_list = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION, + 'from_version': '42', + 'to_version': latest_schema_version + }) + ] + exp_services.update_exploration( + feconf.SYSTEM_COMMITTER_ID, + self.VALID_EXP_ID, + migration_change_list, + 'Ran Exploration Migration job.' + ) + exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID) + self.assertEqual( + exploration_model.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + # Make a mock conversion function that raises an error when trying to + # convert the old snapshot. + mock_conversion = classmethod( + lambda cls, exploration_dict: exploration_dict['property_that_dne']) + + with self.swap( + exp_domain.Exploration, '_convert_states_v46_dict_to_v47_dict', + mock_conversion + ): + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception("' + 'Exploration snapshot exp_id0 failed migration to ' + 'states v47: \'property_that_dne\'"))": 1' + ) + ), + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'' + 'Snapshot is already at latest schema version\'))": 1' + ) + ) + ]) + + def test_audit_job_detects_invalid_exploration(self) -> None: + exploration = exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, + exploration + ) + + exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID) + exploration_model.language_code = 'invalid_language_code' + exploration_model.commit( + feconf.SYSTEM_COMMITTER_ID, 'Changed language_code.', []) + caching_services.delete_multi( + 
caching_services.CACHE_NAMESPACE_EXPLORATION, None, + [self.VALID_EXP_ID]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', ' + 'Exception(\'Exploration exp_id0 failed non-strict ' + 'validation\'))": 2' + ) + ) + ]) + + def test_audit_job_detects_exploration_that_is_not_up_to_date(self) -> None: + swap_states_schema_41 = self.swap( + feconf, 'CURRENT_STATE_SCHEMA_VERSION', 41) + swap_exp_schema_46 = self.swap( + exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46) + with swap_states_schema_41, swap_exp_schema_46: + exploration = exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + self.assertLess( + exploration.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + swap_states_schema_42 = self.swap( + feconf, 'CURRENT_STATE_SCHEMA_VERSION', 42) + swap_exp_schema_47 = self.swap( + exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 47) + with swap_states_schema_42, swap_exp_schema_47: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'' + 'Exploration is not at latest schema version\'))": 1' + ) + ) + ]) + + def test_audit_job_handles_missing_states_schema_version(self) -> None: + swap_exp_schema_37 = self.swap( + exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 37) + with swap_exp_schema_37: + with self.swap(feconf, 'CURRENT_STATE_SCHEMA_VERSION', 44): + exploration = exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + + # Bring the main exploration to the latest schema. 
+ caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, + [self.VALID_EXP_ID]) + migration_change_list = [ + exp_domain.ExplorationChange({ + 'cmd': ( + exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION), + 'from_version': '41', + 'to_version': '44' + }) + ] + with self.swap(feconf, 'CURRENT_STATE_SCHEMA_VERSION', 44): + exp_services.update_exploration( + feconf.SYSTEM_COMMITTER_ID, + self.VALID_EXP_ID, + migration_change_list, + 'Ran Exploration Migration job.' + ) + exploration_model = exp_models.ExplorationModel.get( + self.VALID_EXP_ID) + self.assertEqual(exploration_model.states_schema_version, 44) + + # Modify the snapshot to have no states schema version. (This + # implies a schema version of 0.) + snapshot_content_model = ( + exp_models.ExplorationSnapshotContentModel.get( + '%s-1' % self.VALID_EXP_ID)) + del snapshot_content_model.content['states_schema_version'] + snapshot_content_model.update_timestamps( + update_last_updated_time=False) + snapshot_content_model.put() + + # There is no failure due to a missing states schema version. 
+ with self.swap(feconf, 'CURRENT_STATE_SCHEMA_VERSION', 44): + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception("' + 'Exploration snapshot exp_id0 failed migration to ' + 'states v1: type object \'Exploration\' has no ' + 'attribute \'_convert_states_v0_dict_to_v1_dict\'' + '"))": 1' + ) + ), + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'' + 'Snapshot is already at latest schema version\'))":' + ' 1' + ) + ) + ]) + + +class ExpSnapshotsMigrationJobTests( + job_test_utils.JobTestBase, + test_utils.GenericTestBase +): + + JOB_CLASS = exp_migration_jobs.ExpSnapshotsMigrationJob + ALBERT_EMAIL = 'albert@example.com' + ALBERT_NAME = 'albert' + + VALID_EXP_ID = 'exp_id0' + NEW_EXP_ID = 'exp_id1' + EXP_TITLE = 'title' + + def test_migration_job_does_not_convert_up_to_date_exp(self) -> None: + """Tests that the exploration migration job does not convert a + snapshot that is already the latest states schema version. + """ + # Create a new, default exploration that should not be affected by the + # job. + exploration = exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + content_id_generator = translation_domain.ContentIdGenerator() + init_state = exploration.states[exploration.init_state_name] + self.set_interaction_for_state( + init_state, 'EndExploration', content_id_generator) + exploration.next_content_id_index = ( + content_id_generator.next_content_id_index) + init_state.update_interaction_default_outcome(None) + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + self.assertEqual( + exploration.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + # Start migration job on sample exploration. 
+ self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'' + 'Snapshot is already at latest schema version\'))": 1' + ) + ) + ]) + + def test_migration_job_succeeds_on_default_exploration(self) -> None: + exp_model = exp_models.ExplorationModel( + id=self.VALID_EXP_ID, + category=EXP_V46_DICT['category'], + title=EXP_V46_DICT['title'], + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], + states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.VALID_EXP_ID, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit(feconf.SYSTEM_COMMITTER_ID, '', []) + exp_summary_model = exp_models.ExpSummaryModel(**{ + 'id': self.VALID_EXP_ID, + 'title': exp_model.title, + 'category': exp_model.category, + 'objective': exp_model.objective, + 'language_code': exp_model.language_code, + 'tags': exp_model.tags, + 'ratings': None, + 'scaled_average_rating': 4.0, + 'exploration_model_last_updated': exp_model.last_updated, + 'exploration_model_created_on': exp_model.created_on, + 'first_published_msec': None, + 'status': constants.ACTIVITY_STATUS_PRIVATE, + 'community_owned': False, + 'owner_ids': [feconf.SYSTEM_COMMITTER_ID], + 'editor_ids': [], + 'voice_artist_ids': [], + 'viewer_ids': [], + 'contributor_ids': [], + 'contributors_summary': {}, + 'version': exp_model.version + }) + exp_summary_model.update_timestamps() + exp_summary_model.put() + + # Bring the main exploration to the latest schema. 
+ caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, + [self.VALID_EXP_ID]) + latest_schema_version = str(feconf.CURRENT_STATE_SCHEMA_VERSION) + migration_change_list = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION, + 'from_version': '46', + 'to_version': latest_schema_version + }) + ] + exp_services.update_exploration( + feconf.SYSTEM_COMMITTER_ID, + self.VALID_EXP_ID, + migration_change_list, + 'Ran Exploration Migration job.' + ) + exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID) + self.assertEqual( + exploration_model.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + # Start migration job on sample exploration. + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'Snapshot' + ' is already at latest schema version\'))": 1' + ) + ), + job_run_result.JobRunResult( + stdout='EXP PROCESSED SUCCESS: 1', + stderr='' + ) + ]) + + def test_migration_job_skips_deleted_explorations(self) -> None: + """Tests that the exploration migration job skips deleted explorations + and does not attempt to migrate. + """ + exploration = exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + content_id_generator = translation_domain.ContentIdGenerator() + init_state = exploration.states[exploration.init_state_name] + self.set_interaction_for_state( + init_state, 'EndExploration', content_id_generator) + exploration.next_content_id_index = ( + content_id_generator.next_content_id_index) + init_state.update_interaction_default_outcome(None) + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + + # Note: This creates a summary based on the upgraded model (which is + # fine). A summary is needed to delete the exploration. 
+ exp_services.regenerate_exploration_and_contributors_summaries( + self.VALID_EXP_ID) + + # Delete the exploration before migration occurs. + exp_services.delete_exploration( + feconf.SYSTEM_COMMITTER_ID, self.VALID_EXP_ID) + + # Ensure the exploration is deleted. + with self.assertRaisesRegex(Exception, 'Entity .* not found'): + exp_fetchers.get_exploration_by_id(self.VALID_EXP_ID) + + # Start migration job on sample exploration. + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', ' + 'Exception(\'Exploration does not exist.\'))": 2' + ) + ) + ]) + + # Ensure the exploration is still deleted. + with self.assertRaisesRegex(Exception, 'Entity .* not found'): + exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID) + + def test_migration_job_detects_invalid_exploration(self) -> None: + exploration = exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + + exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID) + exploration_model.language_code = 'invalid_language_code' + exploration_model.commit( + feconf.SYSTEM_COMMITTER_ID, 'Changed language_code.', []) + caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, + [self.VALID_EXP_ID]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', ' + 'Exception(\'Exploration exp_id0 failed non-strict ' + 'validation\'))": 2' + ) + ) + ]) + + def test_migration_job_detects_exploration_that_is_not_up_to_date( + self + ) -> None: + swap_states_schema_41 = self.swap( + feconf, 'CURRENT_STATE_SCHEMA_VERSION', 41) + swap_exp_schema_46 = self.swap( + exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 46) + with swap_states_schema_41, swap_exp_schema_46: + exploration = 
exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + self.assertLess( + exploration.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + swap_states_schema_42 = self.swap( + feconf, 'CURRENT_STATE_SCHEMA_VERSION', 42) + swap_exp_schema_47 = self.swap( + exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 47) + with swap_states_schema_42, swap_exp_schema_47: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'' + 'Exploration is not at latest schema version\'))": 1' + ) + ) + ]) + + def test_migration_job_audit_failure(self) -> None: + exp_model = exp_models.ExplorationModel( + id=self.VALID_EXP_ID, + category=EXP_V46_DICT['category'], + title=EXP_V46_DICT['title'], + objective=EXP_V46_DICT['objective'], + language_code=EXP_V46_DICT['language_code'], + tags=EXP_V46_DICT['tags'], + blurb=EXP_V46_DICT['blurb'], + author_notes=EXP_V46_DICT['author_notes'], + states_schema_version=EXP_V46_DICT['states_schema_version'], + init_state_name=EXP_V46_DICT['init_state_name'], + states=EXP_V46_DICT['states'], + auto_tts_enabled=EXP_V46_DICT['auto_tts_enabled'], + correctness_feedback_enabled=EXP_V46_DICT[ + 'correctness_feedback_enabled'] + ) + rights_manager.create_new_exploration_rights( + self.VALID_EXP_ID, feconf.SYSTEM_COMMITTER_ID) + exp_model.commit(feconf.SYSTEM_COMMITTER_ID, '', []) + exp_summary_model = exp_models.ExpSummaryModel(**{ + 'id': self.VALID_EXP_ID, + 'title': exp_model.title, + 'category': exp_model.category, + 'objective': exp_model.objective, + 'language_code': exp_model.language_code, + 'tags': exp_model.tags, + 'ratings': None, + 'scaled_average_rating': 4.0, + 'exploration_model_last_updated': exp_model.last_updated, + 'exploration_model_created_on': exp_model.created_on, + 'first_published_msec': None, + 'status': 
constants.ACTIVITY_STATUS_PRIVATE, + 'community_owned': False, + 'owner_ids': [feconf.SYSTEM_COMMITTER_ID], + 'editor_ids': [], + 'voice_artist_ids': [], + 'viewer_ids': [], + 'contributor_ids': [], + 'contributors_summary': {}, + 'version': exp_model.version + }) + exp_summary_model.update_timestamps() + exp_summary_model.put() + + # Bring the main exploration to the latest schema. + caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, + [self.VALID_EXP_ID]) + latest_schema_version = str(feconf.CURRENT_STATE_SCHEMA_VERSION) + migration_change_list = [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION, + 'from_version': '46', + 'to_version': latest_schema_version + }) + ] + exp_services.update_exploration( + feconf.SYSTEM_COMMITTER_ID, + self.VALID_EXP_ID, + migration_change_list, + 'Ran Exploration Migration job.' + ) + exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID) + self.assertEqual( + exploration_model.states_schema_version, + feconf.CURRENT_STATE_SCHEMA_VERSION) + + # Make a mock conversion function that raises an error when trying to + # convert the old snapshot. 
+ mock_conversion = classmethod( + lambda cls, exploration_dict: exploration_dict['property_that_dne']) + + with self.swap( + exp_domain.Exploration, '_convert_states_v46_dict_to_v47_dict', + mock_conversion + ): + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception("' + 'Exploration snapshot exp_id0 failed migration to ' + 'states v47: \'property_that_dne\'"))": 1' + ) + ), + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'' + 'Snapshot is already at latest schema version\'))": 1' + ) + ) + ]) + + def test_audit_job_handles_missing_states_schema_version(self) -> None: + swap_exp_schema_37 = self.swap( + exp_domain.Exploration, 'CURRENT_EXP_SCHEMA_VERSION', 37) + with swap_exp_schema_37: + with self.swap(feconf, 'CURRENT_STATE_SCHEMA_VERSION', 44): + exploration = exp_domain.Exploration.create_default_exploration( + self.VALID_EXP_ID, title='title', category='category') + exp_services.save_new_exploration( + feconf.SYSTEM_COMMITTER_ID, exploration) + + # Bring the main exploration to the latest schema. + caching_services.delete_multi( + caching_services.CACHE_NAMESPACE_EXPLORATION, None, + [self.VALID_EXP_ID]) + migration_change_list = [ + exp_domain.ExplorationChange({ + 'cmd': ( + exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION), + 'from_version': '41', + 'to_version': '44' + }) + ] + with self.swap(feconf, 'CURRENT_STATE_SCHEMA_VERSION', 44): + exp_services.update_exploration( + feconf.SYSTEM_COMMITTER_ID, + self.VALID_EXP_ID, + migration_change_list, + 'Ran Exploration Migration job.' + ) + exploration_model = exp_models.ExplorationModel.get( + self.VALID_EXP_ID) + self.assertEqual(exploration_model.states_schema_version, 44) + + # Modify the snapshot to have no states schema version. (This + # implies a schema version of 0.) 
+ snapshot_content_model = ( + exp_models.ExplorationSnapshotContentModel.get( + '%s-1' % self.VALID_EXP_ID)) + del snapshot_content_model.content['states_schema_version'] + snapshot_content_model.update_timestamps( + update_last_updated_time=False) + snapshot_content_model.put() + + # There is no failure due to a missing states schema version. + with self.swap(feconf, 'CURRENT_STATE_SCHEMA_VERSION', 44): + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception("' + 'Exploration snapshot exp_id0 failed migration to ' + 'states v1: type object \'Exploration\' has no ' + 'attribute \'_convert_states_v0_dict_to_v1_dict\'' + '"))": 1' + ) + ), + job_run_result.JobRunResult( + stdout='', + stderr=( + 'EXP PROCESSED ERROR: "(\'exp_id0\', Exception(\'' + 'Snapshot is already at latest schema version\'))":' + ' 1' + ) + ) + ]) diff --git a/core/jobs/batch_jobs/exp_recommendation_computation_jobs.py b/core/jobs/batch_jobs/exp_recommendation_computation_jobs.py index 528be2adb7d1..d182287a586b 100644 --- a/core/jobs/batch_jobs/exp_recommendation_computation_jobs.py +++ b/core/jobs/batch_jobs/exp_recommendation_computation_jobs.py @@ -18,6 +18,8 @@ from __future__ import annotations +from core.domain import exp_domain +from core.domain import exp_fetchers from core.domain import recommendations_services from core.jobs import base_jobs from core.jobs.io import ndb_io @@ -27,7 +29,7 @@ import apache_beam as beam -from typing import Dict, Iterable, List, Tuple, Union, cast +from typing import Dict, Final, Iterable, List, Tuple, Union MYPY = False if MYPY: # pragma: no cover @@ -35,15 +37,16 @@ from mypy_imports import exp_models from mypy_imports import recommendations_models -(exp_models, recommendations_models) = models.Registry.import_models( - [models.NAMES.exploration, models.NAMES.recommendations]) +(exp_models, recommendations_models) = models.Registry.import_models([ + models.Names.EXPLORATION, 
models.Names.RECOMMENDATIONS +]) datastore_services = models.Registry.import_datastore_services() -MAX_RECOMMENDATIONS = 10 +MAX_RECOMMENDATIONS: Final = 10 # Note: There is a threshold so that bad recommendations will be # discarded even if an exploration has few similar explorations. -SIMILARITY_SCORE_THRESHOLD = 3.0 +SIMILARITY_SCORE_THRESHOLD: Final = 3.0 class ComputeExplorationRecommendationsJob(base_jobs.JobBase): @@ -58,16 +61,18 @@ def run(self) -> beam.PCollection[job_run_result.JobRunResult]: the Elastic Search. """ - exp_summary_models = ( + exp_summary_objects = ( self.pipeline | 'Get all non-deleted models' >> ( ndb_io.GetModels(exp_models.ExpSummaryModel.get_all())) + | 'Convert ExpSummaryModels to domain objects' >> beam.Map( + exp_fetchers.get_exploration_summary_from_model) ) - exp_summary_iter = beam.pvalue.AsIter(exp_summary_models) + exp_summary_iter = beam.pvalue.AsIter(exp_summary_objects) exp_recommendations_models = ( - exp_summary_models + exp_summary_objects | 'Compute similarity' >> beam.ParDo( ComputeSimilarity(), exp_summary_iter) | 'Group similarities per exploration ID' >> beam.GroupByKey() @@ -91,7 +96,7 @@ def run(self) -> beam.PCollection[job_run_result.JobRunResult]: @staticmethod def _sort_and_slice_similarities( - similarities: Iterable[Dict[str, Union[str, float]]] + similarities: Iterable[Dict[str, Union[str, float]]] ) -> List[str]: """Sorts similarities of explorations and slices them to a maximum length. @@ -114,7 +119,7 @@ def _sort_and_slice_similarities( @staticmethod def _create_recommendation( - exp_id: str, recommended_exp_ids: Iterable[str] + exp_id: str, recommended_exp_ids: Iterable[str] ) -> recommendations_models.ExplorationRecommendationsModel: """Creates exploration recommendation model. 
@@ -135,21 +140,25 @@ def _create_recommendation( return exp_recommendation_model +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. class ComputeSimilarity(beam.DoFn): # type: ignore[misc] """DoFn to compute similarities between exploration.""" def process( self, - ref_exp_summary_model: datastore_services.Model, - compared_exp_summary_models: Iterable[datastore_services.Model] + ref_exp_summary: exp_domain.ExplorationSummary, + compared_exp_summaries: Iterable[exp_domain.ExplorationSummary] ) -> Iterable[Tuple[str, Dict[str, Union[str, float]]]]: """Compute similarities between exploraitons. Args: - ref_exp_summary_model: ExpSummaryModel. Reference exploration + ref_exp_summary: ExplorationSummary. Reference exploration summary. We are trying to find explorations similar to this reference summary. - compared_exp_summary_models: list(ExpSummaryModel). List of other + compared_exp_summaries: list(ExplorationSummary). List of other explorations summaries against which we compare the reference summary. @@ -161,23 +170,17 @@ def process( similarity_score: float. The similarity score for the exploration. 
""" - ref_exp_summary_model = cast( - exp_models.ExpSummaryModel, ref_exp_summary_model) with datastore_services.get_ndb_context(): - for compared_exp_summary_model in compared_exp_summary_models: - compared_exp_summary_model = cast( - exp_models.ExpSummaryModel, - compared_exp_summary_model - ) - if compared_exp_summary_model.id == ref_exp_summary_model.id: + for compared_exp_summary in compared_exp_summaries: + if compared_exp_summary.id == ref_exp_summary.id: continue - similarity_score = recommendations_services.get_item_similarity( # type: ignore[no-untyped-call] - ref_exp_summary_model, compared_exp_summary_model + similarity_score = recommendations_services.get_item_similarity( + ref_exp_summary, compared_exp_summary ) if similarity_score >= SIMILARITY_SCORE_THRESHOLD: yield ( - ref_exp_summary_model.id, { + ref_exp_summary.id, { 'similarity_score': similarity_score, - 'exp_id': compared_exp_summary_model.id + 'exp_id': compared_exp_summary.id } ) diff --git a/core/jobs/batch_jobs/exp_recommendation_computation_jobs_test.py b/core/jobs/batch_jobs/exp_recommendation_computation_jobs_test.py index 9087c3075403..ad8913038132 100644 --- a/core/jobs/batch_jobs/exp_recommendation_computation_jobs_test.py +++ b/core/jobs/batch_jobs/exp_recommendation_computation_jobs_test.py @@ -27,28 +27,31 @@ from core.jobs.types import job_run_result from core.platform import models -from typing import Dict, List, Tuple, Union # isort:skip +from typing import Dict, Final, List, Tuple, Type, Union MYPY = False if MYPY: from mypy_imports import exp_models from mypy_imports import recommendations_models -(exp_models, recommendations_models) = models.Registry.import_models( - [models.NAMES.exploration, models.NAMES.recommendations]) +(exp_models, recommendations_models) = models.Registry.import_models([ + models.Names.EXPLORATION, models.Names.RECOMMENDATIONS +]) StatsType = List[Tuple[str, List[Dict[str, Union[bool, int, str]]]]] class 
ComputeExplorationRecommendationsJobTests(job_test_utils.JobTestBase): - JOB_CLASS = ( - exp_recommendation_computation_jobs - .ComputeExplorationRecommendationsJob) + JOB_CLASS: Type[ + exp_recommendation_computation_jobs.ComputeExplorationRecommendationsJob + ] = ( + exp_recommendation_computation_jobs.ComputeExplorationRecommendationsJob + ) - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' - EXP_3_ID = 'exp_3_id' + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' + EXP_3_ID: Final = 'exp_3_id' def test_empty_storage(self) -> None: self.assert_job_output_is_empty() @@ -77,7 +80,7 @@ def test_does_nothing_when_only_one_exploration_exists(self) -> None: self.assertIsNone(exp_recommendations_model) def test_creates_recommendations_for_similar_explorations(self) -> None: - recommendations_services.create_default_topic_similarities() # type: ignore[no-untyped-call] + recommendations_services.create_default_topic_similarities() exp_summary_1 = self.create_model( exp_models.ExpSummaryModel, id=self.EXP_1_ID, @@ -130,7 +133,7 @@ def test_creates_recommendations_for_similar_explorations(self) -> None: ) def test_skips_private_explorations(self) -> None: - recommendations_services.create_default_topic_similarities() # type: ignore[no-untyped-call] + recommendations_services.create_default_topic_similarities() exp_summary_1 = self.create_model( exp_models.ExpSummaryModel, id=self.EXP_1_ID, @@ -171,9 +174,9 @@ def test_skips_private_explorations(self) -> None: self.assertIsNone(exp_recommendations_model_2) def test_does_not_create_recommendations_for_different_explorations( - self + self ) -> None: - recommendations_services.create_default_topic_similarities() # type: ignore[no-untyped-call] + recommendations_services.create_default_topic_similarities() exp_summary_1 = self.create_model( exp_models.ExpSummaryModel, id=self.EXP_1_ID, @@ -214,7 +217,7 @@ def test_does_not_create_recommendations_for_different_explorations( 
self.assertIsNone(exp_recommendations_model_2) def test_creates_recommendations_for_three_explorations(self) -> None: - recommendations_services.create_default_topic_similarities() # type: ignore[no-untyped-call] + recommendations_services.create_default_topic_similarities() exp_summary_1 = self.create_model( exp_models.ExpSummaryModel, id=self.EXP_1_ID, diff --git a/core/jobs/batch_jobs/exp_search_indexing_jobs.py b/core/jobs/batch_jobs/exp_search_indexing_jobs.py index c23755a90d92..2392b54f2ffe 100644 --- a/core/jobs/batch_jobs/exp_search_indexing_jobs.py +++ b/core/jobs/batch_jobs/exp_search_indexing_jobs.py @@ -18,6 +18,8 @@ from __future__ import annotations +from core.domain import exp_domain +from core.domain import exp_fetchers from core.domain import search_services from core.jobs import base_jobs from core.jobs.io import ndb_io @@ -27,24 +29,23 @@ import apache_beam as beam import result -from typing import Iterable, List, cast + +from typing import Final, Iterable, List MYPY = False if MYPY: # pragma: no cover - from mypy_imports import datastore_services from mypy_imports import exp_models from mypy_imports import search_services as platform_search_services -(exp_models,) = models.Registry.import_models([models.NAMES.exploration]) +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) -datastore_services = models.Registry.import_datastore_services() platform_search_services = models.Registry.import_search_services() class IndexExplorationsInSearchJob(base_jobs.JobBase): """Job that indexes the explorations in Elastic Search.""" - MAX_BATCH_SIZE = 1000 + MAX_BATCH_SIZE: Final = 1000 def run(self) -> beam.PCollection[job_run_result.JobRunResult]: """Returns a PCollection of 'SUCCESS' or 'FAILURE' results from @@ -59,6 +60,8 @@ def run(self) -> beam.PCollection[job_run_result.JobRunResult]: | 'Get all non-deleted models' >> ( ndb_io.GetModels( exp_models.ExpSummaryModel.get_all(include_deleted=False))) + | 'Convert ExpSummaryModels to 
domain objects' >> beam.Map( + exp_fetchers.get_exploration_summary_from_model) | 'Split models into batches' >> beam.transforms.util.BatchElements( max_batch_size=self.MAX_BATCH_SIZE) | 'Index batches of models' >> beam.ParDo( @@ -68,25 +71,29 @@ def run(self) -> beam.PCollection[job_run_result.JobRunResult]: ) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. class IndexExplorationSummaries(beam.DoFn): # type: ignore[misc] """DoFn to index exploration summaries.""" def process( - self, exp_summary_models: List[datastore_services.Model] + self, exp_summary: List[exp_domain.ExplorationSummary] ) -> Iterable[result.Result[None, Exception]]: """Index exploration summaries and catch any errors. Args: - exp_summary_models: list(Model). Models to index. + exp_summary: list(ExplorationSummary). List of Exp Summary domain + objects to be indexed. Yields: JobRunResult. List containing one element, which is either SUCCESS, or FAILURE. 
""" try: - search_services.index_exploration_summaries( # type: ignore[no-untyped-call] - cast(List[exp_models.ExpSummaryModel], exp_summary_models)) - for _ in exp_summary_models: + search_services.index_exploration_summaries(exp_summary) + for _ in exp_summary: yield result.Ok() except platform_search_services.SearchException as e: yield result.Err(e) diff --git a/core/jobs/batch_jobs/exp_search_indexing_jobs_test.py b/core/jobs/batch_jobs/exp_search_indexing_jobs_test.py index 4ae1a9940cf3..711a90065779 100644 --- a/core/jobs/batch_jobs/exp_search_indexing_jobs_test.py +++ b/core/jobs/batch_jobs/exp_search_indexing_jobs_test.py @@ -25,14 +25,14 @@ from core.jobs.types import job_run_result from core.platform import models -from typing import Dict, List, Tuple, Union # isort:skip +from typing import Dict, List, Tuple, Type, Union MYPY = False if MYPY: from mypy_imports import exp_models from mypy_imports import search_services as platform_search_services -(exp_models,) = models.Registry.import_models([models.NAMES.exploration]) +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) platform_search_services = models.Registry.import_search_services() @@ -41,7 +41,9 @@ class IndexExplorationsInSearchJobTests(job_test_utils.JobTestBase): - JOB_CLASS = exp_search_indexing_jobs.IndexExplorationsInSearchJob + JOB_CLASS: Type[ + exp_search_indexing_jobs.IndexExplorationsInSearchJob + ] = exp_search_indexing_jobs.IndexExplorationsInSearchJob def test_empty_storage(self) -> None: self.assert_job_output_is_empty() diff --git a/core/jobs/batch_jobs/exp_version_history_computation_job.py b/core/jobs/batch_jobs/exp_version_history_computation_job.py new file mode 100644 index 000000000000..c7ec0b8630bf --- /dev/null +++ b/core/jobs/batch_jobs/exp_version_history_computation_job.py @@ -0,0 +1,1123 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Job for computation of exploration version history data.""" + +from __future__ import annotations + +from core import feconf +from core.domain import exp_domain +from core.domain import exp_fetchers +from core.domain import exp_services +from core.domain import state_domain +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam +from typing import Dict, List, Optional, Tuple, TypedDict, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import exp_models + +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) +datastore_services = models.Registry.import_datastore_services() + + +class UnformattedModelGroupForVerificationJobDict(TypedDict): + """Dictionary representing an unformatted model group for the + VerifyVersionHistoryModelsJob which verifies whether the version history + models were created correctly. 
+ """ + + all_exp_models: List[exp_domain.Exploration] + exp_models_vlatest: List[exp_domain.Exploration] + snapshot_metadata_models: List[Optional[ + exp_models.ExplorationSnapshotMetadataModel]] + version_history_models: ( + List[Optional[exp_models.ExplorationVersionHistoryModel]] + ) + + +class FormattedModelGroupForVerificationJobDict(TypedDict): + """Dictionary representing a formatted model group for the + VerifyVersionHistoryModelsJob which verifies whether the version history + models were created correctly. + """ + + exp_vlatest: exp_domain.Exploration + all_explorations: List[exp_domain.Exploration] + snapshot_metadata_models: List[Optional[ + exp_models.ExplorationSnapshotMetadataModel]] + version_history_models: ( + List[Optional[exp_models.ExplorationVersionHistoryModel]] + ) + + +class UnformattedModelGroupForComputationJobDict(TypedDict): + """Dictionary representing an unformatted model group for the + ComputeExplorationVersionHistoryJob which verifies whether the version + history models were created correctly. + """ + + all_exp_models: List[exp_models.ExplorationModel] + exp_models_vlatest: List[exp_models.ExplorationModel] + snapshot_metadata_models: List[Optional[ + exp_models.ExplorationSnapshotMetadataModel]] + version_history_models: ( + List[Optional[exp_models.ExplorationVersionHistoryModel]] + ) + + +class FormattedModelGroupForComputationJobDict(TypedDict): + """Dictionary representing a formatted model group for the + ComputeExplorationVersionHistoryJob which verifies whether the version + history models were created correctly. 
+ """ + + exp_vlatest: exp_models.ExplorationModel + all_explorations: List[exp_models.ExplorationModel] + snapshot_metadata_models: List[Optional[ + exp_models.ExplorationSnapshotMetadataModel]] + version_history_models: ( + List[Optional[exp_models.ExplorationVersionHistoryModel]] + ) + + +class VerifyVersionHistoryModelsJob(base_jobs.JobBase): + """Verifies that the creation or modification of the version history + models is correct. It does not consult those explorations for which + version history could not be created due to many reasons such as invalid + change list. It checks the correctness for those explorations for which the + version histories were created successfully. + """ + + def generate_exploration_from_snapshot( + self, snapshot_model: exp_models.ExplorationSnapshotContentModel + ) -> Optional[exp_models.ExplorationModel]: + """Generates exploration model from given snapshot content model. + + Args: + snapshot_model: ExplorationSnapshotContentModel. The snapshot + content model. + + Returns: + ExplorationModel. The exploration model. + """ + with datastore_services.get_ndb_context(): + try: + snapshot_dict = snapshot_model.content + exp_id = snapshot_model.get_unversioned_instance_id() + model_class = exp_models.ExplorationModel + reconstituted_model = model_class(id=exp_id)._reconstitute( # pylint: disable=protected-access + snapshot_dict + ) + reconstituted_model.created_on = snapshot_model.created_on + reconstituted_model.last_updated = snapshot_model.last_updated + return reconstituted_model + except Exception: + return None + + def convert_to_formatted_model_group( + self, model_group: UnformattedModelGroupForVerificationJobDict + ) -> Optional[FormattedModelGroupForVerificationJobDict]: + """Converts the given unformatted model group into a formatted one. + + Args: + model_group: UnformattedModelGroupForVerificationJobDict. + The unformatted model group for the verification job which is + to be converted into formatted model group. 
+ + Returns: + Optional[FormattedModelGroupForVerificationJobDict]. The formatted + version of the given model group. + """ + all_exp_models = model_group['all_exp_models'] + exp_models_vlatest = model_group['exp_models_vlatest'] + snapshot_metadata_models = model_group['snapshot_metadata_models'] + version_history_models = model_group['version_history_models'] + + response_dict: Optional[ + FormattedModelGroupForVerificationJobDict] = None + + model_group_is_valid = len(exp_models_vlatest) == 1 + if model_group_is_valid: # pragma: no cover + exp_model_vlatest = exp_models_vlatest[0] + + all_explorations: List[Optional[exp_domain.Exploration]] = ( + [None] * exp_model_vlatest.version + ) + for exp_model in all_exp_models: + if ( + exp_model is not None and + exp_model.version >= 1 and + exp_model.version <= exp_model_vlatest.version + ): # pragma: no cover + all_explorations[exp_model.version - 1] = exp_model + model_group_is_valid = (all_explorations.count(None) == 0) + + if model_group_is_valid: + all_snapshot_metadata_models: List[Optional[ + exp_models.ExplorationSnapshotMetadataModel + ]] = ( + [None] * exp_model_vlatest.version + ) + for snapshot_metadata in snapshot_metadata_models: + if ( + snapshot_metadata is not None and + int(snapshot_metadata.get_version_string()) >= 1 and + int(snapshot_metadata.get_version_string()) <= ( + exp_model_vlatest.version) + ): + version = int(snapshot_metadata.get_version_string()) + all_snapshot_metadata_models[ + version - 1] = snapshot_metadata + model_group_is_valid = ( + all_snapshot_metadata_models.count(None) == 0 + ) + + if model_group_is_valid: # pragma: no cover + all_version_history_models: List[Optional[ + exp_models.ExplorationVersionHistoryModel + ]] = [None] * exp_model_vlatest.version + for version_history in version_history_models: + if ( + version_history is not None and + version_history.exploration_version is not None and + version_history.exploration_version >= 1 and + 
version_history.exploration_version <= + exp_model_vlatest.version + ): # pragma: no cover + all_version_history_models[ + version_history.exploration_version - 1 + ] = version_history + model_group_is_valid = ( + all_version_history_models.count(None) == 0 + ) + + # The following lists are just to fix the MyPy errors. + # No entity in the above lists are None if the model group + # is valid. + explorations_without_none: List[ + exp_domain.Exploration] = [] + for exploration in all_explorations: + if exploration is not None: # pragma: no cover + explorations_without_none.append(exploration) + if model_group_is_valid: # pragma: no cover + response_dict = { + 'exp_vlatest': exp_model_vlatest, + 'all_explorations': explorations_without_none, + 'snapshot_metadata_models': ( + all_snapshot_metadata_models), + 'version_history_models': all_version_history_models + } + return response_dict + + def verify_version_history_models( + self, model_group: FormattedModelGroupForVerificationJobDict + ) -> Tuple[str, bool]: + """Verifies that the version history models were created correctly. + + Args: + model_group: FormattedModelGroupForVerificationJobDict. The + formatted model group for the computation job for which version + history models are to be verified. + + Returns: + Tuple[str, bool]. The pair of exploration id and whether the + version history models were created correctly. 
+ """ + exp_vlatest = model_group['exp_vlatest'] + snapshot_metadata_models = model_group['snapshot_metadata_models'] + vh_models = model_group['version_history_models'] + exp_id = exp_vlatest.id + latest_version = exp_vlatest.version + verified = True + + for version in range(2, latest_version + 1): + vh_model = vh_models[version - 1] + assert vh_model is not None + snapshot_metadata_model = snapshot_metadata_models[version - 1] + assert snapshot_metadata_model is not None + change_list: List[exp_domain.ExplorationChange] = [] + for change_dict in snapshot_metadata_model.commit_cmds: + try: + change_list.append(exp_domain.ExplorationChange( + change_dict + )) + except Exception: + continue + + exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list) + + effective_old_to_new_state_names = {} + for old_state_name, new_state_name in ( + exp_versions_diff.old_to_new_state_names.items() + ): + if old_state_name != new_state_name: # pragma: no cover + effective_old_to_new_state_names[ + old_state_name] = new_state_name + for old_state_name, new_state_name in ( + effective_old_to_new_state_names.items() + ): + if new_state_name not in vh_model.state_version_history: + verified = False + break + state_vh = vh_model.state_version_history[new_state_name] + if state_vh['previously_edited_in_version'] != version - 1: + verified = False + break + if state_vh['state_name_in_previous_version'] != old_state_name: + verified = False + break + + for state_name in exp_versions_diff.added_state_names: + if state_name not in vh_model.state_version_history: + verified = False + break + state_vh = vh_model.state_version_history[state_name] + if ( + state_vh['previously_edited_in_version'] is not None or + state_vh['state_name_in_previous_version'] is not None + ): + verified = False + break + + if not verified: + break + + return (exp_id, verified) + + def get_exploration_from_model( + self, exploration_model: exp_models.ExplorationModel + ) -> 
Optional[exp_domain.Exploration]: + """Gets Exploration domain object from exploration model. + + Args: + exploration_model: ExplorationModel. The exploration model which is + to be converted into Exploration domain object. + + Returns: + Optional[exp_domain.Exploration]. The Exploration domain object + for the given exploration model. + """ + try: + exploration = exp_fetchers.get_exploration_from_model( + exploration_model + ) + return exploration + except Exception: + return None + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + all_explorations = ( + self.pipeline + | 'Get all the exploration snapshot models' >> ndb_io.GetModels( + exp_models.ExplorationSnapshotContentModel.get_all( + include_deleted=False + ) + ) + | 'Filter the exploration snapshot models without None' >> + beam.Filter(lambda model: model is not None) + | 'Get reconstituted exploration models' >> + beam.Map(self.generate_exploration_from_snapshot) + | 'Get Exploration objects from models' >> + beam.Map(self.get_exploration_from_model) + | 'Filter explorations without None' >> + beam.Filter(lambda x: x is not None) + | 'Get id-model pair for exploration models' >> + beam.Map(lambda exploration: (exploration.id, exploration)) + ) + + all_explorations_vlatest = ( + self.pipeline + | 'Get all the exploration models at latest version' >> + ndb_io.GetModels(exp_models.ExplorationModel.get_all( + include_deleted=False + )) + | 'Get Exploration objects from exp models vlatest' >> + beam.Map(self.get_exploration_from_model) + | 'Filter the explorations without None' >> + beam.Filter(lambda x: x is not None) + | 'Get id-model pair for exploration models at vlatest' >> + beam.Map(lambda exploration: (exploration.id, exploration)) + ) + + all_snapshot_metadata = ( + self.pipeline + | 'Get all ExplorationSnapshotMetadataModels' >> ndb_io.GetModels( + exp_models.ExplorationSnapshotMetadataModel.get_all( + include_deleted=False + ) + ) + | 'Create key-value pairs with id and metadata 
models' >> + beam.Map(lambda model: ( + model.get_unversioned_instance_id(), model + ) + ) + ) + + all_version_history_models = ( + self.pipeline + | 'Get all ExplorationVersionHistoryModels' >> + ndb_io.GetModels( + exp_models.ExplorationVersionHistoryModel.get_all( + include_deleted=False + ) + ) + | 'Create key-value pairs with id and version history models' >> + beam.Map(lambda model: (model.exploration_id, model)) + ) + + verification_results = ( + ({ + 'all_exp_models': all_explorations, + 'exp_models_vlatest': all_explorations_vlatest, + 'snapshot_metadata_models': all_snapshot_metadata, + 'version_history_models': all_version_history_models + }) + | 'Group by key' >> beam.CoGroupByKey() + | 'Get rid of exploration id' >> + beam.Values() # pylint: disable=no-value-for-parameter + | 'Get formatted model groups' >> beam.Map( + self.convert_to_formatted_model_group + ) + | 'Filter valid model groups' >> beam.Filter( + lambda x: x is not None + ) + | 'Get the verification result for each model group' >> + beam.Map(self.verify_version_history_models) + ) + + verification_success = ( + verification_results + | 'Filter the verified explorations' >> + beam.Filter(lambda x: x[1]) + ) + + verification_failed = ( + verification_results + | 'Filter the unverified explorations' >> + beam.Filter(lambda x: not x[1]) + ) + + report_number_of_explorations_queried = ( + all_explorations_vlatest + | 'Count the number of explorations' >> + job_result_transforms.CountObjectsToJobRunResult( + 'ALL EXPLORATIONS' + ) + ) + + report_number_of_verified_explorations = ( + verification_success + | 'Count the number of verified explorations' >> + job_result_transforms.CountObjectsToJobRunResult( + 'VERIFIED EXPLORATIONS' + ) + ) + + report_number_of_unverified_explorations = ( + verification_failed + | 'Count the number of unverified explorations' >> + job_result_transforms.CountObjectsToJobRunResult( + 'UNVERIFIED EXPLORATIONS' + ) + ) + + report_details_of_unverified_explorations = ( 
+ verification_failed + | 'Save info on the unverified explorations' >> beam.Map( + lambda x: job_run_result.JobRunResult.as_stderr( + 'Version history for exploration with ID %s was not ' + 'created correctly' % (x[0]) + ) + ) + ) + + return ( + ( + report_number_of_explorations_queried, + report_number_of_verified_explorations, + report_number_of_unverified_explorations, + report_details_of_unverified_explorations + ) + | 'Flatten' >> beam.Flatten() + ) + + +class DeleteExplorationVersionHistoryModelsJob(base_jobs.JobBase): + """Job that deletes ExplorationVersionHistoryModels.""" + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of 'SUCCESS' or 'FAILURE' results from + deleting ExplorationVersionHistoryModel. + + Returns: + PCollection. A PCollection of 'SUCCESS' or 'FAILURE' results from + deleting ExplorationVersionHistoryModel. + """ + version_history_models = ( + self.pipeline + | 'Get all ExplorationVersionHistoryModels' >> + ndb_io.GetModels( + exp_models.ExplorationVersionHistoryModel.get_all( + include_deleted=False + ) + ) + ) + + unused_delete_result = ( + version_history_models + | beam.Map(lambda model: model.key) + | 'Delete all models' >> ndb_io.DeleteModels() + ) + + return ( + version_history_models + | 'Create job run result' >> ( + job_result_transforms.CountObjectsToJobRunResult()) + ) + + +class ComputeExplorationVersionHistoryJob(base_jobs.JobBase): + """Computes and populates the version history data for an exploration.""" + + def convert_to_formatted_model_group( + self, model_group: UnformattedModelGroupForComputationJobDict + ) -> Optional[FormattedModelGroupForComputationJobDict]: + """Converts the given unformatted model group into a formatted one. + + Args: + model_group: UnformattedModelGroupForComputationJobDict. + The unformatted model group for the computation job which is + to be converted into formatted model group. + + Returns: + Optional[FormattedModelGroupForComputationJobDict]. 
The formatted + version of the given model group. + """ + all_exp_models = model_group['all_exp_models'] + exp_models_vlatest = model_group['exp_models_vlatest'] + snapshot_metadata_models = model_group['snapshot_metadata_models'] + version_history_models = model_group['version_history_models'] + + response_dict: Optional[ + FormattedModelGroupForComputationJobDict] = None + + model_group_is_valid = len(exp_models_vlatest) == 1 + if model_group_is_valid: # pragma: no cover + exp_model_vlatest = exp_models_vlatest[0] + + all_explorations: List[Optional[exp_models.ExplorationModel]] = ( + [None] * exp_model_vlatest.version + ) + for exp_model in all_exp_models: + if ( + exp_model is not None and + exp_model.version >= 1 and + exp_model.version <= exp_model_vlatest.version + ): + all_explorations[exp_model.version - 1] = exp_model + model_group_is_valid = (all_explorations.count(None) == 0) + + if model_group_is_valid: + all_snapshot_metadata_models: List[Optional[ + exp_models.ExplorationSnapshotMetadataModel + ]] = ( + [None] * exp_model_vlatest.version + ) + for snapshot_metadata in snapshot_metadata_models: + if ( + snapshot_metadata is not None and + int(snapshot_metadata.get_version_string()) >= 1 and + int(snapshot_metadata.get_version_string()) <= ( + exp_model_vlatest.version) + ): + version = int(snapshot_metadata.get_version_string()) + all_snapshot_metadata_models[ + version - 1] = snapshot_metadata + model_group_is_valid = ( + all_snapshot_metadata_models.count(None) == 0 + ) + + if model_group_is_valid: + all_version_history_models: List[Optional[ + exp_models.ExplorationVersionHistoryModel + ]] = [None] * exp_model_vlatest.version + for version_history in version_history_models: + if ( + version_history is not None and + version_history.exploration_version is not None and + version_history.exploration_version >= 1 and + version_history.exploration_version <= + exp_model_vlatest.version + ): # pragma: no cover + all_version_history_models[ + 
version_history.exploration_version - 1 + ] = version_history + + # The following lists are just to fix the MyPy errors. + # No entity in the above lists are None if the model group + # is valid. + explorations_without_none: List[ + exp_models.ExplorationModel] = [] + for exploration in all_explorations: + if exploration is not None: + explorations_without_none.append(exploration) + response_dict = { + 'exp_vlatest': exp_model_vlatest, + 'all_explorations': explorations_without_none, + 'snapshot_metadata_models': ( + all_snapshot_metadata_models + ), + 'version_history_models': all_version_history_models + } + return response_dict + + def get_updated_version_history_model( + self, + vh_model: Optional[exp_models.ExplorationVersionHistoryModel], + exp_id: str, + current_version: int, + committer_id: str, + updated_states_vh: Dict[str, state_domain.StateVersionHistory], + updated_metadata_vh: exp_domain.MetadataVersionHistory, + updated_committer_ids: List[str] + ) -> exp_models.ExplorationVersionHistoryModel: + """Updates the version history model or creates one for the given + version of the exploration. + + Args: + vh_model: Optional[ExplorationVersionHistoryModel]. The version + history model for the given version of the exploration. It is + None if the model does not exist. + exp_id: str. The id of the exploration. + current_version: int. The version number for which we want to + create the version history model. + committer_id: str. The user id of the user who committed the + changes in the exploration from versions (current_version - 1) + to (current_version). + updated_states_vh: dict(str, StateVersionHistory). The updated + states version history data for the given version of the + exploration. + updated_metadata_vh: MetadataVersionHistory. The updated metadata + version history data for the given version of the exploration. + updated_committer_ids: list[str]. A list of user ids who made the + 'previous commit' on each state and the exploration metadata. 
+ + Returns: + ExplorationVersionHistoryModel. The updated version history model. + """ + # If the model is not already existing, then create it. + if vh_model is None: + vh_model = exp_models.ExplorationVersionHistoryModel( + id=exp_models.ExplorationVersionHistoryModel.get_instance_id( + exp_id, current_version + ), + exploration_id=exp_id, + exploration_version=current_version, + state_version_history={}, + metadata_last_edited_version_number=None, + metadata_last_edited_committer_id=committer_id, + committer_ids=[committer_id] + ) + # Update the required fields in the model. + vh_model.state_version_history = { + state_name: vh.to_dict() + for state_name, vh in updated_states_vh.items() + } + vh_model.metadata_last_edited_version_number = ( + updated_metadata_vh.last_edited_version_number + ) + vh_model.metadata_last_edited_committer_id = ( + updated_metadata_vh.last_edited_committer_id + ) + vh_model.committer_ids = updated_committer_ids + return vh_model + + def get_reverted_version_history_model( + self, + revert_to_vh_model: exp_models.ExplorationVersionHistoryModel, + current_vh_model: Optional[exp_models.ExplorationVersionHistoryModel], + exp_id: str, + current_version: int + ) -> exp_models.ExplorationVersionHistoryModel: + """Updates the version history model for the current version of the + exploration with the model data of the reverted version. + + Args: + revert_to_vh_model: ExplorationVersionHistoryModel. The exploration + version history model at the version to which the exploration + is reverted. + current_vh_model: Optional[ExplorationVersionHistoryModel]. The + version history model for the current version of the + exploration. It is None if the model does not exist. + exp_id: str. The id of the exploration. + current_version: int. The version number for which we want to + create the version history model. + + Returns: + ExplorationVersionHistoryModel. The updated version history model. 
+ """ + # If the model does not exist, create it with the data from the + # reverted model. Otherwise, just update the data of the already + # existing model with the data from the reverted model. + if current_vh_model is None: + current_vh_model = exp_models.ExplorationVersionHistoryModel( + id=exp_models.ExplorationVersionHistoryModel.get_instance_id( + exp_id, current_version + ), + exploration_id=exp_id, + exploration_version=current_version, + state_version_history=revert_to_vh_model.state_version_history, + metadata_last_edited_version_number=( + revert_to_vh_model.metadata_last_edited_version_number + ), + metadata_last_edited_committer_id=( + revert_to_vh_model.metadata_last_edited_committer_id + ), + committer_ids=revert_to_vh_model.committer_ids + ) + else: + current_vh_model.state_version_history = ( + revert_to_vh_model.state_version_history + ) + current_vh_model.metadata_last_edited_version_number = ( + revert_to_vh_model.metadata_last_edited_version_number + ) + current_vh_model.metadata_last_edited_committer_id = ( + revert_to_vh_model.metadata_last_edited_committer_id + ) + current_vh_model.committer_ids = revert_to_vh_model.committer_ids + + return current_vh_model + + def check_for_revert_commit( + self, change_list: List[exp_domain.ExplorationChange] + ) -> Optional[int]: + """Checks if revert commit is present in the change list and returns + the version number (if present). + + Args: + change_list: list(ExplorationChange). The list of changes to check. + + Returns: + Optional[int]. The revert version number (if present) or None. 
+ """ + for change in change_list: + if change.cmd == feconf.CMD_REVERT_COMMIT: + return int(change.version_number) + return None + + def create_version_history_models( + self, model_group: FormattedModelGroupForComputationJobDict + ) -> Union[ + Tuple[str, List[exp_models.ExplorationVersionHistoryModel]], + Tuple[ + str, + List[exp_models.ExplorationVersionHistoryModel], + Union[Exception, str], + int + ] + ]: + """Creates the version history models for a particular exploration. + + Args: + model_group: FormattedModelGroupForComputationJobDict. + The formatted model group for the computation job for which + version history models are to be created. + + Returns: + Union[ + Tuple[str, List[exp_models.ExplorationVersionHistoryModel]], + Tuple[ + str, + List[exp_models.ExplorationVersionHistoryModel], + Union[Exception, str], + int + ] + ]. The tuple of exploration id along with the created version + history or the tuple of exploration id along with error message + and version number of the exploration in case of any error. 
+ """ + with datastore_services.get_ndb_context(): + exp_vlatest = model_group['exp_vlatest'] + versioned_explorations = model_group['all_explorations'] + snapshot_metadata_models = model_group['snapshot_metadata_models'] + version_history_models = model_group['version_history_models'] + + exp_version = exp_vlatest.version + exp_id = exp_vlatest.id + + snapshot_model_at_v1 = snapshot_metadata_models[0] + assert snapshot_model_at_v1 is not None + committer_id_v1 = snapshot_model_at_v1.committer_id + states_vh_at_v1 = { + state_name: state_domain.StateVersionHistory( + None, None, committer_id_v1 + ) + for state_name in versioned_explorations[0].states + } + metadata_vh_at_v1 = exp_domain.MetadataVersionHistory( + None, committer_id_v1 + ) + committer_ids_at_v1 = [committer_id_v1] + vh_model_at_v1 = self.get_updated_version_history_model( + version_history_models[0], + versioned_explorations[0].id, 1, committer_id_v1, + states_vh_at_v1, metadata_vh_at_v1, committer_ids_at_v1 + ) + vh_model_at_v1.update_timestamps() + version_history_models[0] = vh_model_at_v1 + + for version in range(2, exp_version + 1): + snapshot_metadata_model = snapshot_metadata_models[version - 1] + assert snapshot_metadata_model is not None + committer_id: str = snapshot_metadata_model.committer_id + change_list: List[exp_domain.ExplorationChange] = [] + for change_dict in snapshot_metadata_model.commit_cmds: + try: + change_list.append(exp_domain.ExplorationChange( + change_dict + )) + except Exception: + continue + + old_exploration = versioned_explorations[version - 2] + new_exploration = versioned_explorations[version - 1] + revert_to_version = self.check_for_revert_commit( + change_list + ) + if revert_to_version is not None: + if ( + revert_to_version <= 0 or + revert_to_version >= version + ): + return ( + exp_id, [], + 'Reverting to the version %d which is out of the ' + 'range [1, %d]' % (revert_to_version, version - 1), + version + ) + revert_to_vh_model = ( + 
version_history_models[revert_to_version - 1] + ) + assert revert_to_vh_model is not None + new_vh_model = self.get_reverted_version_history_model( + revert_to_vh_model, + version_history_models[version - 1], + exp_id, version + ) + new_vh_model.update_timestamps() + version_history_models[version - 1] = new_vh_model + else: + old_states_dict = old_exploration.states + new_states_dict = new_exploration.states + old_metadata_dict: exp_domain.ExplorationMetadataDict = { + 'title': old_exploration.title, + 'category': old_exploration.category, + 'objective': old_exploration.objective, + 'language_code': old_exploration.language_code, + 'tags': old_exploration.tags, + 'blurb': old_exploration.blurb, + 'author_notes': old_exploration.author_notes, + 'states_schema_version': ( + old_exploration.states_schema_version + ), + 'init_state_name': old_exploration.init_state_name, + 'param_specs': old_exploration.param_specs, + 'param_changes': old_exploration.param_changes, + 'auto_tts_enabled': old_exploration.auto_tts_enabled, + 'correctness_feedback_enabled': ( + old_exploration.correctness_feedback_enabled + ), + 'edits_allowed': old_exploration.edits_allowed + } + new_metadata_dict: exp_domain.ExplorationMetadataDict = { + 'title': new_exploration.title, + 'category': new_exploration.category, + 'objective': new_exploration.objective, + 'language_code': new_exploration.language_code, + 'tags': new_exploration.tags, + 'blurb': new_exploration.blurb, + 'author_notes': new_exploration.author_notes, + 'states_schema_version': ( + new_exploration.states_schema_version + ), + 'init_state_name': new_exploration.init_state_name, + 'param_specs': new_exploration.param_specs, + 'param_changes': new_exploration.param_changes, + 'auto_tts_enabled': new_exploration.auto_tts_enabled, + 'correctness_feedback_enabled': ( + new_exploration.correctness_feedback_enabled + ), + 'edits_allowed': new_exploration.edits_allowed + } + + old_vh_model = version_history_models[version - 2] + assert 
old_vh_model is not None + old_states_vh = { + state_name: ( + state_domain.StateVersionHistory.from_dict( + state_vh_dict + ) + ) + for state_name, state_vh_dict in + old_vh_model.state_version_history.items() + } + old_metadata_vh = exp_domain.MetadataVersionHistory( + old_vh_model.metadata_last_edited_version_number, + old_vh_model.metadata_last_edited_committer_id + ) + + try: + new_states_vh = ( + exp_services.update_states_version_history( + old_states_vh, change_list, old_states_dict, + new_states_dict, version, committer_id + ) + ) + new_metadata_vh = ( + exp_services.update_metadata_version_history( + old_metadata_vh, change_list, old_metadata_dict, + new_metadata_dict, version, committer_id + ) + ) + new_committer_ids = ( + exp_services.get_updated_committer_ids( + new_states_vh, + new_metadata_vh.last_edited_committer_id + ) + ) + new_vh_model = self.get_updated_version_history_model( + version_history_models[version - 1], + exp_id, version, committer_id, + new_states_vh, new_metadata_vh, new_committer_ids + ) + new_vh_model.update_timestamps() + version_history_models[version - 1] = new_vh_model + except Exception as e: + return (exp_id, [], e, version) + + # The following block is used to prevent MyPy errors. + vh_models_without_none: List[ + exp_models.ExplorationVersionHistoryModel] = [] + for vh_model in version_history_models: + if vh_model is not None: # pragma: no cover + vh_models_without_none.append(vh_model) + return (exp_id, vh_models_without_none) + + def generate_exploration_from_snapshot( + self, snapshot_model: exp_models.ExplorationSnapshotContentModel + ) -> Optional[exp_models.ExplorationModel]: + """Generates exploration model from given snapshot content model. + + Args: + snapshot_model: ExplorationSnapshotContentModel. The snapshot + content model. + + Returns: + ExplorationModel. The exploration model. 
+ """ + with datastore_services.get_ndb_context(): + try: + snapshot_dict = snapshot_model.content + exp_id = snapshot_model.get_unversioned_instance_id() + model_class = exp_models.ExplorationModel + reconstituted_model = model_class(id=exp_id)._reconstitute( # pylint: disable=protected-access + snapshot_dict + ) + reconstituted_model.created_on = snapshot_model.created_on + reconstituted_model.last_updated = snapshot_model.last_updated + return reconstituted_model + except Exception: + return None + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + all_explorations = ( + self.pipeline + | 'Get all the exploration snapshot models' >> ndb_io.GetModels( + exp_models.ExplorationSnapshotContentModel.get_all( + include_deleted=False + ) + ) + | 'Filter the exploration snapshot models without None' >> + beam.Filter(lambda model: model is not None) + | 'Get reconstituted exploration models' >> + beam.Map(self.generate_exploration_from_snapshot) + | 'Filter explorations without None' >> + beam.Filter(lambda x: x is not None) + | 'Get id-model pair for exploration models' >> + beam.Map(lambda exploration: (exploration.id, exploration)) + ) + + all_explorations_vlatest = ( + self.pipeline + | 'Get all the exploration models at latest version' >> + ndb_io.GetModels(exp_models.ExplorationModel.get_all( + include_deleted=False + )) + | 'Filter the explorations without None' >> + beam.Filter(lambda x: x is not None) + | 'Get id-model pair for exploration models at vlatest' >> + beam.Map(lambda exploration: (exploration.id, exploration)) + ) + + all_snapshot_metadata = ( + self.pipeline + | 'Get all ExplorationSnapshotMetadataModels' >> ndb_io.GetModels( + exp_models.ExplorationSnapshotMetadataModel.get_all( + include_deleted=False + ) + ) + | 'Create key-value pairs with id and metadata models' >> + beam.Map(lambda model: ( + model.get_unversioned_instance_id(), model + ) + ) + ) + + all_version_history_models = ( + self.pipeline + | 'Get already existing 
ExplorationVersionHistoryModels' >> + ndb_io.GetModels( + exp_models.ExplorationVersionHistoryModel.get_all( + include_deleted=False + ) + ) + | 'Create key-value pairs with id and version history models' >> + beam.Map(lambda model: (model.exploration_id, model)) + ) + + model_groups = ( + ({ + 'all_exp_models': all_explorations, + 'exp_models_vlatest': all_explorations_vlatest, + 'snapshot_metadata_models': all_snapshot_metadata, + 'version_history_models': all_version_history_models + }) + | 'Group by key' >> beam.CoGroupByKey() + ) + + valid_model_groups = ( + model_groups + | 'Get rid of exploration id' >> + beam.Values() # pylint: disable=no-value-for-parameter + | 'Get formatted model groups' >> beam.Map( + self.convert_to_formatted_model_group + ) + | 'Filter valid model groups' >> beam.Filter( + lambda x: x is not None + ) + ) + + version_history_models = ( + valid_model_groups + | 'Create the version history models for each valid exploration' >> + beam.Map(self.create_version_history_models) + ) + + exps_having_invalid_change_list = ( + version_history_models + | 'Filter exps having invalid change list' >> + beam.Filter(lambda models: len(models[1]) == 0) + | 'Extract the exp ids having invalid change list' >> + beam.Map(lambda models: (models[0], models[2], models[3])) + ) + + exps_for_which_version_history_was_computed = ( + version_history_models + | 'Filter exps for which version history was computed' >> + beam.Filter(lambda models: len(models[1]) > 0) + | 'Extract the exp ids for which version history was computed' >> + beam.Map(lambda models: models[0]) + ) + + flattened_vh_models = ( + version_history_models + | 'Drop the exploration ids' >> + beam.Map(lambda models: models[1]) + | 'Flatten the models' >> beam.FlatMap(lambda x: x) + ) + + unused_put_result = ( + flattened_vh_models + | 'Save the models to the datastore' >> ndb_io.PutModels() + ) + + report_number_of_exps_queried = ( + all_explorations_vlatest + | 'Count queried explorations' >> + 
job_result_transforms.CountObjectsToJobRunResult('ALL EXPS') + ) + + report_exps_count_for_which_version_history_can_be_computed = ( + valid_model_groups + | 'Count exps for which version history can be computed' >> + job_result_transforms.CountObjectsToJobRunResult( + 'EXPS FOR WHICH VERSION HISTORY CAN BE COMPUTED' + ) + ) + + report_number_of_exps_with_invalid_change_list = ( + exps_having_invalid_change_list + | 'Count explorations having invalid change list' >> + job_result_transforms.CountObjectsToJobRunResult( + 'EXPS HAVING INVALID CHANGE LIST' + ) + ) + + report_details_of_exps_having_invalid_change_list = ( + exps_having_invalid_change_list + | 'Save info on explorations having invalid change list' >> + beam.Map(lambda error: job_run_result.JobRunResult.as_stderr( + 'Exploration %s has invalid change list. ' + 'Error: %s. Version: %s' % (error[0], error[1], error[2]) + )) + ) + + report_number_of_exps_for_which_version_history_was_computed = ( + exps_for_which_version_history_was_computed + | 'Count explorations for which version history was computed' >> + job_result_transforms.CountObjectsToJobRunResult( + 'EXPS FOR WHICH VERSION HISTORY CAN WAS COMPUTED' + ) + ) + + report_number_of_models_modified = ( + flattened_vh_models + | 'Count number of models created' >> + job_result_transforms.CountObjectsToJobRunResult( + 'CREATED OR MODIFIED VERSION HISTORY MODELS' + ) + ) + + return ( + ( + report_number_of_exps_queried, + report_exps_count_for_which_version_history_can_be_computed, + report_number_of_exps_with_invalid_change_list, + report_details_of_exps_having_invalid_change_list, + report_number_of_exps_for_which_version_history_was_computed, + report_number_of_models_modified + ) + | 'Flatten' >> beam.Flatten() + ) diff --git a/core/jobs/batch_jobs/exp_version_history_computation_job_test.py b/core/jobs/batch_jobs/exp_version_history_computation_job_test.py new file mode 100644 index 000000000000..766daa363c13 --- /dev/null +++ 
b/core/jobs/batch_jobs/exp_version_history_computation_job_test.py @@ -0,0 +1,992 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for jobs.batch_jobs.exp_version_history_computation_jobs.""" + +from __future__ import annotations + +from core import feconf +from core.domain import exp_domain +from core.domain import exp_services +from core.domain import translation_domain +from core.domain import user_services +from core.jobs import job_test_utils +from core.jobs.batch_jobs import exp_version_history_computation_job +from core.jobs.types import job_run_result +from core.platform import models +from core.tests import test_utils + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import exp_models + +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) +datastore_services = models.Registry.import_datastore_services() + + +class ComputeExplorationVersionHistoryJobTests( + test_utils.GenericTestBase, job_test_utils.JobTestBase +): + JOB_CLASS = ( + exp_version_history_computation_job.ComputeExplorationVersionHistoryJob + ) + + USER_1_EMAIL = 'user1@example.com' + USER_2_EMAIL = 'user2@example.com' + USER_1_USERNAME = 'user1' + USER_2_USERNAME = 'user2' + EXP_ID_1 = 'exp_1' + EXP_ID_2 = 'exp_2' + + def setUp(self) -> None: + super().setUp() + + self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) + 
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) + + self.user_1_id = user_services.get_user_id_from_username( + self.USER_1_USERNAME + ) + self.user_2_id = user_services.get_user_id_from_username( + self.USER_2_USERNAME + ) + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_creates_version_history_for_single_exp_with_valid_changes( + self + ) -> None: + assert self.user_1_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit message.') + version_history_keys = [ + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 1 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 2 + ) + ) + ] + # Deleting the version history models as they were created by + # exp_services while creating and updating the explorations. We want + # to test that the beam job can create the models from scratch. 
+ datastore_services.delete_multi(version_history_keys) + + version_history_models = datastore_services.get_multi( + version_history_keys + ) + for model in version_history_models: + assert model is None + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('ALL EXPS SUCCESS: 1'), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN BE COMPUTED SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN WAS COMPUTED SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + 'CREATED OR MODIFIED VERSION HISTORY MODELS SUCCESS: 2' + ) + ]) + + version_history_models = datastore_services.get_multi( + version_history_keys + ) + for model in version_history_models: + assert model is not None + + def test_create_version_history_for_exp_with_revert_commit( + self + ) -> None: + assert self.user_1_id is not None + exploration = self.save_new_valid_exploration( + self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exploration.next_content_id_index + ) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit message.') + exp_services.revert_exploration( + self.user_1_id, self.EXP_ID_1, 2, 1 + ) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'Another new state', + 'content_id_for_state_content': ( + 
content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit message.') + version_history_keys = [ + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 1 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 2 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 3 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 4 + ) + ) + ] + # Deleting the version history models as they were created by + # exp_services while creating and updating the explorations. We want + # to test that the beam job can create the models from scratch. 
+ datastore_services.delete_multi(version_history_keys) + + version_history_models = datastore_services.get_multi( + version_history_keys + ) + for model in version_history_models: + assert model is None + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('ALL EXPS SUCCESS: 1'), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN BE COMPUTED SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN WAS COMPUTED SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + 'CREATED OR MODIFIED VERSION HISTORY MODELS SUCCESS: 4' + ) + ]) + + version_history_models = datastore_services.get_multi( + version_history_keys + ) + for model in version_history_models: + assert model is not None + + def test_no_model_is_created_for_exp_with_invalid_revert_version( + self + ) -> None: + assert self.user_1_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit message.') + exp_services.revert_exploration( + self.user_1_id, self.EXP_ID_1, 2, 1 + ) + version_history_keys = [ + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 1 + ) + ), + datastore_services.Key( + 
exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 2 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 3 + ) + ) + ] + # Deleting the version history models as they were created by + # exp_services while creating and updating the explorations. We want + # to test that the beam job can create the models from scratch. + datastore_services.delete_multi(version_history_keys) + + # Having invalid change list is not possible if the exploration is + # updated using exp_services. Hence, we have to simulate the scenario + # manually by changing the commit logs. + snapshot_metadata_model = ( + exp_models.ExplorationSnapshotMetadataModel.get( + exp_models.ExplorationModel.get_snapshot_id( + self.EXP_ID_1, 3 + ) + ) + ) + snapshot_metadata_model.commit_cmds = [ + exp_domain.ExplorationChange({ + 'cmd': feconf.CMD_REVERT_COMMIT, + 'version_number': 4 + }).to_dict() + ] + snapshot_metadata_model.update_timestamps() + snapshot_metadata_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('ALL EXPS SUCCESS: 1'), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN BE COMPUTED SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + 'EXPS HAVING INVALID CHANGE LIST SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stderr( + 'Exploration exp_1 has invalid change list. Error: Reverting ' + 'to the version 4 which is out of the range [1, 2]. 
Version: 3' + ) + ]) + + version_history_models = datastore_services.get_multi( + version_history_keys + ) + for model in version_history_models: + assert model is None + + def test_creates_version_history_for_multiple_exps_with_valid_changes( + self + ) -> None: + assert self.user_1_id is not None + assert self.user_2_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + self.save_new_valid_exploration(self.EXP_ID_2, self.user_2_id) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + version_history_keys = [ + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 1 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 2 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_2, 1 + ) + ) + ] + # Deleting the version history models as they were created by + # exp_services while creating and updating the explorations. We want + # to test that the beam job can create the models from scratch. 
+ datastore_services.delete_multi(version_history_keys) + + version_history_models = datastore_services.get_multi( + version_history_keys + ) + for model in version_history_models: + assert model is None + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('ALL EXPS SUCCESS: 2'), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN BE COMPUTED SUCCESS: 2' + ), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN WAS COMPUTED SUCCESS: 2' + ), + job_run_result.JobRunResult.as_stdout( + 'CREATED OR MODIFIED VERSION HISTORY MODELS SUCCESS: 3' + ) + ]) + + version_history_models = datastore_services.get_multi( + version_history_keys + ) + for model in version_history_models: + assert model is not None + + def test_job_can_run_when_version_history_already_exists(self) -> None: + assert self.user_1_id is not None + assert self.user_2_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + self.save_new_valid_exploration(self.EXP_ID_2, self.user_2_id) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + exp_services.revert_exploration( + self.user_1_id, self.EXP_ID_1, 2, 1 + ) + version_history_keys = [ + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + 
exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 1 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 2 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 3 + ) + ), + datastore_services.Key( + exp_models.ExplorationVersionHistoryModel, + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_2, 1 + ) + ) + ] + + # We are not deleting the version history models this time. Also, + # they will be created while updating the exploration by exp_services. + version_history_models = datastore_services.get_multi( + version_history_keys + ) + for model in version_history_models: + assert model is not None + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('ALL EXPS SUCCESS: 2'), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN BE COMPUTED SUCCESS: 2' + ), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN WAS COMPUTED SUCCESS: 2' + ), + job_run_result.JobRunResult.as_stdout( + 'CREATED OR MODIFIED VERSION HISTORY MODELS SUCCESS: 4' + ) + ]) + + version_history_models = datastore_services.get_multi( + version_history_keys + ) + for model in version_history_models: + assert model is not None + + def test_ignore_changes_in_deprecated_properties(self) -> None: + assert self.user_1_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 
'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + snapshot_metadata_model = ( + exp_models.ExplorationSnapshotMetadataModel.get( + exp_models.ExplorationModel.get_snapshot_id( + self.EXP_ID_1, 2 + ) + ) + ) + snapshot_metadata_model.commit_cmds.append({ + 'cmd': 'edit_state_property', + 'state_name': 'A new state', + 'property_name': 'fallbacks', + 'new_value': 'foo', + }) + snapshot_metadata_model.update_timestamps() + snapshot_metadata_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('ALL EXPS SUCCESS: 1'), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN BE COMPUTED SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN WAS COMPUTED SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + 'CREATED OR MODIFIED VERSION HISTORY MODELS SUCCESS: 2' + ) + ]) + + def test_with_invalid_change_list(self) -> None: + assert self.user_1_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': 
content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + + # Corrupting the commit logs manually. + snapshot_metadata_model = ( + exp_models.ExplorationSnapshotMetadataModel.get( + exp_models.ExplorationModel.get_snapshot_id( + self.EXP_ID_1, 2 + ) + ) + ) + snapshot_metadata_model.commit_cmds.append({ + 'cmd': 'delete_state', + 'state_name': 'Some other state', + }) + snapshot_metadata_model.update_timestamps() + snapshot_metadata_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('ALL EXPS SUCCESS: 1'), + job_run_result.JobRunResult.as_stdout( + 'EXPS FOR WHICH VERSION HISTORY CAN BE COMPUTED SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + 'EXPS HAVING INVALID CHANGE LIST SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stderr( + 'Exploration exp_1 has invalid change list. ' + 'Error: \'Some other state\'. Version: 2' + ) + ]) + + def test_with_corrupted_snapshot_model(self) -> None: + assert self.user_1_id is not None + self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + + snapshot_class = exp_models.ExplorationSnapshotContentModel + snapshot_model = snapshot_class.get('%s%s%s' % (self.EXP_ID_1, '-', 1)) + snapshot_model.content = None + snapshot_model.update_timestamps() + snapshot_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('ALL EXPS SUCCESS: 1') + ]) + + +class VerifyVersionHistoryModelsJobTests( + test_utils.GenericTestBase, job_test_utils.JobTestBase +): + JOB_CLASS = ( + exp_version_history_computation_job.VerifyVersionHistoryModelsJob + ) + + USER_1_EMAIL = 'user1@example.com' + USER_2_EMAIL = 'user2@example.com' + USER_1_USERNAME = 'user1' + USER_2_USERNAME = 'user2' + EXP_ID_1 = 'exp_1' + EXP_ID_2 = 'exp_2' + + def setUp(self) -> None: + super().setUp() + + self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) + self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME) + + self.user_1_id = user_services.get_user_id_from_username( + 
self.USER_1_USERNAME + ) + self.user_2_id = user_services.get_user_id_from_username( + self.USER_2_USERNAME + ) + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_with_valid_version_history_models(self) -> None: + assert self.user_1_id is not None + assert self.user_2_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + self.save_new_valid_exploration('3', self.user_2_id) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + + exp = self.save_new_valid_exploration(self.EXP_ID_2, self.user_2_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + exp_services.update_exploration(self.user_2_id, self.EXP_ID_2, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + 
exp_services.update_exploration(self.user_2_id, '3', [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Introduction', + 'new_state_name': 'First state' + }) + ], 'A commit message.') + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout( + 'ALL EXPLORATIONS SUCCESS: 3' + ), + job_run_result.JobRunResult.as_stdout( + 'VERIFIED EXPLORATIONS SUCCESS: 3' + ) + ]) + + def test_with_invalid_version_history_models(self) -> None: + assert self.user_1_id is not None + assert self.user_2_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + self.save_new_valid_exploration(self.EXP_ID_2, self.user_2_id) + self.save_new_valid_exploration('3', self.user_2_id) + exp4 = self.save_new_valid_exploration('4', self.user_2_id) + self.save_new_valid_exploration('5', self.user_2_id) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + exp_services.update_exploration(self.user_2_id, self.EXP_ID_2, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Introduction', + 'new_state_name': 'First state' + }) + ], 'A commit message.') + exp_services.update_exploration(self.user_2_id, '3', [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Introduction', + 
'new_state_name': 'First state' + }) + ], 'A commit message.') + content_id_generator = translation_domain.ContentIdGenerator( + exp4.next_content_id_index + ) + exp_services.update_exploration(self.user_1_id, '4', [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + exp_services.update_exploration(self.user_2_id, '5', [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_RENAME_STATE, + 'old_state_name': 'Introduction', + 'new_state_name': 'Second state' + }) + ], 'A commit message.') + + # Manually corrupting the version history model. 
+ vh_model_1 = exp_models.ExplorationVersionHistoryModel.get( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_1, 2 + ) + ) + vh_model_1.state_version_history['A new state'][ + 'state_name_in_previous_version'] = 'Previous state' + vh_model_2 = exp_models.ExplorationVersionHistoryModel.get( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + self.EXP_ID_2, 2 + ) + ) + vh_model_2.state_version_history['First state'][ + 'previously_edited_in_version'] = 0 + vh_model_2.state_version_history['First state'][ + 'state_name_in_previous_version'] = 'Previous state' + vh_model_3 = exp_models.ExplorationVersionHistoryModel.get( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + '3', 2 + ) + ) + del vh_model_3.state_version_history['First state'] + vh_model_4 = exp_models.ExplorationVersionHistoryModel.get( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + '4', 2 + ) + ) + del vh_model_4.state_version_history['A new state'] + vh_model_5 = exp_models.ExplorationVersionHistoryModel.get( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + '5', 2 + ) + ) + vh_model_5.state_version_history['Second state'][ + 'state_name_in_previous_version'] = 'First state' + exp_models.ExplorationVersionHistoryModel.update_timestamps_multi([ + vh_model_1, vh_model_2, vh_model_3, vh_model_4, vh_model_5 + ]) + exp_models.ExplorationVersionHistoryModel.put_multi([ + vh_model_1, vh_model_2, vh_model_3, vh_model_4, vh_model_5 + ]) + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout( + 'ALL EXPLORATIONS SUCCESS: 5' + ), + job_run_result.JobRunResult.as_stdout( + 'UNVERIFIED EXPLORATIONS SUCCESS: 5' + ), + job_run_result.JobRunResult.as_stderr( + 'Version history for exploration with ID %s was not ' + 'created correctly' % (self.EXP_ID_1) + ), + job_run_result.JobRunResult.as_stderr( + 'Version history for exploration with ID %s was not ' + 'created correctly' % (self.EXP_ID_2) + ), + 
job_run_result.JobRunResult.as_stderr( + 'Version history for exploration with ID %s was not ' + 'created correctly' % ('3') + ), + job_run_result.JobRunResult.as_stderr( + 'Version history for exploration with ID %s was not ' + 'created correctly' % ('4') + ), + job_run_result.JobRunResult.as_stderr( + 'Version history for exploration with ID %s was not ' + 'created correctly' % ('5') + ) + ]) + + def test_with_corrupted_snapshot_model(self) -> None: + assert self.user_1_id is not None + self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + + snapshot_class = exp_models.ExplorationSnapshotContentModel + snapshot_model = snapshot_class.get('%s%s%s' % (self.EXP_ID_1, '-', 1)) + snapshot_model.content = None + snapshot_model.update_timestamps() + snapshot_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout( + 'ALL EXPLORATIONS SUCCESS: 1' + ) + ]) + + def test_ignore_changes_in_deprecated_properties(self) -> None: + assert self.user_1_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + snapshot_metadata_model = ( + exp_models.ExplorationSnapshotMetadataModel.get( + exp_models.ExplorationModel.get_snapshot_id( + self.EXP_ID_1, 2 + ) + ) + ) + snapshot_metadata_model.commit_cmds.append({ + 
'cmd': 'edit_state_property', + 'state_name': 'A new state', + 'property_name': 'fallbacks', + 'new_value': 'foo', + }) + snapshot_metadata_model.update_timestamps() + snapshot_metadata_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout( + 'ALL EXPLORATIONS SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + 'VERIFIED EXPLORATIONS SUCCESS: 1' + ) + ]) + + +class DeleteExplorationVersionHistoryModelsJobTest( + test_utils.GenericTestBase, job_test_utils.JobTestBase +): + """Unit tests for DeleteExplorationVersionHistoryModelsJob.""" + + JOB_CLASS = ( + exp_version_history_computation_job. + DeleteExplorationVersionHistoryModelsJob + ) + + USER_1_EMAIL = 'user1@example.com' + USER_1_USERNAME = 'user1' + EXP_ID_1 = 'exp_1' + + def setUp(self) -> None: + super().setUp() + self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME) + self.user_1_id = user_services.get_user_id_from_username( + self.USER_1_USERNAME + ) + + def test_with_no_vh_models(self) -> None: + self.assert_job_output_is_empty() + + def test_with_vh_models(self) -> None: + assert self.user_1_id is not None + exp = self.save_new_valid_exploration(self.EXP_ID_1, self.user_1_id) + content_id_generator = translation_domain.ContentIdGenerator( + exp.next_content_id_index + ) + exp_services.update_exploration(self.user_1_id, self.EXP_ID_1, [ + exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_ADD_STATE, + 'state_name': 'A new state', + 'content_id_for_state_content': ( + content_id_generator.generate( + translation_domain.ContentType.CONTENT) + ), + 'content_id_for_default_outcome': ( + content_id_generator.generate( + translation_domain.ContentType.DEFAULT_OUTCOME) + ) + }), + exp_domain.ExplorationChange({ + 'cmd': 'edit_exploration_property', + 'property_name': 'next_content_id_index', + 'new_value': content_id_generator.next_content_id_index + }) + ], 'A commit messages.') + + self.assert_job_output_is([ + job_run_result.JobRunResult.as_stdout('SUCCESS: 2') + ]) 
diff --git a/core/jobs/batch_jobs/mailchimp_population_jobs.py b/core/jobs/batch_jobs/mailchimp_population_jobs.py new file mode 100644 index 000000000000..8744f1a28e64 --- /dev/null +++ b/core/jobs/batch_jobs/mailchimp_population_jobs.py @@ -0,0 +1,286 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Job to populate the mailchimp db with existing users.""" + +from __future__ import annotations + +import ast +import logging + +from core import feconf +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam +import mailchimp3 +from mailchimp3 import mailchimpclient +import result + +from typing import Iterable, List, Optional + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import config_models + from mypy_imports import secrets_services + from mypy_imports import user_models + +(config_models, user_models) = models.Registry.import_models([ + models.Names.CONFIG, models.Names.USER +]) +secrets_services = models.Registry.import_secrets_services() + + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that CombineFn class is of type Any. Thus to avoid MyPy's error +# (Class cannot subclass 'CombineFn' (has type 'Any')), we added an +# ignore here. 
+class CombineItems(beam.CombineFn): # type: ignore[misc] + """CombineFn for combining all user email, status tuples.""" + + def create_accumulator(self) -> List[str]: + """Base accumulator where the tuples are added.""" + return [] + + def add_input(self, accumulator: List[str], email: str) -> List[str]: + """Append each tuple to the accumulator list.""" + accumulator.append(email) + return accumulator + + def merge_accumulators( + self, accumulators: Iterable[List[str]] + ) -> List[str]: + """Merging accumulators is just combining both of them into a single + list. + """ + output_accumulator = [] + for accumulator in accumulators: + output_accumulator.extend(accumulator) + return output_accumulator + + def extract_output(self, accumulator: List[str]) -> List[str]: + """Output is the accumulator itself.""" + return accumulator + + +def _get_mailchimp_class() -> mailchimp3.MailChimp: + """Returns the mailchimp api class. This is separated into a separate + function to facilitate testing. + NOTE: No other functionalities should be added to this function. + + Returns: + Mailchimp. A mailchimp class instance with the API key and username + initialized. + """ + + # The following is a class initialized in the library with the API key and + # username and hence cannot be tested directly. The mailchimp functions are + # tested with a mock class. + mailchimp_api_key: Optional[str] = secrets_services.get_secret( + 'MAILCHIMP_API_KEY') + if not mailchimp_api_key: + logging.error('Mailchimp API key is not available.') + + return mailchimp3.MailChimp( # pragma: no cover + mc_api=mailchimp_api_key, + mc_user=feconf.MAILCHIMP_USERNAME + ) + + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. 
+class SendBatchMailchimpRequest(beam.DoFn): # type: ignore[misc] + """DoFn to send batch mailchimp request for 500 users at a time.""" + + def process( + self, emails: List[str], batch_index_dict: int, test_run: bool + ) -> result.Result[str]: + """Add 500 users at a time, who have subscribed for newsletters, + to the MailChimp DB. + + Args: + emails: list(str). List of emails of users subscribed to + newsletters. + batch_index_dict: int. Current batch index. + test_run: bool. Whether to use mailchimp API or not. To be set to + TRUE only when run from a non-production server for testing. + + Raises: + Exception. Exception thrown by the api is raised. + + Yields: + JobRunResult. Job run result which is either 'Ok' or an error with + corresponding error message. + """ + sorted_emails = sorted(emails) + selected_emails = sorted_emails[ + batch_index_dict * 500: (batch_index_dict + 1) * 500] + + if test_run: + # There is a max limit of 1500 bytes for job output. Hence, only + # returning first and last 5 emails in batch for testing. 
+ yield result.Ok( + ','.join(selected_emails[: 5] + selected_emails[-5:])) + return + mailchimp_data = [] + + client = _get_mailchimp_class() + for email in selected_emails: + mailchimp_data.append({ + 'email_address': email, + 'status': 'subscribed' + }) + + try: + response = client.lists.update_members( + feconf.MAILCHIMP_AUDIENCE_ID, + {'members': mailchimp_data, 'update_existing': False}) + except mailchimpclient.MailChimpError as error: + error_message = ast.literal_eval(str(error)) + yield result.Err(error_message['detail']) + return + + response_emails_count = ( + len(response['new_members']) + len(response['updated_members'])) + source_emails_count = len(selected_emails) + if response_emails_count == source_emails_count: + yield result.Ok('Request successful') + else: + failed_emails = [] + for user in response['errors']: + failed_emails.append(user['email_address']) + yield result.Err('User update failed for: %s' % failed_emails) + + +class MailchimpPopulateJob(base_jobs.JobBase): + """One-off job for populating the mailchimp db.""" + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + # Pcollection that returns the relevant config property with batch + # index. + config_property = ( + self.pipeline + | 'Get all config properties' >> ndb_io.GetModels( + config_models.ConfigPropertyModel.get_all()) + | 'Get the batch_index_for_mailchimp property value' >> beam.Filter( + lambda model: model.id == 'batch_index_for_mailchimp') + | 'Get value' >> beam.Map(lambda model: model.value) + ) + + batch_index_dict = beam.pvalue.AsSingleton(config_property) + + # PCollection with all user ids that have opted in for email + # newsletters. 
+ relevant_user_ids = ( + self.pipeline + | 'Get all UserEmailPreferencesModel' >> ndb_io.GetModels( + user_models.UserEmailPreferencesModel.get_all().filter( + user_models.UserEmailPreferencesModel.site_updates == True # pylint: disable=singleton-comparison + )) + | 'Extract user ID' >> beam.Map( + lambda preferences_model: preferences_model.id) + ) + + valid_user_ids = beam.pvalue.AsIter(relevant_user_ids) + + # PCollection of all user emails opted in for newsletters. + relevant_user_emails = ( + self.pipeline + | 'Get all user settings models' >> ndb_io.GetModels( + user_models.UserSettingsModel.get_all()) + | 'Filter user models' >> ( + beam.Filter( + lambda model, ids: model.id in ids, ids=valid_user_ids)) + | 'Get email' >> (beam.Map(lambda model: model.email)) + ) + + mailchimp_results = ( + relevant_user_emails + # A large batch size is given so that all emails are included in a + # single list. + | 'Combine into a list' >> beam.CombineGlobally(CombineItems()) + | 'Send mailchimp request for current batch' >> beam.ParDo( + SendBatchMailchimpRequest(), batch_index_dict=batch_index_dict, + test_run=False) + | 'Get final result' >> beam.Map( + lambda result: job_run_result.JobRunResult.as_stdout( + result.value)) + ) + + return mailchimp_results + + +class MockMailchimpPopulateJob(base_jobs.JobBase): + """Test one-off job for populating the mailchimp db.""" + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + # Pcollection that returns the relevant config property with batch + # index. 
+ config_property = ( + self.pipeline + | 'Get all config properties' >> ndb_io.GetModels( + config_models.ConfigPropertyModel.get_all()) + | 'Get the batch_index_for_mailchimp property value' >> beam.Filter( + lambda model: model.id == 'batch_index_for_mailchimp') + | 'Get value' >> beam.Map(lambda model: model.value) + ) + + batch_index_dict = beam.pvalue.AsSingleton(config_property) + + # PCollection with all user ids that have opted in for email + # newsletters. + relevant_user_ids = ( + self.pipeline + | 'Get all UserEmailPreferencesModel' >> ndb_io.GetModels( + user_models.UserEmailPreferencesModel.get_all().filter( + user_models.UserEmailPreferencesModel.site_updates == True # pylint: disable=singleton-comparison + )) + | 'Extract user ID' >> beam.Map( + lambda preferences_model: preferences_model.id) + ) + + valid_user_ids = beam.pvalue.AsIter(relevant_user_ids) + + # PCollection of all user emails opted in for newsletters. + relevant_user_emails = ( + self.pipeline + | 'Get all user settings models' >> ndb_io.GetModels( + user_models.UserSettingsModel.get_all()) + | 'Filter user models' >> ( + beam.Filter( + lambda model, ids: model.id in ids, ids=valid_user_ids)) + | 'Get email' >> (beam.Map(lambda model: model.email)) + ) + + mailchimp_results = ( + relevant_user_emails + # A large batch size is given so that all emails are included in a + # single list. 
+ | 'Combine into a list' >> beam.CombineGlobally(CombineItems()) + | 'Send mailchimp request for current batch' >> beam.ParDo( + SendBatchMailchimpRequest(), batch_index_dict=batch_index_dict, + test_run=True) + | 'Get final result' >> beam.Map( + lambda result: job_run_result.JobRunResult.as_stdout( + result.value)) + ) + + return mailchimp_results diff --git a/core/jobs/batch_jobs/mailchimp_population_jobs_test.py b/core/jobs/batch_jobs/mailchimp_population_jobs_test.py new file mode 100644 index 000000000000..20a152a06cf6 --- /dev/null +++ b/core/jobs/batch_jobs/mailchimp_population_jobs_test.py @@ -0,0 +1,386 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for jobs.mailchimp_population_jobs.""" + +from __future__ import annotations + +import datetime + +from core import feconf +from core.jobs import job_test_utils +from core.jobs.batch_jobs import mailchimp_population_jobs +from core.jobs.types import job_run_result +from core.platform import models + +import mailchimp3 +from mailchimp3 import mailchimpclient + +from typing import Dict, Final, List, Mapping, Type, TypedDict, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import config_models + from mypy_imports import secrets_services + from mypy_imports import user_models + +(config_models, user_models) = models.Registry.import_models([ + models.Names.CONFIG, models.Names.USER +]) +secrets_services = models.Registry.import_secrets_services() + + +class MailChimpListsDataDict(TypedDict): + """Dictionary representation for data argument of update_members method.""" + + members: List[Dict[str, str]] + update_existing: bool + + +class MailchimpPopulateJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + mailchimp_population_jobs.MailchimpPopulateJob + ] = mailchimp_population_jobs.MailchimpPopulateJob + + USER_ID_PREFIX: Final = 'user_id_' + DATETIME: Final = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d') + + class MockMailchimpClass: + """Class to mock Mailchimp class.""" + + class MailchimpLists: + """Class to mock Mailchimp lists object.""" + + def __init__(self) -> None: + self.parent_emails: List[str] = [] + + def update_members( + self, + _audience_id: str, + data: MailChimpListsDataDict + ) -> Mapping[str, Union[List[str], List[Dict[str, str]]]]: + """Mocks the update_members function of the mailchimp api. + + Args: + _audience_id: str. Audience Id of the mailchimp list. + data: list(dict(str,str)). Payload received. + + Returns: + dict. Returns correct dict based on whether invalid email + was received or not. + + Raises: + MailchimpError. Error 404 to mock API server error. 
+ """ + emails = [] + for user in data['members']: + emails.append(user['email_address']) + + self.parent_emails = emails + + if 'invalid_email' in emails: + updated_members: List[str] = [] + invalid_email_dict: Dict[ + str, Union[List[str], List[Dict[str, str]]] + ] = { + 'new_members': emails[1:], + 'updated_members': updated_members, + 'errors': [{ + 'email_address': 'invalid_email' + }] + } + return invalid_email_dict + + # Mocking a request issue by throwing an exception for this + # particular case. + if 'errored_email' in emails: + raise mailchimpclient.MailChimpError({ + 'status': 404, 'title': 'Server Issue', + 'detail': 'Server Issue' + }) + + valid_email_dict: Dict[str, List[str]] = { + 'new_members': emails, + 'updated_members': [] + } + return valid_email_dict + + def __init__(self) -> None: + self.lists = self.MailchimpLists() + + def setUp(self) -> None: + super().setUp() + self.enabled_user_emails = [] + for i in range(0, 1200): + user_id = '%s%d' % (self.USER_ID_PREFIX, i) + user_email = '%s@email.com' % user_id + + if i % 2: + self.enabled_user_emails.append(user_email) + + user_model = self.create_model( + user_models.UserSettingsModel, + id=user_id, + email=user_email + ) + user_model.update_timestamps() + user_model.put() + + # Half of the users have emails enabled. 
+ preferences_model = self.create_model( + user_models.UserEmailPreferencesModel, + id=user_id, + site_updates=bool(i % 2) + ) + preferences_model.update_timestamps() + preferences_model.put() + + self.enabled_user_emails.sort() + self.first_batch_emails = self.enabled_user_emails[0:500] + self.second_batch_emails = self.enabled_user_emails[500:1000] + + config_model = self.create_model( + config_models.ConfigPropertyModel, + id='batch_index_for_mailchimp', + value=0 + ) + config_model.update_timestamps() + config_model.commit('user_id_0', []) + + self.swap_audience_id = self.swap( + feconf, 'MAILCHIMP_AUDIENCE_ID', 'audience_id') + + def test_job_runs_correctly_for_first_batch(self) -> None: + mailchimp = self.MockMailchimpClass() + swapped_mailchimp = lambda: mailchimp + swap_mailchimp_context = self.swap( + mailchimp_population_jobs, '_get_mailchimp_class', + swapped_mailchimp) + + with swap_mailchimp_context, self.swap_audience_id: + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='Request successful') + ]) + + self.assertItemsEqual( + mailchimp.lists.parent_emails, self.first_batch_emails) + + def test_job_runs_correctly_for_second_batch(self) -> None: + config_model = self.create_model( + config_models.ConfigPropertyModel, + id='batch_index_for_mailchimp', + value=1 + ) + config_model.update_timestamps() + config_model.commit('user_id_0', []) + + mailchimp = self.MockMailchimpClass() + swapped_mailchimp = lambda: mailchimp + swap_mailchimp_context = self.swap( + mailchimp_population_jobs, '_get_mailchimp_class', + swapped_mailchimp) + + with swap_mailchimp_context, self.swap_audience_id: + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='Request successful') + ]) + + self.assertItemsEqual( + mailchimp.lists.parent_emails, self.second_batch_emails) + + def test_job_fails_with_mailchimp_api_key_missing(self) -> None: + config_model = self.create_model( + config_models.ConfigPropertyModel, + id='batch_index_for_mailchimp', + 
value=1 + ) + config_model.update_timestamps() + config_model.commit('user_id_0', []) + + mailchimp = self.MockMailchimpClass() + swap_api_key_secrets_return_none = self.swap_with_checks( + secrets_services, + 'get_secret', + lambda _: None, + expected_args=[('MAILCHIMP_API_KEY',)] + ) + swap_mailchimp_class = self.swap_to_always_return( + mailchimp3, 'MailChimp', mailchimp) + + with swap_api_key_secrets_return_none, self.swap_audience_id: + with swap_mailchimp_class: + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='Request successful') + ]) + + self.assertItemsEqual( + mailchimp.lists.parent_emails, self.second_batch_emails) + + def test_job_runs_correctly_with_invalid_email(self) -> None: + user_model = self.create_model( + user_models.UserSettingsModel, + id='user_id', + email='invalid_email' + ) + user_model.update_timestamps() + user_model.put() + + # Half of the users have emails enabled. + preferences_model = self.create_model( + user_models.UserEmailPreferencesModel, + id='user_id', + site_updates=True + ) + preferences_model.update_timestamps() + preferences_model.put() + + mailchimp = self.MockMailchimpClass() + swapped_mailchimp = lambda: mailchimp + swap_mailchimp_context = self.swap( + mailchimp_population_jobs, '_get_mailchimp_class', + swapped_mailchimp) + + with swap_mailchimp_context, self.swap_audience_id: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='User update failed for: [\'invalid_email\']') + ]) + + def test_job_fails_correctly_with_request_error(self) -> None: + user_model = self.create_model( + user_models.UserSettingsModel, + id='user_id', + email='errored_email' + ) + user_model.update_timestamps() + user_model.put() + + # Half of the users have emails enabled. 
+ preferences_model = self.create_model( + user_models.UserEmailPreferencesModel, + id='user_id', + site_updates=True + ) + preferences_model.update_timestamps() + preferences_model.put() + + mailchimp = self.MockMailchimpClass() + swapped_mailchimp = lambda: mailchimp + swap_mailchimp_context = self.swap( + mailchimp_population_jobs, '_get_mailchimp_class', + swapped_mailchimp) + + with swap_mailchimp_context, self.swap_audience_id: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='Server Issue') + ]) + + +class MockMailchimpPopulateJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + mailchimp_population_jobs.MockMailchimpPopulateJob + ] = mailchimp_population_jobs.MockMailchimpPopulateJob + + USER_ID_PREFIX: Final = 'user_id_' + DATETIME: Final = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d') + + def setUp(self) -> None: + super().setUp() + self.enabled_user_emails = [] + for i in range(0, 1200): + user_id = '%s%d' % (self.USER_ID_PREFIX, i) + user_email = '%s@email.com' % user_id + + if i % 2: + self.enabled_user_emails.append(user_email) + + user_model = self.create_model( + user_models.UserSettingsModel, + id=user_id, + email=user_email + ) + user_model.update_timestamps() + user_model.put() + + # Half of the users have emails enabled. 
+ preferences_model = self.create_model( + user_models.UserEmailPreferencesModel, + id=user_id, + site_updates=bool(i % 2) + ) + preferences_model.update_timestamps() + preferences_model.put() + + self.enabled_user_emails = sorted(self.enabled_user_emails) + self.first_batch_emails = self.enabled_user_emails[0:500] + self.second_batch_emails = self.enabled_user_emails[500:1000] + + config_model = self.create_model( + config_models.ConfigPropertyModel, + id='batch_index_for_mailchimp', + value=0 + ) + config_model.update_timestamps() + config_model.commit('user_id_0', []) + + def test_job_runs_correctly_for_first_batch(self) -> None: + expected_emails = [ + 'user_id_1001@email.com', + 'user_id_1003@email.com', + 'user_id_1005@email.com', + 'user_id_1007@email.com', + 'user_id_1009@email.com', + 'user_id_813@email.com', + 'user_id_815@email.com', + 'user_id_817@email.com', + 'user_id_819@email.com', + 'user_id_81@email.com', + ] + + expected_output = ','.join(expected_emails) + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout=expected_output) + ]) + + def test_job_runs_correctly_for_second_batch(self) -> None: + config_model = self.create_model( + config_models.ConfigPropertyModel, + id='batch_index_for_mailchimp', + value=1 + ) + config_model.update_timestamps() + config_model.commit('user_id_0', []) + + expected_emails = [ + 'user_id_821@email.com', + 'user_id_823@email.com', + 'user_id_825@email.com', + 'user_id_827@email.com', + 'user_id_829@email.com', + 'user_id_995@email.com', + 'user_id_997@email.com', + 'user_id_999@email.com', + 'user_id_99@email.com', + 'user_id_9@email.com' + ] + + expected_output = ','.join(expected_emails) + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout=expected_output) + ]) diff --git a/core/jobs/batch_jobs/math_interactions_audit_jobs.py b/core/jobs/batch_jobs/math_interactions_audit_jobs.py new file mode 100644 index 000000000000..123dcd3f4405 --- /dev/null +++ 
b/core/jobs/batch_jobs/math_interactions_audit_jobs.py @@ -0,0 +1,146 @@ +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Audit jobs for math interactions: AlgebraicExpressionInput, +NumericExpressionInput, MathEquationInput. +""" + +from __future__ import annotations + +from core import feconf +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam + +from typing import List, Tuple + +MYPY = False +if MYPY: # pragma: no cover + # Here, state_domain is imported only for type checking. + from core.domain import state_domain + from mypy_imports import exp_models + +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) + + +class FindMathExplorationsWithRulesJob(base_jobs.JobBase): + """Finds explorations that use at least one of the math interactions + and accumulates the output along with the rules. + + Expected output: + (exp_id_1, state_name_1, [rule_type_1, rule_type_2, ...]) + (exp_id_2, state_name_4, [rule_type_1, rule_type_2, ...]) + ... 
+ """ + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + + exp_models_pcoll = ( + self.pipeline + | 'Get all ExplorationModels' >> ndb_io.GetModels( + exp_models.ExplorationModel.get_all() + ) + ) + + exp_models_filtered = ( + exp_models_pcoll + | 'Filter Math ExplorationModels' >> beam.Filter( + self.contains_math_interactions + ) + ) + + exp_models_with_states = ( + exp_models_filtered + | 'Mapping exp_ids with states' >> ( + beam.FlatMap(self.flat_map_exp_with_states) + ) + ) + + exp_models_with_states_filtered = ( + exp_models_with_states + | 'Filtering out states without math interactions' >> ( + beam.Filter( + lambda tup: tup[2][ + 'interaction']['id'] in feconf.MATH_INTERACTION_IDS + ) + ) + ) + + exp_models_with_states_and_rules = ( + exp_models_with_states_filtered + | 'Mapping with rule types list' >> ( + beam.Map(self.map_with_rule_types) + ) + ) + + return ( + exp_models_with_states_and_rules + | 'Final output' >> beam.Map(job_run_result.JobRunResult.as_stdout) + ) + + def contains_math_interactions( + self, model: exp_models.ExplorationModel + ) -> bool: + """Checks if the exploration contains any state with any of the + math interactions. + + Args: + model: ExplorationModel. Model instance to be checked. + + Returns: + bool. Whether the exploration contains math interactions. + """ + return any( + state_dict['interaction']['id'] in feconf.MATH_INTERACTION_IDS + for state_dict in model.states.values()) + + def flat_map_exp_with_states( + self, model: exp_models.ExplorationModel + ) -> List[Tuple[str, str, state_domain.StateDict]]: + """Maps exploration model with it's states data. + + Args: + model: ExplorationModel. Model instance to be mapped. + + Returns: + List[Tuple[str, str, dict]]. List of tuples + (exp_id, state_name, state_dict). 
+ """ + return [ + (model.id, state_name, state_dict) + for state_name, state_dict in model.states.items() + ] + + def map_with_rule_types( + self, tup: Tuple[str, str, state_domain.StateDict] + ) -> Tuple[str, str, List[str]]: + """Maps state tuple with it's rule types. + + Args: + tup: Tuple[str, str, dict]. State tuple to be modified. + + Returns: + Tuple[str, str, List[str]]. Mapped tuple + (exp_id, state_name, list of rules). + """ + answer_groups = tup[2]['interaction']['answer_groups'] + rule_types = [] + for answer_group in answer_groups: + for rule_spec in answer_group['rule_specs']: + rule_types.append(rule_spec['rule_type']) + + return (tup[0], tup[1], rule_types) diff --git a/core/jobs/batch_jobs/math_interactions_audit_jobs_test.py b/core/jobs/batch_jobs/math_interactions_audit_jobs_test.py new file mode 100644 index 000000000000..e15e0709f6fb --- /dev/null +++ b/core/jobs/batch_jobs/math_interactions_audit_jobs_test.py @@ -0,0 +1,213 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for jobs.batch_jobs.math_interactions_audit_jobs.""" + +from __future__ import annotations + +from core.domain import state_domain +from core.jobs import job_test_utils +from core.jobs.batch_jobs import math_interactions_audit_jobs +from core.jobs.types import job_run_result +from core.platform import models + +from typing import Final, Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import exp_models + +(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION]) + +datastore_services = models.Registry.import_datastore_services() + + +class FindMathExplorationsWithRulesJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + math_interactions_audit_jobs.FindMathExplorationsWithRulesJob + ] = math_interactions_audit_jobs.FindMathExplorationsWithRulesJob + + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' + EXP_3_ID: Final = 'exp_3_id' + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_job_finds_math_explorations_with_rules(self) -> None: + exp_model_1 = self.create_model( + exp_models.ExplorationModel, + id=self.EXP_1_ID, + title='exploration 1 title', + category='category', + objective='objective', + language_code='cs', + init_state_name='state', + states_schema_version=48, + states={ + 'init_state': state_domain.State.create_default_state( + 'state', 'content_0', 'default_outcome_1', + is_initial_state=True + ).to_dict(), + 'alg_state': state_domain.State.create_default_state( + 'state', 'content_2', 'default_outcome_3', + is_initial_state=True + ).to_dict(), + 'eq_state': state_domain.State.create_default_state( + 'state', 'content_4', 'default_outcome_5', + is_initial_state=True + ).to_dict(), + 'end_state': state_domain.State.create_default_state( + 'state', 'content_6', 'default_outcome_7', + is_initial_state=True + ).to_dict(), + }, + next_content_id_index=8 + ) + 
exp_model_1.states['alg_state']['interaction']['id'] = ( + 'AlgebraicExpressionInput') + exp_model_1.states['alg_state']['interaction']['answer_groups'] = [ + { + 'rule_specs': [{ + 'inputs': { + 'x': 'a + b' + }, + 'rule_type': 'IsEquivalentTo' + }, { + 'inputs': { + 'x': 'a - b' + }, + 'rule_type': 'ContainsSomeOf' + }] + } + ] + exp_model_1.states['eq_state']['interaction']['id'] = ( + 'MathEquationInput') + exp_model_1.states['eq_state']['interaction']['answer_groups'] = [ + { + 'rule_specs': [{ + 'inputs': { + 'x': 'x = y', + 'y': 'both' + }, + 'rule_type': 'MatchesExactlyWith' + }] + } + ] + exp_model_1.update_timestamps() + + exp_model_2 = self.create_model( + exp_models.ExplorationModel, + id=self.EXP_2_ID, + title='exploration 2 title', + category='category', + objective='objective', + language_code='cs', + init_state_name='state', + states_schema_version=48, + states={ + 'init_state': state_domain.State.create_default_state( + 'state', 'content_0', 'default_outcome_1', + is_initial_state=True + ).to_dict(), + 'num_state': state_domain.State.create_default_state( + 'state', 'content_2', 'default_outcome_3', + is_initial_state=True + ).to_dict(), + 'end_state': state_domain.State.create_default_state( + 'state', 'content_4', 'default_outcome_5', + is_initial_state=True + ).to_dict() + }, + next_content_id_index=6 + ) + exp_model_2.states['num_state']['interaction']['id'] = ( + 'NumericExpressionInput') + exp_model_2.states['num_state']['interaction']['answer_groups'] = [ + { + 'rule_specs': [{ + 'inputs': { + 'x': '1.2 + 3' + }, + 'rule_type': 'MatchesExactlyWith' + }, { + 'inputs': { + 'x': '1 - 2' + }, + 'rule_type': 'OmitsSomeOf' + }] + } + ] + exp_model_2.update_timestamps() + + exp_model_3 = self.create_model( + exp_models.ExplorationModel, + id=self.EXP_3_ID, + title='exploration 3 title', + category='category', + objective='objective', + language_code='cs', + init_state_name='state', + states_schema_version=48, + states={ + 'init_state': 
state_domain.State.create_default_state( + 'state', 'content_0', 'default_outcome_1', + is_initial_state=True + ).to_dict(), + 'text_state': state_domain.State.create_default_state( + 'state', 'content_2', 'default_outcome_3', + is_initial_state=True + ).to_dict(), + 'end_state': state_domain.State.create_default_state( + 'state', 'content_4', 'default_outcome_5', + is_initial_state=True + ).to_dict() + }, + next_content_id_index=6 + ) + + exp_model_3.states['text_state']['interaction']['id'] = ('TextInput') + exp_model_3.states['text_state']['interaction']['answer_groups'] = [{ + 'rule_specs': [{ + 'rule_type': 'CaseSensitiveEquals', + 'inputs': {'x': ''} + }] + }] + exp_model_3.update_timestamps() + + datastore_services.put_multi([exp_model_1, exp_model_2, exp_model_3]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout=( + '(\'exp_1_id\', \'alg_state\', ' + '[\'IsEquivalentTo\', \'ContainsSomeOf\'])' + ) + ), + job_run_result.JobRunResult( + stdout=( + '(\'exp_1_id\', \'eq_state\', [\'MatchesExactlyWith\'])' + ) + ), + job_run_result.JobRunResult( + stdout=( + '(\'exp_2_id\', \'num_state\', ' + '[\'MatchesExactlyWith\', \'OmitsSomeOf\'])' + ) + ) + ]) diff --git a/core/jobs/batch_jobs/model_validation_jobs.py b/core/jobs/batch_jobs/model_validation_jobs.py index 5a497fcfc392..2565234685a4 100644 --- a/core/jobs/batch_jobs/model_validation_jobs.py +++ b/core/jobs/batch_jobs/model_validation_jobs.py @@ -20,32 +20,45 @@ import collections -from core import python_utils from core.jobs import base_jobs from core.jobs import job_utils from core.jobs.io import ndb_io from core.jobs.transforms.validation import base_validation from core.jobs.transforms.validation import base_validation_registry from core.jobs.types import base_validation_errors +from core.jobs.types import model_property from core.platform import models import apache_beam as beam +from typing import Dict, FrozenSet, Iterable, Iterator, List, Set, Tuple, Type + +MYPY = False +if 
MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) + datastore_services = models.Registry.import_datastore_services() -AUDIT_DO_FN_TYPES_BY_KIND = ( - base_validation_registry.get_audit_do_fn_types_by_kind()) -KIND_BY_INDEX = tuple(AUDIT_DO_FN_TYPES_BY_KIND.keys()) +AUDIT_DO_FN_TYPES_BY_KIND: Dict[str, FrozenSet[Type[beam.DoFn]]] = ( + base_validation_registry.get_audit_do_fn_types_by_kind() +) +KIND_BY_INDEX: Tuple[str, ...] = tuple(AUDIT_DO_FN_TYPES_BY_KIND.keys()) # Type is: dict(str, tuple(tuple(ModelProperty, tuple(str)))). Tuples of type # (ModelProperty, tuple(kind of models)), grouped by the kind of model the # properties belong to. -ID_REFERENCING_PROPERTIES_BY_KIND_OF_POSSESSOR = ( +ID_REFERENCING_PROPERTIES_BY_KIND_OF_POSSESSOR: Dict[ + str, Tuple[Tuple[model_property.ModelProperty, Tuple[str, ...]], ...] +] = ( base_validation_registry. - get_id_referencing_properties_by_kind_of_possessor()) + get_id_referencing_properties_by_kind_of_possessor() +) # Type is: set(str). All model kinds referenced by one or more properties. -ALL_MODEL_KINDS_REFERENCED_BY_PROPERTIES = ( +ALL_MODEL_KINDS_REFERENCED_BY_PROPERTIES: Set[str] = ( base_validation_registry.get_all_model_kinds_referenced_by_properties()) @@ -53,7 +66,7 @@ class ModelKey(collections.namedtuple('ModelKey', ['model_kind', 'model_id'])): """Helper class for wrapping a (model kind, model ID) pair.""" @classmethod - def from_model(cls, model): + def from_model(cls, model: base_models.BaseModel) -> ModelKey: """Creates a model key from the given model. 
Args: @@ -70,7 +83,7 @@ def from_model(cls, model): class AuditAllStorageModelsJob(base_jobs.JobBase): """Runs a comprehensive audit on every model in the datastore.""" - def run(self): + def run(self) -> beam.PCollection[base_validation_errors.BaseAuditError]: """Returns a PCollection of audit errors aggregated from all models. Returns: @@ -118,7 +131,7 @@ def run(self): beam.ParDo(base_validation.ValidateDeletedModel())) ] - model_groups = python_utils.ZIP(KIND_BY_INDEX, models_of_kind_by_index) + model_groups = zip(KIND_BY_INDEX, models_of_kind_by_index) for kind, models_of_kind in model_groups: audit_error_pcolls.extend(models_of_kind | ApplyAuditDoFns(kind)) @@ -148,7 +161,13 @@ def run(self): return audit_error_pcolls | 'Combine audit results' >> beam.Flatten() def _get_model_relationship_errors( - self, unused_join_key, counts_and_errors): + self, + unused_join_key: ModelKey, + counts_and_errors: Tuple[ + List[int], + List[base_validation_errors.ModelRelationshipError] + ] + ) -> List[base_validation_errors.ModelRelationshipError]: """Returns errors associated with the given model key if it's missing. Args: @@ -168,21 +187,28 @@ def _get_model_relationship_errors( return errors if sum(counts) == 0 else [] -class ApplyAuditDoFns(beam.PTransform): +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error +# (Class cannot subclass 'PTransform' (has type 'Any')), we added an +# ignore here. +class ApplyAuditDoFns(beam.PTransform): # type: ignore[misc] """Runs every Audit DoFn targeting the models of a specific kind.""" - def __init__(self, kind): + def __init__(self, kind: str) -> None: """Initializes a new ApplyAuditDoFns instance. Args: kind: str. The kind of models this PTransform will receive. 
""" - super(ApplyAuditDoFns, self).__init__( + super().__init__( label='Apply every Audit DoFn targeting %s' % kind) self._kind = kind self._do_fn_types = tuple(AUDIT_DO_FN_TYPES_BY_KIND[kind]) - def expand(self, inputs): + def expand( + self, inputs: beam.PCollection[base_models.BaseModel] + ) -> beam.PCollection[base_validation_errors.BaseAuditError]: """Returns audit errors from every Audit DoFn targeting the models. This is the method that PTransform requires us to override when @@ -203,20 +229,27 @@ def expand(self, inputs): ) -class GetExistingModelKeyCounts(beam.PTransform): +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error +# (Class cannot subclass 'PTransform' (has type 'Any')), we added an +# ignore here. +class GetExistingModelKeyCounts(beam.PTransform): # type: ignore[misc] """Returns PCollection of (key, count) pairs for each input model.""" - def __init__(self, kind): + def __init__(self, kind: str) -> None: """Initializes the PTransform. Args: kind: str. The kind of model this PTransform will receive. """ - super(GetExistingModelKeyCounts, self).__init__( + super().__init__( label='Generate (key, count)s for all existing %ss' % kind) self._kind = kind - def expand(self, input_or_inputs): + def expand( + self, input_or_inputs: beam.PCollection[base_models.BaseModel] + ) -> beam.PCollection[Tuple[ModelKey, int]]: """Returns a PCollection of (key, count) pairs for each input model. Args: @@ -233,21 +266,32 @@ def expand(self, input_or_inputs): ) -class GetMissingModelKeyErrors(beam.PTransform): +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. 
Thus to avoid MyPy's error +# (Class cannot subclass 'PTransform' (has type 'Any')), we added an +# ignore here. +class GetMissingModelKeyErrors(beam.PTransform): # type: ignore[misc] """Returns PCollection of (key, error) pairs for each referenced model.""" - def __init__(self, kind): + def __init__(self, kind: str) -> None: """Initializes the PTransform. Args: kind: str. The kind of model this PTransform will receive. """ - super(GetMissingModelKeyErrors, self).__init__( + super().__init__( label='Generate (key, error)s from the ID properties in %s' % kind) self._id_referencing_properties = ( ID_REFERENCING_PROPERTIES_BY_KIND_OF_POSSESSOR[kind]) - def expand(self, input_or_inputs): + def expand( + self, input_or_inputs: beam.PCollection[base_models.BaseModel] + ) -> Iterable[ + beam.PCollection[ + Tuple[ModelKey, base_validation_errors.ModelRelationshipError] + ] + ]: """Returns PCollections of (key, error) pairs referenced by the models. Args: @@ -270,7 +314,13 @@ def expand(self, input_or_inputs): ) def _generate_missing_key_errors( - self, model, property_of_model, referenced_kinds): + self, + model: base_models.BaseModel, + property_of_model: model_property.ModelProperty, + referenced_kinds: Tuple[str, ...] + ) -> Iterator[ + Tuple[ModelKey, base_validation_errors.ModelRelationshipError] + ]: """Yields all model keys referenced by the given model's properties. 
Args: diff --git a/core/jobs/batch_jobs/model_validation_jobs_test.py b/core/jobs/batch_jobs/model_validation_jobs_test.py index 925f44a96496..be52e019c42f 100644 --- a/core/jobs/batch_jobs/model_validation_jobs_test.py +++ b/core/jobs/batch_jobs/model_validation_jobs_test.py @@ -26,20 +26,33 @@ from core.jobs.types import model_property from core.platform import models -(auth_models, base_models, user_models) = models.Registry.import_models( - [models.NAMES.auth, models.NAMES.base_model, models.NAMES.user]) +from typing import Final, Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import auth_models + from mypy_imports import base_models + from mypy_imports import user_models + +(auth_models, base_models, user_models) = models.Registry.import_models([ + models.Names.AUTH, models.Names.BASE_MODEL, models.Names.USER +]) class AuditAllStorageModelsJobTests(job_test_utils.JobTestBase): - JOB_CLASS = model_validation_jobs.AuditAllStorageModelsJob + JOB_CLASS: Type[ + model_validation_jobs.AuditAllStorageModelsJob + ] = model_validation_jobs.AuditAllStorageModelsJob - VALID_USER_ID = 'uid_%s' % ('a' * feconf.USER_ID_RANDOM_PART_LENGTH) + VALID_USER_ID: Final = 'uid_%s' % ( + 'a' * feconf.USER_ID_RANDOM_PART_LENGTH + ) - def test_empty_storage(self): + def test_empty_storage(self) -> None: self.assert_job_output_is_empty() - def test_base_validation(self): + def test_base_validation(self) -> None: base_model_with_invalid_id = self.create_model( base_models.BaseModel, id='123@?!*', deleted=False) base_model_with_invalid_timestamps = self.create_model( @@ -72,7 +85,7 @@ def test_base_validation(self): base_validation_errors.ModelExpiredError(expired_base_model), ]) - def test_user_audits(self): + def test_user_audits(self) -> None: user_settings_model_with_invalid_id = self.create_model( user_models.UserSettingsModel, id='128', email='a@a.com') @@ -90,7 +103,7 @@ def test_user_audits(self): user_settings_model_with_invalid_id, feconf.USER_ID_REGEX), 
]) - def test_reports_error_when_id_property_target_does_not_exist(self): + def test_reports_error_when_id_property_target_does_not_exist(self) -> None: self.put_multi([ # UserEmailPreferencesModel.id -> UserSettingsModel.id. self.create_model( @@ -106,7 +119,7 @@ def test_reports_error_when_id_property_target_does_not_exist(self): self.VALID_USER_ID, 'UserSettingsModel', self.VALID_USER_ID), ]) - def test_empty_when_id_property_target_exists(self): + def test_empty_when_id_property_target_exists(self) -> None: self.put_multi([ self.create_model( user_models.UserEmailPreferencesModel, id=self.VALID_USER_ID), @@ -117,7 +130,7 @@ def test_empty_when_id_property_target_exists(self): self.assert_job_output_is_empty() - def test_empty_when_web_of_id_property_targets_exist(self): + def test_empty_when_web_of_id_property_targets_exist(self) -> None: self.put_multi([ self.create_model( auth_models.UserAuthDetailsModel, @@ -132,7 +145,9 @@ def test_empty_when_web_of_id_property_targets_exist(self): self.assert_job_output_is_empty() - def test_reports_missing_id_property_target_even_if_sibling_property_is_valid(self): # pylint: disable=line-too-long + def test_reports_missing_id_property_target_even_if_sibling_property_is_valid( # pylint: disable=line-too-long + self + ) -> None: self.put_multi([ self.create_model( auth_models.UserAuthDetailsModel, id=self.VALID_USER_ID, diff --git a/core/jobs/batch_jobs/opportunity_management_jobs.py b/core/jobs/batch_jobs/opportunity_management_jobs.py index 3bd877721e83..5711d00bfc63 100644 --- a/core/jobs/batch_jobs/opportunity_management_jobs.py +++ b/core/jobs/batch_jobs/opportunity_management_jobs.py @@ -22,7 +22,9 @@ from core.domain import exp_domain from core.domain import exp_fetchers +from core.domain import opportunity_domain from core.domain import opportunity_services +from core.domain import skill_fetchers from core.domain import story_domain from core.domain import story_fetchers from core.domain import topic_domain @@ -36,6 
+38,7 @@ import apache_beam as beam import result + from typing import Dict, List MYPY = False @@ -43,19 +46,211 @@ from mypy_imports import datastore_services from mypy_imports import exp_models from mypy_imports import opportunity_models + from mypy_imports import question_models + from mypy_imports import skill_models from mypy_imports import story_models from mypy_imports import topic_models ( - exp_models, opportunity_models, story_models, + exp_models, + opportunity_models, + question_models, + skill_models, + story_models, topic_models ) = models.Registry.import_models([ - models.NAMES.exploration, models.NAMES.opportunity, models.NAMES.story, - models.NAMES.topic + models.Names.EXPLORATION, + models.Names.OPPORTUNITY, + models.Names.QUESTION, + models.Names.SKILL, + models.Names.STORY, + models.Names.TOPIC ]) + datastore_services = models.Registry.import_datastore_services() +class DeleteSkillOpportunityModelJob(base_jobs.JobBase): + """Job that deletes SkillOpportunityModels.""" + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of 'SUCCESS' or 'FAILURE' results from + deleting SkillOpportunityModel. + + Returns: + PCollection. A PCollection of 'SUCCESS' or 'FAILURE' results from + deleting SkillOpportunityModel. + """ + skill_opportunity_model = ( + self.pipeline + | 'Get all non-deleted skill models' >> ndb_io.GetModels( + opportunity_models.SkillOpportunityModel.get_all( + include_deleted=False)) + ) + + unused_delete_result = ( + skill_opportunity_model + | beam.Map(lambda model: model.key) + | 'Delete all models' >> ndb_io.DeleteModels() + ) + + return ( + skill_opportunity_model + | 'Create job run result' >> ( + job_result_transforms.CountObjectsToJobRunResult()) + ) + + +class GenerateSkillOpportunityModelJob(base_jobs.JobBase): + """Job for regenerating SkillOpportunityModel. + + NOTE: The DeleteSkillOpportunityModelJob must be run before this + job. 
+ """ + + @staticmethod + def _count_unique_question_ids( + question_skill_link_models: List[ + question_models.QuestionSkillLinkModel + ] + ) -> int: + """Counts the number of unique question ids. + + Args: + question_skill_link_models: list(QuestionSkillLinkModel). + List of QuestionSkillLinkModels. + + Returns: + int. The number of unique question ids. + """ + + return len({link.question_id for link in question_skill_link_models}) + + @staticmethod + def _create_skill_opportunity_model( + skill: skill_models.SkillModel, + question_skill_links: List[question_models.QuestionSkillLinkModel] + ) -> result.Result[ + opportunity_models.SkillOpportunityModel, Exception + ]: + """Transforms a skill object and a list of QuestionSkillLink objects + into a skill opportunity model. + + Args: + skill: skill_models.SkillModel. The skill to create the opportunity + for. + question_skill_links: list(question_models.QuestionSkillLinkModel). + The list of QuestionSkillLinkModel for the given skill. + + Returns: + Result[opportunity_models.SkillOpportunityModel, Exception]. + Result object that contains SkillOpportunityModel when the operation + is successful and Exception when an exception occurs. 
+ """ + try: + skill_opportunity = opportunity_domain.SkillOpportunity( + skill_id=skill.id, + skill_description=skill.description, + question_count=( + GenerateSkillOpportunityModelJob._count_unique_question_ids( + question_skill_links)) + ) + skill_opportunity.validate() + with datastore_services.get_ndb_context(): + opportunity_model = opportunity_models.SkillOpportunityModel( + id=skill_opportunity.id, + skill_description=skill_opportunity.skill_description, + question_count=skill_opportunity.question_count + ) + opportunity_model.update_timestamps() + return result.Ok(opportunity_model) + except Exception as e: + return result.Err(e) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of 'SUCCESS' or 'FAILURE' results from + generating SkillOpportunityModel. + + Returns: + PCollection. A PCollection of 'SUCCESS' or 'FAILURE' results from + generating SkillOpportunityModel. + """ + question_skill_link_models = ( + self.pipeline + | 'Get all non-deleted QuestionSkillLinkModels' >> ( + ndb_io.GetModels( + question_models.QuestionSkillLinkModel.get_all( + include_deleted=False)) + ) + | 'Group QuestionSkillLinkModels by skill ID' >> + beam.GroupBy(lambda n: n.skill_id) + ) + + skills = ( + self.pipeline + | 'Get all non-deleted SkillModels' >> ( + ndb_io.GetModels( + skill_models.SkillModel.get_all(include_deleted=False))) + | 'Get skill object from model' >> beam.Map( + skill_fetchers.get_skill_from_model) + | 'Group skill objects by skill ID' >> beam.GroupBy(lambda m: m.id) + ) + + skills_with_question_counts = ( + { + 'skill': skills, + 'question_skill_links': question_skill_link_models + } + | 'Merge by skill ID' >> beam.CoGroupByKey() + # Pylint disable is needed because pylint is not able to correctly + # detect that the value is passed through the pipe. 
+ | 'Remove skill IDs' >> beam.Values() # pylint: disable=no-value-for-parameter + # We are using itertools.chain.from_iterable to flatten + # question_skill_links from a 2D list into a 1D list. + | 'Flatten skill and question_skill_links' >> beam.Map( + lambda skill_and_question_skill_links_object: { + 'skill': list( + skill_and_question_skill_links_object['skill'][0] + )[0], + 'question_skill_links': list( + itertools.chain.from_iterable( + skill_and_question_skill_links_object[ + 'question_skill_links' + ] + ) + ) + } + ) + ) + + opportunities_results = ( + skills_with_question_counts + | beam.Map( + lambda skills_with_question_counts_object: + self._create_skill_opportunity_model( + skills_with_question_counts_object['skill'], + skills_with_question_counts_object[ + 'question_skill_links' + ] + )) + ) + + unused_put_result = ( + opportunities_results + | 'Filter the results with OK status' >> beam.Filter( + lambda result: result.is_ok()) + | 'Fetch the models to be put' >> beam.Map( + lambda result: result.unwrap()) + | 'Put models into the datastore' >> ndb_io.PutModels() + ) + + return ( + opportunities_results + | 'Transform Results to JobRunResults' >> ( + job_result_transforms.ResultsToJobRunResults()) + ) + + class DeleteExplorationOpportunitySummariesJob(base_jobs.JobBase): """Job that deletes ExplorationOpportunitySummaryModels.""" @@ -120,7 +315,7 @@ def _generate_opportunities_related_to_topic( generated by the operation. 
""" try: - story_ids = topic.get_canonical_story_ids() # type: ignore[no-untyped-call] + story_ids = topic.get_canonical_story_ids() existing_story_ids = ( set(stories_dict.keys()).intersection(story_ids)) exp_ids: List[str] = list(itertools.chain.from_iterable( @@ -142,14 +337,15 @@ def _generate_opportunities_related_to_topic( stories = [ stories_dict[story_id] for story_id in existing_story_ids ] - for story in stories: - for exp_id in story.story_contents.get_all_linked_exp_ids(): - exploration_opportunity_summary_list.append( - opportunity_services.create_exp_opportunity_summary( # type: ignore[no-untyped-call] - topic, story, exps_dict[exp_id])) exploration_opportunity_summary_model_list = [] with datastore_services.get_ndb_context(): + for story in stories: + for exp_id in story.story_contents.get_all_linked_exp_ids(): + exploration_opportunity_summary_list.append( + opportunity_services.create_exp_opportunity_summary( + topic, story, exps_dict[exp_id])) + for opportunity in exploration_opportunity_summary_list: model = ( opportunity_models.ExplorationOpportunitySummaryModel( diff --git a/core/jobs/batch_jobs/opportunity_management_jobs_test.py b/core/jobs/batch_jobs/opportunity_management_jobs_test.py index bc729f168cdb..a4f51d420724 100644 --- a/core/jobs/batch_jobs/opportunity_management_jobs_test.py +++ b/core/jobs/batch_jobs/opportunity_management_jobs_test.py @@ -26,29 +26,324 @@ from core.jobs.types import job_run_result from core.platform import models +from typing import Final, Type + MYPY = False if MYPY: # pragma: no cover from mypy_imports import datastore_services from mypy_imports import exp_models from mypy_imports import opportunity_models + from mypy_imports import question_models + from mypy_imports import skill_models from mypy_imports import story_models from mypy_imports import topic_models ( - exp_models, opportunity_models, story_models, + exp_models, + opportunity_models, + story_models, topic_models, + skill_models, + question_models 
) = models.Registry.import_models([ - models.NAMES.exploration, models.NAMES.opportunity, models.NAMES.story, - models.NAMES.topic + models.Names.EXPLORATION, + models.Names.OPPORTUNITY, + models.Names.STORY, + models.Names.TOPIC, + models.Names.SKILL, + models.Names.QUESTION ]) datastore_services = models.Registry.import_datastore_services() +class DeleteSkillOpportunityModelJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + opportunity_management_jobs.DeleteSkillOpportunityModelJob + ] = opportunity_management_jobs.DeleteSkillOpportunityModelJob + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_job_deletes_all_opportunities(self) -> None: + skill_opportunity_model_1 = self.create_model( + opportunity_models.SkillOpportunityModel, + id='opportunity_id1', + skill_description='A skill description', + question_count=20, + ) + skill_opportunity_model_1.update_timestamps() + skill_opportunity_model_1.put() + skill_opportunity_model_2 = self.create_model( + opportunity_models.SkillOpportunityModel, + id='opportunity_id2', + skill_description='A skill description', + question_count=20, + ) + skill_opportunity_model_2.update_timestamps() + skill_opportunity_model_2.put() + + all_skill_opportunity_models = list( + opportunity_models.SkillOpportunityModel.get_all()) + self.assertEqual(len(all_skill_opportunity_models), 2) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SUCCESS: 2') + ]) + + all_skill_opportunity_models = list( + opportunity_models.SkillOpportunityModel.get_all()) + self.assertEqual(len(all_skill_opportunity_models), 0) + + +class GenerateSkillOpportunityModelJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + opportunity_management_jobs.GenerateSkillOpportunityModelJob + ] = opportunity_management_jobs.GenerateSkillOpportunityModelJob + + SKILL_1_ID: Final = 'skill_1' + SKILL_1_DESCRIPTION: Final = 'skill 1' + SKILL_2_ID: Final = 'skill_2' + SKILL_2_DESCRIPTION: Final = 'skill 
2' + QUESTION_1_ID: Final = 'question_1' + QUESTION_2_ID: Final = 'question_2' + + def setUp(self) -> None: + super().setUp() + + question_skill_link_model_1 = self.create_model( + question_models.QuestionSkillLinkModel, + question_id=self.QUESTION_1_ID, + skill_id=self.SKILL_1_ID, + skill_difficulty=1 + ) + + question_skill_link_model_2 = self.create_model( + question_models.QuestionSkillLinkModel, + question_id=self.QUESTION_2_ID, + skill_id=self.SKILL_2_ID, + skill_difficulty=1 + ) + question_skill_link_model_1.update_timestamps() + question_skill_link_model_2.update_timestamps() + + skill_1_model = self.create_model( + skill_models.SkillModel, + id=self.SKILL_1_ID, + description=self.SKILL_1_DESCRIPTION, + language_code=constants.DEFAULT_LANGUAGE_CODE, + misconceptions=[], + rubrics=[], + skill_contents={ + 'explanation': { + 'html': 'test explanation', + 'content_id': 'explanation', + }, + 'worked_examples': [], + 'recorded_voiceovers': { + 'voiceovers_mapping': {} + }, + 'written_translations': { + 'translations_mapping': { + 'content': {}, + 'default_outcome': {} + } + } + }, + next_misconception_id=0, + misconceptions_schema_version=feconf + .CURRENT_MISCONCEPTIONS_SCHEMA_VERSION, + rubric_schema_version=feconf + .CURRENT_RUBRIC_SCHEMA_VERSION, + skill_contents_schema_version=feconf + .CURRENT_SKILL_CONTENTS_SCHEMA_VERSION, + superseding_skill_id='blah', + all_questions_merged=False, + prerequisite_skill_ids=[] + ) + + skill_2_model = self.create_model( + skill_models.SkillModel, + id=self.SKILL_2_ID, + description=self.SKILL_2_DESCRIPTION, + language_code=constants.DEFAULT_LANGUAGE_CODE, + misconceptions=[], + rubrics=[], + skill_contents={ + 'explanation': { + 'html': 'test explanation', + 'content_id': 'explanation', + }, + 'worked_examples': [], + 'recorded_voiceovers': { + 'voiceovers_mapping': {} + }, + 'written_translations': { + 'translations_mapping': { + 'content': {}, + 'default_outcome': {} + } + } + }, + next_misconception_id=0, + 
misconceptions_schema_version=feconf + .CURRENT_MISCONCEPTIONS_SCHEMA_VERSION, + rubric_schema_version=feconf + .CURRENT_RUBRIC_SCHEMA_VERSION, + skill_contents_schema_version=feconf + .CURRENT_SKILL_CONTENTS_SCHEMA_VERSION, + superseding_skill_id='blah', + all_questions_merged=False, + prerequisite_skill_ids=[] + ) + skill_1_model.update_timestamps() + skill_2_model.update_timestamps() + + datastore_services.put_multi([ + skill_1_model, + skill_2_model, + question_skill_link_model_1, + question_skill_link_model_2 + ]) + + def test_generation_job_creates_new_models(self) -> None: + all_opportunity_models = list( + opportunity_models.SkillOpportunityModel.get_all()) + self.assertEqual(len(all_opportunity_models), 0) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SUCCESS: 2') + ]) + + opportunity_model_1 = ( + opportunity_models.SkillOpportunityModel.get( + self.SKILL_1_ID)) + # Ruling out the possibility of None for mypy type checking. + assert opportunity_model_1 is not None + self.assertEqual(opportunity_model_1.id, self.SKILL_1_ID) + self.assertEqual( + opportunity_model_1.skill_description, + self.SKILL_1_DESCRIPTION) + self.assertEqual(opportunity_model_1.question_count, 1) + + opportunity_model_2 = ( + opportunity_models.SkillOpportunityModel.get( + self.SKILL_2_ID)) + assert opportunity_model_2 is not None + self.assertEqual(opportunity_model_2.id, self.SKILL_2_ID) + self.assertEqual( + opportunity_model_2.skill_description, + self.SKILL_2_DESCRIPTION) + self.assertEqual(opportunity_model_2.question_count, 1) + + def test_generation_job_does_not_count_duplicate_question_ids(self) -> None: + all_opportunity_models = list( + opportunity_models.SkillOpportunityModel.get_all()) + self.assertEqual(len(all_opportunity_models), 0) + + question_1_duplicate_skilllinkmodel = self.create_model( + question_models.QuestionSkillLinkModel, + question_id=self.QUESTION_1_ID, + skill_id=self.SKILL_1_ID, + skill_difficulty=1 + ) + 
question_1_duplicate_skilllinkmodel.update_timestamps() + datastore_services.put_multi([question_1_duplicate_skilllinkmodel]) + + all_skill_link_models = list( + question_models.QuestionSkillLinkModel.get_all()) + self.assertEqual(len(all_skill_link_models), 3) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SUCCESS: 2') + ]) + + opportunity_model_1 = ( + opportunity_models.SkillOpportunityModel.get( + self.SKILL_1_ID)) + # Ruling out the possibility of None for mypy type checking. + assert opportunity_model_1 is not None + self.assertEqual(opportunity_model_1.id, self.SKILL_1_ID) + self.assertEqual( + opportunity_model_1.skill_description, + self.SKILL_1_DESCRIPTION) + self.assertEqual(opportunity_model_1.question_count, 1) + + opportunity_model_2 = ( + opportunity_models.SkillOpportunityModel.get( + self.SKILL_2_ID)) + assert opportunity_model_2 is not None + self.assertEqual(opportunity_model_2.id, self.SKILL_2_ID) + self.assertEqual( + opportunity_model_2.skill_description, + self.SKILL_2_DESCRIPTION) + self.assertEqual(opportunity_model_2.question_count, 1) + + def test_generation_job_counts_multiple_questions(self) -> None: + all_opportunity_models = list( + opportunity_models.SkillOpportunityModel.get_all()) + self.assertEqual(len(all_opportunity_models), 0) + + question_skill_link_model_1 = self.create_model( + question_models.QuestionSkillLinkModel, + question_id=self.QUESTION_1_ID, + skill_id=self.SKILL_2_ID, + skill_difficulty=1 + ) + question_skill_link_model_1.update_timestamps() + datastore_services.put_multi([question_skill_link_model_1]) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SUCCESS: 2') + ]) + + opportunity_model_1 = ( + opportunity_models.SkillOpportunityModel.get( + self.SKILL_1_ID)) + # Ruling out the possibility of None for mypy type checking. 
+ assert opportunity_model_1 is not None + self.assertEqual(opportunity_model_1.id, self.SKILL_1_ID) + self.assertEqual( + opportunity_model_1.skill_description, + self.SKILL_1_DESCRIPTION) + self.assertEqual(opportunity_model_1.question_count, 1) + + opportunity_model_2 = ( + opportunity_models.SkillOpportunityModel.get( + self.SKILL_2_ID)) + assert opportunity_model_2 is not None + self.assertEqual(opportunity_model_2.id, self.SKILL_2_ID) + self.assertEqual( + opportunity_model_2.skill_description, + self.SKILL_2_DESCRIPTION) + self.assertEqual(opportunity_model_2.question_count, 2) + + def test_generation_job_fails_when_validation_failure(self) -> None: + all_opportunity_models = list( + opportunity_models.SkillOpportunityModel.get_all()) + self.assertEqual(len(all_opportunity_models), 0) + + with self.swap( + opportunity_management_jobs.GenerateSkillOpportunityModelJob, + '_count_unique_question_ids', + lambda _: -1 + ): + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='', stderr='ERROR: \"Expected question_count to be ' + 'a non-negative integer, received -1\": 2' + ) + ]) + + class DeleteExplorationOpportunitySummariesJobTests(job_test_utils.JobTestBase): - JOB_CLASS = ( - opportunity_management_jobs.DeleteExplorationOpportunitySummariesJob) + JOB_CLASS: Type[ + opportunity_management_jobs.DeleteExplorationOpportunitySummariesJob + ] = opportunity_management_jobs.DeleteExplorationOpportunitySummariesJob def test_empty_storage(self) -> None: self.assert_job_output_is_empty() @@ -101,18 +396,23 @@ def test_job_deletes_all_opportunities(self) -> None: class GenerateExplorationOpportunitySummariesJobTests( job_test_utils.JobTestBase): - JOB_CLASS = ( - opportunity_management_jobs.GenerateExplorationOpportunitySummariesJob) - - VALID_USER_ID_1 = 'uid_%s' % ('a' * feconf.USER_ID_RANDOM_PART_LENGTH) - VALID_USER_ID_2 = 'uid_%s' % ('b' * feconf.USER_ID_RANDOM_PART_LENGTH) - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' - TOPIC_1_ID = 
'topic_1_id' - TOPIC_2_ID = 'topic_2_id' - STORY_1_ID = 'story_1_id' - STORY_2_ID = 'story_2_id' - LANG_1 = 'lang_1' + JOB_CLASS: Type[ + opportunity_management_jobs.GenerateExplorationOpportunitySummariesJob + ] = opportunity_management_jobs.GenerateExplorationOpportunitySummariesJob + + VALID_USER_ID_1: Final = 'uid_%s' % ( + 'a' * feconf.USER_ID_RANDOM_PART_LENGTH + ) + VALID_USER_ID_2: Final = 'uid_%s' % ( + 'b' * feconf.USER_ID_RANDOM_PART_LENGTH + ) + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' + TOPIC_1_ID: Final = 'topic_1_id' + TOPIC_2_ID: Final = 'topic_2_id' + STORY_1_ID: Final = 'story_1_id' + STORY_2_ID: Final = 'story_2_id' + LANG_1: Final = 'lang_1' def setUp(self) -> None: super().setUp() @@ -129,7 +429,9 @@ def setUp(self) -> None: canonical_story_references=[{ 'story_id': self.STORY_1_ID, 'story_is_published': False - }]) + }], + page_title_fragment_for_web='fragm' + ) self.topic_model.update_timestamps() topic_rights_model = self.create_model( topic_models.TopicRightsModel, id=self.TOPIC_1_ID) @@ -169,12 +471,16 @@ def setUp(self) -> None: objective='objective', language_code='cs', init_state_name='state', - states_schema_version=48, + states_schema_version=feconf.CURRENT_STATE_SCHEMA_VERSION, states={ - 'state': state_domain.State.create_default_state( # type: ignore[no-untyped-call] - 'state', is_initial_state=True + 'state': state_domain.State.create_default_state( + 'state', 'content_0', 'default_outcome_1', + is_initial_state=True ).to_dict() - }) + }, + next_content_id_index=2 + ) + exp_model.states['state']['content']['html'] = 'A text for translation' exp_model.update_timestamps() datastore_services.put_multi([ exp_model, story_model, self.topic_model, topic_rights_model @@ -204,7 +510,7 @@ def test_generation_job_returns_initial_opportunity(self) -> None: self.assertEqual(opportunity_model.story_title, 'story title') self.assertEqual(opportunity_model.chapter_title, 'node title') 
self.assertEqual(opportunity_model.content_count, 1) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( opportunity_model.incomplete_translation_language_codes, {l['id'] for l in constants.SUPPORTED_AUDIO_LANGUAGES} - {'cs'}) self.assertEqual(opportunity_model.translation_counts, {}) @@ -254,15 +560,20 @@ def test_generation_job_returns_multiple_opportunities_for_one_topic( objective='objective', language_code='en', init_state_name='state1', - states_schema_version=48, + states_schema_version=feconf.CURRENT_STATE_SCHEMA_VERSION, states={ - 'state1': state_domain.State.create_default_state( # type: ignore[no-untyped-call] - 'state1', is_initial_state=True + 'state1': state_domain.State.create_default_state( + 'state1', 'content_0', 'default_outcome_1', + is_initial_state=True ).to_dict(), - 'state2': state_domain.State.create_default_state( # type: ignore[no-untyped-call] - 'state2' + 'state2': state_domain.State.create_default_state( + 'state2', 'content_2', 'default_outcome_3', ).to_dict() - }) + }, + next_content_id_index=4 + ) + exp_model.states['state1']['content']['html'] = 'A text for translation' + exp_model.states['state2']['content']['html'] = 'A text for translation' exp_model.update_timestamps() datastore_services.put_multi([self.topic_model, exp_model, story_model]) @@ -289,7 +600,7 @@ def test_generation_job_returns_multiple_opportunities_for_one_topic( self.assertEqual(opportunity_model.story_title, 'story 2 title') self.assertEqual(opportunity_model.chapter_title, 'node 2 title') self.assertEqual(opportunity_model.content_count, 2) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( opportunity_model.incomplete_translation_language_codes, {l['id'] for l in constants.SUPPORTED_AUDIO_LANGUAGES} - {'en'}) self.assertEqual(opportunity_model.translation_counts, {}) @@ -312,7 +623,9 @@ def test_job_returns_one_opportunity_for_multiple_topics_with_same_exp( canonical_story_references=[{ 
'story_id': self.STORY_2_ID, 'story_is_published': False - }]) + }], + page_title_fragment_for_web='fragm' + ) topic_model.update_timestamps() topic_rights_model = self.create_model( topic_models.TopicRightsModel, id=self.TOPIC_2_ID) @@ -367,7 +680,7 @@ def test_job_returns_one_opportunity_for_multiple_topics_with_same_exp( self.assertEqual(opportunity_model.story_title, 'story title') self.assertEqual(opportunity_model.chapter_title, 'node title') self.assertEqual(opportunity_model.content_count, 1) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( opportunity_model.incomplete_translation_language_codes, {l['id'] for l in constants.SUPPORTED_AUDIO_LANGUAGES} - {'cs'}) self.assertEqual(opportunity_model.translation_counts, {}) @@ -465,7 +778,9 @@ def test_generation_job_returns_multiple_opportunities_for_multiple_topics( canonical_story_references=[{ 'story_id': self.STORY_2_ID, 'story_is_published': False - }]) + }], + page_title_fragment_for_web='fragm', + ) topic_model.update_timestamps() topic_rights_model = self.create_model( topic_models.TopicRightsModel, id=self.TOPIC_2_ID) @@ -505,15 +820,20 @@ def test_generation_job_returns_multiple_opportunities_for_multiple_topics( objective='objective', language_code='en', init_state_name='state1', - states_schema_version=48, + states_schema_version=feconf.CURRENT_STATE_SCHEMA_VERSION, states={ - 'state1': state_domain.State.create_default_state( # type: ignore[no-untyped-call] - 'state1', is_initial_state=True + 'state1': state_domain.State.create_default_state( + 'state1', 'content_0', 'default_outcome_1', + is_initial_state=True ).to_dict(), - 'state2': state_domain.State.create_default_state( # type: ignore[no-untyped-call] - 'state2' + 'state2': state_domain.State.create_default_state( + 'state2', 'content_2', 'default_outcome_3', ).to_dict() - }) + }, + next_content_id_index=4 + ) + exp_model.states['state1']['content']['html'] = 'A text for translation' + 
exp_model.states['state2']['content']['html'] = 'A text for translation' exp_model.update_timestamps() datastore_services.put_multi([ exp_model, story_model, topic_model, topic_rights_model @@ -542,7 +862,7 @@ def test_generation_job_returns_multiple_opportunities_for_multiple_topics( self.assertEqual(opportunity_model.story_title, 'story 2 title') self.assertEqual(opportunity_model.chapter_title, 'node 2 title') self.assertEqual(opportunity_model.content_count, 2) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( opportunity_model.incomplete_translation_language_codes, {l['id'] for l in constants.SUPPORTED_AUDIO_LANGUAGES} - {'en'}) self.assertEqual(opportunity_model.translation_counts, {}) diff --git a/core/jobs/batch_jobs/rejecting_suggestion_for_invalid_content_ids_jobs.py b/core/jobs/batch_jobs/rejecting_suggestion_for_invalid_content_ids_jobs.py new file mode 100644 index 000000000000..7f83f5a57390 --- /dev/null +++ b/core/jobs/batch_jobs/rejecting_suggestion_for_invalid_content_ids_jobs.py @@ -0,0 +1,337 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Rejecting suggestions whose content_id no longer exists and +updating the translation content. 
+""" + +from __future__ import annotations + +from core import feconf +from core.domain import exp_domain +from core.domain import html_cleaner +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam + +from typing import Dict, List, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + from mypy_imports import suggestion_models + +(exp_models, suggestion_models) = models.Registry.import_models([ + models.Names.EXPLORATION, models.Names.SUGGESTION +]) + +datastore_services = models.Registry.import_datastore_services() + + +class RejectSuggestionWithMissingContentIdMigrationJob(base_jobs.JobBase): + """Job that rejects the suggestions for missing content ids and + updates the RTE content. + """ + + @staticmethod + def _update_suggestion_model( + suggestions: List[suggestion_models.GeneralSuggestionModel], + exp_model: exp_models.ExplorationModel + ) -> List[suggestion_models.GeneralSuggestionModel]: + """Updates the translation suggestion. The translation whose + content_id no longer exists, the suggestion status will be marked + as `rejected`. The RTE content of the suggestion will be updated + in case invalid data is present. + + Args: + suggestions: list(GeneralSuggestionModel). A list of translation + suggestion models corresponding to the given exploration. + exp_model: ExplorationModel. The exploration model. + + Returns: + suggestions. List[GeneralSuggestionModel]. Result containing the + list of updated suggestion models. 
+ """ + total_content_ids = [] + for state in exp_model.states.values(): + written_translations = ( + state['written_translations']['translations_mapping']) + for content_id, _ in written_translations.items(): + total_content_ids.append(content_id) + + for suggestion in suggestions: + suggestion_change = suggestion.change_cmd + if not suggestion_change['content_id'] in total_content_ids: + suggestion.status = suggestion_models.STATUS_REJECTED + + translation_html = suggestion_change['translation_html'] + resulting_translation = [] + if isinstance(translation_html, list): + for translation in translation_html: + resulting_translation.append( + exp_domain.Exploration.fix_content(translation) + ) + suggestion_change['translation_html'] = ( + [data for data in resulting_translation + if not html_cleaner.is_html_empty(data)]) + + else: + suggestion_change['translation_html'] = ( + exp_domain.Exploration.fix_content(translation_html)) + + return suggestions + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the suggestion updation. + + Returns: + PCollection. A PCollection of results from the suggestion + migration. 
+ """ + target_id_to_suggestion_models = ( + self.pipeline + | 'Get translation suggestion models in review' >> ndb_io.GetModels( + suggestion_models.GeneralSuggestionModel.get_all( + include_deleted=False).filter( + ( + suggestion_models + .GeneralSuggestionModel.suggestion_type + ) == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT + ).filter( + suggestion_models.GeneralSuggestionModel.status == ( + suggestion_models.STATUS_IN_REVIEW + ) + ) + ) + | 'Add target id as key' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda model: model.target_id) + | 'Group exploration suggestions' >> beam.GroupByKey() + ) + + exploration_models = ( + self.pipeline + | 'Get all exploration models' >> ndb_io.GetModels( + exp_models.ExplorationModel.get_all()) + | 'Add exploration id as key' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda model: model.id) + ) + + updated_suggestion_results = ( + { + 'suggestions': target_id_to_suggestion_models, + 'exploration': exploration_models + } + | 'Merge models' >> beam.CoGroupByKey() + | 'Remove keys' >> beam.Values() # pylint: disable=no-value-for-parameter + | 'Filter unwanted exploration' >> beam.Filter( + lambda objects: len(objects['suggestions']) != 0) + | 'Transform and migrate model' >> beam.Map( + lambda objects: ( + self._update_suggestion_model( + objects['suggestions'][0], + objects['exploration'][0] + ) + )) + | 'Flatten suggestion models' >> beam.FlatMap(lambda x: x) + ) + + updated_suggestions_count_job_run_results = ( + updated_suggestion_results + | 'Transform suggestion objects into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'SUGGESTION ITERATED')) + ) + + unused_put_results = ( + updated_suggestion_results + | 'Put models into the datastore' >> ndb_io.PutModels() + ) + + return updated_suggestions_count_job_run_results + + +class AuditRejectSuggestionWithMissingContentIdMigrationJob(base_jobs.JobBase): + """Audits the suggestions and returns the results.""" + 
+ @staticmethod + def _report_errors_from_suggestion_models( + suggestions: List[suggestion_models.GeneralSuggestionModel], + exp_model: exp_models.ExplorationModel + ) -> List[Dict[str, Union[str, List[Dict[str, Union[List[str], str]]]]]]: + """Audits the translation suggestion. Reports the following + - The info related to suggestion in case the content id is missing + - Before and after content of the translation_html. + + Args: + suggestions: list(GeneralSuggestionModel). A list of translation + suggestion models corresponding to the given exploration. + exp_model: ExplorationModel. The exploration model. + + Returns: + result_after_migrations. list(dict). Result containing the info + of missing content id and the translation before and after + migration. + """ + info_for_missing_content_id = [] + info_for_content_updation = [] + result_after_migrations: ( + List[Dict[str, Union[str, List[Dict[str, + Union[List[str], str]]]]]]) = [] + total_content_ids = [] + + for state in exp_model.states.values(): + written_translations = ( + state['written_translations']['translations_mapping']) + for content_id, _ in written_translations.items(): + total_content_ids.append(content_id) + + for suggestion in suggestions: + suggestion_change = suggestion.change_cmd + if not suggestion_change['content_id'] in total_content_ids: + info_for_missing_content_id.append( + { + 'content_id': suggestion_change['content_id'], + 'state_name': suggestion_change['state_name'] + } + ) + + html_before: Union[List[str], str] = suggestion_change[ + 'translation_html'] + if isinstance(html_before, list): + # Ruling out the possibility of different types for + # mypy type checking. 
+ assert isinstance(html_before, list) + html_after: Union[List[str], str] = [] + assert isinstance(html_after, list) + for translation in html_before: + html_after.append( + exp_domain.Exploration.fix_content(translation) + ) + + html_after = ( + [data for data in html_after + if not html_cleaner.is_html_empty(data)]) + + else: + # Ruling out the possibility of different types for mypy + # type checking. + assert isinstance(html_before, str) + html_after = exp_domain.Exploration.fix_content( + html_before) + assert isinstance(html_after, str) + + info_for_content_updation.append( + { + 'content_before': html_before, + 'content_after': html_after + } + ) + suggestion_change['translation_html'] = html_after + + result_after_migrations.append( + { + 'exp_id': exp_model.id, + 'missing_content_ids': info_for_missing_content_id, + 'content_translation': info_for_content_updation + } + ) + + return result_after_migrations + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the suggestion updation. + + Returns: + PCollection. A PCollection of results from the suggestion + migration. 
+ """ + target_id_to_suggestion_models = ( + self.pipeline + | 'Get translation suggestion models in review' >> ndb_io.GetModels( + suggestion_models.GeneralSuggestionModel.get_all( + include_deleted=False).filter( + ( + suggestion_models + .GeneralSuggestionModel.suggestion_type + ) == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT + ).filter( + suggestion_models.GeneralSuggestionModel.status == ( + suggestion_models.STATUS_IN_REVIEW + ) + ) + ) + | 'Add target id as key' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda model: model.target_id) + | 'Group exploration suggestions' >> beam.GroupByKey() + ) + + exploration_models = ( + self.pipeline + | 'Get all exploration models' >> ndb_io.GetModels( + exp_models.ExplorationModel.get_all()) + | 'Add exploration id as key' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda model: model.id) + ) + + suggestion_results = ( + { + 'suggestions': target_id_to_suggestion_models, + 'exploration': exploration_models + } + | 'Merge models' >> beam.CoGroupByKey() + | 'Remove keys' >> beam.Values() # pylint: disable=no-value-for-parameter + | 'Filter unwanted exploration' >> beam.Filter( + lambda objects: len(objects['suggestions']) != 0) + | 'Transform and migrate model' >> beam.Map( + lambda objects: ( + self._report_errors_from_suggestion_models( + objects['suggestions'][0], + objects['exploration'][0] + ) + )) + | 'Flatten suggestion models' >> beam.FlatMap(lambda x: x) + ) + + report_suggestions = ( + suggestion_results + | 'Report the suggestions data' >> beam.Map( + lambda result: ( + job_run_result.JobRunResult.as_stdout( + f'Results are - {result}' + ) + ) + ) + ) + + report_count_suggestion = ( + suggestion_results + | 'Report count for suggestions' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'GROUP OF SUGGESTION PER EXP') + ) + ) + + return ( + ( + report_suggestions, + report_count_suggestion + ) + | 'Combine results' >> beam.Flatten() + ) diff --git 
a/core/jobs/batch_jobs/rejecting_suggestion_for_invalid_content_ids_jobs_test.py b/core/jobs/batch_jobs/rejecting_suggestion_for_invalid_content_ids_jobs_test.py new file mode 100644 index 000000000000..54bbdf302b00 --- /dev/null +++ b/core/jobs/batch_jobs/rejecting_suggestion_for_invalid_content_ids_jobs_test.py @@ -0,0 +1,463 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for jobs.batch_jobs. +rejecting_suggestion_for_invalid_content_ids_jobs. 
+""" + +from __future__ import annotations + +from core import feconf +from core.jobs import job_test_utils +from core.jobs.batch_jobs import ( + rejecting_suggestion_for_invalid_content_ids_jobs) +from core.jobs.types import job_run_result +from core.platform import models + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + from mypy_imports import suggestion_models + +(exp_models, suggestion_models) = models.Registry.import_models([ + models.Names.EXPLORATION, models.Names.SUGGESTION +]) + +STATE_DICT_IN_V52 = { + 'content': {'content_id': 'content', 'html': ''}, + 'param_changes': [], + 'interaction': { + 'solution': None, + 'answer_groups': [], + 'default_outcome': { + 'param_changes': [], + 'feedback': { + 'content_id': 'default_outcome', + 'html': 'Default outcome' + }, + 'dest': 'Introduction', + 'dest_if_really_stuck': None, + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False + }, + 'customization_args': { + 'rows': { + 'value': 1 + }, + 'placeholder': { + 'value': { + 'unicode_str': '', + 'content_id': 'ca_placeholder_1' + } + } + }, + 'confirmed_unclassified_answers': [], + 'id': 'TextInput', + 'hints': [] + }, + 'linked_skill_id': None, + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'content': {}, + 'default_outcome': {}, + 'ca_placeholder_1': {} + } + }, + 'written_translations': { + 'translations_mapping': { + 'content': {}, + 'default_outcome': {}, + 'ca_placeholder_1': {} + } + }, + 'classifier_model_id': None, + 'card_is_checkpoint': False, + 'solicit_answer_details': False, + 'next_content_id_index': 2 +} + +ERRORED_TRANSLATION_VALUE = ( + '

    ' + '

    ' +) + +CHANGE_DICT = { + 'cmd': 'add_translation', + 'content_id': 'invalid_content_id', + 'language_code': 'hi', + 'content_html': 'Content', + 'state_name': 'Introduction', + 'translation_html': ERRORED_TRANSLATION_VALUE +} + +CHANGE_DICT_WITH_LIST_TRANSLATION = { + 'cmd': 'add_translation', + 'content_id': 'invalid_content_id', + 'language_code': 'hi', + 'content_html': 'Content', + 'state_name': 'Introduction', + 'translation_html': [ERRORED_TRANSLATION_VALUE, ''] +} + + +class RejectSuggestionWithMissingContentIdMigrationJobTests( + job_test_utils.JobTestBase +): + + JOB_CLASS = ( + rejecting_suggestion_for_invalid_content_ids_jobs + .RejectSuggestionWithMissingContentIdMigrationJob + ) + TARGET_ID = 'exp1' + + def setUp(self) -> None: + super().setUp() + self.exp_1 = self.create_model( + exp_models.ExplorationModel, + id=self.TARGET_ID, + title='title', + init_state_name=feconf.DEFAULT_INIT_STATE_NAME, + category=feconf.DEFAULT_EXPLORATION_CATEGORY, + objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE, + language_code='en', + tags=['Topic'], + blurb='blurb', + author_notes='author notes', + states_schema_version=52, + param_specs={}, + param_changes=[], + auto_tts_enabled=feconf.DEFAULT_AUTO_TTS_ENABLED, + correctness_feedback_enabled=False, + states={feconf.DEFAULT_INIT_STATE_NAME: STATE_DICT_IN_V52}, + ) + self.put_multi([self.exp_1]) + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_invalid_suggestion_is_migrated(self) -> None: + CHANGE_DICT['translation_html'] = ERRORED_TRANSLATION_VALUE + CHANGE_DICT['content_id'] = 'invalid_content_id' + suggestion_1_invalid_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=CHANGE_DICT, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + 
language_code='bn' + ) + suggestion_1_invalid_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_1_invalid_model]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='SUGGESTION ITERATED SUCCESS: 1' + ) + ]) + + migrated_suggestion_invalid_model = ( + suggestion_models.GeneralSuggestionModel.get( + suggestion_1_invalid_model.id) + ) + self.assertEqual( + migrated_suggestion_invalid_model.status, + suggestion_models.STATUS_REJECTED + ) + self.assertEqual( + migrated_suggestion_invalid_model.change_cmd['translation_html'], ( + '

    ' + '

    ' + ) + ) + + def test_valid_suggestion_is_unchanged(self) -> None: + CHANGE_DICT['content_id'] = 'default_outcome' + CHANGE_DICT['translation_html'] = '

    Translation for content.

    ' + suggestion_2_valid_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=CHANGE_DICT, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_2_valid_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_2_valid_model]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='SUGGESTION ITERATED SUCCESS: 1' + ) + ]) + + migrated_suggestion_2_model = ( + suggestion_models.GeneralSuggestionModel.get( + suggestion_2_valid_model.id) + ) + self.assertEqual( + migrated_suggestion_2_model.status, + suggestion_models.STATUS_IN_REVIEW + ) + self.assertEqual( + migrated_suggestion_2_model.change_cmd['translation_html'], + '

    Translation for content.

    ') + + def test_invalid_suggestion_with_list_translation_is_migrated(self) -> None: + CHANGE_DICT_WITH_LIST_TRANSLATION['translation_html'] = [ + ERRORED_TRANSLATION_VALUE, ''] + CHANGE_DICT_WITH_LIST_TRANSLATION['content_id'] = 'invalid_content_id' + suggestion_3_invalid_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=CHANGE_DICT_WITH_LIST_TRANSLATION, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_3_invalid_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_3_invalid_model]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='SUGGESTION ITERATED SUCCESS: 1' + ) + ]) + + migrated_suggestion_invalid_model = ( + suggestion_models.GeneralSuggestionModel.get( + suggestion_3_invalid_model.id) + ) + self.assertEqual( + migrated_suggestion_invalid_model.status, + suggestion_models.STATUS_REJECTED + ) + self.assertEqual( + migrated_suggestion_invalid_model.change_cmd['translation_html'], [ + '

    ' + '

    ' + ] + ) + + +class AuditRejectSuggestionWithMissingContentIdMigrationJobTests( + job_test_utils.JobTestBase +): + + JOB_CLASS = ( + rejecting_suggestion_for_invalid_content_ids_jobs + .AuditRejectSuggestionWithMissingContentIdMigrationJob + ) + TARGET_ID = 'exp2' + + def setUp(self) -> None: + super().setUp() + self.exp_2 = self.create_model( + exp_models.ExplorationModel, + id=self.TARGET_ID, + title='title', + init_state_name=feconf.DEFAULT_INIT_STATE_NAME, + category=feconf.DEFAULT_EXPLORATION_CATEGORY, + objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE, + language_code='en', + tags=['Topic'], + blurb='blurb', + author_notes='author notes', + states_schema_version=52, + param_specs={}, + param_changes=[], + auto_tts_enabled=feconf.DEFAULT_AUTO_TTS_ENABLED, + correctness_feedback_enabled=False, + states={feconf.DEFAULT_INIT_STATE_NAME: STATE_DICT_IN_V52}, + ) + self.put_multi([self.exp_2]) + + def test_empty_storage_fo_audit(self) -> None: + self.assert_job_output_is_empty() + + def test_invalid_suggestion_is_reported_with_expected_data(self) -> None: + CHANGE_DICT['content_id'] = 'invalid_id' + CHANGE_DICT['translation_html'] = ERRORED_TRANSLATION_VALUE + suggestion_1_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=CHANGE_DICT, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_1_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_1_model]) + + errored_value = ( + '{\'exp_id\': \'exp2\', \'missing_content_ids\': ' + '[{\'content_id\': \'invalid_id\', \'state_name\': ' + '\'Introduction\'}], \'content_translation\': ' + '[{\'content_before\': \'

    ' + '

    \', \'content_after\': \'

    ' + '' + '

    \'}]}' + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='GROUP OF SUGGESTION PER EXP SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + f'Results are - {errored_value}' + ) + ]) + + migrated_suggestion_1_model = ( + suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id) + ) + self.assertEqual( + migrated_suggestion_1_model.status, + suggestion_models.STATUS_IN_REVIEW + ) + self.assertEqual( + migrated_suggestion_1_model.change_cmd['translation_html'], + ERRORED_TRANSLATION_VALUE) + + def test_valid_suggestion_is_reported_with_expected_data(self) -> None: + CHANGE_DICT['content_id'] = 'default_outcome' + CHANGE_DICT['translation_html'] = '

    Translation for content.

    ' + valid_suggestion_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=CHANGE_DICT, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + valid_suggestion_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + valid_suggestion_model]) + + errored_value = ( + '{\'exp_id\': \'exp2\', \'missing_content_ids\': [], ' + '\'content_translation\': [{\'content_before\': \'

    ' + 'Translation for content.

    \', \'content_after\': ' + '\'

    Translation for content.

    \'}]}' + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='GROUP OF SUGGESTION PER EXP SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + f'Results are - {errored_value}' + ) + ]) + + migrated_suggestion_2_model = ( + suggestion_models.GeneralSuggestionModel.get( + valid_suggestion_model.id) + ) + self.assertEqual( + migrated_suggestion_2_model.status, + suggestion_models.STATUS_IN_REVIEW + ) + self.assertEqual( + migrated_suggestion_2_model.change_cmd['translation_html'], + '

    Translation for content.

    ') + + def test_invalid_suggestion_list_type_is_reported_with_expected_data( + self + ) -> None: + CHANGE_DICT_WITH_LIST_TRANSLATION['content_id'] = 'invalid_id' + CHANGE_DICT_WITH_LIST_TRANSLATION[ + 'translation_html'] = [ERRORED_TRANSLATION_VALUE, ''] + suggestion_3_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=CHANGE_DICT_WITH_LIST_TRANSLATION, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_3_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_3_model]) + + errored_value = ( + '{\'exp_id\': \'exp2\', \'missing_content_ids\': ' + '[{\'content_id\': \'invalid_id\', \'state_name\': ' + '\'Introduction\'}], \'content_translation\': ' + '[{\'content_before\': [\'

    ' + '

    \', \'\'], \'content_after\': ' + '[\'

    ' + '

    \']}]}' + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='GROUP OF SUGGESTION PER EXP SUCCESS: 1' + ), + job_run_result.JobRunResult.as_stdout( + f'Results are - {errored_value}' + ) + ]) + + migrated_suggestion_3_model = ( + suggestion_models.GeneralSuggestionModel.get(suggestion_3_model.id) + ) + self.assertEqual( + migrated_suggestion_3_model.status, + suggestion_models.STATUS_IN_REVIEW + ) + self.assertEqual( + migrated_suggestion_3_model.change_cmd['translation_html'], + [ERRORED_TRANSLATION_VALUE, '']) diff --git a/core/jobs/batch_jobs/skill_migration_jobs.py b/core/jobs/batch_jobs/skill_migration_jobs.py new file mode 100644 index 000000000000..328cf317c580 --- /dev/null +++ b/core/jobs/batch_jobs/skill_migration_jobs.py @@ -0,0 +1,330 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Jobs used for migrating the skill models.""" + +from __future__ import annotations + +import logging + +from core import feconf +from core.domain import skill_domain +from core.domain import skill_fetchers +from core.domain import skill_services +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.transforms import results_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam +import result + +from typing import Iterable, Sequence, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + from mypy_imports import skill_models + +(base_models, skill_models,) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.SKILL +]) + +datastore_services = models.Registry.import_datastore_services() + + +# TODO(#15927): This job needs to be kept in sync with AuditSkillMigrationJob +# and later we will unify these jobs together. +class MigrateSkillJob(base_jobs.JobBase): + """Job that migrates skill models.""" + + @staticmethod + def _migrate_skill( + skill_id: str, skill_model: skill_models.SkillModel + ) -> result.Result[Tuple[str, skill_domain.Skill], Tuple[str, Exception]]: + """Migrates skill and transform skill model into skill object. + + Args: + skill_id: str. The id of the skill. + skill_model: SkillModel. The skill model to migrate. + + Returns: + Result((str, Skill), (str, Exception)). Result containing tuple that + consists of skill ID and either skill object or Exception. Skill + object is returned when the migration was successful and Exception + is returned otherwise. 
+ """ + try: + skill = skill_fetchers.get_skill_from_model(skill_model) + skill.validate() + except Exception as e: + logging.exception(e) + return result.Err((skill_id, e)) + + return result.Ok((skill_id, skill)) + + @staticmethod + def _generate_skill_changes( + skill_id: str, skill_model: skill_models.SkillModel + ) -> Iterable[Tuple[str, skill_domain.SkillChange]]: + """Generates skill change objects. Skill change object is generated when + schema version for some field is lower than the latest schema version. + + Args: + skill_id: str. The id of the skill. + skill_model: SkillModel. The skill for which to generate + the change objects. + + Yields: + (str, SkillChange). Tuple containing skill ID and skill change + object. + """ + contents_version = skill_model.skill_contents_schema_version + if contents_version < feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION: + skill_change = skill_domain.SkillChange({ + 'cmd': ( + skill_domain.CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION), + 'from_version': skill_model.skill_contents_schema_version, + 'to_version': feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION + }) + yield (skill_id, skill_change) + + misconceptions_version = skill_model.misconceptions_schema_version + if misconceptions_version < feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION: # pylint: disable=line-too-long + skill_change = skill_domain.SkillChange({ + 'cmd': ( + skill_domain + .CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION + ), + 'from_version': skill_model.misconceptions_schema_version, + 'to_version': feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION + }) + yield (skill_id, skill_change) + + rubric_schema_version = skill_model.rubric_schema_version + if rubric_schema_version < feconf.CURRENT_RUBRIC_SCHEMA_VERSION: + skill_change = skill_domain.SkillChange({ + 'cmd': ( + skill_domain.CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION), + 'from_version': skill_model.rubric_schema_version, + 'to_version': feconf.CURRENT_RUBRIC_SCHEMA_VERSION + }) + yield (skill_id, 
skill_change) + + @staticmethod + def _update_skill( + skill_model: skill_models.SkillModel, + migrated_skill: skill_domain.Skill, + skill_changes: Sequence[skill_domain.SkillChange] + ) -> Sequence[base_models.BaseModel]: + """Generates newly updated skill models. + + Args: + skill_model: SkillModel. The skill which should be updated. + migrated_skill: Skill. The migrated skill domain object. + skill_changes: sequence(SkillChange). The skill changes to apply. + + Returns: + sequence(BaseModel). Sequence of models which should be put into + the datastore. + """ + updated_skill_model = ( + skill_services.populate_skill_model_fields( + skill_model, migrated_skill)) + commit_message = ( + 'Update skill content schema version to %d and ' + 'skill misconceptions schema version to %d and ' + 'skill rubrics schema version to %d.' + ) % ( + feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION, + feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION, + feconf.CURRENT_RUBRIC_SCHEMA_VERSION + ) + change_dicts = [change.to_dict() for change in skill_changes] + with datastore_services.get_ndb_context(): + models_to_put = updated_skill_model.compute_models_to_commit( + feconf.MIGRATION_BOT_USERNAME, + feconf.COMMIT_TYPE_EDIT, + commit_message, + change_dicts, + additional_models={} + ) + models_to_put_values = [] + for model in models_to_put.values(): + # Here, we are narrowing down the type from object to BaseModel. + assert isinstance(model, base_models.BaseModel) + models_to_put_values.append(model) + datastore_services.update_timestamps_multi(models_to_put_values) + return models_to_put_values + + @staticmethod + def _update_skill_summary( + migrated_skill: skill_domain.Skill, + skill_summary_model: skill_models.SkillSummaryModel + ) -> skill_models.SkillSummaryModel: + """Generates newly updated skill summary model. + + Args: + migrated_skill: Skill. The migrated skill domain object. + skill_summary_model: SkillSummaryModel. The skill summary model + to update. 
+ + Returns: + SkillSummaryModel. The updated skill summary model to put into + the datastore. + """ + skill_summary = skill_services.compute_summary_of_skill(migrated_skill) + skill_summary.version += 1 + updated_skill_summary_model = ( + skill_services.populate_skill_summary_model_fields( + skill_summary_model, skill_summary + ) + ) + return updated_skill_summary_model + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the skill migration. + + Returns: + PCollection. A PCollection of results from the skill migration. + """ + unmigrated_skill_models = ( + self.pipeline + | 'Get all non-deleted skill models' >> ( + ndb_io.GetModels(skill_models.SkillModel.get_all())) + # Pylint disable is needed because pylint is not able to correctly + # detect that the value is passed through the pipe. + | 'Add skill model ID' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda skill_model: skill_model.id) + ) + skill_summary_models = ( + self.pipeline + | 'Get all non-deleted skill summary models' >> ( + ndb_io.GetModels(skill_models.SkillSummaryModel.get_all())) + # Pylint disable is needed because pylint is not able to correctly + # detect that the value is passed through the pipe. 
+ | 'Add skill summary ID' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda skill_summary_model: skill_summary_model.id) + ) + + all_migrated_skill_results = ( + unmigrated_skill_models + | 'Transform and migrate model' >> beam.MapTuple( + self._migrate_skill) + ) + + migrated_skill_job_run_results = ( + all_migrated_skill_results + | 'Generate results for migration' >> ( + job_result_transforms.ResultsToJobRunResults('SKILL PROCESSED')) + ) + + filtered_migrated_skills = ( + all_migrated_skill_results + | 'Filter migration results' >> ( + results_transforms.DrainResultsOnError()) + ) + + migrated_skills = ( + filtered_migrated_skills + | 'Unwrap ok' >> beam.Map( + lambda result_item: result_item.unwrap()) + ) + + skill_changes = ( + unmigrated_skill_models + | 'Generate skill changes' >> beam.FlatMapTuple( + self._generate_skill_changes) + ) + + skill_objects_list = ( + { + 'skill_model': unmigrated_skill_models, + 'skill_summary_model': skill_summary_models, + 'skill': migrated_skills, + 'skill_changes': skill_changes + } + | 'Merge objects' >> beam.CoGroupByKey() + | 'Get rid of ID' >> beam.Values() # pylint: disable=no-value-for-parameter + ) + + transformed_skill_objects_list = ( + skill_objects_list + | 'Remove unmigrated skills' >> beam.Filter( + lambda x: len(x['skill_changes']) > 0 and len(x['skill']) > 0) + | 'Reorganize the skill objects' >> beam.Map(lambda objects: { + 'skill_model': objects['skill_model'][0], + 'skill_summary_model': objects['skill_summary_model'][0], + 'skill': objects['skill'][0], + 'skill_changes': objects['skill_changes'] + }) + + ) + + already_migrated_job_run_results = ( + skill_objects_list + | 'Remove migrated skills' >> beam.Filter( + lambda x: ( + len(x['skill_changes']) == 0 and len(x['skill']) > 0 + )) + | 'Transform previously migrated skills into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'SKILL PREVIOUSLY MIGRATED')) + ) + + skill_objects_list_job_run_results = ( + 
transformed_skill_objects_list + | 'Transform skill objects into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'SKILL MIGRATED')) + ) + + skill_models_to_put = ( + transformed_skill_objects_list + | 'Generate skill models to put' >> beam.FlatMap( + lambda skill_objects: self._update_skill( + skill_objects['skill_model'], + skill_objects['skill'], + skill_objects['skill_changes'], + )) + ) + + skill_summary_models_to_put = ( + transformed_skill_objects_list + | 'Generate skill summary models to put' >> beam.Map( + lambda skill_objects: self._update_skill_summary( + skill_objects['skill'], + skill_objects['skill_summary_model'] + )) + ) + + unused_put_results = ( + (skill_models_to_put, skill_summary_models_to_put) + | 'Merge models' >> beam.Flatten() + | 'Put models into the datastore' >> ndb_io.PutModels() + ) + + return ( + ( + migrated_skill_job_run_results, + already_migrated_job_run_results, + skill_objects_list_job_run_results + ) + | beam.Flatten() + ) diff --git a/core/jobs/batch_jobs/skill_migration_jobs_test.py b/core/jobs/batch_jobs/skill_migration_jobs_test.py new file mode 100644 index 000000000000..f43116e0e2f8 --- /dev/null +++ b/core/jobs/batch_jobs/skill_migration_jobs_test.py @@ -0,0 +1,386 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for jobs.batch_jobs.exp_recommendation_computation_jobs.""" + +from __future__ import annotations + +import datetime + +from core import feconf +from core.domain import skill_domain +from core.jobs import job_test_utils +from core.jobs.batch_jobs import skill_migration_jobs +from core.jobs.types import job_run_result +from core.platform import models + +from typing import Final, Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import skill_models + +(skill_models,) = models.Registry.import_models([models.Names.SKILL]) + + +class MigrateSkillJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + skill_migration_jobs.MigrateSkillJob + ] = skill_migration_jobs.MigrateSkillJob + + SKILL_1_ID: Final = 'skill_1' + SKILL_2_ID: Final = 'skill_2' + + def setUp(self) -> None: + super().setUp() + skill_summary_model = self.create_model( + skill_models.SkillSummaryModel, + id=self.SKILL_1_ID, + description='description', + misconception_count=0, + worked_examples_count=0, + language_code='cs', + skill_model_last_updated=datetime.datetime.utcnow(), + skill_model_created_on=datetime.datetime.utcnow(), + version=1 + ) + skill_summary_model.update_timestamps() + skill_summary_model.put() + self.latest_skill_contents = { + 'explanation': { + 'content_id': 'content_id', + 'html': 'bo ld' + }, + 'worked_examples': [], + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'content_id': {} + } + }, + 'written_translations': { + 'translations_mapping': { + 'content_id': {} + } + } + } + self.latest_rubrics = [{ + 'difficulty': 'Easy', + 'explanations': ['ab'] + }, { + 'difficulty': 'Medium', + 'explanations': ['a b'] + }, { + 'difficulty': 'Hard', + 'explanations': ['a b'] + }] + self.latest_misconceptions = [{ + 'id': 1, + 'name': 'misconception_name', + 'notes': 'notenote ', + 'feedback': 'feedbackfeedback ', + 'must_be_addressed': False + }] + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def 
test_unmigrated_skill_with_unmigrated_rubric_is_migrated(self) -> None: + skill_model = self.create_model( + skill_models.SkillModel, + id=self.SKILL_1_ID, + description='description', + misconceptions_schema_version=( + feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION), + rubric_schema_version=4, + rubrics=[{ + 'difficulty': 'Easy', + 'explanations': ['a\nb'] + }, { + 'difficulty': 'Medium', + 'explanations': ['a b'] + }, { + 'difficulty': 'Hard', + 'explanations': ['a b'] + }], + language_code='cs', + skill_contents_schema_version=( + feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION), + skill_contents=self.latest_skill_contents, + next_misconception_id=2, + all_questions_merged=False + ) + skill_model.update_timestamps() + skill_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create skill', [{ + 'cmd': skill_domain.CMD_CREATE_NEW + }]) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SKILL PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult(stdout='SKILL MIGRATED SUCCESS: 1'), + ]) + + migrated_skill_model = skill_models.SkillModel.get(self.SKILL_1_ID) + self.assertEqual(migrated_skill_model.version, 2) + self.assertEqual( + migrated_skill_model.rubric_schema_version, + feconf.CURRENT_RUBRIC_SCHEMA_VERSION) + self.assertEqual(migrated_skill_model.rubrics, self.latest_rubrics) + + def test_unmigrated_skill_with_unmigrated_misconceptions_is_migrated( + self + ) -> None: + skill_model = self.create_model( + skill_models.SkillModel, + id=self.SKILL_1_ID, + description='description', + misconceptions_schema_version=4, + rubric_schema_version=feconf.CURRENT_RUBRIC_SCHEMA_VERSION, + rubrics=self.latest_rubrics, + language_code='cs', + skill_contents_schema_version=( + feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION), + skill_contents=self.latest_skill_contents, + misconceptions=[{ + 'id': 1, + 'name': 'misconception_name', + 'notes': 'note\nnote ', + 'feedback': 'feedback\nfeedback ', + 'must_be_addressed': False + }], + next_misconception_id=2, + 
all_questions_merged=False + ) + skill_model.update_timestamps() + skill_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create skill', [{ + 'cmd': skill_domain.CMD_CREATE_NEW + }]) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SKILL PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult(stdout='SKILL MIGRATED SUCCESS: 1'), + ]) + + migrated_skill_model = skill_models.SkillModel.get(self.SKILL_1_ID) + self.assertEqual(migrated_skill_model.version, 2) + self.assertEqual( + migrated_skill_model.misconceptions_schema_version, + feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION) + self.assertEqual( + migrated_skill_model.misconceptions, + self.latest_misconceptions + ) + + def test_unmigrated_skill_with_unmigrated_skill_contents_is_migrated( + self + ) -> None: + skill_model = self.create_model( + skill_models.SkillModel, + id=self.SKILL_1_ID, + description='description', + misconceptions_schema_version=( + feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION), + rubric_schema_version=feconf.CURRENT_RUBRIC_SCHEMA_VERSION, + rubrics=self.latest_rubrics, + language_code='cs', + skill_contents_schema_version=3, + skill_contents={ + 'explanation': { + 'content_id': 'content_id', + 'html': 'bo ld\n' + }, + 'worked_examples': [], + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'content_id': {} + } + }, + 'written_translations': { + 'translations_mapping': { + 'content_id': {} + } + } + }, + next_misconception_id=2, + all_questions_merged=False + ) + skill_model.update_timestamps() + skill_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create skill', [{ + 'cmd': skill_domain.CMD_CREATE_NEW + }]) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SKILL PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult(stdout='SKILL MIGRATED SUCCESS: 1'), + ]) + + migrated_skill_model = skill_models.SkillModel.get(self.SKILL_1_ID) + self.assertEqual(migrated_skill_model.version, 2) + self.assertEqual( + migrated_skill_model.skill_contents_schema_version, + 
feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION) + self.assertEqual( + migrated_skill_model.skill_contents, + self.latest_skill_contents + ) + + def test_skill_summary_of_unmigrated_skill_is_updated(self) -> None: + skill_model = self.create_model( + skill_models.SkillModel, + id=self.SKILL_1_ID, + description='description', + misconceptions_schema_version=( + feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION), + rubric_schema_version=feconf.CURRENT_RUBRIC_SCHEMA_VERSION, + rubrics=self.latest_rubrics, + language_code='cs', + skill_contents_schema_version=3, + skill_contents={ + 'explanation': { + 'content_id': 'content_id', + 'html': 'bo ld\n' + }, + 'worked_examples': [], + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'content_id': {} + } + }, + 'written_translations': { + 'translations_mapping': { + 'content_id': {} + } + } + }, + next_misconception_id=2, + all_questions_merged=False + ) + skill_model.update_timestamps() + skill_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create skill', [{ + 'cmd': skill_domain.CMD_CREATE_NEW + }]) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SKILL PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult(stdout='SKILL MIGRATED SUCCESS: 1'), + ]) + + migrated_skill_summary_model = skill_models.SkillSummaryModel.get( + self.SKILL_1_ID) + self.assertEqual(migrated_skill_summary_model.version, 2) + + def test_broken_skill_is_not_migrated(self) -> None: + skill_model_one = self.create_model( + skill_models.SkillModel, + id=self.SKILL_1_ID, + description='description', + misconceptions_schema_version=( + feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION), + rubric_schema_version=4, + rubrics=[{ + 'difficulty': 'Easy', + 'explanations': ['a\nb'] + }, { + 'difficulty': 'aaa', + 'explanations': ['a b'] + }, { + 'difficulty': 'Hard', + 'explanations': ['a b'] + }], + language_code='cs', + skill_contents_schema_version=( + feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION), + skill_contents=self.latest_skill_contents, + 
next_misconception_id=2, + all_questions_merged=False + ) + skill_model_one.update_timestamps() + skill_model_one.commit(feconf.SYSTEM_COMMITTER_ID, 'Create skill', [{ + 'cmd': skill_domain.CMD_CREATE_NEW + }]) + + skill_model_two = self.create_model( + skill_models.SkillModel, + id=self.SKILL_2_ID, + description='description', + misconceptions_schema_version=( + feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION), + rubric_schema_version=4, + rubrics=[{ + 'difficulty': 'Easy', + 'explanations': ['a\nb'] + }, { + 'difficulty': 'Medium', + 'explanations': ['a b'] + }, { + 'difficulty': 'Hard', + 'explanations': ['a b'] + }], + language_code='cs', + skill_contents_schema_version=( + feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION), + skill_contents=self.latest_skill_contents, + next_misconception_id=2, + all_questions_merged=False + ) + skill_model_two.update_timestamps() + skill_model_two.commit(feconf.SYSTEM_COMMITTER_ID, 'Create skill', [{ + 'cmd': skill_domain.CMD_CREATE_NEW + }]) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'SKILL PROCESSED ERROR: "(\'skill_1\', ''ValidationError(' + '\'Invalid difficulty received for rubric: aaa\'))": 1' + ) + ), + job_run_result.JobRunResult(stdout='SKILL PROCESSED SUCCESS: 1'), + ]) + + migrated_skill_model = skill_models.SkillModel.get(self.SKILL_1_ID) + self.assertEqual(migrated_skill_model.version, 1) + migrated_skill_model = skill_models.SkillModel.get(self.SKILL_2_ID) + self.assertEqual(migrated_skill_model.version, 1) + + def test_migrated_skill_is_not_migrated(self) -> None: + skill_model = self.create_model( + skill_models.SkillModel, + id=self.SKILL_1_ID, + description='description', + misconceptions_schema_version=( + feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION), + rubric_schema_version=feconf.CURRENT_RUBRIC_SCHEMA_VERSION, + rubrics=self.latest_rubrics, + language_code='cs', + skill_contents_schema_version=( + feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION), + 
skill_contents=self.latest_skill_contents, + next_misconception_id=2, + all_questions_merged=False + ) + skill_model.update_timestamps() + skill_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create skill', [{ + 'cmd': skill_domain.CMD_CREATE_NEW + }]) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SKILL PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='SKILL PREVIOUSLY MIGRATED SUCCESS: 1') + ]) + + unmigrated_skill_model = skill_models.SkillModel.get(self.SKILL_1_ID) + self.assertEqual(unmigrated_skill_model.version, 1) diff --git a/core/jobs/batch_jobs/store_profile_images_to_gcs_jobs.py b/core/jobs/batch_jobs/store_profile_images_to_gcs_jobs.py new file mode 100644 index 000000000000..a264bb9f70e7 --- /dev/null +++ b/core/jobs/batch_jobs/store_profile_images_to_gcs_jobs.py @@ -0,0 +1,332 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Store user profile picture to GCS.""" + +from __future__ import annotations + +import io + +from core import utils +from core.domain import user_services +from core.jobs import base_jobs +from core.jobs.io import gcs_io +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +from PIL import Image +import apache_beam as beam + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) + + +class StoreProfilePictureToGCSJob(base_jobs.JobBase): + """Store profile picture to GCS job.""" + + @staticmethod + def _generate_png_file_object( + user_model: user_models.UserSettingsModel + ) -> gcs_io.FileObjectDict: + """Returns file object for png images to write to the GCS. + + Args: + user_model: UserSettingsModel. The user settings model. + + Returns: + file_dict: gcs_io.FileObjectDict. The FileObjectDict containing + filepath and data. + """ + username = user_model.username + filepath = f'user/{username}/assets/profile_picture.png' + profile_picture_binary = utils.convert_png_data_url_to_binary( + user_model.profile_picture_data_url) + file_dict: gcs_io.FileObjectDict = { + 'filepath': filepath, + 'data': profile_picture_binary + } + return file_dict + + @staticmethod + def _generate_webp_file_object( + user_model: user_models.UserSettingsModel + ) -> gcs_io.FileObjectDict: + """Returns file object for webp images to write to the GCS. + + Args: + user_model: UserSettingsModel. The user settings model. + + Returns: + file_dict: gcs_io.FileObjectDict. The FileObjectDict containing + filepath and data. 
+ """ + username = user_model.username + filepath = f'user/{username}/assets/profile_picture.webp' + profile_picture_binary = utils.convert_png_data_url_to_binary( + user_model.profile_picture_data_url) + output = io.BytesIO() + image = Image.open(io.BytesIO(profile_picture_binary)).convert('RGB') + image.save(output, 'webp') + webp_binary = output.getvalue() + file_dict: gcs_io.FileObjectDict = { + 'filepath': filepath, + 'data': webp_binary + } + return file_dict + + def _make_profile_picture_valid( + self, user_model: user_models.UserSettingsModel + ) -> user_models.UserSettingsModel: + """Generate gravatar for users that have profile picture None. + + Args: + user_model: user_models.UserSettingsModel. The user model. + + Returns: + user_model: user_models.UserSettingsModel. The updated user model. + """ + profile_picture = user_model.profile_picture_data_url + if profile_picture is None: + user_model.profile_picture_data_url = ( + user_services.fetch_gravatar(user_model.email)) + return user_model + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + users_with_valid_username = ( + self.pipeline + | 'Get all non-deleted UserSettingsModel' >> ndb_io.GetModels( + user_models.UserSettingsModel.get_all(include_deleted=False)) + | 'Filter valid users with not None username' >> beam.Filter( + lambda model: model.username is not None) + | 'Make the invalid profile picture valid' >> beam.Map( + self._make_profile_picture_valid) + ) + + write_png_files_to_gcs = ( + users_with_valid_username + | 'Map files for png' >> beam.Map(self._generate_png_file_object) + | 'Write png file to GCS' >> gcs_io.WriteFile(mime_type='image/png') + ) + + total_png_images = ( + write_png_files_to_gcs + | 'Total png images wrote to GCS' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOTAL PNG IMAGES')) + ) + + write_webp_files_to_gcs = ( + users_with_valid_username + | 'Map files for webp' >> beam.Map(self._generate_webp_file_object) + | 'Write webp file to GCS' 
>> gcs_io.WriteFile( + mime_type='image/webp') + ) + + total_webp_images = ( + write_webp_files_to_gcs + | 'Total webp images wrote to GCS' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOTAL WEBP IMAGES')) + ) + + unused_put_results = ( + users_with_valid_username + | 'Updating the datastore with valid profile images' + >> ndb_io.PutModels() + ) + + return ( + ( + total_png_images, + total_webp_images + ) + | 'Combine results' >> beam.Flatten() + ) + + +class AuditProfilePictureFromGCSJob(base_jobs.JobBase): + """Audit profile pictures are present in GCS.""" + + def _png_base64_to_webp_base64(self, png_base64: str) -> str: + """Convert png base64 to webp base64. + + Args: + png_base64: str. The png base64 string. + + Returns: + str. The webp base64 string. + """ + png_binary = utils.convert_png_data_url_to_binary(png_base64) + output = io.BytesIO() + image = Image.open(io.BytesIO(png_binary)).convert('RGB') + image.save(output, 'webp') + webp_binary = output.getvalue() + return utils.convert_png_or_webp_binary_to_data_url( + webp_binary, 'webp') + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + users_with_valid_username = ( + self.pipeline + | 'Get all non-deleted UserSettingsModel' >> ndb_io.GetModels( + user_models.UserSettingsModel.get_all(include_deleted=False)) + | 'Filter valid users with not None username' >> beam.Filter( + lambda model: model.username is not None) + ) + + username_with_profile_data = ( + users_with_valid_username + | 'Map username and data url' >> beam.Map( + lambda model: ( + model.username, + model.profile_picture_data_url.replace( + '%2B', '+').replace('%3D', '=').replace('%0A', ''))) + ) + + # Audit png images. 
+ audit_png_profile_pictures = ( + users_with_valid_username + | 'Map with username for png' >> beam.Map( + lambda model: model.username) + | 'Map with filename for png' >> beam.Map( + lambda username: f'user/{username}/assets/profile_picture.png') + | 'Read png files from GCS' >> gcs_io.ReadFile() + | 'Filter the results with OK status png' >> beam.Filter( + lambda result: result.is_ok()) + | 'Unwrap the png data' >> beam.Map(lambda result: result.unwrap()) + | 'Make tuple of username and data url for png' >> beam.Map( + lambda data: ( + data[0].split('/')[1], + utils.convert_png_or_webp_binary_to_data_url( + data[1], 'png').replace('%2B', '+').replace( + '%3D', '=').replace('%0A', ''))) + ) + + mismatched_png_images_on_gcs_and_model = ( + { + 'gcs_picture': audit_png_profile_pictures, + 'model_picture': username_with_profile_data + } + | 'Merge models for png' >> beam.CoGroupByKey() + | 'Filter invalid png images' >> beam.Filter( + lambda object_image: ( + object_image[1]['gcs_picture'] != + object_image[1]['model_picture']) + ) + ) + + report_mismatched_png_images_on_gcs_and_model = ( + mismatched_png_images_on_gcs_and_model + | 'Report the png data' >> beam.Map(lambda data: ( + job_run_result.JobRunResult.as_stderr( + 'The user having username %s, have mismatched png image on ' + 'GCS and in the model.' % (data[0]) + ) + )) + ) + + total_mismatched_png_images = ( + mismatched_png_images_on_gcs_and_model + | 'Total number of mismatched png images' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOTAL MISMATCHED PNG IMAGES')) + ) + + png_images_iterated_on_gcs = ( + audit_png_profile_pictures + | 'Total number of png images iterated' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOTAL PNG IMAGES ITERATED ON GCS')) + ) + + # Audit webp images. 
+ audit_webp_profile_pictures = ( + users_with_valid_username + | 'Map with username for webp' >> beam.Map( + lambda model: model.username) + | 'Map with filename for webp' >> beam.Map( + lambda username: f'user/{username}/assets/profile_picture.webp') + | 'Read webp files from GCS' >> gcs_io.ReadFile() + | 'Filter the results with OK status webp' >> beam.Filter( + lambda result: result.is_ok()) + | 'Unwrap the webp data' >> beam.Map(lambda result: result.unwrap()) + | 'Make tuple of username and data url for webp' >> beam.Map( + lambda data: ( + data[0].split('/')[1], + utils.convert_png_or_webp_binary_to_data_url( + data[1], 'webp')) + ) + ) + + username_with_profile_data_webp = ( + username_with_profile_data + | 'Convert to webp base64 string' >> beam.Map( + lambda data: ( + data[0], self._png_base64_to_webp_base64(data[1]))) + ) + + mismatched_webp_images_on_gcs_and_model = ( + { + 'gcs_picture': audit_webp_profile_pictures, + 'model_picture': username_with_profile_data_webp + } + | 'Merge models for webp' >> beam.CoGroupByKey() + | 'Filter invalid webp images' >> beam.Filter( + lambda object_image: ( + object_image[1]['gcs_picture'] != + object_image[1]['model_picture']) + ) + ) + + report_mismatched_webp_images_on_gcs_and_model = ( + mismatched_webp_images_on_gcs_and_model + | 'Report the webp data' >> beam.Map(lambda data: ( + job_run_result.JobRunResult.as_stderr( + 'The user having username %s, has incompatible webp image ' + 'on GCS and png in the model.' 
% (data[0]) + ) + )) + ) + + total_mismatched_webp_images = ( + mismatched_webp_images_on_gcs_and_model + | 'Total number of mismatched webp images' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOTAL MISMATCHED WEBP IMAGES')) + ) + + webp_images_iterated_on_gcs = ( + audit_webp_profile_pictures + | 'Total number of webp images iterated' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOTAL WEBP IMAGES ITERATED ON GCS')) + ) + + return ( + ( + report_mismatched_png_images_on_gcs_and_model, + total_mismatched_png_images, + png_images_iterated_on_gcs, + report_mismatched_webp_images_on_gcs_and_model, + total_mismatched_webp_images, + webp_images_iterated_on_gcs + ) + | 'Combine results' >> beam.Flatten() + ) diff --git a/core/jobs/batch_jobs/store_profile_images_to_gcs_jobs_test.py b/core/jobs/batch_jobs/store_profile_images_to_gcs_jobs_test.py new file mode 100644 index 000000000000..aee367c998c3 --- /dev/null +++ b/core/jobs/batch_jobs/store_profile_images_to_gcs_jobs_test.py @@ -0,0 +1,255 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for stor_profile_images_to_gcs_jobs""" + +from __future__ import annotations + +import io + +from core import feconf +from core import utils +from core.domain import user_services +from core.jobs import job_test_utils +from core.jobs.batch_jobs import store_profile_images_to_gcs_jobs +from core.jobs.types import job_run_result +from core.platform import models + +from PIL import Image + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import app_identity_services + from mypy_imports import storage_services + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) + +app_identity_services = models.Registry.import_app_identity_services() +storage_services = models.Registry.import_storage_services() + +VALID_IMAGE = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJYAAACWCAYAAAA8AXHiAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAgAElEQVR4nO2d6XfcxtXmnwLQC9lNtihSEneZlCxRsmXZmXjJa8+Mj08mf+6c9+tMTpJ3Ejtx4kQ2LWsjRcniIkoURXHpfUHNh0IBhUIBKKA3UnGdI5F8ujYUCrd+uF24ILZtUwAghIBSCvn3YaSTqo2xUcPtCwBff5JolXoHozkjVVlZ+8+vDwHi9ZNSCgICCv9YDUKzTILLl3Lo2BQ/v2j0pL7f3CxiqmT5xkCeB7qaxT8Qf4q/U0p9uq4mTtKk2u5BEx2awUTRO0i5Xzpao2nj7w8q+O+3xmCZycrKae+w7U0qCoAI4wZFHX3SLBNYuJjDtbkcRvMmHmzW/OctRRumAfzmZhEXzmWC+VKOmQUgdAZ2c3XzlEazbWBtu45PrhdCraeOtv68jqNKG5t7DSzP5Lrq04PNGigoCJx5RQkocT6nABsCwRL0WMuYBIsXc7g6l0Mhbwb6l7YN0yD4zJlUaY2ISguaBKHD3UyUrhIBdvdbOJrvoFQwU7XfaNl48rwBUODxTgOLF3OwzHTH8+JNC29OOuwqpxR8/HyWgFuxHmuWwZa8q3M5d0lX9TlNG6YBfHajiAslqyfnXNTciaWyUrImVhClqcom0QDABvBoq45PbxS12xW1jecNdGwAhKBa72Brr4Hl2Xyi4+Bpfbvh/UHY5PKtLgTgNsM9cV1qBgEuT3tLXmSfQRO3YZpglqpk9cxKiZql4ir596Qpitl0NVBgZ7+Jo0obpYIV2ieV1mhRPN6pg1dECbC2U8fipRwsM/oYZW33dROvj1tsLhHi1kdAXY2CwlsVaVcah/Jr83mMCDcdUX0GTd5GGFPpjkucZqksiNvfYS2FAhM83PRbLZ3kWiuwOgCg1qB49rKBK5LViko2pVjfaQAg8IZFnJhcIQChvmUmqZYxiQ/KtROVLsyYdqNAvZcpwFhhEycpRHe1FBKw5QbA7kELh+U2zjl3iHEmud608XS34ZZ3xhQAuyF4ZzoH0yDKsrL26rCN10ct/8EJ9Yka6zZNrFkWweLFvMtQScfL+UCrXYsvf86k6vY8RWnKu0JVku8C43wZKhjU1RgM8M4y1vpkpaDsi6w93qmj1aag
RLxmGV3UmsznI94hhtVnU4qHmzV+dM7y569P1IQVXEszDfignI9fqlWCkNh2LTMI6onaSKidTngnzqA4I/PioIWjCrtDjGq30bLx88umc/cjJUdb36njssNaUcexf9jCwUnHV1ZVn1DSD8wRWtYy8Pn7RdcK64xpqEYkeFe0axr9BXWVZhBCoiE6ReoF/DnTDKCA7VituLbWtpi14kXd1UC4hGt1xlpx7d99WguUVdXnc2BTPa3RsvGXH09w/1kNjaaNrlNEu5ypLvaZqeTku+2Ql6hu4b2bCeZ8AOp8tHvQwmGlo84HoNG08exlUygLt6ysPd5peBNQkV6+UbQVUZ/vODS1tg083Krj93eO2QRr+fujayFceFe0YZoeqPfLMoVphjwpVBMqrfOsG+YCpQClIJRdetSmWNuuh7a3/ryOVsd2y4llZa3a6GBzrxHa/webtdCyYRr7kVxrtW083KzhD3eOcf/nGuqSBdMac0Ub7vInWapB8BWllFmsMOsUVUHYBOyJRp2rjxBQQsBAmTBvfKUTmIjNNmVedicfZzSxrKyJVkusb/egiTcnnciyKo0DdFqt0bLxcMuZYM9qaLa88RDHJqDxfgj1WSbxgXpo2T5q7lIocpbMXGk0sd7EGoHLC+6djiM92qoHyop+K5fQiL+srFUbNrb2gqy1vt0A/04wSX3OddC11mxTPHKWyAfPGDPysZEZ2Bt3rz7PUlm+fKqy/dQSwfsgNUKIY+L9nz/fb+Go0nb/dr3sUj5CSay2tlMXHKncy97WKhvUHE98j7RWm1mw//PdUQDyg2zDysqgPsxzeGbg3dOAh5veHSK3VnFgrdK4Nx4Qvex6ZVVaEnjvFeRzeB8mqKu0MwPvIjBzb7zoZU8K2/zvte06Ojb1vOwJyvYC3nW1KMgfNqirNFJvdmjWCrEYXSTu+EyjPdys48FWDQSeR5n9dAacEMxNZlDIG1jfqbuaKp+O9sHyKHb2m3h90klcVtTAj2MAWi5jYGkmB0qBCyXLZarTkqyv75bxxa0ichnDZ8rCvOLa3nMnpdK8s6cAZgbWz1+3vE+ICrb1tZ9+roE321V9xO1237V8lqBUMDE9kYFh+MdQHNthadZxpY2v757g8/fHkM8G94aLaSjwLs4uOIAL9FSjVC9ftOYAOPqpAeMFCysLecxNZd1+KB2nGO45tEAIjqs2vrlXxufvFd3JFdZZ3dQNBAa+OoFw5RKcSg0Qt9b0XisVTFybz2N2KgND4aoJlB6yZoGyLyyPyu3A5ApL8tIna+JP1WdxGhAChYq+nCatH+lc0cL1hTxmzmfciRbnzD4NyWJgyCbXcaWDb346wRe3xpC1ok9ymN4Tja9+xL8dBBCM2SnS4H4mula7084VTFxfyGP6fAZG0DCc+mSxhZyAD9NR1UYvgJ6ntBqfUMExlbeIvF1aadTEyuKIz0KdJijXhncuuDe0lOC42nGBPpfxL3dyehvgvTfavweU62qWnxo9YPQDfXJb/Au862lnDcp1NRfeVSkM6H+B9+7h/axCuW7ywTv3dAMAoQAFDQC9mH6Bd3/SBfWcRXBjkYH525qMgHfQ+Ys/KEDhAX2z7VkpQvq4bQbqE8k/OW0a/6erNds2vn1QwV/ulvHqqB2w/PJYnEXN24/lwjsfC+r9pHCBXt7h2Hd4l8xCui0tg9CSbZEBBV4ftfHNT2X8+ccTvDpsK5fXYbNSWs0Ig3dC2E5JNhOZxoE+sHVDkXoG78QviabgNGls7Egq7fUxY9mv75ax96YFW3Eb75Y+I1okvANBjtAB+l/gPV3aP2rhm+M2pkoWrs3ncfGU7VhIkmLhnUgATSllk6ufQP+Ww3uURgG8Ompj/6iMyXF253gWJ5gWvPPkesMJ++I6Cujdun6B91QaALw+buGv98pnEvK14Z3lcT8EKO0r0L/t8P62Q34ieAeI75ElIBzof4H33mtnCfKta/O5wFcJXi7416TgqulqBydtx4vcPbxP
lSysIB9cixMmCmD/qI0LpT4ziqqfEdpRpYN8liCXMRKX5en1SRvEYNr+cVt9noagGQYwfT4LK581AtFX0qQXb1o4rnYwPhqM7ZQU3scLpi96L+BdCUl2SaxuVHFw3Mb8VAbLs/me7LoI03TTcaWDpy8ayGWI8uYnaXq4WcfDrVrXNw3dagaA6fMZXF/IY3zUgLHuPFunAvAk2vp2Aw83gw+TJoX3Zpti+1XTp8knUkdb3ajh6YsmKIDVJzU82W1ol02j6QCuhw02jqqdwM1P0vrcpZL1qG83EnHa9EQG/+P2GD69UcC5IjMsFo++cmU2H+i4nMK03ddN7B+1QAhxQzumfUJnbavuC+fodj8Bs61uVPF0t+kxEQVWN6oAKJZn8onr0+JCRI/ZUaXNJlLLBt+JJW5PUu3a1WqDApQ6m3D4R1QIbNQnjQC4cC6DG4sjOD8eRA0DxIu+EmfaVY5M/qAnP2DxYVLdycQ1N2IMUeejNDrmg00pVp8wSyVabX4uVjc8y6VTXxJN5djlvx9VPOsEfoPk9IlbsXrT1q4vOH7wHa/cRi81NqEsfH6riP94r4iJMX/gXf7TAqWo1DvKWOiq5UpOrw7b2BfCKYqhHeVBkSeLrK0/r6PZtp2r0D+IYr4w7ccnNTx5ro6jxdPqRhWg1MdcSdoI08TfRY1/U9FoCS4Z6foVv83ICRsedNtQGgSVjehCIyCYKjGHrXwzpOqLAcfEPd5poN2Jvjrlg+LhFEXWojYL3JH06vMixqhT3JK0ulHF0+d1QOARQrj59jMKn4D9XgqPKx3XFeONUZCZCPGeN2gKcbuSLLdyfb3SCCGYKmXwH+8V8fl7Rd+kiuqfwRfOSoPFQo8ERamC10dtFk6RCn4I4oV2lMtG3QzwGAyiGZbz8RMqaxzUwSeSeGEoNBHodduI0+SxUvr3CCLdCOK3GXFjrzqpOm0k0SaKJn5zs4gvbhVxaSIDw9C/ETO4P4KAYG2njnYnnLPkCu4+rbF+EKFHVB3aMZKtfBFjSOCAxRMpawzURW5y+iPUEaatblTxZLce24aOJv595DwE3GjarG2nXWeKR2oc6GWHc9gJjKsvsUYIzhVNfHajgC8/HHeeEtKbTKJmiJAmxudUJZExXr5hVokXdyt1fvDQjjrw7r1FAm4dcfAeB+q6Wq+AXgfUeSfitCRA7x5LD0B9vGDik5UC/uftMd/u1qSsRyllX0K7DAUv+ooKCMUZ+WCz5mMv+Z/thHYUG1XBe6NF8XS34ec4Cd5lCyGCelQfchmCW0sjMAxE5lvdqAaYK6zdMA2Au+uj0bLd8Qy0B4qlmZwv2p4qnwf9ar6VNbmNNJplsJdBEcWxhbUbprnwziurN73338mDxwvxcIoEMvgR18SCBEM7qmb34x0WO9RXVpGSgnouY+Dz98dwZTaHj68XYBr9BXpdUF+ezeOD5RFf2KEwiE4C9L0A9dfHbfz1Xhnf/FTG/lE70Iaq3TDNhXfqNALKYqGLke7k9dN9aZE3tb0/iNNhABTUF9pRhvdmm7K47G5D8fCuA+pZi+CLW0X3zWGzk1l8slIQlsTeAr0uqC/P5HB7eQQGIf732fQA6KPaTaJRSvHK2U3x13tlL9Z9SLta8A6nLTHSnVyAe9l5ZhW8A04EFxA8f81CO6qYaW2rjmZLmJQx8K4D6vw7OPHFTpRSzExm8dmNoju5VGVpCqDXBfWl6Rw+vFrw1SVOrqiyUUAv5lOVTatRyl7++ecfT/C3+2UcHLfd85cY3kE9qFXFQpe97NwTKx6ksBK6SeWN5152X/9C4F0X1HMZgs/fK2J81AhMYkopLk1Y+HSlAIMEy4r16QK9LqgvzWTx4dVRt5z40zLZiygvTlipgN7tdw/gPUx7ecAm2LcPKnhz0lYylXxsAXjnv3NvvJi4lz0KhFX/uDdenCzcy67KL8O7Lqh//l4RJek1IrLFmT6fwccrha6BnvOPDqjfvjIaCb3yCynTAH0v4D1K
synF7usm/t+PJ/jHwwoOy51A++KxBeDdM2nE541Xednj4J3DoeyN93nZhXwqeE8C6uMFMzCR+MGK2sz5TFdAL55gHVCXk4qPuOVKC/S9gHcdjVL2Hsk//3iC7x5VcFz1v8EjEt45fvA3kxIS7mV3zpxPI8ISyTXRGy962eV8ohlOA+o6sA2kB3oX1MXt2DGgrgu93QB9VF/6odk2xc5+C3/64QT/XKuiXPOHC1fCu1sX8bzxYV72OHjnGvfGy152OR/XVjeqePoiHahHwbaoJQV6Dur1JvXl0wF13m6clhzok3v3e6lRG9h+1cQf7hzjznoVJ1W2RJL//ZcD/yUatNK4NGHh5Zt28IOEiRBgqmTh1WF0XZPjFrsLianv1tIIrszmnLr5wfqXCB3tznoVm3uC20ORTAOYGLMC/h03OWOXyxD87tcl32vrxImtqx1XOviv1WOf20d1fkwD/jwh+QalFUcNfHqjqHhgVXE2Xxy0gmKKRCmw9ya+rgslC/NTGaw+qfkmgJzuPathNG9g5nzGzSfeIOho69uNyK+xAI9/xkdNfHOvjKNy+OSqNym+vV/GZzeLMI3gXaD4e5hWa9j4+8Oy+ntbQZqdzKI4Yvi+4VDlG4Q2mjdxdTaHy5eysEwSDe86oK4D74k1wL2bUoE11zo28N3Dim/i68A719a267j3rBbZhvjCI/HOMwpwXx218e2Dsm/Lt5iitFqT4pufyijXOpFtzExm8fGKdAMyAHiXtXzWwK2lUXz1EfuWI2OxnbCR8A5KPeAKAXWVFgbluhoXl2dyuH1lJBLebZvi7w8r7lKtC+/r2w3cf1YP1Ce2Ybp3at5EEn1lYr/lK3rvsIVv7zOrowvvjRbF13dPcFLvREK06I8LpAHBe9YycPNyHr/9b+N4dz6PjLCdXAveQZ3HVENAXRfek2jigSzP5HH7yijvcQCsQZg749sHZbw4aMXCO7dUP/1cE9r01wfqvZlUft0tn1xf3Bpzn0gKA9xXhy387b63pEVNsGrDxp9/PEGlbkcC8/T5DD67UfTtjRokvGctgpuLI/hfvy7h+sIIso6Fko8t1PPuLY2eFRGXS55JpfFy3ugl00D8XMRv3YmYRyxL2IH/42EFuweeA1dMXFvbruP+s7qvrFyf+MIjsayYuJvDs1zEZ225tn/U9k0uFV9V6x18c5dNKrGsXN+M7CIR+y2122stlyG4vsgs1MriCDLOVvcwTtSC94EnGjwBy7Ps6ZoooO9Q4LtHFXx8vaAE+vXtBu5xSxWSOKhPOV7wKPDPWgSfvz/mAX0I4L5ylkUV0NcazC92UlO8llgC9Y+ve8uf6oSGle1Gy2UMvDOdw/JMzo1Fq/oGQe7LqYX3wLFS9gBEWqBPCupi2aibgW6APimoGwYCfRHPWS+hPGMRXJvP46uPxnHzch4juWSPp51qeFcBeBqgTwvqYrtRWhqg7wbUVawW1W4SzTAIrszm8NtflfD+0qgvYrbuTQghBFYUvPsmGoUw+ZxcIRoIg3K3wYSafMAygLOHTgl7lIvnd5cIJ78D9AsXst4uCkU+UI+pphSgruvJz2cZ0H99t4zjascZT+pem3yMOdDXGrYA6sF8AMX0+QybVCGvphDhXS6bRpufyuLW0qjPOkVZpSjtTMC72z0BonWBfnOv6dPkfDqgrqNRSpHLGNpAnwbUVUzjHovURhpt77CNn180Uj88eybhXQXROkAflZKAepzGf9cF+ihNB9T7Ae/NFsWDzRqevmAPLy/N5NygJUkn15mCdxVE6wC9SksD6lEaP35AH+jTgrpK6yW8N1oUDzbr+OOdY6xt19HqJI9VcebgPS3Q9wrUwzRxYJMAfRpQ7ye8i1q9RXH/WR1/cCaY6iuqsP6des8772gcROt46OM86km23MiaanDzWUPLQw8EPeq6JzCsvl5pAFBv2PjpaQ3/959HeLxTR7sTP8HOLLyrtDig7xWoq7Qw3tABel1Q7ze8x2mNFtuX
98fvj7HxvB6IUPTWwHsSoO8lqEfBu0qLAvq0oD4oz7tKq9Q7WN2oYuN5A1fn8li8mIVp+Pty5uFdB+h7Depx8K7SVECfFtRVWi/hXVer1G2sblTxpx+O8exlEzb1+vdWwHsU0PcD1HXgXaWJQN8NqA8K3nW1cs3G94+r+OP3x+6mSeOrj8Z8L2E8q/Cu0ibHLUyMWRgfNROXTarpTohchmBy3GJRXBKC+jDgPQnkl6sd/Gutiv/8+hBWqWDh0xtFHJbbeLRVx4uDFjNpbueFA5EOaBDwLg6myDhxmvgkixctj6SuL04TuS1Msyl7RvLpC76/nmB5JqdVNkwLjKFT77A1i3e45ISwOap08Girjt2DFmx7SCTfBbwTQgLhGVWhGJPU1y28859yKEsetnLJmVxJ65M1eQyHqRlyp0pCjKS5C1nvubgzAu/yw6TEaUN+0HPQ8O49We1f1n58UsPT3Ubi+mStl1DeC82Q127+90TRwifXC/jywzHMTma8ZeoUw7v6YVJncknLo059STR5/MTkPXiLwFVugyqj3ETVp9LEMRw0vKs07yVNIQdQKpj49EYRX33EJhgxhBpOEbyHPUwK8HnPNPHFUlH1JdXCwPqHxxU3Qk4oCFOKHx77o9zoTjBlfadA820LlNdt8e9SwcInKwV8eXsMc1NZdpvszgfqEaS3wgkjkExz6xXaFwFW1sSoL0Sog7r5/FpY5JaoNqI0PlbimMkRcnhp6iz9Ko1HuVHVF6a5x6fZxqA0SwTSqEHkSQX51AYgTK6uTxb1a1yXr1pKqRtJr+mAurAoeyu1QvO/KdZrW9VGnKZisB+f1FgwE1+7BEQwpSpNjEMvpkje4udQs41BaAGLJZ9s+SrhByNC/uyFrKNLE4XBTWJNtFhyn0RNDM8I8SQTOAAXrR1X/DFDVW3oaqIV8QWI422KQBmlAb67xzjrlaqNAWguvMuWS4chCCE4VzDxyfUCPrnhBcHwr4rpNLldXVBnI+9JOf6OGpHbhHy9AnqevJdDBe/FuZa1iNCFYL40QC+3MWwtEt7lpDL5lTpz5//jYUWAuf7Cuy6oL03n8Ltfl9h3hBH5egH0gB6o55ytNDzKTbdAryx7CjRteJe1cq2D7x9X8Yc7x/j5ZQOuL5VbIXEMEmpR8K4L6jw8oyV8V9gvoNcF9bDvCrsBevdYItodhmao+CkK3v1fODbRsSkIBUiXjCJqYfDO46g3WzbAyzn/iPOTUi88I09iKEYxn1xWdK76+qPoC9d8oO6rD269oBT5iN0NpoHIspzZZEzxwXtI2WFp2vDOLdSffmAWqsNNFJ+MBBDXml7Duy6oi+EZxeNx92O5oRiDZdMAvQ6oZ6VQlmJ9PGylYajL8r6FAX1Uu8PUYuG92rDxg7PkPXvZRKdDnVlExDng1/gyJ/6fUBP7ogvqS8IOUhVs82h5fHJ1C/S6oC6GspSPDWAxUcUdpGmAXm532JoS3rmFurNewe//dYynL5qwbec8EAL3jFC1hh7CexJQ//BqwT0W8bhE2HZDMXYJ9KKligN1MZRl2B2eGLYyCdAr850CLbAUKqHcM0ZQedn75XlPCupM8y4Ot1pJ6wbo04B6nPec/0sD9G6/I/oyDI3Yzt6Ycs3G+k4d26+aXjwn8WTDufK9s+UOjEoLO6k62o3FEcxOZnzLH5/fYqJgTHV7ecRXXuUWUWkdG/j2fhmvDluRbZSKlrufa5WH6PblEiw2mO9MjjmfJD1/3cR3jyqwfaEi/W2AENxeHkGjxcKkh+YbkkYqtTZ9uMUmVId/NQPmnCeUrZzEmWGu5rJaiOacSAqaSpudzOLgpO3sp2JLgLfccjNJseSAujwZRGaM09odir/dL2P/sKVsg2vjoyYmxy22/Mn5hLyMqcbcp3KS9EXUdg9a+O5hBb4tcW5/nDIACiOGLxS2Kh+/LwgGwfXn09GyFnsHUlw+I5sxUCqYyFh8MIl/yAgcMBe0PsN7Psu28AbWSwm2
y9UObNvPLkm95+0Oi1EV1gbXRnIGxgumMp8Iri1en6+a5NthylXbN6lUwGwYQD7rDy+kynd5Oof33hmJzaej3b4yyrZRxeQzLJPgymwev/t1CR8sj2AkTzyjRog70cJAvR/wzp7F8x70BPxgzdPeYTsQilHXU04pRa1J3fCMYW1QCkw7YB32UKwIrmLYyqSTidXhxPL6uRoJx/zB26lxKzbftbkcFi86gdO6gPLxUQPzF7J4dz4PEKIH76bB9l//9ldsghVyhgPl8P5hcPAuh2IMg3c5FKMu09UathueMQzeAWB20u8KkB+KVYGrTVnYyuevm6GgHqbxUJZRcCyGXXL7HdKXy5dyGMkZsEzg2nw+NJ+OtrLIrN5E0WR78yLKBjzvlskm2FcfjeP28igKOQPudgga9LKrtF553uV35MD5J3vP94WAZnGecsCJ+XmPRdJT1cc1+Z07PC3N5FwnbJgn2rZZ2MpdwXKJPkKVtr7dwP2fa8r6uGYacN+5E+d5NwibTLz+y5eY1UrjUS8VTMyc95zL787nnYUqoefdMoErs3l3go3kiHtJ+7zsKo2fxJSax87SW72I055wUrgmhmKMmrTVhu2EZ7Qj65sWHiZV1ccfioXbf6EuR7M7FN89rGBXsFxhlurRVg33ntUi6zOFB291PO+Xp9lE4nktk+DqXD6yjTDt+gKbSLyuiaKJSxOZ0LKxnnfLZIP421+VcHt5BPmc0Xd454n3w/MJmeKHTrue9OqwrYytzo+Ph2cs86BnIm8K9fnfaxj/UGwU9NoUvrCV8rEBwVCWqvq4pbo4kVGymlyWs5Wc78pMDrkMSQTvpSJ7DlJu98ZiXhg2f1ntbTMi5N++MsLCCfYJ3uXj45OrG6BPCur8YdK4m4HlmTw+vFJAFMyGAX1SUL94zn83FuV5v3wph9G890Y0ntcwCN6dyyeC9+sLeZiGfwwIIThXtHCJf/caBu/8QMXfZV8LddZtBvnj+OCKCPk+89M1vMv9YZPLSAX0aUGdt6vqi6gtTWedV8j564sD+qSgrlpO3WMRyloWwdU5/7OK4j8eWlsH3jlbhS3jN98Z0YP3sEGUE7dgIuTz1Ct4VwF4UqDnuyLSgrrOzQAhxLerQgfo04C6CvydDvnKLl7MoZA3ffnEf6bpWK04eAeDfzkUgFhXqWBi9nxGH955BfLJFycfA0JgaTrn3SUIZXsB76qJlwTo/2v1uGtQ19EopYmAPg2oqyyGXNaygKtzOWX/RO3ypRzj5Qh4Hy+YmJ3KKNsVteuL+UDZrva8A8DWqyb++P0xVjccTkByUI+Dd/67CNG6QO9+jdElqMdpPOkCvayJKQ7Uo3h44WIOozn1+21EzfVrKfrH08pCHkRRVq7vXMEMeONT7Xm3KbC118Tv/3WMf65VcVITNsX1Cd5VEK0L9GFaUlCP0sS/dYA+DaiHXdxiWdWdYFjZKG/8+KiBuams1sQmhAS88YngvdOh2Nxr4E8/HOOfaxXvDmtA8K7SdIFe1tKCukpTLRE6QJ8U1OPgnXvZdctGeeO5lz1qORU12Ruv9cBqx6bY2W9ifbuB40rbf7Yo35ngTa6kJyaghcC7fKVwjQM9D8UozU9Q+OfsjBOe0ZTe+BDVRpSmsvQc6AEWtpJ9M0G8bzHcPjHNlN5ir6ovVKMUhkFcL3uSspcv5bC2XUe9Ybt9KRUtn5ddt7535/POA8wUvg1DsoWiFNh61WATqtpxTxDbIuP8RQi7yxI1pyG2ZifXVBYrrI9c48z1zb2yM/mFiU+IW//0+QwD9RRtxGkiq/K0PMuWiNWNqlMIkplm39OKoB5VnxwtY/UAAAhOSURBVEoD8XvZk5Tl3vifnnrvbxS97En6wr3xLw5aangHPCj/11oVxxX2yjN3OAbseRcPohug7xWoR8F7UqBPA+qylpStZE30xod52XXr4954H7yrodxZmfhP9+oP18KgvNfwngToewnqcfCu0lRAnxTUw+A9zMuu
W5/ojQ/zsuvWx73xFsCgfOd1E2vbdRxXGZCzqSJAuZMGtW0GgO8EigwWp7HJVcTXd09wXLUxO8mWP9kadtOGqOksOYQQLE1nAcqeQzQMP6jHlQ3TLItg8VI2cV8CS/ZMDvvHbeULRJPWd/OdEbbn/fnrFo6rHQ9zxHLiiVfpcXlTaOfHLDSaNioNW90n6GmNlo3NvSaWZ3J+UE9ZX6+07f0m8lkDU+NW1/UV8wYWLubQi/TsZQPVurR/OWX/3IcpupntSqDsMrU7wNZeA2s7ddTqfOuwAPiUnh7Np2Og2spCHjcuj/Tk3DVa1Ln5kV4jnKJ/XUebCdPEn2k0cbvOB1dGfY48J5NzMMPX0u4f76WWFrZFTfViqbT9IzTG1MigqqvJd5ndah2b4ucXDazvNNyHFU6V0aIUlPuuB6hxi9XL1GjZ7pti0/bPN7HEyRHn0OS6ShPr66UGAO0OxbOXDTzeaaAqPA0jLUwD13xzbIDaykIeK4vMEdtLnGm2qXvzk6Z/PsZyB0sxqcL0uLy91MS/W2329dLjnQaqdY8JhjPB+LAOXltZ7L3F4qneZE+CH5fb0ifx/Tu18K5KqnZOBeT7dAxU6yW8q7S0QH9q4V2lqTzgpwHy3xZ4V2lpgf7MwLtuP4YF+W8TvKtSUqA/c/CuO8E45K9t11kIJJ5XGIdeam8bvKu0JECfOM57lD4Ii6WvARmLwDQIKLW9uxVpMHqjhW+H6bcG+Mcp7rx0o+Uy3ptiRaBX9S9y20zULFZ9v6VaVQetUerf6gPXmvnXsp5q4H9KJ2YQmmIMgP5ZL//2JBnovf5ZspWKmrW67gbxC9FBaZRSbL1qYm2rjpNaSEhJ0h+NkYU/DUrjKc05SqvlMmCT66cTd9OC3D9LLhSVkpjPflwtssmnlMKmwPNXTTzYrLHnBkNhm/RNIw5hUAoXZgen6Z2PXmv5LNuexIFe7t+ZhXdxq8+JBJNu3gFq/w7wrtLCgP7MwbtNgR2HoY4q8fvbB6P9e8C7SgsD+jMD70OB8l/gXUtTAf2ph/dhQvkv8J4e6E8tvJ8GKNfV/h3hXaWJQJ/6JU3iT1kTG0+q2ZS9u/nRVh131ituLCv3WJzzedo0/qDnu/PeNuFuYqZbFsHSTDY2Hx87+XwMS+MhPk8VvFPKHjTIWQQ3FvOYncy4b3G1bW8p4D+HA+rh8H5uzMLNyyMo123s7re6gvLFiyzS8cuDlvOd5+mEd5WWy5Duo82oNJXl09FWN6rsRUROH8W3uM7xt7jyf7xdguFrzu/8Qc+VhWD0FTefhmZZBO/O5fRDO2L4lkrWTs22Gf7CI7FKno+/xfXLD8e8qCY8o7gmDEmjoJgYMzHtPHRaKpiYm4qPhR6mvXMpyyImAl6QtJiyac5RP7Wu3rAapSUB9dWNKp6+aDgnSwIIoY1zRQuf3ijiq4/GhAAU/vyUDl7jD3oahnexvDuXBzG8h1N5Prd8iJZxrBQ/ZjcmQ0RZeaxOwwQbKrz7XngEyOASWrZUYBPsy9tsghnGcOH9/LiJS+f84RTPacRCV2nvzGR9kY4ppXhnmsW8Ogvwzv8NDd45qPOXO/rapur65KuiVDDx6Y0iDsvt4UE+Ibg+P+JaK3Esrs3nsfta7FM0vDO2CkaMMQnw7nweq48rgbLyOJ0GeAeG6Hlf3ai6L3VkrCCcLoXFiqqPQ/5hpYO17Tp291ve6aP99bxPjlu4NGG5E1/sX6lgYmYyg539lnAAEEyeX1ueySNrkcAFCACLF7NY3677nkyKGpekONJrbSjwLoO6/+oLr0+2WrI2DMjnXwCHjcvKwohQJBzAsxbBldnwiDGmAVxbCA/teFrYimsDh3cZ1CkHdiAS3imlvrbCNMAP+efHzL7B+1TJwsVz0SF/xoX4nFEAvjybc9/kFVbfwoUsCnkjUFbV7rC1gcF7GKj7+qcJ7zpaudbBxvMGDssdr40ewzu3VnEwKz7soALwXNbA0nR4XHaumUYwtKOY
X6cvbxW8R4K6VDYJvKu0k2rHfVNsx3mBQD/g/dJEBpPj3rueo8ZibMTEwoUstvaaSnhnvipDWVbWFi5ksbZdR6XW8WpQWO2wvgxKGwi8R4F6L+CdUopK3cbaNn9TrFMXIX2Bd4M4XnYEL46w8bq2kMf2fhPUhs9M57MESzNeXHZVWTGZJrvb/P5xNXJchg3vfd82owPqUfAu1ydrlXoHa1t1bO41vfjpfn9Az7WLExYmx61E4zI2YmDhQhabLxsQ0/JMDlmLRJaVtcVLWTzaqqPS6ETmG6bW120zDNSb7u00pUL+CC0O3kULtbnHrYBTBa+P9kcjBLi+MOI7bp1xIYRFFd7eb6LTYdXnc2wZVOWNqs8gLGbDnfVqZL5haoGlUDUDxc/lvCrNdphKB9Tj4J33R4Tyte06tkQLJcwFGbZ7rU1PZHB+zAwdA95XlTY2YmB+KovNPTYuV+fysEwSuNp16pt3WIv3I0nZQWg9h/fEoK7SFPA+KCiP0gyDsZXc5yRX9bX5PHb2m8iYBEvTQWulW59JgOvzeVTqdmLrOQit5/CeFNTj4H2QUB6nTZ/PYKIoRmNOfjUXR9irREoFE6bRnbWZm8rixZtW6r70U+spvKcFdZVWbdgDh/I47Yb0mFVawF1ZzCOXMQInJml9hkE952vKvvRL+/9WiFUz1yMl1gAAAABJRU5ErkJggg==' # pylint: disable=line-too-long +INVALID_IMAGE = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJYAAACWCAYAAAA8AXHiAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAg%0AAElEQVR4nO2d6XfcxtXmnwLQC9lNtihSEneZlCxRsmXZmXjJa8%2BMj08mf%2B6c9%2BtMTpJ3Ejtx4kQ2%0ALWsjRcniIkoURXHpfUHNh0IBhUIBKKA3UnGdI5F8ujYUCrd%2BuF24ILZtUwAghIBSCvn3YaSTqo2x%0AUcPtCwBff5JolXoHozkjVVlZ%2B8%2BvDwHi9ZNSCgICCv9YDUKzTILLl3Lo2BQ/v2j0pL7f3CxiqmT5%0AxkCeB7qaxT8Qf4q/U0p9uq4mTtKk2u5BEx2awUTRO0i5Xzpao2nj7w8q%2BO%2B3xmCZycrKae%2Bw7U0q%0ACoAI4wZFHX3SLBNYuJjDtbkcRvMmHmzW/OctRRumAfzmZhEXzmWC%2BVKOmQUgdAZ2c3XzlEazbWBt%0Au45PrhdCraeOtv68jqNKG5t7DSzP5Lrq04PNGigoCJx5RQkocT6nABsCwRL0WMuYBIsXc7g6l0Mh%0Abwb6l7YN0yD4zJlUaY2ISguaBKHD3UyUrhIBdvdbOJrvoFQwU7XfaNl48rwBUODxTgOLF3OwzHTH%0A8%2BJNC29OOuwqpxR8/HyWgFuxHmuWwZa8q3M5d0lX9TlNG6YBfHajiAslqyfnXNTciaWyUrImVhCl%0Aqcom0QDABvBoq45PbxS12xW1jecNdGwAhKBa72Brr4Hl2Xyi4%2BBpfbvh/UHY5PKtLgTgNsM9cV1q%0ABgEuT3tLXmSfQRO3YZpglqpk9cxKiZql4ir596Qpitl0NVBgZ7%2BJo0obpYIV2ieV1mhRPN6pg1dE%0ACbC2U8fipRwsM/oYZW33dROvj1tsLhHi1kdAXY2CwlsVaVcah/Jr83mMCDcdUX0GTd5GGFPpjkuc%0AZqksiNvfYS2FAhM83PRbLZ3kWiuwOgCg1qB49rKBK5LViko2pVjfaQAg8IZFnJhcIQChvmUmqZYx%0AiQ/KtROVLsyYdqNAvZcpwFhhEycpRHe1FBKw5QbA7kELh%2BU2zjl3iHEmud608XS34ZZ3xhQAuyF4%0AZzoH0yDKsrL26rCN10ct/8EJ9Yka6zZNrFkWweLFvMtQScfL%2BUCrXYsvf86k6vY8RWnKu0JVku8C%0A43wZKhj
U1RgM8M4y1vpkpaDsi6w93qmj1aagRLxmGV3UmsznI94hhtVnU4qHmzV%2BdM7y569P1IQV%0AXEszDfignI9fqlWCkNh2LTMI6onaSKidTngnzqA4I/PioIWjCrtDjGq30bLx88umc/cjJUdb36nj%0AssNaUcexf9jCwUnHV1ZVn1DSD8wRWtYy8Pn7RdcK64xpqEYkeFe0axr9BXWVZhBCoiE6ReoF/DnT%0ADKCA7VituLbWtpi14kXd1UC4hGt1xlpx7d99WguUVdXnc2BTPa3RsvGXH09w/1kNjaaNrlNEu5yp%0ALvaZqeTku%2B2Ql6hu4b2bCeZ8AOp8tHvQwmGlo84HoNG08exlUygLt6ysPd5peBNQkV6%2BUbQVUZ/v%0AODS1tg083Krj93eO2QRr%2BfujayFceFe0YZoeqPfLMoVphjwpVBMqrfOsG%2BYCpQClIJRdetSmWNuu%0Ah7a3/ryOVsd2y4llZa3a6GBzrxHa/webtdCyYRr7kVxrtW083KzhD3eOcf/nGuqSBdMac0Ub7vIn%0AWapB8BWllFmsMOsUVUHYBOyJRp2rjxBQQsBAmTBvfKUTmIjNNmVedicfZzSxrKyJVkusb/egiTcn%0AnciyKo0DdFqt0bLxcMuZYM9qaLa88RDHJqDxfgj1WSbxgXpo2T5q7lIocpbMXGk0sd7EGoHLC%2B6d%0AjiM92qoHyop%2BK5fQiL%2BsrFUbNrb2gqy1vt0A/04wSX3OddC11mxTPHKWyAfPGDPysZEZ2Bt3rz7P%0AUlm%2BfKqy/dQSwfsgNUKIY%2BL9nz/fb%2BGo0nb/dr3sUj5CSay2tlMXHKncy97WKhvUHE98j7RWm1mw%0A//PdUQDyg2zDysqgPsxzeGbg3dOAh5veHSK3VnFgrdK4Nx4Qvex6ZVVaEnjvFeRzeB8mqKu0MwPv%0AIjBzb7zoZU8K2/zvte06Ojb1vOwJyvYC3nW1KMgfNqirNFJvdmjWCrEYXSTu%2BEyjPdys48FWDQSe%0AR5n9dAacEMxNZlDIG1jfqbuaKp%2BO9sHyKHb2m3h90klcVtTAj2MAWi5jYGkmB0qBCyXLZarTkqyv%0A75bxxa0ichnDZ8rCvOLa3nMnpdK8s6cAZgbWz1%2B3vE%2BICrb1tZ9%2BroE321V9xO1237V8lqBUMDE9%0AkYFh%2BMdQHNthadZxpY2v757g8/fHkM8G94aLaSjwLs4uOIAL9FSjVC9ftOYAOPqpAeMFCysLecxN%0AZd1%2BKB2nGO45tEAIjqs2vrlXxufvFd3JFdZZ3dQNBAa%2BOoFw5RKcSg0Qt9b0XisVTFybz2N2KgND%0A4aoJlB6yZoGyLyyPyu3A5ApL8tIna%2BJP1WdxGhAChYq%2BnCatH%2Blc0cL1hTxmzmfciRbnzD4NyWJg%0AyCbXcaWDb346wRe3xpC1ok9ymN4Tja9%2BxL8dBBCM2SnS4H4mula7084VTFxfyGP6fAZG0DCc%2BmSx%0AhZyAD9NR1UYvgJ6ntBqfUMExlbeIvF1aadTEyuKIz0KdJijXhncuuDe0lOC42nGBPpfxL3dyehvg%0AvTfavweU62qWnxo9YPQDfXJb/Au862lnDcp1NRfeVSkM6H%2BB9%2B7h/axCuW7ywTv3dAMAoQAFDQC9%0AmH6Bd3/SBfWcRXBjkYH525qMgHfQ%2BYs/KEDhAX2z7VkpQvq4bQbqE8k/OW0a/6erNds2vn1QwV/u%0AlvHqqB2w/PJYnEXN24/lwjsfC%2Br9pHCBXt7h2Hd4l8xCui0tg9CSbZEBBV4ftfHNT2X8%2BccTvDps%0AK5fXYbNSWs0Ig3dC2E5JNhOZxoE%2BsHVDkXoG78QviabgNGls7Egq7fUxY9mv75ax96YFW3Eb75Y%2B%0AI1okvANBjtAB%2Bl/gPV3aP2rhm%2BM2pkoWrs3ncfGU7VhIk
mLhnUgATSllk6ufQP%2BWw3uURgG8Ompj%0A/6iMyXF253gWJ5gWvPPkesMJ%2B%2BI6Cujdun6B91QaALw%2BbuGv98pnEvK14Z3lcT8EKO0r0L/t8P62%0AQ34ieAeI75ElIBzof4H33mtnCfKta/O5wFcJXi7416TgqulqBydtx4vcPbxPlSysIB9cixMmCmD/%0AqI0LpT4ziqqfEdpRpYN8liCXMRKX5en1SRvEYNr%2BcVt9noagGQYwfT4LK581AtFX0qQXb1o4rnYw%0APhqM7ZQU3scLpi96L%2BBdCUl2SaxuVHFw3Mb8VAbLs/me7LoI03TTcaWDpy8ayGWI8uYnaXq4WcfD%0ArVrXNw3dagaA6fMZXF/IY3zUgLHuPFunAvAk2vp2Aw83gw%2BTJoX3Zpti%2B1XTp8knUkdb3ajh6Ysm%0AKIDVJzU82W1ol02j6QCuhw02jqqdwM1P0vrcpZL1qG83EnHa9EQG/%2BP2GD69UcC5IjMsFo%2B%2BcmU2%0AH%2Bi4nMK03ddN7B%2B1QAhxQzumfUJnbavuC%2Bfodj8Bs61uVPF0t%2BkxEQVWN6oAKJZn8onr0%2BJCRI/Z%0AUaXNJlLLBt%2BJJW5PUu3a1WqDApQ6m3D4R1QIbNQnjQC4cC6DG4sjOD8eRA0DxIu%2BEmfaVY5M/qAn%0AP2DxYVLdycQ1N2IMUeejNDrmg00pVp8wSyVabX4uVjc8y6VTXxJN5djlvx9VPOsEfoPk9IlbsXrT%0A1q4vOH7wHa/cRi81NqEsfH6riP94r4iJMX/gXf7TAqWo1DvKWOiq5UpOrw7b2BfCKYqhHeVBkSeL%0ArK0/r6PZtp2r0D%2BIYr4w7ccnNTx5ro6jxdPqRhWg1MdcSdoI08TfRY1/U9FoCS4Z6foVv83ICRse%0AdNtQGgSVjehCIyCYKjGHrXwzpOqLAcfEPd5poN2Jvjrlg%2BLhFEXWojYL3JH06vMixqhT3JK0ulHF%0A0%2Bd1QOARQrj59jMKn4D9XgqPKx3XFeONUZCZCPGeN2gKcbuSLLdyfb3SCCGYKmXwH%2B8V8fl7Rd%2Bk%0AiuqfwRfOSoPFQo8ERamC10dtFk6RCn4I4oV2lMtG3QzwGAyiGZbz8RMqaxzUwSeSeGEoNBHodduI%0A0%2BSxUvr3CCLdCOK3GXFjrzqpOm0k0SaKJn5zs4gvbhVxaSIDw9C/ETO4P4KAYG2njnYnnLPkCu4%2B%0ArbF%2BEKFHVB3aMZKtfBFjSOCAxRMpawzURW5y%2BiPUEaatblTxZLce24aOJv595DwE3GjarG2nXWeK%0AR2oc6GWHc9gJjKsvsUYIzhVNfHajgC8/HHeeEtKbTKJmiJAmxudUJZExXr5hVokXdyt1fvDQjjrw%0A7r1FAm4dcfAeB%2Bq6Wq%2BAXgfUeSfitCRA7x5LD0B9vGDik5UC/uftMd/u1qSsRyllX0K7DAUv%2BooK%0ACMUZ%2BWCz5mMv%2BZ/thHYUG1XBe6NF8XS34ec4Cd5lCyGCelQfchmCW0sjMAxE5lvdqAaYK6zdMA2A%0Au%2Buj0bLd8Qy0B4qlmZwv2p4qnwf9ar6VNbmNNJplsJdBEcWxhbUbprnwziurN73338mDxwvxcIoE%0AMvgR18SCBEM7qmb34x0WO9RXVpGSgnouY%2BDz98dwZTaHj68XYBr9BXpdUF%2BezeOD5RFf2KEwiE4C%0A9L0A9dfHbfz1Xhnf/FTG/lE70Iaq3TDNhXfqNALKYqGLke7k9dN9aZE3tb0/iNNhABTUF9pRhvdm%0Am7K47G5D8fCuA%2BpZi%2BCLW0X3zWGzk1l8slIQlsTeAr0uqC/P5HB7eQQGIf732fQA6KPaTaJRSvHK%0A2U3x13tlL9Z9SLta8A6nLTHSnVyAe9l5ZhW8A04EFxA8f81CO6qYaW2rjmZLmJQx8K4D6vw7OPH
F%0ATpRSzExm8dmNoju5VGVpCqDXBfWl6Rw%2BvFrw1SVOrqiyUUAv5lOVTatRyl7%2B%2BecfT/C3%2B2UcHLfd%0A85cY3kE9qFXFQpe97NwTKx6ksBK6SeWN5152X/9C4F0X1HMZgs/fK2J81AhMYkopLk1Y%2BHSlAIME%0Ay4r16QK9LqgvzWTx4dVRt5z40zLZiygvTlipgN7tdw/gPUx7ecAm2LcPKnhz0lYylXxsAXjnv3Nv%0AvJi4lz0KhFX/uDdenCzcy67KL8O7Lqh//l4RJek1IrLFmT6fwccrha6BnvOPDqjfvjIaCb3yCynT%0AAH0v4D1KsynF7usm/t%2BPJ/jHwwoOy51A%2B%2BKxBeDdM2nE541Xednj4J3DoeyN93nZhXwqeE8C6uMF%0AMzCR%2BMGK2sz5TFdAL55gHVCXk4qPuOVKC/S9gHcdjVL2Hsk//3iC7x5VcFz1v8EjEt45fvA3kxIS%0A7mV3zpxPI8ISyTXRGy962eV8ohlOA%2Bo6sA2kB3oX1MXt2DGgrgu93QB9VF/6odk2xc5%2BC3/64QT/%0AXKuiXPOHC1fCu1sX8bzxYV72OHjnGvfGy152OR/XVjeqePoiHahHwbaoJQV6Dur1JvXl0wF13m6c%0Alhzok3v3e6lRG9h%2B1cQf7hzjznoVJ1W2RJL//ZcD/yUatNK4NGHh5Zt28IOEiRBgqmTh1WF0XZPj%0AFrsLianv1tIIrszmnLr5wfqXCB3tznoVm3uC20ORTAOYGLMC/h03OWOXyxD87tcl32vrxImtqx1X%0AOviv1WOf20d1fkwD/jwh%2BQalFUcNfHqjqHhgVXE2Xxy0gmKKRCmw9ya%2BrgslC/NTGaw%2BqfkmgJzu%0APathNG9g5nzGzSfeIOho69uNyK%2BxAI9/xkdNfHOvjKNy%2BOSqNym%2BvV/GZzeLMI3gXaD4e5hWa9j4%0A%2B8Oy%2BntbQZqdzKI4Yvi%2B4VDlG4Q2mjdxdTaHy5eysEwSDe86oK4D74k1wL2bUoE11zo28N3Dim/i%0A68A719a267j3rBbZhvjCI/HOMwpwXx218e2Dsm/Lt5iitFqT4pufyijXOpFtzExm8fGKdAMyAHiX%0AtXzWwK2lUXz1EfuWI2OxnbCR8A5KPeAKAXWVFgbluhoXl2dyuH1lJBLebZvi7w8r7lKtC%2B/r2w3c%0Af1YP1Ce2Ybp3at5EEn1lYr/lK3rvsIVv7zOrowvvjRbF13dPcFLvREK06I8LpAHBe9YycPNyHr/9%0Ab%2BN4dz6PjLCdXAveQZ3HVENAXRfek2jigSzP5HH7yijvcQCsQZg749sHZbw4aMXCO7dUP/1cE9r0%0A1wfqvZlUft0tn1xf3Bpzn0gKA9xXhy387b63pEVNsGrDxp9/PEGlbkcC8/T5DD67UfTtjRokvGct%0AgpuLI/hfvy7h%2BsIIso6Fko8t1PPuLY2eFRGXS55JpfFy3ugl00D8XMRv3YmYRyxL2IH/42EFuwee%0AA1dMXFvbruP%2Bs7qvrFyf%2BMIjsayYuJvDs1zEZ225tn/U9k0uFV9V6x18c5dNKrGsXN%2BM7CIR%2By21%0A22stlyG4vsgs1MriCDLOVvcwTtSC94EnGjwBy7Ps6ZoooO9Q4LtHFXx8vaAE%2BvXtBu5xSxWSOKhP%0AOV7wKPDPWgSfvz/mAX0I4L5ylkUV0NcazC92UlO8llgC9Y%2Bve8uf6oSGle1Gy2UMvDOdw/JMzo1F%0Aq/oGQe7LqYX3wLFS9gBEWqBPCupi2aibgW6APimoGwYCfRHPWS%2BhPGMRXJvP46uPxnHzch4juWSP%0Ap51qeFcBeBqgTwvqYrtRWhqg7wbUVawW1W4SzTAIrszm8NtflfD%2B0qgvYrbuTQghBFYUvPsmGoUw%0A%2BZxcIRoIg3K3wYSafMAygLOHTgl7lIvnd5
cIJ78D9AsXst4uCkU%2BUI%2BpphSgruvJz2cZ0H99t4zj%0AascZT%2Bpem3yMOdDXGrYA6sF8AMX0%2BQybVCGvphDhXS6bRpufyuLW0qjPOkVZpSjtTMC72z0BonWB%0AfnOv6dPkfDqgrqNRSpHLGNpAnwbUVUzjHovURhpt77CNn180Uj88eybhXQXROkAflZKAepzGf9cF%0A%2BihNB9T7Ae/NFsWDzRqevmAPLy/N5NygJUkn15mCdxVE6wC9SksD6lEaP35AH%2BjTgrpK6yW8N1oU%0ADzbr%2BOOdY6xt19HqJI9VcebgPS3Q9wrUwzRxYJMAfRpQ7ye8i1q9RXH/WR1/cCaY6iuqsP6des87%0A72gcROt46OM86km23MiaanDzWUPLQw8EPeq6JzCsvl5pAFBv2PjpaQ3/959HeLxTR7sTP8HOLLyr%0AtDig7xWoq7Qw3tABel1Q7ze8x2mNFtuX98fvj7HxvB6IUPTWwHsSoO8lqEfBu0qLAvq0oD4oz7tK%0Aq9Q7WN2oYuN5A1fn8li8mIVp%2BPty5uFdB%2Bh7Depx8K7SVECfFtRVWi/hXVer1G2sblTxpx%2BO8exl%0AEzb1%2BvdWwHsU0PcD1HXgXaWJQN8NqA8K3nW1cs3G94%2Br%2BOP3x%2B6mSeOrj8Z8L2E8q/Cu0ibHLUyM%0AWRgfNROXTarpTohchmBy3GJRXBKC%2BjDgPQnkl6sd/Gutiv/8%2BhBWqWDh0xtFHJbbeLRVx4uDFjNp%0AbueFA5EOaBDwLg6myDhxmvgkixctj6SuL04TuS1Msyl7RvLpC76/nmB5JqdVNkwLjKFT77A1i3e4%0A5ISwOap08Girjt2DFmx7SCTfBbwTQgLhGVWhGJPU1y28859yKEsetnLJmVxJ65M1eQyHqRlyp0pC%0AjKS5C1nvubgzAu/yw6TEaUN%2B0HPQ8O49We1f1n58UsPT3Ubi%2BmStl1DeC82Q127%2B90TRwifXC/jy%0AwzHMTma8ZeoUw7v6YVJncknLo059STR5/MTkPXiLwFVugyqj3ETVp9LEMRw0vKs07yVNIQdQKpj4%0A9EYRX33EJhgxhBpOEbyHPUwK8HnPNPHFUlH1JdXCwPqHxxU3Qk4oCFOKHx77o9zoTjBlfadA820L%0AlNdt8e9SwcInKwV8eXsMc1NZdpvszgfqEaS3wgkjkExz6xXaFwFW1sSoL0Sog7r5/FpY5JaoNqI0%0APlbimMkRcnhp6iz9Ko1HuVHVF6a5x6fZxqA0SwTSqEHkSQX51AYgTK6uTxb1a1yXr1pKqRtJr%2BmA%0AurAoeyu1QvO/KdZrW9VGnKZisB%2Bf1FgwE1%2B7BEQwpSpNjEMvpkje4udQs41BaAGLJZ9s%2BSrhByNC%0A/uyFrKNLE4XBTWJNtFhyn0RNDM8I8SQTOAAXrR1X/DFDVW3oaqIV8QWI422KQBmlAb67xzjrlaqN%0AAWguvMuWS4chCCE4VzDxyfUCPrnhBcHwr4rpNLldXVBnI%2B9JOf6OGpHbhHy9AnqevJdDBe/FuZa1%0AiNCFYL40QC%2B3MWwtEt7lpDL5lTpz5//jYUWAuf7Cuy6oL03n8Ltfl9h3hBH5egH0gB6o55ytNDzK%0ATbdAryx7CjRteJe1cq2D7x9X8Yc7x/j5ZQOuL5VbIXEMEmpR8K4L6jw8oyV8V9gvoNcF9bDvCrsB%0AevdYItodhmao%2BCkK3v1fODbRsSkIBUiXjCJqYfDO46g3WzbAyzn/iPOTUi88I09iKEYxn1xWdK76%0A%2BqPoC9d8oO6rD269oBT5iN0NpoHIspzZZEzxwXtI2WFp2vDOLdSffmAWqsNNFJ%2BMBBDXml7Duy6o%0Ai%2BEZxeNx92O5oRiDZdMAvQ6oZ6VQlmJ9PGylYajL8r6FAX1Uu8PUYuG92rDxg7PkPXvZRKdDnVlE%0Ax
Dng1/gyJ/6fUBP7ogvqS8IOUhVs82h5fHJ1C/S6oC6GspSPDWAxUcUdpGmAXm532JoS3rmFurNe%0Awe//dYynL5qwbec8EAL3jFC1hh7CexJQ//BqwT0W8bhE2HZDMXYJ9KKligN1MZRl2B2eGLYyCdAr%0A850CLbAUKqHcM0ZQedn75XlPCupM8y4Ot1pJ6wbo04B6nPec/0sD9G6/I/oyDI3Yzt6Ycs3G%2Bk4d%0A26%2BaXjwn8WTDufK9s%2BUOjEoLO6k62o3FEcxOZnzLH5/fYqJgTHV7ecRXXuUWUWkdG/j2fhmvDluR%0AbZSKlrufa5WH6PblEiw2mO9MjjmfJD1/3cR3jyqwfaEi/W2AENxeHkGjxcKkh%2BYbkkYqtTZ9uMUm%0AVId/NQPmnCeUrZzEmWGu5rJaiOacSAqaSpudzOLgpO3sp2JLgLfccjNJseSAujwZRGaM09odir/d%0AL2P/sKVsg2vjoyYmxy22/Mn5hLyMqcbcp3KS9EXUdg9a%2BO5hBb4tcW5/nDIACiOGLxS2Kh%2B/LwgG%0AwfXn09GyFnsHUlw%2BI5sxUCqYyFh8MIl/yAgcMBe0PsN7Psu28AbWSwm2y9UObNvPLkm95%2B0Oi1EV%0A1gbXRnIGxgumMp8Iri1en6%2Ba5NthylXbN6lUwGwYQD7rDy%2Bkynd5Oof33hmJzaej3b4yyrZRxeQz%0ALJPgymwev/t1CR8sj2AkTzyjRog70cJAvR/wzp7F8x70BPxgzdPeYTsQilHXU04pRa1J3fCMYW1Q%0ACkw7YB32UKwIrmLYyqSTidXhxPL6uRoJx/zB26lxKzbftbkcFi86gdO6gPLxUQPzF7J4dz4PEKIH%0A76bB9l//9ldsghVyhgPl8P5hcPAuh2IMg3c5FKMu09UathueMQzeAWB20u8KkB%2BKVYGrTVnYyuev%0Am6GgHqbxUJZRcCyGXXL7HdKXy5dyGMkZsEzg2nw%2BNJ%2BOtrLIrN5E0WR78yLKBjzvlskm2FcfjeP2%0A8igKOQPudgga9LKrtF553uV35MD5J3vP94WAZnGecsCJ%2BXmPRdJT1cc1%2BZ07PC3N5FwnbJgn2rZZ%0A2MpdwXKJPkKVtr7dwP2fa8r6uGYacN%2B5E%2Bd5NwibTLz%2By5eY1UrjUS8VTMyc95zL787nnYUqoefd%0AMoErs3l3go3kiHtJ%2B7zsKo2fxJSax87SW72I055wUrgmhmKMmrTVhu2EZ7Qj65sWHiZV1ccfioXb%0Af6EuR7M7FN89rGBXsFxhlurRVg33ntUi6zOFB291PO%2BXp9lE4nktk%2BDqXD6yjTDt%2BgKbSLyuiaKJ%0ASxOZ0LKxnnfLZIP421%2BVcHt5BPmc0Xd454n3w/MJmeKHTrue9OqwrYytzo%2BPh2cs86BnIm8K9fnf%0Aaxj/UGwU9NoUvrCV8rEBwVCWqvq4pbo4kVGymlyWs5Wc78pMDrkMSQTvpSJ7DlJu98ZiXhg2f1nt%0AbTMi5N%2B%2BMsLCCfYJ3uXj45OrG6BPCur8YdK4m4HlmTw%2BvFJAFMyGAX1SUL94zn83FuV5v3wph9G8%0A90Y0ntcwCN6dyyeC9%2BsLeZiGfwwIIThXtHCJf/caBu/8QMXfZV8LddZtBvnj%2BOCKCPk%2B89M1vMv9%0AYZPLSAX0aUGdt6vqi6gtTWedV8j564sD%2BqSgrlpO3WMRyloWwdU5/7OK4j8eWlsH3jlbhS3jN98Z%0A0YP3sEGUE7dgIuTz1Ct4VwF4UqDnuyLSgrrOzQAhxLerQgfo04C6CvydDvnKLl7MoZA3ffnEf6bp%0AWK04eAeDfzkUgFhXqWBi9nxGH955BfLJFycfA0JgaTrn3SUIZXsB76qJlwTo/2v1uGtQ19EopYmA%0APg2oqyyGXNaygKtzOWX/RO3ypRzj5Qh4Hy%2BYmJ3KK
NsVteuL%2BUDZrva8A8DWqyb%2B%2BP0xVjccTkBy%0AUI%2BDd/67CNG6QO9%2BjdElqMdpPOkCvayJKQ7Uo3h44WIOozn1%2B21EzfVrKfrH08pCHkRRVq7vXMEM%0AeONT7Xm3KbC118Tv/3WMf65VcVITNsX1Cd5VEK0L9GFaUlCP0sS/dYA%2BDaiHXdxiWdWdYFjZKG/8%0A%2BKiBuams1sQmhAS88YngvdOh2Nxr4E8/HOOfaxXvDmtA8K7SdIFe1tKCukpTLRE6QJ8U1OPgnXvZ%0AdctGeeO5lz1qORU12Ruv9cBqx6bY2W9ifbuB40rbf7Yo35ngTa6kJyaghcC7fKVwjQM9D8UozU9Q%0A%2BOfsjBOe0ZTe%2BBDVRpSmsvQc6AEWtpJ9M0G8bzHcPjHNlN5ir6ovVKMUhkFcL3uSspcv5bC2XUe9%0AYbt9KRUtn5ddt7535/POA8wUvg1DsoWiFNh61WATqtpxTxDbIuP8RQi7yxI1pyG2ZifXVBYrrI9c%0A48z1zb2yM/mFiU%2BIW//0%2BQwD9RRtxGkiq/K0PMuWiNWNqlMIkplm39OKoB5VnxwtY/UAAAhOSURB%0AVEoD8XvZk5Tl3vifnnrvbxS97En6wr3xLw5aangHPCj/11oVxxX2yjN3OAbseRcPohug7xWoR8F7%0AUqBPA%2BqylpStZE30xod52XXr4954H7yrodxZmfhP9%2BoP18KgvNfwngToewnqcfCu0lRAnxTUw%2BA9%0AzMuuW5/ojQ/zsuvWx73xFsCgfOd1E2vbdRxXGZCzqSJAuZMGtW0GgO8EigwWp7HJVcTXd09wXLUx%0AO8mWP9kadtOGqOksOYQQLE1nAcqeQzQMP6jHlQ3TLItg8VI2cV8CS/ZMDvvHbeULRJPWd/OdEbbn%0A/fnrFo6rHQ9zxHLiiVfpcXlTaOfHLDSaNioNW90n6GmNlo3NvSaWZ3J%2BUE9ZX6%2B07f0m8lkDU%2BNW%0A1/UV8wYWLubQi/TsZQPVurR/OWX/3IcpupntSqDsMrU7wNZeA2s7ddTqfOuwAPiUnh7Np2Og2spC%0AHjcuj/Tk3DVa1Ln5kV4jnKJ/XUebCdPEn2k0cbvOB1dGfY48J5NzMMPX0u4f76WWFrZFTfViqbT9%0AIzTG1MigqqvJd5ndah2b4ucXDazvNNyHFU6V0aIUlPuuB6hxi9XL1GjZ7pti0/bPN7HEyRHn0OS6%0AShPr66UGAO0OxbOXDTzeaaAqPA0jLUwD13xzbIDaykIeK4vMEdtLnGm2qXvzk6Z/PsZyB0sxqcL0%0AuLy91MS/W2329dLjnQaqdY8JhjPB%2BLAOXltZ7L3F4qneZE%2BCH5fb0ifx/Tu18K5KqnZOBeT7dAxU%0A6yW8q7S0QH9q4V2lqTzgpwHy3xZ4V2lpgf7MwLtuP4YF%2BW8TvKtSUqA/c/CuO8E45K9t11kIJJ5X%0AGIdeam8bvKu0JECfOM57lD4Ii6WvARmLwDQIKLW9uxVpMHqjhW%2BH6bcG%2BMcp7rx0o%2BUy3ptiRaBX%0A9S9y20zULFZ9v6VaVQetUerf6gPXmvnXsp5q4H9KJ2YQmmIMgP5ZL//2JBnovf5ZspWKmrW67gbx%0AC9FBaZRSbL1qYm2rjpNaSEhJ0h%2BNkYU/DUrjKc05SqvlMmCT66cTd9OC3D9LLhSVkpjPflwtssmn%0AlMKmwPNXTTzYrLHnBkNhm/RNIw5hUAoXZgen6Z2PXmv5LNuexIFe7t%2BZhXdxq8%2BJBJNu3gFq/w7w%0ArtLCgP7MwbtNgR2HoY4q8fvbB6P9e8C7SgsD%2BjMD70OB8l/gXUtTAf2ph/dhQvkv8J4e6E8tvJ8G%0AKNfV/h3hXaWJQJ/6JU3iT1kTG0%2Bq2ZS9u/nRVh131ituLCv3WJzzedo0/qDnu/PeNuFuYqZbFsHS%0ATDY2Hx87
%2BXwMS%2BMhPk8VvFPKHjTIWQQ3FvOYncy4b3G1bW8p4D%2BHA%2Brh8H5uzMLNyyMo123s7re6%0AgvLFiyzS8cuDlvOd5%2BmEd5WWy5Duo82oNJXl09FWN6rsRUROH8W3uM7xt7jyf7xdguFrzu/8Qc%2BV%0AhWD0FTefhmZZBO/O5fRDO2L4lkrWTs22Gf7CI7FKno%2B/xfXLD8e8qCY8o7gmDEmjoJgYMzHtPHRa%0AKpiYm4qPhR6mvXMpyyImAl6QtJiyac5RP7Wu3rAapSUB9dWNKp6%2BaDgnSwIIoY1zRQuf3ijiq4/G%0AhAAU/vyUDl7jD3oahnexvDuXBzG8h1N5Prd8iJZxrBQ/ZjcmQ0RZeaxOwwQbKrz7XngEyOASWrZU%0AYBPsy9tsghnGcOH9/LiJS%2Bf84RTPacRCV2nvzGR9kY4ppXhnmsW8Ogvwzv8NDd45qPOXO/rapur6%0A5KuiVDDx6Y0iDsvt4UE%2BIbg%2BP%2BJaK3Esrs3nsfta7FM0vDO2CkaMMQnw7nweq48rgbLyOJ0GeAeG%0A6Hlf3ai6L3VkrCCcLoXFiqqPQ/5hpYO17Tp291ve6aP99bxPjlu4NGG5E1/sX6lgYmYyg539lnAA%0AEEyeX1ueySNrkcAFCACLF7NY3677nkyKGpekONJrbSjwLoO6/%2BoLr0%2B2WrI2DMjnXwCHjcvKwohQ%0AJBzAsxbBldnwiDGmAVxbCA/teFrYimsDh3cZ1CkHdiAS3imlvrbCNMAP%2BefHzL7B%2B1TJwsVz0SF/%0AxoX4nFEAvjybc9/kFVbfwoUsCnkjUFbV7rC1gcF7GKj7%2BqcJ7zpaudbBxvMGDssdr40ewzu3VnEw%0AKz7soALwXNbA0nR4XHaumUYwtKOYX6cvbxW8R4K6VDYJvKu0k2rHfVNsx3mBQD/g/dJEBpPj3rue%0Ao8ZibMTEwoUstvaaSnhnvipDWVbWFi5ksbZdR6XW8WpQWO2wvgxKGwi8R4F6L%2BCdUopK3cbaNn9T%0ArFMXIX2Bd4M4XnYEL46w8bq2kMf2fhPUhs9M57MESzNeXHZVWTGZJrvb/P5xNXJchg3vfd82owPq%0AUfAu1ydrlXoHa1t1bO41vfjpfn9Az7WLExYmx61E4zI2YmDhQhabLxsQ0/JMDlmLRJaVtcVLWTza%0AqqPS6ETmG6bW120zDNSb7u00pUL%2BCC0O3kULtbnHrYBTBa%2BP9kcjBLi%2BMOI7bp1xIYRFFd7eb6LT%0AYdXnc2wZVOWNqs8gLGbDnfVqZL5haoGlUDUDxc/lvCrNdphKB9Tj4J33R4Tyte06tkQLJcwFGbZ7%0ArU1PZHB%2BzAwdA95XlTY2YmB%2BKovNPTYuV%2BfysEwSuNp16pt3WIv3I0nZQWg9h/fEoK7SFPA%2BKCiP%0A0gyDsZXc5yRX9bX5PHb2m8iYBEvTQWulW59JgOvzeVTqdmLrOQit5/CeFNTj4H2QUB6nTZ/PYKIo%0ARmNOfjUXR9irREoFE6bRnbWZm8rixZtW6r70U%2BspvKcFdZVWbdgDh/I47Yb0mFVawF1ZzCOXMQIn%0AJml9hkE952vKvvRL%2B/9WiFUz1yMl1gAAAABJRU5ErkJggg%3D%3D%0A' # pylint: disable=line-too-long + + +class StoreProfilePictureToGCSJobTests(job_test_utils.JobTestBase): + """Tests for StoreProfilePictureToGCSJob.""" + + JOB_CLASS = store_profile_images_to_gcs_jobs.StoreProfilePictureToGCSJob + + def setUp(self) -> None: + super().setUp() + + self.user_1 = self.create_model( + 
user_models.UserSettingsModel, + id='test_id_1', + email='test_1@example.com', + username='test_1', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + profile_picture_data_url=user_services.DEFAULT_IDENTICON_DATA_URL + ) + + self.user_2 = self.create_model( + user_models.UserSettingsModel, + id='test_id_2', + email='test_2@example.com', + username='test_2', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + profile_picture_data_url=None + ) + + def test_run_with_no_models(self) -> None: + self.assert_job_output_is([]) + + def test_profile_picture_stored_to_gcs(self) -> None: + self.put_multi([self.user_1, self.user_2]) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOTAL PNG IMAGES SUCCESS: 2' + ), + job_run_result.JobRunResult( + stdout='TOTAL WEBP IMAGES SUCCESS: 2' + ) + ]) + + def test_none_profile_picture_store_to_gcs(self) -> None: + self.put_multi([self.user_2]) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOTAL PNG IMAGES SUCCESS: 1' + ), + job_run_result.JobRunResult( + stdout='TOTAL WEBP IMAGES SUCCESS: 1' + ) + ]) + + +class AuditProfilePictureFromGCSJobTests(job_test_utils.JobTestBase): + """Tests for AuditProfilePictureFromGCSJob.""" + + JOB_CLASS = store_profile_images_to_gcs_jobs.AuditProfilePictureFromGCSJob + + def setUp(self) -> None: + super().setUp() + + self.user_1 = self.create_model( + user_models.UserSettingsModel, + id='test_id_1', + email='test_1@example.com', + username='test_1', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + profile_picture_data_url=user_services.DEFAULT_IDENTICON_DATA_URL + ) + + self.user_2 = self.create_model( + user_models.UserSettingsModel, + id='test_id_2', + email='test_2@example.com', + username='test_2', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + profile_picture_data_url=VALID_IMAGE + ) + + self.user_3 = self.create_model( + user_models.UserSettingsModel, + id='test_id_3', 
+ email='test_3@example.com', + username='test_3', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + profile_picture_data_url=INVALID_IMAGE + ) + + def _get_webp_binary_data(self, png_binary: bytes) -> bytes: + """Convert png binary data to webp binary data.""" + output = io.BytesIO() + image = Image.open(io.BytesIO(png_binary)).convert('RGB') + image.save(output, 'webp') + return output.getvalue() + + def _push_file_to_gcs(self, username: str, data_url: str) -> None: + """Push file to the fake gcs client.""" + bucket = app_identity_services.get_gcs_resource_bucket_name() + filepath_png = f'user/{username}/assets/profile_picture.png' + filepath_webp = f'user/{username}/assets/profile_picture.webp' + png_binary = utils.convert_png_data_url_to_binary(data_url) + webp_binary = self._get_webp_binary_data(png_binary) + storage_services.commit(bucket, filepath_png, png_binary, None) + storage_services.commit(bucket, filepath_webp, webp_binary, None) + + def test_images_on_gcs_and_model_are_same(self) -> None: + self.put_multi([self.user_1, self.user_2]) + self._push_file_to_gcs( + 'test_1', user_services.DEFAULT_IDENTICON_DATA_URL) + self._push_file_to_gcs('test_2', VALID_IMAGE) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOTAL PNG IMAGES ITERATED ON GCS SUCCESS: 2' + ), + job_run_result.JobRunResult( + stdout='TOTAL WEBP IMAGES ITERATED ON GCS SUCCESS: 2' + ) + ]) + + def test_invalid_images_are_valid(self) -> None: + self.put_multi([self.user_3]) + self._push_file_to_gcs('test_3', VALID_IMAGE) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOTAL PNG IMAGES ITERATED ON GCS SUCCESS: 1' + ), + job_run_result.JobRunResult( + stdout='TOTAL WEBP IMAGES ITERATED ON GCS SUCCESS: 1' + ) + ]) + + def test_images_on_gcs_and_model_are_not_same(self) -> None: + self.user_1.profile_picture_data_url = VALID_IMAGE + self.user_2.profile_picture_data_url = ( + user_services.DEFAULT_IDENTICON_DATA_URL) + 
self.put_multi([self.user_1, self.user_2]) + self._push_file_to_gcs( + 'test_1', user_services.DEFAULT_IDENTICON_DATA_URL) + self._push_file_to_gcs('test_2', VALID_IMAGE) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOTAL PNG IMAGES ITERATED ON GCS SUCCESS: 2' + ), + job_run_result.JobRunResult( + stdout='TOTAL MISMATCHED PNG IMAGES SUCCESS: 2' + ), + job_run_result.JobRunResult( + stderr=( + 'The user having username test_1, have mismatched png image' + ' on GCS and in the model.' + ) + ), + job_run_result.JobRunResult( + stdout='TOTAL WEBP IMAGES ITERATED ON GCS SUCCESS: 2' + ), + job_run_result.JobRunResult( + stdout='TOTAL MISMATCHED WEBP IMAGES SUCCESS: 2' + ), + job_run_result.JobRunResult( + stderr=( + 'The user having username test_1, has incompatible webp ' + 'image on GCS and png in the model.' + ) + ), + job_run_result.JobRunResult( + stderr=( + 'The user having username test_2, have mismatched png image' + ' on GCS and in the model.' + ) + ), + job_run_result.JobRunResult( + stderr=( + 'The user having username test_2, has incompatible webp ' + 'image on GCS and png in the model.' 
+ ) + ) + ]) + + def test_same_png_different_webp_on_gcs_and_in_model(self) -> None: + self.put_multi([self.user_1]) + bucket = app_identity_services.get_gcs_resource_bucket_name() + filepath_png = f'user/{self.user_1.username}/assets/profile_picture.png' + filepath_webp = ( + f'user/{self.user_1.username}/assets/profile_picture.webp') + png_binary = utils.convert_png_data_url_to_binary( + user_services.DEFAULT_IDENTICON_DATA_URL) + valid_image_png_binary = utils.convert_png_data_url_to_binary( + VALID_IMAGE) + webp_binary = self._get_webp_binary_data(valid_image_png_binary) + storage_services.commit(bucket, filepath_png, png_binary, None) + storage_services.commit(bucket, filepath_webp, webp_binary, None) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOTAL PNG IMAGES ITERATED ON GCS SUCCESS: 1' + ), + job_run_result.JobRunResult( + stdout='TOTAL WEBP IMAGES ITERATED ON GCS SUCCESS: 1' + ), + job_run_result.JobRunResult( + stdout='TOTAL MISMATCHED WEBP IMAGES SUCCESS: 1' + ), + job_run_result.JobRunResult( + stderr=( + 'The user having username test_1, has incompatible webp ' + 'image on GCS and png in the model.' + ) + ) + ]) diff --git a/core/jobs/batch_jobs/story_migration_jobs.py b/core/jobs/batch_jobs/story_migration_jobs.py new file mode 100644 index 000000000000..953a001423c9 --- /dev/null +++ b/core/jobs/batch_jobs/story_migration_jobs.py @@ -0,0 +1,324 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Jobs used for migrating the story models.""" + +from __future__ import annotations + +import logging + +from core import feconf +from core.domain import story_domain +from core.domain import story_fetchers +from core.domain import story_services +from core.domain import topic_domain +from core.domain import topic_fetchers +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.transforms import results_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam +import result + +from typing import Dict, Iterable, Optional, Sequence, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + from mypy_imports import story_models + from mypy_imports import topic_models + +(base_models, story_models, topic_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.STORY, models.Names.TOPIC +]) + +datastore_services = models.Registry.import_datastore_services() + + +# TODO(#15927): This job needs to be kept in sync with AuditStoryMigrationJob +# and later we will unify these jobs together. +class MigrateStoryJob(base_jobs.JobBase): + """Job that migrates story models.""" + + @staticmethod + def _migrate_story( + story_id: str, + story_model: story_models.StoryModel, + # This must have a default value of None. Otherwise, Beam won't + # execute this code. + topic_id_to_topic: Optional[Dict[str, topic_domain.Topic]] = None + ) -> result.Result[Tuple[str, story_domain.Story], Tuple[str, Exception]]: + """Migrates story and transform story model into story object. + + Args: + story_id: str. The id of the story. + story_model: StoryModel. The story model to migrate. + topic_id_to_topic: dict(str, Topic). 
The mapping from topic ID + to topic. + + Returns: + Result((str, Story), (str, Exception)). Result containing tuple that + consists of story ID and either story object or Exception. Story + object is returned when the migration was successful and Exception + is returned otherwise. + """ + try: + story = story_fetchers.get_story_from_model(story_model) + story.validate() + assert topic_id_to_topic is not None + corresponding_topic = ( + topic_id_to_topic[story.corresponding_topic_id]) + story_services.validate_prerequisite_skills_in_story_contents( + corresponding_topic.get_all_skill_ids(), + story.story_contents + ) + except Exception as e: + logging.exception(e) + return result.Err((story_id, e)) + + return result.Ok((story_id, story)) + + @staticmethod + def _generate_story_changes( + story_id: str, story_model: story_models.StoryModel + ) -> Iterable[Tuple[str, story_domain.StoryChange]]: + """Generates story change objects. Story change object is generated when + schema version for some field is lower than the latest schema version. + + Args: + story_id: str. The id of the story. + story_model: StoryModel. The story for which to generate + the change objects. + + Yields: + (str, StoryChange). Tuple containing story ID and story change + object. + """ + schema_version = story_model.story_contents_schema_version + if schema_version < feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION: + story_change = story_domain.StoryChange({ + 'cmd': story_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION, + 'from_version': story_model.story_contents_schema_version, + 'to_version': feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION + }) + yield (story_id, story_change) + + @staticmethod + def _update_story( + story_model: story_models.StoryModel, + migrated_story: story_domain.Story, + story_change: story_domain.StoryChange + ) -> Sequence[base_models.BaseModel]: + """Generates newly updated story models. + + Args: + story_model: StoryModel. The story which should be updated. 
+ migrated_story: Story. The migrated story domain object. + story_change: StoryChange. The story change to apply. + + Returns: + sequence(BaseModel). Sequence of models which should be put into + the datastore. + """ + updated_story_model = story_services.populate_story_model_fields( + story_model, migrated_story) + change_dicts = [story_change.to_dict()] + with datastore_services.get_ndb_context(): + models_to_put = updated_story_model.compute_models_to_commit( + feconf.MIGRATION_BOT_USERNAME, + feconf.COMMIT_TYPE_EDIT, + 'Update story contents schema version to %d.' % ( + feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION), + change_dicts, + additional_models={} + ) + models_to_put_values = [] + for model in models_to_put.values(): + # Here, we are narrowing down the type from object to BaseModel. + assert isinstance(model, base_models.BaseModel) + models_to_put_values.append(model) + datastore_services.update_timestamps_multi(models_to_put_values) + return models_to_put_values + + @staticmethod + def _update_story_summary( + migrated_story: story_domain.Story, + story_summary_model: story_models.StorySummaryModel + ) -> story_models.StorySummaryModel: + """Generates newly updated story summary model. + + Args: + migrated_story: Story. The migrated story domain object. + story_summary_model: StorySummaryModel. The story summary model + to update. + + Returns: + StorySummaryModel. The updated story summary model to put into + the datastore. + """ + story_summary = story_services.compute_summary_of_story(migrated_story) + story_summary.version += 1 + updated_story_summary_model = ( + story_services.populate_story_summary_model_fields( + story_summary_model, story_summary + ) + ) + return updated_story_summary_model + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the story migration. + + Returns: + PCollection. A PCollection of results from the story migration. 
+ """ + + unmigrated_story_models = ( + self.pipeline + | 'Get all non-deleted story models' >> ( + ndb_io.GetModels(story_models.StoryModel.get_all())) + # Pylint disable is needed because pylint is not able to correctly + # detect that the value is passed through the pipe. + | 'Add story keys' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda story_model: story_model.id) + ) + story_summary_models = ( + self.pipeline + | 'Get all non-deleted story summary models' >> ( + ndb_io.GetModels(story_models.StorySummaryModel.get_all())) + # Pylint disable is needed because pylint is not able to correctly + # detect that the value is passed through the pipe. + | 'Add story summary keys' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda story_summary_model: story_summary_model.id) + ) + topics = ( + self.pipeline + | 'Get all non-deleted topic models' >> ( + ndb_io.GetModels(topic_models.TopicModel.get_all())) + | 'Transform model into domain object' >> beam.Map( + topic_fetchers.get_topic_from_model) + # Pylint disable is needed because pylint is not able to correctly + # detect that the value is passed through the pipe. 
+ | 'Add topic keys' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda topic: topic.id) + ) + topic_id_to_topic = beam.pvalue.AsDict(topics) + + all_migrated_story_results = ( + unmigrated_story_models + | 'Transform and migrate model' >> beam.MapTuple( + self._migrate_story, topic_id_to_topic=topic_id_to_topic) + ) + migrated_story_job_run_results = ( + all_migrated_story_results + | 'Generate results for migration' >> ( + job_result_transforms.ResultsToJobRunResults('STORY PROCESSED')) + ) + filtered_migrated_stories = ( + all_migrated_story_results + | 'Filter migration results' >> ( + results_transforms.DrainResultsOnError()) + ) + + migrated_stories = ( + filtered_migrated_stories + | 'Unwrap ok' >> beam.Map( + lambda result_item: result_item.unwrap()) + ) + + story_changes = ( + unmigrated_story_models + | 'Generate story changes' >> beam.FlatMapTuple( + self._generate_story_changes) + ) + + story_objects_list = ( + { + 'story_model': unmigrated_story_models, + 'story_summary_model': story_summary_models, + 'story': migrated_stories, + 'story_change': story_changes + } + | 'Merge objects' >> beam.CoGroupByKey() + | 'Get rid of ID' >> beam.Values() # pylint: disable=no-value-for-parameter + ) + + transformed_story_objects_list = ( + story_objects_list + | 'Remove unmigrated stories' >> beam.Filter( + lambda x: len(x['story_change']) > 0 and len(x['story']) > 0) + | 'Reorganize the story objects' >> beam.Map(lambda objects: { + 'story_model': objects['story_model'][0], + 'story_summary_model': objects['story_summary_model'][0], + 'story': objects['story'][0], + 'story_change': objects['story_change'][0] + }) + ) + + already_migrated_job_run_results = ( + story_objects_list + | 'Remove migrated stories' >> beam.Filter( + lambda x: ( + len(x['story_change']) == 0 and len(x['story']) > 0 + )) + | 'Transform previously migrated stories into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'STORY PREVIOUSLY MIGRATED')) + 
) + + story_objects_list_job_run_results = ( + transformed_story_objects_list + | 'Transform story objects into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'STORY MIGRATED')) + ) + + story_models_to_put = ( + transformed_story_objects_list + | 'Generate story models to put' >> beam.FlatMap( + lambda story_objects: self._update_story( + story_objects['story_model'], + story_objects['story'], + story_objects['story_change'], + )) + ) + + story_summary_models_to_put = ( + transformed_story_objects_list + | 'Generate story summary models to put' >> beam.Map( + lambda story_objects: self._update_story_summary( + story_objects['story'], + story_objects['story_summary_model'] + )) + ) + + unused_put_results = ( + (story_models_to_put, story_summary_models_to_put) + | 'Merge models' >> beam.Flatten() + | 'Put models into the datastore' >> ndb_io.PutModels() + ) + + return ( + ( + migrated_story_job_run_results, + already_migrated_job_run_results, + story_objects_list_job_run_results + ) + | beam.Flatten() + ) diff --git a/core/jobs/batch_jobs/story_migration_jobs_test.py b/core/jobs/batch_jobs/story_migration_jobs_test.py new file mode 100644 index 000000000000..0d27344fd514 --- /dev/null +++ b/core/jobs/batch_jobs/story_migration_jobs_test.py @@ -0,0 +1,227 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for jobs.batch_jobs.exp_recommendation_computation_jobs.""" + +from __future__ import annotations + +import copy +import datetime + +from core import feconf +from core.domain import story_domain +from core.jobs import job_test_utils +from core.jobs.batch_jobs import story_migration_jobs +from core.jobs.types import job_run_result +from core.platform import models + +from typing import Final, Type + +MYPY = False +if MYPY: + from mypy_imports import datastore_services + from mypy_imports import story_models + from mypy_imports import topic_models + +(story_models, topic_models) = models.Registry.import_models([ + models.Names.STORY, models.Names.TOPIC +]) + +datastore_services = models.Registry.import_datastore_services() + + +class MigrateStoryJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + story_migration_jobs.MigrateStoryJob + ] = story_migration_jobs.MigrateStoryJob + + STORY_1_ID: Final = 'story_1_id' + TOPIC_1_ID: Final = 'topic_1_id' + STORY_2_ID: Final = 'story_2_id' + + def setUp(self) -> None: + super().setUp() + story_summary_model = self.create_model( + story_models.StorySummaryModel, + id=self.STORY_1_ID, + title='title', + url_fragment='urlfragment', + language_code='cs', + description='description', + node_titles=['title1', 'title2'], + story_model_last_updated=datetime.datetime.utcnow(), + story_model_created_on=datetime.datetime.utcnow(), + version=1 + ) + topic_model = self.create_model( + topic_models.TopicModel, + id=self.TOPIC_1_ID, + name='topic title', + canonical_name='topic title', + story_reference_schema_version=1, + subtopic_schema_version=1, + next_subtopic_id=1, + language_code='cs', + url_fragment='topic', + canonical_story_references=[{ + 'story_id': self.STORY_1_ID, + 'story_is_published': False + }], + page_title_fragment_for_web='fragm', + ) + datastore_services.update_timestamps_multi([ + topic_model, story_summary_model]) + datastore_services.put_multi([topic_model, story_summary_model]) + 
self.latest_contents: story_domain.StoryContentsDict = { + 'nodes': [{ + 'id': 'node_1111', + 'title': 'title', + 'description': 'description', + 'thumbnail_filename': 'thumbnail_filename.svg', + 'thumbnail_bg_color': '#F8BF74', + 'thumbnail_size_in_bytes': None, + 'destination_node_ids': [], + 'acquired_skill_ids': [], + 'prerequisite_skill_ids': [], + 'outline': 'outline', + 'outline_is_finalized': True, + 'exploration_id': 'exp_id' + }], + 'initial_node_id': 'node_1111', + 'next_node_id': 'node_2222' + } + self.broken_contents = copy.deepcopy(self.latest_contents) + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. + self.broken_contents['nodes'][0]['description'] = 123 # type: ignore[arg-type] + + self.unmigrated_contents = copy.deepcopy(self.latest_contents) + self.unmigrated_contents['nodes'][0]['thumbnail_size_in_bytes'] = 123 + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_unmigrated_story_with_unmigrated_rubric_is_migrated(self) -> None: + story_model = self.create_model( + story_models.StoryModel, + id=self.STORY_1_ID, + story_contents_schema_version=4, + title='title', + language_code='cs', + notes='notes', + description='description', + story_contents=self.unmigrated_contents, + corresponding_topic_id=self.TOPIC_1_ID, + url_fragment='urlfragment', + ) + story_model.update_timestamps() + story_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create story', [{ + 'cmd': story_domain.CMD_CREATE_NEW + }]) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='STORY PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult(stdout='STORY MIGRATED SUCCESS: 1'), + ]) + + migrated_story_model = story_models.StoryModel.get(self.STORY_1_ID) + self.assertEqual(migrated_story_model.version, 2) + self.assertEqual( + migrated_story_model.story_contents_schema_version, + 
feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION) + self.assertEqual( + migrated_story_model.story_contents, self.latest_contents) + + def test_broken_story_is_not_migrated(self) -> None: + story_model_one = self.create_model( + story_models.StoryModel, + id=self.STORY_1_ID, + story_contents_schema_version=4, + title='title', + language_code='cs', + notes='notes', + description='description', + story_contents=self.broken_contents, + corresponding_topic_id=self.TOPIC_1_ID, + url_fragment='urlfragment', + ) + story_model_one.update_timestamps() + story_model_one.commit(feconf.SYSTEM_COMMITTER_ID, 'Create story', [{ + 'cmd': story_domain.CMD_CREATE_NEW + }]) + + story_model_two = self.create_model( + story_models.StoryModel, + id=self.STORY_2_ID, + story_contents_schema_version=4, + title='title', + language_code='cs', + notes='notes', + description='description', + story_contents=self.unmigrated_contents, + corresponding_topic_id=self.TOPIC_1_ID, + url_fragment='urlfragment', + ) + story_model_two.update_timestamps() + story_model_two.commit(feconf.SYSTEM_COMMITTER_ID, 'Create story', [{ + 'cmd': story_domain.CMD_CREATE_NEW + }]) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'STORY PROCESSED ERROR: "(\'story_1_id\', ValidationError(' + '\'Expected description to be a string, received 123\'' + '))": 1' + ) + ), + job_run_result.JobRunResult(stdout='STORY PROCESSED SUCCESS: 1') + ]) + + migrated_story_model = story_models.StoryModel.get(self.STORY_1_ID) + self.assertEqual(migrated_story_model.version, 1) + migrated_story_model = story_models.StoryModel.get(self.STORY_2_ID) + self.assertEqual(migrated_story_model.version, 1) + + def test_migrated_story_is_not_migrated(self) -> None: + story_model = self.create_model( + story_models.StoryModel, + id=self.STORY_1_ID, + story_contents_schema_version=( + feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION), + title='title', + language_code='cs', + notes='notes', + description='description', + 
story_contents=self.latest_contents, + corresponding_topic_id=self.TOPIC_1_ID, + url_fragment='urlfragment', + ) + story_model.update_timestamps() + story_model.commit(feconf.SYSTEM_COMMITTER_ID, 'Create story', [{ + 'cmd': story_domain.CMD_CREATE_NEW + }]) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='STORY PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='STORY PREVIOUSLY MIGRATED SUCCESS: 1' + ), + ]) + + migrated_story_model = story_models.StoryModel.get(self.STORY_1_ID) + self.assertEqual(migrated_story_model.version, 1) diff --git a/core/jobs/batch_jobs/suggestion_migration_jobs.py b/core/jobs/batch_jobs/suggestion_migration_jobs.py new file mode 100644 index 000000000000..a873c0705f16 --- /dev/null +++ b/core/jobs/batch_jobs/suggestion_migration_jobs.py @@ -0,0 +1,319 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Jobs that migrate suggestion models.""" + +from __future__ import annotations + +import logging + +from core import feconf +from core.domain import question_domain +from core.domain import question_fetchers +from core.domain import state_domain +from core.domain import suggestion_services +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam +import result +from typing import List, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + from mypy_imports import suggestion_models + +(exp_models, suggestion_models) = models.Registry.import_models( + [models.Names.EXPLORATION, models.Names.SUGGESTION]) + +datastore_services = models.Registry.import_datastore_services() + + +class RegenerateContentIdForTranslationSuggestionsInReviewJob( + base_jobs.JobBase +): + """Regenerate content_id field for suggestions in review.""" + + DATASTORE_UPDATES_ALLOWED = True + + @staticmethod + def _update_content_id_in_translation_suggestions( + suggestions: List[suggestion_models.GeneralSuggestionModel], + exp_model: exp_models.ExplorationModel + ) -> List[result.Result[ + suggestion_models.GeneralSuggestionModel, + Tuple[str, Exception] + ]]: + """Updates content id in translation suggestion. + + Args: + suggestions: list(GeneralSuggestionModel). A list of translation + suggestion models corresponding to the given exploration. + exp_model: ExplorationModel. The exploration model. + + Returns: + Result(list(GeneralSuggestionModel), (str, Exception)). Result + containing list of migrated suggestion models or Exception. + Suggestion models are returned when the migration is + successful and Exception is returned otherwise. 
+ """ + old_to_new_content_id_mapping, _ = ( + state_domain.State + .generate_old_content_id_to_new_content_id_in_v54_states( + exp_model.states + ) + ) + + results = [] + for suggestion in suggestions: + suggestion_content_id = suggestion.change_cmd['content_id'] + state_name = suggestion.change_cmd['state_name'] + + if not state_name in old_to_new_content_id_mapping: + results.append(result.Err(( + suggestion.id, + 'State name %s does not exist in the exploration' + % state_name))) + continue + + old_to_new_content_id_in_state = old_to_new_content_id_mapping[ + state_name] + if suggestion_content_id not in old_to_new_content_id_in_state: + results.append(result.Err( + ( + suggestion.id, + 'Content ID %s does not exist in the exploration' + % suggestion_content_id + ) + )) + continue + + suggestion.change_cmd['content_id'] = ( + old_to_new_content_id_in_state[suggestion_content_id]) + results.append(result.Ok(suggestion)) + + return results + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the suggestion migration. + + Returns: + PCollection. A PCollection of results from the suggestion + migration. 
+ """ + target_id_to_suggestion_models = ( + self.pipeline + | 'Get translation suggestion models in review' >> ndb_io.GetModels( + suggestion_models.GeneralSuggestionModel.get_all( + include_deleted=False).filter( + ( + suggestion_models + .GeneralSuggestionModel.suggestion_type + ) == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT + ).filter( + suggestion_models.GeneralSuggestionModel.status == ( + suggestion_models.STATUS_IN_REVIEW + ) + ) + ) + | 'Add target id as key' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda model: model.target_id) + | 'Group exploration suggestions' >> beam.GroupByKey() + ) + + exploration_models = ( + self.pipeline + | 'Get all exploration models' >> ndb_io.GetModels( + exp_models.ExplorationModel.get_all()) + | 'Add exploration id as key' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda model: model.id) + ) + + migrated_suggestion_results = ( + { + 'suggestion_models': target_id_to_suggestion_models, + 'exploration_model': exploration_models + } + | 'Merge objects' >> beam.CoGroupByKey() + | 'Get rid of ID' >> beam.Values() # pylint: disable=no-value-for-parameter + | 'Filter unwanted exploration' >> beam.Filter( + lambda objects: len(objects['suggestion_models']) != 0) + | 'Transform and migrate model' >> beam.Map( + lambda objects: ( + self._update_content_id_in_translation_suggestions( + objects['suggestion_models'][0], + objects['exploration_model'][0] + ) + )) + | 'Flatten results' >> beam.FlatMap(lambda x: x) + ) + + migrated_suggestion_models = ( + migrated_suggestion_results + | 'Filter oks' >> beam.Filter(lambda item: item.is_ok()) + | 'Unwrap ok' >> beam.Map(lambda item: item.unwrap()) + ) + + migrated_suggestion_job_run_results = ( + migrated_suggestion_results + | 'Generate results for migration' >> ( + job_result_transforms.ResultsToJobRunResults( + 'SUGGESTION TARGET PROCESSED')) + ) + + migrated_suggestions_count_job_run_results = ( + migrated_suggestion_models + | 'Transform suggestion 
objects into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'SUGGESTION MIGRATED')) + ) + + if self.DATASTORE_UPDATES_ALLOWED: + unused_put_results = ( + migrated_suggestion_models + | 'Put models into the datastore' >> ndb_io.PutModels() + ) + + return ( + ( + migrated_suggestion_job_run_results, + migrated_suggestions_count_job_run_results + ) + | beam.Flatten() + ) + + +class AuditRegenerateContentIdForTranslationSuggestionsInReviewJob( + RegenerateContentIdForTranslationSuggestionsInReviewJob +): + """Audit RegenerateContentIdForTranslationSuggestionsInReviewJob.""" + + DATASTORE_UPDATES_ALLOWED = False + + +class MigrateQuestionSuggestionsJob(base_jobs.JobBase): + """Migrate question dict in question suggestion to the latest schema.""" + + DATASTORE_UPDATES_ALLOWED = True + + @staticmethod + def _migrate_question_dict( + question_suggestion_model: suggestion_models.GeneralSuggestionModel + ) -> result.Result[ + suggestion_models.GeneralSuggestionModel, + Tuple[str, Exception] + ]: + """Migrates question dict in the question suggestion model to the latest + schema. 
+ """ + question_dict = question_suggestion_model.change_cmd['question_dict'] + versioned_question_state: question_domain.VersionedQuestionStateDict = { + 'state': question_dict['question_state_data'], + 'state_schema_version': question_dict[ + 'question_state_data_schema_version'] + } + + try: + next_content_id_index = question_fetchers.migrate_state_schema( + versioned_question_state) + + question_dict['next_content_id_index'] = next_content_id_index + question_dict['question_state_data_schema_version'] = ( + versioned_question_state['state_schema_version']) + + suggestion = suggestion_services.get_suggestion_from_model( + question_suggestion_model) + suggestion.validate() + except Exception as e: + logging.exception(e) + return result.Err((question_suggestion_model.id, e)) + + return result.Ok(question_suggestion_model) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + question_suggestions = ( + self.pipeline + | 'Get all GeneralSuggestionModels' >> ndb_io.GetModels( + suggestion_models.GeneralSuggestionModel.get_all( + include_deleted=False)) + | 'Filter question suggestions' >> ( + beam.Filter( + lambda model: ( + model.suggestion_type == + feconf.SUGGESTION_TYPE_ADD_QUESTION + and model.status == suggestion_models.STATUS_IN_REVIEW + ), + )) + ) + + models_count_job_run_results = ( + question_suggestions + | 'Transform suggestions into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'QUESTION MODELS COUNT')) + ) + + migrate_suggestion_results = ( + question_suggestions + | 'Filter suggestions required migration' >> beam.Filter( + lambda model: ( + model.change_cmd['question_dict'][ + 'question_state_data_schema_version'] != ( + feconf.CURRENT_STATE_SCHEMA_VERSION) + )) + | 'Migrate question_dict in change field' >> beam.Map( + self._migrate_question_dict + ) + ) + + migrated_suggestions = ( + migrate_suggestion_results + | 'Filter oks' >> beam.Filter( + lambda result_item: result_item.is_ok()) + | 'Unwrap ok' >> 
beam.Map( + lambda result_item: result_item.unwrap()) + ) + + migrated_exp_job_run_results = ( + migrate_suggestion_results + | 'Generate results for migration' >> ( + job_result_transforms.ResultsToJobRunResults( + 'SUGGESTION MIGRATED')) + ) + + if self.DATASTORE_UPDATES_ALLOWED: + unused_put_results = ( + migrated_suggestions + | 'Put models into the datastore' >> ndb_io.PutModels() + ) + + return ( + ( + models_count_job_run_results, + migrated_exp_job_run_results + ) + | beam.Flatten() + ) + + +class AuditMigrateQuestionSuggestionsJob(MigrateQuestionSuggestionsJob): + """Audit MigrateQuestionSuggestionsJob.""" + + DATASTORE_UPDATES_ALLOWED = False diff --git a/core/jobs/batch_jobs/suggestion_migration_jobs_test.py b/core/jobs/batch_jobs/suggestion_migration_jobs_test.py new file mode 100644 index 000000000000..870b8c6049c0 --- /dev/null +++ b/core/jobs/batch_jobs/suggestion_migration_jobs_test.py @@ -0,0 +1,686 @@ +# coding: utf-8 +# +# Copyright 2021 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for jobs.batch_jobs.suggestion_migration_jobs.""" + +from __future__ import annotations + +from core import feconf +from core.domain import question_domain +from core.domain import question_fetchers +from core.domain import skill_services +from core.domain import suggestion_services +from core.domain import translation_domain +from core.jobs import job_test_utils +from core.jobs.batch_jobs import suggestion_migration_jobs +from core.jobs.types import job_run_result +from core.platform import models +from core.tests import test_utils + +from typing import Dict, Union +from typing_extensions import Final + +MYPY = False +if MYPY: + from mypy_imports import exp_models + from mypy_imports import suggestion_models + +(exp_models, suggestion_models) = models.Registry.import_models([ + models.Names.EXPLORATION, models.Names.SUGGESTION +]) + + +class MigrateSuggestionJobTests(job_test_utils.JobTestBase): + + JOB_CLASS = ( + suggestion_migration_jobs + .RegenerateContentIdForTranslationSuggestionsInReviewJob + ) + TARGET_ID = 'exp1' + + def setUp(self) -> None: + super().setUp() + self.STATE_DICT_IN_V52 = { + 'content': {'content_id': 'content', 'html': ''}, + 'param_changes': [], + 'interaction': { + 'solution': None, + 'answer_groups': [], + 'default_outcome': { + 'param_changes': [], + 'feedback': { + 'content_id': 'default_outcome', + 'html': 'Default outcome' + }, + 'dest': 'Introduction', + 'dest_if_really_stuck': None, + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False + }, + 'customization_args': { + 'catchMisspellings': { + 'value': False + }, + 'rows': { + 'value': 1 + }, + 'placeholder': { + 'value': { + 'unicode_str': '', + 'content_id': 'ca_placeholder_1' + } + } + }, + 'confirmed_unclassified_answers': [], + 'id': 'TextInput', + 'hints': [] + }, + 'linked_skill_id': None, + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'content': {}, + 'default_outcome': {}, + 'ca_placeholder_1': 
{} + } + }, + 'written_translations': { + 'translations_mapping': { + 'content': {}, + 'default_outcome': {}, + 'ca_placeholder_1': {} + } + }, + 'classifier_model_id': None, + 'card_is_checkpoint': False, + 'solicit_answer_details': False, + 'next_content_id_index': 2 + } + self.exp_1 = self.create_model( + exp_models.ExplorationModel, + id=self.TARGET_ID, + title='title', + init_state_name=feconf.DEFAULT_INIT_STATE_NAME, + category=feconf.DEFAULT_EXPLORATION_CATEGORY, + objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE, + language_code='en', + tags=['Topic'], + blurb='blurb', + author_notes='author notes', + states_schema_version=52, + param_specs={}, + param_changes=[], + auto_tts_enabled=feconf.DEFAULT_AUTO_TTS_ENABLED, + correctness_feedback_enabled=False, + states={feconf.DEFAULT_INIT_STATE_NAME: self.STATE_DICT_IN_V52}, + ) + self.put_multi([self.exp_1]) + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_unmigrated_suggestion_is_migrated(self) -> None: + change_dict = { + 'cmd': 'add_translation', + 'content_id': 'default_outcome', + 'language_code': 'hi', + 'content_html': 'Content', + 'state_name': 'Introduction', + 'translation_html': '

    Translation for content.

    ' + } + + suggestion_1_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=change_dict, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_1_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_1_model]) + unmigrated_suggestion_model = ( + suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id) + ) + self.assertEqual( + unmigrated_suggestion_model.change_cmd['content_id'], + 'default_outcome' + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='SUGGESTION TARGET PROCESSED SUCCESS: 1' + ), + job_run_result.JobRunResult( + stdout='SUGGESTION MIGRATED SUCCESS: 1' + ) + ]) + + migrated_suggestion_model = ( + suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id) + ) + self.assertEqual( + migrated_suggestion_model.change_cmd['content_id'], + 'default_outcome_1' + ) + + def test_unmigrated_invalid_suggestion_raises_error(self) -> None: + change_dict = { + 'cmd': 'add_translation', + 'content_id': 'default_outcome', + 'language_code': 'hi', + 'content_html': 'Content', + 'state_name': 'invalid_state_name', + 'translation_html': '

    Translation for content.

    ' + } + + suggestion_1_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + id=16, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=change_dict, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_1_model.update_timestamps() + + change_dict = { + 'cmd': 'add_translation', + 'content_id': 'invalid', + 'language_code': 'hi', + 'content_html': 'Content', + 'state_name': 'Introduction', + 'translation_html': '

    Translation for content.

    ' + } + + suggestion_2_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + id=17, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=change_dict, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_2_model.update_timestamps() + + change_dict = { + 'cmd': 'add_translation', + 'content_id': 'default_outcome', + 'language_code': 'hi', + 'content_html': 'Content', + 'state_name': 'Introduction', + 'translation_html': '

    Translation for content.

    ' + } + + suggestion_3_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=change_dict, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_3_model.update_timestamps() + + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_1_model, suggestion_2_model, suggestion_3_model]) + unmigrated_suggestion_model = ( + suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id) + ) + self.assertEqual( + unmigrated_suggestion_model.change_cmd['content_id'], + 'default_outcome' + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='SUGGESTION TARGET PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='SUGGESTION MIGRATED SUCCESS: 1'), + job_run_result.JobRunResult( + stderr=( + 'SUGGESTION TARGET PROCESSED ERROR: \"(16, ' + '\'State name invalid_state_name does not exist in the ' + 'exploration\')\": 1') + ), job_run_result.JobRunResult( + stderr=( + 'SUGGESTION TARGET PROCESSED ERROR: ' + '\"(17, \'Content ID invalid does not exist in the ' + 'exploration\')\": 1') + ), + ]) + + def test_suggestion_with_invalid_content_id_raise_error(self) -> None: + change_dict = { + 'cmd': 'add_translation', + 'content_id': 'invalid_id', + 'language_code': 'hi', + 'content_html': 'Content', + 'state_name': 'Introduction', + 'translation_html': '

    Translation for content.

    ' + } + + suggestion_1_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + id='111', + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=change_dict, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_1_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_1_model]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'SUGGESTION TARGET PROCESSED ERROR: "(\'111\', ' + '\'Content ID invalid_id does not exist in the exploration' + '\')": 1')), + ]) + + unmigrated_suggestion_model = ( + suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id) + ) + self.assertEqual( + unmigrated_suggestion_model.change_cmd['content_id'], 'invalid_id') + + +class AuditMigrateSuggestionJobTests(job_test_utils.JobTestBase): + + JOB_CLASS = ( + suggestion_migration_jobs + .AuditRegenerateContentIdForTranslationSuggestionsInReviewJob + ) + TARGET_ID = 'exp1' + + def setUp(self) -> None: + super().setUp() + self.STATE_DICT_IN_V52 = { + 'content': {'content_id': 'content', 'html': ''}, + 'param_changes': [], + 'interaction': { + 'solution': None, + 'answer_groups': [], + 'default_outcome': { + 'param_changes': [], + 'feedback': { + 'content_id': 'default_outcome', + 'html': 'Default outcome' + }, + 'dest': 'Introduction', + 'dest_if_really_stuck': None, + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False + }, + 'customization_args': { + 'catchMisspellings': { + 'value': False + }, + 'rows': { + 'value': 1 + }, + 'placeholder': { + 'value': { + 'unicode_str': '', + 'content_id': 'ca_placeholder_1' + } + } + }, + 'confirmed_unclassified_answers': [], + 'id': 'TextInput', + 'hints': [] + }, + 'linked_skill_id': None, + 'recorded_voiceovers': { + 
'voiceovers_mapping': { + 'content': {}, + 'default_outcome': {}, + 'ca_placeholder_1': {} + } + }, + 'written_translations': { + 'translations_mapping': { + 'content': {}, + 'default_outcome': {}, + 'ca_placeholder_1': {} + } + }, + 'classifier_model_id': None, + 'card_is_checkpoint': False, + 'solicit_answer_details': False, + 'next_content_id_index': 2 + } + self.exp_1 = self.create_model( + exp_models.ExplorationModel, + id=self.TARGET_ID, + title='title', + init_state_name=feconf.DEFAULT_INIT_STATE_NAME, + category=feconf.DEFAULT_EXPLORATION_CATEGORY, + objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE, + language_code='en', + tags=['Topic'], + blurb='blurb', + author_notes='author notes', + states_schema_version=52, + param_specs={}, + param_changes=[], + auto_tts_enabled=feconf.DEFAULT_AUTO_TTS_ENABLED, + correctness_feedback_enabled=False, + states={feconf.DEFAULT_INIT_STATE_NAME: self.STATE_DICT_IN_V52}, + ) + self.put_multi([self.exp_1]) + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_unmigrated_suggestion_is_not_migrated(self) -> None: + change_dict = { + 'cmd': 'add_translation', + 'content_id': 'default_outcome', + 'language_code': 'hi', + 'content_html': 'Content', + 'state_name': 'Introduction', + 'translation_html': '

    Translation for content.

    ' + } + + suggestion_1_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=change_dict, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_1_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_1_model]) + unmigrated_suggestion_model = ( + suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id) + ) + self.assertEqual( + unmigrated_suggestion_model.change_cmd['content_id'], + 'default_outcome' + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='SUGGESTION TARGET PROCESSED SUCCESS: 1' + ), + job_run_result.JobRunResult( + stdout='SUGGESTION MIGRATED SUCCESS: 1' + ) + ]) + + migrated_suggestion_model = ( + suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id) + ) + self.assertEqual( + migrated_suggestion_model.change_cmd['content_id'], + 'default_outcome' + ) + + def test_suggestion_with_invalid_content_id_raise_error(self) -> None: + change_dict = { + 'cmd': 'add_translation', + 'content_id': 'invalid_id', + 'language_code': 'hi', + 'content_html': 'Content', + 'state_name': 'Introduction', + 'translation_html': '

    Translation for content.

    ' + } + + suggestion_1_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + id=15, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id='user1', + change_cmd=change_dict, + score_category='irrelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.TARGET_ID, + target_version_at_submission=0, + language_code='bn' + ) + suggestion_1_model.update_timestamps() + suggestion_models.GeneralSuggestionModel.put_multi([ + suggestion_1_model]) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'SUGGESTION TARGET PROCESSED ERROR: "(15, ' + '\'Content ID invalid_id does not exist in the exploration' + '\')": 1')), + ]) + + unmigrated_suggestion_model = ( + suggestion_models.GeneralSuggestionModel.get(suggestion_1_model.id) + ) + self.assertEqual( + unmigrated_suggestion_model.change_cmd['content_id'], 'invalid_id') + + +class MigrateQuestionSuggestionsJobTests( + job_test_utils.JobTestBase, test_utils.GenericTestBase): + + JOB_CLASS = suggestion_migration_jobs.MigrateQuestionSuggestionsJob + + AUTHOR_EMAIL: Final = 'author@example.com' + + def setUp(self) -> None: + super().setUp() + self.signup(self.AUTHOR_EMAIL, 'author') + self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_migrated_question_is_not_migrated(self) -> None: + skill_id = skill_services.get_new_skill_id() + self.save_new_skill( + skill_id, self.author_id, description='description') + content_id_generator = translation_domain.ContentIdGenerator() + state = self._create_valid_question_data( + 'default-state', content_id_generator) + suggestion_change: Dict[ + str, Union[str, float, question_domain.QuestionDict] + ] = { + 'cmd': ( + question_domain + .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), + 'question_dict': { + 'id': 'test_id', + 'version': 12, + 'question_state_data': state.to_dict(), + 
'language_code': 'en', + 'question_state_data_schema_version': ( + feconf.CURRENT_STATE_SCHEMA_VERSION), + 'linked_skill_ids': ['skill_1'], + 'inapplicable_skill_misconception_ids': ['skillid12345-1'], + 'next_content_id_index': ( + content_id_generator.next_content_id_index) + }, + 'skill_id': skill_id, + 'skill_difficulty': 0.3 + } + suggestion_services.create_suggestion( + feconf.SUGGESTION_TYPE_ADD_QUESTION, + feconf.ENTITY_TYPE_SKILL, skill_id, 1, + self.author_id, suggestion_change, 'test description') + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='QUESTION MODELS COUNT SUCCESS: 1') + ]) + + def test_unmigrated_question_suggestion_is_migrated(self) -> None: + skill_id = skill_services.get_new_skill_id() + self.save_new_skill( + skill_id, self.author_id, description='description') + suggestion_id = ( + self.save_new_question_suggestion_with_state_data_schema_v27( + self.author_id, skill_id + ) + ) + + suggestion = suggestion_models.GeneralSuggestionModel.get_by_id( + suggestion_id) + + self.assertEqual( + suggestion.change_cmd['question_dict'][ + 'question_state_data_schema_version'], + 27 + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='QUESTION MODELS COUNT SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='SUGGESTION MIGRATED SUCCESS: 1') + ]) + + suggestion = suggestion_models.GeneralSuggestionModel.get_by_id( + suggestion_id) + + self.assertEqual( + suggestion.change_cmd['question_dict'][ + 'question_state_data_schema_version'], + feconf.CURRENT_STATE_SCHEMA_VERSION + ) + + def test_migration_errors_are_reported_in_job_result(self) -> None: + skill_id = skill_services.get_new_skill_id() + self.save_new_skill( + skill_id, self.author_id, description='description') + suggestion_id = ( + self.save_new_question_suggestion_with_state_data_schema_v27( + self.author_id, skill_id) + ) + migrate_state_schema_raise = self.swap_to_always_raise( + question_fetchers, 'migrate_state_schema') + with 
migrate_state_schema_raise: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'SUGGESTION MIGRATED ERROR: "(\'%s\', ' + 'Exception())": 1' % suggestion_id) + ), + job_run_result.JobRunResult( + stdout='QUESTION MODELS COUNT SUCCESS: 1'), + ]) + + +class AuditMigrateQuestionSuggestionsJobTests( + job_test_utils.JobTestBase, test_utils.GenericTestBase): + + JOB_CLASS = suggestion_migration_jobs.AuditMigrateQuestionSuggestionsJob + + AUTHOR_EMAIL: Final = 'author@example.com' + + def setUp(self) -> None: + super().setUp() + self.signup(self.AUTHOR_EMAIL, 'author') + self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_unmigrated_question_suggestion_is_not_migrated(self) -> None: + skill_id = skill_services.get_new_skill_id() + self.save_new_skill( + skill_id, self.author_id, description='description') + suggestion_id = ( + self.save_new_question_suggestion_with_state_data_schema_v27( + self.author_id, skill_id + ) + ) + suggestion = suggestion_models.GeneralSuggestionModel.get_by_id( + suggestion_id) + + self.assertEqual( + suggestion.change_cmd['question_dict'][ + 'question_state_data_schema_version'], + 27 + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='QUESTION MODELS COUNT SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='SUGGESTION MIGRATED SUCCESS: 1') + ]) + + suggestion = suggestion_models.GeneralSuggestionModel.get_by_id( + suggestion_id) + + self.assertEqual( + suggestion.change_cmd['question_dict'][ + 'question_state_data_schema_version'], + 27 + ) + + def test_audit_errors_are_reported_in_job_result(self) -> None: + skill_id = skill_services.get_new_skill_id() + self.save_new_skill( + skill_id, self.author_id, description='description') + suggestion_id = ( + self.save_new_question_suggestion_with_state_data_schema_v27( + self.author_id, skill_id) + ) + migrate_state_schema_raise = 
self.swap_to_always_raise( + question_fetchers, 'migrate_state_schema') + with migrate_state_schema_raise: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'SUGGESTION MIGRATED ERROR: "(\'%s\', ' + 'Exception())": 1' % suggestion_id) + ), + job_run_result.JobRunResult( + stdout='QUESTION MODELS COUNT SUCCESS: 1'), + ]) diff --git a/core/jobs/batch_jobs/suggestion_stats_computation_jobs.py b/core/jobs/batch_jobs/suggestion_stats_computation_jobs.py index b67ab1f0974e..4824593a1b8d 100644 --- a/core/jobs/batch_jobs/suggestion_stats_computation_jobs.py +++ b/core/jobs/batch_jobs/suggestion_stats_computation_jobs.py @@ -21,12 +21,13 @@ import datetime from core import feconf +from core.domain import exp_domain from core.domain import html_cleaner from core.domain import opportunity_domain from core.domain import opportunity_services -from core.domain import state_domain from core.domain import suggestion_registry from core.domain import suggestion_services +from core.domain import translation_domain from core.jobs import base_jobs from core.jobs.io import ndb_io from core.jobs.transforms import job_result_transforms @@ -35,7 +36,10 @@ import apache_beam as beam -from typing import Dict, Iterable, Optional, Tuple, Union +import result + +from typing import ( + Dict, Iterable, Iterator, List, Optional, Set, Tuple, TypedDict, Union) MYPY = False if MYPY: # pragma: no cover @@ -43,12 +47,22 @@ from mypy_imports import opportunity_models from mypy_imports import suggestion_models -(opportunity_models, suggestion_models) = models.Registry.import_models( - [models.NAMES.opportunity, models.NAMES.suggestion]) +(opportunity_models, suggestion_models) = models.Registry.import_models([ + models.Names.OPPORTUNITY, models.Names.SUGGESTION +]) datastore_services = models.Registry.import_datastore_services() +class TranslationContributionsStatsDict(TypedDict): + """Type for the translation contributions stats dictionary.""" + + suggestion_status: str + 
edited_by_reviewer: bool + content_word_count: int + last_updated_date: datetime.date + + class GenerateTranslationContributionStatsJob(base_jobs.JobBase): """Job that indexes the explorations in Elastic Search.""" @@ -64,8 +78,6 @@ def run(self) -> beam.PCollection[job_run_result.JobRunResult]: | 'Get all non-deleted suggestion models' >> ndb_io.GetModels( suggestion_models.GeneralSuggestionModel.get_all( include_deleted=False)) - # We need to window the models so that CoGroupByKey below - # works properly. | 'Filter translate suggestions' >> beam.Filter( lambda m: ( m.suggestion_type == @@ -80,15 +92,13 @@ def run(self) -> beam.PCollection[job_run_result.JobRunResult]: | 'Get all non-deleted opportunity models' >> ndb_io.GetModels( opportunity_models.ExplorationOpportunitySummaryModel.get_all( include_deleted=False)) - # We need to window the models so that CoGroupByKey below - # works properly. | 'Transform to opportunity domain object' >> beam.Map( opportunity_services. get_exploration_opportunity_summary_from_model) | 'Group by ID' >> beam.GroupBy(lambda m: m.id) ) - new_user_stats_models = ( + user_stats_results = ( { 'suggestion': suggestions_grouped_by_target, 'opportunity': exp_opportunities @@ -101,27 +111,56 @@ def run(self) -> beam.PCollection[job_run_result.JobRunResult]: list(x['opportunity'][0])[0] if len(x['opportunity']) else None )) + ) + + user_stats_models = ( + user_stats_results + | 'Filter ok results' >> beam.Filter( + lambda key_and_result: key_and_result[1].is_ok()) + | 'Unpack result' >> beam.MapTuple( + lambda key, result: (key, result.unwrap())) | 'Combine the stats' >> beam.CombinePerKey(CombineStats()) | 'Generate models from stats' >> beam.MapTuple( self._generate_translation_contribution_model) ) + user_stats_error_job_run_results = ( + user_stats_results + | 'Filter err results' >> beam.Filter( + lambda key_and_result: key_and_result[1].is_err()) + # Pylint disable is needed because pylint is not able to correctly + # detect that 
the value is passed through the pipe. + | 'Remove keys' >> beam.Values() # pylint: disable=no-value-for-parameter + | 'Transform result to job run result' >> ( + job_result_transforms.ResultsToJobRunResults()) + ) + unused_put_result = ( - new_user_stats_models + user_stats_models | 'Put models into the datastore' >> ndb_io.PutModels() ) - return ( - new_user_stats_models + user_stats_models_job_run_results = ( + user_stats_models | 'Create job run result' >> ( job_result_transforms.CountObjectsToJobRunResult()) ) + return ( + ( + user_stats_error_job_run_results, + user_stats_models_job_run_results + ) + | 'Merge job run results' >> beam.Flatten() + ) + @staticmethod def _generate_stats( suggestions: Iterable[suggestion_registry.SuggestionTranslateContent], opportunity: Optional[opportunity_domain.ExplorationOpportunitySummary] - ) -> Iterable[Tuple[str, Dict[str, Union[bool, int, str]]]]: + ) -> Iterator[ + Tuple[str, result.Result[Dict[str, Union[bool, int, str]], str]] + ]: """Generates translation contribution stats for each suggestion. Args: @@ -146,33 +185,44 @@ def _generate_stats( topic_id = opportunity.topic_id for suggestion in suggestions: - # Content in set format is a list, content in unicode and html - # format is a string. This code normalizes the content to the list - # type so that we can easily count words. - if state_domain.WrittenTranslation.is_data_format_list( - suggestion.change.data_format - ): - content_items = suggestion.change.content_html - else: - content_items = [suggestion.change.content_html] - - content_word_count = 0 - for item in content_items: - # Count the number of words in the original content, ignoring - # any HTML tags and attributes. 
- content_plain_text = html_cleaner.strip_html_tags(item) # type: ignore[no-untyped-call,attr-defined] - content_word_count += len(content_plain_text.split()) - key = ( - suggestion_models.TranslationContributionStatsModel.generate_id( + suggestion_models + .TranslationContributionStatsModel.construct_id( suggestion.language_code, suggestion.author_id, topic_id)) - translation_contribution_stats_dict = { - 'suggestion_status': suggestion.status, - 'edited_by_reviewer': suggestion.edited_by_reviewer, - 'content_word_count': content_word_count, - 'last_updated_date': suggestion.last_updated.date().isoformat() - } - yield (key, translation_contribution_stats_dict) + try: + change = suggestion.change + # In the new translation command the content in set format is + # a list, content in unicode and html format is a string. + # This code normalizes the content to the list type so that + # we can easily count words. + if ( + change.cmd == exp_domain.CMD_ADD_WRITTEN_TRANSLATION and + translation_domain.TranslatableContentFormat + .is_data_format_list(change.data_format) + ): + content_items: Union[str, List[str]] = change.content_html + else: + content_items = [change.content_html] + + content_word_count = 0 + for item in content_items: + # Count the number of words in the original content, + # ignoring any HTML tags and attributes. 
+ content_plain_text = html_cleaner.strip_html_tags(item) + content_word_count += len(content_plain_text.split()) + + translation_contribution_stats_dict = { + 'suggestion_status': suggestion.status, + 'edited_by_reviewer': suggestion.edited_by_reviewer, + 'content_word_count': content_word_count, + 'last_updated_date': ( + suggestion.last_updated.date().isoformat()) + } + yield (key, result.Ok(translation_contribution_stats_dict)) + except Exception as e: + yield ( + key, result.Err('%s: %s' % (suggestion.suggestion_id, e)) + ) @staticmethod def _generate_translation_contribution_model( @@ -219,6 +269,10 @@ def _generate_translation_contribution_model( return translation_contributions_stats_model +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to assume +# that CombineFn class is of type Any. Thus to avoid MyPy's error (Class cannot +# subclass 'CombineFn' (has type 'Any')), we added an ignore here. 
class CombineStats(beam.CombineFn): # type: ignore[misc] """CombineFn for combining the stats.""" @@ -230,7 +284,7 @@ def create_accumulator( def add_input( self, accumulator: suggestion_registry.TranslationContributionStats, - translation: Dict[str, Union[bool, int, str]] + translation: TranslationContributionsStatsDict ) -> suggestion_registry.TranslationContributionStats: is_accepted = ( translation['suggestion_status'] == @@ -245,7 +299,7 @@ def add_input( word_count = translation['content_word_count'] suggestion_date = datetime.datetime.strptime( str(translation['last_updated_date']), '%Y-%m-%d').date() - return suggestion_registry.TranslationContributionStats( # type: ignore[no-untyped-call] + return suggestion_registry.TranslationContributionStats( accumulator.language_code, accumulator.contributor_user_id, accumulator.topic_id, @@ -272,7 +326,13 @@ def merge_accumulators( self, accumulators: Iterable[suggestion_registry.TranslationContributionStats] ) -> suggestion_registry.TranslationContributionStats: - return suggestion_registry.TranslationContributionStats( # type: ignore[no-untyped-call] + contribution_dates: Set[datetime.date] = set() + all_contribution_dates = [ + acc.contribution_dates for acc in accumulators + ] + contribution_dates = contribution_dates.union(*all_contribution_dates) + + return suggestion_registry.TranslationContributionStats( list(accumulators)[0].language_code, list(accumulators)[0].contributor_user_id, list(accumulators)[0].topic_id, @@ -286,7 +346,7 @@ def merge_accumulators( sum(acc.accepted_translation_word_count for acc in accumulators), sum(acc.rejected_translations_count for acc in accumulators), sum(acc.rejected_translation_word_count for acc in accumulators), - set().union(*[acc.contribution_dates for acc in accumulators]) + contribution_dates ) def extract_output( diff --git a/core/jobs/batch_jobs/suggestion_stats_computation_jobs_test.py b/core/jobs/batch_jobs/suggestion_stats_computation_jobs_test.py index 
4b8187f0b990..2bb2de10f2c7 100644 --- a/core/jobs/batch_jobs/suggestion_stats_computation_jobs_test.py +++ b/core/jobs/batch_jobs/suggestion_stats_computation_jobs_test.py @@ -29,34 +29,42 @@ import apache_beam as beam -from typing import Dict, List, Set, Tuple, Union +from typing import Dict, Final, List, Set, Tuple, Type, Union MYPY = False if MYPY: from mypy_imports import opportunity_models from mypy_imports import suggestion_models -(opportunity_models, suggestion_models) = models.Registry.import_models( - [models.NAMES.opportunity, models.NAMES.suggestion]) +(opportunity_models, suggestion_models) = models.Registry.import_models([ + models.Names.OPPORTUNITY, models.Names.SUGGESTION +]) StatsType = List[Tuple[str, Dict[str, Union[bool, int, str]]]] class GenerateTranslationContributionStatsJobTests(job_test_utils.JobTestBase): - JOB_CLASS = ( + JOB_CLASS: Type[ suggestion_stats_computation_jobs - .GenerateTranslationContributionStatsJob) - - VALID_USER_ID_1 = 'uid_%s' % ('a' * feconf.USER_ID_RANDOM_PART_LENGTH) - VALID_USER_ID_2 = 'uid_%s' % ('b' * feconf.USER_ID_RANDOM_PART_LENGTH) - EXP_1_ID = 'exp_1_id' - EXP_2_ID = 'exp_2_id' - TOPIC_2_ID = 'topic_2_id' - TOPIC_1_ID = 'topic_1_id' - TOPIC_2_ID = 'topic_2_id' - LANG_1 = 'lang_1' - LANG_2 = 'lang_2' + .GenerateTranslationContributionStatsJob + ] = ( + suggestion_stats_computation_jobs + .GenerateTranslationContributionStatsJob + ) + + VALID_USER_ID_1: Final = 'uid_%s' % ( + 'a' * feconf.USER_ID_RANDOM_PART_LENGTH + ) + VALID_USER_ID_2: Final = 'uid_%s' % ( + 'b' * feconf.USER_ID_RANDOM_PART_LENGTH + ) + EXP_1_ID: Final = 'exp_1_id' + EXP_2_ID: Final = 'exp_2_id' + TOPIC_1_ID: Final = 'topic_1_id' + TOPIC_2_ID: Final = 'topic_2_id' + LANG_1: Final = 'lang_1' + LANG_2: Final = 'lang_2' def test_empty_storage(self) -> None: self.assert_job_output_is_empty() @@ -111,7 +119,8 @@ def test_creates_stats_model_from_one_in_review_suggestion(self) -> None: suggestion_models.TranslationContributionStatsModel.get( 
self.LANG_1, self.VALID_USER_ID_1, '')) - self.assertIsNotNone(translation_stats_model) + # Ruling out the possibility of None for mypy type checking. + assert translation_stats_model is not None self.assertEqual(translation_stats_model.language_code, self.LANG_1) self.assertEqual( translation_stats_model.contributor_user_id, self.VALID_USER_ID_1) @@ -131,7 +140,100 @@ def test_creates_stats_model_from_one_in_review_suggestion(self) -> None: self.assertEqual(translation_stats_model.rejected_translations_count, 0) self.assertEqual( translation_stats_model.rejected_translation_word_count, 0) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( + translation_stats_model.contribution_dates, + [datetime.date.today()] + ) + + def test_reports_failure_on_broken_model(self) -> None: + suggestion_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + id='suggestion_id', + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id=self.VALID_USER_ID_1, + change_cmd={ + 'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION, + 'state_name': 'state', + 'content_id': 'content_id', + 'language_code': 'lang', + 'content_html': 111, + 'translation_html': '111 222 333', + 'data_format': 'html' + }, + score_category='irelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.EXP_1_ID, + target_version_at_submission=0, + language_code=self.LANG_1 + ) + suggestion_model.update_timestamps() + suggestion_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'ERROR: "suggestion_id: argument cannot be of \'int\' ' + 'type, must be of text type": 1' + ) + ) + ]) + + def test_creates_stats_model_from_one_suggestion_in_legacy_format( + self + ) -> None: + suggestion_model = self.create_model( + suggestion_models.GeneralSuggestionModel, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + author_id=self.VALID_USER_ID_1, + change_cmd={ + 'cmd': 
exp_domain.DEPRECATED_CMD_ADD_TRANSLATION, + 'state_name': 'state', + 'content_id': 'content_id', + 'language_code': 'lang', + 'content_html': '111 a', + 'translation_html': '111 a' + }, + score_category='irelevant', + status=suggestion_models.STATUS_IN_REVIEW, + target_type='exploration', + target_id=self.EXP_1_ID, + target_version_at_submission=0, + language_code=self.LANG_1 + ) + suggestion_model.update_timestamps() + suggestion_model.put() + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='SUCCESS: 1') + ]) + + translation_stats_model = ( + suggestion_models.TranslationContributionStatsModel.get( + self.LANG_1, self.VALID_USER_ID_1, '')) + + # Ruling out the possibility of None for mypy type checking. + assert translation_stats_model is not None + self.assertEqual(translation_stats_model.language_code, self.LANG_1) + self.assertEqual( + translation_stats_model.contributor_user_id, self.VALID_USER_ID_1) + self.assertEqual(translation_stats_model.topic_id, '') + self.assertEqual( + translation_stats_model.submitted_translations_count, 1) + self.assertEqual( + translation_stats_model.submitted_translation_word_count, 2) + self.assertEqual(translation_stats_model.accepted_translations_count, 0) + self.assertEqual( + translation_stats_model + .accepted_translations_without_reviewer_edits_count, + 0 + ) + self.assertEqual( + translation_stats_model.accepted_translation_word_count, 0) + self.assertEqual(translation_stats_model.rejected_translations_count, 0) + self.assertEqual( + translation_stats_model.rejected_translation_word_count, 0) + self.assertItemsEqual( translation_stats_model.contribution_dates, [datetime.date.today()] ) @@ -170,7 +272,8 @@ def test_creates_stats_model_from_one_suggestion_in_set_format( suggestion_models.TranslationContributionStatsModel.get( self.LANG_1, self.VALID_USER_ID_1, '')) - self.assertIsNotNone(translation_stats_model) + # Ruling out the possibility of None for mypy type checking. 
+ assert translation_stats_model is not None self.assertEqual(translation_stats_model.language_code, self.LANG_1) self.assertEqual( translation_stats_model.contributor_user_id, self.VALID_USER_ID_1) @@ -190,13 +293,13 @@ def test_creates_stats_model_from_one_suggestion_in_set_format( self.assertEqual(translation_stats_model.rejected_translations_count, 0) self.assertEqual( translation_stats_model.rejected_translation_word_count, 0) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( translation_stats_model.contribution_dates, [datetime.date.today()] ) def test_creates_stats_model_from_one_in_review_suggestion_with_opportunity( - self + self ) -> None: suggestion_model = self.create_model( suggestion_models.GeneralSuggestionModel, @@ -241,7 +344,8 @@ def test_creates_stats_model_from_one_in_review_suggestion_with_opportunity( suggestion_models.TranslationContributionStatsModel.get( self.LANG_1, self.VALID_USER_ID_1, self.TOPIC_1_ID)) - self.assertIsNotNone(translation_stats_model) + # Ruling out the possibility of None for mypy type checking. + assert translation_stats_model is not None self.assertEqual(translation_stats_model.language_code, self.LANG_1) self.assertEqual( translation_stats_model.contributor_user_id, self.VALID_USER_ID_1) @@ -261,7 +365,7 @@ def test_creates_stats_model_from_one_in_review_suggestion_with_opportunity( self.assertEqual(translation_stats_model.rejected_translations_count, 0) self.assertEqual( translation_stats_model.rejected_translation_word_count, 0) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( translation_stats_model.contribution_dates, [datetime.date.today()] ) @@ -298,7 +402,8 @@ def test_creates_stats_model_from_one_accepted_suggestion(self) -> None: suggestion_models.TranslationContributionStatsModel.get( self.LANG_1, self.VALID_USER_ID_1, '')) - self.assertIsNotNone(translation_stats_model) + # Ruling out the possibility of None for mypy type checking. 
+ assert translation_stats_model is not None self.assertEqual(translation_stats_model.language_code, self.LANG_1) self.assertEqual( translation_stats_model.contributor_user_id, self.VALID_USER_ID_1) @@ -318,7 +423,7 @@ def test_creates_stats_model_from_one_accepted_suggestion(self) -> None: self.assertEqual(translation_stats_model.rejected_translations_count, 0) self.assertEqual( translation_stats_model.rejected_translation_word_count, 0) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( translation_stats_model.contribution_dates, [datetime.date.today()] ) @@ -378,7 +483,8 @@ def test_creates_stats_model_from_multiple_suggestions(self) -> None: suggestion_models.TranslationContributionStatsModel.get( self.LANG_1, self.VALID_USER_ID_1, '')) - self.assertIsNotNone(translation_stats_model) + # Ruling out the possibility of None for mypy type checking. + assert translation_stats_model is not None self.assertEqual(translation_stats_model.language_code, self.LANG_1) self.assertEqual( translation_stats_model.contributor_user_id, self.VALID_USER_ID_1) @@ -398,7 +504,7 @@ def test_creates_stats_model_from_multiple_suggestions(self) -> None: self.assertEqual(translation_stats_model.rejected_translations_count, 1) self.assertEqual( translation_stats_model.rejected_translation_word_count, 5) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( translation_stats_model.contribution_dates, [ datetime.date.today(), diff --git a/core/jobs/batch_jobs/topic_migration_jobs.py b/core/jobs/batch_jobs/topic_migration_jobs.py new file mode 100644 index 000000000000..16d2c2633aef --- /dev/null +++ b/core/jobs/batch_jobs/topic_migration_jobs.py @@ -0,0 +1,337 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Jobs used for migrating the topic models.""" + +from __future__ import annotations + +import logging + +from core import feconf +from core.domain import topic_domain +from core.domain import topic_fetchers +from core.domain import topic_services +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam +import result +from typing import Iterable, Sequence, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + from mypy_imports import topic_models + +(base_models, topic_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.TOPIC]) +datastore_services = models.Registry.import_datastore_services() + + +class MigrateTopicJob(base_jobs.JobBase): + """Job that migrates topic models.""" + + @staticmethod + def _migrate_topic( + topic_id: str, + topic_model: topic_models.TopicModel + ) -> result.Result[Tuple[str, topic_domain.Topic], Tuple[str, Exception]]: + """Migrates topic and transform topic model into topic object. + + Args: + topic_id: str. The id of the topic. + topic_model: TopicModel. The topic model to migrate. + + Returns: + Result((str, Topic), (str, Exception)). Result containing tuple that + consist of topic ID and either topic object or Exception. Topic + object is returned when the migration was successful and Exception + is returned otherwise. 
+ """ + try: + topic = topic_fetchers.get_topic_from_model(topic_model) + topic.validate() + except Exception as e: + logging.exception(e) + return result.Err((topic_id, e)) + + return result.Ok((topic_id, topic)) + + @staticmethod + def _generate_topic_changes( + topic_id: str, + topic_model: topic_models.TopicModel + ) -> Iterable[Tuple[str, topic_domain.TopicChange]]: + """Generates topic change objects. Topic change object is generated when + schema version for some field is lower than the latest schema version. + + Args: + topic_id: str. The ID of the topic. + topic_model: TopicModel. The topic for which to generate the change + objects. + + Yields: + (str, TopicChange). Tuple containing Topic ID and topic change + object. + """ + subtopic_version = topic_model.subtopic_schema_version + if subtopic_version < feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION: + topic_change = topic_domain.TopicChange({ + 'cmd': ( + topic_domain.CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION), + 'from_version': subtopic_version, + 'to_version': feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION + }) + yield (topic_id, topic_change) + + story_version = topic_model.story_reference_schema_version + if story_version < feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION: + topic_change = topic_domain.TopicChange({ + 'cmd': ( + topic_domain.CMD_MIGRATE_STORY_REFERENCE_SCHEMA_TO_LATEST_VERSION), # pylint: disable=line-too-long + 'from_version': story_version, + 'to_version': feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION + }) + yield (topic_id, topic_change) + + @staticmethod + def _update_topic( + topic_model: topic_models.TopicModel, + migrated_topic: topic_domain.Topic, + topic_changes: Sequence[topic_domain.TopicChange] + ) -> Sequence[base_models.BaseModel]: + """Generates newly updated topic models. + + Args: + topic_model: TopicModel. The topic which should be updated. + migrated_topic: Topic. The migrated topic domain object. + topic_changes: TopicChange. The topic changes to apply. 
+ + Returns: + sequence(BaseModel). Sequence of models which should be put into + the datastore. + """ + updated_topic_model = topic_services.populate_topic_model_fields( + topic_model, migrated_topic) + topic_rights_model = topic_models.TopicRightsModel.get( + migrated_topic.id + ) + change_dicts = [change.to_dict() for change in topic_changes] + with datastore_services.get_ndb_context(): + models_to_put = updated_topic_model.compute_models_to_commit( + feconf.MIGRATION_BOT_USER_ID, + feconf.COMMIT_TYPE_EDIT, + 'Update subtopic contents schema version to %d.' % ( + feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION), + change_dicts, + additional_models={'rights_model': topic_rights_model} + ) + models_to_put_values = [] + for model in models_to_put.values(): + # Here, we are narrowing down the type from object to BaseModel. + assert isinstance(model, base_models.BaseModel) + models_to_put_values.append(model) + datastore_services.update_timestamps_multi(list(models_to_put_values)) + return models_to_put_values + + @staticmethod + def _update_topic_summary( + migrated_topic: topic_domain.Topic, + topic_summary_model: topic_models.TopicSummaryModel + ) -> topic_models.TopicSummaryModel: + """Generates newly updated topic summary model. + + Args: + migrated_topic: Topic. The migrated topic domain object. + topic_summary_model: TopicSummaryModel. The topic summary model to + update. + + Returns: + TopicSummaryModel. The updated topic summary model to put into the + datastore. + """ + topic_summary = topic_services.compute_summary_of_topic(migrated_topic) + topic_summary.version += 1 + updated_topic_summary_model = ( + topic_services.populate_topic_summary_model_fields( + topic_summary_model, topic_summary + ) + ) + return updated_topic_summary_model + + @staticmethod + def _check_migration_errors( + unused_migrated_topic: topic_domain.Topic, + is_no_migration_error: beam.pvalue.AsSingleton + ) -> bool: + """Checks if any migration errors have occured. 
+ + Args: + unused_migrated_topic: Topic. Unused migrated topic domain object. + is_no_migration_error: beam.pvalue.AsSingleton. Side input data + specifying non-zero erros during migration. + + Returns: + bool. Specifies whether any migration errors were found. + """ + return bool(is_no_migration_error) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + """Returns a PCollection of results from the topic migration. + + Returns: + PCollection. A PCollection of results from the topic migration. + """ + + unmigrated_topic_models = ( + self.pipeline + | 'Get all non-deleted topic models' >> ( + ndb_io.GetModels(topic_models.TopicModel.get_all())) + # Pylint disable is needed becasue pylint is not able to correclty + # detect that the value is passed through the pipe. + | 'Add topic keys' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda topic_model: topic_model.id) + ) + topic_summary_models = ( + self.pipeline + | 'Get all non-deleted topic summary models' >> ( + ndb_io.GetModels(topic_models.TopicSummaryModel.get_all())) + # Pylint disable is needed because pylint is not able to correctly + # detect that the value is passed through the pipe. 
+ | 'Add topic summary keys' >> beam.WithKeys( # pylint: disable=no-value-for-parameter + lambda topic_summary_model: topic_summary_model.id) + ) + + all_migrated_topic_results = ( + unmigrated_topic_models + | 'Transform and migrate model' >> beam.MapTuple( + self._migrate_topic) + ) + + migrated_topic_job_run_results = ( + all_migrated_topic_results + | 'Generates results for migration' >> ( + job_result_transforms.ResultsToJobRunResults( + 'TOPIC PROCESSED')) + ) + + migration_error_check = ( + all_migrated_topic_results + | 'Filter errors' >> beam.Filter( + lambda result_item: result_item.is_err()) + | 'Count number of errors' >> beam.combiners.Count.Globally() + | 'Check if error count is zero' >> beam.Map(lambda x: x == 0) + ) + + migrated_topic_results = ( + all_migrated_topic_results + | 'Remove all results in case of migration errors' >> beam.Filter( + self._check_migration_errors, + is_no_migration_error=beam.pvalue.AsSingleton( + migration_error_check)) + ) + + migrated_topics = ( + migrated_topic_results + | 'Unwrap ok' >> beam.Map( + lambda result_item: result_item.unwrap()) + ) + + topic_changes = ( + unmigrated_topic_models + | 'Generates topic changes' >> beam.FlatMapTuple( + self._generate_topic_changes) + ) + + topic_objects_list = ( + { + 'topic_model': unmigrated_topic_models, + 'topic_summary_model': topic_summary_models, + 'topic': migrated_topics, + 'topic_changes': topic_changes + } + | 'Merge objects' >> beam.CoGroupByKey() + | 'Get rid of ID' >> beam.Values() # pylint: disable=no-value-for-parameter + ) + + transformed_topic_objects_list = ( + topic_objects_list + | 'Remove unmigrated topics' >> beam.Filter( + lambda x: len(x['topic_changes']) > 0 and len(x['topic']) > 0) + | 'Reorganize the topic objects' >> beam.Map(lambda objects: { + 'topic_model': objects['topic_model'][0], + 'topic_summary_model': objects['topic_summary_model'][0], + 'topic': objects['topic'][0], + 'topic_changes': objects['topic_changes'] + }) + + ) + + 
already_migrated_job_run_results = ( + topic_objects_list + | 'Remove migrated jobs' >> beam.Filter( + lambda x: ( + len(x['topic_changes']) == 0 and len(x['topic']) > 0 + )) + | 'Transform previously migrated topics into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOPIC PREVIOUSLY MIGRATED')) + ) + + topic_objects_list_job_run_results = ( + transformed_topic_objects_list + | 'Transform topic objects into job run results' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOPIC MIGRATED')) + ) + + topic_models_to_put = ( + transformed_topic_objects_list + | 'Generate topic models to put' >> beam.FlatMap( + lambda topic_objects: self._update_topic( + topic_objects['topic_model'], + topic_objects['topic'], + topic_objects['topic_changes'], + )) + ) + + topic_summary_model_to_put = ( + transformed_topic_objects_list + | 'Generate topic summary to put' >> beam.Map( + lambda topic_objects: self._update_topic_summary( + topic_objects['topic'], + topic_objects['topic_summary_model'] + )) + ) + + unused_put_results = ( + (topic_models_to_put, topic_summary_model_to_put) + | 'Merge models' >> beam.Flatten() + | 'Put models into datastore' >> ndb_io.PutModels() + ) + + return ( + ( + migrated_topic_job_run_results, + already_migrated_job_run_results, + topic_objects_list_job_run_results + ) + | beam.Flatten() + ) diff --git a/core/jobs/batch_jobs/topic_migration_jobs_test.py b/core/jobs/batch_jobs/topic_migration_jobs_test.py new file mode 100644 index 000000000000..fffa7f3d0246 --- /dev/null +++ b/core/jobs/batch_jobs/topic_migration_jobs_test.py @@ -0,0 +1,294 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for jobs.batch_jobs.topic_migration_jobs.""" + +from __future__ import annotations + +import datetime + +from core import feconf +from core.domain import topic_domain +from core.jobs import job_test_utils +from core.jobs.batch_jobs import topic_migration_jobs +from core.jobs.types import job_run_result +from core.platform import models + +from typing import Type +from typing_extensions import Final + +MYPY = True +if MYPY: + from mypy_imports import topic_models + +(topic_model,) = models.Registry.import_models([models.Names.TOPIC]) + + +class MigrateTopicJobTests(job_test_utils.JobTestBase): + + JOB_CLASS: Type[ + topic_migration_jobs.MigrateTopicJob + ] = topic_migration_jobs.MigrateTopicJob + + TOPIC_1_ID: Final = 'topic_1_id' + TOPIC_2_ID: Final = 'topic_2_id' + + def setUp(self) -> None: + super().setUp() + first_topic_summary_model = self.create_model( + topic_models.TopicSummaryModel, + id=self.TOPIC_1_ID, + name='topic summary', + canonical_name='topic summary', + language_code='cs', + description='description', + url_fragment='/fragm', + topic_model_last_updated=datetime.datetime.utcnow(), + topic_model_created_on=datetime.datetime.utcnow(), + canonical_story_count=0, + additional_story_count=0, + total_skill_count=0, + total_published_node_count=0, + uncategorized_skill_count=0, + subtopic_count=0, + version=1 + ) + first_topic_summary_model.update_timestamps() + first_topic_summary_model.put() + + second_topic_summary_model = self.create_model( + topic_models.TopicSummaryModel, + id=self.TOPIC_2_ID, + name='topic summary', + 
canonical_name='topic summary', + language_code='cs', + description='description', + url_fragment='/fragm', + topic_model_last_updated=datetime.datetime.utcnow(), + topic_model_created_on=datetime.datetime.utcnow(), + canonical_story_count=0, + additional_story_count=0, + total_skill_count=0, + total_published_node_count=0, + uncategorized_skill_count=0, + subtopic_count=0, + version=1 + ) + second_topic_summary_model.update_timestamps() + second_topic_summary_model.put() + + first_topic_rights_model = self.create_model( + topic_models.TopicRightsModel, + id=self.TOPIC_1_ID, + topic_is_published=False + ) + first_topic_rights_model.commit( + feconf.SYSTEM_COMMITTER_ID, + 'Create topic rights', + [{'cmd': topic_domain.CMD_CREATE_NEW}] + ) + + second_topic_rights_model = self.create_model( + topic_models.TopicRightsModel, + id=self.TOPIC_2_ID, + topic_is_published=False + ) + second_topic_rights_model.commit( + feconf.SYSTEM_COMMITTER_ID, + 'Create topic rights', + [{'cmd': topic_domain.CMD_CREATE_NEW}] + ) + mock_story_reference_schema_version = 2 + # A mock method to update story references is being + # used since there are no higher versions for story reference + # schema. This should be removed when newer schema versions are + # added. 
+ def mock_update_story_references_from_model( + unused_cls: Type[topic_domain.Topic], + versioned_story_references: topic_domain.VersionedStoryReferencesDict, # pylint: disable=line-too-long + current_version: int + ) -> None: + versioned_story_references['schema_version'] = current_version + 1 + + self.story_reference_schema_version_swap = self.swap( + feconf, 'CURRENT_STORY_REFERENCE_SCHEMA_VERSION', + mock_story_reference_schema_version) + self.update_story_reference_swap = self.swap( + topic_domain.Topic, 'update_story_references_from_model', + classmethod(mock_update_story_references_from_model)) + + def test_empty_storage(self) -> None: + self.assert_job_output_is_empty() + + def test_unmigrated_topic_with_unmigrated_prop_is_migrated(self) -> None: + with self.story_reference_schema_version_swap, self.update_story_reference_swap: # pylint: disable=line-too-long + unmigrated_topic_model = self.create_model( + topic_models.TopicModel, + id=self.TOPIC_1_ID, + name='topic title', + description='description', + canonical_name='topic title', + subtopic_schema_version=3, + story_reference_schema_version=1, + next_subtopic_id=1, + language_code='cs', + url_fragment='topic', + page_title_fragment_for_web='fragm', + ) + unmigrated_topic_model.update_timestamps() + unmigrated_topic_model.commit( + feconf.SYSTEM_COMMITTER_ID, + 'Create topic', + [{'cmd': topic_domain.CMD_CREATE_NEW}] + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOPIC PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='TOPIC MIGRATED SUCCESS: 1'), + ]) + + migrated_topic_model = topic_models.TopicModel.get(self.TOPIC_1_ID) + self.assertEqual(migrated_topic_model.version, 2) + self.assertEqual( + migrated_topic_model.subtopic_schema_version, + feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION) + self.assertEqual( + migrated_topic_model.story_reference_schema_version, + feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION) + + def 
test_topic_summary_of_unmigrated_topic_is_updated(self) -> None: + unmigrated_topic_model = self.create_model( + topic_models.TopicModel, + id=self.TOPIC_1_ID, + name='topic title', + description='description', + canonical_name='topic title', + subtopic_schema_version=3, + story_reference_schema_version=1, + next_subtopic_id=1, + language_code='cs', + url_fragment='topic', + page_title_fragment_for_web='fragm', + ) + unmigrated_topic_model.update_timestamps() + unmigrated_topic_model.commit( + feconf.SYSTEM_COMMITTER_ID, + 'Create topic', + [{'cmd': topic_domain.CMD_CREATE_NEW}] + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='TOPIC PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult(stdout='TOPIC MIGRATED SUCCESS: 1'), + ]) + migrated_topic_summary_model = topic_models.TopicSummaryModel.get( + self.TOPIC_1_ID + ) + self.assertEqual(migrated_topic_summary_model.version, 2) + + def test_broken_topic_leads_to_no_migration(self) -> None: + first_unmigrated_topic_model = self.create_model( + topic_models.TopicModel, + id=self.TOPIC_1_ID, + name='topic title', + canonical_name='topic title', + description='description', + subtopic_schema_version=feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, + story_reference_schema_version=1, + next_subtopic_id=1, + language_code='abc', + url_fragment='topic', + page_title_fragment_for_web='fragm', + ) + first_unmigrated_topic_model.update_timestamps() + first_unmigrated_topic_model.commit( + feconf.SYSTEM_COMMITTER_ID, + 'Create topic', + [{'cmd': topic_domain.CMD_CREATE_NEW}] + ) + + second_unmigrated_topic_model = self.create_model( + topic_models.TopicModel, + id=self.TOPIC_2_ID, + name='topic title', + canonical_name='topic title', + description='description', + subtopic_schema_version=3, + story_reference_schema_version=1, + next_subtopic_id=1, + language_code='cs', + url_fragment='topic', + page_title_fragment_for_web='fragm', + ) + second_unmigrated_topic_model.update_timestamps() + 
second_unmigrated_topic_model.commit( + feconf.SYSTEM_COMMITTER_ID, + 'Create topic', + [{'cmd': topic_domain.CMD_CREATE_NEW}] + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'TOPIC PROCESSED ERROR: "(\'topic_1_id\', ValidationError(' + '\'Invalid language code: abc\'' + '))": 1' + ) + ), + job_run_result.JobRunResult( + stdout='TOPIC PROCESSED SUCCESS: 1' + ) + ]) + first_migrated_topic_model = topic_models.TopicModel.get( + self.TOPIC_1_ID) + self.assertEqual(first_migrated_topic_model.version, 1) + + second_migrated_topic_model = topic_models.TopicModel.get( + self.TOPIC_2_ID) + self.assertEqual(second_migrated_topic_model.version, 1) + + def test_migrated_topic_is_not_migrated(self) -> None: + unmigrated_topic_model = self.create_model( + topic_models.TopicModel, + id=self.TOPIC_1_ID, + name='topic title', + description='description', + canonical_name='topic title', + subtopic_schema_version=feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, + story_reference_schema_version=1, + next_subtopic_id=1, + language_code='cs', + url_fragment='topic', + page_title_fragment_for_web='fragm', + ) + unmigrated_topic_model.update_timestamps() + unmigrated_topic_model.commit( + feconf.SYSTEM_COMMITTER_ID, + 'Create topic', + [{'cmd': topic_domain.CMD_CREATE_NEW}] + ) + + self.assert_job_output_is([ + job_run_result.JobRunResult(stdout='TOPIC PROCESSED SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='TOPIC PREVIOUSLY MIGRATED SUCCESS: 1' + ), + ]) + + migrated_topic_model = topic_models.TopicModel.get(self.TOPIC_1_ID) + self.assertEqual(migrated_topic_model.version, 1) diff --git a/core/jobs/batch_jobs/translation_migration_jobs.py b/core/jobs/batch_jobs/translation_migration_jobs.py new file mode 100644 index 000000000000..16d13436619e --- /dev/null +++ b/core/jobs/batch_jobs/translation_migration_jobs.py @@ -0,0 +1,200 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Jobs that migrate translation models.""" + +from __future__ import annotations + +import logging + +from core import feconf +from core.domain import state_domain +from core.domain import translation_domain +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +import apache_beam as beam +import result +from typing import Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import exp_models + from mypy_imports import translation_models + +(exp_models, translation_models) = models.Registry.import_models( + [models.Names.EXPLORATION, models.Names.TRANSLATION]) + +datastore_services = models.Registry.import_datastore_services() + + +class EntityTranslationsModelGenerationOneOffJob(base_jobs.JobBase): + """Generate EntityTranslation models for explorations.""" + + DATASTORE_UPDATES_ALLOWED = True + + @staticmethod + def _generate_validated_entity_translations_for_exploration( + exploration: exp_models.ExplorationModel + ) -> result.Result[ + Tuple[str, translation_domain.EntityTranslation], + Tuple[str, Exception] + ]: + """Generates EntityTranslation object for the given exploration. + + Args: + exploration: ExplorationModel. The exploration model. 
+ + Returns: + Result(list(EntityTranslation), (str, Exception)). Result containing + list of EntityTranslation objects. + """ + try: + lang_code_to_translation = {} + (old_content_id_to_new_content_id, _) = ( + state_domain.State + .generate_old_content_id_to_new_content_id_in_v54_states( + exploration.states)) + for state_name in exploration.states: + translations_mapping = exploration.states[state_name][ + 'written_translations']['translations_mapping'] + for content_id in translations_mapping: + new_content_id = ( + old_content_id_to_new_content_id[state_name][content_id] + ) + for lang_code in translations_mapping[content_id]: + if lang_code not in lang_code_to_translation: + lang_code_to_translation[lang_code] = ( + translation_domain.EntityTranslation( + exploration.id, + feconf.TranslatableEntityType.EXPLORATION, + exploration.version, lang_code, {})) + + translation_dict = translations_mapping[content_id][ + lang_code] + lang_code_to_translation[lang_code].add_translation( + new_content_id, + translation_dict['translation'], + translation_domain.TranslatableContentFormat( + translation_dict['data_format']), + translation_dict['needs_update'] + ) + for entity_translation in lang_code_to_translation.values(): + entity_translation.validate() + except Exception as e: + logging.exception(e) + return result.Err((exploration.id, e)) + + return result.Ok(list(lang_code_to_translation.values())) + + @staticmethod + def _create_entity_translation_model( + entity_translation: translation_domain.EntityTranslation + ) -> result.Result[ + translation_models.EntityTranslationsModel, + Tuple[str, Exception] + ]: + """Creates the EntityTranslationsModel from the given EntityTranslation + object. + + Args: + entity_translation: EntityTranslation. The EntityTranslation object. + + Returns: + Result(EntityTranslationModel, (str, Exception)). Result containing + the EntityTranslationModel for the given EntityTranslation opbject. 
+ """ + try: + with datastore_services.get_ndb_context(): + translation_model = ( + translation_models.EntityTranslationsModel.create_new( + entity_translation.entity_type, + entity_translation.entity_id, + entity_translation.entity_version, + entity_translation.language_code, + entity_translation.to_dict()['translations'] + )) + translation_model.update_timestamps() + except Exception as e: + logging.exception(e) + return result.Err((entity_translation.entity_id, e)) + + return result.Ok(translation_model) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + entity_translations_result = ( + self.pipeline + | 'Get all exploration models' >> ndb_io.GetModels( + exp_models.ExplorationModel.get_all( + include_deleted=False)) + | 'Generate EntityTranslation objects for exploration' >> beam.Map( + self._generate_validated_entity_translations_for_exploration) + ) + + new_translation_models_results = ( + entity_translations_result + | 'Filter the results with OK status' >> beam.Filter( + lambda result: result.is_ok()) + | 'Fetch the translation objects' >> beam.FlatMap( + lambda result: result.unwrap()) + | 'Create models from objects' >> beam.Map( + self._create_entity_translation_model) + ) + + if self.DATASTORE_UPDATES_ALLOWED: + unused_data = ( + new_translation_models_results + | 'Filter model results with OK status' >> beam.Filter( + lambda result: result.is_ok()) + | 'Fetch the models to be put' >> beam.Map( + lambda result: result.unwrap()) + | 'Put models into the datastore' >> ndb_io.PutModels() + ) + + traverse_exp_job_run_results = ( + entity_translations_result + | 'Generate traverse results' >> ( + job_result_transforms.ResultsToJobRunResults( + 'EXPLORATION MODELS TRAVERSED')) + ) + + generate_translations_job_run_results = ( + new_translation_models_results + | 'Generate translation results' >> ( + job_result_transforms.ResultsToJobRunResults( + 'GENERATED TRANSLATIONS')) + ) + + return ( + ( + generate_translations_job_run_results, + 
traverse_exp_job_run_results + ) + | beam.Flatten() + + ) + + +class AuditEntityTranslationsModelGenerationOneOffJob( + EntityTranslationsModelGenerationOneOffJob +): + """Audit EntityTranslationsModelGenerationOneOffJob.""" + + DATASTORE_UPDATES_ALLOWED = False diff --git a/core/jobs/batch_jobs/translation_migration_jobs_test.py b/core/jobs/batch_jobs/translation_migration_jobs_test.py new file mode 100644 index 000000000000..920a1d635947 --- /dev/null +++ b/core/jobs/batch_jobs/translation_migration_jobs_test.py @@ -0,0 +1,410 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for translation migration jobs.""" + +from __future__ import annotations + +from core import feconf +from core.domain import exp_domain +from core.domain import rights_manager +from core.domain import state_domain +from core.jobs import job_test_utils +from core.jobs.batch_jobs import translation_migration_jobs +from core.jobs.types import job_run_result +from core.platform import models +from core.tests import test_utils + +from typing import Sequence +from typing_extensions import Final + +MYPY = False +if MYPY: + from mypy_imports import exp_models + from mypy_imports import translation_models + +(exp_models, translation_models) = models.Registry.import_models([ + models.Names.EXPLORATION, models.Names.TRANSLATION +]) + +STATE_DICT_IN_V52 = { + 'content': { + 'content_id': 'content', + 'html': 'Content for the state' + }, + 'param_changes': [], + 'interaction': { + 'solution': None, + 'answer_groups': [{ + 'outcome': { + 'feedback': { + 'content_id': 'feedback_1', + 'html': '

    Feedback

    ' + }, + 'missing_prerequisite_skill_id': None, + 'dest': 'End', + 'dest_if_really_stuck': None, + 'param_changes': [], + 'labelled_as_correct': False, + 'refresher_exploration_id': None + }, + 'training_data': [], + 'rule_specs': [{ + 'inputs': { + 'x': { + 'normalizedStrSet': [ + 'Hello', + 'Hola', + 'Hi'], + 'contentId': 'rule_input_2' + } + }, + 'rule_type': 'StartsWith' + }], + 'tagged_skill_misconception_id': None + }], + 'default_outcome': { + 'param_changes': [], + 'feedback': { + 'content_id': 'default_outcome', + 'html': 'Default outcome' + }, + 'dest': 'Introduction', + 'dest_if_really_stuck': None, + 'refresher_exploration_id': None, + 'missing_prerequisite_skill_id': None, + 'labelled_as_correct': False + }, + 'customization_args': { + 'catchMisspellings': { + 'value': False + }, + 'rows': { + 'value': 1 + }, + 'placeholder': { + 'value': { + 'unicode_str': 'Placeholder for the text...', + 'content_id': 'ca_placeholder_1' + } + } + }, + 'confirmed_unclassified_answers': [], + 'id': 'TextInput', + 'hints': [] + }, + 'linked_skill_id': None, + 'recorded_voiceovers': { + 'voiceovers_mapping': { + 'content': {}, + 'default_outcome': {}, + 'ca_placeholder_1': {}, + 'feedback_1': {}, + 'rule_input_2': {} + } + }, + 'written_translations': { + 'translations_mapping': { + 'content': { + 'hi': { + 'data_format': 'html', + 'translation': 'Translated content in Hindi', + 'needs_update': False + }, + 'bn': { + 'data_format': 'html', + 'translation': 'Translated content in Bangla', + 'needs_update': False + } + }, + 'default_outcome': { + 'hi': { + 'data_format': 'html', + 'translation': 'Translated outcome in Hindi', + 'needs_update': False + }, + 'bn': { + 'data_format': 'html', + 'translation': 'Translated outcome in Bangla', + 'needs_update': False + } + }, + 'ca_placeholder_1': { + 'hi': { + 'data_format': 'unicode', + 'translation': 'Translated placeholder in Hindi', + 'needs_update': False + }, + 'bn': { + 'data_format': 'unicode', + 'translation': 
'Translated placeholder in Bangla', + 'needs_update': False + } + }, + 'feedback_1': { + 'hi': { + 'data_format': 'html', + 'translation': 'Translated feedback in Hindi', + 'needs_update': False + }, + 'bn': { + 'data_format': 'html', + 'translation': 'Translated feedback in Bangla', + 'needs_update': False + } + }, + 'rule_input_2': { + 'hi': { + 'data_format': 'set_of_normalized_string', + 'translation': ['test1', 'test2', 'test3'], + 'needs_update': False + }, + 'bn': { + 'data_format': 'set_of_normalized_string', + 'translation': ['test1', 'test2', 'test3'], + 'needs_update': False + } + } + } + }, + 'classifier_model_id': None, + 'card_is_checkpoint': False, + 'solicit_answer_details': False, + 'next_content_id_index': 2 +} + + +class EntityTranslationsModelGenerationOneOffJobTests( + job_test_utils.JobTestBase, test_utils.GenericTestBase): + + JOB_CLASS = ( + translation_migration_jobs.EntityTranslationsModelGenerationOneOffJob) + + AUTHOR_EMAIL: Final = 'author@example.com' + + def setUp(self) -> None: + super().setUp() + self.signup(self.AUTHOR_EMAIL, 'author') + self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) + + rights_manager.create_new_exploration_rights('exp1', self.author_id) + model = self.create_model( + exp_models.ExplorationModel, + id='exp1', + title='title', + init_state_name=feconf.DEFAULT_INIT_STATE_NAME, + category=feconf.DEFAULT_EXPLORATION_CATEGORY, + objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE, + language_code='en', + tags=['Topic'], + blurb='blurb', + author_notes='author notes', + states_schema_version=52, + param_specs={}, + param_changes=[], + auto_tts_enabled=feconf.DEFAULT_AUTO_TTS_ENABLED, + correctness_feedback_enabled=False, + states={feconf.DEFAULT_INIT_STATE_NAME: STATE_DICT_IN_V52}, + ) + commit_cmd = exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_CREATE_NEW, + 'title': 'title', + 'category': 'category', + }) + commit_cmds_dict = [commit_cmd.to_dict()] + model.commit(self.author_id, 'commit_message', 
commit_cmds_dict) + + def test_entity_translation_model_generated_from_old_exp(self) -> None: + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='EXPLORATION MODELS TRAVERSED SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='GENERATED TRANSLATIONS SUCCESS: 2'), + ]) + + entity_translation_models = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 2) + + def test_job_raises_error_for_failing_exp_traversal_steps(self) -> None: + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + raise_swap = self.swap_to_always_raise( + state_domain.State, + 'generate_old_content_id_to_new_content_id_in_v54_states' + ) + with raise_swap: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'EXPLORATION MODELS TRAVERSED ERROR: "(\'exp1\', ' + 'Exception())": 1')) + ]) + + entity_translation_models = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + + def test_job_raises_error_for_failing_model_creation_steps(self) -> None: + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + raise_swap = self.swap_to_always_raise( + translation_models.EntityTranslationsModel, + 'create_new' + ) + with raise_swap: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout=('EXPLORATION MODELS TRAVERSED SUCCESS: 1')), + job_run_result.JobRunResult( + stderr=( + 'GENERATED 
TRANSLATIONS ERROR: "(\'exp1\', ' + 'Exception())": 2')), + ]) + + entity_translation_models = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + + +class AuditEntityTranslationsModelGenerationOneOffJobTests( + job_test_utils.JobTestBase, test_utils.GenericTestBase): + + JOB_CLASS = ( + translation_migration_jobs. + AuditEntityTranslationsModelGenerationOneOffJob) + + AUTHOR_EMAIL: Final = 'author@example.com' + + def setUp(self) -> None: + super().setUp() + self.signup(self.AUTHOR_EMAIL, 'author') + self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) + + rights_manager.create_new_exploration_rights('exp1', self.author_id) + model = self.create_model( + exp_models.ExplorationModel, + id='exp1', + title='title', + init_state_name=feconf.DEFAULT_INIT_STATE_NAME, + category=feconf.DEFAULT_EXPLORATION_CATEGORY, + objective=feconf.DEFAULT_EXPLORATION_OBJECTIVE, + language_code='en', + tags=['Topic'], + blurb='blurb', + author_notes='author notes', + states_schema_version=52, + param_specs={}, + param_changes=[], + auto_tts_enabled=feconf.DEFAULT_AUTO_TTS_ENABLED, + correctness_feedback_enabled=False, + states={feconf.DEFAULT_INIT_STATE_NAME: STATE_DICT_IN_V52}, + ) + commit_cmd = exp_domain.ExplorationChange({ + 'cmd': exp_domain.CMD_CREATE_NEW, + 'title': 'title', + 'category': 'category', + }) + commit_cmds_dict = [commit_cmd.to_dict()] + model.commit(self.author_id, 'commit_message', commit_cmds_dict) + + def test_entity_translation_model_not_generated_from_old_exp(self) -> None: + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='EXPLORATION MODELS TRAVERSED SUCCESS: 1'), + job_run_result.JobRunResult( + stdout='GENERATED TRANSLATIONS SUCCESS: 2'), + ]) 
+ + entity_translation_models = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + + def test_job_raises_error_for_failing_exp_traversal_steps(self) -> None: + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + raise_swap = self.swap_to_always_raise( + state_domain.State, + 'generate_old_content_id_to_new_content_id_in_v54_states' + ) + with raise_swap: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stderr=( + 'EXPLORATION MODELS TRAVERSED ERROR: "(\'exp1\', ' + 'Exception())": 1')) + ]) + + entity_translation_models = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + + def test_job_raises_error_for_failing_model_creation_steps(self) -> None: + entity_translation_models: Sequence[ + translation_models.EntityTranslationsModel + ] = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) + raise_swap = self.swap_to_always_raise( + translation_models.EntityTranslationsModel, + 'create_new' + ) + with raise_swap: + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout=('EXPLORATION MODELS TRAVERSED SUCCESS: 1')), + job_run_result.JobRunResult( + stderr=( + 'GENERATED TRANSLATIONS ERROR: "(\'exp1\', ' + 'Exception())": 2')), + ]) + + entity_translation_models = ( + translation_models.EntityTranslationsModel.get_all().fetch()) + + self.assertEqual(len(entity_translation_models), 0) diff --git a/core/jobs/batch_jobs/user_settings_profile_picture_jobs.py b/core/jobs/batch_jobs/user_settings_profile_picture_jobs.py new file mode 100644 index 000000000000..4acf33c868ac --- /dev/null +++ b/core/jobs/batch_jobs/user_settings_profile_picture_jobs.py @@ -0,0 +1,228 @@ +# coding: 
utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Jobs for UserSettingsModel profile picture.""" + +from __future__ import annotations + +import io +import logging +import os + +from core import constants +from core import utils +from core.domain import image_services +from core.domain import user_services +from core.jobs import base_jobs +from core.jobs.io import ndb_io +from core.jobs.transforms import job_result_transforms +from core.jobs.types import job_run_result +from core.platform import models + +from PIL import Image +import apache_beam as beam +from typing import List, Optional, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) + + +class AuditInvalidProfilePictureJob(base_jobs.JobBase): + """Audit job to fetch invalid images from UserSettingsModel.""" + + def _get_invalid_image( + self, picture_str: Optional[str] + ) -> Optional[List[str]]: + """Helper function to filter the invalid profile pictures. + + Args: + picture_str: Optional[str]. The profile picture data. + + Returns: + Optional[List[str]]. None, if the image is valid otherwise + the invalid image data. + """ + invalid_image = [] + try: + # Ruling out the possibility of different types for + # mypy type checking. 
+ assert isinstance(picture_str, str) + imgdata = utils.convert_png_data_url_to_binary(picture_str) + image = Image.open(io.BytesIO(imgdata)) + width, height = image.size + if width != 150 and height != 150: + invalid_image.append( + f'wrong dimensions - height = {height} and width = {width}' + ) + except Exception: + logging.exception('ERRORED EXCEPTION AUDIT') + invalid_image.append( + f'Image is not base64 having value - {picture_str}') + + if len(invalid_image) != 0: + return invalid_image + return None + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + users_with_valid_username = ( + self.pipeline + | 'Get all non-deleted UserSettingsModel' >> ndb_io.GetModels( + user_models.UserSettingsModel.get_all(include_deleted=False)) + | 'Filter valid users with not None username' >> beam.Filter( + lambda model: model.username is not None) + ) + + invalid_user_profile_picture = ( + users_with_valid_username + | 'Get invalid images' >> beam.Map( + lambda model: (model.username, self._get_invalid_image( + model.profile_picture_data_url))) + | 'Filter invalid images' >> beam.Filter( + lambda model: model[1] is not None) + ) + + total_invalid_images = ( + invalid_user_profile_picture + | 'Total number of invalid images' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOTAL INVALID IMAGES')) + ) + + total_user_with_valid_username = ( + users_with_valid_username + | 'Total valid users' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'TOTAL USERS WITH VALID USERNAME')) + ) + + report_invalid_images = ( + invalid_user_profile_picture + | 'Report the data' >> beam.Map(lambda data: ( + job_run_result.JobRunResult.as_stderr( + f'The username is {data[0]} and the invalid image ' + f'details are {data[1]}.' 
+ ) + )) + ) + + return ( + ( + total_invalid_images, + report_invalid_images, + total_user_with_valid_username + ) + | 'Combine results' >> beam.Flatten() + ) + + +class FixInvalidProfilePictureJob(base_jobs.JobBase): + """Fix invalid profile pictures inside UserSettingsModel.""" + + def _fix_invalid_images( + self, user_model: user_models.UserSettingsModel + ) -> Tuple[user_models.UserSettingsModel, bool]: + """Helper function to fix the invalid images. + + Args: + user_model: user_models.UserSettingsModel. The UserSettingsModel. + + Returns: + Tuple[user_models.UserSettingsModel, bool]. The tuple containing + updated UserSettingsModel and a bool value that tells whether the + profile picture present is the default data url or not. + """ + profile_picture_data = user_model.profile_picture_data_url + width, height = 0, 0 + + try: + imgdata = utils.convert_png_data_url_to_binary(profile_picture_data) + height, width = image_services.get_image_dimensions(imgdata) + except Exception: + logging.exception('ERRORED EXCEPTION MIGRATION') + user_model.profile_picture_data_url = ( + user_services.fetch_gravatar(user_model.email)) + + if ( + user_model.profile_picture_data_url == + user_services.DEFAULT_IDENTICON_DATA_URL or ( + width == 76 and height == 76) + ): + default_image_path = os.path.join( + 'images', 'avatar', 'user_blue_150px.png') + raw_image_png = constants.get_package_file_contents( + 'assets', default_image_path, binary_mode=True) + user_model.profile_picture_data_url = ( + utils.convert_png_or_webp_binary_to_data_url( + raw_image_png, 'png')) + + # Here we need to check for the default image again because there is a + # possibility that in the above check we are not able to generate the + # gravatar for the user having default image and we want to keep track + # of all the default images. 
+ imgdata = utils.convert_png_data_url_to_binary( + user_model.profile_picture_data_url) + height, width = image_services.get_image_dimensions(imgdata) + if ( + user_model.profile_picture_data_url == + user_services.DEFAULT_IDENTICON_DATA_URL or ( + width == 76 and height == 76) + ): + return (user_model, False) + + return (user_model, True) + + def run(self) -> beam.PCollection[job_run_result.JobRunResult]: + fixed_user_profile_picture = ( + self.pipeline + | 'Get all non-deleted UserSettingsModel' >> ndb_io.GetModels( + user_models.UserSettingsModel.get_all(include_deleted=False)) + | 'Filter user with valid usernames' >> beam.Filter( + lambda model: model.username is not None) + | 'Get invalid images' >> beam.Map(self._fix_invalid_images) + ) + + default_profile_picture = ( + fixed_user_profile_picture + | 'Filter default profile pictures' >> beam.Filter( + lambda value: value[1] is False) + | 'Total count for user models having default profile picture' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'DEFAULT PROFILE PICTURE')) + ) + + count_user_models_iterated = ( + fixed_user_profile_picture + | 'Total count for user models' >> ( + job_result_transforms.CountObjectsToJobRunResult( + 'USER MODELS ITERATED')) + ) + + unused_put_results = ( + fixed_user_profile_picture + | 'Map with only models' >> beam.Map(lambda value: value[0]) + | 'Put models into the datastore' >> ndb_io.PutModels() + ) + + return ( + ( + count_user_models_iterated, + default_profile_picture + ) + | 'Combine results' >> beam.Flatten() + ) diff --git a/core/jobs/batch_jobs/user_settings_profile_picture_jobs_test.py b/core/jobs/batch_jobs/user_settings_profile_picture_jobs_test.py new file mode 100644 index 000000000000..e9c71abb3082 --- /dev/null +++ b/core/jobs/batch_jobs/user_settings_profile_picture_jobs_test.py @@ -0,0 +1,203 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for user_settings_profile_picture_jobs.""" + +from __future__ import annotations + +from core import feconf +from core.domain import image_services +from core.domain import user_services +from core.jobs import job_test_utils +from core.jobs.batch_jobs import user_settings_profile_picture_jobs +from core.jobs.types import job_run_result +from core.platform import models + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +datastore_services = models.Registry.import_datastore_services() + +(user_models,) = models.Registry.import_models([models.Names.USER]) + +VALID_IMAGE = 
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJYAAACWCAYAAAA8AXHiAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAVjklEQVR4nO1da7AdVZlde%2B/uPvceHqXCwBSDo/gIESzUqanyz0zNKPKIhIQIMiUQ3gmvAUVkfISXFkwNzqhAEAlBRQekSJBoAkbCSygUZMAHCiMPIcww0RQZHgM5957d%2BzE/uu%2B95957Ht29v%2B7d58b1J8m53V9/qbvOt1evvfe3mbXWggDWbkFLLoDFmHOsUJwIZTbC2q3OsRrBhQjFUuc4dYSxL8DalyD4X/tOZRY4VSDG9kIgjiSKtQsisZwkVqy/DYvtJLHqAmM3o61WoCUXQtuHfKfTFWTEAoBInAYgcg9kgUAcCcb%2B3DmUsS9C6XXuOdUAxj6PtroALbkYsV4LoO07pZ4gJRZjeyIURxMEAhiaiMSJ7rEw/FXL2BfQViswJo9ArNcABHKjbJASCwAicSqAUZJYoTgejP2Zcxxj/xtK/4Ago2ph7ItphToMsV5Lol%2BrAjmxGNsLoTiKKFqASJxMEknq1UPzi5moUC15cFqhpO%2BUcoOcWAAQipPAsBNRrGNJtJa1W6D09wkyKg%2BdojzRUMp3SoVRCrE42xuBWEIUbQSROIUkktSrUMdvv5MoJzGL6FEKsQDaqpW8Ie7pHMfarenQUg%2BQiHJGnhYJSiMWZ29FII4gicWwc2pluEPq6%2BH7rWqYRXlWlEYsAIjEMjCiN8RALCaqWlsQ61sJMsqPuSDKs6JUYpG68RheN34uifKsKJVYAKEbj%2BFz4ytxync08T4BMjcew%2BPGD6NTTo3SiQXsOG68F1G%2Bo70VdmKuu/E7kijPikqIBcxNN35HFOVZURmx5pIbX6vlKzuqeO/EsLvxtRTljEa7UsLaP1RLrGF34%2BvklHM2DyPhSkTiBN%2BpTMLarWirS9GSC6olFjDsbrxvUc7A2XyMhFehGd2OgB8CIPScE2DtH9FWl2G7PAix/i4sWtUT609ufBEwcLY/RsIr0IzWIeCH%2Bk4IQDLktdW/oCUXINbfATA%2B%2BbPKiQVkcONzCNJhc%2BPzIiHUlWhGaxDwBQCE75RSQl2Oljwcsf5W1y%2BkF2INdONzmH7D4sbnBWf7pUPeLWmFqsOQ9xKk%2BjJachFi/U1Y/F/Pa70QC9hx3Pi8mBDlzWhtSiiaeVYXWPsypPoKWnIBpL4eFq8NvMcbsea6G58PdRXl2yDVV9GSH4HUq/pWqJnwRixgbrrx%2BVBXUf4SpLoyrVDXwuKN3DG8EqunG1/ITR6utfH1FOXbINVVaMWLIPXXMw15veCVWECPqlVwxn4Y1sbXU5S/CqlWpoS6Gtb%2Br3NM78Qadjc%2BK2opyvE6pL4GrXgBpF4Ja7eRxfZOLGDY3fh%2BqKkox2uQ%2BhuJKFdXkFSomagFseaeG19XUf4qpL4uEeXqa7D2ldKeVQtiATPceMelID7d%2BFqKcrwKqVcnGkr9G%2BmQ1wu1IdY0N95xua0PN76WohxvINbXoyUXQap/hbV/rOzZtSEWMJxufD1F%2BRhi/W205AK01ZcrJdQEgsqf2AdluPFtdblzJKlXIxAf6/iEgbN9EQVn1kY/AYDFdii9Npk9sC95zaVWxAISX0ubTUSxjoXU33H%2Bxk658Qyc7YcoWI6AH4w66CcgGfKUXpcSqvrq1A1M6futNr/OeRdmCWzG9kg1kvt%2BJGXWw5gXnOOACcBKSH2Neyi2Jxh2RcAPhKt%2B4nw%2BrN1GIqIFfz%2Bs3QZjX0wTRfK76fwdVfyZta8jYGw3SL3S%2BT8ICAj%2BPnA23zmSsS%2BQ5NQIvogwOBoWryHWNznFsnYrLLZC6mec4nA2H6PiE7DsZYzFS51f%2BSOcjSg42ykGFSxaUPo2xGYNOGf7EekEDancK
wMVGsElCMUnAAg0ggvSv/sFZ/MwGn4LjO2e/v27YOzNbkFrsWFVIdY3pC8LX4K1W8EBhlCcAgq9oMyPYezT7nk6QSSVShwz4LNqwdm%2BGA3/HYzt3vez3PC6/auNWN%2BA7fJDaKt/hrV/mPwJBwDB34eAf4TkUVJRDKtF0b86TVWxatGvOnVWsULwULEstiPWN2K7PCQl1OyDHiZ9rDBYBgpTT5l7YeyTbkEKfQsnSHVs36sawUUDr6EEZ/MxGt7Qd8hLrilIrgorVkKom9GSh6VD3pae104SS7ADEPC/JXh8DKmuJYiTD9kJk42AFMhDmCwE9IcxxPpmjMmFaKuL%2BxJqAtOc9yg4BxTWVlK1nigeIGd5zz/ElS/oiwxxZIKeDBqxvimtUBfD2P/JfOc0YiVviBRVS0KqVSi/TruI8vIEvYsoJxH0zmgj1t/DdvlhtNUXpzyyHJg1VxgF55Gkpsw97lqrL2iqDrWgp6g6uaodoXi3aCHWt6QV6pJpb3l5MYtYnM0jekOMIdV1KFS1Bt5Cq5OoBD2lTsqszwgGhWTSei3G5CK01YUw9r%2BcY3Zd3RAFZzkHBgBlNsHYp0hidYL%2Bzc6dqE5vdn1jlino2x2EWkFCqAl0JZZ3N75PeS/Piyo%2BtDp7UQNjUwt6jVivQUsuTAlFMC87Az3WY9XRja/CPc//jCrENt0zJGJ9K1ryILTVBaUQagI9F/rVy42vdr4va1Ws0h7oWRUzifcxxPr7aMnD0VZfKPSWlxd9V5B6c%2BOnCdLqDM1ODNJxPgzNrjqur3gfh9K3oSWXoK0%2BD2OfLzvFSfQlVh3c%2BKqnYKbQm9BlCPWsyEZoCaXXoSWXYFx9DsY%2BV1V6kxi45t2LG5%2BWd1%2BTxlOYPQSXKdSzovcQbBDrdWjJRSmhfu8lPwBg1tqBTsh4fBqUuc/5YQE/FCPhlRgkDKS%2BBgxvrsUaqgm01SXQ5rFaTbkY%2BzS0eQyhOBJKb4TUV5cqyPMgE7GMfRotuZDgcSGa0Rpwtn/fq7R5FIJ/AHVZU55gDNo8DsE/6DuRadDmfrTV5TD2Wd%2BpAEgavYTipGzbv6p247V5CG11KQBN8Ex3WLQwFp%2BBcXUujP2d73QmocxdGIvPrgWpOPsLNIILMBqtRyiWZt9XWLUbH%2Bub0FZfInmmCyxaGI/PgjY/g7XbMBafXAtyKXMPxuNPo7OhrA8wticawYqUUMeDYWcAOTasVurGpxIs1jejrS4heGZRjGE8PhPa/HTykyly%2BVuCrcxPMB6fDZ8nYjC2OxrB59CMNiIUJ4Bhl2k/z7ET2o8bH%2Bvvoa0uRtXDosUYxuLl0OZns39mt2EsXlrKPOggJJXqTPg6t4exPdAIPotmdDdCcfJkhZqJXFvsfbnxSeWqUnONYTxeDm1%2B3vMKa1/BWHx8pZVLmbswHn8KPkjF2J6Igs%2BkFeoUMDT7Xp%2B7d0MlbnwXbZ9orvLJNSHU%2B5Fq8lr7CsbiEyvRXAmpzkPVwx9jeyAKzkMzXI9ILJ815PVCbmL5dOPLFvSdQj3zPRUIeh9CnbHdEAWfRjPcgEicltu7K9RtpnQ3vo9/Wp6gny3Us6JMQV%2B1UGd4EyLxSTSjjYjE6YXN4ELE8r02nlrQ9xPqmWOUIOirFOqMvQVRcA6ajbsRBWeB4U1O8Qr3x/K9Np5O0A8W6llBKeirEuqMvQWROAvNcCMi8Y9g2JUkbmFilerGZyxgroI%2Bj1DPHJNA0Fch1BNCnZloqOCT5POfTh396rA2vqigLyLUM8d2EPRlC3WGXRGJM5K3vOBTJF0Pu8GJWKW58Tm3NOUX9MWFelYUEfRlCnWGnRCJZYkoD84FY3uQP6MTjj1I67M2PqugpxDqWZFH0Jcl1Bl2QSiWoRndjSg4v7QKNRPOzW3rtDZ%2BsKCnE%2BpZkUXQlyHUGXZFKE5CM9qIRnA%2BGNuNLHYWk
HRNJnfjHTZh9hL0ZQj1rOgn6KmFelKhTkQzWo9G8PnSh7xeICFWHdbGT4syQ9CXKdSzopugpxXqowjFCWhGG9AIvgDG9iKIWRyZVpBmgbFPoiWPgns5jxDwv4Ey9zrnFIpj0Ag%2Bm1Yqf6TqBGO7YzS8AcZuIdJUEUJxNCJxqncydYKsHTdn%2B2HnhnsTEGNfQFtdRJARoM1j0ObxGrSvnIK1r0Cbx1JB766pQnEMlNmIWN/oHKsRXIhQLHWOA9ToZApjN6OtVqAlF0Kbh5zjTexkEfyD3nfVTGFq1w/VDiTG6nAo1Wx4J5axz6OtLkBLLkas14JCxM7ce%2BdzH%2BAUZu9TJNkzaf0eStUL3oiVDHkrMCaPSE8zpTl0sheJfLdi7E4iml3ePg6lGoTKiWXsi2mFOgyxXkt6YvygzaS%2BWjH2H/Zo%2BlJUdShVVlRGrIkK1ZIHpxWK9jDvrB1Zqm3FmLV7DUUnneRQKgpIvdr5C186sTpFeaKh6JeB5K1E1WyTz1%2BJXAV9KI4l0VpTh1IVR2nEKkOUd0NR7VSuoC%2BundwE/QgicUrBe6dD6lVwGVXIiVWWKO8GV3KUJejdyOEm6JM3RIrD1remv79iICNWmaK8G6iGM2pBT%2BNPFRf0DDun52u7Q%2BrrUbQwOBOrbFHeDdQCnCYedSvL4vECsZioam1BrG8tdG9hYlUhyruhLMvArQKW18qySAVk8O/G5yZWVaK8G8o2OYtptvJbWRbRbL7d%2BMzEqlKUd0NV0zJ5yVtNK8v85PXtxg8kVtWivBuqbs%2BYdbittpVl/uHWpxvfk1g%2BRHk3%2BDq0qP9zfZ3Ymve5/tz4WcTyJcq7wfcxa90rpf8zpvNUSl9u/CSxfIrybvC9GmF6HhPk8tNzvhuyazs/bjzTZrON9XVQ%2BnYv%2BqkbBP8ARoKv12RxXoKpDsX16eQMaBi7GZy9s%2B9VFm%2BgJRd0Pbs5LxJCHzfwOu5TlM9Khs3DSLgSAT8MFi/7TmcajN3ipYNff0hYu23gVT7ceO5LlE%2BBgbP5GAmvQjO6HQE/BBaveWvF2A0Tm0l9ta3shqmNt49kur5qN97j0mQGzvbHSHgFmtG6WVv1fbRi7IaZm0mrb1vZDfk33lbtxnshVkKoK9GM1iDgCzBri366Ia3KVozd0GszaVVtK7vBZeNtlW58pcTibL90yLslrVA9dk93NAXx1Vt90GZSH33oXTfeVunGV0KsCVHejNamhIpy3V91b/WsXV%2Bq7UNP0yGnKje%2BRGLNFuUu/R2q6q2et%2BtLFYKetkNONW58CcTqL8pdULagL9r1pVxBT98hpwo3npRYA0V5VvTpJlGWoHft%2BlKGoC%2BvQ075bjwJsTKL8qwY0NGPWtBTdX2hFPRld8gpe228Y6tIN1HuAipBT92ekUbQl9/Ksmw3vgCxaEW5C1wFfVntGV0EfZWtLMt043Od/lWWKHdBUUFfdh/1YoK%2B2laWZbrxGU9YJRLlWZGzFVxeQV/VgUd5BL2vVpZlufF9iUUuyrMiZztuILugr/rAoyyC3mcry7Lc%2BK7E8inKXTBI0Ps6mbS/oC9fqA9CGW58B7HqI8pd0EvQ%2Bz6ZtJugr1Ko9we9G8/rKspdMFPQ%2BzyZtBPTBX31Pef7gdqNZ9vbH7UBPxBgYSKaO/VNt38jw2fd7s3zGSJMc3M7n2E7/hzwGWO7IxRHQOobAYznunf2ZxLaPgxtfj0z2dwIxYkI%2BIeg7aPd/485Pkv6uh8LitFFmweh7S8L5zLxGWO7gL0%2B/m6SdtwU4OxtCMVyBGIhGEZ9p5MihjI/glSrMRJejpZcQhBToBndBs7e4xxJqpXgfH8E/MMEedHBe3NbAOBsbzSCS9GM7kAoPl4TUhko80O05GKMx/8EY59OD6VaQBB7xqFUjpDqSvge5meCrM97EUxUqFAs8Z1KBySUuRNSrYSxm
2f8LDmUSplNcJ1sVuZOGPsUONvXKQ4AGPufUOaBWlUtT0uT345GcBma0e0IxcdRD1KNQ5n1aMklGI/P60KqBIIfgIAfRPJEqa4iiZPE%2BgqcDiEiRqW/Uc72QShOQSgOB2ox3AFJhdoIqVZnnhaKguVQ8l647nBS5j4Y%2B1tw9l6nOABg7DNQ5i4E/GDnWBSoaGny29AILsNo9AOE4mjUg1RxqqE%2BhvH4/FxzjZy9FwH/O4IcFMmhVBNIdFs9qlapFYuzvRGK0xGKI1Af995AmQ2QahWM/T2K/iKi4Gwo%2BRMAsVM2ytwHbR%2BHYAc4xQEmtNaPEPDDnGO5opSKNVGhmtGmtELVgVQSymxASx6aVqhn4fLt5mw%2BAv73BHnFiNUqp1ymYCHVN1GHN0TSisXZ2xGKZQjFIgANytAOGIcym9IK9Qxp5Cg4F0reDVdSJFrrN%2BAkVeu3UGYTAv5R51guIFqavE/qQ/0wfcurA6lkqqGOwnj8GXJSAQBn7yKaAlOQahUAQxALkGo1fLdOcKpYdXfKq9iHGAVnQMk74UoKZe6BsU8SvSE%2BAWXuSxcS%2BEGhijUsTnkV4GxfomHHQKqrCeIkSA5ud3uxcEGuijV8TnkVYIiCU9Oq5fqGeC%2BMfQKc7e%2BclbFPQ5l7vK1Wybg0eXid8iqQzCHSDDvJvB8NpLoCVLotL/oyZK445VUgCpZBybvgujpVmQehza8g%2BPudczL2OSiz0Yuv1WNp8txyyqsAZ%2B9BwA8kiKQQa0o3/hvw0W5pWsWay055FYiCs9Kq5aq1HoA2v4Dgf%2BWck7HPQOk7EIhFzrHygAM7hlNeBTh7N9EksEKsrwOZG6%2BvR9W%2BFq%2BfKAe0eaQWonwCDDtlbsEdBeeAwndW5n5o80vnOABg7O%2Bg9I9JYmUFr49TPgVtfl6KU54XCaGOw2i0Ho3g4kz3cLYPkVjWqdYicuP1alR6oFZlT8qDAhtWaTGKUCzFaLQBjeAicPbWXHdHwemgqP7KPABtf%2BMcBwCMfQrK3EUSKwvqSSxviBCK47BTtBGN4EJwtnehKJy9C4FYSJCPIfa1rkFVWutPxALAMJoS6h40govA2F7OESNxKigkhjYPQpvHneMAgLHPQplqtNYOTawJUd6MNqaEcm/pMwHO5iEQFDt6AKm/RhIHQDofWb6vVU9ilewqzBTl7hWqO5KqNeIcR5uHoc2jgy/MAGM3Q5nbSWL1Qz2JVZp4dxPlecHZPKI5RA1J6sZfi7JXPtSTWOSgEeWFnhycAQrDWZufkvV5MPY5KF1u1ZrTxKIX5fnB2TsQCBpfK3HQqdz4cn2tOUmsMkV5EUTiLFB0QdTmAWjzH%2B4JIXlDjPUdJLG6oZ7EKvilrEqU5wVnf4lQLCaIZFOtRePGxxnPHiyCehIrt3ivVpQXQShOA53W%2BpV7Qkh9rZLmEOtJrMzwJ8rzIlk0eThBJAupv0oQJ0FSAend%2BKEkVh1EeRGEYhlofK1HoM0v3BNCcsh8rDeQxOrEUBGrbqI8Lzh7B0KSOUSQVq1YXwPq3dP1JNYM8V5XUV4EoTiVZLucNo9Bm4cJMkq6Hcd9zh4sgnoSa1K811%2BU5wWtr0XnxicrVul8rXoSC0Ao/mEoRHkR0L0hPgxtafrDG7sZsV5PEgsA/h8mgptFNsAk2gAAAABJRU5ErkJggg%3D%3D' # pylint: disable=line-too-long + + +class AuditInvalidProfilePictureJobTests(job_test_utils.JobTestBase): + """Fetch invalid profile pictures data.""" + + JOB_CLASS = user_settings_profile_picture_jobs.AuditInvalidProfilePictureJob + + def setUp(self) -> None: + 
super().setUp() + + self.user_1 = self.create_model( + user_models.UserSettingsModel, + id='test_id_1', + email='test_1@example.com', + username='test_1', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + profile_picture_data_url=user_services.DEFAULT_IDENTICON_DATA_URL + ) + + self.user_2 = self.create_model( + user_models.UserSettingsModel, + id='test_id_2', + email='test_2@example.com', + username='test_2', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + ) + + def test_run_with_no_models(self) -> None: + self.assert_job_output_is([]) + + def test_get_invalid_image_dimension_data(self) -> None: + self.put_multi([self.user_1]) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOTAL INVALID IMAGES SUCCESS: 1' + ), + job_run_result.JobRunResult( + stderr=( + 'The username is test_1 and the invalid image details ' + 'are [\'wrong dimensions - height = 76 and width = 76\'].' + ) + ), + job_run_result.JobRunResult( + stdout='TOTAL USERS WITH VALID USERNAME SUCCESS: 1' + ) + ]) + + def test_get_invalid_image_base64_data(self) -> None: + self.put_multi([self.user_2]) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOTAL INVALID IMAGES SUCCESS: 1' + ), + job_run_result.JobRunResult( + stderr=( + 'The username is test_2 and the invalid image details ' + 'are [\'Image is not base64 having value - None\'].' 
+ ) + ), + job_run_result.JobRunResult( + stdout='TOTAL USERS WITH VALID USERNAME SUCCESS: 1' + ) + ]) + + def test_ignore_valid_images(self) -> None: + self.user_1.profile_picture_data_url = VALID_IMAGE + self.put_multi([self.user_1]) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='TOTAL USERS WITH VALID USERNAME SUCCESS: 1' + ) + ]) + + +class FixInvalidProfilePictureJobTests(job_test_utils.JobTestBase): + """Tests to check the fixing of invalid profile picture.""" + + JOB_CLASS = user_settings_profile_picture_jobs.FixInvalidProfilePictureJob + + def setUp(self) -> None: + super().setUp() + + self.user_3 = self.create_model( + user_models.UserSettingsModel, + id='test_id_3', + email='test_3@example.com', + username='test_3', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + profile_picture_data_url=VALID_IMAGE + ) + + self.user_4 = self.create_model( + user_models.UserSettingsModel, + id='test_id_4', + email='test_4@example.com', + username='test_4', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + ) + + self.user_5 = self.create_model( + user_models.UserSettingsModel, + id='test_id_5', + email='test_5@example.com', + username='test_5', + roles=[feconf.ROLE_ID_FULL_USER, feconf.ROLE_ID_CURRICULUM_ADMIN], + profile_picture_data_url=user_services.DEFAULT_IDENTICON_DATA_URL + ) + + def test_run_with_no_models(self) -> None: + self.assert_job_output_is([]) + + def test_iterate_user_model_with_valid_profile_picture(self) -> None: + self.put_multi([self.user_3]) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='USER MODELS ITERATED SUCCESS: 1' + ) + ]) + + migrated_user_model_1 = ( + user_models.UserSettingsModel.get(self.user_3.id) + ) + + self.assertEqual( + migrated_user_model_1.profile_picture_data_url, VALID_IMAGE + ) + + def test_update_user_model_with_invalid_profile_picture(self) -> None: + self.put_multi([self.user_4]) + self.assert_job_output_is([ + 
job_run_result.JobRunResult( + stdout='USER MODELS ITERATED SUCCESS: 1' + ) + ]) + + migrated_user_model_2 = ( + user_models.UserSettingsModel.get(self.user_4.id) + ) + + self.assertEqual( + migrated_user_model_2.profile_picture_data_url, + user_services.fetch_gravatar(migrated_user_model_2.email) + ) + + def test_default_profile_picture(self) -> None: + self.put_multi([self.user_5]) + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='USER MODELS ITERATED SUCCESS: 1' + ) + ]) + + def test_default_profile_picture_report(self) -> None: + self.put_multi([self.user_5]) + with self.swap_to_always_return( + image_services, 'get_image_dimensions', + (76, 76) + ): + self.assert_job_output_is([ + job_run_result.JobRunResult( + stdout='USER MODELS ITERATED SUCCESS: 1' + ), + job_run_result.JobRunResult( + stdout='DEFAULT PROFILE PICTURE SUCCESS: 1' + ) + ]) diff --git a/core/jobs/batch_jobs/user_stats_computation_jobs.py b/core/jobs/batch_jobs/user_stats_computation_jobs.py index 70fdda8556c5..e7cc11b5e00b 100644 --- a/core/jobs/batch_jobs/user_stats_computation_jobs.py +++ b/core/jobs/batch_jobs/user_stats_computation_jobs.py @@ -28,6 +28,7 @@ from core.platform import models import apache_beam as beam + from typing import Iterable MYPY = False @@ -35,7 +36,7 @@ from mypy_imports import datastore_services from mypy_imports import user_models -(user_models,) = models.Registry.import_models([models.NAMES.user]) +(user_models,) = models.Registry.import_models([models.Names.USER]) datastore_services = models.Registry.import_datastore_services() @@ -108,6 +109,10 @@ def run(self) -> beam.PCollection[job_run_result.JobRunResult]: ) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. 
class CreateUserStatsModel(beam.DoFn): # type: ignore[misc] """DoFn to create empty user stats model.""" @@ -130,6 +135,10 @@ def process( yield user_stats_model +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. class UpdateWeeklyCreatorStats(beam.DoFn): # type: ignore[misc] """DoFn to update weekly dashboard stats in the user stats model.""" @@ -150,10 +159,10 @@ def process( schema_version = model.schema_version if schema_version != feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION: - user_services.migrate_dashboard_stats_to_latest_schema(model) # type: ignore[no-untyped-call] + user_services.migrate_dashboard_stats_to_latest_schema(model) weekly_creator_stats = { - user_services.get_current_date_as_string(): { # type: ignore[no-untyped-call] + user_services.get_current_date_as_string(): { 'num_ratings': model.num_ratings or 0, 'average_ratings': model.average_ratings, 'total_plays': model.total_plays or 0 diff --git a/core/jobs/batch_jobs/user_stats_computation_jobs_test.py b/core/jobs/batch_jobs/user_stats_computation_jobs_test.py index 875bcbc09b42..720099cb580e 100644 --- a/core/jobs/batch_jobs/user_stats_computation_jobs_test.py +++ b/core/jobs/batch_jobs/user_stats_computation_jobs_test.py @@ -26,19 +26,27 @@ from core.jobs.types import job_run_result from core.platform import models +from typing import Final, Type + MYPY = False if MYPY: from mypy_imports import user_models -(user_models,) = models.Registry.import_models([models.NAMES.user]) +(user_models,) = models.Registry.import_models([models.Names.USER]) class CollectWeeklyDashboardStatsJobTests(job_test_utils.JobTestBase): - JOB_CLASS = user_stats_computation_jobs.CollectWeeklyDashboardStatsJob + JOB_CLASS: Type[ + 
user_stats_computation_jobs.CollectWeeklyDashboardStatsJob + ] = user_stats_computation_jobs.CollectWeeklyDashboardStatsJob - VALID_USER_ID_1 = 'uid_%s' % ('a' * feconf.USER_ID_RANDOM_PART_LENGTH) - VALID_USER_ID_2 = 'uid_%s' % ('b' * feconf.USER_ID_RANDOM_PART_LENGTH) + VALID_USER_ID_1: Final = 'uid_%s' % ( + 'a' * feconf.USER_ID_RANDOM_PART_LENGTH + ) + VALID_USER_ID_2: Final = 'uid_%s' % ( + 'b' * feconf.USER_ID_RANDOM_PART_LENGTH + ) def setUp(self) -> None: super().setUp() @@ -49,7 +57,7 @@ def test_empty_storage(self) -> None: self.assert_job_output_is_empty() def test_updates_existing_stats_model_when_no_values_are_provided( - self + self ) -> None: user_settings_model = self.create_model( user_models.UserSettingsModel, @@ -92,7 +100,7 @@ def test_fails_when_existing_stats_has_wrong_schema_version(self) -> None: self.put_multi([user_settings_model, user_stats_model]) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Sorry, we can only process v1-v%d dashboard stats schemas at ' 'present.' 
% feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION @@ -108,7 +116,7 @@ def test_fails_when_existing_stats_has_wrong_schema_version(self) -> None: self.assertEqual(new_user_stats_model.weekly_creator_stats_list, []) def test_updates_existing_stats_model_when_values_are_provided( - self + self ) -> None: user_settings_model = self.create_model( user_models.UserSettingsModel, diff --git a/core/jobs/decorators/validation_decorators.py b/core/jobs/decorators/validation_decorators.py index 19fa4e8618a7..b928745e52b8 100644 --- a/core/jobs/decorators/validation_decorators.py +++ b/core/jobs/decorators/validation_decorators.py @@ -31,20 +31,42 @@ import apache_beam as beam from apache_beam import typehints -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +from typing import ( + Callable, Dict, FrozenSet, Iterator, Sequence, Set, Tuple, Type, cast) -datastore_services = models.Registry.import_datastore_services() +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models -_ALL_MODEL_TYPES = frozenset(models.Registry.get_all_storage_model_classes()) -_ALL_BASE_MODEL_TYPES = frozenset( - models.Registry.get_storage_model_classes([models.NAMES.base_model])) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) -_MODEL_TYPES_BY_BASE_CLASS = { +_ALL_MODEL_TYPES: FrozenSet[Type[base_models.BaseModel]] = frozenset( + models.Registry.get_all_storage_model_classes()) + +_ALL_BASE_MODEL_TYPES: FrozenSet[Type[base_models.BaseModel]] = frozenset( + models.Registry.get_storage_model_classes([models.Names.BASE_MODEL])) + +_MODEL_TYPES_BY_BASE_CLASS: Dict[ + Type[base_models.BaseModel], + FrozenSet[Type[base_models.BaseModel]] +] = { base_class: frozenset({base_class}).union( t for t in _ALL_MODEL_TYPES if issubclass(t, base_class)) for base_class in _ALL_BASE_MODEL_TYPES } +# This type is defined for the arguments which can accept functions +# that yields the values of type Tuple(property, List[BaseModel]). 
+ModelRelationshipsType = Callable[ + ..., + Iterator[ + Tuple[ + model_property.PropertyType, + Sequence[Type[base_models.BaseModel]] + ] + ] +] + class AuditsExisting: """Decorator for registering DoFns that audit storage models. @@ -57,9 +79,11 @@ class AuditsExisting: and only if ValidateExplorationModelId inherits from ValidateModelId. """ - _DO_FN_TYPES_BY_KIND = collections.defaultdict(set) + _DO_FN_TYPES_BY_KIND: Dict[ + str, Set[Type[beam.DoFn]] + ] = collections.defaultdict(set) - def __init__(self, *model_types): + def __init__(self, *model_types: Type[base_models.BaseModel]) -> None: """Initializes the decorator to target the given types of models. Args: @@ -68,11 +92,12 @@ def __init__(self, *model_types): targeted as well. Raises: + ValueError. No model given. TypeError. When a non-model type is provided. """ if not model_types: raise ValueError('Must target at least one model') - self._targeted_model_types = set() + self._targeted_model_types: Set[Type[base_models.BaseModel]] = set() for t in model_types: if t in _MODEL_TYPES_BY_BASE_CLASS: self._targeted_model_types.update(_MODEL_TYPES_BY_BASE_CLASS[t]) @@ -85,7 +110,7 @@ def __init__(self, *model_types): job_utils.get_model_kind(t) for t in self._targeted_model_types } - def __call__(self, do_fn_type): + def __call__(self, do_fn_type: Type[beam.DoFn]) -> Type[beam.DoFn]: """Decorator which registers the given DoFn to the targeted models. This decorator also installs type constraints on the DoFn to guard it @@ -123,10 +148,19 @@ def __call__(self, do_fn_type): typehints.with_input_types( typehints.Union[self._targeted_model_types]), typehints.with_output_types(base_validation_errors.BaseAuditError)) - return with_input_types(with_output_types(do_fn_type)) + # TODO(#15613): Here we use cast because the return type of functions + # with_input_types and with_output_types is Any, because these functions + # are not type annotated yet in Apache_beam library. 
Thus to return the + # appropriate type from function instead of Any. We used cast here. + return cast( + Type[beam.DoFn], + with_input_types(with_output_types(do_fn_type)) + ) @classmethod - def get_audit_do_fn_types_by_kind(cls): + def get_audit_do_fn_types_by_kind( + cls + ) -> Dict[str, FrozenSet[Type[beam.DoFn]]]: """Returns the sets of audit DoFns targeting a kind of model. Returns: @@ -161,9 +195,11 @@ def user_auth_details_model_relationships(model): # A dict(ModelProperty: set(str)). The keys are properties of a model whose # values refer to the IDs of their corresponding set of model kinds. - _ID_REFERENCING_PROPERTIES = collections.defaultdict(set) + _ID_REFERENCING_PROPERTIES: Dict[ + model_property.ModelProperty, Set[str] + ] = collections.defaultdict(set) - def __init__(self, model_class): + def __init__(self, model_class: Type[base_models.BaseModel]) -> None: """Initializes a new RelationshipsOf decorator. Args: @@ -172,7 +208,9 @@ def __init__(self, model_class): self._model_kind = self._get_model_kind(model_class) self._model_class = model_class - def __call__(self, model_relationships): + def __call__( + self, model_relationships: ModelRelationshipsType + ) -> ModelRelationshipsType: """Registers the property relationships of self._model_kind yielded by the generator. @@ -199,7 +237,11 @@ def __call__(self, model_relationships): return model_relationships @classmethod - def get_id_referencing_properties_by_kind_of_possessor(cls): + def get_id_referencing_properties_by_kind_of_possessor( + cls + ) -> Dict[ + str, Tuple[Tuple[model_property.ModelProperty, Tuple[str, ...]], ...] + ]: """Returns properties whose values refer to the IDs of the corresponding set of model kinds, grouped by the kind of model the properties belong to. @@ -209,11 +251,15 @@ def get_id_referencing_properties_by_kind_of_possessor(cls): (ModelProperty, set(kind of models)), grouped by the kind of model the properties belong to. 
""" - by_kind = lambda model_property: model_property.model_kind + by_kind: Callable[ + [model_property.ModelProperty], str + ] = lambda model_property: model_property.model_kind id_referencing_properties_by_kind_of_possessor = itertools.groupby( sorted(cls._ID_REFERENCING_PROPERTIES.keys(), key=by_kind), key=by_kind) - references_of = lambda p: cls._ID_REFERENCING_PROPERTIES[p] + references_of: Callable[ + [model_property.ModelProperty], Set[str] + ] = lambda p: cls._ID_REFERENCING_PROPERTIES[p] return { kind: tuple((p, tuple(references_of(p))) for p in properties) for kind, properties in ( @@ -221,7 +267,7 @@ def get_id_referencing_properties_by_kind_of_possessor(cls): } @classmethod - def get_all_model_kinds_referenced_by_properties(cls): + def get_all_model_kinds_referenced_by_properties(cls) -> Set[str]: """Returns all model kinds that are referenced by another's property. Returns: @@ -232,7 +278,9 @@ def get_all_model_kinds_referenced_by_properties(cls): cls._ID_REFERENCING_PROPERTIES.values())) @classmethod - def get_model_kind_references(cls, model_kind, property_name): + def get_model_kind_references( + cls, model_kind: str, property_name: str + ) -> Set[str]: """Returns the kinds of models referenced by the given property. Args: @@ -240,14 +288,19 @@ def get_model_kind_references(cls, model_kind, property_name): property_name: str. The property's name. Returns: - list(str). The kinds of models referenced by the given property. + set(str). The kinds of models referenced by the given property. """ model_cls = job_utils.get_model_class(model_kind) + # Here model_cls is of type Type[datastore_services.Model] but from the + # implementation of ModelProperty it is clear that it can only accept + # Type[base_models.BaseModel]. So to narrow down the type, we used + # assert statement here. 
+ assert issubclass(model_cls, base_models.BaseModel) prop = model_property.ModelProperty( model_cls, getattr(model_cls, property_name)) return cls._ID_REFERENCING_PROPERTIES.get(prop, set()) - def _get_model_kind(self, model_class): + def _get_model_kind(self, model_class: Type[base_models.BaseModel]) -> str: """Returns the kind of the model class. Args: @@ -266,7 +319,9 @@ def _get_model_kind(self, model_class): '%s is not a subclass of BaseModel' % model_class.__name__) return job_utils.get_model_kind(model_class) - def _validate_name_of_model_relationships(self, model_relationships): + def _validate_name_of_model_relationships( + self, model_relationships: ModelRelationshipsType + ) -> None: """Checks that the model_relationships function has the expected name. Args: diff --git a/core/jobs/decorators/validation_decorators_test.py b/core/jobs/decorators/validation_decorators_test.py index a7540522ae46..0492d999101f 100644 --- a/core/jobs/decorators/validation_decorators_test.py +++ b/core/jobs/decorators/validation_decorators_test.py @@ -28,8 +28,16 @@ import apache_beam as beam +from typing import Dict, Final, FrozenSet, Iterator, List, Set, Tuple, Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + from mypy_imports import exp_models + base_models, exp_models = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.exploration]) + [models.Names.BASE_MODEL, models.Names.EXPLORATION]) datastore_services = models.Registry.import_datastore_services() @@ -38,10 +46,12 @@ class MockAuditsExisting(validation_decorators.AuditsExisting): """Subclassed with overrides to avoid modifying the real decorator.""" # Overrides the real value of _DO_FN_TYPES_BY_KIND for the unit tests. 
- _DO_FN_TYPES_BY_KIND = collections.defaultdict(set) + _DO_FN_TYPES_BY_KIND: Dict[str, Set[Type[beam.DoFn]]] = ( + collections.defaultdict(set) + ) @classmethod - def get_audit_do_fn_types(cls, kind): + def get_audit_do_fn_types(cls, kind: str) -> FrozenSet[Type[beam.DoFn]]: """Test-only helper for getting the DoFns of a specific kind of a model. Args: @@ -53,23 +63,31 @@ def get_audit_do_fn_types(cls, kind): return frozenset(cls._DO_FN_TYPES_BY_KIND[kind]) @classmethod - def clear(cls): + def clear(cls) -> None: """Test-only helper method for clearing the decorator.""" cls._DO_FN_TYPES_BY_KIND.clear() -class DoFn(beam.DoFn): +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. +class DoFn(beam.DoFn): # type: ignore[misc] """Simple DoFn that does nothing.""" - def process(self, unused_item): + def process(self, unused_item: None) -> None: """Does nothing.""" pass -class UnrelatedDoFn(beam.DoFn): +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. 
+class UnrelatedDoFn(beam.DoFn): # type: ignore[misc] """Simple DoFn that does nothing.""" - def process(self, unused_item): + def process(self, unused_item: None) -> None: """Does nothing.""" pass @@ -77,7 +95,7 @@ def process(self, unused_item): class DerivedDoFn(DoFn): """Simple DoFn that derives from another.""" - def process(self, unused_item): + def process(self, unused_item: None) -> None: """Does nothing.""" pass @@ -85,7 +103,7 @@ def process(self, unused_item): class NotDoFn: """Class that does not inherit from DoFn.""" - def process(self, unused_item): + def process(self, unused_item: None) -> None: """Does nothing.""" pass @@ -99,7 +117,7 @@ class FooModel(base_models.BaseModel): class BarModel(base_models.BaseModel): """A model that holds a reference to a FooModel ID.""" - BAR_CONSTANT = 1 + BAR_CONSTANT: Final = 1 foo_id = datastore_services.StringProperty() @@ -113,14 +131,14 @@ class BazModel(base_models.BaseModel): class AuditsExistingTests(test_utils.TestBase): - def tearDown(self): - super(AuditsExistingTests, self).tearDown() + def tearDown(self) -> None: + super().tearDown() MockAuditsExisting.clear() - def test_has_no_do_fns_by_default(self): + def test_has_no_do_fns_by_default(self) -> None: self.assertEqual(MockAuditsExisting.get_audit_do_fn_types_by_kind(), {}) - def test_targets_every_subclass_when_a_base_model_is_targeted(self): + def test_targets_every_subclass_when_a_base_model_is_targeted(self) -> None: self.assertIs(MockAuditsExisting(base_models.BaseModel)(DoFn), DoFn) self.assertItemsEqual( @@ -129,7 +147,9 @@ def test_targets_every_subclass_when_a_base_model_is_targeted(self): models.Registry.get_all_storage_model_classes()) ]) - def test_replaces_base_do_fn_when_derived_do_fn_is_added_later(self): + def test_replaces_base_do_fn_when_derived_do_fn_is_added_later( + self + ) -> None: MockAuditsExisting(base_models.BaseModel)(DoFn) MockAuditsExisting(base_models.BaseModel)(UnrelatedDoFn) self.assertItemsEqual( @@ -147,7 +167,7 @@ def 
test_replaces_base_do_fn_when_derived_do_fn_is_added_later(self): MockAuditsExisting.get_audit_do_fn_types('ExplorationModel'), [DerivedDoFn, UnrelatedDoFn]) - def test_keeps_derived_do_fn_when_base_do_fn_is_added_later(self): + def test_keeps_derived_do_fn_when_base_do_fn_is_added_later(self) -> None: MockAuditsExisting(exp_models.ExplorationModel)(DerivedDoFn) MockAuditsExisting(exp_models.ExplorationModel)(UnrelatedDoFn) self.assertItemsEqual( @@ -165,7 +185,7 @@ def test_keeps_derived_do_fn_when_base_do_fn_is_added_later(self): MockAuditsExisting.get_audit_do_fn_types('ExplorationModel'), [DerivedDoFn, UnrelatedDoFn]) - def test_does_not_register_duplicate_do_fns(self): + def test_does_not_register_duplicate_do_fns(self) -> None: MockAuditsExisting(base_models.BaseModel)(DoFn) self.assertItemsEqual( MockAuditsExisting.get_audit_do_fn_types('BaseModel'), [DoFn]) @@ -174,21 +194,21 @@ def test_does_not_register_duplicate_do_fns(self): self.assertItemsEqual( MockAuditsExisting.get_audit_do_fn_types('BaseModel'), [DoFn]) - def test_raises_value_error_when_given_no_args(self): - with self.assertRaisesRegexp( + def test_raises_value_error_when_given_no_args(self) -> None: + with self.assertRaisesRegex( ValueError, 'Must target at least one model' ): MockAuditsExisting() - def test_raises_type_error_when_given_unregistered_model(self): - with self.assertRaisesRegexp( + def test_raises_type_error_when_given_unregistered_model(self) -> None: + with self.assertRaisesRegex( TypeError, re.escape( '%r is not a model registered in core.platform' % FooModel), ): MockAuditsExisting(FooModel) - def test_raises_type_error_when_decorating_non_do_fn_class(self): - with self.assertRaisesRegexp( + def test_raises_type_error_when_decorating_non_do_fn_class(self) -> None: + with self.assertRaisesRegex( TypeError, '%r is not a subclass of DoFn' % NotDoFn, ): MockAuditsExisting(base_models.BaseModel)(NotDoFn) @@ -198,21 +218,25 @@ class 
MockRelationshipsOf(validation_decorators.RelationshipsOf): """Subclassed with overrides to avoid modifying the real decorator.""" # Overrides the real value for the unit tests. - _ID_REFERENCING_PROPERTIES = collections.defaultdict(set) + _ID_REFERENCING_PROPERTIES: Dict[ + model_property.ModelProperty, Set[str] + ] = collections.defaultdict(set) @classmethod - def clear(cls): + def clear(cls) -> None: """Test-only helper method for clearing the decorator.""" cls._ID_REFERENCING_PROPERTIES.clear() class RelationshipsOfTests(test_utils.TestBase): - def tearDown(self): - super(RelationshipsOfTests, self).tearDown() + def tearDown(self) -> None: + super().tearDown() MockRelationshipsOf.clear() - def get_property_of(self, model_class, property_name): + def get_property_of( + self, model_class: Type[base_models.BaseModel], property_name: str + ) -> model_property.ModelProperty: """Helper method to create a ModelProperty. Args: @@ -226,7 +250,7 @@ def get_property_of(self, model_class, property_name): return model_property.ModelProperty( model_class, getattr(model_class, property_name)) - def test_has_no_relationships_by_default(self): + def test_has_no_relationships_by_default(self) -> None: self.assertEqual( MockRelationshipsOf .get_id_referencing_properties_by_kind_of_possessor(), {}) @@ -234,9 +258,14 @@ def test_has_no_relationships_by_default(self): MockRelationshipsOf.get_all_model_kinds_referenced_by_properties(), set()) - def test_valid_relationship_generator(self): + def test_valid_relationship_generator(self) -> None: @MockRelationshipsOf(BarModel) - def bar_model_relationships(model): # pylint: disable=unused-variable + def bar_model_relationships( + model: Type[BarModel] + ) -> Iterator[ + Tuple[model_property.PropertyType, + List[Type[base_models.BaseModel]]] + ]: """Defines the relationships of BarModel.""" yield (model.foo_id, [FooModel]) @@ -254,9 +283,14 @@ def bar_model_relationships(model): # pylint: disable=unused-variable 
MockRelationshipsOf.get_all_model_kinds_referenced_by_properties(), {'FooModel'}) - def test_accepts_id_as_property(self): + def test_accepts_id_as_property(self) -> None: @MockRelationshipsOf(BarModel) - def bar_model_relationships(model): # pylint: disable=unused-variable + def bar_model_relationships( + model: Type[base_models.BaseModel] + ) -> Iterator[ + Tuple[model_property.PropertyType, + List[Type[base_models.BaseModel]]] + ]: """Defines the relationships of BarModel.""" yield (model.id, [BazModel]) @@ -274,19 +308,30 @@ def bar_model_relationships(model): # pylint: disable=unused-variable MockRelationshipsOf.get_all_model_kinds_referenced_by_properties(), {'BazModel'}) - def test_rejects_values_that_are_not_types(self): + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. + def test_rejects_values_that_are_not_types(self) -> None: foo_model = FooModel() - with self.assertRaisesRegexp(TypeError, 'is an instance, not a type'): - MockRelationshipsOf(foo_model) + with self.assertRaisesRegex(TypeError, 'is an instance, not a type'): + MockRelationshipsOf(foo_model) # type: ignore[arg-type] - def test_rejects_types_that_are_not_models(self): - with self.assertRaisesRegexp(TypeError, 'not a subclass of BaseModel'): - MockRelationshipsOf(int) + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
+ def test_rejects_types_that_are_not_models(self) -> None: + with self.assertRaisesRegex(TypeError, 'not a subclass of BaseModel'): + MockRelationshipsOf(int) # type: ignore[arg-type] - def test_rejects_relationship_generator_with_wrong_name(self): - with self.assertRaisesRegexp(ValueError, 'Please rename the function'): + def test_rejects_relationship_generator_with_wrong_name(self) -> None: + with self.assertRaisesRegex(ValueError, 'Please rename the function'): @MockRelationshipsOf(BarModel) - def unused_bar_model_relationships(unused_model): + def unused_bar_model_relationships( + unused_model: Type[base_models.BaseModel] + ) -> Iterator[ + Tuple[model_property.PropertyType, + List[Type[base_models.BaseModel]]] + ]: """Defines the relationships of BarModel.""" yield (BarModel.foo_id, [FooModel]) diff --git a/core/jobs/io/cache_io.py b/core/jobs/io/cache_io.py new file mode 100644 index 000000000000..6d243a3dce49 --- /dev/null +++ b/core/jobs/io/cache_io.py @@ -0,0 +1,51 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Provides PTransforms for manipulating with cache.""" + +from __future__ import annotations + +from core.domain import caching_services + +import apache_beam as beam +from typing import Any + + +# TODO(#15613): Here we use MyPy ignore because of the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'PTransform' (has type 'Any')), we added an ignore here. +class FlushCache(beam.PTransform): # type: ignore[misc] + """Flushes the memory caches.""" + + # Here we use type Any because we do not care about the type of items passed + # here. + def expand( + self, items: beam.PCollection[Any] + ) -> beam.pvalue.PDone: + """Flushes the memory caches. + + Args: + items: PCollection. Items, can also contain just one item. + + Returns: + PCollection. An empty PCollection. + """ + return ( + items + | beam.CombineGlobally(lambda _: []) + | beam.Map(lambda _: caching_services.flush_memory_caches()) + ) diff --git a/core/jobs/io/cache_io_test.py b/core/jobs/io/cache_io_test.py new file mode 100644 index 000000000000..bca574559e4e --- /dev/null +++ b/core/jobs/io/cache_io_test.py @@ -0,0 +1,52 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for jobs.io.cache_io.""" + +from __future__ import annotations + +from core.domain import caching_services +from core.jobs import job_test_utils +from core.jobs.io import cache_io + +import apache_beam as beam + + +class FlushCacheTests(job_test_utils.PipelinedTestBase): + + def test_cache_is_flushed(self) -> None: + items = [1] * 100 + + called_functions = {'flush_caches': False} + + class MockMemoryCachingServices: + + @staticmethod + def flush_caches() -> None: + """Flush cache.""" + called_functions['flush_caches'] = True + + with self.swap( + caching_services, 'memory_cache_services', MockMemoryCachingServices + ): + self.assert_pcoll_equal( + self.pipeline + | beam.Create(items) + | cache_io.FlushCache(), + [None] + ) + + self.assertTrue(called_functions['flush_caches']) diff --git a/core/jobs/io/gcs_io.py b/core/jobs/io/gcs_io.py new file mode 100644 index 000000000000..d89f2e7f8bda --- /dev/null +++ b/core/jobs/io/gcs_io.py @@ -0,0 +1,247 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Provides an Apache Beam API for operating on GCS.""" + +from __future__ import annotations + +from core.platform import models + +import apache_beam as beam +import result +from typing import List, Optional, Tuple, TypedDict, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import app_identity_services + from mypy_imports import storage_services + +storage_services = models.Registry.import_storage_services() +app_identity_services = models.Registry.import_app_identity_services() + +BUCKET = app_identity_services.get_gcs_resource_bucket_name() + + +# TODO(#15613): Here we use MyPy ignore because of the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'PTransform' (has type 'Any')), we added an ignore here. +class ReadFile(beam.PTransform): # type: ignore[misc] + """Read files form the GCS.""" + + def __init__( + self, + bucket: str = BUCKET, + label: Optional[str] = None + ) -> None: + """Initializes the ReadFile PTransform. + + Args: + bucket: str. The bucket name on the GCS. + label: Optional[str]. The label of the PTransform. + """ + super().__init__(label=label) + self.bucket = bucket + + def expand(self, file_paths: beam.PCollection) -> beam.PCollection: + """Returns PCollection with file data. + + Args: + file_paths: PCollection. The collection of filepaths that will + be read. + + Returns: + PCollection. The PCollection of the file data. + """ + return ( + file_paths + | 'Read the file' >> beam.Map(self._read_file) + ) + + def _read_file( + self, file_path: str + ) -> result.Result[Tuple[str, Union[bytes, str]]]: + """Helper function to read the contents of a file. + + Args: + file_path: str. The name of the file that will be read. + + Returns: + data: Tuple[str, bytes]. The file data. 
+ """ + try: + file_data = storage_services.get(self.bucket, file_path) + return result.Ok((file_path, file_data)) + except Exception: + err_message: str = 'The file does not exists.' + return result.Err((file_path, err_message)) + + +class FileObjectDict(TypedDict): + """Dictionary representing file object that will be written to GCS.""" + + filepath: str + data: bytes + + +# TODO(#15613): Here we use MyPy ignore because of the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'PTransform' (has type 'Any')), we added an ignore here. +class WriteFile(beam.PTransform): # type: ignore[misc] + """Write files to GCS.""" + + def __init__( + self, + mime_type: str = 'application/octet-stream', + bucket: str = BUCKET, + label: Optional[str] = None + ) -> None: + """Initializes the WriteFile PTransform. + + Args: + mime_type: str. The mime_type to assign to the file. + bucket: str. The bucket name on the GCS. + label: Optional[str]. The label of the PTransform. + """ + super().__init__(label=label) + self.mime_type = mime_type + self.bucket = bucket + + def expand(self, file_objects: beam.PCollection) -> beam.PCollection: + """Returns the PCollection of files that have written to the GCS. + + Args: + file_objects: PCollection. The collection of file paths and data + that will be written. + + Returns: + PCollection. The PCollection of the number of bytes that has + written to GCS. + """ + return ( + file_objects + | 'Write files to GCS' >> beam.Map(self._write_file) + ) + + def _write_file(self, file_obj: FileObjectDict) -> int: + """Helper function to write file to the GCS. + + Args: + file_obj: FileObjectDict. The dictionary having file + path and file data. + + Returns: + int. Returns the number of bytes that has been written to GCS. 
+ """ + storage_services.commit( + self.bucket, file_obj['filepath'], file_obj['data'], self.mime_type) + return len(file_obj['data']) + + +# TODO(#15613): Here we use MyPy ignore because of the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'PTransform' (has type 'Any')), we added an ignore here. +class DeleteFile(beam.PTransform): # type: ignore[misc] + """Delete files from GCS.""" + + def __init__( + self, + bucket: str = BUCKET, + label: Optional[str] = None + ) -> None: + """Initializes the DeleteFile PTransform. + + Args: + bucket: str. The bucket name on the GCS. + label: Optional[str]. The label of the PTransform. + """ + super().__init__(label=label) + self.bucket = bucket + + def expand(self, file_paths: beam.PCollection) -> beam.pvalue.PDone: + """Deletes the files in given PCollection. + + Args: + file_paths: PCollection. The collection of filepaths that will + be deleted. + + Returns: + PCollection. The PCollection of the file data. + """ + return ( + file_paths + | 'Delete the file' >> beam.Map(self._delete_file) + ) + + def _delete_file(self, file_path: str) -> None: + """Helper function to delete the file. + + Args: + file_path: str. The name of the file that will be deleted. + """ + storage_services.delete(self.bucket, file_path) + + +# TODO(#15613): Here we use MyPy ignore because of the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'PTransform' (has type 'Any')), we added an ignore here. +class GetFiles(beam.PTransform): # type: ignore[misc] + """Get all files with specefic prefix.""" + + def __init__( + self, + bucket: str = BUCKET, + label: Optional[str] = None + ) -> None: + """Initializes the GetFiles PTransform. + + Args: + bucket: str. 
The bucket name on the GCS. + label: Optional[str]. The label of the PTransform. + """ + super().__init__(label=label) + self.bucket = bucket + + def expand(self, prefixes: beam.PCollection) -> beam.PCollection: + """Returns PCollection with file names. + + Args: + prefixes: PCollection. The collection of filepath prefixes. + + Returns: + PCollection. The PCollection of the file names. + """ + return ( + prefixes + | 'Get names of the files' >> beam.Map(self._get_file_with_prefix) + ) + + def _get_file_with_prefix(self, prefix: str) -> List[str]: + """Helper function to get file names with the prefix. + + Args: + prefix: str. The prefix path of which we want to list + all the files. + + Returns: + filepaths: List[str]. The file name as key and size of file + as value. + """ + list_of_blobs = storage_services.listdir(self.bucket, prefix) + return list(blob.name for blob in list_of_blobs) diff --git a/core/jobs/io/gcs_io_test.py b/core/jobs/io/gcs_io_test.py new file mode 100644 index 000000000000..dd3cf2f3b025 --- /dev/null +++ b/core/jobs/io/gcs_io_test.py @@ -0,0 +1,172 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for jobs.io.gcs_io.""" + +from __future__ import annotations + +from core import utils +from core.domain import user_services +from core.jobs import job_test_utils +from core.jobs.io import gcs_io +from core.platform import models + +import apache_beam as beam +import result + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import app_identity_services + from mypy_imports import storage_services + +storage_services = models.Registry.import_storage_services() +app_identity_services = models.Registry.import_app_identity_services() + + +class ReadFileTest(job_test_utils.PipelinedTestBase): + """Tests to check gcs_io.ReadFile.""" + + def test_read_from_gcs(self) -> None: + string = b'testing' + bucket = app_identity_services.get_gcs_resource_bucket_name() + storage_services.commit(bucket, 'dummy_file', string, None) + filepaths = ['dummy_file', 'new_dummy_file'] + filepath_p_collec = ( + self.pipeline + | 'Create pcoll of filepaths' >> beam.Create(filepaths) + | 'Read file from GCS' >> gcs_io.ReadFile() + ) + self.assert_pcoll_equal(filepath_p_collec, [ + result.Ok(('dummy_file', b'testing')), + result.Err(('new_dummy_file', 'The file does not exists.'))]) + + +class WriteFileTest(job_test_utils.PipelinedTestBase): + """Tests to check gcs_io.WriteFile.""" + + def test_write_to_gcs(self) -> None: + string = b'testing' + filepaths = [ + { + 'filepath': 'dummy_folder/dummy_subfolder/dummy_file_1', + 'data': string + }, + { + 'filepath': 'dummy_folder/dummy_subfolder/dummy_file_2', + 'data': string + } + ] + filepath_p_collec = ( + self.pipeline + | 'Create pcoll of filepaths' >> beam.Create(filepaths) + | 'Write to GCS' >> gcs_io.WriteFile() + ) + self.assert_pcoll_equal(filepath_p_collec, [7, 7]) + + def test_write_binary_files_to_gcs(self) -> None: + binary_data = utils.convert_png_data_url_to_binary( + user_services.DEFAULT_IDENTICON_DATA_URL) + filepaths = [ + { + 'filepath': 'dummy_folder/dummy_subfolder/dummy_file_1', + 'data': 
binary_data + } + ] + filepath_p_collec = ( + self.pipeline + | 'Create pcoll of filepaths' >> beam.Create(filepaths) + | 'Write to GCS' >> gcs_io.WriteFile() + ) + self.assert_pcoll_equal(filepath_p_collec, [3681]) + + +class DeleteFileTest(job_test_utils.PipelinedTestBase): + """Tests to check gcs_io.DeleteFile.""" + + def test_delete_files_in_gcs(self) -> None: + file_path = 'dummy_folder/dummy_subfolder/dummy_file' + string = b'testing' + bucket = app_identity_services.get_gcs_resource_bucket_name() + storage_services.commit(bucket, file_path, string, None) + file_paths = [file_path] + filepath_p_collec = ( + self.pipeline + | 'Create pcoll of filepaths' >> beam.Create(file_paths) + | 'Delete file from GCS' >> gcs_io.DeleteFile() + ) + self.assert_pcoll_equal(filepath_p_collec, [None]) + self.assertFalse(storage_services.isfile(bucket, file_path)) + + def test_check_correct_files_are_passing(self) -> None: + file_path = 'dummy_folder/dummy_subfolder/dummy_file' + file_paths = [file_path] + + with self.swap( + gcs_io.DeleteFile, + '_delete_file', + lambda self, file_path: file_path + ): # pylint: disable=unused-argument + filepath_p_collec = ( + self.pipeline + | 'Create pcoll of filepaths' >> beam.Create(file_paths) + | 'Delete file from GCS' >> gcs_io.DeleteFile() + ) + self.assert_pcoll_equal(filepath_p_collec, [file_path]) + + +class GetFilesTest(job_test_utils.PipelinedTestBase): + """Tests to check gcs_io.GetFiles.""" + + def test_get_files_with_specefic_prefix(self) -> None: + bucket = app_identity_services.get_gcs_resource_bucket_name() + filepath_1 = 'dummy_folder/dummy_subfolder/dummy_file_1' + filepath_2 = 'dummy_folder/dummy_subfolder/dummy_file_2' + string = b'testing' + storage_services.commit( + bucket, filepath_1, string, 'application/octet-stream') + storage_services.commit( + bucket, filepath_2, string, 'application/octet-stream') + prefixes = ['dummy_folder/dummy_subfolder'] + filepath_p_collec = ( + self.pipeline + | 'Create pcoll of 
filepaths' >> beam.Create(prefixes) + | 'Get files from GCS' >> gcs_io.GetFiles() + | 'Sort the values' >> beam.Map(sorted) + ) + self.assert_pcoll_equal( + filepath_p_collec, [ + [ + 'dummy_folder/dummy_subfolder/dummy_file_1', + 'dummy_folder/dummy_subfolder/dummy_file_2' + ] + ]) + + def test_check_correct_filepath_is_passing(self) -> None: + file_paths = ['dummy_folder/dummy_subfolder'] + + with self.swap( + gcs_io.GetFiles, + '_get_file_with_prefix', + lambda self, file_path: file_path + ): # pylint: disable=unused-argument + filepath_p_collec = ( + self.pipeline + | 'Create pcoll of filepaths' >> beam.Create(file_paths) + | 'Get files with prefixes from GCS' >> gcs_io.GetFiles() + ) + self.assert_pcoll_equal( + filepath_p_collec, + ['dummy_folder/dummy_subfolder']) diff --git a/core/jobs/io/job_io.py b/core/jobs/io/job_io.py index 296171f24b98..f6e83f59e383 100644 --- a/core/jobs/io/job_io.py +++ b/core/jobs/io/job_io.py @@ -31,11 +31,15 @@ from mypy_imports import beam_job_models from mypy_imports import datastore_services -(beam_job_models,) = models.Registry.import_models([models.NAMES.beam_job]) +(beam_job_models,) = models.Registry.import_models([models.Names.BEAM_JOB]) datastore_services = models.Registry.import_datastore_services() +# TODO(#15613): Here we use MyPy ignore because of the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'PTransform' (has type 'Any')), we added an ignore here. class PutResults(beam.PTransform): # type: ignore[misc] """Writes Job Results into the NDB datastore.""" @@ -48,7 +52,7 @@ def __init__(self, job_id: str, label: Optional[str] = None) -> None: job_id: str. The Oppia ID associated with the current pipeline. label: str|None. The label of the PTransform. 
""" - super(PutResults, self).__init__(label=label) + super().__init__(label=label) self.job_id = job_id def expand( diff --git a/core/jobs/io/job_io_test.py b/core/jobs/io/job_io_test.py index 9c610ad4caf8..385e0aa6ab75 100644 --- a/core/jobs/io/job_io_test.py +++ b/core/jobs/io/job_io_test.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Unit tests for jobs.io.job_run_results_io.""" +"""Unit tests for jobs.io.job_io.""" from __future__ import annotations @@ -53,7 +53,7 @@ def test_sharded_output(self) -> None: job_run_result.JobRunResult(stdout='ghi', stderr='789'), ] - with self.swap(job_run_result, 'MAX_OUTPUT_BYTES', 11): + with self.swap(job_run_result, 'MAX_OUTPUT_CHARACTERS', 8): self.assert_pcoll_empty( self.pipeline | beam.Create(messages) @@ -61,5 +61,5 @@ def test_sharded_output(self) -> None: ) result = beam_job_services.get_beam_job_run_result(self.JOB_ID) - self.assertItemsEqual(result.stdout.split('\n'), ['abc', 'def', 'ghi']) # type: ignore[no-untyped-call] - self.assertItemsEqual(result.stderr.split('\n'), ['123', '456', '789']) # type: ignore[no-untyped-call] + self.assertItemsEqual(result.stdout.split('\n'), ['abc', 'def', 'ghi']) + self.assertItemsEqual(result.stderr.split('\n'), ['123', '456', '789']) diff --git a/core/jobs/io/ndb_io.py b/core/jobs/io/ndb_io.py index 3ba40d0ad95e..6f117736383b 100644 --- a/core/jobs/io/ndb_io.py +++ b/core/jobs/io/ndb_io.py @@ -35,6 +35,10 @@ datastore_services = models.Registry.import_datastore_services() +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'PTransform' (has type 'Any')), we added an ignore here. 
class GetModels(beam.PTransform): # type: ignore[misc] """Reads NDB models from the datastore using a query.""" @@ -47,7 +51,7 @@ def __init__( query: datastore_services.Query. The query used to fetch models. label: str|None. The label of the PTransform. """ - super(GetModels, self).__init__(label=label) + super().__init__(label=label) self.query = query def expand( @@ -75,6 +79,10 @@ def expand( ) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'PTransform' (has type 'Any')), we added an ignore here. class PutModels(beam.PTransform): # type: ignore[misc] """Writes NDB models to the datastore.""" @@ -102,6 +110,10 @@ def expand( ) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'PTransform' (has type 'Any')), we added an ignore here. class DeleteModels(beam.PTransform): # type: ignore[misc] """Deletes NDB models from the datastore.""" diff --git a/core/jobs/io/ndb_io_test.py b/core/jobs/io/ndb_io_test.py index 9412d4fd41ba..51ab043ab8b7 100644 --- a/core/jobs/io/ndb_io_test.py +++ b/core/jobs/io/ndb_io_test.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Provides an Apache Beam API for operating on NDB models.""" +"""Unit tests for jobs.io.ndb_io.""" from __future__ import annotations @@ -31,7 +31,7 @@ from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() @@ -64,7 +64,7 @@ def test_read_from_datastore(self) -> None: ] self.put_multi(model_list) - self.assertItemsEqual(self.get_base_models(), model_list) # type: ignore[no-untyped-call] + self.assertItemsEqual(self.get_base_models(), model_list) model_pcoll = ( self.pipeline | ndb_io.GetModels(base_models.BaseModel.get_all()) @@ -79,12 +79,11 @@ def test_write_to_datastore(self) -> None: self.create_model(base_models.BaseModel, id='c'), ] - self.assertItemsEqual(self.get_base_models(), []) # type: ignore[no-untyped-call] + self.assertItemsEqual(self.get_base_models(), []) self.assert_pcoll_empty( self.pipeline | beam.Create(model_list) | ndb_io.PutModels()) - - self.assertItemsEqual(self.get_base_models(), model_list) # type: ignore[no-untyped-call] + self.assertItemsEqual(self.get_base_models(), model_list) def test_delete_from_datastore(self) -> None: model_list = [ @@ -94,11 +93,11 @@ def test_delete_from_datastore(self) -> None: ] self.put_multi(model_list) - self.assertItemsEqual(self.get_base_models(), model_list) # type: ignore[no-untyped-call] + self.assertItemsEqual(self.get_base_models(), model_list) self.assert_pcoll_empty( self.pipeline | beam.Create([model.key for model in model_list]) | ndb_io.DeleteModels()) - self.assertItemsEqual(self.get_base_models(), []) # type: ignore[no-untyped-call] + self.assertItemsEqual(self.get_base_models(), []) diff --git a/core/jobs/job_options.py b/core/jobs/job_options.py index 39bd728d61c3..467f26323a42 100644 --- a/core/jobs/job_options.py +++ b/core/jobs/job_options.py @@ 
-26,6 +26,11 @@ from typing import List, Optional +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PipelineOptions class is of type Any. Thus to avoid MyPy's +# error (Class cannot subclass 'PipelineOptions' (has type 'Any')), we +# added an ignore here. class JobOptions(pipeline_options.PipelineOptions): # type: ignore[misc] """Option class for configuring the behavior of Oppia jobs.""" @@ -48,13 +53,16 @@ def __init__( (unmodified) because PipelineOptions, a parent class, needs it. **job_options: dict(str: *). One of the options defined in the class JOB_OPTIONS dict. + + Raises: + ValueError. Unsupported job option(s). """ unsupported_options = set(job_options).difference(self.JOB_OPTIONS) if unsupported_options: joined_unsupported_options = ', '.join(sorted(unsupported_options)) raise ValueError( 'Unsupported option(s): %s' % joined_unsupported_options) - super(JobOptions, self).__init__( + super().__init__( # Needed by PipelineOptions. flags=flags, # Needed by GoogleCloudOptions. 
diff --git a/core/jobs/job_options_test.py b/core/jobs/job_options_test.py index 13b5b9c292e0..3a4a92ced4e2 100644 --- a/core/jobs/job_options_test.py +++ b/core/jobs/job_options_test.py @@ -35,5 +35,5 @@ def test_overwritten_values(self) -> None: self.assertEqual(options.namespace, 'abc') def test_unsupported_values(self) -> None: - with self.assertRaisesRegexp(ValueError, r'Unsupported option\(s\)'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(ValueError, r'Unsupported option\(s\)'): job_options.JobOptions(a='a', b='b') diff --git a/core/jobs/job_test_utils.py b/core/jobs/job_test_utils.py index 17d974c45270..ad44f69fe87a 100644 --- a/core/jobs/job_test_utils.py +++ b/core/jobs/job_test_utils.py @@ -41,7 +41,7 @@ from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() @@ -59,15 +59,18 @@ class PipelinedTestBase(test_utils.AppEngineTestBase): YEAR_AGO = NOW - datetime.timedelta(weeks=52) YEAR_LATER = NOW + datetime.timedelta(weeks=52) + # Here we use type Any because we need to match the behavior of super + # class's constructor and super class's constructor can accept arbitrary + # number of arguments with different types of values. 
def __init__(self, *args: Any, **kwargs: Any) -> None: - super(PipelinedTestBase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.pipeline = test_pipeline.TestPipeline( runner=runners.DirectRunner(), options=job_options.JobOptions(namespace=self.namespace)) self._pipeline_context_stack: Optional[contextlib.ExitStack] = None def setUp(self) -> None: - super(PipelinedTestBase, self).setUp() + super().setUp() with contextlib.ExitStack() as pipeline_context_stack: pipeline_context_stack.enter_context(decorate_beam_errors()) pipeline_context_stack.enter_context(self.pipeline) @@ -77,7 +80,7 @@ def tearDown(self) -> None: try: self._exit_pipeline_context() finally: - super(PipelinedTestBase, self).tearDown() + super().tearDown() def assert_pcoll_equal( self, actual: beam.PCollection, expected: beam.PCollection @@ -119,6 +122,9 @@ def assert_pcoll_empty(self, actual: beam.PCollection) -> None: beam_testing_util.assert_that(actual, beam_testing_util.is_empty()) self._exit_pipeline_context() + # Here we use type Any because this method can accept different properties + # of models and those properties can be of type str, int, bool, Dict and + # other types too. So, to allow every type of property we used Any here. def create_model( self, model_class: Type[base_models.SELF_BASE_MODEL], @@ -180,8 +186,11 @@ class JobTestBase(PipelinedTestBase): # NOTE: run() raises a NotImplementedError. JOB_CLASS: Type[base_jobs.JobBase] = base_jobs.JobBase + # Here we use type Any because we need to match the behavior of super + # class's constructor and super class's constructor can accept arbitrary + # number of arguments with different types of values. 
def __init__(self, *args: Any, **kwargs: Any) -> None: - super(JobTestBase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.job = self.JOB_CLASS(self.pipeline) def run_job(self) -> beam.PCollection[job_run_result.JobRunResult]: @@ -196,7 +205,13 @@ def run(self, model_kind): Returns: PCollection. The output of the job. """ - return self.job.run() + job_results = self.job.run() + # NDB operations in Beam do not properly update the context cache + # (this cache is separate for every application thread), thus we clear + # it ourselves. + with datastore_services.get_ndb_context() as ndb_context: + ndb_context.clear_cache() + return job_results def put_multi(self, model_list: Sequence[base_models.BaseModel]) -> None: """Puts the input models into the datastore. @@ -269,23 +284,23 @@ def decorate_beam_errors() -> Iterator[None]: if match: groupdict = match.groupdict() else: - raise AssertionError(exception_message) + raise AssertionError(exception_message) from exception unexpected_elements = groupdict.get('unexpected', None) try: unexpected_elements = ( ast.literal_eval(unexpected_elements) if unexpected_elements else None) - except (SyntaxError, ValueError): - raise AssertionError(exception_message) + except (SyntaxError, ValueError) as e: + raise AssertionError(exception_message) from e missing_elements = groupdict.get('missing', None) try: missing_elements = ( ast.literal_eval(missing_elements) if missing_elements else None) - except (SyntaxError, ValueError): - raise AssertionError(exception_message) + except (SyntaxError, ValueError) as e: + raise AssertionError(exception_message) from e error_lines = [ 'failed %s' % match.group('context'), @@ -300,4 +315,4 @@ def decorate_beam_errors() -> Iterator[None]: error_lines.append('Missing:') error_lines.extend(' %r' % e for e in missing_elements) error_lines.append('') - raise AssertionError('\n'.join(error_lines)) + raise AssertionError('\n'.join(error_lines)) from exception diff --git 
a/core/jobs/job_test_utils_test.py b/core/jobs/job_test_utils_test.py index 7e923e84e025..6647d6c2a62f 100644 --- a/core/jobs/job_test_utils_test.py +++ b/core/jobs/job_test_utils_test.py @@ -31,7 +31,7 @@ if MYPY: # pragma: no cover from mypy_imports import base_models -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) class PipelinedTestBaseTests(job_test_utils.PipelinedTestBase): @@ -39,14 +39,14 @@ class PipelinedTestBaseTests(job_test_utils.PipelinedTestBase): def test_assert_pcoll_empty_raises_immediately(self) -> None: # NOTE: Arbitrary operations that produce a non-empty PCollection. output = self.pipeline | beam.Create([123]) | beam.Map(lambda x: x) - with self.assertRaisesRegexp(AssertionError, 'failed'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(AssertionError, 'failed'): self.assert_pcoll_empty(output) def test_assert_pcoll_equal_raises_immediately(self) -> None: # NOTE: Arbitrary operations that produce an empty PCollection. 
output = self.pipeline | beam.Create([]) | beam.Map(lambda x: x) - with self.assertRaisesRegexp(AssertionError, 'failed'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(AssertionError, 'failed'): self.assert_pcoll_equal(output, [123]) def test_assert_pcoll_empty_raises_runtime_error_when_called_twice( @@ -57,9 +57,10 @@ def test_assert_pcoll_empty_raises_runtime_error_when_called_twice( self.assert_pcoll_empty(output) - self.assertRaisesRegexp( # type: ignore[no-untyped-call] - RuntimeError, 'must be run in the pipeline context', - lambda: self.assert_pcoll_empty(output)) + with self.assertRaisesRegex( + RuntimeError, 'must be run in the pipeline context' + ): + self.assert_pcoll_empty(output) def test_assert_pcoll_equal_raises_runtime_error_when_called_twice( self @@ -69,9 +70,10 @@ def test_assert_pcoll_equal_raises_runtime_error_when_called_twice( self.assert_pcoll_equal(output, [123]) - self.assertRaisesRegexp( # type: ignore[no-untyped-call] - RuntimeError, 'must be run in the pipeline context', - lambda: self.assert_pcoll_equal(output, [123])) + with self.assertRaisesRegex( + RuntimeError, 'must be run in the pipeline context' + ): + self.assert_pcoll_equal(output, [123]) def test_create_model_sets_date_properties(self) -> None: model = self.create_model(base_models.BaseModel) @@ -86,14 +88,36 @@ class JobTestBaseTests(job_test_utils.JobTestBase): def tearDown(self) -> None: self.JOB_CLASS.reset_mock() - super(JobTestBaseTests, self).tearDown() + super().tearDown() def test_run_job(self) -> None: self.run_job() + # TODO(#16049): Here we use MyPy ignore because the method + # 'assert_called' is accessed on 'run' function and currently + # MyPy does not support for extra attributes on functions of + # Callable types. So, once this 'assert_called' method is + # replaced with some more standard method, we can remove this + # todo from here. 
self.job.run.assert_called() # type: ignore[attr-defined] + def test_put_multi(self) -> None: + model_list = [ + self.create_model(base_models.BaseModel) for _ in range(3)] + self.put_multi(model_list) + + model_ids = [model.id for model in model_list] + for model_id in model_ids: + model = base_models.BaseModel.get_by_id(model_id) + self.assertIsNotNone(model) + def test_job_output_is(self) -> None: + # TODO(#16049): Here we use MyPy ignore because the attribute + # 'return_value' is accessed on 'run' function and currently + # MyPy does not support for extra attributes on functions of + # Callable types. So, once this 'return_value' attribute is + # replaced with some more standard method, we can remove this + # todo from here. self.job.run.return_value = ( # type: ignore[attr-defined] # NOTE: Arbitrary operations that produce a non-empty PCollection. self.pipeline | beam.Create([123]) | beam.Map(lambda x: x)) @@ -101,6 +125,12 @@ def test_job_output_is(self) -> None: self.assert_job_output_is([123]) def test_job_output_is_empty(self) -> None: + # TODO(#16049): Here we use MyPy ignore because the attribute + # 'return_value' is accessed on 'run' function and currently + # MyPy does not support for extra attributes on functions of + # Callable types. So, once this 'return_value' attribute is + # replaced with some more standard method, we can remove this + # todo from here. self.job.run.return_value = ( # type: ignore[attr-defined] # NOTE: Arbitrary operations that produce an empty PCollection. 
self.pipeline | beam.Create([]) | beam.Map(lambda x: x)) @@ -202,6 +232,6 @@ def test_does_not_decorate_message_with_invalid_missing_value(self) -> None: self.assert_error_is_decorated(actual_msg, actual_msg) def test_does_not_decorate_message_with_non_beam_type(self) -> None: - with self.assertRaisesRegexp(Exception, 'Error coming through!'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, 'Error coming through!'): with job_test_utils.decorate_beam_errors(): raise Exception('Error coming through!') diff --git a/core/jobs/job_utils.py b/core/jobs/job_utils.py index 5b3c0b2ff66c..ae3b7ba26262 100644 --- a/core/jobs/job_utils.py +++ b/core/jobs/job_utils.py @@ -33,9 +33,11 @@ datastore_services = models.Registry.import_datastore_services() -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) +# Here we use type Any because argument 'new_values' can accept arbitrary +# number of keyword args with different types of values. def clone_model( model: datastore_services.TYPE_MODEL_SUBCLASS, **new_values: Any ) -> datastore_services.TYPE_MODEL_SUBCLASS: @@ -145,6 +147,8 @@ def get_model_id(model: datastore_services.Model) -> Optional[str]: raise TypeError('%r is not a model instance' % model) +# Here we use type Any because this method can return a property from a +# model and that property can be of any type. def get_model_property( model: datastore_services.Model, property_name: str ) -> Any: @@ -182,9 +186,9 @@ def get_beam_entity_from_ndb_model( # We use private _entity_to_ds_entity here because it provides # a functionality that we need and writing it ourselves would be # too complicated. 
- return beam_datastore_types.Entity.from_client_entity( - ndb_model._entity_to_ds_entity(model) # pylint: disable=protected-access - ) + with datastore_services.get_ndb_context(): + model_to_put = ndb_model._entity_to_ds_entity(model) # pylint: disable=protected-access + return beam_datastore_types.Entity.from_client_entity(model_to_put) def get_ndb_model_from_beam_entity( @@ -276,6 +280,9 @@ def get_beam_query_from_ndb_query( filters=filters, order=order) +# Here we use type Any because this method can return a list of tuples +# in which we have a property values from a model and those property values +# can be of any type. def _get_beam_filters_from_ndb_node( node: ndb_query.Node ) -> Tuple[Tuple[str, str, Any], ...]: @@ -287,7 +294,13 @@ def _get_beam_filters_from_ndb_node( Returns: tuple(tuple(str, str, *)). The equivalent Apache Beam filters. Items are: (property name, comparison operator, property value). + + Raises: + TypeError. These `!=`, `IN`, and `OR` are forbidden filters. """ + # Here we use type Any because this list can contain tuples of + # format (property name, comparison operator, property value) + # and here property value can be of any type. 
beam_filters: List[Tuple[str, str, Any]] = [] if isinstance(node, ndb_query.ConjunctionNode): diff --git a/core/jobs/job_utils_test.py b/core/jobs/job_utils_test.py index 4248274e8924..01425d886b2a 100644 --- a/core/jobs/job_utils_test.py +++ b/core/jobs/job_utils_test.py @@ -32,7 +32,7 @@ from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() @@ -114,7 +114,7 @@ def test_get_from_existing_model(self) -> None: job_utils.get_model_class('BaseModel'), base_models.BaseModel) def test_get_from_non_existing_model(self) -> None: - with self.assertRaisesRegexp(Exception, 'No model class found'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, 'No model class found'): job_utils.get_model_class('InvalidModel') @@ -129,10 +129,14 @@ def test_get_from_datastore_model_class(self) -> None: self.assertEqual( job_utils.get_model_kind(base_models.BaseModel), 'BaseModel') + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
def test_get_from_bad_value(self) -> None: - self.assertRaisesRegexp( # type: ignore[no-untyped-call] - TypeError, 'not a model type or instance', - lambda: job_utils.get_model_kind(123)) # type: ignore[arg-type] + with self.assertRaisesRegex( + TypeError, 'not a model type or instance' + ): + job_utils.get_model_kind(123) # type: ignore[arg-type] class GetModelPropertyTests(test_utils.TestBase): @@ -152,8 +156,11 @@ def test_get_missing_property_from_datastore_model(self) -> None: self.assertEqual(job_utils.get_model_property(model, 'prop'), None) + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. def test_get_property_from_bad_value(self) -> None: - with self.assertRaisesRegexp(TypeError, 'not a model instance'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(TypeError, 'not a model instance'): job_utils.get_model_property(123, 'prop') # type: ignore[arg-type] @@ -164,8 +171,11 @@ def test_get_id_from_datastore_model(self) -> None: self.assertEqual(job_utils.get_model_id(model), '123') + # TODO(#13059): Here we use MyPy ignore because after we fully type the + # codebase we plan to get rid of the tests that intentionally test wrong + # inputs that we can normally catch by typing. 
def test_get_id_from_bad_value(self) -> None: - with self.assertRaisesRegexp(TypeError, 'not a model instance'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(TypeError, 'not a model instance'): job_utils.get_model_id(123) # type: ignore[arg-type] @@ -191,6 +201,19 @@ def test_get_model_from_beam_entity(self) -> None: FooModel(id='abc', project=feconf.OPPIA_PROJECT_ID, prop='123'), job_utils.get_ndb_model_from_beam_entity(beam_entity)) + def test_get_beam_key_from_ndb_key(self) -> None: + beam_key = beam_datastore_types.Key( + ('FooModel', 'abc'), + project=feconf.OPPIA_PROJECT_ID, + namespace=self.namespace + ) + + # We use private _from_ds_key here because it provides functionality + # for obtaining an NDB key from a Beam key, and writing it ourselves + # would be too complicated. + ndb_key = datastore_services.Key._from_ds_key(beam_key.to_client_key()) # pylint: disable=protected-access + self.assertEqual(job_utils.get_beam_key_from_ndb_key(ndb_key), beam_key) + def test_get_model_from_beam_entity_with_time(self) -> None: utcnow = datetime.datetime.utcnow() @@ -280,19 +303,19 @@ def test_query_with_or_filter_raises_type_error(self) -> None: query = datastore_services.Query(filters=datastore_services.any_of( BarModel.prop == 1, BarModel.prop == 2)) - with self.assertRaisesRegexp(TypeError, 'forbidden filter'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(TypeError, 'forbidden filter'): job_utils.get_beam_query_from_ndb_query(query) def test_query_with_in_filter_raises_type_error(self) -> None: query = datastore_services.Query(filters=BarModel.prop.IN([1, 2, 3])) - with self.assertRaisesRegexp(TypeError, 'forbidden filter'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(TypeError, 'forbidden filter'): job_utils.get_beam_query_from_ndb_query(query) def test_query_with_not_equal_filter_raises_type_error(self) -> None: query = datastore_services.Query(filters=BarModel.prop != 1) - with 
self.assertRaisesRegexp(TypeError, 'forbidden filter'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(TypeError, 'forbidden filter'): job_utils.get_beam_query_from_ndb_query(query) def test_query_with_order(self) -> None: diff --git a/core/jobs/jobs_manager.py b/core/jobs/jobs_manager.py index a6da6dc1cd78..6e9db32c0056 100644 --- a/core/jobs/jobs_manager.py +++ b/core/jobs/jobs_manager.py @@ -24,9 +24,12 @@ from core import feconf from core.domain import beam_job_services +from core.domain import caching_services from core.jobs import base_jobs from core.jobs import job_options +from core.jobs.io import cache_io from core.jobs.io import job_io +from core.platform import models from core.storage.beam_job import gae_models as beam_job_models import apache_beam as beam @@ -34,6 +37,12 @@ from google.cloud import dataflow from typing import Iterator, Optional, Type +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + +datastore_services = models.Registry.import_datastore_services() + # This is a mapping from the Google Cloud Dataflow JobState enum to our enum. # https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate _GCLOUD_DATAFLOW_JOB_STATE_TO_OPPIA_BEAM_JOB_STATE = { @@ -109,6 +118,9 @@ def run_job( Returns: BeamJobRun. Contains metadata related to the execution status of the job. + + Raises: + RuntimeError. Failed to deploy given job to the Dataflow service. """ if pipeline is None: pipeline = beam.Pipeline( @@ -118,9 +130,17 @@ def run_job( job = job_class(pipeline) job_name = job_class.__name__ + # Clear cache before running the job to be sure that the cache + # does not affect the job. + caching_services.flush_memory_caches() + # NOTE: Exceptions raised within this context are logged and suppressed. 
with _job_bookkeeping_context(job_name) as run_model: - _ = job.run() | job_io.PutResults(run_model.id) + _ = ( + job.run() + | job_io.PutResults(run_model.id) + | cache_io.FlushCache() + ) run_result = pipeline.run() @@ -137,6 +157,12 @@ def run_job( 'Failed to deploy %s to the Dataflow service. Please try again ' 'after a few minutes.' % job_name) + # NDB operations in Beam do not properly update the context cache + # (this cache is separate for every application thread), thus we clear + # it ourselves. + with datastore_services.get_ndb_context() as ndb_context: + ndb_context.clear_cache() + return run_model @@ -160,11 +186,11 @@ def refresh_state_of_beam_job_run_model( job_id=job_id, project_id=feconf.OPPIA_PROJECT_ID, location=feconf.GOOGLE_APP_ENGINE_REGION)) - except Exception: + except Exception as e: job_state = beam_job_models.BeamJobState.UNKNOWN.value job_state_updated = beam_job_run_model.last_updated - logging.exception('Failed to update job_id="%s"!' % job_id) + logging.warning('Failed to update job_id="%s": %s', job_id, e) else: job_state = _GCLOUD_DATAFLOW_JOB_STATE_TO_OPPIA_BEAM_JOB_STATE.get( @@ -195,6 +221,9 @@ def cancel_job(beam_job_run_model: beam_job_models.BeamJobRunModel) -> None: Args: beam_job_run_model: BeamJobRunModel. The model to update. + + Raises: + ValueError. The given model has no job ID. 
""" job_id = beam_job_run_model.dataflow_job_id if job_id is None: diff --git a/core/jobs/jobs_manager_test.py b/core/jobs/jobs_manager_test.py index 93b1dbd869eb..0d5640608c2e 100644 --- a/core/jobs/jobs_manager_test.py +++ b/core/jobs/jobs_manager_test.py @@ -226,7 +226,7 @@ def tearDown(self) -> None: def test_sync_job(self) -> None: self.run_model.dataflow_job_id = None - with self.assertRaisesRegexp(ValueError, 'must not be None'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(ValueError, 'must not be None'): jobs_manager.cancel_job(self.run_model) def test_job_with_cancelling_status(self) -> None: diff --git a/core/jobs/registry.py b/core/jobs/registry.py index 5adccdcf304e..0bd36df221ae 100644 --- a/core/jobs/registry.py +++ b/core/jobs/registry.py @@ -43,14 +43,30 @@ # We need this to happen for every job in this registry file, because the # registry depends on JobMetaclass to handle the responsibility of keeping track # of every job. -from core.jobs.batch_jobs import blog_validation_jobs # pylint: disable=unused-import # isort: skip -from core.jobs.batch_jobs import ( # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import blog_validation_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import collection_info_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import email_deletion_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import exp_migration_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import ( # pylint: disable=unused-import # isort: skip exp_recommendation_computation_jobs) -from core.jobs.batch_jobs import exp_search_indexing_jobs # pylint: disable=unused-import # isort: skip -from core.jobs.batch_jobs import model_validation_jobs # pylint: disable=unused-import # isort: skip -from core.jobs.batch_jobs import opportunity_management_jobs # pylint: disable=unused-import # isort: skip -from 
core.jobs.batch_jobs import suggestion_stats_computation_jobs # pylint: disable=unused-import # isort: skip -from core.jobs.batch_jobs import user_stats_computation_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import exp_search_indexing_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import model_validation_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import opportunity_management_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import skill_migration_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import story_migration_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import topic_migration_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import suggestion_stats_computation_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import suggestion_migration_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import translation_migration_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import user_stats_computation_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import math_interactions_audit_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import mailchimp_population_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import ( # pylint: disable=unused-import # isort: skip + exp_version_history_computation_job) +from core.jobs.batch_jobs import ( # pylint: disable=unused-import # isort: skip + rejecting_suggestion_for_invalid_content_ids_jobs) +from core.jobs.batch_jobs import user_settings_profile_picture_jobs # pylint: disable=unused-import # isort: skip +from core.jobs.batch_jobs import store_profile_images_to_gcs_jobs # pylint: disable=unused-import # isort: skip def get_all_jobs() -> 
List[Type[base_jobs.JobBase]]: diff --git a/core/jobs/registry_test.py b/core/jobs/registry_test.py index c0488ab9852f..9ce0fb33e1db 100644 --- a/core/jobs/registry_test.py +++ b/core/jobs/registry_test.py @@ -22,46 +22,49 @@ from core.jobs import registry from core.tests import test_utils -from typing import Type - class RegistryTests(test_utils.TestBase): - def test_get_all_jobs_returns_value_from_job_metaclass(self) -> None: - unique_obj = object() - - @classmethod # type: ignore[misc] - def get_all_jobs_mock( - unused_cls: Type[base_jobs.JobMetaclass] - ) -> object: - """Returns the unique_obj.""" - return unique_obj + unique_obj = object() + + # Here we use object because we need to mock the behavior of + # 'registry.get_all_jobs' method. + @classmethod + def get_all_jobs_mock(cls) -> object: + """Returns the unique_obj.""" + return cls.unique_obj + + # Here we use object because we need to mock the behavior of + # 'registry.get_all_jobs_names' method. + @classmethod + def get_all_job_names_mock(cls) -> object: + """Returns the unique_obj.""" + return cls.unique_obj + + # Here we use object because we need to mock the behavior of + # 'registry.get_job_class_by_name' method. 
+ @classmethod + def get_job_class_by_name_mock(cls, unused_name: str) -> object: + """Returns the unique_obj.""" + return cls.unique_obj + def test_get_all_jobs_returns_value_from_job_metaclass(self) -> None: get_all_jobs_swap = self.swap( - base_jobs.JobMetaclass, 'get_all_jobs', get_all_jobs_mock) + base_jobs.JobMetaclass, 'get_all_jobs', self.get_all_jobs_mock) with get_all_jobs_swap: - self.assertIs(registry.get_all_jobs(), unique_obj) + self.assertIs(registry.get_all_jobs(), self.unique_obj) def test_get_all_jobs_never_returns_an_empty_list(self) -> None: self.assertNotEqual(registry.get_all_jobs(), []) def test_get_all_job_names_returns_value_from_job_metaclass(self) -> None: - unique_obj = object() - - @classmethod # type: ignore[misc] - def get_all_job_names_mock( - unused_cls: Type[base_jobs.JobMetaclass] - ) -> object: - """Returns the unique_obj.""" - return unique_obj - get_all_job_names_swap = self.swap( base_jobs.JobMetaclass, - 'get_all_job_names', get_all_job_names_mock) + 'get_all_job_names', self.get_all_job_names_mock) with get_all_job_names_swap: - self.assertIs(registry.get_all_job_names(), unique_obj) + self.assertIs(registry.get_all_job_names(), self.unique_obj) def test_get_all_job_names_never_returns_an_empty_list(self) -> None: self.assertNotEqual(registry.get_all_job_names(), []) @@ -69,20 +72,10 @@ def test_get_all_job_names_never_returns_an_empty_list(self) -> None: def test_get_job_class_by_name_returns_value_from_job_metaclass( self ) -> None: - unique_obj = object() - - @classmethod # type: ignore[misc] - def get_job_class_by_name_mock( - unused_cls: Type[base_jobs.JobMetaclass], - unused_name: str - ) -> object: - """Returns the unique_obj.""" - return unique_obj - get_job_class_by_name_swap = self.swap( base_jobs.JobMetaclass, - 'get_job_class_by_name', get_job_class_by_name_mock) + 'get_job_class_by_name', self.get_job_class_by_name_mock) with get_job_class_by_name_swap: self.assertIs( - 
registry.get_job_class_by_name('arbitrary'), unique_obj) + registry.get_job_class_by_name('arbitrary'), self.unique_obj) diff --git a/core/jobs/transforms/job_result_transforms.py b/core/jobs/transforms/job_result_transforms.py index 12ee12011dd9..b5cc3bae450e 100644 --- a/core/jobs/transforms/job_result_transforms.py +++ b/core/jobs/transforms/job_result_transforms.py @@ -25,7 +25,12 @@ from typing import Any, Optional, Tuple -class ResultsToJobRunResults(beam.PTransform): +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error +# (Class cannot subclass 'PTransform' (has type 'Any')), we added an +# ignore here. +class ResultsToJobRunResults(beam.PTransform): # type: ignore[misc] """Transforms result.Result into job_run_result.JobRunResult.""" def __init__( @@ -42,7 +47,14 @@ def __init__( # This is needed because the Beam annotations validator doesn't properly # work with result.Result. - @beam.typehints.no_annotations + # TODO(#15613): Here we use MyPy ignore because the decorator + # no_annotations is not type annotated yet in apache_beam library, + # which causes MyPy to throw untyped decorator error. So to silent + # the error, we used ignore here. + # Here we use type Any because this method is a generalized method which + # converts transform_results to a job_run_results. So, to allow all types + # of transform results, we used Any type here. + @beam.typehints.no_annotations # type: ignore[misc] def _transform_result_to_job_run_result( self, result_item: result.Result[Any, Any] ) -> job_run_result.JobRunResult: @@ -87,7 +99,14 @@ def _add_count_to_job_run_result( # This is needed because the Beam annotations validator doesn't properly # work with result.Result. 
- @beam.typehints.no_annotations + # TODO(#15613): Here we use MyPy ignore because the decorator + # no_annotations is not type annotated yet in apache_beam library, + # which causes MyPy to throw untyped decorator error. So to silent + # the error, we used ignore here. + # Here we use type Any because this method can accept any kind of + # Pcollection results to return the unique JobRunResult objects + # with count. + @beam.typehints.no_annotations # type: ignore[misc] def expand( self, results: beam.PCollection[result.Result[Any, Any]] ) -> beam.PCollection[job_run_result.JobRunResult]: @@ -110,7 +129,12 @@ def expand( ) -class CountObjectsToJobRunResult(beam.PTransform): +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error +# (Class cannot subclass 'PTransform' (has type 'Any')), we added an +# ignore here. +class CountObjectsToJobRunResult(beam.PTransform): # type: ignore[misc] """Transform that counts number of objects in a sequence and puts the count into job_run_result.JobRunResult. """ @@ -127,6 +151,9 @@ def __init__( super().__init__(label=label) self.prefix = '%s ' % prefix if prefix else '' + # Here we use type Any because this method can accept any kind of + # Pcollection object to return the unique JobRunResult objects + # with count. def expand( self, objects: beam.PCollection[Any] ) -> beam.PCollection[job_run_result.JobRunResult]: diff --git a/core/jobs/transforms/results_transforms.py b/core/jobs/transforms/results_transforms.py new file mode 100644 index 000000000000..45e2614c9004 --- /dev/null +++ b/core/jobs/transforms/results_transforms.py @@ -0,0 +1,66 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provides a transform to drain PCollection in case of an error.""" + +from __future__ import annotations + +import apache_beam as beam +import result +from typing import Any, Tuple + + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that PTransform class is of type Any. Thus to avoid MyPy's error +# (Class cannot subclass 'PTransform' (has type 'Any')), we added an +# ignore here. +class DrainResultsOnError(beam.PTransform): # type: ignore[misc] + """Transform that flushes an input PCollection if any error results + are encountered. + """ + + # Here we use type Any because this method can accept any kind of + # PCollection object to return the filtered migration results. + def expand( + self, objects: beam.PCollection[result.Result[Tuple[str, Any], Tuple[str, Exception]]] # pylint: disable=line-too-long + ) -> beam.PCollection[result.Result[Tuple[str, Any], None]]: + """Count error results in collection and flush the input + in case of errors. + + Args: + objects: PCollection. Sequence of Result objects. + + Returns: + PCollection. Sequence of Result objects or empty PCollection. 
+ """ + + error_check = ( + objects + | 'Filter errors' >> beam.Filter( + lambda result_item: result_item.is_err()) + | 'Count number of errors' >> beam.combiners.Count.Globally() + | 'Check if error count is zero' >> beam.Map(lambda x: x == 0) + ) + + filtered_results = ( + objects + | 'Remove all results in case of errors' >> beam.Filter( + lambda _, no_migration_error: bool(no_migration_error), + no_migration_error=beam.pvalue.AsSingleton( + error_check)) + ) + return filtered_results diff --git a/core/jobs/transforms/results_transforms_test.py b/core/jobs/transforms/results_transforms_test.py new file mode 100644 index 000000000000..05795a7b4f54 --- /dev/null +++ b/core/jobs/transforms/results_transforms_test.py @@ -0,0 +1,68 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests for core.jobs.transforms.results_transforms.""" + +from __future__ import annotations + +from core.jobs import job_test_utils +from core.jobs.transforms import results_transforms + +import apache_beam as beam +import result + + +class DrainResultsOnErrorTests(job_test_utils.PipelinedTestBase): + + def test_error_results_returns_empty_collection(self) -> None: + transform_result = ( + self.pipeline + | beam.Create( + [result.Ok(('id_1', None)), + result.Ok(('id_2', None)), + result.Err(('id_3', None))] + ) + | results_transforms.DrainResultsOnError() + ) + + self.assert_pcoll_empty(transform_result) + + def test_ok_results_returns_unchanged_collection(self) -> None: + transform_result = ( + self.pipeline + | beam.Create( + [result.Ok(('id_1', None)), + result.Ok(('id_2', None)), + result.Ok(('id_3', None))] + ) + | results_transforms.DrainResultsOnError() + ) + + self.assert_pcoll_equal( + transform_result, + [result.Ok(('id_1', None)), + result.Ok(('id_2', None)), + result.Ok(('id_3', None))] + ) + + def test_zero_objects_correctly_outputs(self) -> None: + transform_result = ( + self.pipeline + | beam.Create([]) + | results_transforms.DrainResultsOnError() + ) + + self.assert_pcoll_empty(transform_result) diff --git a/core/jobs/transforms/validation/auth_validation.py b/core/jobs/transforms/validation/auth_validation.py index ddc97b98ebcb..b7f5f7644ebc 100644 --- a/core/jobs/transforms/validation/auth_validation.py +++ b/core/jobs/transforms/validation/auth_validation.py @@ -23,16 +23,24 @@ from core.jobs.transforms.validation import base_validation from core.platform import models -(auth_models, user_models) = ( - models.Registry.import_models([models.NAMES.auth, models.NAMES.user])) +from typing import Iterator, List, Tuple, Type, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import auth_models + from mypy_imports import datastore_services + +(auth_models,) = (models.Registry.import_models([models.Names.AUTH])) + 
+datastore_services = models.Registry.import_datastore_services() @validation_decorators.AuditsExisting(auth_models.FirebaseSeedModel) class ValidateFirebaseSeedModelId(base_validation.ValidateBaseModelId): """Overrides regex to match the single valid FirebaseSeedModel ID.""" - def __init__(self): - super(ValidateFirebaseSeedModelId, self).__init__() + def __init__(self) -> None: + super().__init__() self._pattern = auth_models.ONLY_FIREBASE_SEED_MODEL_ID @@ -41,25 +49,49 @@ class ValidateUserIdByFirebaseAuthIdModelId( base_validation.ValidateBaseModelId): """Overrides regex to match the Firebase account ID pattern.""" - def __init__(self): - super(ValidateUserIdByFirebaseAuthIdModelId, self).__init__() + def __init__(self) -> None: + super().__init__() self._pattern = feconf.FIREBASE_AUTH_ID_REGEX @validation_decorators.RelationshipsOf(auth_models.UserAuthDetailsModel) -def user_auth_details_model_relationships(model): +def user_auth_details_model_relationships( + model: Type[auth_models.UserAuthDetailsModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[Union[ + auth_models.UserIdByFirebaseAuthIdModel, + auth_models.UserIdentifiersModel + ]]] + ] +]: """Yields how the properties of the model relate to the IDs of others.""" yield (model.firebase_auth_id, [auth_models.UserIdByFirebaseAuthIdModel]) yield (model.gae_id, [auth_models.UserIdentifiersModel]) @validation_decorators.RelationshipsOf(auth_models.UserIdByFirebaseAuthIdModel) -def user_id_by_firebase_auth_id_model_relationships(model): +def user_id_by_firebase_auth_id_model_relationships( + model: Type[auth_models.UserIdByFirebaseAuthIdModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[auth_models.UserAuthDetailsModel]] + ] +]: """Yields how the properties of the model relate to the IDs of others.""" yield (model.user_id, [auth_models.UserAuthDetailsModel]) @validation_decorators.RelationshipsOf(auth_models.UserIdentifiersModel) -def 
user_identifiers_model_relationships(model): +def user_identifiers_model_relationships( + model: Type[auth_models.UserIdentifiersModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[auth_models.UserAuthDetailsModel]] + ] +]: """Yields how the properties of the model relate to the IDs of others.""" yield (model.user_id, [auth_models.UserAuthDetailsModel]) diff --git a/core/jobs/transforms/validation/auth_validation_test.py b/core/jobs/transforms/validation/auth_validation_test.py index c8672aaf9c06..15f0241a939c 100644 --- a/core/jobs/transforms/validation/auth_validation_test.py +++ b/core/jobs/transforms/validation/auth_validation_test.py @@ -28,12 +28,16 @@ import apache_beam as beam -(auth_models,) = models.Registry.import_models([models.NAMES.auth]) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import auth_models + +(auth_models,) = models.Registry.import_models([models.Names.AUTH]) class ValidateFirebaseSeedModelIdTests(job_test_utils.PipelinedTestBase): - def test_reports_error_for_invalid_id(self): + def test_reports_error_for_invalid_id(self) -> None: model_with_invalid_id = auth_models.FirebaseSeedModel( id='2', created_on=self.NOW, last_updated=self.NOW) @@ -48,7 +52,7 @@ def test_reports_error_for_invalid_id(self): model_with_invalid_id, auth_models.ONLY_FIREBASE_SEED_MODEL_ID), ]) - def test_reports_nothing_for_valid_id(self): + def test_reports_nothing_for_valid_id(self) -> None: model_with_valid_id = auth_models.FirebaseSeedModel( id=auth_models.ONLY_FIREBASE_SEED_MODEL_ID, created_on=self.NOW, last_updated=self.NOW) @@ -65,7 +69,7 @@ def test_reports_nothing_for_valid_id(self): class ValidateUserIdByFirebaseAuthIdModelIdTests( job_test_utils.PipelinedTestBase): - def test_reports_error_for_invalid_id(self): + def test_reports_error_for_invalid_id(self) -> None: model_with_invalid_id = auth_models.UserIdByFirebaseAuthIdModel( id='-!\'"', user_id='1', created_on=self.NOW, last_updated=self.NOW) @@ -81,7 +85,7 @@ 
def test_reports_error_for_invalid_id(self): model_with_invalid_id, feconf.FIREBASE_AUTH_ID_REGEX), ]) - def test_reports_nothing_for_valid_id(self): + def test_reports_nothing_for_valid_id(self) -> None: model_with_valid_id = auth_models.UserIdByFirebaseAuthIdModel( id='123', user_id='1', created_on=self.NOW, last_updated=self.NOW) @@ -97,7 +101,7 @@ def test_reports_nothing_for_valid_id(self): class RelationshipsOfTests(test_utils.TestBase): - def test_user_auth_details_model_relationships(self): + def test_user_auth_details_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserAuthDetailsModel', 'firebase_auth_id'), @@ -107,13 +111,13 @@ def test_user_auth_details_model_relationships(self): 'UserAuthDetailsModel', 'gae_id'), ['UserIdentifiersModel']) - def test_user_id_by_firebase_auth_id_model_relationships(self): + def test_user_id_by_firebase_auth_id_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserIdByFirebaseAuthIdModel', 'user_id'), ['UserAuthDetailsModel']) - def test_user_identifiers_model_relationships(self): + def test_user_identifiers_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserIdentifiersModel', 'user_id'), diff --git a/core/jobs/transforms/validation/base_validation.py b/core/jobs/transforms/validation/base_validation.py index 94096aef34b1..e62448c08a35 100644 --- a/core/jobs/transforms/validation/base_validation.py +++ b/core/jobs/transforms/validation/base_validation.py @@ -28,10 +28,11 @@ from __future__ import annotations import datetime +import enum import re from core import feconf -from core import python_utils +from core.domain import change_domain from core.jobs import job_utils from core.jobs.decorators import validation_decorators from core.jobs.types import base_validation_errors @@ -39,23 +40,42 @@ 
import apache_beam as beam -(base_models, exp_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.exploration]) +from typing import Any, Final, Generic, Iterator, Type, TypeVar, Union -BASE_MODEL_ID_PATTERN = r'^[A-Za-z0-9-_]{1,%s}$' % base_models.ID_LENGTH -MAX_CLOCK_SKEW_SECS = datetime.timedelta(seconds=1) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models -VALIDATION_MODES = python_utils.create_enum('neutral', 'strict', 'non_strict') # pylint: disable=invalid-name +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) +BASE_MODEL_ID_PATTERN: str = r'^[A-Za-z0-9-_]{1,%s}$' % base_models.ID_LENGTH +MAX_CLOCK_SKEW_SECS: Final = datetime.timedelta(seconds=1) -class ValidateDeletedModel(beam.DoFn): +ModelInstanceType = TypeVar('ModelInstanceType', bound='base_models.BaseModel') + + +class ValidationModes(enum.Enum): + """Enum for validation modes.""" + + NEUTRAL = 'neutral' + STRICT = 'strict' + NON_STRICT = 'non_strict' + + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. +class ValidateDeletedModel(beam.DoFn): # type: ignore[misc] """DoFn to check whether models marked for deletion are stale. Doesn't use the AuditsExisting decorator because it audits deleted models, not existing ones. """ - def process(self, entity): + def process( + self, entity: base_models.BaseModel + ) -> Iterator[base_validation_errors.ModelExpiredError]: """Yields audit errors that are discovered in the input model. 
Args: @@ -74,8 +94,12 @@ def process(self, entity): yield base_validation_errors.ModelExpiredError(cloned_entity) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. @validation_decorators.AuditsExisting(base_models.BaseModel) -class ValidateBaseModelId(beam.DoFn): +class ValidateBaseModelId(beam.DoFn): # type: ignore[misc] """DoFn to validate model ids. IMPORTANT: Models with special ID checks should derive from this class and @@ -84,8 +108,8 @@ class ValidateBaseModelId(beam.DoFn): specific model type. """ - def __init__(self): - super(ValidateBaseModelId, self).__init__() + def __init__(self) -> None: + super().__init__() # IMPORTANT: Only picklable objects can be stored on DoFns! This is # because DoFns are serialized with pickle when run on a pipeline (and # might be run on many different machines). Any other types assigned to @@ -93,7 +117,9 @@ def __init__(self): # https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled self._pattern = BASE_MODEL_ID_PATTERN - def process(self, entity): + def process( + self, entity: base_models.BaseModel + ) -> Iterator[base_validation_errors.ModelIdRegexError]: """Function that defines how to process each entity in a pipeline of models. @@ -110,11 +136,17 @@ def process(self, entity): cloned_entity, self._pattern) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. 
@validation_decorators.AuditsExisting(base_models.BaseCommitLogEntryModel) -class ValidatePostCommitStatus(beam.DoFn): +class ValidatePostCommitStatus(beam.DoFn): # type: ignore[misc] """DoFn to validate post_commit_status.""" - def process(self, entity): + def process( + self, entity: base_models.BaseCommitLogEntryModel + ) -> Iterator[base_validation_errors.InvalidCommitStatusError]: """Function validates that post_commit_status is either public or private @@ -131,13 +163,19 @@ def process(self, entity): yield base_validation_errors.InvalidCommitStatusError(cloned_entity) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. @validation_decorators.AuditsExisting(base_models.BaseCommitLogEntryModel) -class ValidatePostCommitIsPrivate(beam.DoFn): +class ValidatePostCommitIsPrivate(beam.DoFn): # type: ignore[misc] """DoFn to check if post_commit_status is private when post_commit_is_private is true and vice-versa. """ - def process(self, entity): + def process( + self, entity: base_models.BaseCommitLogEntryModel + ) -> Iterator[base_validation_errors.InvalidPrivateCommitStatusError]: """Function validates that post_commit_is_private is true iff post_commit_status is private @@ -164,13 +202,19 @@ def process(self, entity): ) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. 
@validation_decorators.AuditsExisting(base_models.BaseCommitLogEntryModel) -class ValidatePostCommitIsPublic(beam.DoFn): +class ValidatePostCommitIsPublic(beam.DoFn): # type: ignore[misc] """DoFn to check if post_commit_status is public when post_commit_is_public is true and vice-versa. """ - def process(self, entity): + def process( + self, entity: base_models.BaseCommitLogEntryModel + ) -> Iterator[base_validation_errors.InvalidPublicCommitStatusError]: """Function validates that post_commit_is_public is true iff post_commit_status is public. @@ -196,12 +240,23 @@ def process(self, entity): ) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. @validation_decorators.AuditsExisting(base_models.BaseModel) -class ValidateModelTimestamps(beam.DoFn): +class ValidateModelTimestamps(beam.DoFn): # type: ignore[misc] """DoFn to check whether created_on and last_updated timestamps are valid. """ - def process(self, entity): + def process( + self, entity: base_models.BaseModel + ) -> Iterator[ + Union[ + base_validation_errors.InconsistentTimestampsError, + base_validation_errors.ModelMutatedDuringJobError + ] + ]: """Function that defines how to process each entity in a pipeline of models. @@ -228,13 +283,24 @@ def process(self, entity): cloned_entity) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. 
@validation_decorators.AuditsExisting(base_models.BaseModel) -class ValidateModelDomainObjectInstances(beam.DoFn): +class ValidateModelDomainObjectInstances( + beam.DoFn, Generic[ModelInstanceType] # type: ignore[misc] +): """DoFn to check whether the model instance passes the validation of the domain object for model. """ - def _get_model_domain_object_instance(self, unused_item): + # Here we use type Any because in child classes this method can be + # redefined with domain objects as return type. So, to allow every + # domain object as return type, we used Any here. + def _get_model_domain_object_instance( + self, unused_item: ModelInstanceType + ) -> Any: """Returns a domain object instance created from the model. This method can be overridden by subclasses, if needed. @@ -247,7 +313,9 @@ def _get_model_domain_object_instance(self, unused_item): """ return None - def _get_domain_object_validation_type(self, unused_item): + def _get_domain_object_validation_type( + self, unused_item: ModelInstanceType + ) -> ValidationModes: """Returns the type of domain object validation to be performed. Some of the storage models support a strict/non strict mode depending @@ -264,9 +332,11 @@ def _get_domain_object_validation_type(self, unused_item): Returns: str. The type of validation mode: neutral, strict or non strict. """ - return VALIDATION_MODES.neutral + return ValidationModes.NEUTRAL - def process(self, entity): + def process( + self, entity: ModelInstanceType + ) -> Iterator[base_validation_errors.ModelDomainObjectValidateError]: """Function that defines how to process each entity in a pipeline of models. 
@@ -281,11 +351,11 @@ def process(self, entity): validation_type = self._get_domain_object_validation_type(entity) if domain_object is None: return - if validation_type == VALIDATION_MODES.neutral: + if validation_type == ValidationModes.NEUTRAL: domain_object.validate() - elif validation_type == VALIDATION_MODES.strict: + elif validation_type == ValidationModes.STRICT: domain_object.validate(strict=True) - elif validation_type == VALIDATION_MODES.non_strict: + elif validation_type == ValidationModes.NON_STRICT: domain_object.validate(strict=False) else: raise Exception( @@ -293,17 +363,23 @@ def process(self, entity): validation_type)) except Exception as e: yield base_validation_errors.ModelDomainObjectValidateError( - entity, e) + entity, str(e)) -class BaseValidateCommitCmdsSchema(beam.DoFn): +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. +class BaseValidateCommitCmdsSchema(beam.DoFn, Generic[ModelInstanceType]): # type: ignore[misc] """DoFn to validate schema of commit commands in commit_cmds dict. Decorators are not required here as _get_change_domain_class is not implemented. This class is used as a parent class in other places. """ - def _get_change_domain_class(self, unused_item): + def _get_change_domain_class( + self, unused_item: ModelInstanceType + ) -> Type[change_domain.BaseChange]: """Returns a Change domain class. This should be implemented by subclasses. @@ -322,7 +398,14 @@ def _get_change_domain_class(self, unused_item): 'The _get_change_domain_class() method is missing from the derived ' 'class. 
It should be implemented in the derived class.') - def process(self, entity): + def process( + self, entity: ModelInstanceType + ) -> Iterator[ + Union[ + base_validation_errors.CommitCmdsNoneError, + base_validation_errors.CommitCmdsValidateError + ] + ]: """Validates schema of commit commands in commit_cmds dict. Args: @@ -341,6 +424,15 @@ def process(self, entity): # no commit command domain object defined for this model. yield base_validation_errors.CommitCmdsNoneError(entity) return + # Ruling out the possibility of any other model instance for mypy type + # checking. + assert isinstance( + entity, + ( + base_models.BaseSnapshotMetadataModel, + base_models.BaseCommitLogEntryModel + ) + ) for commit_cmd_dict in entity.commit_cmds: if not commit_cmd_dict: continue @@ -348,15 +440,25 @@ def process(self, entity): change_domain_object(commit_cmd_dict) except Exception as e: yield base_validation_errors.CommitCmdsValidateError( - entity, commit_cmd_dict, e) + entity, commit_cmd_dict, str(e)) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. @validation_decorators.AuditsExisting( base_models.BaseCommitLogEntryModel, base_models.BaseSnapshotMetadataModel) -class ValidateCommitType(beam.DoFn): +class ValidateCommitType(beam.DoFn): # type: ignore[misc] """DoFn to check whether commit type is valid.""" - def process(self, entity): + def process( + self, + entity: Union[ + base_models.BaseCommitLogEntryModel, + base_models.BaseSnapshotMetadataModel + ] + ) -> Iterator[base_validation_errors.InvalidCommitTypeError]: """Function that defines how to process each entity in a pipeline of models. 
diff --git a/core/jobs/transforms/validation/base_validation_registry.py b/core/jobs/transforms/validation/base_validation_registry.py index 006957f66a90..8eda47fedee2 100644 --- a/core/jobs/transforms/validation/base_validation_registry.py +++ b/core/jobs/transforms/validation/base_validation_registry.py @@ -28,6 +28,11 @@ from __future__ import annotations from core.jobs.decorators import validation_decorators +from core.jobs.types import model_property + +import apache_beam as beam +from typing import Dict, FrozenSet, Set, Tuple, Type + from core.jobs.transforms.validation import auth_validation # pylint: disable=unused-import # isort: skip from core.jobs.transforms.validation import base_validation # pylint: disable=unused-import # isort: skip from core.jobs.transforms.validation import blog_validation # pylint: disable=unused-import # isort: skip @@ -44,7 +49,7 @@ from core.jobs.transforms.validation import user_validation # pylint: disable=unused-import # isort: skip -def get_audit_do_fn_types_by_kind(): +def get_audit_do_fn_types_by_kind() -> Dict[str, FrozenSet[Type[beam.DoFn]]]: """Returns the set of DoFns targeting each kind of model. Returns: @@ -54,7 +59,9 @@ def get_audit_do_fn_types_by_kind(): return validation_decorators.AuditsExisting.get_audit_do_fn_types_by_kind() -def get_id_referencing_properties_by_kind_of_possessor(): +def get_id_referencing_properties_by_kind_of_possessor() -> Dict[ + str, Tuple[Tuple[model_property.ModelProperty, Tuple[str, ...]], ...] +]: """Returns properties whose values refer to the IDs of the corresponding set of model kinds, grouped by the kind of model the properties belong to. @@ -68,7 +75,7 @@ def get_id_referencing_properties_by_kind_of_possessor(): .get_id_referencing_properties_by_kind_of_possessor()) -def get_all_model_kinds_referenced_by_properties(): +def get_all_model_kinds_referenced_by_properties() -> Set[str]: """Returns all model kinds that are referenced by another model's property. 
Returns: diff --git a/core/jobs/transforms/validation/base_validation_registry_test.py b/core/jobs/transforms/validation/base_validation_registry_test.py index 9e79108d4991..d640aab2225f 100644 --- a/core/jobs/transforms/validation/base_validation_registry_test.py +++ b/core/jobs/transforms/validation/base_validation_registry_test.py @@ -25,64 +25,72 @@ class GetAuditsByKindTests(test_utils.TestBase): - def test_returns_value_from_decorator(self): - unique_obj = object() + unique_obj = object() - @classmethod - def get_audit_do_fn_types_by_kind_mock(unused_cls): - """Returns the unique_obj.""" - return unique_obj + # Here we use object because we need to mock the behavior of + # 'base_validation_registry.get_audit_do_fn_types_by_kind' method. + @classmethod + def get_audit_do_fn_types_by_kind_mock(cls) -> object: + """Returns the unique_obj.""" + return cls.unique_obj + def test_returns_value_from_decorator(self) -> None: get_audit_do_fn_types_by_kind_swap = self.swap( validation_decorators.AuditsExisting, 'get_audit_do_fn_types_by_kind', - get_audit_do_fn_types_by_kind_mock) + self.get_audit_do_fn_types_by_kind_mock) with get_audit_do_fn_types_by_kind_swap: self.assertIs( base_validation_registry.get_audit_do_fn_types_by_kind(), - unique_obj) + self.unique_obj) class GetIdReferencingPropertiesByKindOfPossessorTests(test_utils.TestBase): - def test_returns_value_from_decorator(self): - unique_obj = object() + unique_obj = object() - @classmethod - def get_id_referencing_properties_by_kind_of_possessor_mock(unused_cls): - """Returns the unique_obj.""" - return unique_obj + # Here we use object because we need to mock the behavior of + # 'get_id_referencing_properties_by_kind_of_possessor' method of + # base_validation_registry. 
+ @classmethod + def get_id_referencing_properties_by_kind_of_possessor_mock(cls) -> object: + """Returns the unique_obj.""" + return cls.unique_obj + def test_returns_value_from_decorator(self) -> None: get_id_referencing_properties_by_kind_of_possessor_swap = self.swap( validation_decorators.RelationshipsOf, 'get_id_referencing_properties_by_kind_of_possessor', - get_id_referencing_properties_by_kind_of_possessor_mock) + self.get_id_referencing_properties_by_kind_of_possessor_mock) with get_id_referencing_properties_by_kind_of_possessor_swap: self.assertIs( base_validation_registry .get_id_referencing_properties_by_kind_of_possessor(), - unique_obj) + self.unique_obj) class GetAllModelKindsReferencedByPropertiesTests(test_utils.TestBase): - def test_returns_value_from_decorator(self): - unique_obj = object() + unique_obj = object() - @classmethod - def get_all_model_kinds_referenced_by_properties_mock(unused_cls): - """Returns the unique_obj.""" - return unique_obj + # Here we use object because we need to mock the behavior of + # 'get_all_model_kinds_referenced_by_properties' method of + # base_validation_registry. 
+ @classmethod + def get_all_model_kinds_referenced_by_properties_mock(cls) -> object: + """Returns the unique_obj.""" + return cls.unique_obj + def test_returns_value_from_decorator(self) -> None: get_all_model_kinds_referenced_by_properties_swap = self.swap( validation_decorators.RelationshipsOf, 'get_all_model_kinds_referenced_by_properties', - get_all_model_kinds_referenced_by_properties_mock) + self.get_all_model_kinds_referenced_by_properties_mock) with get_all_model_kinds_referenced_by_properties_swap: self.assertIs( base_validation_registry .get_all_model_kinds_referenced_by_properties(), - unique_obj) + self.unique_obj) diff --git a/core/jobs/transforms/validation/base_validation_test.py b/core/jobs/transforms/validation/base_validation_test.py index 0ece058c28ec..b9d617a61da0 100644 --- a/core/jobs/transforms/validation/base_validation_test.py +++ b/core/jobs/transforms/validation/base_validation_test.py @@ -22,6 +22,7 @@ from core import feconf from core.domain import change_domain +from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import state_domain from core.jobs import job_test_utils @@ -31,20 +32,29 @@ import apache_beam as beam -(base_models, exp_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.exploration]) +from typing import Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import exp_models + +(base_models, exp_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, + models.Names.EXPLORATION +]) class MockDomainObject(base_models.BaseModel): - def validate(self, strict=True): + def validate(self, strict: bool = True) -> None: """Mock validate function.""" pass class ValidateDeletedTests(job_test_utils.PipelinedTestBase): - def test_process_reports_error_for_old_deleted_model(self): + def test_process_reports_error_for_old_deleted_model(self) -> None: expired_model = base_models.BaseModel( id='123', 
deleted=True, @@ -64,7 +74,7 @@ def test_process_reports_error_for_old_deleted_model(self): class ValidateModelTimeFieldTests(job_test_utils.PipelinedTestBase): - def test_process_reports_model_timestamp_relationship_error(self): + def test_process_reports_model_timestamp_relationship_error(self) -> None: invalid_timestamp = base_models.BaseModel( id='123', created_on=self.NOW, @@ -81,7 +91,7 @@ def test_process_reports_model_timestamp_relationship_error(self): invalid_timestamp), ]) - def test_process_reports_model_mutated_during_job_error(self): + def test_process_reports_model_mutated_during_job_error(self) -> None: invalid_timestamp = base_models.BaseModel( id='124', created_on=self.NOW, @@ -101,7 +111,7 @@ def test_process_reports_model_mutated_during_job_error(self): class ValidateModelIdTests(job_test_utils.PipelinedTestBase): - def test_validate_model_id(self): + def test_validate_model_id(self) -> None: invalid_id_model = base_models.BaseModel( id='123@?!*', created_on=self.YEAR_AGO, @@ -122,7 +132,7 @@ def test_validate_model_id(self): class ValidatePostCommitIsInvalidTests(job_test_utils.PipelinedTestBase): - def test_validate_post_commit_is_invalid(self): + def test_validate_post_commit_is_invalid(self) -> None: invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -147,7 +157,9 @@ def test_validate_post_commit_is_invalid(self): class ValidatePostCommitIsPrivateTests(job_test_utils.PipelinedTestBase): - def test_validate_post_commit_is_private_when_status_is_public(self): + def test_validate_post_commit_is_private_when_status_is_public( + self + ) -> None: invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -169,7 +181,9 @@ def test_validate_post_commit_is_private_when_status_is_public(self): invalid_commit_status), ]) - def test_validate_post_commit_is_private_when_status_is_private(self): + def test_validate_post_commit_is_private_when_status_is_private( + 
self + ) -> None: invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -194,7 +208,7 @@ def test_validate_post_commit_is_private_when_status_is_private(self): class ValidatePostCommitIsPublicTests(job_test_utils.PipelinedTestBase): - def test_validate_post_commit_is_public_when_status_is_public(self): + def test_validate_post_commit_is_public_when_status_is_public(self) -> None: invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -213,7 +227,9 @@ def test_validate_post_commit_is_public_when_status_is_public(self): self.assert_pcoll_empty(output) - def test_validate_post_commit_is_public_when_status_is_private(self): + def test_validate_post_commit_is_public_when_status_is_private( + self + ) -> None: invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -235,7 +251,7 @@ def test_validate_post_commit_is_public_when_status_is_private(self): invalid_commit_status), ]) - def test_validate_post_commit_is_public_raise_exception(self): + def test_validate_post_commit_is_public_raise_exception(self) -> None: invalid_commit_status = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -259,51 +275,109 @@ def test_validate_post_commit_is_public_raise_exception(self): class MockValidateModelDomainObjectInstancesWithNeutral( - base_validation.ValidateModelDomainObjectInstances): - def _get_model_domain_object_instance(self, item): # pylint: disable=unused-argument + base_validation.ValidateModelDomainObjectInstances[base_models.BaseModel] +): + def _get_model_domain_object_instance( + self, _: base_models.BaseModel + ) -> MockDomainObject: + """Method redefined for testing purpose returning instance of + MockDomainObject. 
+ """ return MockDomainObject() - def _get_domain_object_validation_type(self, item): # pylint: disable=unused-argument - return base_validation.VALIDATION_MODES.neutral + def _get_domain_object_validation_type( + self, _: base_models.BaseModel + ) -> base_validation.ValidationModes: + """Method redefined for testing purpose returning neutral mode + of validation. + """ + return base_validation.ValidationModes.NEUTRAL class MockValidateModelDomainObjectInstancesWithStrict( - base_validation.ValidateModelDomainObjectInstances): - def _get_model_domain_object_instance(self, item): # pylint: disable=unused-argument + base_validation.ValidateModelDomainObjectInstances[base_models.BaseModel] +): + def _get_model_domain_object_instance( + self, _: base_models.BaseModel + ) -> MockDomainObject: + """Method redefined for testing purpose returning instance of + MockDomainObject. + """ return MockDomainObject() - def _get_domain_object_validation_type(self, item): # pylint: disable=unused-argument - return base_validation.VALIDATION_MODES.strict + def _get_domain_object_validation_type( + self, _: base_models.BaseModel + ) -> base_validation.ValidationModes: + """Method redefined for testing purpose returning strict mode + of validation. + """ + return base_validation.ValidationModes.STRICT class MockValidateModelDomainObjectInstancesWithNonStrict( - base_validation.ValidateModelDomainObjectInstances): - def _get_model_domain_object_instance(self, item): # pylint: disable=unused-argument + base_validation.ValidateModelDomainObjectInstances[base_models.BaseModel] +): + def _get_model_domain_object_instance( + self, _: base_models.BaseModel + ) -> MockDomainObject: + """Method redefined for testing purpose returning instance of + MockDomainObject. 
+ """ return MockDomainObject() - def _get_domain_object_validation_type(self, item): # pylint: disable=unused-argument - return base_validation.VALIDATION_MODES.non_strict + def _get_domain_object_validation_type( + self, _: base_models.BaseModel + ) -> base_validation.ValidationModes: + """Method redefined for testing purpose returning non-strict mode + of validation. + """ + return base_validation.ValidationModes.NON_STRICT class MockValidateModelDomainObjectInstancesWithInvalid( - base_validation.ValidateModelDomainObjectInstances): - def _get_model_domain_object_instance(self, item): # pylint: disable=unused-argument + base_validation.ValidateModelDomainObjectInstances[base_models.BaseModel] +): + def _get_model_domain_object_instance( + self, _: base_models.BaseModel + ) -> MockDomainObject: + """Method redefined for testing purpose returning instance of + MockDomainObject. + """ return MockDomainObject() - def _get_domain_object_validation_type(self, item): # pylint: disable=unused-argument + # Here we use MyPy ignore because the signature of this method doesn't + # match with super class's _get_domain_object_validation_type method, + # because in super class's method we are returning 'ValidationModes' Enum + # and here for testing purposes we are returning string value. So, due + # to this conflict in return types, a conflict in signatures occurred + # which causes MyPy to throw an error. Thus, to avoid the error, we + # used ignore here. + def _get_domain_object_validation_type( # type: ignore[override] + self, _: base_models.BaseModel + ) -> str: + """Method redefined for testing purpose returning string literal, + to check error 'Invalid validation type for domain object'. 
+ """ return 'invalid' class MockValidateExplorationModelDomainObjectInstances( - base_validation.ValidateModelDomainObjectInstances): - def _get_model_domain_object_instance(self, item): + base_validation.ValidateModelDomainObjectInstances[ + exp_models.ExplorationModel + ] +): + def _get_model_domain_object_instance( + self, item: exp_models.ExplorationModel + ) -> exp_domain.Exploration: + """Returns an Exploration domain object given an exploration + model loaded from the datastore. + """ return exp_fetchers.get_exploration_from_model(item) class ValidateModelDomainObjectInstancesTests(job_test_utils.PipelinedTestBase): - def test_validation_type_for_domain_object( - self): + def test_validation_type_for_domain_object(self) -> None: model = base_models.BaseModel( id='mock-123', deleted=False, @@ -320,7 +394,8 @@ def test_validation_type_for_domain_object( self.assert_pcoll_equal(output, []) def test_validation_type_for_domain_object_with_neutral_type( - self): + self + ) -> None: model = base_models.BaseModel( id='mock-123', deleted=False, @@ -337,7 +412,8 @@ def test_validation_type_for_domain_object_with_neutral_type( self.assert_pcoll_equal(output, []) def test_validation_type_for_domain_object_with_strict_type( - self): + self + ) -> None: model = base_models.BaseModel( id='mock-123', deleted=False, @@ -354,7 +430,8 @@ def test_validation_type_for_domain_object_with_strict_type( self.assert_pcoll_equal(output, []) def test_validation_type_for_domain_object_with_non_strict_type( - self): + self + ) -> None: model = base_models.BaseModel( id='mock-123', deleted=False, @@ -371,7 +448,8 @@ def test_validation_type_for_domain_object_with_non_strict_type( self.assert_pcoll_equal(output, []) def test_error_is_raised_with_invalid_validation_type_for_domain_object( - self): + self + ) -> None: model = base_models.BaseModel( id='mock-123', deleted=False, @@ -388,7 +466,7 @@ def test_error_is_raised_with_invalid_validation_type_for_domain_object( model, 'Invalid 
validation type for domain object: invalid') ]) - def test_validation_type_for_exploration_domain_object(self): + def test_validation_type_for_exploration_domain_object(self) -> None: model_instance1 = exp_models.ExplorationModel( id='mock-123', title='title', @@ -398,10 +476,13 @@ def test_validation_type_for_exploration_domain_object(self): states={ feconf.DEFAULT_INIT_STATE_NAME: ( state_domain.State.create_default_state( - feconf.DEFAULT_INIT_STATE_NAME, is_initial_state=True + feconf.DEFAULT_INIT_STATE_NAME, + 'content_0', 'default_outcome_1', + is_initial_state=True ).to_dict()), }, states_schema_version=feconf.CURRENT_STATE_SCHEMA_VERSION, + next_content_id_index=2, created_on=self.YEAR_AGO, last_updated=self.NOW ) @@ -415,10 +496,12 @@ def test_validation_type_for_exploration_domain_object(self): states={ feconf.DEFAULT_INIT_STATE_NAME: ( state_domain.State.create_default_state( - 'end', is_initial_state=True + 'end', 'content_0', 'default_outcome_1', + is_initial_state=True ).to_dict()), }, states_schema_version=feconf.CURRENT_STATE_SCHEMA_VERSION, + next_content_id_index=2, created_on=self.YEAR_AGO, last_updated=self.NOW ) @@ -437,7 +520,7 @@ def test_validation_type_for_exploration_domain_object(self): class ValidateCommitTypeTests(job_test_utils.PipelinedTestBase): - def test_validate_commit_type(self): + def test_validate_commit_type(self) -> None: invalid_commit_type_model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -460,28 +543,55 @@ def test_validate_commit_type(self): class MockValidateCommitCmdsSchema( - base_validation.BaseValidateCommitCmdsSchema): - - def process(self, input_model): + base_validation.BaseValidateCommitCmdsSchema[base_models.BaseModel] +): + + # Here we use MyPy ignore because the signature of this method + # doesn't match with super class's process() method, because in + # in super class's process() method we are returning CommitCmdsValidateError + # and here for testing purposes we are 
returning 'None'. So, due + to this conflict in return types, a conflict in signatures occurred + which causes MyPy to throw an error. Thus, to avoid the error, + we used ignore here. + def process( # type: ignore[override] + self, input_model: base_models.BaseModel + ) -> None: + """Method defined to check that error is displayed when + _get_change_domain_class() method is missing from the + derived class. + """ self._get_change_domain_class(input_model) class MockValidateCommitCmdsSchemaChangeDomain( - base_validation.BaseValidateCommitCmdsSchema): - - def _get_change_domain_class(self, item): + base_validation.BaseValidateCommitCmdsSchema[base_models.BaseModel] +): + + # Here we use MyPy ignore because the signature of this method doesn't + # match with super class's `_get_change_domain_class()` method, because + # in super class's method we are returning Type[BaseChange] and here for + # testing purposes we are returning 'None'. So, due to this conflict in + # return types, a conflict in signatures occurred which causes MyPy to + # throw an error. Thus, to avoid the error, we used ignore here. 
+ def _get_change_domain_class(self, _: base_models.BaseModel) -> None: # type: ignore[override] + """Method defined for testing purpose.""" pass -class MockValidateWrongSchema(base_validation.BaseValidateCommitCmdsSchema): +class MockValidateWrongSchema( + base_validation.BaseValidateCommitCmdsSchema[base_models.BaseModel] +): - def _get_change_domain_class(self, item): # pylint: disable=unused-argument + def _get_change_domain_class( + self, _: base_models.BaseModel + ) -> Type[change_domain.BaseChange]: + """Method defined for testing purpose returning BaseChange class.""" return change_domain.BaseChange class ValidateCommitCmdsSchemaTests(job_test_utils.PipelinedTestBase): - def test_validate_none_commit(self): + def test_validate_none_commit(self) -> None: invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='invalid', created_on=self.YEAR_AGO, @@ -501,7 +611,7 @@ def test_validate_none_commit(self): base_validation_errors.CommitCmdsNoneError(invalid_commit_cmd_model) ]) - def test_validate_wrong_commit_cmd_missing(self): + def test_validate_wrong_commit_cmd_missing(self) -> None: invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='invalid', created_on=self.YEAR_AGO, @@ -524,7 +634,7 @@ def test_validate_wrong_commit_cmd_missing(self): 'Missing cmd key in change dict') ]) - def test_validate_wrong_commit_cmd(self): + def test_validate_wrong_commit_cmd(self) -> None: invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='invalid', created_on=self.YEAR_AGO, @@ -547,7 +657,7 @@ def test_validate_wrong_commit_cmd(self): 'Command invalid_test_command is not allowed') ]) - def test_validate_raise_not_implemented(self): + def test_validate_raise_not_implemented(self) -> None: invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -557,7 +667,7 @@ def test_validate_raise_not_implemented(self): post_commit_status='', commit_cmds=[{}]) - with self.assertRaisesRegexp( + with 
self.assertRaisesRegex( NotImplementedError, re.escape( 'The _get_change_domain_class() method is missing from the ' @@ -566,7 +676,7 @@ def test_validate_raise_not_implemented(self): ): MockValidateCommitCmdsSchema().process(invalid_commit_cmd_model) - def test_validate_commit_cmds(self): + def test_validate_commit_cmds(self) -> None: invalid_commit_cmd_model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, diff --git a/core/jobs/transforms/validation/blog_validation.py b/core/jobs/transforms/validation/blog_validation.py index 74c3f45b5f4c..b3987230c1a1 100644 --- a/core/jobs/transforms/validation/blog_validation.py +++ b/core/jobs/transforms/validation/blog_validation.py @@ -25,25 +25,33 @@ from core.jobs.decorators import validation_decorators from core.jobs.transforms.validation import base_validation from core.jobs.types import blog_validation_errors +from core.jobs.types import model_property from core.platform import models import apache_beam as beam +from typing import Iterator, List, Tuple, Type, Union + MYPY = False if MYPY: # pragma: no cover from mypy_imports import blog_models from mypy_imports import user_models (blog_models, user_models) = models.Registry.import_models( - [models.NAMES.blog, models.NAMES.user]) + [models.Names.BLOG, models.Names.USER]) @validation_decorators.AuditsExisting(blog_models.BlogPostModel) class ValidateBlogPostModelDomainObjectsInstances( - base_validation.ValidateModelDomainObjectInstances): + base_validation.ValidateModelDomainObjectInstances[ + blog_models.BlogPostModel + ] +): """Provides the validation type for validating blog post objects.""" - def _get_model_domain_object_instance(self, blog_post_model): + def _get_model_domain_object_instance( + self, blog_post_model: blog_models.BlogPostModel + ) -> blog_domain.BlogPost: """Returns blog post domain object instance created from the model. 
Args: @@ -64,34 +72,55 @@ def _get_model_domain_object_instance(self, blog_post_model): blog_post_model.published_on ) - def _get_domain_object_validation_type(self, unused_item): + def _get_domain_object_validation_type( + self, blog_post_model: blog_models.BlogPostModel + ) -> base_validation.ValidationModes: """Returns the type of domain object validation to be performed. Args: - unused_item: datastore_services.Model. Entity to validate. + blog_post_model: datastore_services.Model. Entity to validate. Returns: str. The type of validation mode: strict or non strict. """ - # TODO(#13397): Write a custom job to avoid applying strict validation - # to private blog posts. We can't determine public/private without - # performing an NDB get() operation, which are forbidden in Apache Beam - # jobs. - return base_validation.VALIDATION_MODES.strict + if blog_post_model.published_on is None: + return base_validation.ValidationModes.NON_STRICT + return base_validation.ValidationModes.STRICT + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. @validation_decorators.AuditsExisting( blog_models.BlogPostModel, blog_models.BlogPostSummaryModel) -class ValidateModelPublishTimestamps(beam.DoFn): - """DoFn to check whether created_on and last_updated timestamps are valid. +class ValidateBlogModelTimestamps(beam.DoFn): # type: ignore[misc] + """DoFn to check whether created_on, last_updated and published_on + timestamps are valid for both blog post models and blog post summary models. 
""" - def process(self, input_model): - """Function that validates that the published timestamp of the blog post - models is either None or is greater than created on time, is less than - current datetime and is equal to or greater than the last updated - timestamp. + def process( + self, input_model: Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel + ] + ) -> Iterator[ + Union[ + blog_validation_errors.InconsistentLastUpdatedTimestampsError, + blog_validation_errors.ModelMutatedDuringJobErrorForLastUpdated, + blog_validation_errors.ModelMutatedDuringJobErrorForPublishedOn, + blog_validation_errors.InconsistentPublishLastUpdatedTimestampsError + ] + ]: + """Function that validates that the last updated timestamp of the blog + post models is greater than created on time, is less than current + datetime and is equal to or greater than the published on timestamp. + For blog posts migrated from 'Medium', published_on will be less than + created_on time and last_updated time. Therefore published_on can be + less than or greater than created_on time and less than or equal to + last_updated time for blog posts. Args: input_model: datastore_services.Model. Entity to validate. @@ -102,32 +131,39 @@ def process(self, input_model): timestamps. 
""" model = job_utils.clone_model(input_model) - if model.published_on is None: - return if model.created_on > ( - model.published_on + base_validation.MAX_CLOCK_SKEW_SECS): - yield blog_validation_errors.InconsistentPublishTimestampsError( + model.last_updated + base_validation.MAX_CLOCK_SKEW_SECS): + yield blog_validation_errors.InconsistentLastUpdatedTimestampsError( model) current_datetime = datetime.datetime.utcnow() - if (model.published_on - base_validation.MAX_CLOCK_SKEW_SECS) > ( - current_datetime): - yield blog_validation_errors.ModelMutatedDuringJobError( - model) + if model.published_on: + if (model.published_on - base_validation.MAX_CLOCK_SKEW_SECS) > ( + current_datetime): + yield blog_validation_errors.ModelMutatedDuringJobErrorForPublishedOn(model) # pylint: disable=line-too-long + + if (model.published_on - base_validation.MAX_CLOCK_SKEW_SECS) > ( + model.last_updated): + yield blog_validation_errors.InconsistentPublishLastUpdatedTimestampsError(model) # pylint: disable=line-too-long - if (model.published_on - base_validation.MAX_CLOCK_SKEW_SECS) > ( - model.last_updated): - yield blog_validation_errors.InconsistentPublishLastUpdatedTimestampsError(model) # pylint: disable=line-too-long + if (model.last_updated - base_validation.MAX_CLOCK_SKEW_SECS) > ( + current_datetime): + yield blog_validation_errors.ModelMutatedDuringJobErrorForLastUpdated(model) # pylint: disable=line-too-long @validation_decorators.AuditsExisting( blog_models.BlogPostSummaryModel) class ValidateBlogSummaryModelDomainObjectsInstances( - base_validation.ValidateModelDomainObjectInstances): + base_validation.ValidateModelDomainObjectInstances[ + blog_models.BlogPostSummaryModel + ] +): """Provides the validation type for validating blog post objects.""" - def _get_model_domain_object_instance(self, summary_model): + def _get_model_domain_object_instance( + self, summary_model: blog_models.BlogPostSummaryModel + ) -> blog_domain.BlogPostSummary: """Returns blog post domain 
object instance created from the model. Args: @@ -148,44 +184,89 @@ def _get_model_domain_object_instance(self, summary_model): summary_model.published_on ) - def _get_domain_object_validation_type(self, unused_item): + def _get_domain_object_validation_type( + self, blog_post_summary: blog_models.BlogPostSummaryModel + ) -> base_validation.ValidationModes: """Returns the type of domain object validation to be performed. Args: - unused_item: datastore_services.Model. Entity to validate. + blog_post_summary: datastore_services.Model. Entity to validate. Returns: str. The type of validation mode: strict or non strict. """ - # TODO(#13397): Write a custom job to avoid applying strict validation - # to private blog posts. We can't determine public/private without - # performing an NDB get() operation, which are forbidden in Apache Beam - # jobs. - return base_validation.VALIDATION_MODES.strict + if blog_post_summary.published_on is None: + return base_validation.ValidationModes.NON_STRICT + + return base_validation.ValidationModes.STRICT @validation_decorators.RelationshipsOf( blog_models.BlogPostModel) -def blog_post_model_relationships(model): +def blog_post_model_relationships( + model: Type[blog_models.BlogPostModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[Union[ + blog_models.BlogPostSummaryModel, + blog_models.BlogPostRightsModel, + user_models.UserSettingsModel + ]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" - yield model.id, [blog_models.BlogPostSummaryModel] - yield model.id, [blog_models.BlogPostRightsModel] - yield model.author_id, [user_models.UserSettingsModel] + yield (model.id, [blog_models.BlogPostSummaryModel]) + yield (model.id, [blog_models.BlogPostRightsModel]) + yield (model.author_id, [user_models.UserSettingsModel]) @validation_decorators.RelationshipsOf( blog_models.BlogPostSummaryModel) -def blog_post_summary_model_relationships(model): +def 
blog_post_summary_model_relationships( + model: Type[blog_models.BlogPostSummaryModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[Union[ + blog_models.BlogPostModel, + blog_models.BlogPostRightsModel, + user_models.UserSettingsModel + ]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" - yield model.id, [blog_models.BlogPostModel] - yield model.id, [blog_models.BlogPostRightsModel] - yield model.author_id, [user_models.UserSettingsModel] + yield (model.id, [blog_models.BlogPostModel]) + yield (model.id, [blog_models.BlogPostRightsModel]) + yield (model.author_id, [user_models.UserSettingsModel]) @validation_decorators.RelationshipsOf( blog_models.BlogPostRightsModel) -def blog_post_rights_model_relationships(model): +def blog_post_rights_model_relationships( + model: Type[blog_models.BlogPostRightsModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel, + user_models.UserSettingsModel + ]]] + ]]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [blog_models.BlogPostModel] yield model.id, [blog_models.BlogPostSummaryModel] yield model.editor_ids, [user_models.UserSettingsModel] + + +@validation_decorators.RelationshipsOf(blog_models.BlogAuthorDetailsModel) +def blog_author_details_model_relationships( + model: Type[blog_models.BlogAuthorDetailsModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[user_models.UserSettingsModel]] + ]]: + """Yields how the properties of the model relates to the ID of others.""" + yield model.author_id, [user_models.UserSettingsModel] diff --git a/core/jobs/transforms/validation/blog_validation_test.py b/core/jobs/transforms/validation/blog_validation_test.py index 2701857fa3b4..fea7cea69ba3 100644 --- a/core/jobs/transforms/validation/blog_validation_test.py +++ b/core/jobs/transforms/validation/blog_validation_test.py @@ -31,46 
+31,52 @@ if MYPY: # pragma: no cover from mypy_imports import blog_models -(blog_models,) = models.Registry.import_models([models.NAMES.blog]) +(blog_models,) = models.Registry.import_models([models.Names.BLOG]) class RelationshipsOfTests(test_utils.TestBase): def test_blog_post_model_relationships(self) -> None: - self.assertItemsEqual( # type: ignore[no-untyped-call] - validation_decorators.RelationshipsOf.get_model_kind_references( # type: ignore[no-untyped-call] + self.assertItemsEqual( + validation_decorators.RelationshipsOf.get_model_kind_references( 'BlogPostModel', 'id'), ['BlogPostSummaryModel', 'BlogPostRightsModel']) - self.assertItemsEqual( # type: ignore[no-untyped-call] - validation_decorators.RelationshipsOf.get_model_kind_references( # type: ignore[no-untyped-call] + self.assertItemsEqual( + validation_decorators.RelationshipsOf.get_model_kind_references( 'BlogPostModel', 'author_id'), ['UserSettingsModel']) def test_blog_post_summary_model_relationships(self) -> None: - self.assertItemsEqual( # type: ignore[no-untyped-call] - validation_decorators.RelationshipsOf.get_model_kind_references( # type: ignore[no-untyped-call] + self.assertItemsEqual( + validation_decorators.RelationshipsOf.get_model_kind_references( 'BlogPostSummaryModel', 'id'), ['BlogPostModel', 'BlogPostRightsModel']) - self.assertItemsEqual( # type: ignore[no-untyped-call] - validation_decorators.RelationshipsOf.get_model_kind_references( # type: ignore[no-untyped-call] + self.assertItemsEqual( + validation_decorators.RelationshipsOf.get_model_kind_references( 'BlogPostSummaryModel', 'author_id'), ['UserSettingsModel']) def test_blog_post_rights_model_relationships(self) -> None: - self.assertItemsEqual( # type: ignore[no-untyped-call] - validation_decorators.RelationshipsOf.get_model_kind_references( # type: ignore[no-untyped-call] + self.assertItemsEqual( + validation_decorators.RelationshipsOf.get_model_kind_references( 'BlogPostRightsModel', 'id'), ['BlogPostModel', 
'BlogPostSummaryModel']) - self.assertItemsEqual( # type: ignore[no-untyped-call] - validation_decorators.RelationshipsOf.get_model_kind_references( # type: ignore[no-untyped-call] + self.assertItemsEqual( + validation_decorators.RelationshipsOf.get_model_kind_references( 'BlogPostRightsModel', 'editor_ids'), ['UserSettingsModel']) + def test_blog_author_details_model_relationships(self) -> None: + self.assertItemsEqual( + validation_decorators.RelationshipsOf.get_model_kind_references( + 'BlogAuthorDetailsModel', 'author_id'), + ['UserSettingsModel']) + -class ValidateModelPublishTimeFieldTests(job_test_utils.PipelinedTestBase): +class ValidateBlogModelTimeFieldTests(job_test_utils.PipelinedTestBase): def test_reports_model_created_on_timestamp_relationship_error( - self + self ) -> None: invalid_timestamp = blog_models.BlogPostModel( id='validblogid1', @@ -85,12 +91,12 @@ def test_reports_model_created_on_timestamp_relationship_error( output = ( self.pipeline | beam.Create([invalid_timestamp]) - | beam.ParDo(blog_validation.ValidateModelPublishTimestamps()) + | beam.ParDo(blog_validation.ValidateBlogModelTimestamps()) ) - self.assert_pcoll_equal( # type: ignore[no-untyped-call] + self.assert_pcoll_equal( output, [ - blog_validation_errors.InconsistentPublishTimestampsError( + blog_validation_errors.InconsistentLastUpdatedTimestampsError( invalid_timestamp), ] ) @@ -111,10 +117,10 @@ def test_reports_model_last_updated_timestamp_relationship_error( output = ( self.pipeline | beam.Create([invalid_timestamp]) - | beam.ParDo(blog_validation.ValidateModelPublishTimestamps()) + | beam.ParDo(blog_validation.ValidateBlogModelTimestamps()) ) - self.assert_pcoll_equal( # type: ignore[no-untyped-call] + self.assert_pcoll_equal( output, [ blog_validation_errors .InconsistentPublishLastUpdatedTimestampsError( @@ -129,38 +135,66 @@ def test_process_reports_no_error_if_published_on_is_none(self) -> None: content='

    hello

    ,', author_id='user', url_fragment='url-fragment-1', - created_on=self.NOW, + created_on=self.YEAR_AGO, last_updated=self.NOW, published_on=None) output = ( self.pipeline | beam.Create([valid_timestamp]) - | beam.ParDo(blog_validation.ValidateModelPublishTimestamps()) + | beam.ParDo(blog_validation.ValidateBlogModelTimestamps()) ) - self.assert_pcoll_equal(output, []) # type: ignore[no-untyped-call] + self.assert_pcoll_equal(output, []) - def test_process_reports_model_mutated_during_job_error(self) -> None: + def test_process_reports_model_mutated_during_job_error_for_published_on( + self) -> None: invalid_timestamp = blog_models.BlogPostModel( id='124', title='Sample Title', content='

    hello

    ,', author_id='user', url_fragment='url-fragment-1', - created_on=self.NOW, - last_updated=self.YEAR_LATER, + created_on=self.YEAR_AGO, + last_updated=self.NOW, published_on=self.YEAR_LATER) output = ( self.pipeline | beam.Create([invalid_timestamp]) - | beam.ParDo(blog_validation.ValidateModelPublishTimestamps()) + | beam.ParDo(blog_validation.ValidateBlogModelTimestamps()) ) - self.assert_pcoll_equal( # type: ignore[no-untyped-call] + self.assert_pcoll_equal( output, [ - blog_validation_errors.ModelMutatedDuringJobError( + blog_validation_errors.ModelMutatedDuringJobErrorForPublishedOn( + invalid_timestamp), + blog_validation_errors.InconsistentPublishLastUpdatedTimestampsError( # pylint: disable=line-too-long + invalid_timestamp), + ] + ) + + def test_process_reports_model_mutated_during_job_error_for_last_updated( + self) -> None: + invalid_timestamp = blog_models.BlogPostModel( + id='124', + title='Sample Title', + content='

    hello

    ,', + author_id='user', + url_fragment='url-fragment-1', + created_on=self.YEAR_AGO, + last_updated=self.YEAR_LATER, + published_on=self.YEAR_AGO) + + output = ( + self.pipeline + | beam.Create([invalid_timestamp]) + | beam.ParDo(blog_validation.ValidateBlogModelTimestamps()) + ) + + self.assert_pcoll_equal( + output, [ + blog_validation_errors.ModelMutatedDuringJobErrorForLastUpdated( invalid_timestamp), ] ) @@ -182,14 +216,28 @@ def test_validation_type_for_domain_object_strict(self) -> None: thumbnail_filename='sample.svg', tags=['learners']) - blog_rights_model = blog_models.BlogPostRightsModel( + output = ( + self.pipeline + | beam.Create([blog_model]) + | beam.ParDo( + blog_validation.ValidateBlogPostModelDomainObjectsInstances()) + ) + + self.assert_pcoll_equal(output, []) + + def test_validation_type_for_domain_object_non_strict(self) -> None: + blog_model = blog_models.BlogPostModel( id='validblogid2', - editor_ids=['user'], - blog_post_is_published=True, + title='Sample Title', + content='

    hello

    ,', + author_id='user', + url_fragment='url-fragment-1', created_on=self.YEAR_AGO, - last_updated=self.NOW) - blog_rights_model.update_timestamps() - blog_rights_model.put() + last_updated=self.NOW, + published_on=None, + thumbnail_filename=None, + tags=[]) + output = ( self.pipeline | beam.Create([blog_model]) @@ -197,7 +245,7 @@ def test_validation_type_for_domain_object_strict(self) -> None: blog_validation.ValidateBlogPostModelDomainObjectsInstances()) ) - self.assert_pcoll_equal(output, []) # type: ignore[no-untyped-call] + self.assert_pcoll_equal(output, []) class ValidateBlogPostSummaryModelDomainObjectsInstancesTests( @@ -216,14 +264,28 @@ def test_validation_type_for_domain_object_strict(self) -> None: thumbnail_filename='sample.svg', tags=['learners']) - blog_rights_model = blog_models.BlogPostRightsModel( - id='validblogid4', - editor_ids=['user'], - blog_post_is_published=True, + output = ( + self.pipeline + | beam.Create([blog_summary_model]) + | beam.ParDo( + blog_validation.ValidateBlogSummaryModelDomainObjectsInstances()) # pylint: disable=line-too-long + ) + + self.assert_pcoll_equal(output, []) + + def test_validation_type_for_domain_object_non_strict(self) -> None: + blog_summary_model = blog_models.BlogPostSummaryModel( + id='validblogid5', + title='Sample Title', + summary='

    hello

    ,', + author_id='user', + url_fragment='url-fragment-1', created_on=self.YEAR_AGO, - last_updated=self.NOW) - blog_rights_model.update_timestamps() - blog_rights_model.put() + last_updated=self.NOW, + published_on=None, + thumbnail_filename=None, + tags=[]) + output = ( self.pipeline | beam.Create([blog_summary_model]) @@ -231,4 +293,4 @@ def test_validation_type_for_domain_object_strict(self) -> None: blog_validation.ValidateBlogSummaryModelDomainObjectsInstances()) # pylint: disable=line-too-long ) - self.assert_pcoll_equal(output, []) # type: ignore[no-untyped-call] + self.assert_pcoll_equal(output, []) diff --git a/core/jobs/transforms/validation/collection_validation.py b/core/jobs/transforms/validation/collection_validation.py index af93266561c7..955985268738 100644 --- a/core/jobs/transforms/validation/collection_validation.py +++ b/core/jobs/transforms/validation/collection_validation.py @@ -23,19 +23,31 @@ from core.jobs import job_utils from core.jobs.decorators import validation_decorators from core.jobs.transforms.validation import base_validation +from core.jobs.types import model_property from core.platform import models -(collection_models,) = models.Registry.import_models([models.NAMES.collection]) +from typing import Iterator, List, Optional, Tuple, Type, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import collection_models + +(collection_models,) = models.Registry.import_models([models.Names.COLLECTION]) @validation_decorators.AuditsExisting( collection_models.CollectionSnapshotMetadataModel) class ValidateCollectionSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + collection_models.CollectionSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for CollectionSnapshotMetadataModel. 
""" - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, input_model: collection_models.CollectionSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[collection_domain.CollectionChange]: """Returns a change domain class. Args: @@ -49,7 +61,17 @@ def _get_change_domain_class(self, input_model): # pylint: disable=unused-argume @validation_decorators.RelationshipsOf(collection_models.CollectionSummaryModel) -def collection_summary_model_relationships(model): +def collection_summary_model_relationships( + model: Type[collection_models.CollectionSummaryModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[Union[ + collection_models.CollectionModel, + collection_models.CollectionRightsModel + ]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [collection_models.CollectionModel] @@ -59,12 +81,18 @@ def collection_summary_model_relationships(model): @validation_decorators.AuditsExisting( collection_models.CollectionRightsSnapshotMetadataModel) class ValidateCollectionRightsSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + collection_models.CollectionRightsSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for CollectionRightsSnapshotMetadataModel. """ - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, + input_model: collection_models.CollectionRightsSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[rights_domain.CollectionRightsChange]: """Returns a change domain class. 
Args: @@ -80,10 +108,22 @@ def _get_change_domain_class(self, input_model): # pylint: disable=unused-argume @validation_decorators.AuditsExisting( collection_models.CollectionCommitLogEntryModel) class ValidateCollectionCommitLogEntryModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + collection_models.CollectionCommitLogEntryModel + ] +): """Overrides _get_change_domain_class for CollectionCommitLogEntryModel.""" - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + # Here we use MyPy ignore because the signature of this method doesn't + # match with super class's _get_change_domain_class() method. + def _get_change_domain_class( # type: ignore[override] + self, input_model: collection_models.CollectionCommitLogEntryModel + ) -> Optional[ + Type[Union[ + rights_domain.CollectionRightsChange, + collection_domain.CollectionChange + ]] + ]: """Returns a change domain class. Args: diff --git a/core/jobs/transforms/validation/collection_validation_test.py b/core/jobs/transforms/validation/collection_validation_test.py index 8a72f7317007..48b4fb641765 100644 --- a/core/jobs/transforms/validation/collection_validation_test.py +++ b/core/jobs/transforms/validation/collection_validation_test.py @@ -28,14 +28,19 @@ import apache_beam as beam +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import collection_models + (base_models, collection_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.collection]) + [models.Names.BASE_MODEL, models.Names.COLLECTION]) class ValidateCollectionSnapshotMetadataModelTests( job_test_utils.PipelinedTestBase): - def test_validate_change_domain_implemented(self): + def test_validate_change_domain_implemented(self) -> None: invalid_commit_cmd_model = ( collection_models.CollectionSnapshotMetadataModel( id='model_id-1', @@ -56,7 +61,7 @@ def 
test_validate_change_domain_implemented(self): self.assert_pcoll_equal(output, []) - def test_collection_change_object_with_missing_cmd(self): + def test_collection_change_object_with_missing_cmd(self) -> None: invalid_commit_cmd_model = ( collection_models.CollectionSnapshotMetadataModel( id='123', @@ -81,7 +86,7 @@ def test_collection_change_object_with_missing_cmd(self): 'Missing cmd key in change dict') ]) - def test_collection_change_object_with_invalid_cmd(self): + def test_collection_change_object_with_invalid_cmd(self) -> None: invalid_commit_cmd_model = ( collection_models.CollectionSnapshotMetadataModel( id='123', @@ -106,7 +111,9 @@ def test_collection_change_object_with_invalid_cmd(self): 'Command invalid is not allowed') ]) - def test_collection_change_object_with_missing_attribute_in_cmd(self): + def test_collection_change_object_with_missing_attribute_in_cmd( + self + ) -> None: invalid_commit_cmd_model = ( collection_models.CollectionSnapshotMetadataModel( id='123', @@ -140,7 +147,9 @@ def test_collection_change_object_with_missing_attribute_in_cmd(self): 'exploration_id, new_value') ]) - def test_collection_change_object_with_extra_attribute_in_cmd(self): + def test_collection_change_object_with_extra_attribute_in_cmd( + self + ) -> None: invalid_commit_cmd_model = ( collection_models.CollectionSnapshotMetadataModel( id='123', @@ -179,7 +188,9 @@ def test_collection_change_object_with_extra_attribute_in_cmd(self): 'The following extra attributes are present: invalid') ]) - def test_collection_change_object_with_invalid_collection_property(self): + def test_collection_change_object_with_invalid_collection_property( + self + ) -> None: invalid_commit_cmd_model = ( collection_models.CollectionSnapshotMetadataModel( id='123', @@ -218,7 +229,7 @@ def test_collection_change_object_with_invalid_collection_property(self): class RelationshipsOfTests(test_utils.TestBase): - def test_collection_summary_model_relationships(self): + def 
test_collection_summary_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'CollectionSummaryModel', 'id'), @@ -228,7 +239,7 @@ def test_collection_summary_model_relationships(self): class ValidateCollectionRightsSnapshotMetadataModelTests( job_test_utils.PipelinedTestBase): - def test_collection_rights_change_object_with_missing_cmd(self): + def test_collection_rights_change_object_with_missing_cmd(self) -> None: commit_dict = {'invalid': 'data'} invalid_commit_cmd_model = ( collection_models.CollectionRightsSnapshotMetadataModel( @@ -255,7 +266,7 @@ def test_collection_rights_change_object_with_missing_cmd(self): 'Missing cmd key in change dict') ]) - def test_collection_rights_change_object_with_invalid_cmd(self): + def test_collection_rights_change_object_with_invalid_cmd(self) -> None: commit_dict = {'cmd': 'invalid'} invalid_commit_cmd_model = ( collection_models.CollectionRightsSnapshotMetadataModel( @@ -283,7 +294,8 @@ def test_collection_rights_change_object_with_invalid_cmd(self): ]) def test_collection_rights_change_object_with_missing_attribute_in_cmd( - self): + self + ) -> None: commit_dict = { 'cmd': 'change_role', 'assignee_id': 'assignee_id', @@ -314,7 +326,9 @@ def test_collection_rights_change_object_with_missing_attribute_in_cmd( 'new_role, old_role') ]) - def test_collection_rights_change_object_with_extra_attribute_in_cmd(self): + def test_collection_rights_change_object_with_extra_attribute_in_cmd( + self + ) -> None: commit_dict = { 'cmd': 'change_private_viewability', 'old_viewable_if_private': 'old_viewable_if_private', @@ -346,7 +360,7 @@ def test_collection_rights_change_object_with_extra_attribute_in_cmd(self): 'The following extra attributes are present: invalid') ]) - def test_collection_rights_change_object_with_invalid_role(self): + def test_collection_rights_change_object_with_invalid_role(self) -> None: commit_dict = { 'cmd': 'change_role', 'assignee_id': 
'assignee_id', @@ -379,7 +393,9 @@ def test_collection_rights_change_object_with_invalid_role(self): 'invalid is not allowed') ]) - def test_collection_rights_change_object_with_invalid_status(self): + def test_collection_rights_change_object_with_invalid_status( + self + ) -> None: commit_dict = { 'cmd': 'change_collection_status', 'old_status': rights_domain.ACTIVITY_STATUS_PRIVATE, @@ -415,7 +431,7 @@ def test_collection_rights_change_object_with_invalid_status(self): class ValidateCollectionCommitLogEntryModelTests( job_test_utils.PipelinedTestBase): - def test_validate_rights_model(self): + def test_validate_rights_model(self) -> None: invalid_commit_cmd_model = ( collection_models.CollectionCommitLogEntryModel( id='rights_id123', @@ -437,7 +453,7 @@ def test_validate_rights_model(self): self.assert_pcoll_equal(output, []) - def test_validate_collection_model(self): + def test_validate_collection_model(self) -> None: invalid_commit_cmd_model = ( collection_models.CollectionCommitLogEntryModel( id='collection_id123', @@ -460,7 +476,7 @@ def test_validate_collection_model(self): self.assert_pcoll_equal(output, []) - def test_raises_commit_cmd_none_error(self): + def test_raises_commit_cmd_none_error(self) -> None: invalid_commit_cmd_model = ( collection_models.CollectionCommitLogEntryModel( id='model_id123', diff --git a/core/jobs/transforms/validation/config_validation.py b/core/jobs/transforms/validation/config_validation.py index bb7c4b72fc3f..c5b6d8bdddb5 100644 --- a/core/jobs/transforms/validation/config_validation.py +++ b/core/jobs/transforms/validation/config_validation.py @@ -24,18 +24,29 @@ from core.jobs.transforms.validation import base_validation from core.platform import models -(config_models,) = models.Registry.import_models([models.NAMES.config]) +from typing import Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import config_models + +(config_models,) = models.Registry.import_models([models.Names.CONFIG]) 
@validation_decorators.AuditsExisting( config_models.ConfigPropertySnapshotMetadataModel) class ValidateConfigPropertySnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + config_models.ConfigPropertySnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for ConfigPropertySnapshotMetadataModel. """ - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, input_model: config_models.ConfigPropertySnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[config_domain.ConfigPropertyChange]: """Returns a change domain class. Args: @@ -51,12 +62,17 @@ def _get_change_domain_class(self, input_model): # pylint: disable=unused-argume @validation_decorators.AuditsExisting( config_models.PlatformParameterSnapshotMetadataModel) class ValidatePlatformParameterSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + config_models.PlatformParameterSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for PlatformParameterSnapshotMetadataModel. """ - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, input_model: config_models.PlatformParameterSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[parameter_domain.PlatformParameterChange]: """Returns a change domain class. 
Args: diff --git a/core/jobs/transforms/validation/config_validation_test.py b/core/jobs/transforms/validation/config_validation_test.py index 49c5b8c07464..7c8ef989663f 100644 --- a/core/jobs/transforms/validation/config_validation_test.py +++ b/core/jobs/transforms/validation/config_validation_test.py @@ -26,14 +26,21 @@ import apache_beam as beam +from typing import Dict, Final, List, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import config_models + (base_models, config_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.config]) + [models.Names.BASE_MODEL, models.Names.CONFIG]) class ValidateConfigPropertySnapshotMetadataModelTests( job_test_utils.PipelinedTestBase): - def test_validate_change_domain_implemented(self): + def test_validate_change_domain_implemented(self) -> None: invalid_commit_cmd_model = ( config_models.ConfigPropertySnapshotMetadataModel( id='model_id-1', @@ -54,7 +61,7 @@ def test_validate_change_domain_implemented(self): self.assert_pcoll_equal(output, []) - def test_config_property_change_object_with_missing_cmd(self): + def test_config_property_change_object_with_missing_cmd(self) -> None: invalid_commit_cmd_model = ( config_models.ConfigPropertySnapshotMetadataModel( id='model_id-1', @@ -79,7 +86,7 @@ def test_config_property_change_object_with_missing_cmd(self): 'Missing cmd key in change dict') ]) - def test_config_property_change_object_with_invalid_cmd(self): + def test_config_property_change_object_with_invalid_cmd(self) -> None: invalid_commit_cmd_model = ( config_models.ConfigPropertySnapshotMetadataModel( id='model_id-1', @@ -104,7 +111,9 @@ def test_config_property_change_object_with_invalid_cmd(self): 'Command invalid is not allowed') ]) - def test_config_property_change_object_with_missing_attribute_in_cmd(self): + def test_config_property_change_object_with_missing_attribute_in_cmd( + self + ) -> None: invalid_commit_cmd_model = ( 
config_models.ConfigPropertySnapshotMetadataModel( id='model_id-1', @@ -130,7 +139,9 @@ def test_config_property_change_object_with_missing_attribute_in_cmd(self): 'new_value') ]) - def test_config_property_change_object_with_extra_attribute_in_cmd(self): + def test_config_property_change_object_with_extra_attribute_in_cmd( + self + ) -> None: commit_dict = { 'cmd': 'change_property_value', 'new_value': 'new_value', @@ -164,9 +175,11 @@ def test_config_property_change_object_with_extra_attribute_in_cmd(self): class ValidatePlatformParameterSnapshotMetadataModelTests( job_test_utils.PipelinedTestBase): - CMD_EDIT_RULES = parameter_domain.PlatformParameterChange.CMD_EDIT_RULES + CMD_EDIT_RULES: Final = ( + parameter_domain.PlatformParameterChange.CMD_EDIT_RULES + ) - def test_validate_change_domain_implemented(self): + def test_validate_change_domain_implemented(self) -> None: invalid_commit_cmd_model = ( config_models.PlatformParameterSnapshotMetadataModel( id='model_id-1', @@ -188,7 +201,9 @@ def test_validate_change_domain_implemented(self): self.assert_pcoll_equal(output, []) - def test_param_change_object_with_missing_cmd_raises_exception(self): + def test_param_change_object_with_missing_cmd_raises_exception( + self + ) -> None: invalid_commit_cmd_model = ( config_models.PlatformParameterSnapshotMetadataModel( id='model_id-1', @@ -214,7 +229,9 @@ def test_param_change_object_with_missing_cmd_raises_exception(self): 'Missing cmd key in change dict') ]) - def test_param_change_object_with_invalid_cmd_raises_exception(self): + def test_param_change_object_with_invalid_cmd_raises_exception( + self + ) -> None: invalid_commit_cmd_model = ( config_models.PlatformParameterSnapshotMetadataModel( id='model_id-1', @@ -241,7 +258,8 @@ def test_param_change_object_with_invalid_cmd_raises_exception(self): ]) def test_param_change_object_missing_attribute_in_cmd_raises_exception( - self): + self + ) -> None: invalid_commit_cmd_model = ( 
config_models.PlatformParameterSnapshotMetadataModel( id='model_id-1', @@ -268,8 +286,9 @@ def test_param_change_object_missing_attribute_in_cmd_raises_exception( ]) def test_param_change_object_with_extra_attribute_in_cmd_raises_exception( - self): - commit_dict = { + self + ) -> None: + commit_dict: Dict[str, Union[str, List[str]]] = { 'cmd': self.CMD_EDIT_RULES, 'new_rules': [], 'invalid': 'invalid' diff --git a/core/jobs/transforms/validation/exp_validation.py b/core/jobs/transforms/validation/exp_validation.py index 171819de8df3..035a4af50f93 100644 --- a/core/jobs/transforms/validation/exp_validation.py +++ b/core/jobs/transforms/validation/exp_validation.py @@ -23,22 +23,33 @@ from core.jobs import job_utils from core.jobs.decorators import validation_decorators from core.jobs.transforms.validation import base_validation +from core.jobs.types import model_property from core.platform import models -( - exp_models, story_models -) = models.Registry.import_models([ - models.NAMES.exploration, models.NAMES.story +from typing import Iterator, List, Optional, Tuple, Type, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + from mypy_imports import story_models + +(exp_models, story_models) = models.Registry.import_models([ + models.Names.EXPLORATION, models.Names.STORY ]) @validation_decorators.AuditsExisting( exp_models.ExplorationSnapshotMetadataModel) class ValidateExplorationSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + exp_models.ExplorationSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for exploration models """ - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, input_model: exp_models.ExplorationSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[exp_domain.ExplorationChange]: """Returns a change domain class. 
Args: @@ -56,7 +67,14 @@ def _get_change_domain_class(self, input_model): # pylint: disable=unused-argume @validation_decorators.RelationshipsOf(exp_models.ExplorationContextModel) -def exploration_context_model_relationships(model): +def exploration_context_model_relationships( + model: Type[exp_models.ExplorationContextModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[Union[story_models.StoryModel, exp_models.ExplorationModel]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.story_id, [story_models.StoryModel] @@ -64,7 +82,16 @@ def exploration_context_model_relationships(model): @validation_decorators.RelationshipsOf(exp_models.ExpSummaryModel) -def exp_summary_model_relationships(model): +def exp_summary_model_relationships( + model: Type[exp_models.ExpSummaryModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[Union[ + exp_models.ExplorationModel, exp_models.ExplorationRightsModel + ]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [exp_models.ExplorationModel] @@ -74,10 +101,15 @@ def exp_summary_model_relationships(model): @validation_decorators.AuditsExisting( exp_models.ExplorationRightsSnapshotMetadataModel) class ValidateExplorationRightsSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + exp_models.ExplorationRightsSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for exploration models """ - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, input_model: exp_models.ExplorationRightsSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[rights_domain.ExplorationRightsChange]: """Returns a change domain class. 
Args: @@ -93,10 +125,22 @@ def _get_change_domain_class(self, input_model): # pylint: disable=unused-argume @validation_decorators.AuditsExisting( exp_models.ExplorationCommitLogEntryModel) class ValidateExplorationCommitLogEntryModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + exp_models.ExplorationCommitLogEntryModel + ] +): """Overrides _get_change_domain_class for exploration models """ - def _get_change_domain_class(self, input_model): + # Here we use MyPy ignore because the signature of this method doesn't + # match with super class's _get_change_domain_class() method. + def _get_change_domain_class( # type: ignore[override] + self, input_model: exp_models.ExplorationCommitLogEntryModel + ) -> Optional[ + Type[Union[ + rights_domain.ExplorationRightsChange, + exp_domain.ExplorationChange + ]] + ]: """Returns a change domain class. Args: diff --git a/core/jobs/transforms/validation/exp_validation_test.py b/core/jobs/transforms/validation/exp_validation_test.py index 3e58bacb9b6e..ce1558811583 100644 --- a/core/jobs/transforms/validation/exp_validation_test.py +++ b/core/jobs/transforms/validation/exp_validation_test.py @@ -28,14 +28,19 @@ import apache_beam as beam +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import exp_models + (base_models, exp_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.exploration]) + [models.Names.BASE_MODEL, models.Names.EXPLORATION]) class ValidateExplorationSnapshotMetadataModelTests( job_test_utils.PipelinedTestBase): - def test_validate_change_domain_implemented(self): + def test_validate_change_domain_implemented(self) -> None: invalid_commit_cmd_model = exp_models.ExplorationSnapshotMetadataModel( id='model_id-1', created_on=self.YEAR_AGO, @@ -57,7 +62,7 @@ def test_validate_change_domain_implemented(self): self.assert_pcoll_equal(output, []) - def 
test_validate_exp_model_object_with_missing_cmd(self): + def test_validate_exp_model_object_with_missing_cmd(self) -> None: invalid_commit_cmd_model = exp_models.ExplorationSnapshotMetadataModel( id='model_id-1', created_on=self.YEAR_AGO, @@ -83,7 +88,7 @@ def test_validate_exp_model_object_with_missing_cmd(self): 'Missing cmd key in change dict') ]) - def test_validate_exp_model_object_with_invalid_cmd(self): + def test_validate_exp_model_object_with_invalid_cmd(self) -> None: invalid_commit_cmd_model = exp_models.ExplorationSnapshotMetadataModel( id='model_id-1', created_on=self.YEAR_AGO, @@ -109,7 +114,9 @@ def test_validate_exp_model_object_with_invalid_cmd(self): 'Command invalid is not allowed') ]) - def test_validate_exp_model_object_with_missing_attribute_in_cmd(self): + def test_validate_exp_model_object_with_missing_attribute_in_cmd( + self + ) -> None: invalid_commit_cmd_model = exp_models.ExplorationSnapshotMetadataModel( id='model_id-1', created_on=self.YEAR_AGO, @@ -144,7 +151,9 @@ def test_validate_exp_model_object_with_missing_attribute_in_cmd(self): 'new_value, state_name') ]) - def test_validate_exp_model_object_with_extra_attribute_in_cmd(self): + def test_validate_exp_model_object_with_extra_attribute_in_cmd( + self + ) -> None: invalid_commit_cmd_model = exp_models.ExplorationSnapshotMetadataModel( id='model_id-1', created_on=self.YEAR_AGO, @@ -180,7 +189,9 @@ def test_validate_exp_model_object_with_extra_attribute_in_cmd(self): 'The following extra attributes are present: invalid') ]) - def test_validate_exp_model_object_with_invalid_exploration_property(self): + def test_validate_exp_model_object_with_invalid_exploration_property( + self + ) -> None: invalid_commit_cmd_model = exp_models.ExplorationSnapshotMetadataModel( id='model_id-1', created_on=self.YEAR_AGO, @@ -217,7 +228,9 @@ def test_validate_exp_model_object_with_invalid_exploration_property(self): 'invalid is not allowed') ]) - def 
test_validate_exp_model_object_with_invalid_state_property(self): + def test_validate_exp_model_object_with_invalid_state_property( + self + ) -> None: invalid_commit_cmd_model = exp_models.ExplorationSnapshotMetadataModel( id='model_id-1', created_on=self.YEAR_AGO, @@ -259,7 +272,7 @@ def test_validate_exp_model_object_with_invalid_state_property(self): class RelationshipsOfTests(test_utils.TestBase): - def test_exploration_context_model_relationships(self): + def test_exploration_context_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'ExplorationContextModel', 'story_id'), ['StoryModel']) @@ -267,7 +280,7 @@ def test_exploration_context_model_relationships(self): validation_decorators.RelationshipsOf.get_model_kind_references( 'ExplorationContextModel', 'id'), ['ExplorationModel']) - def test_exp_summary_model_relationships(self): + def test_exp_summary_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'ExpSummaryModel', 'id'), @@ -277,7 +290,7 @@ def test_exp_summary_model_relationships(self): class ValidateExplorationRightsSnapshotMetadataModelTests( job_test_utils.PipelinedTestBase): - def test_exploration_rights_change_object_with_missing_cmd(self): + def test_exploration_rights_change_object_with_missing_cmd(self) -> None: invalid_commit_cmd_model = ( exp_models.ExplorationRightsSnapshotMetadataModel( id='model_id-1', @@ -302,7 +315,7 @@ def test_exploration_rights_change_object_with_missing_cmd(self): 'Missing cmd key in change dict') ]) - def test_exploration_rights_change_object_with_invalid_cmd(self): + def test_exploration_rights_change_object_with_invalid_cmd(self) -> None: invalid_commit_cmd_model = ( exp_models.ExplorationRightsSnapshotMetadataModel( id='model_id-1', @@ -328,7 +341,8 @@ def test_exploration_rights_change_object_with_invalid_cmd(self): ]) def 
test_exploration_rights_change_object_with_missing_attribute_in_cmd( - self): + self + ) -> None: invalid_commit_cmd_model = ( exp_models.ExplorationRightsSnapshotMetadataModel( id='model_id-1', @@ -361,7 +375,8 @@ def test_exploration_rights_change_object_with_missing_attribute_in_cmd( ]) def test_exploration_rights_change_object_with_extra_attribute_in_cmd( - self): + self + ) -> None: invalid_commit_cmd_model = ( exp_models.ExplorationRightsSnapshotMetadataModel( id='model_id-1', @@ -397,7 +412,8 @@ def test_exploration_rights_change_object_with_extra_attribute_in_cmd( ]) def test_exploration_rights_change_object_with_invalid_role( - self): + self + ) -> None: invalid_commit_cmd_model = ( exp_models.ExplorationRightsSnapshotMetadataModel( id='model_id-1', @@ -434,7 +450,8 @@ def test_exploration_rights_change_object_with_invalid_role( ]) def test_exploration_rights_change_object_with_invalid_status( - self): + self + ) -> None: invalid_commit_cmd_model = ( exp_models.ExplorationRightsSnapshotMetadataModel( id='model_id-1', @@ -472,7 +489,7 @@ def test_exploration_rights_change_object_with_invalid_status( class ValidateExplorationCommitLogEntryModelTests( job_test_utils.PipelinedTestBase): - def test_validate_rights_model(self): + def test_validate_rights_model(self) -> None: invalid_commit_cmd_model = exp_models.ExplorationCommitLogEntryModel( id='rights_id123', created_on=self.YEAR_AGO, @@ -492,7 +509,7 @@ def test_validate_rights_model(self): self.assert_pcoll_equal(output, []) - def test_validate_exploration_model(self): + def test_validate_exploration_model(self) -> None: invalid_commit_cmd_model = exp_models.ExplorationCommitLogEntryModel( id='exploration_id123', created_on=self.YEAR_AGO, @@ -513,7 +530,7 @@ def test_validate_exploration_model(self): self.assert_pcoll_equal(output, []) - def test_raises_commit_cmd_none_error(self): + def test_raises_commit_cmd_none_error(self) -> None: invalid_commit_cmd_model = exp_models.ExplorationCommitLogEntryModel( 
id='model_id123', created_on=self.YEAR_AGO, diff --git a/core/jobs/transforms/validation/feedback_validation.py b/core/jobs/transforms/validation/feedback_validation.py index 5b31facebc52..d4103b9f6698 100644 --- a/core/jobs/transforms/validation/feedback_validation.py +++ b/core/jobs/transforms/validation/feedback_validation.py @@ -22,20 +22,36 @@ from core.jobs import job_utils from core.jobs.decorators import validation_decorators from core.jobs.types import feedback_validation_errors +from core.jobs.types import model_property from core.platform import models import apache_beam as beam -(exp_models, feedback_models) = models.Registry.import_models( - [models.NAMES.exploration, models.NAMES.feedback]) +from typing import Iterator, List, Tuple, Type +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import exp_models + from mypy_imports import feedback_models +(exp_models, feedback_models) = models.Registry.import_models([ + models.Names.EXPLORATION, + models.Names.FEEDBACK +]) + + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. 
@validation_decorators.AuditsExisting( feedback_models.GeneralFeedbackThreadModel) -class ValidateEntityType(beam.DoFn): +class ValidateEntityType(beam.DoFn): # type: ignore[misc] """DoFn to validate the entity type.""" - def process(self, input_model): + def process( + self, input_model: feedback_models.GeneralFeedbackThreadModel + ) -> Iterator[feedback_validation_errors.InvalidEntityTypeError]: """Function that checks if the entity type is valid Args: @@ -52,7 +68,14 @@ def process(self, input_model): @validation_decorators.RelationshipsOf(feedback_models.FeedbackAnalyticsModel) -def feedback_analytics_model_relationships(model): +def feedback_analytics_model_relationships( + model: Type[feedback_models.FeedbackAnalyticsModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[exp_models.ExplorationModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [exp_models.ExplorationModel] diff --git a/core/jobs/transforms/validation/feedback_validation_test.py b/core/jobs/transforms/validation/feedback_validation_test.py index f6d628c793cc..fb24c52f2420 100644 --- a/core/jobs/transforms/validation/feedback_validation_test.py +++ b/core/jobs/transforms/validation/feedback_validation_test.py @@ -27,12 +27,16 @@ import apache_beam as beam -(feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import feedback_models + +(feedback_models,) = models.Registry.import_models([models.Names.FEEDBACK]) class ValidateEntityTypeTests(job_test_utils.PipelinedTestBase): - def test_model_with_invalid_entity_type_raises_error(self): + def test_model_with_invalid_entity_type_raises_error(self) -> None: model = feedback_models.GeneralFeedbackThreadModel( id='123', entity_id='123', @@ -51,7 +55,7 @@ def test_model_with_invalid_entity_type_raises_error(self): feedback_validation_errors.InvalidEntityTypeError(model) ]) - def 
test_model_with_valid_entity_type_raises_no_error(self): + def test_model_with_valid_entity_type_raises_no_error(self) -> None: model = feedback_models.GeneralFeedbackThreadModel( id='123', entity_id='123', @@ -71,7 +75,7 @@ def test_model_with_valid_entity_type_raises_no_error(self): class RelationshipsOfTests(test_utils.TestBase): - def test_feedback_analytics_model_relationships(self): + def test_feedback_analytics_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'FeedbackAnalyticsModel', 'id'), ['ExplorationModel']) diff --git a/core/jobs/transforms/validation/improvements_validation.py b/core/jobs/transforms/validation/improvements_validation.py index fa0b259b18d1..81952862b9d2 100644 --- a/core/jobs/transforms/validation/improvements_validation.py +++ b/core/jobs/transforms/validation/improvements_validation.py @@ -25,19 +25,33 @@ import apache_beam as beam +from typing import Iterator + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import improvements_models + (improvements_models,) = models.Registry.import_models( - [models.NAMES.improvements]) + [models.Names.IMPROVEMENTS]) -@validation_decorators.AuditsExisting(improvements_models.TaskEntryModel) -class ValidateCompositeEntityId(beam.DoFn): +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. 
+@validation_decorators.AuditsExisting( + improvements_models.ExplorationStatsTaskEntryModel +) +class ValidateCompositeEntityId(beam.DoFn): # type: ignore[misc] """DoFn to validate the composite entity id.""" - def process(self, input_model): + def process( + self, input_model: improvements_models.ExplorationStatsTaskEntryModel + ) -> Iterator[improvements_validation_errors.InvalidCompositeEntityError]: """Function that checks if the composite entity id is valid Args: - input_model: improvements_models.TaskEntryModel. + input_model: improvements_models.ExplorationStatsTaskEntryModel. Entity to validate. Yields: @@ -46,7 +60,8 @@ def process(self, input_model): """ model = job_utils.clone_model(input_model) expected_composite_entity_id = ( - improvements_models.TaskEntryModel.generate_composite_entity_id( + improvements_models.ExplorationStatsTaskEntryModel + .generate_composite_entity_id( model.entity_type, model.entity_id, model.entity_version)) if model.composite_entity_id != expected_composite_entity_id: diff --git a/core/jobs/transforms/validation/improvements_validation_test.py b/core/jobs/transforms/validation/improvements_validation_test.py index 286c4e409fc7..0658af33c586 100644 --- a/core/jobs/transforms/validation/improvements_validation_test.py +++ b/core/jobs/transforms/validation/improvements_validation_test.py @@ -25,14 +25,18 @@ import apache_beam as beam +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import improvements_models + (improvements_models,) = models.Registry.import_models( - [models.NAMES.improvements]) + [models.Names.IMPROVEMENTS]) class ValidateCompositeEntityIdTests(job_test_utils.PipelinedTestBase): - def test_model_with_invalid_composite_entity(self): - model = improvements_models.TaskEntryModel( + def test_model_with_invalid_composite_entity(self) -> None: + model = improvements_models.ExplorationStatsTaskEntryModel( id='123', entity_id='999', entity_type='exploration', @@ -54,9 +58,9 @@ def 
test_model_with_invalid_composite_entity(self): improvements_validation_errors.InvalidCompositeEntityError(model) ]) - def test_model_with_valid_composite_entity(self): + def test_model_with_valid_composite_entity(self) -> None: # Value has the form: "[entity_type].[entity_id].[entity_version]". - model = improvements_models.TaskEntryModel( + model = improvements_models.ExplorationStatsTaskEntryModel( id='23', entity_id='999', entity_type='exploration', diff --git a/core/jobs/transforms/validation/question_validation.py b/core/jobs/transforms/validation/question_validation.py index 142e4f02697f..21e92a8d7d0f 100644 --- a/core/jobs/transforms/validation/question_validation.py +++ b/core/jobs/transforms/validation/question_validation.py @@ -22,19 +22,35 @@ from core.jobs import job_utils from core.jobs.decorators import validation_decorators from core.jobs.transforms.validation import base_validation +from core.jobs.types import model_property from core.platform import models +from typing import Iterator, List, Optional, Tuple, Type, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import datastore_services + from mypy_imports import question_models + from mypy_imports import skill_models + (question_models, skill_models) = models.Registry.import_models( - [models.NAMES.question, models.NAMES.skill]) + [models.Names.QUESTION, models.Names.SKILL]) + +datastore_services = models.Registry.import_datastore_services() @validation_decorators.AuditsExisting( question_models.QuestionSnapshotMetadataModel) class ValidateQuestionSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + question_models.QuestionSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for QuestionSnapshotMetadataModel.""" - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, input_model: 
question_models.QuestionSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[question_domain.QuestionChange]: """Returns a change domain class. Args: @@ -48,7 +64,16 @@ def _get_change_domain_class(self, input_model): # pylint: disable=unused-argume @validation_decorators.RelationshipsOf(question_models.QuestionSkillLinkModel) -def question_skill_link_model_relationships(model): +def question_skill_link_model_relationships( + model: Type[question_models.QuestionSkillLinkModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[Union[ + question_models.QuestionModel, skill_models.SkillModel + ]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [question_models.QuestionModel] @@ -57,14 +82,28 @@ def question_skill_link_model_relationships(model): @validation_decorators.RelationshipsOf( question_models.QuestionCommitLogEntryModel) -def question_commit_log_entry_model_relationships(model): +def question_commit_log_entry_model_relationships( + model: Type[question_models.QuestionCommitLogEntryModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[question_models.QuestionModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.question_id, [question_models.QuestionModel] @validation_decorators.RelationshipsOf(question_models.QuestionSummaryModel) -def question_summary_model_relationships(model): +def question_summary_model_relationships( + model: Type[question_models.QuestionSummaryModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[question_models.QuestionModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [question_models.QuestionModel] @@ -73,10 +112,17 @@ def question_summary_model_relationships(model): @validation_decorators.AuditsExisting( question_models.QuestionCommitLogEntryModel) class ValidateQuestionCommitLogEntryModel( - 
base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + question_models.QuestionCommitLogEntryModel + ] +): """Overrides _get_change_domain_class for QuestionCommitLogEntryModel.""" - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + # Here we use MyPy ignore because the signature of this method doesn't + # match with super class's _get_change_domain_class() method. + def _get_change_domain_class( # type: ignore[override] + self, input_model: question_models.QuestionCommitLogEntryModel # pylint: disable=unused-argument + ) -> Optional[Type[question_domain.QuestionChange]]: """Returns a change domain class. Args: diff --git a/core/jobs/transforms/validation/question_validation_test.py b/core/jobs/transforms/validation/question_validation_test.py index 07efaa0e8578..c2e238b17857 100644 --- a/core/jobs/transforms/validation/question_validation_test.py +++ b/core/jobs/transforms/validation/question_validation_test.py @@ -27,14 +27,19 @@ import apache_beam as beam +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import question_models + (base_models, question_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.question]) + [models.Names.BASE_MODEL, models.Names.QUESTION]) class ValidateQuestionSnapshotMetadataModelTests( job_test_utils.PipelinedTestBase): - def test_validate_change_domain_implemented(self): + def test_validate_change_domain_implemented(self) -> None: invalid_commit_cmd_model = ( question_models.QuestionSnapshotMetadataModel( id='123', @@ -55,7 +60,7 @@ def test_validate_change_domain_implemented(self): self.assert_pcoll_equal(output, []) - def test_change_dict_without_cmd(self): + def test_change_dict_without_cmd(self) -> None: invalid_commit_cmd_model = ( question_models.QuestionSnapshotMetadataModel( id='123', @@ -80,7 +85,7 @@ def test_change_dict_without_cmd(self): 'Missing cmd key in change 
dict') ]) - def test_change_dict_with_invalid_cmd(self): + def test_change_dict_with_invalid_cmd(self) -> None: invalid_commit_cmd_model = ( question_models.QuestionSnapshotMetadataModel( id='123', @@ -105,7 +110,7 @@ def test_change_dict_with_invalid_cmd(self): 'Command invalid is not allowed') ]) - def test_change_dict_with_missing_attributes_in_cmd(self): + def test_change_dict_with_missing_attributes_in_cmd(self) -> None: commit_dict = { 'cmd': 'update_question_property', 'property_name': 'question_state_data', @@ -135,7 +140,7 @@ def test_change_dict_with_missing_attributes_in_cmd(self): 'The following required attributes are missing: new_value') ]) - def test_change_dict_with_extra_attributes_in_cmd(self): + def test_change_dict_with_extra_attributes_in_cmd(self) -> None: invalid_commit_cmd_model = ( question_models.QuestionSnapshotMetadataModel( id='model_id-1', @@ -160,7 +165,7 @@ def test_change_dict_with_extra_attributes_in_cmd(self): 'The following extra attributes are present: invalid') ]) - def test_update_question_property_with_wrong_property_name(self): + def test_update_question_property_with_wrong_property_name(self) -> None: commit_dict = { 'cmd': 'update_question_property', 'property_name': 'wrong', @@ -195,7 +200,7 @@ def test_update_question_property_with_wrong_property_name(self): class RelationshipsOfTests(test_utils.TestBase): - def test_question_skill_link_model_relationships(self): + def test_question_skill_link_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'QuestionSkillLinkModel', 'id'), ['QuestionModel']) @@ -203,13 +208,13 @@ def test_question_skill_link_model_relationships(self): validation_decorators.RelationshipsOf.get_model_kind_references( 'QuestionSkillLinkModel', 'skill_id'), ['SkillModel']) - def test_question_commit_log_entry_model_relationships(self): + def test_question_commit_log_entry_model_relationships(self) -> None: self.assertItemsEqual( 
validation_decorators.RelationshipsOf.get_model_kind_references( 'QuestionCommitLogEntryModel', 'question_id'), ['QuestionModel']) - def test_question_summary_model_relationships(self): + def test_question_summary_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'QuestionSummaryModel', 'id'), ['QuestionModel']) @@ -218,7 +223,7 @@ def test_question_summary_model_relationships(self): class ValidateQuestionCommitLogEntryModelTests( job_test_utils.PipelinedTestBase): - def test_validate_question_model(self): + def test_validate_question_model(self) -> None: invalid_commit_cmd_model = ( question_models.QuestionCommitLogEntryModel( id='question_123', @@ -241,7 +246,7 @@ def test_validate_question_model(self): self.assert_pcoll_equal(output, []) - def test_raises_commit_cmd_none_error(self): + def test_raises_commit_cmd_none_error(self) -> None: invalid_commit_cmd_model = ( question_models.QuestionCommitLogEntryModel( id='model_123', diff --git a/core/jobs/transforms/validation/skill_validation.py b/core/jobs/transforms/validation/skill_validation.py index cf49f201c11b..ff6eaa8d434e 100644 --- a/core/jobs/transforms/validation/skill_validation.py +++ b/core/jobs/transforms/validation/skill_validation.py @@ -24,19 +24,26 @@ from core.jobs.transforms.validation import base_validation from core.platform import models +from typing import Optional, Type + MYPY = False if MYPY: # pragma: no cover from mypy_imports import skill_models -(skill_models,) = models.Registry.import_models([models.NAMES.skill]) +(skill_models,) = models.Registry.import_models([models.Names.SKILL]) @validation_decorators.AuditsExisting(skill_models.SkillSnapshotMetadataModel) class ValidateSkillSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + skill_models.SkillSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for 
SkillSnapshotMetadataModel.""" - def _get_change_domain_class(self, unused_input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, unused_input_model: skill_models.SkillSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[skill_domain.SkillChange]: """Returns a change domain class. Args: @@ -51,10 +58,17 @@ def _get_change_domain_class(self, unused_input_model): # pylint: disable=unused @validation_decorators.AuditsExisting(skill_models.SkillCommitLogEntryModel) class ValidateSkillCommitLogEntryModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + skill_models.SkillCommitLogEntryModel + ] +): """Overrides _get_change_domain_class for SkillCommitLogEntryModel.""" - def _get_change_domain_class(self, input_model): + # Here we use MyPy ignore because the signature of this method doesn't + # match with super class's _get_change_domain_class() method. + def _get_change_domain_class( # type: ignore[override] + self, input_model: skill_models.SkillCommitLogEntryModel + ) -> Optional[Type[skill_domain.SkillChange]]: """Returns a change domain class. 
Args: diff --git a/core/jobs/transforms/validation/skill_validation_test.py b/core/jobs/transforms/validation/skill_validation_test.py index 1e1331e138d1..3285245d1356 100644 --- a/core/jobs/transforms/validation/skill_validation_test.py +++ b/core/jobs/transforms/validation/skill_validation_test.py @@ -31,7 +31,7 @@ from mypy_imports import skill_models (base_models, skill_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.skill]) + [models.Names.BASE_MODEL, models.Names.SKILL]) class ValidateSkillSnapshotMetadataModelTests(job_test_utils.PipelinedTestBase): diff --git a/core/jobs/transforms/validation/story_validation.py b/core/jobs/transforms/validation/story_validation.py index b86adb52894a..59b9ed7de494 100644 --- a/core/jobs/transforms/validation/story_validation.py +++ b/core/jobs/transforms/validation/story_validation.py @@ -24,19 +24,26 @@ from core.jobs.transforms.validation import base_validation from core.platform import models +from typing import Optional, Type + MYPY = False if MYPY: # pragma: no cover from mypy_imports import story_models -(story_models,) = models.Registry.import_models([models.NAMES.story]) +(story_models,) = models.Registry.import_models([models.Names.STORY]) @validation_decorators.AuditsExisting(story_models.StorySnapshotMetadataModel) class ValidateStorySnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + story_models.StorySnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for StorySnapshotMetadataModel.""" - def _get_change_domain_class(self, unused_input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, unused_input_model: story_models.StorySnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[story_domain.StoryChange]: """Returns a change domain class. 
Args: @@ -51,10 +58,17 @@ def _get_change_domain_class(self, unused_input_model): # pylint: disable=unused @validation_decorators.AuditsExisting(story_models.StoryCommitLogEntryModel) class ValidateStoryCommitLogEntryModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + story_models.StoryCommitLogEntryModel + ] +): """Overrides _get_change_domain_class for StoryCommitLogEntryModel.""" - def _get_change_domain_class(self, input_model): + # Here we use MyPy ignore because the signature of this method doesn't + # match with super class's _get_change_domain_class() method. + def _get_change_domain_class( # type: ignore[override] + self, input_model: story_models.StoryCommitLogEntryModel + ) -> Optional[Type[story_domain.StoryChange]]: """Returns a change domain class. Args: diff --git a/core/jobs/transforms/validation/story_validation_test.py b/core/jobs/transforms/validation/story_validation_test.py index 25a2c25f19a3..f6da8b9ae438 100644 --- a/core/jobs/transforms/validation/story_validation_test.py +++ b/core/jobs/transforms/validation/story_validation_test.py @@ -31,7 +31,7 @@ from mypy_imports import story_models (base_models, story_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.story]) + [models.Names.BASE_MODEL, models.Names.STORY]) class ValidateStorySnapshotMetadataModelTests(job_test_utils.PipelinedTestBase): @@ -236,7 +236,7 @@ def test_story_change_object_with_invalid_story_node_property(self) -> None: ]) def test_story_change_object_with_invalid_story_contents_property( - self + self ) -> None: commit_dict = { 'cmd': 'update_story_contents_property', diff --git a/core/jobs/transforms/validation/subtopic_validation.py b/core/jobs/transforms/validation/subtopic_validation.py index 72d99ae275c4..f7ff6dd5a90b 100644 --- a/core/jobs/transforms/validation/subtopic_validation.py +++ b/core/jobs/transforms/validation/subtopic_validation.py @@ -24,21 +24,29 @@ from 
core.jobs.transforms.validation import base_validation from core.platform import models +from typing import Optional, Type + MYPY = False if MYPY: # pragma: no cover from mypy_imports import subtopic_models -(subtopic_models,) = models.Registry.import_models([models.NAMES.subtopic]) +(subtopic_models,) = models.Registry.import_models([models.Names.SUBTOPIC]) @validation_decorators.AuditsExisting( subtopic_models.SubtopicPageSnapshotMetadataModel) class ValidateSubtopicPageSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + subtopic_models.SubtopicPageSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for SubtopicPageSnapshotMetadataModel. """ - def _get_change_domain_class(self, unused_input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, + unused_input_model: subtopic_models.SubtopicPageSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[subtopic_page_domain.SubtopicPageChange]: """Returns a change domain class. Args: @@ -54,11 +62,18 @@ def _get_change_domain_class(self, unused_input_model): # pylint: disable=unused @validation_decorators.AuditsExisting( subtopic_models.SubtopicPageCommitLogEntryModel) class ValidateSubtopicPageCommitLogEntryModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + subtopic_models.SubtopicPageCommitLogEntryModel + ] +): """Overrides _get_change_domain_class for SubtopicPageCommitLogEntryModel. """ - def _get_change_domain_class(self, input_model): + # Here we use MyPy ignore because the signature of this method doesn't + # match with super class's _get_change_domain_class() method. + def _get_change_domain_class( # type: ignore[override] + self, input_model: subtopic_models.SubtopicPageCommitLogEntryModel + ) -> Optional[Type[subtopic_page_domain.SubtopicPageChange]]: """Returns a change domain class. 
Args: diff --git a/core/jobs/transforms/validation/subtopic_validation_test.py b/core/jobs/transforms/validation/subtopic_validation_test.py index 22b24e604cbe..50e9eb7ac2fe 100644 --- a/core/jobs/transforms/validation/subtopic_validation_test.py +++ b/core/jobs/transforms/validation/subtopic_validation_test.py @@ -31,7 +31,7 @@ from mypy_imports import subtopic_models (base_models, subtopic_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.subtopic]) + [models.Names.BASE_MODEL, models.Names.SUBTOPIC]) class ValidateSubtopicCommitCmdsSchemaTests(job_test_utils.PipelinedTestBase): @@ -110,7 +110,7 @@ def test_subtopic_page_change_object_with_invalid_cmd(self) -> None: ]) def test_subtopic_page_change_object_with_missing_attribute_in_cmd( - self + self ) -> None: invalid_commit_cmd_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel( @@ -147,7 +147,7 @@ def test_subtopic_page_change_object_with_missing_attribute_in_cmd( ]) def test_subtopic_page_change_object_with_extra_attribute_in_cmd( - self + self ) -> None: invalid_commit_cmd_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel( @@ -185,7 +185,7 @@ def test_subtopic_page_change_object_with_extra_attribute_in_cmd( ]) def test_subtopic_page_change_object_with_invalid_subtopic_page_property( - self + self ) -> None: invalid_commit_cmd_model = ( subtopic_models.SubtopicPageSnapshotMetadataModel( diff --git a/core/jobs/transforms/validation/topic_validation.py b/core/jobs/transforms/validation/topic_validation.py index 7cad260b911b..d389315d89f6 100644 --- a/core/jobs/transforms/validation/topic_validation.py +++ b/core/jobs/transforms/validation/topic_validation.py @@ -22,19 +22,32 @@ from core.jobs import job_utils from core.jobs.decorators import validation_decorators from core.jobs.transforms.validation import base_validation +from core.jobs.types import model_property from core.jobs.types import topic_validation_errors from core.platform import models import 
apache_beam as beam -(topic_models,) = models.Registry.import_models([models.NAMES.topic]) +from typing import Iterator, List, Optional, Tuple, Type, Union +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import topic_models +(topic_models,) = models.Registry.import_models([models.Names.TOPIC]) + + +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. @validation_decorators.AuditsExisting(topic_models.TopicModel) -class ValidateCanonicalNameMatchesNameInLowercase(beam.DoFn): +class ValidateCanonicalNameMatchesNameInLowercase(beam.DoFn): # type: ignore[misc] """DoFn to validate canonical name matching with lower case name.""" - def process(self, input_model): + def process( + self, input_model: topic_models.TopicModel + ) -> Iterator[topic_validation_errors.ModelCanonicalNameMismatchError]: """Function that validate that canonical name of the model is same as name of the model in lowercase. @@ -54,10 +67,15 @@ def process(self, input_model): @validation_decorators.AuditsExisting( topic_models.TopicSnapshotMetadataModel) class ValidateTopicSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + topic_models.TopicSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for TopicSnapshotMetadataModel.""" - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, input_model: topic_models.TopicSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[topic_domain.TopicChange]: """Returns a change domain class. 
Args: @@ -73,11 +91,16 @@ def _get_change_domain_class(self, input_model): # pylint: disable=unused-argume @validation_decorators.AuditsExisting( topic_models.TopicRightsSnapshotMetadataModel) class ValidateTopicRightsSnapshotMetadataModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + topic_models.TopicRightsSnapshotMetadataModel + ] +): """Overrides _get_change_domain_class for TopicRightsSnapshotMetadataModel. """ - def _get_change_domain_class(self, input_model): # pylint: disable=unused-argument + def _get_change_domain_class( + self, input_model: topic_models.TopicRightsSnapshotMetadataModel # pylint: disable=unused-argument + ) -> Type[topic_domain.TopicRightsChange]: """Returns a change domain class. Args: @@ -92,11 +115,20 @@ def _get_change_domain_class(self, input_model): # pylint: disable=unused-argume @validation_decorators.AuditsExisting(topic_models.TopicCommitLogEntryModel) class ValidateTopicCommitLogEntryModel( - base_validation.BaseValidateCommitCmdsSchema): + base_validation.BaseValidateCommitCmdsSchema[ + topic_models.TopicCommitLogEntryModel + ] +): """Overrides _get_change_domain_class for TopicCommitLogEntryModel. """ - def _get_change_domain_class(self, input_model): + # Here we use MyPy ignore because the signature of this method doesn't + # match with super class's _get_change_domain_class() method. + def _get_change_domain_class( # type: ignore[override] + self, input_model: topic_models.TopicCommitLogEntryModel + ) -> Optional[ + Type[Union[topic_domain.TopicRightsChange, topic_domain.TopicChange]] + ]: """Returns a change domain class. 
Args: @@ -117,7 +149,16 @@ def _get_change_domain_class(self, input_model): @validation_decorators.RelationshipsOf(topic_models.TopicSummaryModel) -def topic_summary_model_relationships(model): +def topic_summary_model_relationships( + model: Type[topic_models.TopicSummaryModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[ + Type[Union[topic_models.TopicModel, topic_models.TopicRightsModel]] + ] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [topic_models.TopicModel] diff --git a/core/jobs/transforms/validation/topic_validation_test.py b/core/jobs/transforms/validation/topic_validation_test.py index f310e42f2b53..7438b15b3e1b 100644 --- a/core/jobs/transforms/validation/topic_validation_test.py +++ b/core/jobs/transforms/validation/topic_validation_test.py @@ -18,8 +18,6 @@ from __future__ import annotations -import datetime - from core.domain import topic_domain from core.jobs import job_test_utils from core.jobs.decorators import validation_decorators @@ -31,16 +29,19 @@ import apache_beam as beam +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import topic_models + (base_models, topic_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.topic]) + [models.Names.BASE_MODEL, models.Names.TOPIC]) class ValidateCanonicalNameMatchesNameInLowercaseTests( job_test_utils.PipelinedTestBase): - NOW = datetime.datetime.utcnow() - - def test_process_for_not_matching_canonical_name(self): + def test_process_for_not_matching_canonical_name(self) -> None: model_with_different_name = topic_models.TopicModel( id='123', name='name', @@ -64,7 +65,7 @@ def test_process_for_not_matching_canonical_name(self): model_with_different_name) ]) - def test_process_for_matching_canonical_name(self): + def test_process_for_matching_canonical_name(self) -> None: model_with_same_name = topic_models.TopicModel( id='123', name='SOMEthing', @@ 
-88,7 +89,7 @@ def test_process_for_matching_canonical_name(self): class ValidateTopicSnapshotMetadataModelTests(job_test_utils.PipelinedTestBase): - def test_validate_change_domain_implemented(self): + def test_validate_change_domain_implemented(self) -> None: valid_commit_cmd_model = topic_models.TopicSnapshotMetadataModel( id='123', created_on=self.YEAR_AGO, @@ -107,7 +108,7 @@ def test_validate_change_domain_implemented(self): self.assert_pcoll_equal(output, []) - def test_topic_change_object_with_missing_cmd(self): + def test_topic_change_object_with_missing_cmd(self) -> None: invalid_commit_cmd_model = topic_models.TopicSnapshotMetadataModel( id='123', created_on=self.YEAR_AGO, @@ -130,7 +131,7 @@ def test_topic_change_object_with_missing_cmd(self): 'Missing cmd key in change dict') ]) - def test_topic_change_object_with_invalid_cmd(self): + def test_topic_change_object_with_invalid_cmd(self) -> None: invalid_commit_cmd_model = topic_models.TopicSnapshotMetadataModel( id='123', created_on=self.YEAR_AGO, @@ -153,7 +154,7 @@ def test_topic_change_object_with_invalid_cmd(self): 'Command invalid is not allowed') ]) - def test_topic_change_object_with_missing_attribute_in_cmd(self): + def test_topic_change_object_with_missing_attribute_in_cmd(self) -> None: invalid_commit_cmd_model = topic_models.TopicSnapshotMetadataModel( id='123', created_on=self.YEAR_AGO, @@ -183,7 +184,7 @@ def test_topic_change_object_with_missing_attribute_in_cmd(self): 'new_value, old_value') ]) - def test_topic_change_object_with_extra_attribute_in_cmd(self): + def test_topic_change_object_with_extra_attribute_in_cmd(self) -> None: invalid_commit_cmd_model = topic_models.TopicSnapshotMetadataModel( id='123', created_on=self.YEAR_AGO, @@ -194,6 +195,7 @@ def test_topic_change_object_with_extra_attribute_in_cmd(self): 'cmd': 'add_subtopic', 'title': 'title', 'subtopic_id': 'subtopic_id', + 'url_fragment': 'url-fragment', 'invalid': 'invalid' }]) @@ -211,12 +213,13 @@ def 
test_topic_change_object_with_extra_attribute_in_cmd(self): 'cmd': 'add_subtopic', 'title': 'title', 'subtopic_id': 'subtopic_id', + 'url_fragment': 'url-fragment', 'invalid': 'invalid' }, 'The following extra attributes are present: invalid') ]) - def test_topic_change_object_with_invalid_topic_property(self): + def test_topic_change_object_with_invalid_topic_property(self) -> None: invalid_commit_cmd_model = topic_models.TopicSnapshotMetadataModel( id='123', created_on=self.YEAR_AGO, @@ -250,7 +253,7 @@ def test_topic_change_object_with_invalid_topic_property(self): 'invalid is not allowed') ]) - def test_topic_change_object_with_invalid_subtopic_property(self): + def test_topic_change_object_with_invalid_subtopic_property(self) -> None: invalid_commit_cmd_model = topic_models.TopicSnapshotMetadataModel( id='123', created_on=self.YEAR_AGO, @@ -286,7 +289,9 @@ def test_topic_change_object_with_invalid_subtopic_property(self): 'invalid is not allowed') ]) - def test_topic_change_object_with_invalid_subtopic_page_property(self): + def test_topic_change_object_with_invalid_subtopic_page_property( + self + ) -> None: invalid_commit_cmd_model = topic_models.TopicSnapshotMetadataModel( id='123', created_on=self.YEAR_AGO, @@ -326,7 +331,7 @@ def test_topic_change_object_with_invalid_subtopic_page_property(self): class ValidateTopicRightsSnapshotMetadataModelTests( job_test_utils.PipelinedTestBase): - def test_topic_rights_change_object_with_missing_cmd(self): + def test_topic_rights_change_object_with_missing_cmd(self) -> None: invalid_commit_cmd_model = ( topic_models.TopicRightsSnapshotMetadataModel( id='123', @@ -351,7 +356,7 @@ def test_topic_rights_change_object_with_missing_cmd(self): 'Missing cmd key in change dict') ]) - def test_topic_change_rights_object_with_invalid_cmd(self): + def test_topic_change_rights_object_with_invalid_cmd(self) -> None: invalid_commit_cmd_model = ( topic_models.TopicRightsSnapshotMetadataModel( id='123', @@ -376,7 +381,9 @@ def 
test_topic_change_rights_object_with_invalid_cmd(self): 'Command invalid is not allowed') ]) - def test_topic_rights_change_object_with_missing_attribute_in_cmd(self): + def test_topic_rights_change_object_with_missing_attribute_in_cmd( + self + ) -> None: commit_dict = { 'cmd': 'change_role', 'assignee_id': 'assignee_id', @@ -406,7 +413,9 @@ def test_topic_rights_change_object_with_missing_attribute_in_cmd(self): 'new_role, old_role') ]) - def test_topic_rights_change_object_with_extra_attribute_in_cmd(self): + def test_topic_rights_change_object_with_extra_attribute_in_cmd( + self + ) -> None: commit_dict = { 'cmd': 'publish_topic', 'invalid': 'invalid' @@ -435,7 +444,7 @@ def test_topic_rights_change_object_with_extra_attribute_in_cmd(self): 'The following extra attributes are present: invalid') ]) - def test_topic_rights_change_object_with_invalid_role(self): + def test_topic_rights_change_object_with_invalid_role(self) -> None: commit_dict = { 'cmd': 'change_role', 'assignee_id': 'assignee_id', @@ -470,7 +479,7 @@ def test_topic_rights_change_object_with_invalid_role(self): class ValidateTopicCommitLogEntryModelTests(job_test_utils.PipelinedTestBase): - def test_validate_rights_model(self): + def test_validate_rights_model(self) -> None: valid_commit_cmd_model = topic_models.TopicCommitLogEntryModel( id='rights_id123', created_on=self.YEAR_AGO, @@ -491,7 +500,7 @@ def test_validate_rights_model(self): self.assert_pcoll_equal(output, []) - def test_validate_topic_model(self): + def test_validate_topic_model(self) -> None: valid_commit_cmd_model = topic_models.TopicCommitLogEntryModel( id='topic_id123', created_on=self.YEAR_AGO, @@ -512,7 +521,7 @@ def test_validate_topic_model(self): self.assert_pcoll_equal(output, []) - def test_raises_commit_cmd_none_error(self): + def test_raises_commit_cmd_none_error(self) -> None: invalid_commit_cmd_model = topic_models.TopicCommitLogEntryModel( id='model_id123', created_on=self.YEAR_AGO, @@ -538,7 +547,7 @@ def 
test_raises_commit_cmd_none_error(self): class RelationshipsOfTests(test_utils.TestBase): - def test_topic_summary_model_relationships(self): + def test_topic_summary_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'TopicSummaryModel', 'id'), diff --git a/core/jobs/transforms/validation/user_validation.py b/core/jobs/transforms/validation/user_validation.py index 4b13e46448b0..22b1103fe314 100644 --- a/core/jobs/transforms/validation/user_validation.py +++ b/core/jobs/transforms/validation/user_validation.py @@ -24,21 +24,38 @@ from core.jobs import job_utils from core.jobs.decorators import validation_decorators from core.jobs.transforms.validation import base_validation +from core.jobs.types import model_property from core.jobs.types import user_validation_errors from core.platform import models import apache_beam as beam +from typing import Iterator, List, Tuple, Type, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import auth_models + from mypy_imports import collection_models + from mypy_imports import datastore_services + from mypy_imports import email_models + from mypy_imports import exp_models + from mypy_imports import feedback_models + from mypy_imports import skill_models + from mypy_imports import story_models + from mypy_imports import user_models + ( auth_models, collection_models, email_models, exp_models, feedback_models, skill_models, story_models, user_models ) = models.Registry.import_models([ - models.NAMES.auth, models.NAMES.collection, models.NAMES.email, - models.NAMES.exploration, models.NAMES.feedback, models.NAMES.skill, - models.NAMES.story, models.NAMES.user + models.Names.AUTH, models.Names.COLLECTION, models.Names.EMAIL, + models.Names.EXPLORATION, models.Names.FEEDBACK, models.Names.SKILL, + models.Names.STORY, models.Names.USER ]) +datastore_services = models.Registry.import_datastore_services() + @validation_decorators.AuditsExisting( 
auth_models.UserAuthDetailsModel, @@ -48,8 +65,8 @@ class ValidateModelWithUserId(base_validation.ValidateBaseModelId): """Overload for models keyed by a user ID, which have a special format.""" - def __init__(self): - super(ValidateModelWithUserId, self).__init__() + def __init__(self) -> None: + super().__init__() # IMPORTANT: Only picklable objects can be stored on DoFns! This is # because DoFns are serialized with pickle when run on a pipeline (and # might be run on many different machines). Any other types assigned to @@ -58,13 +75,19 @@ def __init__(self): self._pattern = feconf.USER_ID_REGEX +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. @validation_decorators.AuditsExisting( user_models.PendingDeletionRequestModel ) -class ValidateActivityMappingOnlyAllowedKeys(beam.DoFn): +class ValidateActivityMappingOnlyAllowedKeys(beam.DoFn): # type: ignore[misc] """DoFn to check for Validates that pseudonymizable_entity_mappings.""" - def process(self, input_model): + def process( + self, input_model: user_models.PendingDeletionRequestModel + ) -> Iterator[user_validation_errors.ModelIncorrectKeyError]: """Function that check for incorrect key in model. Args: @@ -90,69 +113,111 @@ def process(self, input_model): model, incorrect_keys) -@validation_decorators.AuditsExisting(user_models.UserQueryModel) -class ValidateOldModelsMarkedDeleted(beam.DoFn): - """DoFn to validate old models and mark them for deletion""" - - def process(self, input_model): - """Function that checks if a model is old enough to mark them deleted. - - Args: - input_model: user_models.UserQueryModel. Entity to validate. - - Yields: - ModelExpiringError. An error class for expiring models. 
- """ - model = job_utils.clone_model(input_model) - expiration_date = ( - datetime.datetime.utcnow() - - feconf.PERIOD_TO_MARK_MODELS_AS_DELETED) - if expiration_date > model.last_updated: - yield user_validation_errors.ModelExpiringError(model) - - @validation_decorators.RelationshipsOf(user_models.CompletedActivitiesModel) -def completed_activities_model_relationships(model): +def completed_activities_model_relationships( + model: Type[user_models.CompletedActivitiesModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[Union[ + exp_models.ExplorationModel, + collection_models.CollectionModel + ]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.exploration_ids, [exp_models.ExplorationModel] yield model.collection_ids, [collection_models.CollectionModel] @validation_decorators.RelationshipsOf(user_models.IncompleteActivitiesModel) -def incomplete_activities_model_relationships(model): +def incomplete_activities_model_relationships( + model: Type[user_models.IncompleteActivitiesModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[Union[ + exp_models.ExplorationModel, + collection_models.CollectionModel + ]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.exploration_ids, [exp_models.ExplorationModel] yield model.collection_ids, [collection_models.CollectionModel] @validation_decorators.RelationshipsOf(user_models.ExpUserLastPlaythroughModel) -def exp_user_last_playthrough_model_relationships(model): +def exp_user_last_playthrough_model_relationships( + model: Type[user_models.ExpUserLastPlaythroughModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[exp_models.ExplorationModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.exploration_id, [exp_models.ExplorationModel] @validation_decorators.RelationshipsOf(user_models.LearnerPlaylistModel) -def 
learner_playlist_model_relationships(model): +def learner_playlist_model_relationships( + model: Type[user_models.LearnerPlaylistModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[Union[ + exp_models.ExplorationModel, + collection_models.CollectionModel + ]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.exploration_ids, [exp_models.ExplorationModel] yield model.collection_ids, [collection_models.CollectionModel] @validation_decorators.RelationshipsOf(user_models.UserContributionsModel) -def user_contributions_model_relationships(model): +def user_contributions_model_relationships( + model: Type[user_models.UserContributionsModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[exp_models.ExplorationModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.created_exploration_ids, [exp_models.ExplorationModel] yield model.edited_exploration_ids, [exp_models.ExplorationModel] @validation_decorators.RelationshipsOf(user_models.UserEmailPreferencesModel) -def user_email_preferences_model_relationships(model): +def user_email_preferences_model_relationships( + model: Type[user_models.UserEmailPreferencesModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[user_models.UserSettingsModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [user_models.UserSettingsModel] @validation_decorators.RelationshipsOf(user_models.UserSubscriptionsModel) -def user_subscriptions_model_relationships(model): +def user_subscriptions_model_relationships( + model: Type[user_models.UserSubscriptionsModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[Union[ + exp_models.ExplorationModel, + collection_models.CollectionModel, + feedback_models.GeneralFeedbackThreadModel, + user_models.UserSubscribersModel + ]]] + ] +]: """Yields how the properties of the model relates 
to the ID of others.""" yield model.exploration_ids, [exp_models.ExplorationModel] @@ -164,35 +229,74 @@ def user_subscriptions_model_relationships(model): @validation_decorators.RelationshipsOf(user_models.UserSubscribersModel) -def user_subscribers_model_relationships(model): +def user_subscribers_model_relationships( + model: Type[user_models.UserSubscribersModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[user_models.UserSubscriptionsModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.subscriber_ids, [user_models.UserSubscriptionsModel] @validation_decorators.RelationshipsOf(user_models.UserRecentChangesBatchModel) -def user_recent_changes_batch_model_relationships(model): +def user_recent_changes_batch_model_relationships( + model: Type[user_models.UserRecentChangesBatchModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[user_models.UserSettingsModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [user_models.UserSettingsModel] @validation_decorators.RelationshipsOf(user_models.UserStatsModel) -def user_stats_model_relationships(model): +def user_stats_model_relationships( + model: Type[user_models.UserStatsModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[user_models.UserSettingsModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [user_models.UserSettingsModel] @validation_decorators.RelationshipsOf(user_models.ExplorationUserDataModel) -def exploration_user_data_model_relationships(model): +def exploration_user_data_model_relationships( + model: Type[user_models.ExplorationUserDataModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[exp_models.ExplorationModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.exploration_id, [exp_models.ExplorationModel] 
@validation_decorators.RelationshipsOf(user_models.CollectionProgressModel) -def collection_progress_model_relationships(model): +def collection_progress_model_relationships( + model: Type[user_models.CollectionProgressModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[Union[ + collection_models.CollectionModel, + exp_models.ExplorationModel, + user_models.CompletedActivitiesModel + ]]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.collection_id, [collection_models.CollectionModel] @@ -201,28 +305,56 @@ def collection_progress_model_relationships(model): @validation_decorators.RelationshipsOf(user_models.StoryProgressModel) -def story_progress_model_relationships(model): +def story_progress_model_relationships( + model: Type[user_models.StoryProgressModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[story_models.StoryModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.story_id, [story_models.StoryModel] @validation_decorators.RelationshipsOf(user_models.UserQueryModel) -def user_query_model_relationships(model): +def user_query_model_relationships( + model: Type[user_models.UserQueryModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[email_models.BulkEmailModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.sent_email_model_id, [email_models.BulkEmailModel] @validation_decorators.RelationshipsOf(user_models.UserBulkEmailsModel) -def user_bulk_emails_model_relationships(model): +def user_bulk_emails_model_relationships( + model: Type[user_models.UserBulkEmailsModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[email_models.BulkEmailModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.sent_email_model_ids, [email_models.BulkEmailModel] 
@validation_decorators.RelationshipsOf(user_models.UserSkillMasteryModel) -def user_skill_mastery_model_relationships(model): +def user_skill_mastery_model_relationships( + model: Type[user_models.UserSkillMasteryModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[skill_models.SkillModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.skill_id, [skill_models.SkillModel] @@ -230,26 +362,51 @@ def user_skill_mastery_model_relationships(model): @validation_decorators.RelationshipsOf( user_models.UserContributionProficiencyModel) -def user_contribution_proficiency_model_relationships(model): +def user_contribution_proficiency_model_relationships( + model: Type[user_models.UserContributionProficiencyModel] +) -> Iterator[ + Tuple[ + datastore_services.Property, + List[Type[user_models.UserSettingsModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.user_id, [user_models.UserSettingsModel] @validation_decorators.RelationshipsOf(user_models.UserContributionRightsModel) -def user_contribution_rights_model_relationships(model): +def user_contribution_rights_model_relationships( + model: Type[user_models.UserContributionRightsModel] +) -> Iterator[ + Tuple[ + model_property.PropertyType, + List[Type[user_models.UserSettingsModel]] + ] +]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [user_models.UserSettingsModel] +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. 
@validation_decorators.AuditsExisting( user_models.ExplorationUserDataModel ) -class ValidateDraftChangeListLastUpdated(beam.DoFn): +class ValidateDraftChangeListLastUpdated(beam.DoFn): # type: ignore[misc] """DoFn to validate the last_update of draft change list""" - def process(self, input_model): + def process( + self, input_model: user_models.ExplorationUserDataModel + ) -> Iterator[ + Union[ + user_validation_errors.DraftChangeListLastUpdatedNoneError, + user_validation_errors.DraftChangeListLastUpdatedInvalidError + ] + ]: """Function that checks if last_updated for draft change list is valid. Args: @@ -275,13 +432,19 @@ def process(self, input_model): model) +# TODO(#15613): Here we use MyPy ignore because the incomplete typing of +# apache_beam library and absences of stubs in Typeshed, forces MyPy to +# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class +# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. @validation_decorators.AuditsExisting( user_models.UserQueryModel ) -class ValidateArchivedModelsMarkedDeleted(beam.DoFn): +class ValidateArchivedModelsMarkedDeleted(beam.DoFn): # type: ignore[misc] """DoFn to validate archived models marked deleted.""" - def process(self, input_model): + def process( + self, input_model: user_models.UserQueryModel + ) -> Iterator[user_validation_errors.ArchivedModelNotMarkedDeletedError]: """Function that checks if archived model is marked deleted. 
Args: diff --git a/core/jobs/transforms/validation/user_validation_test.py b/core/jobs/transforms/validation/user_validation_test.py index a771542a6726..570c2c7f454c 100644 --- a/core/jobs/transforms/validation/user_validation_test.py +++ b/core/jobs/transforms/validation/user_validation_test.py @@ -31,14 +31,18 @@ import apache_beam as beam -(user_models,) = models.Registry.import_models([models.NAMES.user]) +from typing import Final +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) -class ValidateModelWithUserIdTests(job_test_utils.PipelinedTestBase): - NOW = datetime.datetime.utcnow() +class ValidateModelWithUserIdTests(job_test_utils.PipelinedTestBase): - def test_process_reports_error_for_invalid_uid(self): + def test_process_reports_error_for_invalid_uid(self) -> None: model_with_invalid_id = user_models.UserSettingsModel( id='123', email='a@a.com', created_on=self.NOW, last_updated=self.NOW) @@ -54,7 +58,7 @@ def test_process_reports_error_for_invalid_uid(self): model_with_invalid_id, feconf.USER_ID_REGEX), ]) - def test_process_reports_nothing_for_valid_uid(self): + def test_process_reports_nothing_for_valid_uid(self) -> None: valid_user_id = 'uid_%s' % ('a' * feconf.USER_ID_RANDOM_PART_LENGTH) model_with_valid_id = user_models.UserSettingsModel( id=valid_user_id, email='a@a.com', created_on=self.NOW, @@ -72,20 +76,19 @@ def test_process_reports_nothing_for_valid_uid(self): class ValidateActivityMappingOnlyAllowedKeysTests( job_test_utils.PipelinedTestBase): - NOW = datetime.datetime.utcnow() - USER_ID = 'test_id' - EMAIL_ID = 'a@a.com' - INCORRECT_KEY = 'audit' - ROLE = 'ADMIN' + USER_ID: Final = 'test_id' + EMAIL_ID: Final = 'a@a.com' + INCORRECT_KEY: Final = 'audit' + ROLE: Final = 'ADMIN' - def test_process_with_incorrect_keys(self): + def test_process_with_incorrect_keys(self) -> None: test_model = user_models.PendingDeletionRequestModel( 
id=self.USER_ID, email=self.EMAIL_ID, created_on=self.NOW, last_updated=self.NOW, pseudonymizable_entity_mappings={ - models.NAMES.audit.value: {'key': 'value'} + models.Names.AUDIT.value: {'key': 'value'} } ) @@ -101,14 +104,14 @@ def test_process_with_incorrect_keys(self): test_model, [self.INCORRECT_KEY]) ]) - def test_process_with_correct_keys(self): + def test_process_with_correct_keys(self) -> None: test_model = user_models.PendingDeletionRequestModel( id=self.USER_ID, email=self.EMAIL_ID, created_on=self.NOW, last_updated=self.NOW, pseudonymizable_entity_mappings={ - models.NAMES.collection.value: {'key': 'value'} + models.Names.COLLECTION.value: {'key': 'value'} } ) @@ -122,55 +125,17 @@ def test_process_with_correct_keys(self): self.assert_pcoll_equal(output, []) -class ValidateOldModelsMarkedDeletedTests(job_test_utils.PipelinedTestBase): - - NOW = datetime.datetime.utcnow() - VALID_USER_ID = 'test_user' - SUBMITTER_ID = 'submitter_id' - - def test_model_not_marked_as_deleted_when_older_than_4_weeks(self): - model = user_models.UserQueryModel( - id=self.VALID_USER_ID, - submitter_id=self.SUBMITTER_ID, - created_on=self.NOW - datetime.timedelta(weeks=5), - last_updated=self.NOW - datetime.timedelta(weeks=5) - ) - output = ( - self.pipeline - | beam.Create([model]) - | beam.ParDo(user_validation.ValidateOldModelsMarkedDeleted()) - ) - self.assert_pcoll_equal(output, [ - user_validation_errors.ModelExpiringError(model) - ]) - - def test_model_not_marked_as_deleted_recently(self): - model = user_models.UserQueryModel( - id=self.VALID_USER_ID, - submitter_id=self.SUBMITTER_ID, - created_on=self.NOW - datetime.timedelta(weeks=1), - last_updated=self.NOW - datetime.timedelta(weeks=1) - ) - output = ( - self.pipeline - | beam.Create([model]) - | beam.ParDo(user_validation.ValidateOldModelsMarkedDeleted()) - ) - self.assert_pcoll_equal(output, []) - - class ValidateDraftChangeListLastUpdatedTests(job_test_utils.PipelinedTestBase): - NOW = 
datetime.datetime.utcnow() - VALID_USER_ID = 'test_user' - VALID_EXPLORATION_ID = 'exploration_id' - VALID_DRAFT_CHANGE_LIST = [{ + VALID_USER_ID: Final = 'test_user' + VALID_EXPLORATION_ID: Final = 'exploration_id' + VALID_DRAFT_CHANGE_LIST: Final = [{ 'cmd': 'edit_exploration_property', 'property_name': 'objective', 'new_value': 'the objective' }] - def test_model_with_draft_change_list_but_no_last_updated(self): + def test_model_with_draft_change_list_but_no_last_updated(self) -> None: model = user_models.ExplorationUserDataModel( id='123', user_id=self.VALID_USER_ID, @@ -189,7 +154,9 @@ def test_model_with_draft_change_list_but_no_last_updated(self): user_validation_errors.DraftChangeListLastUpdatedNoneError(model) ]) - def test_model_with_draft_change_list_last_updated_greater_than_now(self): + def test_model_with_draft_change_list_last_updated_greater_than_now( + self + ) -> None: model = user_models.ExplorationUserDataModel( id='123', user_id=self.VALID_USER_ID, @@ -209,7 +176,7 @@ def test_model_with_draft_change_list_last_updated_greater_than_now(self): user_validation_errors.DraftChangeListLastUpdatedInvalidError(model) ]) - def test_model_with_valid_draft_change_list_last_updated(self): + def test_model_with_valid_draft_change_list_last_updated(self) -> None: model = user_models.ExplorationUserDataModel( id='123', user_id=self.VALID_USER_ID, @@ -230,7 +197,7 @@ def test_model_with_valid_draft_change_list_last_updated(self): class RelationshipsOfTests(test_utils.TestBase): - def test_completed_activities_model_relationships(self): + def test_completed_activities_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'CompletedActivitiesModel', 'exploration_ids'), @@ -240,7 +207,7 @@ def test_completed_activities_model_relationships(self): 'CompletedActivitiesModel', 'collection_ids'), ['CollectionModel']) - def test_incomplete_activities_model_relationships(self): + def 
test_incomplete_activities_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'IncompleteActivitiesModel', 'exploration_ids'), @@ -250,13 +217,13 @@ def test_incomplete_activities_model_relationships(self): 'IncompleteActivitiesModel', 'collection_ids'), ['CollectionModel']) - def test_exp_user_last_playthrough_model_relationships(self): + def test_exp_user_last_playthrough_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'ExpUserLastPlaythroughModel', 'exploration_id'), ['ExplorationModel']) - def test_learner_playlist_model_relationships(self): + def test_learner_playlist_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'LearnerPlaylistModel', 'exploration_ids'), @@ -266,7 +233,7 @@ def test_learner_playlist_model_relationships(self): 'LearnerPlaylistModel', 'collection_ids'), ['CollectionModel']) - def test_user_contributions_model_relationships(self): + def test_user_contributions_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserContributionsModel', 'created_exploration_ids'), @@ -276,13 +243,13 @@ def test_user_contributions_model_relationships(self): 'UserContributionsModel', 'edited_exploration_ids'), ['ExplorationModel']) - def test_user_email_preferences_model_relationships(self): + def test_user_email_preferences_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserEmailPreferencesModel', 'id'), ['UserSettingsModel']) - def test_user_subscriptions_model_relationships(self): + def test_user_subscriptions_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserSubscriptionsModel', 'exploration_ids'), @@ -300,31 +267,31 
@@ def test_user_subscriptions_model_relationships(self): 'UserSubscriptionsModel', 'creator_ids'), ['UserSubscribersModel']) - def test_user_subscribers_model_relationships(self): + def test_user_subscribers_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserSubscribersModel', 'subscriber_ids'), ['UserSubscriptionsModel']) - def test_user_recent_changes_batch_model_relationships(self): + def test_user_recent_changes_batch_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserRecentChangesBatchModel', 'id'), ['UserSettingsModel']) - def test_user_stats_model_relationships(self): + def test_user_stats_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserStatsModel', 'id'), ['UserSettingsModel']) - def test_exploration_user_data_model_relationships(self): + def test_exploration_user_data_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'ExplorationUserDataModel', 'exploration_id'), ['ExplorationModel']) - def test_collection_progress_model_relationships(self): + def test_collection_progress_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'CollectionProgressModel', 'collection_id'), @@ -338,37 +305,37 @@ def test_collection_progress_model_relationships(self): 'CollectionProgressModel', 'user_id'), ['CompletedActivitiesModel']) - def test_story_progress_model_relationships(self): + def test_story_progress_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'StoryProgressModel', 'story_id'), ['StoryModel']) - def test_user_query_model_relationships(self): + def test_user_query_model_relationships(self) -> None: 
self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserQueryModel', 'sent_email_model_id'), ['BulkEmailModel']) - def test_user_bulk_emails_model_relationships(self): + def test_user_bulk_emails_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserBulkEmailsModel', 'sent_email_model_ids'), ['BulkEmailModel']) - def test_user_skill_mastery_model_relationships(self): + def test_user_skill_mastery_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserSkillMasteryModel', 'skill_id'), ['SkillModel']) - def test_user_contribution_proficiency_model_relationships(self): + def test_user_contribution_proficiency_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserContributionProficiencyModel', 'user_id'), ['UserSettingsModel']) - def test_user_contribution_rights_model_relationships(self): + def test_user_contribution_rights_model_relationships(self) -> None: self.assertItemsEqual( validation_decorators.RelationshipsOf.get_model_kind_references( 'UserContributionRightsModel', 'id'), @@ -378,7 +345,7 @@ def test_user_contribution_rights_model_relationships(self): class ValidateArchivedModelsMarkedDeletedTests( job_test_utils.PipelinedTestBase): - def test_archived_model_not_marked_deleted(self): + def test_archived_model_not_marked_deleted(self) -> None: model = user_models.UserQueryModel( id='123', submitter_id='111', @@ -394,7 +361,7 @@ def test_archived_model_not_marked_deleted(self): self.assert_pcoll_equal(output, [ user_validation_errors.ArchivedModelNotMarkedDeletedError(model)]) - def test_model_not_archived_not_marked_deleted(self): + def test_model_not_archived_not_marked_deleted(self) -> None: model = user_models.UserQueryModel( id='123', submitter_id='111', diff --git 
a/core/jobs/types/base_validation_errors.py b/core/jobs/types/base_validation_errors.py index 769083989f3a..06d2a9b5878e 100644 --- a/core/jobs/types/base_validation_errors.py +++ b/core/jobs/types/base_validation_errors.py @@ -20,22 +20,38 @@ from core import feconf from core import utils +from core.domain import change_domain from core.jobs import job_utils from core.jobs.types import job_run_result +from core.jobs.types import model_property +from core.platform import models + +from typing import Mapping, Optional, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) class BaseAuditError(job_run_result.JobRunResult): """Base class for model audit errors.""" - def __init__(self, message, model_or_kind, model_id=None): + def __init__( + self, + message: str, + model_or_kind: Union[base_models.BaseModel, str], + model_id: Optional[str] = None + ) -> None: """Initializes a new audit error. Args: message: str. The message describing the error. - model_or_kind: Model|bytes. If model_id is not provided, then this + model_or_kind: Model|str. If model_id is not provided, then this is a model (type: BaseModel). - Otherwise, this is a model's kind (type: bytes). - model_id: bytes|None. The model's ID, or None when model_or_kind is + Otherwise, this is a model's kind (type: str). + model_id: str|None. The model's ID, or None when model_or_kind is a model. 
Raises: @@ -48,113 +64,142 @@ def __init__(self, message, model_or_kind, model_id=None): if not message: raise ValueError('message must be a non-empty string') - if model_id is None: + if ( + model_id is None and ( + isinstance(model_or_kind, base_models.BaseModel)) + ): model_id = job_utils.get_model_id(model_or_kind) model_kind = job_utils.get_model_kind(model_or_kind) - else: + elif isinstance(model_or_kind, str): model_kind = model_or_kind - error_message = '%s in %s(id=%s): %s' % ( - self.__class__.__name__, - model_kind, utils.quoted(model_id), message) - - super(BaseAuditError, self).__init__(stderr=error_message) + if model_id: + error_message = '%s in %s(id=%s): %s' % ( + self.__class__.__name__, + model_kind, + utils.quoted(model_id), + message + ) + else: + error_message = '%s in %s: %s' % ( + self.__class__.__name__, + model_kind, + message + ) + super().__init__(stderr=error_message) class InconsistentTimestampsError(BaseAuditError): """Error class for models with inconsistent timestamps.""" - def __init__(self, model): + def __init__(self, model: base_models.BaseModel) -> None: message = 'created_on=%r is later than last_updated=%r' % ( model.created_on, model.last_updated) - super(InconsistentTimestampsError, self).__init__(message, model) + super().__init__(message, model) class InvalidCommitStatusError(BaseAuditError): """Error class for commit models with inconsistent status values.""" - def __init__(self, model): + def __init__(self, model: base_models.BaseCommitLogEntryModel) -> None: message = 'post_commit_status is %s' % model.post_commit_status - super(InvalidCommitStatusError, self).__init__(message, model) + super().__init__(message, model) class InvalidPublicCommitStatusError(BaseAuditError): """Error class for commit models with inconsistent public status values.""" - def __init__(self, model): + def __init__(self, model: base_models.BaseCommitLogEntryModel) -> None: message = ( 'post_commit_status=%s but post_commit_community_owned=%s' % 
( model.post_commit_status, model.post_commit_community_owned)) - super(InvalidPublicCommitStatusError, self).__init__(message, model) + super().__init__(message, model) class InvalidPrivateCommitStatusError(BaseAuditError): """Error class for commit models with inconsistent private status values.""" - def __init__(self, model): + def __init__(self, model: base_models.BaseCommitLogEntryModel) -> None: message = ( 'post_commit_status=%s but post_commit_is_private=%r' % ( model.post_commit_status, model.post_commit_is_private)) - super(InvalidPrivateCommitStatusError, self).__init__(message, model) + super().__init__(message, model) class ModelMutatedDuringJobError(BaseAuditError): """Error class for models mutated during a job.""" - def __init__(self, model): + def __init__(self, model: base_models.BaseModel) -> None: message = ( 'last_updated=%r is later than the audit job\'s start time' % ( model.last_updated)) - super(ModelMutatedDuringJobError, self).__init__(message, model) + super().__init__(message, model) class ModelIdRegexError(BaseAuditError): """Error class for models with ids that fail to match a regex pattern.""" - def __init__(self, model, regex_string): + def __init__( + self, model: base_models.BaseModel, regex_string: str + ) -> None: message = 'id does not match the expected regex=%s' % ( utils.quoted(regex_string)) - super(ModelIdRegexError, self).__init__(message, model) + super().__init__(message, model) class ModelDomainObjectValidateError(BaseAuditError): """Error class for domain object validation errors.""" - def __init__(self, model, error_message): + def __init__( + self, model: base_models.BaseModel, error_message: str + ) -> None: message = 'Entity fails domain validation with the error: %s' % ( error_message) - super(ModelDomainObjectValidateError, self).__init__(message, model) + super().__init__(message, model) class ModelExpiredError(BaseAuditError): """Error class for expired models.""" - def __init__(self, model): + def 
__init__(self, model: base_models.BaseModel) -> None: message = 'deleted=True when older than %s days' % ( feconf.PERIOD_TO_HARD_DELETE_MODELS_MARKED_AS_DELETED.days) - super(ModelExpiredError, self).__init__(message, model) + super().__init__(message, model) class InvalidCommitTypeError(BaseAuditError): """Error class for commit_type validation errors.""" - def __init__(self, model): + def __init__( + self, + model: Union[ + base_models.BaseCommitLogEntryModel, + base_models.BaseSnapshotMetadataModel + ] + ) -> None: message = 'Commit type %s is not allowed' % model.commit_type - super(InvalidCommitTypeError, self).__init__(message, model) + super().__init__(message, model) class ModelRelationshipError(BaseAuditError): """Error class for models with invalid relationships.""" - def __init__(self, id_property, model_id, target_kind, target_id): + def __init__( + self, + id_property: model_property.ModelProperty, + model_id: Optional[str], + target_kind: str, + target_id: str + ) -> None: """Initializes a new ModelRelationshipError. Args: id_property: ModelProperty. The property referring to the ID of the target model. - model_id: bytes. The ID of the model with problematic ID property. + model_id: str|None. The ID of the model with problematic ID + property. target_kind: str. The kind of model the property refers to. - target_id: bytes. The ID of the specific model that the property + target_id: str. The ID of the specific model that the property refers to. NOTE: This is the value of the ID property. 
""" # NOTE: IDs are converted to bytes because that's how they're read from @@ -163,25 +208,39 @@ def __init__(self, id_property, model_id, target_kind, target_id): '%s=%s should correspond to the ID of an existing %s, but no such ' 'model exists' % ( id_property, utils.quoted(target_id), target_kind)) - super(ModelRelationshipError, self).__init__( + super().__init__( message, id_property.model_kind, model_id=model_id) class CommitCmdsNoneError(BaseAuditError): """Error class for None Commit Cmds.""" - def __init__(self, model): + def __init__( + self, + model: Union[ + base_models.BaseCommitLogEntryModel, + base_models.BaseSnapshotMetadataModel + ] + ) -> None: message = ( 'No commit command domain object defined for entity with commands: ' '%s' % model.commit_cmds) - super(CommitCmdsNoneError, self).__init__(message, model) + super().__init__(message, model) class CommitCmdsValidateError(BaseAuditError): """Error class for wrong commit cmmds.""" - def __init__(self, model, commit_cmd_dict, e): + def __init__( + self, + model: Union[ + base_models.BaseCommitLogEntryModel, + base_models.BaseSnapshotMetadataModel + ], + commit_cmd_dict: Mapping[str, change_domain.AcceptableChangeDictTypes], + e: str + ) -> None: message = ( 'Commit command domain validation for command: %s failed with ' 'error: %s' % (commit_cmd_dict, e)) - super(CommitCmdsValidateError, self).__init__(message, model) + super().__init__(message, model) diff --git a/core/jobs/types/base_validation_errors_test.py b/core/jobs/types/base_validation_errors_test.py index 0a3dec25da59..86a32d6c2409 100644 --- a/core/jobs/types/base_validation_errors_test.py +++ b/core/jobs/types/base_validation_errors_test.py @@ -28,7 +28,14 @@ from core.platform import models from core.tests import test_utils as core_test_utils -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +from typing import Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from 
mypy_imports import datastore_services + +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() @@ -48,87 +55,113 @@ class BarModel(base_models.BaseModel): class FooError(base_validation_errors.BaseAuditError): """A simple test-only error.""" - def __init__(self, model): - super(FooError, self).__init__('foo', model) + def __init__(self, model: Union[base_models.BaseModel, str]) -> None: + super().__init__('foo', model) class BarError(base_validation_errors.BaseAuditError): """A simple test-only error.""" - def __init__(self, model): - super(BarError, self).__init__('bar', model) + def __init__(self, model: Union[base_models.BaseModel, str]) -> None: + super().__init__('bar', model) class AuditErrorsTestBase(core_test_utils.TestBase): """Base class for validator error tests.""" - NOW = datetime.datetime.utcnow() - YEAR_AGO = NOW - datetime.timedelta(weeks=52) - YEAR_LATER = NOW + datetime.timedelta(weeks=52) + NOW: datetime.datetime = datetime.datetime.utcnow() + YEAR_AGO: datetime.datetime = NOW - datetime.timedelta(weeks=52) + YEAR_LATER: datetime.datetime = NOW + datetime.timedelta(weeks=52) + + +class ErrorMessageTests(core_test_utils.TestBase): + + def test_error_message_with_wrong_input(self) -> None: + error = base_validation_errors.BaseAuditError( + 'testing string', + 'non-existing model', + None + ) + self.assertEqual( + error.stderr, + 'BaseAuditError in non-existing model: testing string' + ) class BaseAuditErrorTests(AuditErrorsTestBase): - def setUp(self): - super(BaseAuditErrorTests, self).setUp() + def setUp(self) -> None: + super().setUp() self.model = base_models.BaseModel(id='123') - def test_message(self): + def test_message(self) -> None: error = FooError(self.model) self.assertEqual(error.stderr, 'FooError in BaseModel(id="123"): foo') - def test_stdout(self): + def test_stdout(self) -> None: error = FooError(self.model) self.assertEqual(error.stdout, '') - 
def test_stderr(self): + def test_stderr(self) -> None: error = FooError(self.model) self.assertEqual(error.stderr, 'FooError in BaseModel(id="123"): foo') - def test_message_raises_type_error_if_assigned_a_non_string_value(self): + def test_message_raises_type_error_if_assigned_a_non_string_value( + self + ) -> None: class ErrorWithIntMessage(base_validation_errors.BaseAuditError): """Subclass that tries to assign an int value to self.stderr.""" - def __init__(self, model): - super(ErrorWithIntMessage, self).__init__(123, model) + def __init__( + self, model: Union[base_models.BaseModel, str] + ) -> None: + # TODO(#13059): Here we use MyPy ignore because after we + # fully type the codebase we plan to get rid of the tests + # that intentionally test wrong inputs that we can normally + # catch by typing. + super().__init__(123, model) # type: ignore[arg-type] - with self.assertRaisesRegexp(TypeError, 'must be a string'): + with self.assertRaisesRegex(TypeError, 'must be a string'): ErrorWithIntMessage(self.model) - def test_message_raises_value_error_if_assigned_an_empty_value(self): + def test_message_raises_value_error_if_assigned_an_empty_value( + self + ) -> None: class ErrorWithEmptyMessage(base_validation_errors.BaseAuditError): """Subclass that tries to assign an empty value to self.stderr.""" - def __init__(self, model): - super(ErrorWithEmptyMessage, self).__init__('', model) + def __init__( + self, model: Union[base_models.BaseModel, str] + ) -> None: + super().__init__('', model) - with self.assertRaisesRegexp(ValueError, 'must be a non-empty string'): + with self.assertRaisesRegex(ValueError, 'must be a non-empty string'): ErrorWithEmptyMessage(self.model) - def test_equality_between_different_types(self): + def test_equality_between_different_types(self) -> None: self.assertNotEqual(FooError(self.model), BarError(self.model)) - def test_equality_between_same_types_and_same_values(self): + def test_equality_between_same_types_and_same_values(self) -> 
None: self.assertEqual( FooError(self.model), FooError(job_utils.clone_model(self.model))) - def test_equality_between_same_types_and_different_values(self): + def test_equality_between_same_types_and_different_values(self) -> None: self.assertNotEqual( FooError(self.model), FooError(job_utils.clone_model(self.model, id='987'))) - def test_hashable(self): + def test_hashable(self) -> None: set_of_errors = { FooError(self.model), FooError(job_utils.clone_model(self.model)), } self.assertEqual(len(set_of_errors), 1) - def test_pickling_sub_classes(self): + def test_pickling_sub_classes(self) -> None: foo_error, bar_error = FooError(self.model), BarError(self.model) pickled_foo_error, pickled_bar_error = ( @@ -143,7 +176,7 @@ def test_pickling_sub_classes(self): class InconsistentTimestampsErrorTests(AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: model = base_models.BaseModel( id='123', created_on=self.NOW, @@ -159,7 +192,7 @@ def test_message(self): class InvalidCommitStatusErrorTests(AuditErrorsTestBase): - def test_message_for_invalid_post_commit_status(self): + def test_message_for_invalid_post_commit_status(self) -> None: model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -175,7 +208,7 @@ def test_message_for_invalid_post_commit_status(self): 'InvalidCommitStatusError in BaseCommitLogEntryModel(id="123"): ' 'post_commit_status is invalid') - def test_message_for_private_post_commit_status(self): + def test_message_for_private_post_commit_status(self) -> None: model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -193,7 +226,7 @@ def test_message_for_private_post_commit_status(self): 'BaseCommitLogEntryModel(id="123"): post_commit_status=private ' 'but post_commit_is_private=False') - def test_message_for_public_post_commit_status(self): + def test_message_for_public_post_commit_status(self) -> None: model = base_models.BaseCommitLogEntryModel( id='123', 
created_on=self.YEAR_AGO, @@ -211,7 +244,9 @@ def test_message_for_public_post_commit_status(self): 'BaseCommitLogEntryModel(id="123"): post_commit_status=public ' 'but post_commit_is_private=True') - def test_message_for_public_post_commit_status_raise_exception(self): + def test_message_for_public_post_commit_status_raise_exception( + self + ) -> None: model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -232,7 +267,7 @@ def test_message_for_public_post_commit_status_raise_exception(self): class ModelMutatedDuringJobErrorTests(AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: model = base_models.BaseModel( id='123', created_on=self.NOW, @@ -248,7 +283,7 @@ def test_message(self): class ModelIdRegexErrorTests(AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: model = base_models.BaseModel( id='?!"', created_on=self.YEAR_AGO, @@ -263,7 +298,7 @@ def test_message(self): class ModelExpiredErrorTests(AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: model = base_models.BaseModel( id='123', deleted=True, @@ -280,7 +315,7 @@ def test_message(self): class ModelDomainObjectValidateErrorTests(AuditErrorsTestBase): - def test_model_domain_object_validate_error(self): + def test_model_domain_object_validate_error(self) -> None: model = base_models.BaseModel( id='123', deleted=True, @@ -300,7 +335,7 @@ def test_model_domain_object_validate_error(self): class InvalidCommitTypeErrorTests(AuditErrorsTestBase): - def test_model_invalid_id_error(self): + def test_model_invalid_id_error(self) -> None: model = base_models.BaseCommitLogEntryModel( id='123', created_on=self.YEAR_AGO, @@ -319,10 +354,13 @@ def test_model_invalid_id_error(self): class ModelRelationshipErrorTests(AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: error = base_validation_errors.ModelRelationshipError( - model_property.ModelProperty(FooModel, 
FooModel.bar_id), '123', - 'BarModel', '123') + model_property.ModelProperty(FooModel, FooModel.bar_id), + '123', + 'BarModel', + '123' + ) self.assertEqual( error.stderr, @@ -333,7 +371,7 @@ def test_message(self): class CommitCmdsNoneErrorTests(AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: model = base_models.BaseCommitLogEntryModel( id='invalid', created_on=self.YEAR_AGO, @@ -353,7 +391,7 @@ def test_message(self): class CommitCmdsValidateErrorTests(AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: model = base_models.BaseCommitLogEntryModel( id='invalid', created_on=self.YEAR_AGO, @@ -364,8 +402,10 @@ def test_message(self): commit_cmds=[{'cmd-invalid': 'invalid_test_command'}]) error_message = 'Missing cmd key in change dict' error = base_validation_errors.CommitCmdsValidateError( - model, {'cmd-invalid': 'invalid_test_command'}, - error_message) + model, + {'cmd-invalid': 'invalid_test_command'}, + error_message + ) self.assertEqual( error.stderr, diff --git a/core/jobs/types/blog_validation_errors.py b/core/jobs/types/blog_validation_errors.py index ce69f4b36c3a..7e338fba7149 100644 --- a/core/jobs/types/blog_validation_errors.py +++ b/core/jobs/types/blog_validation_errors.py @@ -20,50 +20,120 @@ from core import utils from core.jobs.types import base_validation_errors +from core.platform import models + +from typing import Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import blog_models + +(blog_models,) = models.Registry.import_models([models.Names.BLOG]) class DuplicateBlogTitleError(base_validation_errors.BaseAuditError): """Error class for blog posts with duplicate titles.""" - def __init__(self, model): + def __init__( + self, + model: Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel, + ] + ) -> None: message = 'title=%s is not unique' % utils.quoted(model.title) - super(DuplicateBlogTitleError, self).__init__(message, model) + 
super().__init__(message, model) class DuplicateBlogUrlError(base_validation_errors.BaseAuditError): """Error class for blog posts with duplicate urls.""" - def __init__(self, model): + def __init__( + self, + model: Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel, + ] + ) -> None: message = 'url=%s is not unique' % utils.quoted(model.url_fragment) - super(DuplicateBlogUrlError, self).__init__(message, model) + super().__init__(message, model) -class InconsistentPublishTimestampsError(base_validation_errors.BaseAuditError): +class InconsistentLastUpdatedTimestampsError( + base_validation_errors.BaseAuditError +): """Error class for models with inconsistent timestamps.""" - def __init__(self, model): - message = 'created_on=%r is later than published_on=%r' % ( - model.created_on, model.published_on) - super(InconsistentPublishTimestampsError, self).__init__(message, model) + def __init__( + self, + model: Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel + ] + ) -> None: + message = 'created_on=%r is later than last_updated=%r' % ( + model.created_on, model.last_updated) + super().__init__(message, model) class InconsistentPublishLastUpdatedTimestampsError( base_validation_errors.BaseAuditError): """Error class for models with inconsistent timestamps.""" - def __init__(self, model): + def __init__( + self, + model: Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel + ] + ) -> None: message = 'published_on=%r is later than last_updated=%r' % ( model.published_on, model.last_updated) - super( - InconsistentPublishLastUpdatedTimestampsError, self - ).__init__(message, model) + super().__init__(message, model) -class ModelMutatedDuringJobError(base_validation_errors.BaseAuditError): +class ModelMutatedDuringJobErrorForLastUpdated( + base_validation_errors.BaseAuditError +): """Error class for models mutated during a job.""" - def __init__(self, model): + def __init__( + self, + model: Union[ + 
blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel + ] + ) -> None: + message = ( + 'last_updated=%r is later than the audit job\'s start time' % ( + model.last_updated)) + super().__init__(message, model) + + +class ModelMutatedDuringJobErrorForPublishedOn( + base_validation_errors.BaseAuditError +): + """Error class for models mutated during a job.""" + + def __init__( + self, + model: Union[ + blog_models.BlogPostModel, + blog_models.BlogPostSummaryModel + ] + ) -> None: message = ( 'published_on=%r is later than the audit job\'s start time' % ( model.published_on)) - super(ModelMutatedDuringJobError, self).__init__(message, model) + super().__init__(message, model) + + +class DuplicateBlogAuthorModelError(base_validation_errors.BaseAuditError): + """Error class for blog author detail models with duplicate author ids.""" + + def __init__( + self, + model: blog_models.BlogAuthorDetailsModel + ) -> None: + message = 'author id=%s is not unique' % utils.quoted(model.author_id) + super().__init__(message, model) diff --git a/core/jobs/types/blog_validation_errors_test.py b/core/jobs/types/blog_validation_errors_test.py index 04cba9446893..2cc540fcc2c0 100644 --- a/core/jobs/types/blog_validation_errors_test.py +++ b/core/jobs/types/blog_validation_errors_test.py @@ -27,7 +27,7 @@ if MYPY: # pragma: no cover from mypy_imports import blog_models -(blog_models,) = models.Registry.import_models([models.NAMES.blog]) +(blog_models,) = models.Registry.import_models([models.Names.BLOG]) class DuplicateBlogTitleErrorTests( @@ -68,7 +68,28 @@ def test_message(self) -> None: ' is not unique' % utils.quoted(blog_post_model.url_fragment)) -class InconsistentPublishTimestampsErrorTests( +class DuplicateBlogAuthorModelErrorTests( + base_validation_errors_test.AuditErrorsTestBase): + + def test_message(self) -> None: + author_details_model = blog_models.BlogAuthorDetailsModel( + id='id1', + displayed_author_name='user one', + author_id='user', + author_bio='') + + error 
= blog_validation_errors.DuplicateBlogAuthorModelError( + author_details_model) + + self.assertEqual( + error.stderr, + 'DuplicateBlogAuthorModelError in BlogAuthorDetailsModel(id="id1"):' + ' author id=%s is not unique' % utils.quoted( + author_details_model.author_id) + ) + + +class InconsistentLastUpdatedTimestampsErrorTests( base_validation_errors_test.AuditErrorsTestBase): def test_message(self) -> None: @@ -81,12 +102,13 @@ def test_message(self) -> None: created_on=self.NOW, last_updated=self.YEAR_AGO, published_on=self.YEAR_AGO) - error = blog_validation_errors.InconsistentPublishTimestampsError(model) + error = blog_validation_errors.InconsistentLastUpdatedTimestampsError( + model) self.assertEqual( error.stderr, - 'InconsistentPublishTimestampsError in BlogPostModel' - '(id="validblogid1"): created_on=%r is later than published_on=%r' % + 'InconsistentLastUpdatedTimestampsError in BlogPostModel' + '(id="validblogid1"): created_on=%r is later than last_updated=%r' % (self.NOW, self.YEAR_AGO)) @@ -117,7 +139,7 @@ def test_message(self) -> None: class ModelMutatedDuringJobErrorTests( base_validation_errors_test.AuditErrorsTestBase): - def test_message(self) -> None: + def test_message_for_published_on(self) -> None: model = blog_models.BlogPostModel( id='validblogid1', title='Sample Title', @@ -126,11 +148,31 @@ def test_message(self) -> None: url_fragment='url_fragment_1', created_on=self.YEAR_AGO, last_updated=self.NOW, + published_on=self.YEAR_LATER) + error = blog_validation_errors.ModelMutatedDuringJobErrorForPublishedOn( + model) + + self.assertEqual( + error.stderr, + 'ModelMutatedDuringJobErrorForPublishedOn in BlogPostModel(' + 'id="validblogid1"): published_on=%r is later than the audit job\'s' + ' start time' % (model.published_on)) + + def test_message_for_last_updated(self) -> None: + model = blog_models.BlogPostModel( + id='validblogid1', + title='Sample Title', + content='

    hello

    ,', + author_id='user', + url_fragment='url_fragment_1', + created_on=self.YEAR_AGO, + last_updated=self.YEAR_LATER, published_on=self.YEAR_AGO) - error = blog_validation_errors.ModelMutatedDuringJobError(model) + error = blog_validation_errors.ModelMutatedDuringJobErrorForLastUpdated( + model) self.assertEqual( error.stderr, - 'ModelMutatedDuringJobError in BlogPostModel(id="validblogid1"): ' - 'published_on=%r is later than the audit job\'s start time' % ( - model.published_on)) + 'ModelMutatedDuringJobErrorForLastUpdated in BlogPostModel(' + 'id="validblogid1"): last_updated=%r is later than the audit job\'s' + ' start time' % (model.last_updated)) diff --git a/core/jobs/types/feedback_validation_errors.py b/core/jobs/types/feedback_validation_errors.py index 4a0134e8b0f2..9f95abb0b44a 100644 --- a/core/jobs/types/feedback_validation_errors.py +++ b/core/jobs/types/feedback_validation_errors.py @@ -19,11 +19,20 @@ from __future__ import annotations from core.jobs.types import base_validation_errors +from core.platform import models + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import feedback_models + +(feedback_models,) = models.Registry.import_models([models.Names.FEEDBACK]) class InvalidEntityTypeError(base_validation_errors.BaseAuditError): """Error class for models that have invalid entity type.""" - def __init__(self, model): + def __init__( + self, model: feedback_models.GeneralFeedbackThreadModel + ) -> None: message = 'entity type %s is invalid.' 
% model.entity_type - super(InvalidEntityTypeError, self).__init__(message, model) + super().__init__(message, model) diff --git a/core/jobs/types/feedback_validation_errors_test.py b/core/jobs/types/feedback_validation_errors_test.py index d2c10e44ca5a..ac1d75af41d3 100644 --- a/core/jobs/types/feedback_validation_errors_test.py +++ b/core/jobs/types/feedback_validation_errors_test.py @@ -22,7 +22,11 @@ from core.jobs.types import feedback_validation_errors from core.platform import models -(feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import feedback_models + +(feedback_models,) = models.Registry.import_models([models.Names.FEEDBACK]) datastore_services = models.Registry.import_datastore_services() @@ -30,7 +34,7 @@ class InvalidEntityTypeErrorTests( base_validation_errors_test.AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: model = feedback_models.GeneralFeedbackThreadModel( id='123', entity_id='123', diff --git a/core/jobs/types/improvements_validation_errors.py b/core/jobs/types/improvements_validation_errors.py index e4e6400da707..7efc17d9f39c 100644 --- a/core/jobs/types/improvements_validation_errors.py +++ b/core/jobs/types/improvements_validation_errors.py @@ -19,12 +19,22 @@ from __future__ import annotations from core.jobs.types import base_validation_errors +from core.platform import models + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import improvements_models + +(improvements_models,) = models.Registry.import_models( + [models.Names.IMPROVEMENTS]) class InvalidCompositeEntityError(base_validation_errors.BaseAuditError): """Error class for models that have invalid composite entity id.""" - def __init__(self, model): + def __init__( + self, model: improvements_models.ExplorationStatsTaskEntryModel + ) -> None: message = 'model has invalid composite entity %s' % ( model.composite_entity_id) - 
super(InvalidCompositeEntityError, self).__init__(message, model) + super().__init__(message, model) diff --git a/core/jobs/types/improvements_validation_errors_test.py b/core/jobs/types/improvements_validation_errors_test.py index e1f14214d7b9..b4d37a24a862 100644 --- a/core/jobs/types/improvements_validation_errors_test.py +++ b/core/jobs/types/improvements_validation_errors_test.py @@ -22,15 +22,19 @@ from core.jobs.types import improvements_validation_errors from core.platform import models +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import improvements_models + (improvements_models,) = models.Registry.import_models( - [models.NAMES.improvements]) + [models.Names.IMPROVEMENTS]) class InvalidCompositeEntityErrorTests( base_validation_errors_test.AuditErrorsTestBase): - def test_message(self): - model = improvements_models.TaskEntryModel( + def test_message(self) -> None: + model = improvements_models.ExplorationStatsTaskEntryModel( id='23', entity_id='999', entity_type='exploration', @@ -48,5 +52,6 @@ def test_message(self): self.assertEqual( error.stderr, - 'InvalidCompositeEntityError in TaskEntryModel(id="23"): model ' + 'InvalidCompositeEntityError in ' + 'ExplorationStatsTaskEntryModel(id="23"): model ' 'has invalid composite entity %s' % model.composite_entity_id) diff --git a/core/jobs/types/job_run_result.py b/core/jobs/types/job_run_result.py index 7043cb75f5ab..b4d948be5826 100644 --- a/core/jobs/types/job_run_result.py +++ b/core/jobs/types/job_run_result.py @@ -22,9 +22,12 @@ from core import utils -from typing import Any, List, Tuple # isort: skip +from typing import Any, List, Tuple, Union # isort: skip -MAX_OUTPUT_BYTES = 1500 +# This is just to make sure that the output of the job have some reasonable +# length. The maximum that model can hold is around 1 MB and this is much lower. 
+MAX_OUTPUT_CHARACTERS = 5000 +TRUNCATED_MARK = '[TRUNCATED]' class JobRunResult: @@ -42,18 +45,29 @@ def __init__(self, stdout: str = '', stderr: str = ''): Args: stdout: str. The standard output from a job run. stderr: str. The error output from a job run. + + Raises: + ValueError. Both stdout and stderr are empty. + ValueError. JobRunResult exceeds maximum limit. """ if not stdout and not stderr: raise ValueError('JobRunResult instances must not be empty') self.stdout, self.stderr = stdout, stderr - if self.len_in_bytes() > MAX_OUTPUT_BYTES: - raise ValueError( - 'JobRunResult must not exceed %d bytes' % MAX_OUTPUT_BYTES) + if len(self.stdout) > MAX_OUTPUT_CHARACTERS: + self.stdout = '%s%s' % ( + self.stdout[:MAX_OUTPUT_CHARACTERS], TRUNCATED_MARK + ) + if len(self.stderr) > MAX_OUTPUT_CHARACTERS: + self.stderr = '%s%s' % ( + self.stderr[:MAX_OUTPUT_CHARACTERS], TRUNCATED_MARK + ) @classmethod - def as_stdout(cls, value: Any, use_repr: bool = False) -> JobRunResult: + def as_stdout( + cls, value: Union[str, int], use_repr: bool = False + ) -> JobRunResult: """Returns a new JobRunResult with a stdout value. Args: @@ -68,7 +82,9 @@ def as_stdout(cls, value: Any, use_repr: bool = False) -> JobRunResult: return JobRunResult(stdout=str_value) @classmethod - def as_stderr(cls, value: Any, use_repr: bool = False) -> JobRunResult: + def as_stderr( + cls, value: Union[str, int], use_repr: bool = False + ) -> JobRunResult: """Returns a new JobRunResult with a stderr value. Args: @@ -103,7 +119,10 @@ def accumulate(cls, results: List[JobRunResult]) -> List[JobRunResult]: for i, result in enumerate(results): # Use i as a tie-breaker so that results, which don't implement the # comparison operators, don't get compared with one another. 
- heapq.heappush(results_heap, (result.len_in_bytes(), i, result)) + heapq.heappush( + results_heap, + (len(result.stdout) + len(result.stderr), i, result) + ) batches = [] latest_batch_size, _, smallest = heapq.heappop(results_heap) @@ -117,7 +136,8 @@ def accumulate(cls, results: List[JobRunResult]) -> List[JobRunResult]: # them is empty). padding = 2 if next_smallest.stdout and next_smallest.stderr else 1 - if latest_batch_size + padding + result_size < MAX_OUTPUT_BYTES: + overall_size = latest_batch_size + padding + result_size + if overall_size <= MAX_OUTPUT_CHARACTERS: latest_batch_size += padding + result_size batches[-1].append(next_smallest) else: @@ -131,15 +151,6 @@ def accumulate(cls, results: List[JobRunResult]) -> List[JobRunResult]: batched_results.append(JobRunResult(stdout=stdout, stderr=stderr)) return batched_results - def len_in_bytes(self) -> int: - """Returns the number of bytes encoded by the JobRunResult instance. - - Returns: - int. The number of bytes encoded by the JobRunResult instance. 
- """ - output_bytes = (s.encode('utf-8') for s in (self.stdout, self.stderr)) - return sum(len(output) for output in output_bytes) - def __repr__(self) -> str: return '%s(stdout=%s, stderr=%s)' % ( self.__class__.__name__, @@ -148,14 +159,16 @@ def __repr__(self) -> str: def __hash__(self) -> int: return hash((self.stdout, self.stderr)) - # NOTE: Needs to return Any because of: + # NOTE: Here we use type Any because the function could also return + # NotImplemented: # https://github.com/python/mypy/issues/363#issue-39383094 def __eq__(self, other: Any) -> Any: return ( (self.stdout, self.stderr) == (other.stdout, other.stderr) # pylint: disable=protected-access if self.__class__ is other.__class__ else NotImplemented) - # NOTE: Needs to return Any because of: + # NOTE: Here we use type Any because the function could also return + # NotImplemented: # https://github.com/python/mypy/issues/363#issue-39383094 def __ne__(self, other: Any) -> Any: return ( diff --git a/core/jobs/types/job_run_result_test.py b/core/jobs/types/job_run_result_test.py index 67e71cc0ff76..a65b3590a4b0 100644 --- a/core/jobs/types/job_run_result_test.py +++ b/core/jobs/types/job_run_result_test.py @@ -26,40 +26,46 @@ class JobRunResultTests(test_utils.TestBase): - def test_usage(self): + def test_usage(self) -> None: run_result = job_run_result.JobRunResult(stdout='abc', stderr='123') self.assertEqual(run_result.stdout, 'abc') self.assertEqual(run_result.stderr, '123') - def test_as_stdout(self): + def test_as_stdout(self) -> None: run_result = job_run_result.JobRunResult.as_stdout(123) self.assertEqual(run_result.stdout, '123') self.assertEqual(run_result.stderr, '') - def test_as_stderr(self): + def test_as_stderr(self) -> None: run_result = job_run_result.JobRunResult.as_stderr(123) self.assertEqual(run_result.stderr, '123') self.assertEqual(run_result.stdout, '') - def test_as_stdout_using_repr(self): + def test_as_stdout_using_repr(self) -> None: run_result = 
job_run_result.JobRunResult.as_stdout('abc', use_repr=True) self.assertEqual(run_result.stdout, '\'abc\'') self.assertEqual(run_result.stderr, '') - def test_as_stderr_using_repr(self): + def test_as_stderr_using_repr(self) -> None: run_result = job_run_result.JobRunResult.as_stderr('abc', use_repr=True) self.assertEqual(run_result.stderr, '\'abc\'') self.assertEqual(run_result.stdout, '') - def test_empty_result_raises_value_error(self): - with self.assertRaisesRegexp(ValueError, 'must not be empty'): + def test_empty_result_raises_value_error(self) -> None: + with self.assertRaisesRegex(ValueError, 'must not be empty'): job_run_result.JobRunResult() - def test_enormous_result_raises_value_error(self): - with self.assertRaisesRegexp(ValueError, r'must not exceed \d+ bytes'): - job_run_result.JobRunResult(stdout='a' * 1501) + def test_enormous_stdout_result_is_truncated(self) -> None: + run_result = job_run_result.JobRunResult(stdout='a' * 5010) + self.assertEqual(run_result.stdout, '%s[TRUNCATED]' % ('a' * 5000)) + self.assertEqual(run_result.stderr, '') + + def test_enormous_stderr_result_is_truncated(self) -> None: + run_result = job_run_result.JobRunResult(stderr='a' * 5010) + self.assertEqual(run_result.stderr, '%s[TRUNCATED]' % ('a' * 5000)) + self.assertEqual(run_result.stdout, '') - def test_accumulate(self): + def test_accumulate(self) -> None: single_job_run_result = job_run_result.JobRunResult.accumulate([ job_run_result.JobRunResult(stdout='abc', stderr=''), job_run_result.JobRunResult(stdout='', stderr='123'), @@ -71,36 +77,47 @@ def test_accumulate(self): self.assertItemsEqual( single_job_run_result.stderr.split('\n'), ['123', '456']) - def test_accumulate_with_enormous_outputs(self): + def test_accumulate_one_less_than_limit_is_not_truncated(self) -> None: + accumulated_results = job_run_result.JobRunResult.accumulate([ + job_run_result.JobRunResult(stdout='', stderr='a' * 1999), + job_run_result.JobRunResult(stdout='', stderr='b' * 3000), + ]) + + 
self.assertEqual(len(accumulated_results), 1) + + self.assertItemsEqual( + accumulated_results[0].stderr.split('\n'), ['a' * 1999, 'b' * 3000]) + + def test_accumulate_one_more_than_limit_case_is_split(self) -> None: + accumulated_results = job_run_result.JobRunResult.accumulate([ + job_run_result.JobRunResult(stdout='', stderr='a' * 2000), + job_run_result.JobRunResult(stdout='', stderr='b' * 3000), + ]) + + self.assertEqual(len(accumulated_results), 2) + + def test_accumulate_with_enormous_outputs(self) -> None: accumulated_results = job_run_result.JobRunResult.accumulate([ job_run_result.JobRunResult( - stdout='a' * 750, stderr='b' * 750), + stdout='a' * 5002, stderr='b' * 5002), job_run_result.JobRunResult( - stdout='a' * 500, stderr='b' * 500), + stdout='a' * 2000, stderr='b' * 2000), job_run_result.JobRunResult( - stdout='a' * 250, stderr='b' * 250), + stdout='a' * 1000, stderr='b' * 1000), job_run_result.JobRunResult( - stdout='a' * 100, stderr='b' * 100), + stdout='a' * 1000, stderr='b' * 1000), job_run_result.JobRunResult( - stdout='a' * 50, stderr='b' * 50), + stdout='a' * 2000, stderr='b' * 2000), ]) # 100000 and 200000 are small enough ot fit as one, but the others will # each need their own result. 
- self.assertEqual(len(accumulated_results), 3) + self.assertEqual(len(accumulated_results), 4) - def test_accumulate_with_empty_list(self): + def test_accumulate_with_empty_list(self) -> None: self.assertEqual(job_run_result.JobRunResult.accumulate([]), []) - def test_len_in_bytes(self): - result = job_run_result.JobRunResult(stdout='123', stderr='123') - self.assertEqual(result.len_in_bytes(), 6) - - def test_len_in_bytes_of_unicode(self): - result = job_run_result.JobRunResult(stdout='😀', stderr='😀') - self.assertEqual(result.len_in_bytes(), 8) - - def test_equality(self): + def test_equality(self) -> None: a_result = job_run_result.JobRunResult(stdout='abc', stderr='123') b_result = job_run_result.JobRunResult(stdout='def', stderr='456') @@ -108,20 +125,20 @@ def test_equality(self): self.assertEqual(b_result, b_result) self.assertNotEqual(a_result, b_result) - def test_hash(self): + def test_hash(self) -> None: a_result = job_run_result.JobRunResult(stdout='abc', stderr='123') b_result = job_run_result.JobRunResult(stdout='def', stderr='456') self.assertIn(a_result, {a_result}) self.assertNotIn(b_result, {a_result}) - def test_pickle(self): + def test_pickle(self) -> None: run_result = job_run_result.JobRunResult(stdout='abc', stderr='123') pickle_result = pickle.loads(pickle.dumps(run_result)) self.assertEqual(run_result, pickle_result) - def test_repr(self): + def test_repr(self) -> None: run_result = job_run_result.JobRunResult(stdout='abc', stderr='123') self.assertEqual( diff --git a/core/jobs/types/model_property.py b/core/jobs/types/model_property.py index d58020bacfa8..e958a335656b 100644 --- a/core/jobs/types/model_property.py +++ b/core/jobs/types/model_property.py @@ -21,17 +21,37 @@ from core.jobs import job_utils from core.platform import models -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +from typing import Any, Callable, Iterator, Tuple, Type, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports 
import base_models + from mypy_imports import datastore_services + +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() +# The ModelProperty class can accept `id` Python property and all other +# properties that are derived from datastore_services.Property. Thus +# to generalize the type of properties that ModelProperty can accept, +# we defined a type variable here. +PropertyType = Union[ + datastore_services.Property, + Callable[[base_models.BaseModel], str] +] + class ModelProperty: """Represents a Property in a BaseModel subclass.""" __slots__ = ('_model_kind', '_property_name') - def __init__(self, model_class, property_obj): + def __init__( + self, + model_class: Type[base_models.BaseModel], + property_obj: PropertyType + ) -> None: """Initializes a new ModelProperty instance. Args: @@ -47,7 +67,7 @@ def __init__(self, model_class, property_obj): """ if not isinstance(model_class, type): raise TypeError('%r is not a model class' % model_class) - elif not issubclass(model_class, base_models.BaseModel): + if not issubclass(model_class, base_models.BaseModel): raise TypeError('%r is not a subclass of BaseModel' % model_class) self._model_kind = job_utils.get_model_kind(model_class) @@ -62,12 +82,12 @@ def __init__(self, model_class, property_obj): raise ValueError( '%r is not a property of %s' % (property_obj, self._model_kind)) else: - property_name = property_obj._name # pylint: disable=protected-access + property_name = property_obj._name # pylint: disable=protected-access self._property_name = property_name @property - def model_kind(self): + def model_kind(self) -> str: """Returns the kind of model this instance refers to. Returns: @@ -76,7 +96,7 @@ def model_kind(self): return self._model_kind @property - def property_name(self): + def property_name(self) -> str: """Returns the name of the property this instance refers to. 
Returns: @@ -84,7 +104,13 @@ def property_name(self): """ return self._property_name - def yield_value_from_model(self, model): + # Here we use type Any because this method yields the values of properties + # of a model and that values can be of type string, list, integer and other + # types too. So, that's why Iterator[Any] type is used as a yield type of + # function. + def yield_value_from_model( + self, model: base_models.BaseModel + ) -> Iterator[Any]: """Yields the value(s) of the property from the given model. If the property is repeated, all values are yielded. Otherwise, a single @@ -109,31 +135,62 @@ def yield_value_from_model(self, model): else: yield value - def _to_model_class(self): + def _to_model_class(self) -> Type[base_models.BaseModel]: """Returns the model class associated with this instance. Returns: type(BaseModel). The model type. """ - return job_utils.get_model_class(self._model_kind) + model_class = job_utils.get_model_class(self._model_kind) + + # To narrow down the type from datastore_services.Model to + # base_models.BaseModel, we used assert statement here. + assert issubclass(model_class, base_models.BaseModel) + return model_class - def _to_property(self): + def _to_property(self) -> PropertyType: """Returns the Property object associated with this instance. Returns: *. A property instance. """ - return getattr(self._to_model_class(), self._property_name) + property_obj = getattr(self._to_model_class(), self._property_name) + + # The behavior of `id` Python property is different during type + # checking and during runtime. During type checking it is considered as + # `Callable[]` because a Python property is decorated using Python's + # property class, while during runtime a Python property is considered + # as instance of Python's inbuilt property class. So to split the + # assertion in both the cases, we used `if MYPY:` clause here. 
+ if MYPY: # pragma: no cover + assert ( + isinstance(property_obj, datastore_services.Property) and + callable(property_obj) + ) + else: + assert isinstance( + property_obj, + (datastore_services.Property, property) + ) + + return property_obj - def _is_repeated_property(self): + def _is_repeated_property(self) -> bool: """Returns whether the property is repeated. Returns: bool. Whether the property is repeated. """ - return self._property_name != 'id' and self._to_property()._repeated # pylint: disable=protected-access + model_property = self._to_property() + if ( + self._property_name != 'id' and + isinstance(model_property, datastore_services.Property) + ): + return model_property._repeated # pylint: disable=protected-access + else: + return False - def __getstate__(self): + def __getstate__(self) -> Tuple[str, str]: """Called by pickle to get the value that uniquely defines self. Returns: @@ -141,7 +198,7 @@ def __getstate__(self): """ return self._model_kind, self._property_name - def __setstate__(self, state): + def __setstate__(self, state: Tuple[str, str]) -> None: """Called by pickle to build an instance from __getstate__'s value. 
Args: @@ -149,22 +206,28 @@ def __setstate__(self, state): """ self._model_kind, self._property_name = state - def __str__(self): + def __str__(self) -> str: return '%s.%s' % (self._model_kind, self._property_name) - def __repr__(self): + def __repr__(self) -> str: return 'ModelProperty(%s, %s)' % (self._model_kind, self) - def __eq__(self, other): + # NOTE: Here we use type Any because the function could also return + # NotImplemented: + # https://github.com/python/mypy/issues/363#issue-39383094 + def __eq__(self, other: Any) -> Any: return ( (self._model_kind, self._property_name) == ( other._model_kind, other._property_name) # pylint: disable=protected-access if self.__class__ is other.__class__ else NotImplemented) - def __ne__(self, other): + # NOTE: Here we use type Any because the function could also return + # NotImplemented: + # https://github.com/python/mypy/issues/363#issue-39383094 + def __ne__(self, other: Any) -> Any: return ( not (self == other) if self.__class__ is other.__class__ else NotImplemented) - def __hash__(self): + def __hash__(self) -> int: return hash((self._model_kind, self._property_name)) diff --git a/core/jobs/types/model_property_test.py b/core/jobs/types/model_property_test.py index 3c4c64c8c646..66098ff463d8 100644 --- a/core/jobs/types/model_property_test.py +++ b/core/jobs/types/model_property_test.py @@ -24,7 +24,12 @@ from core.platform import models from core.tests import test_utils -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() @@ -49,7 +54,7 @@ class RepeatedValueModel(base_models.BaseModel): class ModelPropertyTests(test_utils.TestBase): - def setUp(self): + def setUp(self) -> None: self.id_property = model_property.ModelProperty( 
SubclassOfBaseModel, SubclassOfBaseModel.id) self.ndb_property = model_property.ModelProperty( @@ -57,88 +62,104 @@ def setUp(self): self.ndb_repeated_property = model_property.ModelProperty( RepeatedValueModel, RepeatedValueModel.values) - def test_init_with_id_property(self): + def test_init_with_id_property(self) -> None: # Does not raise. model_property.ModelProperty( SubclassOfBaseModel, SubclassOfBaseModel.id) - def test_init_with_ndb_property(self): + def test_init_with_ndb_property(self) -> None: # Does not raise. model_property.ModelProperty( SubclassOfBaseModel, SubclassOfBaseModel.value) - def test_init_with_ndb_repeated_property(self): + def test_init_with_ndb_repeated_property(self) -> None: # Does not raise. model_property.ModelProperty( RepeatedValueModel, RepeatedValueModel.values) - def test_init_raises_type_error_when_model_is_not_a_class(self): + def test_init_raises_type_error_when_model_is_not_a_class(self) -> None: model = SubclassOfBaseModel() - with self.assertRaisesRegexp(TypeError, 'not a model class'): - model_property.ModelProperty(model, SubclassOfBaseModel.value) - - def test_init_raises_type_error_when_model_is_unrelated_to_base_model(self): - with self.assertRaisesRegexp(TypeError, 'not a subclass of BaseModel'): + with self.assertRaisesRegex(TypeError, 'not a model class'): + # Here we use MyPy ignore because ModelProperty has + # model_class argument, which can only accept classes + # that are inherited from BaseModel. But here we are + # passing an object of SubclassOfBaseModel. So, to + # avoid mypy error we added an ignore here. + model_property.ModelProperty(model, SubclassOfBaseModel.value) # type: ignore[arg-type] + + def test_init_raises_type_error_when_model_is_unrelated_to_base_model( + self + ) -> None: + with self.assertRaisesRegex(TypeError, 'not a subclass of BaseModel'): + # Here we use MyPy ignore because ModelProperty has model_class + # argument, which can only accept classes that are inherited from + # BaseModel. 
But here we are passing a class that is inherited from + # datastore_services.Model. Thus to silence mypy error, we added an + # ignore here. model_property.ModelProperty( - SubclassOfNdbModel, SubclassOfNdbModel.value) + SubclassOfNdbModel, SubclassOfNdbModel.value) # type: ignore[arg-type] - def test_init_raises_type_error_when_property_is_not_an_ndb_property(self): + def test_init_raises_type_error_when_property_is_not_an_ndb_property( + self + ) -> None: model = SubclassOfBaseModel(value='123') - with self.assertRaisesRegexp(TypeError, 'not an NDB Property'): + with self.assertRaisesRegex(TypeError, 'not an NDB Property'): model_property.ModelProperty(SubclassOfBaseModel, model.value) - def test_init_raises_value_error_when_property_is_not_in_model(self): - with self.assertRaisesRegexp(ValueError, 'not a property of'): + def test_init_raises_value_error_when_property_is_not_in_model( + self + ) -> None: + with self.assertRaisesRegex(ValueError, 'not a property of'): model_property.ModelProperty( SubclassOfBaseModel, SubclassOfNdbModel.value) - def test_model_kind_of_id_property(self): + def test_model_kind_of_id_property(self) -> None: self.assertEqual(self.id_property.model_kind, 'SubclassOfBaseModel') - def test_model_kind_of_ndb_property(self): + def test_model_kind_of_ndb_property(self) -> None: self.assertEqual(self.ndb_property.model_kind, 'SubclassOfBaseModel') - def test_model_kind_of_ndb_repeated_property(self): + def test_model_kind_of_ndb_repeated_property(self) -> None: self.assertEqual( self.ndb_repeated_property.model_kind, 'RepeatedValueModel') - def test_property_name_of_id_property(self): + def test_property_name_of_id_property(self) -> None: self.assertEqual(self.id_property.property_name, 'id') - def test_property_name_of_ndb_property(self): + def test_property_name_of_ndb_property(self) -> None: self.assertEqual(self.ndb_property.property_name, 'value') - def test_property_name_of_ndb_repeated_property(self): + def 
test_property_name_of_ndb_repeated_property(self) -> None: self.assertEqual(self.ndb_repeated_property.property_name, 'values') - def test_str_of_id_property(self): + def test_str_of_id_property(self) -> None: self.assertEqual(str(self.id_property), 'SubclassOfBaseModel.id') - def test_str_of_ndb_property(self): + def test_str_of_ndb_property(self) -> None: self.assertEqual(str(self.ndb_property), 'SubclassOfBaseModel.value') - def test_str_of_ndb_repeated_property(self): + def test_str_of_ndb_repeated_property(self) -> None: self.assertEqual( str(self.ndb_repeated_property), 'RepeatedValueModel.values') - def test_repr_of_id_property(self): + def test_repr_of_id_property(self) -> None: self.assertEqual( repr(self.id_property), 'ModelProperty(SubclassOfBaseModel, SubclassOfBaseModel.id)') - def test_repr_of_ndb_property(self): + def test_repr_of_ndb_property(self) -> None: self.assertEqual( repr(self.ndb_property), 'ModelProperty(SubclassOfBaseModel, SubclassOfBaseModel.value)') - def test_repr_of_ndb_repeated_property(self): + def test_repr_of_ndb_repeated_property(self) -> None: self.assertEqual( repr(self.ndb_repeated_property), 'ModelProperty(RepeatedValueModel, RepeatedValueModel.values)') - def test_equality(self): + def test_equality(self) -> None: self.assertNotEqual(self.id_property, self.ndb_property) self.assertNotEqual(self.ndb_property, self.ndb_repeated_property) self.assertNotEqual(self.ndb_repeated_property, self.id_property) @@ -156,7 +177,7 @@ def test_equality(self): model_property.ModelProperty( RepeatedValueModel, RepeatedValueModel.values)) - def test_hash_of_id_property(self): + def test_hash_of_id_property(self) -> None: id_property_set = { model_property.ModelProperty( SubclassOfBaseModel, SubclassOfBaseModel.id), @@ -166,7 +187,7 @@ def test_hash_of_id_property(self): self.assertNotIn(self.ndb_property, id_property_set) self.assertNotIn(self.ndb_repeated_property, id_property_set) - def test_hash_of_ndb_property(self): + def 
test_hash_of_ndb_property(self) -> None: ndb_property_set = { model_property.ModelProperty( SubclassOfBaseModel, SubclassOfBaseModel.value), @@ -176,7 +197,7 @@ def test_hash_of_ndb_property(self): self.assertNotIn(self.id_property, ndb_property_set) self.assertNotIn(self.ndb_repeated_property, ndb_property_set) - def test_hash_of_ndb_repeated_property(self): + def test_hash_of_ndb_repeated_property(self) -> None: ndb_repeated_property_set = { model_property.ModelProperty( RepeatedValueModel, RepeatedValueModel.values), @@ -186,45 +207,48 @@ def test_hash_of_ndb_repeated_property(self): self.assertNotIn(self.id_property, ndb_repeated_property_set) self.assertNotIn(self.ndb_property, ndb_repeated_property_set) - def test_yield_value_from_id_property(self): + def test_yield_value_from_id_property(self) -> None: model = SubclassOfBaseModel(id='123') self.assertEqual( list(self.id_property.yield_value_from_model(model)), ['123']) - def test_yield_value_from_ndb_property(self): + def test_yield_value_from_ndb_property(self) -> None: model = SubclassOfBaseModel(value='abc') self.assertEqual( list(self.ndb_property.yield_value_from_model(model)), ['abc']) - def test_yield_value_from_ndb_repeated_property(self): + def test_yield_value_from_ndb_repeated_property(self) -> None: model = RepeatedValueModel(values=['123', '456', '789']) self.assertEqual( list(self.ndb_repeated_property.yield_value_from_model(model)), ['123', '456', '789']) - def test_yield_value_from_model_raises_type_error_if_not_right_kind(self): + def test_yield_value_from_model_raises_type_error_if_not_right_kind( + self + ) -> None: model = RepeatedValueModel(values=['123', '456', '789']) - self.assertRaisesRegexp( - TypeError, 'not an instance of SubclassOfBaseModel', - lambda: list(self.ndb_property.yield_value_from_model(model))) + with self.assertRaisesRegex( + TypeError, 'not an instance of SubclassOfBaseModel' + ): + list(self.ndb_property.yield_value_from_model(model)) - def 
test_pickle_id_property(self): + def test_pickle_id_property(self) -> None: pickle_value = pickle.loads(pickle.dumps(self.id_property)) self.assertEqual(self.id_property, pickle_value) self.assertIn(pickle_value, {self.id_property}) - def test_pickle_ndb_property(self): + def test_pickle_ndb_property(self) -> None: pickle_value = pickle.loads(pickle.dumps(self.ndb_property)) self.assertEqual(self.ndb_property, pickle_value) self.assertIn(pickle_value, {self.ndb_property}) - def test_pickle_ndb_repeated_property(self): + def test_pickle_ndb_repeated_property(self) -> None: pickle_value = pickle.loads(pickle.dumps(self.ndb_repeated_property)) self.assertEqual(self.ndb_repeated_property, pickle_value) diff --git a/core/jobs/types/topic_validation_errors.py b/core/jobs/types/topic_validation_errors.py index 24d8970ede93..b6406358bcfd 100644 --- a/core/jobs/types/topic_validation_errors.py +++ b/core/jobs/types/topic_validation_errors.py @@ -19,13 +19,20 @@ from __future__ import annotations from core.jobs.types import base_validation_errors +from core.platform import models + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import topic_models + +(topic_models,) = models.Registry.import_models([models.Names.TOPIC]) class ModelCanonicalNameMismatchError(base_validation_errors.BaseAuditError): """Error class for models that have mismatching names.""" - def __init__(self, model): + def __init__(self, model: topic_models.TopicModel) -> None: message = ( 'Entity name %s in lowercase does not match ' 'canonical name %s' % (model.name, model.canonical_name)) - super(ModelCanonicalNameMismatchError, self).__init__(message, model) + super().__init__(message, model) diff --git a/core/jobs/types/topic_validation_errors_test.py b/core/jobs/types/topic_validation_errors_test.py index 830c26beccb5..e9666da1555b 100644 --- a/core/jobs/types/topic_validation_errors_test.py +++ b/core/jobs/types/topic_validation_errors_test.py @@ -22,7 +22,11 @@ from core.jobs.types 
import topic_validation_errors from core.platform import models -(topic_models,) = models.Registry.import_models([models.NAMES.topic]) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import topic_models + +(topic_models,) = models.Registry.import_models([models.Names.TOPIC]) datastore_services = models.Registry.import_datastore_services() @@ -30,7 +34,7 @@ class ModelCanonicalNameMismatchErrorTests( base_validation_errors_test.AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: model = topic_models.TopicModel( id='test', name='name', diff --git a/core/jobs/types/user_validation_errors.py b/core/jobs/types/user_validation_errors.py index 8ae8772de5e1..fce99179f634 100644 --- a/core/jobs/types/user_validation_errors.py +++ b/core/jobs/types/user_validation_errors.py @@ -18,57 +18,64 @@ from __future__ import annotations -from core import feconf from core.jobs.types import base_validation_errors +from core.platform import models +from typing import List -class ModelIncorrectKeyError(base_validation_errors.BaseAuditError): - """Error class for incorrect key in PendingDeletionRequestModel.""" +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import user_models - def __init__(self, model, incorrect_keys): - message = 'contains keys %s are not allowed' % (incorrect_keys) - super(ModelIncorrectKeyError, self).__init__(message, model) +(base_models, user_models) = models.Registry.import_models( + [models.Names.BASE_MODEL, models.Names.USER]) -class ModelExpiringError(base_validation_errors.BaseAuditError): - """Error class for models that are expiring.""" +class ModelIncorrectKeyError(base_validation_errors.BaseAuditError): + """Error class for incorrect key in PendingDeletionRequestModel.""" - def __init__(self, model): - message = 'mark model as deleted when older than %s days' % ( - feconf.PERIOD_TO_MARK_MODELS_AS_DELETED.days) - super(ModelExpiringError, self).__init__(message, model) + def __init__( + self, + 
model: user_models.PendingDeletionRequestModel, + incorrect_keys: List[str] + ) -> None: + message = 'contains keys %s are not allowed' % (incorrect_keys) + super().__init__(message, model) class DraftChangeListLastUpdatedNoneError( - base_validation_errors.BaseAuditError): + base_validation_errors.BaseAuditError +): """Error class for models with draft change list but draft change list last_updated is None. """ - def __init__(self, model): + def __init__(self, model: user_models.ExplorationUserDataModel) -> None: message = ( 'draft change list %s exists but draft change list ' 'last updated is None' % model.draft_change_list) - super(DraftChangeListLastUpdatedNoneError, self).__init__( + super().__init__( message, model) class DraftChangeListLastUpdatedInvalidError( - base_validation_errors.BaseAuditError): + base_validation_errors.BaseAuditError +): """Error class for models with invalid draft change list last_updated.""" - def __init__(self, model): + def __init__(self, model: user_models.ExplorationUserDataModel) -> None: message = ( 'draft change list last updated %s is greater than the time ' 'when job was run' % model.draft_change_list_last_updated) - super(DraftChangeListLastUpdatedInvalidError, self).__init__( + super().__init__( message, model) class ArchivedModelNotMarkedDeletedError( - base_validation_errors.BaseAuditError): + base_validation_errors.BaseAuditError +): """Error class for models which are archived but not deleted.""" - def __init__(self, model): + def __init__(self, model: user_models.UserQueryModel) -> None: message = 'model is archived but not marked as deleted' - super(ArchivedModelNotMarkedDeletedError, self).__init__(message, model) + super().__init__(message, model) diff --git a/core/jobs/types/user_validation_errors_test.py b/core/jobs/types/user_validation_errors_test.py index 6b108941ff1d..4fdb8d2767a2 100644 --- a/core/jobs/types/user_validation_errors_test.py +++ b/core/jobs/types/user_validation_errors_test.py @@ -26,34 +26,22 
@@ from core.jobs.types import user_validation_errors from core.platform import models +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import user_models + (base_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.user]) + [models.Names.BASE_MODEL, models.Names.USER]) datastore_services = models.Registry.import_datastore_services() -class ModelExpiringErrorTests(base_validation_errors_test.AuditErrorsTestBase): - - def test_message(self): - model = user_models.UserQueryModel( - id='test', - submitter_id='submitter', - created_on=self.YEAR_AGO, - last_updated=self.YEAR_AGO - ) - error = user_validation_errors.ModelExpiringError(model) - - self.assertEqual( - error.stderr, - 'ModelExpiringError in UserQueryModel(id="test"): mark model ' - 'as deleted when older than %s days' % ( - feconf.PERIOD_TO_MARK_MODELS_AS_DELETED.days)) - - class ModelIncorrectKeyErrorTests( - base_validation_errors_test.AuditErrorsTestBase): + base_validation_errors_test.AuditErrorsTestBase +): - def test_message(self): + def test_message(self) -> None: model = user_models.PendingDeletionRequestModel( id='test' ) @@ -70,7 +58,7 @@ def test_message(self): class ModelIdRegexErrorTests(base_validation_errors_test.AuditErrorsTestBase): - def test_message(self): + def test_message(self) -> None: model = base_models.BaseModel( id='?!"', created_on=self.YEAR_AGO, @@ -84,9 +72,10 @@ def test_message(self): class DraftChangeListLastUpdatedNoneErrorTests( - base_validation_errors_test.AuditErrorsTestBase): + base_validation_errors_test.AuditErrorsTestBase +): - def test_message(self): + def test_message(self) -> None: draft_change_list = [{ 'cmd': 'edit_exploration_property', 'property_name': 'objective', @@ -113,9 +102,10 @@ def test_message(self): class DraftChangeListLastUpdatedInvalidErrorTests( - base_validation_errors_test.AuditErrorsTestBase): + base_validation_errors_test.AuditErrorsTestBase +): - 
def test_message(self): + def test_message(self) -> None: draft_change_list = [{ 'cmd': 'edit_exploration_property', 'property_name': 'objective', @@ -144,9 +134,10 @@ def test_message(self): class ArchivedModelNotMarkedDeletedErrorTests( - base_validation_errors_test.AuditErrorsTestBase): + base_validation_errors_test.AuditErrorsTestBase +): - def test_message(self): + def test_message(self) -> None: model = user_models.UserQueryModel( id='test', submitter_id='submitter', diff --git a/core/platform/app_identity/gae_app_identity_services.py b/core/platform/app_identity/gae_app_identity_services.py index a3d97690a5be..3c0e4d49709b 100644 --- a/core/platform/app_identity/gae_app_identity_services.py +++ b/core/platform/app_identity/gae_app_identity_services.py @@ -18,7 +18,7 @@ from __future__ import annotations -import os +from core import feconf _GCS_RESOURCE_BUCKET_NAME_SUFFIX = '-resources' @@ -32,8 +32,11 @@ def get_application_id() -> str: Returns: str. The application ID. + + Raises: + ValueError. Value can't be None for application id. 
""" - app_id = os.getenv('GOOGLE_CLOUD_PROJECT') + app_id = feconf.OPPIA_PROJECT_ID if app_id is None: raise ValueError('Value None for application id is invalid.') return app_id diff --git a/core/platform/app_identity/gae_app_identity_services_test.py b/core/platform/app_identity/gae_app_identity_services_test.py index 4597992bb15f..a2321d4591a6 100644 --- a/core/platform/app_identity/gae_app_identity_services_test.py +++ b/core/platform/app_identity/gae_app_identity_services_test.py @@ -18,8 +18,7 @@ from __future__ import annotations -import os - +from core import feconf from core.platform.app_identity import gae_app_identity_services from core.tests import test_utils @@ -27,19 +26,19 @@ class GaeAppIdentityServicesTests(test_utils.GenericTestBase): def test_get_application_id(self) -> None: - with self.swap(os, 'getenv', lambda _: 'some_id'): + with self.swap(feconf, 'OPPIA_PROJECT_ID', 'some_id'): self.assertEqual( gae_app_identity_services.get_application_id(), 'some_id') def test_get_application_id_throws_error(self) -> None: - with self.swap(os, 'getenv', lambda _: None): - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.swap(feconf, 'OPPIA_PROJECT_ID', None): + with self.assertRaisesRegex( ValueError, 'Value None for application id is invalid.' 
): gae_app_identity_services.get_application_id() def test_get_default_gcs_bucket_name(self) -> None: - with self.swap(os, 'getenv', lambda _: 'some_id'): + with self.swap(feconf, 'OPPIA_PROJECT_ID', 'some_id'): self.assertEqual( gae_app_identity_services.get_gcs_resource_bucket_name(), 'some_id-resources') diff --git a/core/platform/auth/firebase_auth_services.py b/core/platform/auth/firebase_auth_services.py index cd6654efede2..02248c19b1a8 100644 --- a/core/platform/auth/firebase_auth_services.py +++ b/core/platform/auth/firebase_auth_services.py @@ -56,7 +56,6 @@ import logging from core import feconf -from core import python_utils from core.constants import constants from core.domain import auth_domain from core.platform import models @@ -64,7 +63,7 @@ import firebase_admin from firebase_admin import auth as firebase_auth from firebase_admin import exceptions as firebase_exceptions -from typing import Dict, List, Optional, Union +from typing import List, Optional import webapp2 MYPY = False @@ -72,7 +71,7 @@ from mypy_imports import auth_models auth_models, user_models = ( - models.Registry.import_models([models.NAMES.auth, models.NAMES.user])) + models.Registry.import_models([models.Names.AUTH, models.Names.USER])) transaction_services = models.Registry.import_transaction_services() @@ -89,7 +88,7 @@ def establish_firebase_connection() -> None: firebase_admin.App. The App being by the Firebase SDK. Raises: - Exception. The Firebase app has a genuine problem. + ValueError. The Firebase app has a genuine problem. """ try: firebase_admin.get_app() @@ -409,14 +408,13 @@ def associate_multi_auth_ids_with_user_ids( Exception. One or more auth associations already exist. 
""" # Turn list(pair) to pair(list): https://stackoverflow.com/a/7558990/4859885 - auth_ids, user_ids = python_utils.ZIP(*auth_id_user_id_pairs) + auth_ids, user_ids = zip(*auth_id_user_id_pairs) user_id_collisions = get_multi_user_ids_from_auth_ids(auth_ids) if any(user_id is not None for user_id in user_id_collisions): user_id_collisions_text = ', '.join( '{auth_id=%r: user_id=%r}' % (auth_id, user_id) - for auth_id, user_id in python_utils.ZIP( - auth_ids, user_id_collisions) + for auth_id, user_id in zip(auth_ids, user_id_collisions) if user_id is not None) raise Exception('already associated: %s' % user_id_collisions_text) @@ -424,8 +422,7 @@ def associate_multi_auth_ids_with_user_ids( if any(auth_id is not None for auth_id in auth_id_collisions): auth_id_collisions_text = ', '.join( '{user_id=%r: auth_id=%r}' % (user_id, auth_id) - for user_id, auth_id in python_utils.ZIP( - user_ids, auth_id_collisions) + for user_id, auth_id in zip(user_ids, auth_id_collisions) if auth_id is not None) raise Exception('already associated: %s' % auth_id_collisions_text) @@ -433,7 +430,7 @@ def associate_multi_auth_ids_with_user_ids( # doesn't exist because get_auth_id_from_user_id returned None. assoc_by_auth_id_models = [ auth_models.UserIdByFirebaseAuthIdModel(id=auth_id, user_id=user_id) - for auth_id, user_id in python_utils.ZIP(auth_ids, user_ids) + for auth_id, user_id in zip(auth_ids, user_ids) ] auth_models.UserIdByFirebaseAuthIdModel.update_timestamps_multi( assoc_by_auth_id_models) @@ -447,7 +444,7 @@ def associate_multi_auth_ids_with_user_ids( # create a new model rather than update an existing one. 
assoc_by_user_id_models = [ auth_models.UserAuthDetailsModel(id=user_id, firebase_auth_id=auth_id) - for auth_id, user_id, assoc_by_user_id_model in python_utils.ZIP( + for auth_id, user_id, assoc_by_user_id_model in zip( auth_ids, user_ids, auth_models.UserAuthDetailsModel.get_multi(user_ids)) if (assoc_by_user_id_model is None or @@ -464,6 +461,9 @@ def grant_super_admin_privileges(user_id: str) -> None: Args: user_id: str. The Oppia user ID to promote to super admin. + + Raises: + ValueError. No Firebase account associated with given user ID. """ auth_id = get_auth_id_from_user_id(user_id) if auth_id is None: @@ -480,6 +480,9 @@ def revoke_super_admin_privileges(user_id: str) -> None: Args: user_id: str. The Oppia user ID to revoke privileges from. + + Raises: + ValueError. No Firebase account associated with given user ID. """ auth_id = get_auth_id_from_user_id(user_id) if auth_id is None: @@ -556,18 +559,25 @@ def _get_auth_claims_from_session_cookie( return None try: claims = firebase_auth.verify_session_cookie(cookie, check_revoked=True) - except firebase_auth.ExpiredSessionCookieError: - raise auth_domain.StaleAuthSessionError('session has expired') - except firebase_auth.RevokedSessionCookieError: - raise auth_domain.StaleAuthSessionError('session has been revoked') - except (firebase_exceptions.FirebaseError, ValueError) as error: - raise auth_domain.InvalidAuthSessionError('session invalid: %s' % error) + except firebase_auth.ExpiredSessionCookieError as e: + raise auth_domain.StaleAuthSessionError( + 'session has expired') from e + except firebase_auth.RevokedSessionCookieError as e: + raise auth_domain.StaleAuthSessionError( + 'session has been revoked') from e + except firebase_auth.UserDisabledError as e: + raise auth_domain.UserDisabledError( + 'user is being deleted') from e + except ( + firebase_exceptions.FirebaseError, ValueError) as error: + raise auth_domain.InvalidAuthSessionError( + 'session invalid: %s' % error) from error else: return 
_create_auth_claims(claims) def _create_auth_claims( - firebase_claims: Dict[str, Optional[Union[str, bool]]] + firebase_claims: auth_domain.AuthClaimsDict ) -> auth_domain.AuthClaims: """Returns a new AuthClaims domain object from Firebase claims. @@ -578,10 +588,10 @@ def _create_auth_claims( Returns: AuthClaims. Oppia's representation of auth claims. """ - auth_id = firebase_claims.get('sub') + auth_id = firebase_claims['sub'] email = firebase_claims.get('email') role_is_super_admin = ( email == feconf.ADMIN_EMAIL_ADDRESS or firebase_claims.get('role') == feconf.FIREBASE_ROLE_SUPER_ADMIN) - return auth_domain.AuthClaims( # type: ignore[no-untyped-call] + return auth_domain.AuthClaims( auth_id, email, role_is_super_admin=role_is_super_admin) diff --git a/core/platform/auth/firebase_auth_services_test.py b/core/platform/auth/firebase_auth_services_test.py index f3b81577a36a..726673b9e8b5 100644 --- a/core/platform/auth/firebase_auth_services_test.py +++ b/core/platform/auth/firebase_auth_services_test.py @@ -26,7 +26,6 @@ from unittest import mock from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.domain import auth_domain @@ -46,7 +45,7 @@ from mypy_imports import auth_models auth_models, user_models = ( - models.Registry.import_models([models.NAMES.auth, models.NAMES.user])) + models.Registry.import_models([models.Names.AUTH, models.Names.USER])) UidsPartitionTupleType = Tuple[ List[Tuple[int, str]], @@ -253,6 +252,10 @@ def delete_users( uids_to_delete = set(uids) errors = [] else: + # Here we use cast because method 'utils.partition' returns a + # broader type Tuple[Iterable[...], Iterable[...]], thus to + # narrow down the type to 'UidsPartitionTupleType' we used + # cast here. 
disabled_uids, enabled_uids = cast( UidsPartitionTupleType, utils.partition( @@ -388,8 +391,8 @@ def list_users( try: page_index = int(page_token) if page_token is not None else 0 - except (ValueError, TypeError): - raise ValueError('page_token=%r is invalid' % page_token) + except (ValueError, TypeError) as e: + raise ValueError('page_token=%r is invalid' % page_token) from e if 0 <= page_index < len(page_list): return self._create_list_users_page_fragile(page_list, page_index) @@ -652,10 +655,14 @@ def mock_delete_users( if error_to_raise is not None: raise error_to_raise + # Here we use cast because method 'utils.partition' returns a + # broader type Tuple[Iterable[...], Iterable[...]], thus to + # narrow down the type to 'UidsZipPartitionTupleType' we used + # cast here. uids_to_delete, uids_to_fail = cast( UidsZipPartitionTupleType, utils.partition( - python_utils.ZIP(uids, updated_individual_error_pattern), + zip(uids, updated_individual_error_pattern), predicate=lambda uid_and_error: uid_and_error[1] is None, enumerated=True)) @@ -710,20 +717,24 @@ def mock_import_users( if error_to_raise is not None: raise error_to_raise + # Here we use cast because method 'utils.partition' returns a + # broader type Tuple[Iterable[...], Iterable[...]], thus to + # narrow down the type to 'RecordsPartitionTupleType' we used + # cast here. 
records_to_import, records_to_fail = cast( RecordsPartitionTupleType, utils.partition( - python_utils.ZIP(records, updated_individual_error_pattern), + zip(records, updated_individual_error_pattern), predicate=( lambda record_and_error: record_and_error[1] is None), enumerated=True)) self.import_users([record for _, (record, _) in records_to_import]) + errors = [(i, error) for i, (_, error) in records_to_fail] return self._create_user_import_result_fragile( - len(records), cast( - List[Tuple[int, str]], - [(i, error) for i, (_, error) in records_to_fail])) + len(records), errors=errors + ) assert self._test is not None return self._test.swap(firebase_auth, 'import_users', mock_import_users) @@ -757,7 +768,7 @@ def _decode_user_claims( dict(str: *)|None. The decoded claims or None. """ try: - # Casting the result here because the type stubs for library 'json' + # Here we use cast because the type stubs for library 'json' # returns 'Any' from json.loads. # https://github.com/python/typeshed/blob/30ad9e945f42cca1190cdba58c65bdcfc313480f/stdlib/json/__init__.pyi#L36 return cast( @@ -902,7 +913,7 @@ def test_raises_authentic_get_app_error(self) -> None: raises=Exception('unexpected call')) with get_app_swap as get_app_counter, init_app_swap as init_app_counter: - with self.assertRaisesRegexp(ValueError, 'uh-oh!'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(ValueError, 'uh-oh!'): firebase_auth_services.establish_firebase_connection() self.assertEqual(get_app_counter.times_called, 1) @@ -915,7 +926,7 @@ def test_raises_authentic_initialize_app_error(self) -> None: firebase_admin, 'initialize_app', raises=ValueError('uh-oh!')) with get_app_swap as get_app_counter, init_app_swap as init_app_counter: - with self.assertRaisesRegexp(ValueError, 'uh-oh!'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(ValueError, 'uh-oh!'): firebase_auth_services.establish_firebase_connection() self.assertEqual(get_app_counter.times_called, 1) @@ -929,13 
+940,13 @@ class FirebaseAuthServicesTestBase(test_utils.AppEngineTestBase): EMAIL = 'foo@bar.com' def setUp(self) -> None: - super(FirebaseAuthServicesTestBase, self).setUp() + super().setUp() self.firebase_sdk_stub = FirebaseAdminSdkStub() self.firebase_sdk_stub.install(self) def tearDown(self) -> None: self.firebase_sdk_stub.uninstall() - super(FirebaseAuthServicesTestBase, self).tearDown() + super().tearDown() def capture_logging( self, min_level: int = logging.INFO @@ -953,7 +964,7 @@ def capture_logging( Returns: Context manager. The context manager for capturing logging messages. """ - return super(FirebaseAuthServicesTestBase, self).capture_logging( + return super().capture_logging( min_level=min_level) def create_request( @@ -1019,11 +1030,11 @@ def test_updates_user_successfully(self) -> None: def test_raises_error_when_user_does_not_exist(self) -> None: auth_models.UserAuthDetailsModel(id='uid', firebase_auth_id=None).put() - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( ValueError, 'user_id=uid has no Firebase account'): firebase_auth_services.grant_super_admin_privileges('uid') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( ValueError, 'user_id=uid has no Firebase account'): firebase_auth_services.revoke_super_admin_privileges('uid') @@ -1039,7 +1050,7 @@ def test_grant_super_admin_privileges_revokes_session_cookies(self) -> None: firebase_auth_services.grant_super_admin_privileges('uid') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( firebase_auth.RevokedSessionCookieError, 'invalid'): firebase_auth.verify_session_cookie(cookie, check_revoked=True) @@ -1057,7 +1068,7 @@ def test_revoke_super_admin_privileges_revokes_session_cookies( firebase_auth_services.revoke_super_admin_privileges('uid') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( 
firebase_auth.RevokedSessionCookieError, 'invalid'): firebase_auth.verify_session_cookie(cookie, check_revoked=True) @@ -1065,7 +1076,7 @@ def test_revoke_super_admin_privileges_revokes_session_cookies( class EstablishAuthSessionTests(FirebaseAuthServicesTestBase): def setUp(self) -> None: - super(EstablishAuthSessionTests, self).setUp() + super().setUp() self.id_token = ( self.firebase_sdk_stub.create_user(self.AUTH_ID, email=self.EMAIL)) @@ -1075,7 +1086,7 @@ def test_adds_cookie_to_response_from_id_token_in_request(self) -> None: firebase_auth_services.establish_auth_session(req, res) - self.assert_matches_regexps( # type: ignore[no-untyped-call] + self.assert_matches_regexps( res.headers.get_all('Set-Cookie'), ['session=.*;']) def test_does_nothing_when_request_has_cookie(self) -> None: @@ -1094,7 +1105,7 @@ def test_reports_error_when_request_missing_both_cookie_and_id_token( req = self.create_request() res = self.create_response() - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( firebase_auth.InvalidIdTokenError, 'missing id_token'): firebase_auth_services.establish_auth_session(req, res) @@ -1105,12 +1116,12 @@ class DestroyAuthSessionTests(FirebaseAuthServicesTestBase): def test_deletes_cookie_from_response(self) -> None: res = self.create_response(session_cookie='abc') - self.assert_matches_regexps( # type: ignore[no-untyped-call] + self.assert_matches_regexps( res.headers.get_all('Set-Cookie'), ['session=abc;']) firebase_auth_services.destroy_auth_session(res) - self.assert_matches_regexps( # type: ignore[no-untyped-call] + self.assert_matches_regexps( res.headers.get_all('Set-Cookie'), ['session=abc;', 'session=; Max-Age=0;']) @@ -1133,7 +1144,7 @@ def test_returns_claims_when_cookie_is_present(self) -> None: self.assertEqual( firebase_auth_services.get_auth_claims_from_request( self.create_request(session_cookie=cookie)), - auth_domain.AuthClaims(self.AUTH_ID, self.EMAIL, False)) # type: 
ignore[no-untyped-call] + auth_domain.AuthClaims(self.AUTH_ID, self.EMAIL, False)) def test_feconf_admin_email_address_is_super_admin(self) -> None: cookie = firebase_auth.create_session_cookie( @@ -1144,7 +1155,7 @@ def test_feconf_admin_email_address_is_super_admin(self) -> None: self.assertEqual( firebase_auth_services.get_auth_claims_from_request( self.create_request(session_cookie=cookie)), - auth_domain.AuthClaims( # type: ignore[no-untyped-call] + auth_domain.AuthClaims( self.AUTH_ID, feconf.ADMIN_EMAIL_ADDRESS, True)) def test_raises_stale_auth_session_error_when_cookie_is_expired( @@ -1158,7 +1169,7 @@ def test_raises_stale_auth_session_error_when_cookie_is_expired( firebase_auth, 'verify_session_cookie', error=firebase_auth.ExpiredSessionCookieError('uh-oh', None)) - with always_raise_expired_session_cookie_error, self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with always_raise_expired_session_cookie_error, self.assertRaisesRegex( auth_domain.StaleAuthSessionError, 'expired' ): firebase_auth_services.get_auth_claims_from_request( @@ -1176,10 +1187,30 @@ def test_raises_stale_auth_session_error_when_cookie_is_revoked( error=firebase_auth.RevokedSessionCookieError('uh-oh')) with always_raise_revoked_session_cookie_error: - self.assertRaisesRegexp( # type: ignore[no-untyped-call] - auth_domain.StaleAuthSessionError, 'revoked', - lambda: firebase_auth_services.get_auth_claims_from_request( - self.create_request(session_cookie=cookie))) + with self.assertRaisesRegex( + auth_domain.StaleAuthSessionError, 'revoked' + ): + firebase_auth_services.get_auth_claims_from_request( + self.create_request(session_cookie=cookie) + ) + + def test_raises_user_disabled_error_when_user_is_disabled(self) -> None: + cookie = firebase_auth.create_session_cookie( + self.firebase_sdk_stub.create_user( + self.AUTH_ID, email=self.EMAIL, disabled=True + ), + feconf.FIREBASE_SESSION_COOKIE_MAX_AGE + ) + + always_raise_expired_session_cookie_error = 
self.swap_to_always_raise( + firebase_auth, 'verify_session_cookie', + error=firebase_auth.UserDisabledError('uh-oh')) + + with always_raise_expired_session_cookie_error, self.assertRaisesRegex( + auth_domain.UserDisabledError, 'user is being deleted' + ): + firebase_auth_services.get_auth_claims_from_request( + self.create_request(session_cookie=cookie)) def test_raises_auth_session_error_when_cookie_is_invalid(self) -> None: cookie = firebase_auth.create_session_cookie( @@ -1191,10 +1222,12 @@ def test_raises_auth_session_error_when_cookie_is_invalid(self) -> None: error=firebase_exceptions.UnknownError('uh-oh')) with always_raise_unknown_error: - self.assertRaisesRegexp( # type: ignore[no-untyped-call] - auth_domain.InvalidAuthSessionError, 'uh-oh', - lambda: firebase_auth_services.get_auth_claims_from_request( - self.create_request(session_cookie=cookie))) + with self.assertRaisesRegex( + auth_domain.InvalidAuthSessionError, 'uh-oh' + ): + firebase_auth_services.get_auth_claims_from_request( + self.create_request(session_cookie=cookie) + ) class GenericAssociationTests(FirebaseAuthServicesTestBase): @@ -1260,7 +1293,7 @@ def test_associate_with_user_id_collision_raises(self) -> None: firebase_auth_services.associate_auth_id_with_user_id( auth_domain.AuthIdUserIdPair('aid', 'uid')) - with self.assertRaisesRegexp(Exception, 'already associated'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, 'already associated'): firebase_auth_services.associate_auth_id_with_user_id( auth_domain.AuthIdUserIdPair('aid', 'uid')) @@ -1270,7 +1303,7 @@ def test_associate_with_auth_id_collision_raises(self) -> None: # Erase the user_id collision, but leave the auth_id collision. 
auth_models.UserIdByFirebaseAuthIdModel.delete_by_id('aid') - with self.assertRaisesRegexp(Exception, 'already associated'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, 'already associated'): firebase_auth_services.associate_auth_id_with_user_id( auth_domain.AuthIdUserIdPair('aid', 'uid')) @@ -1290,7 +1323,7 @@ def test_associate_multi_with_user_id_collision_raises(self) -> None: firebase_auth_services.associate_auth_id_with_user_id( auth_domain.AuthIdUserIdPair('aid1', 'uid1')) - with self.assertRaisesRegexp(Exception, 'already associated'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, 'already associated'): firebase_auth_services.associate_multi_auth_ids_with_user_ids( [auth_domain.AuthIdUserIdPair('aid1', 'uid1'), auth_domain.AuthIdUserIdPair('aid2', 'uid2'), @@ -1302,7 +1335,7 @@ def test_associate_multi_with_auth_id_collision_raises(self) -> None: # Erase the user_id collision, but leave the auth_id collision. auth_models.UserIdByFirebaseAuthIdModel.delete_by_id('aid1') - with self.assertRaisesRegexp(Exception, 'already associated'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, 'already associated'): firebase_auth_services.associate_multi_auth_ids_with_user_ids( [auth_domain.AuthIdUserIdPair('aid1', 'uid1'), auth_domain.AuthIdUserIdPair('aid2', 'uid2'), @@ -1378,7 +1411,7 @@ def test_disable_association_warns_when_firebase_fails_to_update_user( with update_user_swap, log_capturing_context as logs: firebase_auth_services.mark_user_for_deletion('uid') - self.assert_matches_regexps(logs, ['could not update']) # type: ignore[no-untyped-call] + self.assert_matches_regexps(logs, ['could not update']) self.assertIsNone( firebase_auth_services.get_user_id_from_auth_id('aid')) self.assertEqual( @@ -1393,7 +1426,7 @@ def test_disable_association_gives_up_when_auth_assocs_do_not_exist( with self.capture_logging() as logs: firebase_auth_services.mark_user_for_deletion('uid') - 
self.assert_matches_regexps( # type: ignore[no-untyped-call] + self.assert_matches_regexps( logs, [ r'\[WIPEOUT\] User with user_id=uid has no Firebase account' ]) @@ -1405,7 +1438,7 @@ class FirebaseSpecificAssociationTests(FirebaseAuthServicesTestBase): AUTH_ID = 'sub' def setUp(self) -> None: - super(FirebaseSpecificAssociationTests, self).setUp() + super().setUp() self.firebase_sdk_stub.create_user(self.AUTH_ID) firebase_auth_services.associate_auth_id_with_user_id( auth_domain.AuthIdUserIdPair(self.AUTH_ID, self.USER_ID)) @@ -1422,7 +1455,7 @@ def test_delete_user_when_firebase_raises_an_error(self) -> None: self.assertFalse( firebase_auth_services .verify_external_auth_associations_are_deleted(self.USER_ID)) - self.assert_matches_regexps(logs, ['could not connect']) # type: ignore[no-untyped-call] + self.assert_matches_regexps(logs, ['could not connect']) def test_delete_user_when_firebase_succeeds(self) -> None: with self.capture_logging() as logs: @@ -1444,9 +1477,9 @@ class DeleteAuthAssociationsTests(FirebaseAuthServicesTestBase): UNKNOWN_ERROR = firebase_exceptions.UnknownError('error') def setUp(self) -> None: - super(DeleteAuthAssociationsTests, self).setUp() + super().setUp() self.firebase_sdk_stub.create_user(self.AUTH_ID) - user_settings = user_services.create_new_user(self.AUTH_ID, self.EMAIL) # type: ignore[no-untyped-call] + user_settings = user_services.create_new_user(self.AUTH_ID, self.EMAIL) self.user_id = user_settings.user_id firebase_auth_services.mark_user_for_deletion(self.user_id) @@ -1492,7 +1525,7 @@ def test_delete_external_auth_associations_when_user_not_found( firebase_auth_services.delete_external_auth_associations( self.user_id) - self.assert_matches_regexps( # type: ignore[no-untyped-call] + self.assert_matches_regexps( logs, [ r'\[WIPEOUT\] Firebase account already deleted', ]) diff --git a/core/platform/bulk_email/dev_mode_bulk_email_services.py b/core/platform/bulk_email/dev_mode_bulk_email_services.py index 
6cac10c2722d..552e3f1e8bdc 100644 --- a/core/platform/bulk_email/dev_mode_bulk_email_services.py +++ b/core/platform/bulk_email/dev_mode_bulk_email_services.py @@ -20,6 +20,8 @@ import logging +from typing import Dict + def permanently_delete_user_from_list(user_email: str) -> None: """Logs that the delete request was sent. @@ -33,7 +35,11 @@ def permanently_delete_user_from_list(user_email: str) -> None: def add_or_update_user_status( - user_email: str, can_receive_email_updates: bool + user_email: str, + unused_merge_fields: Dict[str, str], + unused_tag: str, + *, + can_receive_email_updates: bool ) -> bool: """Subscribes/unsubscribes an existing user or creates a new user with correct status in the mailchimp DB. @@ -42,6 +48,11 @@ def add_or_update_user_status( user_email: str. Email id of the user. can_receive_email_updates: bool. Whether they want to be subscribed to list or not. + unused_merge_fields: dict. Additional 'merge fields' used by mailchimp + for adding extra information for each user. The format is + { 'KEY': value } where the key is defined in the mailchimp + dashboard. + unused_tag: str. Tag to add to user in mailchimp. Returns: bool. True to mock successful user creation. diff --git a/core/platform/bulk_email/dev_mode_bulk_email_services_test.py b/core/platform/bulk_email/dev_mode_bulk_email_services_test.py index 90cd1501531e..a24bac84294b 100644 --- a/core/platform/bulk_email/dev_mode_bulk_email_services_test.py +++ b/core/platform/bulk_email/dev_mode_bulk_email_services_test.py @@ -21,25 +21,20 @@ from core.platform.bulk_email import dev_mode_bulk_email_services from core.tests import test_utils -from typing import Any - class DevModeBulkEmailServicesUnitTests(test_utils.GenericTestBase): """Tests for mailchimp services.""" def test_add_or_update_user_status(self) -> None: observed_log_messages = [] - # We are using Any here because the following function mocks - # logging.info methods, whose stubs denote the type of args as Any. 
- # https://github.com/python/typeshed/blob/837b57fdd1a814237ef4b15f6ce19c701303aebb/stdlib/logging/__init__.pyi#L76 - def _mock_logging_function(msg: str, *args: Any) -> None: + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.info().""" observed_log_messages.append(msg % args) with self.swap(logging, 'info', _mock_logging_function): dev_mode_bulk_email_services.add_or_update_user_status( - 'test@example.com', True) - self.assertItemsEqual( # type: ignore[no-untyped-call] + 'test@example.com', {}, 'Web', can_receive_email_updates=True) + self.assertItemsEqual( observed_log_messages, ['Updated status of email ID test@example.com\'s bulk email ' 'preference in the service provider\'s db to True. Cannot ' @@ -47,8 +42,8 @@ def _mock_logging_function(msg: str, *args: Any) -> None: observed_log_messages = [] dev_mode_bulk_email_services.add_or_update_user_status( - 'test@example.com', False) - self.assertItemsEqual( # type: ignore[no-untyped-call] + 'test@example.com', {}, 'Web', can_receive_email_updates=False) + self.assertItemsEqual( observed_log_messages, ['Updated status of email ID test@example.com\'s bulk email ' 'preference in the service provider\'s db to False. Cannot ' @@ -56,17 +51,14 @@ def _mock_logging_function(msg: str, *args: Any) -> None: def test_permanently_delete_user(self) -> None: observed_log_messages = [] - # We are using Any here because the following function mocks - # logging.info methods, whose stubs denote the type of args as Any. 
- # https://github.com/python/typeshed/blob/837b57fdd1a814237ef4b15f6ce19c701303aebb/stdlib/logging/__init__.pyi#L76 - def _mock_logging_function(msg: str, *args: Any) -> None: + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.info().""" observed_log_messages.append(msg % args) with self.swap(logging, 'info', _mock_logging_function): dev_mode_bulk_email_services.permanently_delete_user_from_list( 'test@example.com') - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( observed_log_messages, ['Email ID test@example.com permanently deleted from bulk ' 'email provider\'s db. Cannot access API, since this is a ' diff --git a/core/platform/bulk_email/mailchimp_bulk_email_services.py b/core/platform/bulk_email/mailchimp_bulk_email_services.py index 66a22c4497c3..7234d07a4577 100644 --- a/core/platform/bulk_email/mailchimp_bulk_email_services.py +++ b/core/platform/bulk_email/mailchimp_bulk_email_services.py @@ -23,10 +23,19 @@ import logging from core import feconf +from core.platform import models import mailchimp3 from mailchimp3 import mailchimpclient +from typing import Any, Dict, Optional + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import secrets_services + +secrets_services = models.Registry.import_secrets_services() + def _get_subscriber_hash(email: str) -> str: """Returns Mailchimp subscriber hash from email. @@ -49,42 +58,48 @@ def _get_subscriber_hash(email: str) -> str: return md5_hash.hexdigest() -def _get_mailchimp_class() -> mailchimp3.MailChimp: +def _get_mailchimp_class() -> Optional[mailchimp3.MailChimp]: """Returns the mailchimp api class. This is separated into a separate function to facilitate testing. NOTE: No other functionalities should be added to this function. Returns: - Mailchimp. A mailchimp class instance with the API key and username + Mailchimp|None. A mailchimp class instance with the API key and username initialized. 
""" - # The return value ignore pragma is required for this. This is - # because adding a Union[] type annotation to handle both None and - # mailchimp3.MailChimp causes errors where the return value is called - # (for eg: client.lists), since NoneType does not have an attribute lists. - if not feconf.MAILCHIMP_API_KEY: - logging.exception('Mailchimp API key is not available.') - return None # type: ignore[return-value] + mailchimp_api_key: Optional[str] = secrets_services.get_secret( + 'MAILCHIMP_API_KEY') + if not mailchimp_api_key: + logging.error('Mailchimp API key is not available.') + return None if not feconf.MAILCHIMP_USERNAME: - logging.exception('Mailchimp username is not set.') + logging.error('Mailchimp username is not set.') return None # The following is a class initialized in the library with the API key and # username and hence cannot be tested directly. The mailchimp functions are # tested with a mock class. return mailchimp3.MailChimp( # pragma: no cover - mc_api=feconf.MAILCHIMP_API_KEY, mc_user=feconf.MAILCHIMP_USERNAME) + mc_api=mailchimp_api_key, mc_user=feconf.MAILCHIMP_USERNAME) -def _create_user_in_mailchimp_db(user_email: str) -> bool: +def _create_user_in_mailchimp_db( + client: mailchimp3.MailChimp, + # Here we use type Any because the value can be a list (for Tags) or dict + # (for merge_fields). + subscribed_mailchimp_data: Dict[str, Any] +) -> bool: """Creates a new user in the mailchimp database and handles the case where the user was permanently deleted from the database. Args: - user_email: str. Email ID of the user. Email is used to uniquely - identify the user in the mailchimp DB. + client: mailchimp3.MailChimp. A mailchimp instance with the API key and + username initialized. + subscribed_mailchimp_data: dict. Post body with required fields for a + new user. The required fields are email_address, status and tags. + Any relevant merge_fields are optional. Returns: bool. Whether the user was successfully added to the db. 
(This will be @@ -95,14 +110,9 @@ def _create_user_in_mailchimp_db(user_email: str) -> bool: Exception. Any error (other than the one mentioned below) raised by the mailchimp API. """ - post_data = { - 'email_address': user_email, - 'status': 'subscribed' - } - client = _get_mailchimp_class() - try: - client.lists.members.create(feconf.MAILCHIMP_AUDIENCE_ID, post_data) + client.lists.members.create( + feconf.MAILCHIMP_AUDIENCE_ID, subscribed_mailchimp_data) except mailchimpclient.MailChimpError as error: error_message = ast.literal_eval(str(error)) # This is the specific error message returned for the case where the @@ -114,7 +124,7 @@ def _create_user_in_mailchimp_db(user_email: str) -> bool: # common error titles. if error_message['title'] == 'Forgotten Email Not Subscribed': return False - raise Exception(error_message['detail']) + raise Exception(error_message['detail']) from error return True @@ -155,11 +165,15 @@ def permanently_delete_user_from_list(user_email: str) -> None: error_message = ast.literal_eval(str(error)) # Ignore if the error corresponds to "User does not exist". if error_message['status'] != 404: - raise Exception(error_message['detail']) + raise Exception(error_message['detail']) from error def add_or_update_user_status( - user_email: str, can_receive_email_updates: bool + user_email: str, + merge_fields: Dict[str, str], + tag: str, + *, + can_receive_email_updates: bool ) -> bool: """Subscribes/unsubscribes an existing user or creates a new user with correct status in the mailchimp DB. @@ -172,6 +186,13 @@ def add_or_update_user_status( identify the user in the mailchimp DB. can_receive_email_updates: bool. Whether they want to be subscribed to the bulk email list or not. + merge_fields: dict. Additional 'merge fields' used by mailchimp for + adding extra information for each user. The format is + { 'KEY': value } where the key is defined in the mailchimp + dashboard. 
+ (Reference: + https://mailchimp.com/developer/marketing/docs/merge-fields/). + tag: str. Tag to add to user in mailchimp. Returns: bool. Whether the user was successfully added to the db. (This will be @@ -187,7 +208,28 @@ def add_or_update_user_status( return False subscriber_hash = _get_subscriber_hash(user_email) - subscribed_mailchimp_data = { + if tag not in feconf.VALID_MAILCHIMP_TAGS: + raise Exception('Invalid tag: %s' % tag) + + invalid_keys = [ + key for key in merge_fields + if key not in feconf.VALID_MAILCHIMP_FIELD_KEYS + ] + if invalid_keys: + raise Exception('Invalid Merge Fields: %s' % invalid_keys) + + # Here we use type Any because the value can be a list (for Tags) or dict + # (for merge_fields), which will be added later depending on Android update + # or not. + new_user_mailchimp_data: Dict[str, Any] = { + 'email_address': user_email, + 'status': 'subscribed', + 'tags': [tag] + } + + # Here we use type Any because the value can be dict (for merge_fields), + # which will be added later depending on Android update or not. + subscribed_mailchimp_data: Dict[str, Any] = { 'email_address': user_email, 'status': 'subscribed' } @@ -197,22 +239,45 @@ def add_or_update_user_status( 'status': 'unsubscribed' } + tag_data = { + 'tags': [{ + 'name': tag, + 'status': 'active' + }] + } + + # Additional fields for the Android tag. 
+ if tag == 'Android': + new_user_mailchimp_data = { + 'email_address': user_email, + 'status': 'subscribed', + 'tags': [tag], + 'merge_fields': { + 'NAME': merge_fields['NAME'] + } + } + subscribed_mailchimp_data = { + 'email_address': user_email, + 'status': 'subscribed', + 'merge_fields': { + 'NAME': merge_fields['NAME'] + } + } + try: - member_details = client.lists.members.get( + client.lists.members.get( feconf.MAILCHIMP_AUDIENCE_ID, subscriber_hash) # If member is already added to mailchimp list, we cannot permanently # delete a list member, since they cannot be programmatically added # back, so we change their status based on preference. - if ( - can_receive_email_updates and - member_details['status'] != 'subscribed'): + if can_receive_email_updates: + client.lists.members.tags.update( + feconf.MAILCHIMP_AUDIENCE_ID, subscriber_hash, tag_data) client.lists.members.update( feconf.MAILCHIMP_AUDIENCE_ID, subscriber_hash, subscribed_mailchimp_data) - elif ( - not can_receive_email_updates and - member_details['status'] == 'subscribed'): + else: client.lists.members.update( feconf.MAILCHIMP_AUDIENCE_ID, subscriber_hash, unsubscribed_mailchimp_data) @@ -226,13 +291,13 @@ def add_or_update_user_status( # workaround for Python2, the 'message' attribute is obtained by # str() and then it is converted to dict. This works in Python3 as well. error_message = ast.literal_eval(str(error)) - # Error 404 corresponds to "User does not exist". + # Error 404 corresponds to 'User does not exist'. 
if error_message['status'] == 404: if can_receive_email_updates: user_creation_successful = _create_user_in_mailchimp_db( - user_email) + client, new_user_mailchimp_data) if not user_creation_successful: return False else: - raise Exception(error_message['detail']) + raise Exception(error_message['detail']) from error return True diff --git a/core/platform/bulk_email/mailchimp_bulk_email_services_test.py b/core/platform/bulk_email/mailchimp_bulk_email_services_test.py index de6b6741559f..58735ab51357 100644 --- a/core/platform/bulk_email/mailchimp_bulk_email_services_test.py +++ b/core/platform/bulk_email/mailchimp_bulk_email_services_test.py @@ -19,18 +19,21 @@ import logging from core import feconf +from core.platform import models from core.platform.bulk_email import mailchimp_bulk_email_services from core.tests import test_utils from mailchimp3 import mailchimpclient -from typing import Any, Dict +from typing import Dict, List + +secrets_services = models.Registry.import_secrets_services() class MailchimpServicesUnitTests(test_utils.GenericTestBase): """Tests for mailchimp services.""" def setUp(self) -> None: - super(MailchimpServicesUnitTests, self).setUp() + super().setUp() self.user_email_1 = 'test1@example.com' self.user_email_2 = 'test2@example.com' self.user_email_3 = 'test3@example.com' @@ -46,6 +49,32 @@ class MailchimpLists: class MailchimpMembers: """Class to mock Mailchimp members object.""" + class MailchimpTags: + """Class to mock Mailchimp tags object.""" + + def __init__(self) -> None: + self.tag_names: List[str] = [] + + def update( + self, + unused_id: str, + unused_hash: str, + tag_data: Dict[str, List[Dict[str, str]]] + ) -> None: + """Mocks the tag update function in mailchimp api. + + Args: + unused_id: str. List Id of mailchimp list. + unused_hash: str. Subscriber hash, which is an MD5 + hash of subscriber's email ID. + tag_data: dict. A dict with the 'tags' key + containing the tags to be updated for the user. 
+ """ + self.tag_names = [ + tag['name'] for tag in tag_data['tags'] + if tag['status'] == 'active' + ] + def __init__(self) -> None: self.users_data = [{ # Email: test1@example.com. @@ -55,7 +84,12 @@ def __init__(self) -> None: # Email: test2@example.com. 'email_hash': '43b05f394d5611c54a1a9e8e20baee21', 'status': 'subscribed' + }, { + # Email: test4@example.com, but intentionally + # incorrect to trigger failure. + 'email_hash': 'incorrecthash' }] + self.tags = self.MailchimpTags() def get( self, _list_id: str, subscriber_hash: str @@ -115,7 +149,7 @@ def create(self, _list_id: str, data: Dict[str, str]) -> None: if data['email_address'] == 'test3@example.com': self.users_data.append({ # Email: test3@example.com. - 'email': 'fedd8b80a7a813966263853b9af72151', + 'email_hash': 'fedd8b80a7a813966263853b9af72151', 'status': data['status'] }) elif data['email_address'] == 'test4@example.com': @@ -156,48 +190,110 @@ def test_get_subscriber_hash(self) -> None: mailchimp_bulk_email_services._get_subscriber_hash(sample_email), # pylint: disable=protected-access subscriber_hash) + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[arg-type] + # is used to test method _get_subscriber_hash() for invalid argument + # type. sample_email_2 = 5 - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Invalid type for email. Expected string, received 5'): mailchimp_bulk_email_services._get_subscriber_hash(sample_email_2) # type: ignore[arg-type] # pylint: disable=protected-access - def test_get_mailchimp_class_error(self) -> None: - observed_log_messages = [] - - def _mock_logging_function( - msg: str, *args: Any, **unused_kwargs: Any) -> None: - """Mocks logging.exception(). - - Args: - msg: str. The logging message. - *args: list(*). A list of arguments. - **unused_kwargs: *. Keyword arguments. 
- """ - observed_log_messages.append(msg % args) - - logging_swap = self.swap(logging, 'exception', _mock_logging_function) - with logging_swap: - mailchimp_bulk_email_services._get_mailchimp_class() # pylint: disable=protected-access - self.assertItemsEqual( # type: ignore[no-untyped-call] - observed_log_messages, ['Mailchimp API key is not available.']) - - observed_log_messages = [] - swap_api = self.swap(feconf, 'MAILCHIMP_API_KEY', 'key') - with swap_api: - mailchimp_bulk_email_services._get_mailchimp_class() # pylint: disable=protected-access - self.assertItemsEqual( # type: ignore[no-untyped-call] - observed_log_messages, ['Mailchimp username is not set.']) - - # For the tests below, the email ID for the user doesn't matter - # since the function should return earlier if mailchimp api key or - # username is not set. - # Permanently deletes returns None when mailchimp keys are not set. - self.assertIsNone( - mailchimp_bulk_email_services.permanently_delete_user_from_list( # type: ignore[func-returns-value] - 'sample_email')) + def test_function_input_validation(self) -> None: + mailchimp = self.MockMailchimpClass() + swapped_mailchimp = lambda: mailchimp + swap_mailchimp_context = self.swap( + mailchimp_bulk_email_services, '_get_mailchimp_class', + swapped_mailchimp) + with swap_mailchimp_context: + with self.assertRaisesRegex( + Exception, 'Invalid Merge Fields' + ): + mailchimp_bulk_email_services.add_or_update_user_status( + 'valid@example.com', {'INVALID': 'value'}, 'Android', + can_receive_email_updates=True) + + with self.assertRaisesRegex( + Exception, 'Invalid tag: Invalid' + ): + mailchimp_bulk_email_services.add_or_update_user_status( + 'valid@example.com', {}, 'Invalid', + can_receive_email_updates=True) + + def test_get_mailchimp_class_errors_when_api_key_is_not_available( + self + ) -> None: + swap_get_secret = self.swap_with_checks( + secrets_services, + 'get_secret', + lambda _: None, + expected_args=[('MAILCHIMP_API_KEY',)] + ) + with 
self.capture_logging(min_level=logging.ERROR) as logs: + with swap_get_secret: + self.assertIsNone( + mailchimp_bulk_email_services._get_mailchimp_class() # pylint: disable=protected-access + ) + self.assertItemsEqual( + logs, ['Mailchimp API key is not available.'] + ) + + def test_get_mailchimp_class_errors_when_username_is_not_available( + self + ) -> None: + swap_mailchimp_username = self.swap( + feconf, 'MAILCHIMP_USERNAME', None + ) + swap_get_secret = self.swap_with_checks( + secrets_services, + 'get_secret', + lambda _: 'key', + expected_args=[('MAILCHIMP_API_KEY',)] + ) + with self.capture_logging(min_level=logging.ERROR) as logs: + with swap_mailchimp_username, swap_get_secret: + self.assertIsNone( + mailchimp_bulk_email_services._get_mailchimp_class() # pylint: disable=protected-access + ) + self.assertItemsEqual( + logs, ['Mailchimp username is not set.'] + ) + + def test_add_or_update_user_status_returns_false_when_username_is_none( + self + ) -> None: + swap_get_secret = self.swap_with_checks( + secrets_services, + 'get_secret', + lambda _: 'key', + expected_args=[ + ('MAILCHIMP_API_KEY',), + ] + ) + with swap_get_secret: self.assertFalse( mailchimp_bulk_email_services.add_or_update_user_status( - 'sample_email', True)) + 'sample_email', + {}, + 'Web', + can_receive_email_updates=True + ) + ) + + def test_permanently_delete_user_from_list_when_username_is_none( + self + ) -> None: + swap_get_secret = self.swap_with_checks( + secrets_services, + 'get_secret', + lambda _: 'key', + expected_args=[ + ('MAILCHIMP_API_KEY',), + ] + ) + with swap_get_secret: + mailchimp_bulk_email_services.permanently_delete_user_from_list( + 'sample_email') def test_add_or_update_mailchimp_user_status(self) -> None: mailchimp = self.MockMailchimpClass() @@ -205,7 +301,7 @@ def test_add_or_update_mailchimp_user_status(self) -> None: swap_mailchimp_context = self.swap( mailchimp_bulk_email_services, '_get_mailchimp_class', swapped_mailchimp) - swap_api = self.swap(feconf, 
'MAILCHIMP_API_KEY', 'key') + swap_api = self.swap(secrets_services, 'get_secret', lambda _: 'key') swap_username = self.swap(feconf, 'MAILCHIMP_USERNAME', 'username') with swap_mailchimp_context, swap_api, swap_username: @@ -214,9 +310,10 @@ def test_add_or_update_mailchimp_user_status(self) -> None: self.assertEqual( mailchimp.lists.members.users_data[0]['status'], 'unsubscribed') mailchimp_bulk_email_services.add_or_update_user_status( - self.user_email_1, True) + self.user_email_1, {}, 'Web', can_receive_email_updates=True) self.assertEqual( mailchimp.lists.members.users_data[0]['status'], 'subscribed') + self.assertEqual(mailchimp.lists.members.tags.tag_names, ['Web']) # Tests condition where user was initally subscribed in list and # becomes unsubscribed. @@ -224,25 +321,60 @@ def test_add_or_update_mailchimp_user_status(self) -> None: mailchimp.lists.members.users_data[1]['status'], 'subscribed') mailchimp_bulk_email_services.add_or_update_user_status( - self.user_email_2, False) + self.user_email_2, {}, 'Web', can_receive_email_updates=False) self.assertEqual( mailchimp.lists.members.users_data[1]['status'], 'unsubscribed') # Creates a mailchimp entry for a new user. - self.assertEqual(len(mailchimp.lists.members.users_data), 2) + self.assertEqual(len(mailchimp.lists.members.users_data), 3) return_status = ( mailchimp_bulk_email_services.add_or_update_user_status( - self.user_email_3, True)) + self.user_email_3, {}, 'Web', + can_receive_email_updates=True)) self.assertTrue(return_status) self.assertEqual( - mailchimp.lists.members.users_data[2]['status'], 'subscribed') + mailchimp.lists.members.users_data[3]['status'], 'subscribed') + + # Creates a mailchimp entry for a new user. 
+ return_status = ( + mailchimp_bulk_email_services.add_or_update_user_status( + 'test4@example.com', {}, 'Web', + can_receive_email_updates=True)) + self.assertFalse(return_status) + # Here we use MyPy ignore because attribute 'users_data' can only + # accept Dict but for testing purposes here we are providing None + # which causes mypy to throw an error. Thus to avoid the error, we + # used ignore here. mailchimp.lists.members.users_data = None # type: ignore[assignment] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Server Error'): mailchimp_bulk_email_services.add_or_update_user_status( - self.user_email_1, True) + self.user_email_1, {}, 'Web', + can_receive_email_updates=True) + + def test_android_merge_fields(self) -> None: + mailchimp = self.MockMailchimpClass() + swapped_mailchimp = lambda: mailchimp + swap_mailchimp_context = self.swap( + mailchimp_bulk_email_services, '_get_mailchimp_class', + swapped_mailchimp) + swap_api = self.swap(secrets_services, 'get_secret', lambda _: 'key') + swap_username = self.swap(feconf, 'MAILCHIMP_USERNAME', 'username') + + with swap_mailchimp_context, swap_api, swap_username: + # Tests condition where user was initally unsubscribed in list and + # becomes subscribed. 
+ self.assertEqual( + mailchimp.lists.members.users_data[0]['status'], 'unsubscribed') + mailchimp_bulk_email_services.add_or_update_user_status( + self.user_email_1, {'NAME': 'name'}, 'Android', + can_receive_email_updates=True) + self.assertEqual( + mailchimp.lists.members.users_data[0]['status'], 'subscribed') + self.assertEqual( + mailchimp.lists.members.tags.tag_names, ['Android']) def test_catch_or_raise_errors_when_creating_new_invalid_user(self) -> None: mailchimp = self.MockMailchimpClass() @@ -250,23 +382,25 @@ def test_catch_or_raise_errors_when_creating_new_invalid_user(self) -> None: swap_mailchimp_context = self.swap( mailchimp_bulk_email_services, '_get_mailchimp_class', swapped_mailchimp) - swap_api = self.swap(feconf, 'MAILCHIMP_API_KEY', 'key') + swap_api = self.swap(secrets_services, 'get_secret', lambda _: 'key') swap_username = self.swap(feconf, 'MAILCHIMP_USERNAME', 'username') with swap_mailchimp_context, swap_api, swap_username: # Creates a mailchimp entry for a deleted user. - self.assertEqual(len(mailchimp.lists.members.users_data), 2) + self.assertEqual(len(mailchimp.lists.members.users_data), 3) return_status = ( mailchimp_bulk_email_services.add_or_update_user_status( - 'test4@example.com', True)) + 'test4@example.com', {}, 'Web', + can_receive_email_updates=True)) self.assertFalse(return_status) - self.assertEqual(len(mailchimp.lists.members.users_data), 2) + self.assertEqual(len(mailchimp.lists.members.users_data), 3) # Create user raises exception for other errors. 
- with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Server Issue'): mailchimp_bulk_email_services.add_or_update_user_status( - 'test5@example.com', True) + 'test5@example.com', {}, 'Web', + can_receive_email_updates=True) def test_permanently_delete_user(self) -> None: mailchimp = self.MockMailchimpClass() @@ -274,17 +408,21 @@ def test_permanently_delete_user(self) -> None: swap_mailchimp_context = self.swap( mailchimp_bulk_email_services, '_get_mailchimp_class', swapped_mailchimp) - swap_api = self.swap(feconf, 'MAILCHIMP_API_KEY', 'key') + swap_api = self.swap(secrets_services, 'get_secret', lambda _: 'key') swap_username = self.swap(feconf, 'MAILCHIMP_USERNAME', 'username') with swap_mailchimp_context, swap_api, swap_username: - self.assertEqual(len(mailchimp.lists.members.users_data), 2) + self.assertEqual(len(mailchimp.lists.members.users_data), 3) mailchimp_bulk_email_services.permanently_delete_user_from_list( self.user_email_1) - self.assertEqual(len(mailchimp.lists.members.users_data), 1) + self.assertEqual(len(mailchimp.lists.members.users_data), 2) + # Here we use MyPy ignore because attribute 'users_data' can only + # accept Dict but for testing purposes here we are providing None + # which causes mypy to throw an error. Thus to avoid the error, we + # used ignore here. 
mailchimp.lists.members.users_data = None # type: ignore[assignment] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Server Error'): mailchimp_bulk_email_services.permanently_delete_user_from_list( self.user_email_1) diff --git a/core/platform/cache/redis_cache_services.py b/core/platform/cache/redis_cache_services.py index f8dd56ddae4d..64b160ab8775 100644 --- a/core/platform/cache/redis_cache_services.py +++ b/core/platform/cache/redis_cache_services.py @@ -22,7 +22,7 @@ from core.domain import caching_domain import redis -from typing import Dict, List, Optional, cast +from typing import Dict, List, Optional # Redis client for our own implementation of caching. OPPIA_REDIS_CLIENT = redis.StrictRedis( @@ -50,16 +50,12 @@ def get_memory_cache_stats() -> caching_domain.MemoryCacheStats: memory in bytes, peak memory usage in bytes, and the total number of keys stored as values. """ - # We have ignored [attr-defined] below because there is some error in - # the redis typeshed. Typestubs don't define the method memory_stats() - # for the redis.StrictRedis object. - # TODO(#13617): Update our typeshed after redis stubs are improved in - # typeshed. Then the ignore[attr-defined] used below can be removed. - redis_full_profile = OPPIA_REDIS_CLIENT.memory_stats() # type: ignore[attr-defined] - memory_stats = caching_domain.MemoryCacheStats( # type: ignore[no-untyped-call] - redis_full_profile.get('total.allocated'), - redis_full_profile.get('peak.allocated'), - redis_full_profile.get('keys.count')) + redis_full_profile = OPPIA_REDIS_CLIENT.memory_stats() + memory_stats = caching_domain.MemoryCacheStats( + redis_full_profile['total.allocated'], + redis_full_profile['peak.allocated'], + redis_full_profile['keys.count'] + ) return memory_stats @@ -81,12 +77,7 @@ def get_multi(keys: List[str]) -> List[Optional[str]]: that are passed in. 
""" assert isinstance(keys, list) - # TODO(#13663): After we install mypy in virtual environment and upgrade - # our mypy, we will have latest stubs of redis available. After this - # the cast and type ignore used below can be removed. - return cast( - List[Optional[str]], - OPPIA_REDIS_CLIENT.mget(keys)) # type: ignore[no-untyped-call] + return OPPIA_REDIS_CLIENT.mget(keys) def set_multi(key_value_mapping: Dict[str, str]) -> bool: @@ -101,12 +92,7 @@ def set_multi(key_value_mapping: Dict[str, str]) -> bool: bool. Whether the set action succeeded. """ assert isinstance(key_value_mapping, dict) - # TODO(#13663): After we install mypy in virtual environment and upgrade - # our mypy, we will have latest stubs of redis available. After this - # the cast and type ignore used below can be removed. - return cast( - bool, - OPPIA_REDIS_CLIENT.mset(key_value_mapping)) # type: ignore[no-untyped-call] + return OPPIA_REDIS_CLIENT.mset(key_value_mapping) def delete_multi(keys: List[str]) -> int: @@ -120,5 +106,4 @@ def delete_multi(keys: List[str]) -> int: """ for key in keys: assert isinstance(key, str) - number_of_deleted_keys = OPPIA_REDIS_CLIENT.delete(*keys) - return number_of_deleted_keys + return OPPIA_REDIS_CLIENT.delete(*keys) diff --git a/core/platform/cache/redis_cache_services_test.py b/core/platform/cache/redis_cache_services_test.py index 6e48300c0535..066ed5b22a66 100644 --- a/core/platform/cache/redis_cache_services_test.py +++ b/core/platform/cache/redis_cache_services_test.py @@ -21,7 +21,7 @@ import os from core import feconf -from core import python_utils +from core import utils from core.platform.cache import redis_cache_services from core.tests import test_utils from scripts import common @@ -123,7 +123,7 @@ def test_redis_configuration_file_matches_feconf_redis_configuration( self.assertTrue(os.path.exists( os.path.join(common.CURR_DIR, 'redis.conf'))) - with python_utils.open_file( # type: ignore[no-untyped-call] + with utils.open_file( 
os.path.join(common.CURR_DIR, 'redis.conf'), 'r') as redis_conf: lines = redis_conf.readlines() elements = lines[0].split() diff --git a/core/platform/datastore/cloud_datastore_services.py b/core/platform/datastore/cloud_datastore_services.py index f5ebc9bb075c..1ddeed3514fc 100644 --- a/core/platform/datastore/cloud_datastore_services.py +++ b/core/platform/datastore/cloud_datastore_services.py @@ -19,6 +19,7 @@ from __future__ import annotations import contextlib +import logging from core.platform import models @@ -29,7 +30,7 @@ MYPY = False if MYPY: # pragma: no cover - from mypy_imports import base_models # pylint: disable=unused-import + from mypy_imports import base_models from mypy_imports import transaction_services transaction_services = models.Registry.import_transaction_services() @@ -52,6 +53,7 @@ TextProperty = ndb.TextProperty TYPE_MODEL_SUBCLASS = TypeVar('TYPE_MODEL_SUBCLASS', bound=Model) # pylint: disable=invalid-name +MAX_GET_RETRIES = 3 CLIENT = ndb.Client() @@ -85,8 +87,17 @@ def get_multi(keys: List[Key]) -> List[Optional[TYPE_MODEL_SUBCLASS]]: Returns: list(datastore_services.Model | None). List whose items are either a Model instance or None if the corresponding key wasn't found. + + Raises: + Exception. If ndb.get_multi fails for MAX_GET_RETRIES. """ - return ndb.get_multi(keys) + for unused_i in range(0, MAX_GET_RETRIES): + try: + return ndb.get_multi(keys) + except Exception as e: + logging.exception('Exception raised: %s', e) + continue + raise Exception('get_multi failed after %s retries' % MAX_GET_RETRIES) def update_timestamps_multi( @@ -145,7 +156,7 @@ def delete_multi(keys: Sequence[Key]) -> List[None]: return ndb.delete_multi(keys) -# Here Any is used in the type annotation because it mimics the types defined in +# Here we use type Any because it mimics the types defined in # the stubs for this library. def query_everything(**kwargs: Dict[str, Any]) -> Query: """Returns a query that targets every single entity in the datastore. 
diff --git a/core/platform/datastore/cloud_datastore_services_test.py b/core/platform/datastore/cloud_datastore_services_test.py new file mode 100644 index 000000000000..5efc9328c160 --- /dev/null +++ b/core/platform/datastore/cloud_datastore_services_test.py @@ -0,0 +1,224 @@ +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the cloud_datastore_services.py""" + +from __future__ import annotations + +import datetime +import logging + +from core import feconf +from core.platform import models +from core.platform.datastore import cloud_datastore_services +from core.tests import test_utils + +from google.cloud import ndb + +from typing import Sequence, Tuple + +MYPY = False +if MYPY: + from mypy_imports import datastore_services + from mypy_imports import user_models + +(user_models,) = models.Registry.import_models([models.Names.USER]) +datastore_services = models.Registry.import_datastore_services() + + +class CloudDatastoreServicesTests(test_utils.GenericTestBase): + """Unit tests for the cloud_datastore_services.py""" + + THREE_WEEKS = datetime.timedelta(weeks=3) + + def setUp(self) -> None: + super().setUp() + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + + self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) + 
self.admin_user_id = self.get_user_id_from_email( + self.CURRICULUM_ADMIN_EMAIL) + self.curr_time = datetime.datetime.utcnow() + self.completed_activities_model = user_models.CompletedActivitiesModel( + id=self.admin_user_id, + exploration_ids=[], + collection_ids=[], + story_ids=[], + learnt_topic_ids=[], + last_updated=self.curr_time + ) + self.user_query_model = user_models.UserQueryModel( + id='query_id', + user_ids=[], + submitter_id=self.admin_user_id, + query_status=feconf.USER_QUERY_STATUS_PROCESSING, + last_updated=self.curr_time + ) + + def test_update_timestamps_multi(self) -> None: + self.assertIsNone( + user_models.CompletedActivitiesModel.get_by_id(self.admin_user_id)) + self.assertIsNone(user_models.UserQueryModel.get_by_id('query_id')) + + cloud_datastore_services.update_timestamps_multi( + [self.completed_activities_model, self.user_query_model], False) + cloud_datastore_services.put_multi( + [self.completed_activities_model, self.user_query_model]) + + self.assertIsNotNone( + user_models.CompletedActivitiesModel.get_by_id(self.admin_user_id)) + self.assertIsNotNone(user_models.UserQueryModel.get_by_id('query_id')) + + self.assertEqual( + self.completed_activities_model.get_by_id( + self.admin_user_id).last_updated, + self.curr_time) + self.assertEqual( + self.user_query_model.get_by_id('query_id').last_updated, + self.curr_time) + + def test_delete_multi_transactional(self) -> None: + cloud_datastore_services.update_timestamps_multi( + [self.completed_activities_model, self.user_query_model], False) + cloud_datastore_services.put_multi( + [self.completed_activities_model, self.user_query_model]) + + self.assertIsNotNone( + user_models.CompletedActivitiesModel.get_by_id(self.admin_user_id)) + self.assertIsNotNone(user_models.UserQueryModel.get_by_id('query_id')) + + cloud_datastore_services.delete_multi_transactional([ + datastore_services.Key( + user_models.CompletedActivitiesModel, self.admin_user_id), + 
datastore_services.Key(user_models.UserQueryModel, 'query_id') + ]) + + self.assertIsNone( + user_models.CompletedActivitiesModel.get_by_id(self.admin_user_id)) + self.assertIsNone(user_models.UserQueryModel.get_by_id('query_id')) + + def test_fetch_multiple_entities_by_ids_and_models(self) -> None: + cloud_datastore_services.update_timestamps_multi( + [self.completed_activities_model, self.user_query_model], False) + cloud_datastore_services.put_multi( + [self.completed_activities_model, self.user_query_model]) + + returned_models = ( + cloud_datastore_services.fetch_multiple_entities_by_ids_and_models( + [ + ('CompletedActivitiesModel', [self.admin_user_id]), + ('UserQueryModel', ['query_id']) + ]) + ) + + self.assertEqual( + returned_models, + [[self.completed_activities_model], [self.user_query_model]]) + + def test_fetch_multiple_entities_throws_error_on_duplicate_parameters( + self) -> None: + cloud_datastore_services.update_timestamps_multi( + [self.completed_activities_model, self.user_query_model], False) + cloud_datastore_services.put_multi( + [self.completed_activities_model, self.user_query_model]) + + error_msg = 'Model names should not be duplicated in input list.' 
+ with self.assertRaisesRegex(Exception, error_msg): + cloud_datastore_services.fetch_multiple_entities_by_ids_and_models( + [ + ('UserQueryModel', ['query_id']), + ('UserQueryModel', ['query_id']) + ] + ) + + def test_get_multi_throws_error_on_failure( + self + ) -> None: + observed_log_messages = [] + + def _mock_logging_function(msg: str, *args: str) -> None: + + """Mocks logging.exception().""" + observed_log_messages.append(msg % args) + dummy_keys = [ + ndb.Key('model1', 'id1'), + ndb.Key('model2', 'id2'), + ndb.Key('model3', 'id3') + ] + error_msg = ( + 'get_multi failed after %s retries' % + cloud_datastore_services.MAX_GET_RETRIES + ) + with self.swap_to_always_raise( + ndb, + 'get_multi', + Exception('Mock key error') + ), self.swap(logging, 'exception', _mock_logging_function): + with self.assertRaisesRegex(Exception, error_msg): + cloud_datastore_services.get_multi(dummy_keys) + self.assertEqual( + observed_log_messages, + ['Exception raised: Mock key error', + 'Exception raised: Mock key error', + 'Exception raised: Mock key error'] + ) + + def test_ndb_query_with_filters(self) -> None: + user_query_model1 = user_models.UserQueryModel( + id='query_id1', + user_ids=[], + submitter_id=self.admin_user_id, + query_status=feconf.USER_QUERY_STATUS_PROCESSING, + last_updated=self.curr_time - self.THREE_WEEKS + ) + user_query_model2 = user_models.UserQueryModel( + id='query_id2', + user_ids=[], + submitter_id=self.admin_user_id, + query_status=feconf.USER_QUERY_STATUS_COMPLETED, + last_updated=self.curr_time + ) + cloud_datastore_services.update_timestamps_multi( + [user_query_model1, user_query_model2], False) + cloud_datastore_services.put_multi( + [user_query_model1, user_query_model2]) + + result = user_models.UserQueryModel.query( + cloud_datastore_services.all_of( + user_models.UserQueryModel.submitter_id == self.admin_user_id, + user_models.UserQueryModel.query_status == ( + feconf.USER_QUERY_STATUS_COMPLETED) + )).get() + + self.assertEqual(result, 
user_query_model2) + + result = user_models.UserQueryModel.query( + cloud_datastore_services.any_of( + user_models.UserQueryModel.submitter_id == 'new_id', + user_models.UserQueryModel.query_status == ( + feconf.USER_QUERY_STATUS_PROCESSING) + )).get() + + self.assertEqual(result, user_query_model1) + + results: Tuple[ + Sequence[cloud_datastore_services.Model], + cloud_datastore_services.Cursor, bool + ] = user_models.UserQueryModel.query( + user_models.UserQueryModel.submitter_id == self.admin_user_id, + ).fetch_page(2, cloud_datastore_services.make_cursor()) + + self.assertEqual(results[0], [user_query_model1, user_query_model2]) diff --git a/core/platform/email/dev_mode_email_services_test.py b/core/platform/email/dev_mode_email_services_test.py index 930c13d55121..e27ef443b707 100644 --- a/core/platform/email/dev_mode_email_services_test.py +++ b/core/platform/email/dev_mode_email_services_test.py @@ -25,7 +25,7 @@ from core.platform.email import dev_mode_email_services from core.tests import test_utils -from typing import Any, Dict, Union +from typing import Dict, Union class EmailTests(test_utils.GenericTestBase): @@ -37,7 +37,7 @@ def test_send_mail_logs_to_terminal(self) -> None: """ observed_log_messages = [] - def _mock_logging_function(msg: str, *args: Any) -> None: + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.info().""" observed_log_messages.append(msg % args) @@ -84,7 +84,7 @@ def test_send_mail_to_multiple_recipients_logs_to_terminal(self) -> None: """ observed_log_messages = [] - def _mock_logging_function(msg: str, *args: Any) -> None: + def _mock_logging_function(msg: str, *args: str) -> None: """Mocks logging.info().""" observed_log_messages.append(msg % args) diff --git a/core/platform/email/mailgun_email_services.py b/core/platform/email/mailgun_email_services.py index b1ede5db4514..06813f76f694 100644 --- a/core/platform/email/mailgun_email_services.py +++ b/core/platform/email/mailgun_email_services.py @@ 
-22,21 +22,28 @@ import urllib from core import feconf -from core import python_utils +from core import utils +from core.platform import models from typing import Dict, List, Optional, Union +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import secrets_services + +secrets_services = models.Registry.import_secrets_services() + def send_email_to_recipients( - sender_email: str, - recipient_emails: List[str], - subject: str, - plaintext_body: str, - html_body: str, - bcc: Optional[List[str]] = None, - reply_to: Optional[str] = None, - recipient_variables: Optional[ - Dict[str, Dict[str, Union[str, float]]]] = None + sender_email: str, + recipient_emails: List[str], + subject: str, + plaintext_body: str, + html_body: str, + bcc: Optional[List[str]] = None, + reply_to: Optional[str] = None, + recipient_variables: Optional[ + Dict[str, Dict[str, Union[str, float]]]] = None ) -> bool: """Send POST HTTP request to mailgun api. This method is adopted from the requests library's post method. @@ -77,7 +84,9 @@ def send_email_to_recipients( Returns: bool. Whether the emails are sent successfully. """ - if not feconf.MAILGUN_API_KEY: + mailgun_api_key: Optional[str] = secrets_services.get_secret( + 'MAILGUN_API_KEY') + if mailgun_api_key is None: raise Exception('Mailgun API key is not available.') if not feconf.MAILGUN_DOMAIN_NAME: @@ -114,19 +123,19 @@ def send_email_to_recipients( # the MAILGUN_API_KEY to bytes, then decode the returned bytes back # to string. 
base64_mailgun_api_key = base64.b64encode( - b'api:%b' % feconf.MAILGUN_API_KEY.encode('utf-8') + b'api:%b' % mailgun_api_key.encode('utf-8') ).strip().decode('utf-8') auth_str = 'Basic %s' % base64_mailgun_api_key header = {'Authorization': auth_str} - server = ( - ('https://api.mailgun.net/v3/%s/messages') - % feconf.MAILGUN_DOMAIN_NAME) + server = 'https://api.mailgun.net/v3/%s/messages' % ( + feconf.MAILGUN_DOMAIN_NAME + ) # The 'ascii' is used here, because only ASCII char are allowed in url, # also the docs recommend this approach: # https://docs.python.org/3.7/library/urllib.request.html#urllib-examples encoded_url = urllib.parse.urlencode(data).encode('ascii') - req = python_utils.url_request(server, encoded_url, header) - resp = python_utils.url_open(req) + req = urllib.request.Request(server, encoded_url, header) + resp = utils.url_open(req) # The function url_open returns a file_like object that can be queried # for the status code of the url query. If it is not 200, the mail query # failed so we return False (this function did not complete diff --git a/core/platform/email/mailgun_email_services_test.py b/core/platform/email/mailgun_email_services_test.py index 4e47819b80fb..bd27ccf54601 100644 --- a/core/platform/email/mailgun_email_services_test.py +++ b/core/platform/email/mailgun_email_services_test.py @@ -18,21 +18,40 @@ from __future__ import annotations +import urllib + from core import feconf -from core import python_utils +from core import utils +from core.platform import models from core.platform.email import mailgun_email_services from core.tests import test_utils from typing import Dict, Tuple +secrets_services = models.Registry.import_secrets_services() + MailgunQueryType = Tuple[str, bytes, Dict[str, str]] class EmailTests(test_utils.GenericTestBase): """Tests for sending emails.""" + def setUp(self) -> None: + super().setUp() + self.swapped_request = lambda *args: args + self.swap_api_key_secrets_return_none = self.swap_to_always_return( + 
secrets_services, 'get_secret', None) + self.swap_api_key_secrets_return_secret = self.swap_with_checks( + secrets_services, + 'get_secret', + lambda _: 'key', + expected_args=[ + ('MAILGUN_API_KEY',), + ] + ) + class Response: - """Class to mock python_utils.url_open responses.""" + """Class to mock utils.url_open responses.""" def __init__( self, url: MailgunQueryType, expected_url: MailgunQueryType @@ -48,7 +67,9 @@ def getcode(self) -> int: """ return 200 if self.url == self.expected_url else 500 - def test_send_email_to_mailgun(self) -> None: + def test_send_email_to_mailgun_without_bcc_reply_to_and_recipients( + self + ) -> None: """Test for sending HTTP POST request.""" # Test sending email without bcc, reply_to or recipient_variables. expected_query_url: MailgunQueryType = ( @@ -62,22 +83,23 @@ def test_send_email_to_mailgun(self) -> None: {'Authorization': 'Basic YXBpOmtleQ=='} ) swapped_urlopen = lambda x: self.Response(x, expected_query_url) - swapped_request = lambda *args: args + swap_urlopen_context = self.swap( - python_utils, 'url_open', swapped_urlopen) + utils, 'url_open', swapped_urlopen) swap_request_context = self.swap( - python_utils, 'url_request', swapped_request) - swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key') + urllib.request, 'Request', self.swapped_request) swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain') - with swap_urlopen_context, swap_request_context, swap_api, swap_domain: - resp = mailgun_email_services.send_email_to_recipients( - 'a@a.com', - ['b@b.com'], - 'Hola 😂 - invitation to collaborate', - 'plaintext_body 😂', - 'Hi abc,
    😂') - self.assertTrue(resp) + with self.swap_api_key_secrets_return_secret, swap_urlopen_context: + with swap_request_context, swap_domain: + resp = mailgun_email_services.send_email_to_recipients( + 'a@a.com', + ['b@b.com'], + 'Hola 😂 - invitation to collaborate', + 'plaintext_body 😂', + 'Hi abc,
    😂') + self.assertTrue(resp) + def test_send_email_to_mailgun_with_bcc_and_recipient(self) -> None: # Test sending email with single bcc and single recipient email. expected_query_url = ( 'https://api.mailgun.net/v3/domain/messages', @@ -93,23 +115,24 @@ def test_send_email_to_mailgun(self) -> None: {'Authorization': 'Basic YXBpOmtleQ=='}) swapped_urlopen = lambda x: self.Response(x, expected_query_url) swap_urlopen_context = self.swap( - python_utils, 'url_open', swapped_urlopen) + utils, 'url_open', swapped_urlopen) swap_request_context = self.swap( - python_utils, 'url_request', swapped_request) - swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key') + urllib.request, 'Request', self.swapped_request) swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain') - with swap_urlopen_context, swap_request_context, swap_api, swap_domain: - resp = mailgun_email_services.send_email_to_recipients( - 'a@a.com', - ['b@b.com'], - 'Hola 😂 - invitation to collaborate', - 'plaintext_body 😂', - 'Hi abc,
    😂', - bcc=['c@c.com'], - reply_to='abc', - recipient_variables={'b@b.com': {'first': 'Bob', 'id': 1}}) - self.assertTrue(resp) + with self.swap_api_key_secrets_return_secret, swap_urlopen_context: + with swap_request_context, swap_domain: + resp = mailgun_email_services.send_email_to_recipients( + 'a@a.com', + ['b@b.com'], + 'Hola 😂 - invitation to collaborate', + 'plaintext_body 😂', + 'Hi abc,
    😂', + bcc=['c@c.com'], + reply_to='abc', + recipient_variables={'b@b.com': {'first': 'Bob', 'id': 1}}) + self.assertTrue(resp) + def test_send_email_to_mailgun_with_bcc_and_recipients(self) -> None: # Test sending email with single bcc, and multiple recipient emails # differentiated by recipient_variables ids. expected_query_url = ( @@ -126,22 +149,25 @@ def test_send_email_to_mailgun(self) -> None: {'Authorization': 'Basic YXBpOmtleQ=='}) swapped_urlopen = lambda x: self.Response(x, expected_query_url) swap_urlopen_context = self.swap( - python_utils, 'url_open', swapped_urlopen) + utils, 'url_open', swapped_urlopen) swap_request_context = self.swap( - python_utils, 'url_request', swapped_request) - swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key') + urllib.request, 'Request', self.swapped_request) swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain') - with swap_urlopen_context, swap_request_context, swap_api, swap_domain: - resp = mailgun_email_services.send_email_to_recipients( - 'a@a.com', - ['b@b.com'], - 'Hola 😂 - invitation to collaborate', - 'plaintext_body 😂', - 'Hi abc,
    😂', - bcc=['c@c.com', 'd@d.com'], - reply_to='abc', - recipient_variables=({'b@b.com': {'first': 'Bob', 'id': 1}})) - self.assertTrue(resp) + with self.swap_api_key_secrets_return_secret, swap_urlopen_context: + with swap_request_context, swap_domain: + resp = mailgun_email_services.send_email_to_recipients( + 'a@a.com', + ['b@b.com'], + 'Hola 😂 - invitation to collaborate', + 'plaintext_body 😂', + 'Hi abc,
    😂', + bcc=['c@c.com', 'd@d.com'], + reply_to='abc', + recipient_variables=({ + 'b@b.com': {'first': 'Bob', 'id': 1} + }) + ) + self.assertTrue(resp) def test_batch_send_to_mailgun(self) -> None: """Test for sending HTTP POST request.""" @@ -157,46 +183,56 @@ def test_batch_send_to_mailgun(self) -> None: swapped_urlopen = lambda x: self.Response(x, expected_query_url) swapped_request = lambda *args: args swap_urlopen_context = self.swap( - python_utils, 'url_open', swapped_urlopen) + utils, 'url_open', swapped_urlopen) swap_request_context = self.swap( - python_utils, 'url_request', swapped_request) - swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key') + urllib.request, 'Request', swapped_request) swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain') - with swap_urlopen_context, swap_request_context, swap_api, swap_domain: - resp = mailgun_email_services.send_email_to_recipients( - 'a@a.com', - ['b@b.com', 'c@c.com', 'd@d.com'], - 'Hola 😂 - invitation to collaborate', - 'plaintext_body 😂', - 'Hi abc,
    😂') - self.assertTrue(resp) - - def test_mailgun_key_or_domain_name_not_set_raises_exception(self) -> None: + with self.swap_api_key_secrets_return_secret, swap_urlopen_context: + with swap_request_context, swap_domain: + resp = mailgun_email_services.send_email_to_recipients( + 'a@a.com', + ['b@b.com', 'c@c.com', 'd@d.com'], + 'Hola 😂 - invitation to collaborate', + 'plaintext_body 😂', + 'Hi abc,
    😂') + self.assertTrue(resp) + + def test_mailgun_key_not_set_raises_exception(self) -> None: """Test that exceptions are raised when API key or domain name are unset. """ # Testing no mailgun api key. - mailgun_exception = self.assertRaisesRegexp( # type: ignore[no-untyped-call] + mailgun_exception = self.assertRaisesRegex( Exception, 'Mailgun API key is not available.') - with mailgun_exception: - mailgun_email_services.send_email_to_recipients( - 'a@a.com', - ['b@b.com', 'c@c.com', 'd@d.com'], - 'Hola 😂 - invitation to collaborate', - 'plaintext_body 😂', - 'Hi abc,
    😂') + with self.swap_api_key_secrets_return_none, mailgun_exception: + with self.capture_logging() as logs: + mailgun_email_services.send_email_to_recipients( + 'a@a.com', + ['b@b.com', 'c@c.com', 'd@d.com'], + 'Hola 😂 - invitation to collaborate', + 'plaintext_body 😂', + 'Hi abc,
    😂') + self.assertIn( + 'Cloud Secret Manager is not able to get MAILGUN_API_KEY.', + logs + ) + def test_mailgun_domain_name_not_set_raises_exception(self) -> None: # Testing no mailgun domain name. - swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key') - mailgun_exception = self.assertRaisesRegexp( # type: ignore[no-untyped-call] + mailgun_exception = self.assertRaisesRegex( Exception, 'Mailgun domain name is not set.') - with swap_api, mailgun_exception: - mailgun_email_services.send_email_to_recipients( - 'a@a.com', - ['b@b.com', 'c@c.com', 'd@d.com'], - 'Hola 😂 - invitation to collaborate', - 'plaintext_body 😂', - 'Hi abc,
    😂') + with self.swap_api_key_secrets_return_secret, mailgun_exception: + with self.capture_logging() as logs: + mailgun_email_services.send_email_to_recipients( + 'a@a.com', + ['b@b.com', 'c@c.com', 'd@d.com'], + 'Hola 😂 - invitation to collaborate', + 'plaintext_body 😂', + 'Hi abc,
    😂') + self.assertIn( + 'Cloud Secret Manager is not able to get MAILGUN_API_KEY.', + logs + ) def test_invalid_status_code_returns_false(self) -> None: expected_query_url: MailgunQueryType = ( @@ -211,19 +247,22 @@ def test_invalid_status_code_returns_false(self) -> None: swapped_request = lambda *args: args swapped_urlopen = lambda x: self.Response(x, expected_query_url) swap_urlopen_context = self.swap( - python_utils, 'url_open', swapped_urlopen) + utils, 'url_open', swapped_urlopen) swap_request_context = self.swap( - python_utils, 'url_request', swapped_request) - swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key') + urllib.request, 'Request', swapped_request) swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain') - with swap_urlopen_context, swap_request_context, swap_api, swap_domain: - resp = mailgun_email_services.send_email_to_recipients( - 'a@a.com', - ['b@b.com'], - 'Hola 😂 - invitation to collaborate', - 'plaintext_body 😂', - 'Hi abc,
    😂', - bcc=['c@c.com', 'd@d.com'], - reply_to='abc', - recipient_variables=({'b@b.com': {'first': 'Bob', 'id': 1}})) - self.assertFalse(resp) + with self.swap_api_key_secrets_return_secret, swap_urlopen_context: + with swap_request_context, swap_domain: + resp = mailgun_email_services.send_email_to_recipients( + 'a@a.com', + ['b@b.com'], + 'Hola 😂 - invitation to collaborate', + 'plaintext_body 😂', + 'Hi abc,
    😂', + bcc=['c@c.com', 'd@d.com'], + reply_to='abc', + recipient_variables=({ + 'b@b.com': {'first': 'Bob', 'id': 1} + }) + ) + self.assertFalse(resp) diff --git a/core/platform/models.py b/core/platform/models.py index 7b0b2cc16712..9240806a20f7 100644 --- a/core/platform/models.py +++ b/core/platform/models.py @@ -22,33 +22,26 @@ from types import ModuleType # pylint: disable=import-only-modules from core import feconf -from core import python_utils from core.constants import constants from typing import List, Tuple, Type MYPY = False if MYPY: # pragma: no cover - from mypy_imports import base_models # pylint: disable=unused-import - -# Valid model names. -NAMES = python_utils.create_enum( # type: ignore[no-untyped-call] - 'activity', 'app_feedback_report', 'audit', 'base_model', 'beam_job', - 'blog', 'classifier', 'collection', 'config', 'email', 'exploration', - 'feedback', 'improvements', 'job', 'opportunity', 'question', - 'recommendations', 'skill', 'statistics', 'activity', 'audit', 'auth', - 'base_model', 'classifier', 'collection', 'config', 'email', 'exploration', - 'feedback', 'improvements', 'job', 'opportunity', 'question', - 'recommendations', 'skill', 'statistics', 'story', 'subtopic', 'suggestion', - 'topic', 'translation', 'user') + from mypy_imports import base_models + +# Constant for valid model names. +Names = feconf.ValidModelNames # Types of deletion policies. The pragma comment is needed because Enums are # evaluated as classes in Python and they should use PascalCase, but using # UPPER_CASE seems more appropriate here. 
+ + MODULES_WITH_PSEUDONYMIZABLE_CLASSES = ( # pylint: disable=invalid-name - NAMES.app_feedback_report, NAMES.blog, NAMES.collection, NAMES.config, - NAMES.exploration, NAMES.feedback, NAMES.question, NAMES.skill, NAMES.story, - NAMES.subtopic, NAMES.suggestion, NAMES.topic + Names.APP_FEEDBACK_REPORT, Names.BLOG, Names.COLLECTION, Names.CONFIG, + Names.EXPLORATION, Names.FEEDBACK, Names.QUESTION, Names.SKILL, Names.STORY, + Names.SUBTOPIC, Names.SUGGESTION, Names.TOPIC ) GAE_PLATFORM = 'gae' @@ -59,7 +52,7 @@ class Platform: @classmethod def import_models( - cls, unused_model_names: List[str] + cls, unused_model_names: List[Names] ) -> Tuple[ModuleType, ...]: """An abstract method that should be implemented on inherited classes. @@ -81,11 +74,11 @@ class _Gae(Platform): # doesn't match with BaseModel.delete_multi(). # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override @classmethod - def import_models(cls, model_names: List[str]) -> Tuple[ModuleType, ...]: + def import_models(cls, model_names: List[Names]) -> Tuple[ModuleType, ...]: """Imports and returns the storage modules listed in model_names. Args: - model_names: list(NAMES). List of storage module names. + model_names: list(Names). List of storage module names. Returns: tuple(module). Tuple of storage modules. @@ -99,91 +92,99 @@ def import_models(cls, model_names: List[str]) -> Tuple[ModuleType, ...]: # here since when we import modules using this function, we need to add # separate imports for mypy anyway. 
for name in model_names: - if name == NAMES.activity: + if name == Names.ACTIVITY: from core.storage.activity import gae_models as activity_models returned_models.append(activity_models) - elif name == NAMES.app_feedback_report: + elif name == Names.APP_FEEDBACK_REPORT: from core.storage.app_feedback_report import ( gae_models as app_feedback_report_models) returned_models.append(app_feedback_report_models) - elif name == NAMES.audit: + elif name == Names.AUDIT: from core.storage.audit import gae_models as audit_models returned_models.append(audit_models) - elif name == NAMES.auth: + elif name == Names.AUTH: from core.storage.auth import gae_models as auth_models returned_models.append(auth_models) - elif name == NAMES.base_model: + elif name == Names.BASE_MODEL: from core.storage.base_model import gae_models as base_model returned_models.append(base_model) - elif name == NAMES.beam_job: + elif name == Names.BEAM_JOB: from core.storage.beam_job import gae_models as beam_job_models returned_models.append(beam_job_models) - elif name == NAMES.blog: + elif name == Names.BLOG: from core.storage.blog import gae_models as blog_models returned_models.append(blog_models) - elif name == NAMES.classifier: + elif name == Names.CLASSIFIER: from core.storage.classifier import ( gae_models as classifier_models) returned_models.append(classifier_models) - elif name == NAMES.collection: + elif name == Names.CLASSROOM: + from core.storage.classroom import ( + gae_models as classroom_models) + returned_models.append(classroom_models) + elif name == Names.COLLECTION: from core.storage.collection import ( gae_models as collection_models) returned_models.append(collection_models) - elif name == NAMES.config: + elif name == Names.CONFIG: from core.storage.config import gae_models as config_models returned_models.append(config_models) - elif name == NAMES.email: + elif name == Names.EMAIL: from core.storage.email import gae_models as email_models returned_models.append(email_models) - 
elif name == NAMES.exploration: + elif name == Names.EXPLORATION: from core.storage.exploration import gae_models as exp_models returned_models.append(exp_models) - elif name == NAMES.feedback: + elif name == Names.FEEDBACK: from core.storage.feedback import gae_models as feedback_models returned_models.append(feedback_models) - elif name == NAMES.improvements: + elif name == Names.IMPROVEMENTS: from core.storage.improvements import ( gae_models as improvements_models) returned_models.append(improvements_models) - elif name == NAMES.job: + elif name == Names.JOB: from core.storage.job import gae_models as job_models returned_models.append(job_models) - elif name == NAMES.opportunity: + elif name == Names.LEARNER_GROUP: + from core.storage.learner_group import ( + gae_models as learner_group_models) + returned_models.append(learner_group_models) + elif name == Names.OPPORTUNITY: from core.storage.opportunity import ( gae_models as opportunity_models) returned_models.append(opportunity_models) - elif name == NAMES.question: + elif name == Names.QUESTION: from core.storage.question import gae_models as question_models returned_models.append(question_models) - elif name == NAMES.recommendations: + elif name == Names.RECOMMENDATIONS: from core.storage.recommendations import ( gae_models as recommendations_models) returned_models.append(recommendations_models) - elif name == NAMES.skill: + elif name == Names.SKILL: from core.storage.skill import gae_models as skill_models returned_models.append(skill_models) - elif name == NAMES.statistics: + elif name == Names.STATISTICS: from core.storage.statistics import ( gae_models as statistics_models) returned_models.append(statistics_models) - elif name == NAMES.story: + elif name == Names.STORY: from core.storage.story import gae_models as story_models returned_models.append(story_models) - elif name == NAMES.subtopic: + elif name == Names.SUBTOPIC: from core.storage.subtopic import gae_models as subtopic_models 
returned_models.append(subtopic_models) - elif name == NAMES.suggestion: + elif name == Names.SUGGESTION: from core.storage.suggestion import ( gae_models as suggestion_models) returned_models.append(suggestion_models) - elif name == NAMES.topic: + elif name == Names.TOPIC: from core.storage.topic import gae_models as topic_models returned_models.append(topic_models) - elif name == NAMES.translation: + elif name == Names.TRANSLATION: from core.storage.translation import ( gae_models as translation_models) returned_models.append(translation_models) - elif name == NAMES.user: + elif name == Names.USER: from core.storage.user import gae_models as user_models returned_models.append(user_models) else: @@ -193,8 +194,8 @@ def import_models(cls, model_names: List[str]) -> Tuple[ModuleType, ...]: @classmethod def get_storage_model_classes( - cls, model_names: List[str] - ) -> List[base_models.BaseModel]: + cls, model_names: List[Names] + ) -> List[Type[base_models.BaseModel]]: """Get the storage model classes that are in the modules listed in model_names. @@ -218,7 +219,7 @@ def get_storage_model_classes( return model_classes @classmethod - def get_all_storage_model_classes(cls) -> List[base_models.BaseModel]: + def get_all_storage_model_classes(cls) -> List[Type[base_models.BaseModel]]: """Get all model classes that are saved in the storage, NOT model classes that are just inherited from (BaseModel, BaseCommitLogEntryModel, etc.). @@ -226,7 +227,7 @@ def get_all_storage_model_classes(cls) -> List[base_models.BaseModel]: Returns: list(class). The corresponding storage-layer model classes. 
""" - model_names = [name for name in NAMES if name != NAMES.base_model] + model_names = [name for name in Names if name != Names.BASE_MODEL] return cls.get_storage_model_classes(model_names) @classmethod @@ -384,6 +385,16 @@ def import_storage_services(cls) -> ModuleType: from core.platform.storage import cloud_storage_services return cloud_storage_services + @classmethod + def import_secrets_services(cls) -> ModuleType: + """Imports and returns cloud_secrets_services module. + + Returns: + module. The cloud_secrets_services module. + """ + from core.platform.secrets import cloud_secrets_services + return cloud_secrets_services + NAME = 'gae' @@ -411,11 +422,11 @@ def _get(cls) -> Type[_Gae]: return klass @classmethod - def import_models(cls, model_names: List[str]) -> Tuple[ModuleType, ...]: + def import_models(cls, model_names: List[Names]) -> Tuple[ModuleType, ...]: """Imports and returns the storage modules listed in model_names. Args: - model_names: list(NAMES). List of storage modules. + model_names: list(Names). List of storage modules. Returns: tuple(module). The corresponding storage-layer modules. @@ -424,8 +435,8 @@ def import_models(cls, model_names: List[str]) -> Tuple[ModuleType, ...]: @classmethod def get_storage_model_classes( - cls, model_names: List[str] - ) -> List[base_models.BaseModel]: + cls, model_names: List[Names] + ) -> List[Type[base_models.BaseModel]]: """Get the storage model classes that are in the modules listed in model_names. @@ -438,7 +449,7 @@ def get_storage_model_classes( return cls._get().get_storage_model_classes(model_names) @classmethod - def get_all_storage_model_classes(cls) -> List[base_models.BaseModel]: + def get_all_storage_model_classes(cls) -> List[Type[base_models.BaseModel]]: """Get all model classes that are saved in the storage, NOT model classes that are just inherited from (BaseModel, BaseCommitLogEntryModel, etc.). @@ -546,3 +557,12 @@ def import_storage_services(cls) -> ModuleType: module. 
The storage_services module. """ return cls._get().import_storage_services() + + @classmethod + def import_secrets_services(cls) -> ModuleType: + """Imports and returns secrets_services module. + + Returns: + module. The secrets_services module. + """ + return cls._get().import_secrets_services() diff --git a/core/platform/models_test.py b/core/platform/models_test.py index 4207bdc8c85e..331297173033 100644 --- a/core/platform/models_test.py +++ b/core/platform/models_test.py @@ -34,7 +34,7 @@ class RegistryUnitTest(test_utils.TestBase): """Tests the Registry class interface.""" def setUp(self) -> None: - super(RegistryUnitTest, self).setUp() + super().setUp() self.registry_instance = models.Registry() def test_import_models_activity(self) -> None: @@ -43,7 +43,7 @@ def test_import_models_activity(self) -> None: expected_activity_models = (activity_models,) self.assertEqual( expected_activity_models, - self.registry_instance.import_models([models.NAMES.activity])) + self.registry_instance.import_models([models.Names.ACTIVITY])) def test_import_models_audit(self) -> None: """Tests import_models function with audit option.""" @@ -51,7 +51,7 @@ def test_import_models_audit(self) -> None: expected_audit_models = (audit_models,) self.assertEqual( expected_audit_models, - self.registry_instance.import_models([models.NAMES.audit])) + self.registry_instance.import_models([models.Names.AUDIT])) def test_import_models_auth_model(self) -> None: """Tests import_models function with auth option.""" @@ -59,7 +59,7 @@ def test_import_models_auth_model(self) -> None: expected_auth_models = (auth_models,) self.assertEqual( expected_auth_models, - self.registry_instance.import_models([models.NAMES.auth])) + self.registry_instance.import_models([models.Names.AUTH])) def test_import_models_base_model(self) -> None: """Tests import_models function with base model option.""" @@ -67,7 +67,7 @@ def test_import_models_base_model(self) -> None: expected_base_models = (base_models,) 
self.assertEqual( expected_base_models, - self.registry_instance.import_models([models.NAMES.base_model])) + self.registry_instance.import_models([models.Names.BASE_MODEL])) def test_import_models_blog_model(self) -> None: """Tests import_models function with blog post model option.""" @@ -75,7 +75,7 @@ def test_import_models_blog_model(self) -> None: expected_blog_models = (blog_models,) self.assertEqual( expected_blog_models, - self.registry_instance.import_models([models.NAMES.blog])) + self.registry_instance.import_models([models.Names.BLOG])) def test_import_models_beam_job_model(self) -> None: """Tests import_models function with base model option.""" @@ -83,7 +83,7 @@ def test_import_models_beam_job_model(self) -> None: expected_beam_job_models = (beam_job_models,) self.assertEqual( expected_beam_job_models, - self.registry_instance.import_models([models.NAMES.beam_job])) + self.registry_instance.import_models([models.Names.BEAM_JOB])) def test_import_models_classifier(self) -> None: """Tests import_models function with classifier option.""" @@ -91,7 +91,7 @@ def test_import_models_classifier(self) -> None: expected_classifier_models = (classifier_data_models,) self.assertEqual( expected_classifier_models, - self.registry_instance.import_models([models.NAMES.classifier])) + self.registry_instance.import_models([models.Names.CLASSIFIER])) def test_import_models_collection(self) -> None: """Tests import_models function with collection option.""" @@ -99,7 +99,7 @@ def test_import_models_collection(self) -> None: expected_collection_models = (collection_models,) self.assertEqual( expected_collection_models, - self.registry_instance.import_models([models.NAMES.collection])) + self.registry_instance.import_models([models.Names.COLLECTION])) def test_import_models_config(self) -> None: """Tests import_models function with config option.""" @@ -107,7 +107,7 @@ def test_import_models_config(self) -> None: expected_config_models = (config_models,) self.assertEqual( 
expected_config_models, - self.registry_instance.import_models([models.NAMES.config])) + self.registry_instance.import_models([models.Names.CONFIG])) def test_import_models_email(self) -> None: """Tests import_models function with email option.""" @@ -115,7 +115,7 @@ def test_import_models_email(self) -> None: expected_email_models = (email_models,) self.assertEqual( expected_email_models, - self.registry_instance.import_models([models.NAMES.email])) + self.registry_instance.import_models([models.Names.EMAIL])) def test_import_models_exploration(self) -> None: """Tests import_models function with exploration option.""" @@ -123,7 +123,7 @@ def test_import_models_exploration(self) -> None: expected_exploration_models = (exp_models,) self.assertEqual( expected_exploration_models, - self.registry_instance.import_models([models.NAMES.exploration])) + self.registry_instance.import_models([models.Names.EXPLORATION])) def test_import_models_feedback(self) -> None: """Tests import_models function with feedback option.""" @@ -131,7 +131,16 @@ def test_import_models_feedback(self) -> None: expected_feedback_models = (feedback_models,) self.assertEqual( expected_feedback_models, - self.registry_instance.import_models([models.NAMES.feedback])) + self.registry_instance.import_models([models.Names.FEEDBACK])) + + def test_import_models_learner_group(self) -> None: + """Tests import_models function with learner group option.""" + from core.storage.learner_group import ( + gae_models as learner_group_models) + expected_learner_group_models = (learner_group_models,) + self.assertEqual( + expected_learner_group_models, + self.registry_instance.import_models([models.Names.LEARNER_GROUP])) def test_import_models_job(self) -> None: """Tests import_models function with job option.""" @@ -139,7 +148,7 @@ def test_import_models_job(self) -> None: expected_job_models = (job_models,) self.assertEqual( expected_job_models, - self.registry_instance.import_models([models.NAMES.job])) + 
self.registry_instance.import_models([models.Names.JOB])) def test_import_models_question(self) -> None: """Tests import_models function with question option.""" @@ -147,7 +156,7 @@ def test_import_models_question(self) -> None: expected_question_models = (question_models,) self.assertEqual( expected_question_models, - self.registry_instance.import_models([models.NAMES.question])) + self.registry_instance.import_models([models.Names.QUESTION])) def test_import_models_recommendations(self) -> None: """Tests import_models function with recommendations option.""" @@ -156,7 +165,7 @@ def test_import_models_recommendations(self) -> None: self.assertEqual( expected_recommendations_models, self.registry_instance.import_models( - [models.NAMES.recommendations])) + [models.Names.RECOMMENDATIONS])) def test_import_models_skill(self) -> None: """Tests import_models function with skill option.""" @@ -164,7 +173,7 @@ def test_import_models_skill(self) -> None: expected_skills_models = (skill_models,) self.assertEqual( expected_skills_models, - self.registry_instance.import_models([models.NAMES.skill])) + self.registry_instance.import_models([models.Names.SKILL])) def test_import_models_statistics(self) -> None: """Tests import_models function with statistics option.""" @@ -172,7 +181,7 @@ def test_import_models_statistics(self) -> None: expected_statistics_models = (statistics_models,) self.assertEqual( expected_statistics_models, - self.registry_instance.import_models([models.NAMES.statistics])) + self.registry_instance.import_models([models.Names.STATISTICS])) def test_import_models_story(self) -> None: """Tests import_models function with story option.""" @@ -180,7 +189,7 @@ def test_import_models_story(self) -> None: expected_story_models = (story_models,) self.assertEqual( expected_story_models, - self.registry_instance.import_models([models.NAMES.story])) + self.registry_instance.import_models([models.Names.STORY])) def test_import_models_suggestion(self) -> None: 
"""Tests import_models function with suggestion option.""" @@ -188,7 +197,7 @@ def test_import_models_suggestion(self) -> None: expected_suggestion_models = (suggestion_models,) self.assertEqual( expected_suggestion_models, - self.registry_instance.import_models([models.NAMES.suggestion])) + self.registry_instance.import_models([models.Names.SUGGESTION])) def test_import_models_topic(self) -> None: """Tests import_models function with topic option.""" @@ -196,7 +205,7 @@ def test_import_models_topic(self) -> None: expected_topic_models = (topic_models,) self.assertEqual( expected_topic_models, - self.registry_instance.import_models([models.NAMES.topic])) + self.registry_instance.import_models([models.Names.TOPIC])) def test_import_models_user(self) -> None: """Tests import_models function with user option.""" @@ -204,18 +213,21 @@ def test_import_models_user(self) -> None: expected_user_models = (user_models,) self.assertEqual( expected_user_models, - self.registry_instance.import_models([models.NAMES.user])) + self.registry_instance.import_models([models.Names.USER])) def test_import_models_invalid(self) -> None: """Tests import_models function with an invalid option.""" - with self.assertRaisesRegexp(Exception, 'Invalid model name: '): # type: ignore[no-untyped-call] - self.registry_instance.import_models(['']) + with self.assertRaisesRegex(Exception, 'Invalid model name: '): + # Here we use MyPy ignore because list item 0 is a string. + # expected type class names. This is done to test the function + # with invalid model names. 
+ self.registry_instance.import_models(['']) # type: ignore[list-item] def test_get_storage_model_classes(self) -> None: """Tests get_all_storage_model_classes.""" from core.storage.user import gae_models as user_models classes = self.registry_instance.get_storage_model_classes( - [models.NAMES.user]) + [models.Names.USER]) self.assertIn(user_models.UserSettingsModel, classes) self.assertIn(user_models.CompletedActivitiesModel, classes) self.assertIn(user_models.IncompleteActivitiesModel, classes) @@ -253,7 +265,7 @@ def test_import_datastore_services(self) -> None: def test_errors_in_datastore_services_functions(self) -> None: """Tests datastore services functions errors.""" from core.platform.datastore import cloud_datastore_services - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Model names should not be duplicated in input list.'): cloud_datastore_services.fetch_multiple_entities_by_ids_and_models( [('SampleModel', ['id_1', 'id_2']), @@ -302,7 +314,7 @@ def test_import_email_services_invalid(self) -> None: feconf, 'EMAIL_SERVICE_PROVIDER', 'invalid service provider'), ( self.swap(constants, 'DEV_MODE', False)): - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Invalid email service provider: invalid service provider' ): @@ -329,7 +341,7 @@ def test_import_bulk_email_services_invalid(self) -> None: feconf, 'BULK_EMAIL_SERVICE_PROVIDER', 'invalid service provider'), ( self.swap(constants, 'EMULATOR_MODE', False)): - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Invalid bulk email service provider: invalid service ' 'provider'): @@ -349,6 +361,10 @@ class MockCloudTaskqueue(): pass with self.swap(constants, 'EMULATOR_MODE', False): + # Here we use cast because sys.modules can only accept ModuleTypes + # but for testing purposes here we are providing MockCloudTaskqueue + # which is of class 
type. So because of this MyPy throws an error. + # Thus to avoid the error, we used cast here. sys.modules['core.platform.taskqueue.cloud_taskqueue_services'] = ( cast(ModuleType, MockCloudTaskqueue) ) @@ -387,6 +403,10 @@ class MockCloudStorage(): pass with self.swap(constants, 'EMULATOR_MODE', False): + # Here we use cast because sys.modules can only accept ModuleTypes + # but for testing purposes here we are providing MockCloudStorage + # which is of class type. So because of this MyPy throws an error. + # Thus to avoid the error, we used cast here. # Mock Cloud Storage since importing it fails in emulator env. sys.modules['core.platform.storage.cloud_storage_services'] = ( cast(ModuleType, MockCloudStorage) @@ -404,9 +424,9 @@ def test_import_models_not_implemented_has_not_implemented_error( self ) -> None: """Tests NotImplementedError of Platform.""" - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, re.escape( 'import_models() method is not overwritten in ' 'derived classes')): - models.Platform().import_models([models.NAMES.base_model]) + models.Platform().import_models([models.Names.BASE_MODEL]) diff --git a/core/platform/search/elastic_search_services.py b/core/platform/search/elastic_search_services.py index b7dbdf9627e1..787fbd2e6b4a 100644 --- a/core/platform/search/elastic_search_services.py +++ b/core/platform/search/elastic_search_services.py @@ -21,10 +21,18 @@ from __future__ import annotations from core import feconf +from core.domain import search_services +from core.platform import models import elasticsearch -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import secrets_services + +secrets_services = models.Registry.import_secrets_services() # A timeout of 30 seconds is needed to avoid calls to # exp_services.load_demo() failing with 
a ReadTimeoutError @@ -35,7 +43,7 @@ if feconf.ES_CLOUD_ID is None else None, cloud_id=feconf.ES_CLOUD_ID, http_auth=( - (feconf.ES_USERNAME, feconf.ES_PASSWORD) + (feconf.ES_USERNAME, secrets_services.get_secret('ES_PASSWORD')) if feconf.ES_CLOUD_ID else None), timeout=30) @@ -45,6 +53,70 @@ class SearchException(Exception): pass +# Here we use type Any because the query_definition is a dictionary having +# values of various types. +# This can be seen from the type stubs of elastic search. +# The type of 'body' is 'Any'. +# https://github.com/elastic/elasticsearch-py/blob/acf1e0d94e083c85bb079564d17ff7ee29cf28f6/elasticsearch/client/__init__.pyi#L768 +def _fetch_response_from_elastic_search( + query_definition: Dict[str, Any], + index_name: str, + offset: int, + size: int, +) -> Tuple[List[str], Optional[int]]: + """Searches for documents matching the given query in the given index. + NOTE: We cannot search through more than 10,000 results from a search by + paginating using size and offset. If the number of items to search through + is greater that 10,000, use the elasticsearch scroll API instead. + + This function also creates the index if it does not exist yet. + + Args: + query_definition: dict(str, any). The Query DSL object. + index_name: str. The name of the index. Use '_all' or empty string to + perform the operation on all indices. + offset: int|None. The offset into the index. Pass this in to start at + the 'offset' when searching through a list of results of max length + 'size'. Leave as None to start at the beginning. + size: int. The maximum number of documents to return. + + Returns: + 2-tuple of (result_ids, resulting_offset). Where: + result_ids: list(str). Represents search documents, this will be a + list of strings corresponding to the search document ids. + resulting_offset: int. The resulting offset to start at for the next + section of the results. Returns None if there are no more + results. 
+ """ + # Fetch (size + 1) results in order to decide whether a "next + # page" offset needs to be returned. + num_docs_to_fetch = size + 1 + try: + response = ES.search( + body=query_definition, index=index_name, + params={ + 'size': num_docs_to_fetch, + 'from': offset + }) + except elasticsearch.NotFoundError: + # The index does not exist yet. Create it and return an empty result. + _create_index(index_name) + empty_list: List[str] = [] + return empty_list, None + + matched_search_docs = response['hits']['hits'] + + resulting_offset = None + if len(matched_search_docs) == num_docs_to_fetch: + # There is at least one more page of results to fetch. Trim the results + # in this call to the desired size. + matched_search_docs = matched_search_docs[:size] + resulting_offset = int(offset) + size + + result_ids = [doc['_id'] for doc in matched_search_docs] + return result_ids, resulting_offset + + def _create_index(index_name: str) -> None: """Creates a new index. @@ -58,13 +130,13 @@ def _create_index(index_name: str) -> None: ES.indices.create(index_name) -# In the type annotation below Dict[str, Any] is used for documents because -# there are no constraints for a document dictionary. +# Here we use type Any because the argument 'documents' represents the list of +# document dictionaries and there are no constraints for a document dictionary. # This can be seen from the type stubs of elastic search. # The type of 'body' here is Any. # https://github.com/elastic/elasticsearch-py/blob/acf1e0d94e083c85bb079564d17ff7ee29cf28f6/elasticsearch/client/__init__.pyi#L172 def add_documents_to_index( - documents: List[Dict[str, Any]], index_name: str + documents: Sequence[Mapping[str, Any]], index_name: str ) -> None: """Adds a document to an index. This function also creates the index if it does not exist yet. 
@@ -141,25 +213,16 @@ def clear_index(index_name: str) -> None: }) -# In the type annotation below Dict[str, Any] is used in return type because -# it returns the list of documents and document dictionaries can have -# any value. -# This can be seen from the type stubs of elastic search. -# The type of 'body' here is 'Any'. -# https://github.com/elastic/elasticsearch-py/blob/acf1e0d94e083c85bb079564d17ff7ee29cf28f6/elasticsearch/client/__init__.pyi#L172 def search( - query_string: str, - index_name: str, - categories: List[str], - language_codes: List[str], - offset: Optional[int] = None, - size: int = feconf.SEARCH_RESULTS_PAGE_SIZE, - ids_only: bool = False -) -> Tuple[Union[List[Dict[str, Any]], List[str]], Optional[int]]: - """Searches for documents matching the given query in the given index. - NOTE: We cannot search through more than 10,000 results from a search by - paginating using size and offset. If the number of items to search through - is greater that 10,000, use the elasticsearch scroll API instead. + query_string: str, + index_name: str, + categories: List[str], + language_codes: List[str], + offset: Optional[int] = None, + size: int = feconf.SEARCH_RESULTS_PAGE_SIZE, +) -> Tuple[List[str], Optional[int]]: + """Searches for documents (explorations or collections) matching the given + query in the given index. This function also creates the index if it does not exist yet. @@ -179,16 +242,12 @@ def search( the 'offset' when searching through a list of results of max length 'size'. Leave as None to start at the beginning. size: int. The maximum number of documents to return. - ids_only: bool. Whether to only return document ids. Returns: - 2-tuple of (result_docs, resulting_offset). Where: - result_docs: list(dict)|list(str). Represents search documents. If - 'ids_only' is True, this will be a list of strings corresponding - to the search document ids. 
If 'ids_only' is False, the full - dictionaries representing each document retrieved from the - elastic search instance will be returned. The document id will - be contained as the '_id' attribute in each document. + 2-tuple of (result_ids, resulting_offset). Where: + result_ids: list(str). Represents search documents, this + will be a list of strings corresponding to the search document + ids. resulting_offset: int. The resulting offset to start at for the next section of the results. Returns None if there are no more results. @@ -199,9 +258,8 @@ def search( # Convert the query into a Query DSL object. See # elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html # for more details about Query DSL. - # In the type annotation below Dict[str, Any] is used for query_definiton - # because the query_definition is a dictionary having values of various - # types. + # Here we use type Any because the query_definition is a dictionary having + # values of various types. # This can be seen from the type stubs of elastic search. # The type of 'body' is 'Any'. # https://github.com/elastic/elasticsearch-py/blob/acf1e0d94e083c85bb079564d17ff7ee29cf28f6/elasticsearch/client/__init__.pyi#L768 @@ -237,36 +295,85 @@ def search( {'match': {'language_code': language_code_string}} ) - # Fetch (size + 1) results in order to decide whether a "next - # page" offset needs to be returned. - num_docs_to_fetch = size + 1 + result_ids, resulting_offset = _fetch_response_from_elastic_search( + query_definition, index_name, offset, size + ) - try: - response = ES.search( - body=query_definition, index=index_name, - params={ - 'size': num_docs_to_fetch, - 'from': offset - }) - except elasticsearch.NotFoundError: - # The index does not exist yet. Create it and return an empty result. 
- _create_index(index_name) - empty_list: List[str] = [] - return empty_list, None + return result_ids, resulting_offset - matched_search_docs = response['hits']['hits'] - resulting_offset = None - if len(matched_search_docs) == num_docs_to_fetch: - # There is at least one more page of results to fetch. Trim the results - # in this call to the desired size. - matched_search_docs = matched_search_docs[:size] - resulting_offset = int(offset) + size +def blog_post_summaries_search( + query_string: str, + tags: List[str], + offset: Optional[int] = None, + size: int = feconf.SEARCH_RESULTS_PAGE_SIZE, +) -> Tuple[List[str], Optional[int]]: + """Searches for blog post summary documents matching the given query in the + blog post search index. + NOTE: We cannot search through more than 10,000 results from a search by + paginating using size and offset. - if ids_only: - result_docs = [doc['_id'] for doc in matched_search_docs] - else: - # Each dictionary(document) stored in doc['_source'] also contains an - # attribute '_id' which contains the document id. - result_docs = [doc['_source'] for doc in matched_search_docs] - return result_docs, resulting_offset + This function also creates the blog post search index if it does not exist + yet. + + Args: + query_string: str. The terms that the user is searching for in the + blog posts. + tags: list(str). The list of tags to query for. If it is + empty, no tag filter is applied to the results. If it is not + empty, then a result is considered valid if it matches at least one + of these tags. + offset: int|None. The offset into the index. Pass this in to start at + the 'offset' when searching through a list of results of max length + 'size'. Leave as None to start at the beginning. + size: int. The maximum number of documents to return. + + Returns: + 2-tuple of (result_ids, resulting_offset). Where: + result_ids: list(str). Represents search documents, this will be a + list of strings corresponding to the search document ids. 
+ resulting_offset: int. The resulting offset to start at for the next + section of the results. Returns None if there are no more + results. + """ + if offset is None: + offset = 0 + + # Here we use type Any because the query_definition is a dictionary having + # values of various types. + # This can be seen from the type stubs of elastic search. + # The type of 'body' is 'Any'. + # https://github.com/elastic/elasticsearch-py/blob/acf1e0d94e083c85bb079564d17ff7ee29cf28f6/elasticsearch/client/__init__.pyi#L768 + query_definition: Dict[str, Any] = { + 'query': { + 'bool': { + 'must': [], + 'filter': [], + } + }, + 'sort': [{ + 'rank': { + 'order': 'desc', + 'missing': '_last', + 'unmapped_type': 'float', + } + }], + } + if query_string: + query_definition['query']['bool']['must'] = [{ + 'multi_match': { + 'query': query_string, + } + }] + if tags: + for tag in tags: + query_definition['query']['bool']['filter'].append( + {'match': {'tags': tag}} + ) + + index_name = search_services.SEARCH_INDEX_BLOG_POSTS + result_ids, resulting_offset = _fetch_response_from_elastic_search( + query_definition, index_name, offset, size + ) + + return result_ids, resulting_offset diff --git a/core/platform/search/elastic_search_services_test.py b/core/platform/search/elastic_search_services_test.py index 14e5df14b90c..3b4cfc8dee63 100644 --- a/core/platform/search/elastic_search_services_test.py +++ b/core/platform/search/elastic_search_services_test.py @@ -18,6 +18,7 @@ from __future__ import annotations +from core.domain import search_services from core.platform.search import elastic_search_services from core.tests import test_utils @@ -70,7 +71,7 @@ def mock_index( 'id': correct_id } ] - assert_raises_ctx = self.assertRaisesRegexp( # type: ignore[no-untyped-call] + assert_raises_ctx = self.assertRaisesRegex( Exception, 'Failed to add document to index.') with assert_raises_ctx, self.swap( @@ -101,7 +102,7 @@ def test_delete_ignores_documents_that_do_not_exist(self) -> None: 
['not_a_real_id'], 'index1') def test_delete_returns_without_error_when_index_does_not_exist( - self + self ) -> None: elastic_search_services.delete_documents_from_index( ['doc_id'], 'nonexistent_index') @@ -143,60 +144,30 @@ def test_search_returns_ids_only(self) -> None: result, new_offset = ( elastic_search_services.search( '', correct_index_name, [], [], offset=0, - size=50, ids_only=True)) + size=50 + ) + ) self.assertEqual(result, [1, 12]) self.assertIsNone(new_offset) - def test_search_returns_full_response(self) -> None: - correct_index_name = 'index1' - elastic_search_services.add_documents_to_index([{ - 'id': 1, - 'source': { - 'param1': 1, - 'param2': 2 - } - }, { - 'id': 12, - 'source': { - 'param1': 3, - 'param2': 4 - } - }], correct_index_name) - - result, new_offset = elastic_search_services.search( - '', correct_index_name, [], [], offset=0, size=50, ids_only=False) - self.assertEqual(result, [{ - 'id': 1, - 'source': { - 'param1': 1, - 'param2': 2 - } - }, { - 'id': 12, - 'source': { - 'param1': 3, - 'param2': 4 - } - }]) - self.assertIsNone(new_offset) - def test_search_returns_none_when_response_is_empty(self) -> None: result, new_offset = elastic_search_services.search( - '', 'index', [], [], offset=0, - size=50, ids_only=False) + '', 'index', [], [], offset=0, size=50 + ) self.assertEqual(new_offset, None) self.assertEqual(result, []) def test_search_constructs_query_with_categories_and_languages( - self + self ) -> None: correct_index_name = 'index1' - # In the type annotation below Dict[str, Any] is used for body - # because this mocks the behavior of elastic_search_services.ES.search - # and in the type stubs the type is Any. + # Here we use type Any because this method mocks the behavior of + # elastic_search_services.ES.search, so to match the type annotations + # with 'search' method we defined the body as 'Dict[str, Any]' type, + # and also in the type stubs the type of body is mentioned as Any. 
def mock_search( - body: Dict[str, Any], index: str, params: Dict[str, int] + body: Dict[str, Any], index: str, params: Dict[str, int] ) -> Dict[str, Dict[str, List[str]]]: self.assertEqual(body, { 'query': { @@ -242,15 +213,16 @@ def mock_search( self.assertIsNone(new_offset) def test_search_constructs_nonempty_query_with_categories_and_langs( - self + self ) -> None: correct_index_name = 'index1' - # In the type annotation below Dict[str, Any] is used for body - # because this mocks the behavior of elastic_search_services.ES.search - # and in the type stubs the type is Any. + # Here we use type Any because this method mocks the behavior of + # elastic_search_services.ES.search, so to match the type annotations + # with 'search' method we defined the body as 'Dict[str, Any]' type, + # and also in the type stubs the type of body is mentioned as Any. def mock_search( - body: Dict[str, Any], index: str, params: Dict[str, int] + body: Dict[str, Any], index: str, params: Dict[str, int] ) -> Dict[str, Dict[str, List[str]]]: self.assertEqual(body, { 'query': { @@ -300,7 +272,7 @@ def mock_search( self.assertIsNone(new_offset) def test_search_returns_the_right_number_of_docs_even_if_more_exist( - self + self ) -> None: elastic_search_services.add_documents_to_index([{ 'id': 'doc_id1', @@ -310,25 +282,210 @@ def test_search_returns_the_right_number_of_docs_even_if_more_exist( 'title': 'hello me' }], 'index') results, new_offset = elastic_search_services.search( - 'hello', 'index', [], [], size=1) + 'hello', 'index', [], [], offset=None, size=1 + ) self.assertEqual(len(results), 1) - # Letting mypy know that results[0] is a dict. - assert isinstance(results[0], dict) - self.assertEqual(results[0]['id'], 'doc_id1') self.assertEqual(new_offset, 1) results, new_offset = elastic_search_services.search( - 'hello', 'index', [], [], offset=1, size=1) + 'hello', 'index', [], [], offset=1, size=1 + ) self.assertEqual(len(results), 1) - # Letting mypy know that results[0] is a dict. 
- assert isinstance(results[0], dict) - self.assertEqual(results[0]['id'], 'doc_id2') self.assertIsNone(new_offset) def test_search_returns_without_error_when_index_does_not_exist( - self + self ) -> None: result, new_offset = elastic_search_services.search( 'query', 'nonexistent_index', [], []) self.assertEqual(result, []) self.assertEqual(new_offset, None) + + def test_blog_post_summaries_search_returns_ids_only(self) -> None: + correct_index_name = search_services.SEARCH_INDEX_BLOG_POSTS + elastic_search_services.add_documents_to_index([{ + 'id': 1, + 'source': { + 'param1': 1, + 'param2': 2 + } + }, { + 'id': 12, + 'source': { + 'param1': 3, + 'param2': 4 + } + }], correct_index_name) + + result, new_offset = ( + elastic_search_services.blog_post_summaries_search( + '', [], offset=0, size=50 + ) + ) + self.assertEqual(result, [1, 12]) + self.assertIsNone(new_offset) + + def test_blog_post_summaries_search_returns_none_when_response_is_empty( + self + ) -> None: + result, new_offset = elastic_search_services.blog_post_summaries_search( + '', [], offset=0, size=50 + ) + self.assertEqual(new_offset, None) + self.assertEqual(result, []) + + def test_blog_post_summaries_search_constructs_query_with_tags( + self + ) -> None: + correct_index_name = search_services.SEARCH_INDEX_BLOG_POSTS + + # Here we use type Any because this method mocks the behavior of + # elastic_search_services.ES.search, so to match the type annotations + # with 'search' method we defined the body as 'Dict[str, Any]' type, + # and also in the type stubs the type of body is mentioned as Any. 
+ def mock_search( + body: Dict[str, Any], index: str, params: Dict[str, int] + ) -> Dict[str, Dict[str, List[str]]]: + self.assertEqual(body, { + 'query': { + 'bool': { + 'filter': [{ + 'match': { + 'tags': 'tag1', + } + }, { + 'match': { + 'tags': 'tag2', + } + }], + 'must': [], + } + }, + 'sort': [{ + 'rank': { + 'order': 'desc', + 'missing': '_last', + 'unmapped_type': 'float' + } + }] + }) + self.assertEqual(index, correct_index_name) + self.assertEqual(params, { + 'from': 0, + 'size': 21 + }) + return { + 'hits': { + 'hits': [] + } + } + + swap_search = self.swap( + elastic_search_services.ES, 'search', mock_search) + with swap_search: + result, new_offset = ( + elastic_search_services.blog_post_summaries_search( + '', + ['tag1', 'tag2'] + ) + ) + self.assertEqual(result, []) + self.assertIsNone(new_offset) + + def test_blog_post_summaries_search_constructs_nonempty_query_with_tags( + self + ) -> None: + correct_index_name = search_services.SEARCH_INDEX_BLOG_POSTS + + # Here we use type Any because this method mocks the behavior of + # elastic_search_services.ES.search, so to match the type annotations + # with 'search' method we defined the body as 'Dict[str, Any]' type, + # and also in the type stubs the type of body is mentioned as Any. 
+ def mock_search( + body: Dict[str, Any], index: str, params: Dict[str, int] + ) -> Dict[str, Dict[str, List[str]]]: + self.assertEqual(body, { + 'query': { + 'bool': { + 'must': [{ + 'multi_match': { + 'query': 'query' + } + }], + 'filter': [{ + 'match': { + 'tags': 'tag1', + } + }, { + 'match': { + 'tags': 'tag2', + } + }] + } + }, + 'sort': [{ + 'rank': { + 'order': 'desc', + 'missing': '_last', + 'unmapped_type': 'float' + } + }] + }) + self.assertEqual(index, correct_index_name) + self.assertEqual(params, { + 'from': 0, + 'size': 21 + }) + return { + 'hits': { + 'hits': [] + } + } + + swap_search = self.swap( + elastic_search_services.ES, 'search', mock_search) + with swap_search: + result, new_offset = ( + elastic_search_services.blog_post_summaries_search( + 'query', ['tag1', 'tag2'] + ) + ) + self.assertEqual(result, []) + self.assertIsNone(new_offset) + + def test_blog_post_search_returns_the_right_num_of_docs_even_if_more_exist( + self + ) -> None: + elastic_search_services.add_documents_to_index([{ + 'id': 'doc_id1', + 'title': 'blog post world' + }, { + 'id': 'doc_id2', + 'title': 'hello blog' + }], search_services.SEARCH_INDEX_BLOG_POSTS) + results, new_offset = ( + elastic_search_services.blog_post_summaries_search( + 'blog', [], offset=None, size=1 + ) + ) + self.assertEqual(len(results), 1) + self.assertEqual(new_offset, 1) + + results, new_offset = ( + elastic_search_services.blog_post_summaries_search( + 'blog', [], offset=1, size=1 + ) + ) + self.assertEqual(len(results), 1) + self.assertIsNone(new_offset) + + def test_blog_post_search_returns_without_error_when_index_does_not_exist( + self + ) -> None: + # We perform search without adding any document to index. Therefore blog + # post search index doesn't exist. 
+ result, new_offset = elastic_search_services.blog_post_summaries_search( + 'query', [] + ) + self.assertEqual(result, []) + self.assertEqual(new_offset, None) diff --git a/core/platform/secrets/__init__.py b/core/platform/secrets/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/core/platform/secrets/cloud_secrets_services.py b/core/platform/secrets/cloud_secrets_services.py new file mode 100644 index 000000000000..5c3af9afe29a --- /dev/null +++ b/core/platform/secrets/cloud_secrets_services.py @@ -0,0 +1,55 @@ +# coding: utf-8 +# +# Copyright 2020 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS-IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provides cloud secrets services.""" + +from __future__ import annotations + +import functools + +from core import feconf +from core.constants import constants + +from google import auth +from google.cloud import secretmanager +from typing import Optional + +# The 'auth.default()' returns tuple of credentials and project ID. As we are +# only interested in credentials, we are using '[0]' to access it. +CLIENT = secretmanager.SecretManagerServiceClient( + credentials=( + auth.credentials.AnonymousCredentials() + if constants.EMULATOR_MODE else auth.default()[0])) + + +@functools.lru_cache(maxsize=64) +def get_secret(name: str) -> Optional[str]: + """Gets the value of a secret. + + Args: + name: str. The name of the secret to retrieve. + + Returns: + str. The value of the secret. 
+ """ + secret_name = ( + f'projects/{feconf.OPPIA_PROJECT_ID}/secrets/{name}/versions/latest') + try: + response = CLIENT.access_secret_version(request={'name': secret_name}) + except Exception: + return None + + return response.payload.data.decode('utf-8') diff --git a/core/platform/secrets/cloud_secrets_services_test.py b/core/platform/secrets/cloud_secrets_services_test.py new file mode 100644 index 000000000000..d86fe79fb4b7 --- /dev/null +++ b/core/platform/secrets/cloud_secrets_services_test.py @@ -0,0 +1,46 @@ +# coding: utf-8 +# +# Copyright 2020 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS-IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# sizeations under the License. 
+ +"""Tests for the Python Cloud Secret services.""" + +from __future__ import annotations + +import types + +from core.platform.secrets import cloud_secrets_services +from core.tests import test_utils + + +class CloudSecretsServicesTests(test_utils.GenericTestBase): + """Tests for the Python Cloud Secret services.""" + + def test_get_secret_returns_existing_secret(self) -> None: + with self.swap_to_always_return( + cloud_secrets_services.CLIENT, + 'access_secret_version', + types.SimpleNamespace(payload=types.SimpleNamespace(data=b'secret')) + ): + secret = cloud_secrets_services.get_secret('name') + self.assertEqual(secret, 'secret') + + def test_get_secret_returns_none_when_secret_does_not_exist(self) -> None: + with self.swap_to_always_raise( + cloud_secrets_services.CLIENT, + 'access_secret_version', + Exception('Secret not found') + ): + secret = cloud_secrets_services.get_secret('name2') + self.assertIsNone(secret) diff --git a/core/platform/storage/cloud_storage_emulator.py b/core/platform/storage/cloud_storage_emulator.py index b2f7c2bbb085..fb001910fbdf 100644 --- a/core/platform/storage/cloud_storage_emulator.py +++ b/core/platform/storage/cloud_storage_emulator.py @@ -38,11 +38,11 @@ class EmulatorBlob: """Object for storing the file data.""" def __init__( - self, - name: str, - data: Union[bytes, str], - content_type: Optional[str] - ): + self, + name: str, + data: Union[bytes, str], + content_type: Optional[str] + ) -> None: """Initialize blob. Args: @@ -52,6 +52,9 @@ def __init__( from Cloud Storage as bytes. content_type: str|None. The content type of the blob. It should be in the MIME format. + + Raises: + Exception. Content type contains unknown MIME type. """ self._name = name # TODO(#13500): Refactor this method that only bytes are passed @@ -71,6 +74,11 @@ def __init__( # this set. Only then can this exception be removed. 
elif content_type == 'audio/mp3': self._content_type = content_type + # Currently 'image/webp' is not recognized as a valid MIME type. + # To verify it is a valid type you can visit + # https://datatracker.ietf.org/doc/html/draft-zern-webp#section-6.1. + elif content_type == 'image/webp': + self._content_type = content_type else: if mimetypes.guess_extension(content_type) is None: raise Exception('Content type contains unknown MIME type.') @@ -78,7 +86,7 @@ def __init__( @classmethod def create_copy( - cls, original_blob: EmulatorBlob, new_name: str + cls, original_blob: EmulatorBlob, new_name: str ) -> EmulatorBlob: """Create new instance of EmulatorBlob with the same values. @@ -158,6 +166,8 @@ def download_as_bytes(self) -> bytes: """ return self._raw_bytes + # Here we use object because we want to allow every object with which + # we can compare. def __eq__(self, other: object) -> bool: if not isinstance(other, self.__class__): return False diff --git a/core/platform/storage/cloud_storage_emulator_test.py b/core/platform/storage/cloud_storage_emulator_test.py index 4ed70e475dde..53fb2f8d66c3 100644 --- a/core/platform/storage/cloud_storage_emulator_test.py +++ b/core/platform/storage/cloud_storage_emulator_test.py @@ -39,8 +39,29 @@ def test_init_blob_with_bytes_creates_blob(self) -> None: self.assertEqual(blob.download_as_bytes(), b'string') self.assertEqual(blob.content_type, 'image/png') + def test_init_blob_with_none_content_type_creates_blob(self) -> None: + blob = ( + cloud_storage_emulator.EmulatorBlob('name', 'string', None)) + self.assertEqual(blob.name, 'name') + self.assertEqual(blob.download_as_bytes(), b'string') + self.assertEqual(blob.content_type, 'application/octet-stream') + + def test_init_blob_with_content_type_audio_creates_blob(self) -> None: + blob = ( + cloud_storage_emulator.EmulatorBlob('name', 'string', 'audio/mp3')) + self.assertEqual(blob.name, 'name') + self.assertEqual(blob.download_as_bytes(), b'string') + 
self.assertEqual(blob.content_type, 'audio/mp3') + + def test_init_blob_with_content_type_images_webp_creates_blob(self) -> None: + blob = cloud_storage_emulator.EmulatorBlob( + 'name', 'string', 'image/webp') + self.assertEqual(blob.name, 'name') + self.assertEqual(blob.download_as_bytes(), b'string') + self.assertEqual(blob.content_type, 'image/webp') + def test_init_blob_with_wrong_mimetype_raise_exception(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Content type contains unknown MIME type.'): cloud_storage_emulator.EmulatorBlob('name', b'string', 'png') @@ -97,7 +118,7 @@ class CloudStorageEmulatorUnitTests(test_utils.TestBase): """Tests for CloudStorageEmulator.""" def setUp(self) -> None: - super(CloudStorageEmulatorUnitTests, self).setUp() + super().setUp() self.emulator = cloud_storage_emulator.CloudStorageEmulator() self.emulator.namespace = 'namespace' self.emulator.reset() @@ -109,7 +130,7 @@ def setUp(self) -> None: '/different/path.png', b'data2', 'image/png') def tearDown(self) -> None: - super(CloudStorageEmulatorUnitTests, self).tearDown() + super().tearDown() self.emulator.reset() def test_get_blob_retrieves_correct_blob_from_redis(self) -> None: @@ -165,12 +186,12 @@ def test_list_blobs_returns_list_of_blobs_with_prefix(self) -> None: 'namespace:/file/path2.png', mapping=self.blob2.to_dict()) cloud_storage_emulator.REDIS_CLIENT.hset( 'namespace:/different/path.png', mapping=self.blob3.to_dict()) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( self.emulator.list_blobs('/'), [self.blob1, self.blob2, self.blob3]) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( self.emulator.list_blobs('/file'), [self.blob1, self.blob2]) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( self.emulator.list_blobs('/different'), [self.blob3]) def test_reset_removes_all_values_from_redis(self) -> 
None: diff --git a/core/platform/storage/cloud_storage_services_test.py b/core/platform/storage/cloud_storage_services_test.py index f33293ef96a0..eaf10f7096c3 100644 --- a/core/platform/storage/cloud_storage_services_test.py +++ b/core/platform/storage/cloud_storage_services_test.py @@ -267,7 +267,7 @@ def test_listdir_lists_files_with_provided_prefix(self) -> None: path_slash_blobs = ( cloud_storage_services.listdir('bucket_1', 'path/')) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( path_blobs, [ self.bucket_1.blobs['path/to/file.txt'], @@ -275,7 +275,7 @@ def test_listdir_lists_files_with_provided_prefix(self) -> None: self.bucket_1.blobs['path/to/file2.txt'] ] ) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( path_slash_blobs, [ self.bucket_1.blobs['path/to/file.txt'], diff --git a/core/platform/storage/dev_mode_storage_services.py b/core/platform/storage/dev_mode_storage_services.py index 5ea59e61cfe6..b3a84b20d3db 100644 --- a/core/platform/storage/dev_mode_storage_services.py +++ b/core/platform/storage/dev_mode_storage_services.py @@ -20,7 +20,7 @@ from core.platform.storage import cloud_storage_emulator -from typing import List, Union +from typing import List, Optional, Union CLIENT = cloud_storage_emulator.CloudStorageEmulator() @@ -58,7 +58,7 @@ def commit( unused_bucket_name: str, filepath: str, raw_bytes: Union[bytes, str], - mimetype: str + mimetype: Optional[str] ) -> None: """Commits bytes to the relevant file. @@ -66,7 +66,7 @@ def commit( unused_bucket_name: str. Unused name of the GCS bucket. filepath: str. The path to the relevant file. raw_bytes: bytes|str. The content to be stored in the file. - mimetype: str. The content-type of the file. + mimetype: Optional[str]. The content-type of the file. """ # TODO(#13500): Refactor this method that only bytes are passed # into raw_bytes. @@ -96,6 +96,9 @@ def copy( folder. dest_assets_path: str. 
The path to the relevant file within the entity's assets folder. + + Raises: + Exception. Source asset does not exist. """ src_blob = CLIENT.get_blob(source_assets_path) if src_blob is None: diff --git a/core/platform/storage/dev_mode_storage_services_test.py b/core/platform/storage/dev_mode_storage_services_test.py index 89b755e62c3b..eaf5a169ef55 100644 --- a/core/platform/storage/dev_mode_storage_services_test.py +++ b/core/platform/storage/dev_mode_storage_services_test.py @@ -72,7 +72,7 @@ def test_copy_with_existing_source_blob_is_successful(self) -> None: ) def test_copy_with_non_existing_source_blob_fails(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Source asset does not exist' ): dev_mode_storage_services.copy( @@ -90,7 +90,7 @@ def test_listdir_with_slash_returns_all_blobs(self) -> None: blob.download_as_bytes() for blob in dev_mode_storage_services.listdir('bucket', '/') ] - self.assertItemsEqual(blob_data, [b'data1', b'data2', b'data3']) # type: ignore[no-untyped-call] + self.assertItemsEqual(blob_data, [b'data1', b'data2', b'data3']) def test_listdir_with_specific_folder_returns_some_blobs(self) -> None: dev_mode_storage_services.commit( @@ -104,4 +104,4 @@ def test_listdir_with_specific_folder_returns_some_blobs(self) -> None: blob.download_as_bytes() for blob in dev_mode_storage_services.listdir('bucket', '/file') ] - self.assertItemsEqual(blob_data, [b'data1', b'data2']) # type: ignore[no-untyped-call] + self.assertItemsEqual(blob_data, [b'data1', b'data2']) diff --git a/core/platform/taskqueue/cloud_taskqueue_services.py b/core/platform/taskqueue/cloud_taskqueue_services.py index 85955738c36a..916a93a7f666 100644 --- a/core/platform/taskqueue/cloud_taskqueue_services.py +++ b/core/platform/taskqueue/cloud_taskqueue_services.py @@ -31,14 +31,16 @@ from google.protobuf import timestamp_pb2 from typing import Any, Dict, Optional +# The 'auth.default()' returns tuple of 
credentials and project ID. As we are +# only interested in credentials, we are using '[0]' to access it. CLIENT = tasks_v2.CloudTasksClient( credentials=( auth.credentials.AnonymousCredentials() if constants.EMULATOR_MODE else auth.default()[0])) -# In the type annotation below, payload is of type Dict[str, Any] because -# the payload here has no constraints. +# Here we use type Any because the payload here has no constraints, so that's +# why payload is annotated with 'Dict[str, Any]' type. def create_http_task( queue_name: str, url: str, @@ -69,8 +71,8 @@ def create_http_task( parent = CLIENT.queue_path( feconf.OPPIA_PROJECT_ID, feconf.GOOGLE_APP_ENGINE_REGION, queue_name) - # In the type annotation below, task is of type Dict[str, Any] because - # its structure can vary a lot. + # Here we use type Any because task's structure can vary a lot. So, to allow + # every type of value we used Dict[str, Any] type here. # We can see how the proto message for Task is defined. See the link: # https://github.com/googleapis/python-tasks/blob/2f6ae8318e9a6fc2963d4a7825ee96e41f330043/google/cloud/tasks_v2/types/task.py#L29 task: Dict[str, Any] = { diff --git a/core/platform/taskqueue/cloud_taskqueue_services_test.py b/core/platform/taskqueue/cloud_taskqueue_services_test.py index 4bd3e7d0e951..1efd3d364b84 100644 --- a/core/platform/taskqueue/cloud_taskqueue_services_test.py +++ b/core/platform/taskqueue/cloud_taskqueue_services_test.py @@ -55,9 +55,9 @@ def test_http_task_scheduled_immediately_sends_correct_request( } task_name = 'task1' - # In the type annotation below, task is of type Dict[str, Any] - # because it mocks the behaviour of - # cloud_taskqueue_services.CLIENT.create_task. + # Here we use type Any because this method mocks the behaviour of + # cloud_taskqueue_services.CLIENT.create_task and in 'create_task' + # task is defined as Dict[str, Any]. 
def mock_create_task( parent: str, task: Dict[str, Any], @@ -104,9 +104,9 @@ def test_http_task_scheduled_for_later_sends_correct_request(self) -> None: timestamp.FromDatetime(datetime_to_execute_task) task_name = 'task1' - # In the type annotation below, task is of type Dict[str, Any] - # because it mocks the behaviour of - # cloud_taskqueue_services.CLIENT.create_task. + # Here we use type Any because this method mocks the behaviour of + # cloud_taskqueue_services.CLIENT.create_task and in 'create_task' + # task is defined as Dict[str, Any]. def mock_create_task( parent: str, task: Dict[str, Any], diff --git a/core/platform/taskqueue/cloud_tasks_emulator.py b/core/platform/taskqueue/cloud_tasks_emulator.py index 73d79d38dcce..8b606c646216 100644 --- a/core/platform/taskqueue/cloud_tasks_emulator.py +++ b/core/platform/taskqueue/cloud_tasks_emulator.py @@ -23,11 +23,13 @@ from __future__ import annotations -import datetime # pylint: disable=unused-import import threading import time -from typing import Any, Callable, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional + +if TYPE_CHECKING: # pragma: no cover + import datetime class Task: @@ -35,8 +37,8 @@ class Task: the cloud tasks emulator. """ - # In the type annotation below, payload is of type Dict[str, Any] because - # the payload here has no constraints. + # Here we use type Any because the payload can accept Dict and + # this Dict has no constraints on its values. def __init__( self, queue_name: str, @@ -85,6 +87,9 @@ class Emulator: can be executed using process_and_flush_tasks(). """ + # Here we use type Any because 'task_handler' can accept any kind of + # function that will handle the task execution. So, to allow every + # function we used Callable[..., Any] type here. 
def __init__( self, task_handler: Callable[..., Any], @@ -170,8 +175,8 @@ def _total_enqueued_tasks(self) -> int: """ return sum(len(q) for q in self._queues.values()) - # In the type annotation below, payload is of type Dict[str, Any] because - # the payload here has no constraints. + # Here we use type Any because the payload can accept Dict and + # this Dict has no constraints on its values. def create_task( self, queue_name: str, diff --git a/core/platform/taskqueue/cloud_tasks_emulator_test.py b/core/platform/taskqueue/cloud_tasks_emulator_test.py index 51484f8c5c9a..62f61ef62a90 100644 --- a/core/platform/taskqueue/cloud_tasks_emulator_test.py +++ b/core/platform/taskqueue/cloud_tasks_emulator_test.py @@ -29,9 +29,10 @@ class CloudTasksEmulatorUnitTests(test_utils.TestBase): """Tests for cloud tasks emulator.""" - # In the type annotation below, payload is of type Dict[str, Any] - # because it emulates the behaviour of - # dev_mode_taskqueue_services._task_handler. + # Here we use type Any because it emulates the behaviour of + # dev_mode_taskqueue_services._task_handler. So, to match the + # type annotations with '_task_handler' we annotated the payload + # as 'Dict[str, Any]'. 
def mock_task_handler( self, url: str, @@ -48,7 +49,7 @@ def mock_task_handler( ) def setUp(self) -> None: - super(CloudTasksEmulatorUnitTests, self).setUp() + super().setUp() self.url = 'dummy_url' self.queue_name1 = 'queue_name1' self.queue_name2 = 'queue_name2' diff --git a/core/platform/taskqueue/dev_mode_taskqueue_services.py b/core/platform/taskqueue/dev_mode_taskqueue_services.py index 5fc94cbfe032..973ba95de460 100644 --- a/core/platform/taskqueue/dev_mode_taskqueue_services.py +++ b/core/platform/taskqueue/dev_mode_taskqueue_services.py @@ -18,20 +18,22 @@ from __future__ import annotations -import datetime # pylint: disable=unused-import import os from core import feconf from core.platform.taskqueue import cloud_tasks_emulator import requests -from typing import Any, Dict, Optional +from typing import TYPE_CHECKING, Any, Dict, Optional + +if TYPE_CHECKING: # pragma: no cover + import datetime GOOGLE_APP_ENGINE_PORT = os.environ['PORT'] if 'PORT' in os.environ else '8181' -# In the type annotation below, payload is of type Dict[str, Any] because -# the payload here has no constraints. +# Here we use type Any because the payload here has no constraints, so that's +# why payload is annotated with 'Dict[str, Any]' type. def _task_handler( url: str, payload: Dict[str, Any], @@ -68,8 +70,8 @@ def _task_handler( CLIENT = cloud_tasks_emulator.Emulator(task_handler=_task_handler) -# In the type annotation below, payload is of type Dict[str, Any] because -# the payload here has no constraints. +# Here we use type Any because the payload here has no constraints, so that's +# why payload is annotated with 'Dict[str, Any]' type. 
def create_http_task( queue_name: str, url: str, diff --git a/core/platform/taskqueue/dev_mode_taskqueue_services_test.py b/core/platform/taskqueue/dev_mode_taskqueue_services_test.py index 5256cee0168d..6e1eb5a5ec55 100644 --- a/core/platform/taskqueue/dev_mode_taskqueue_services_test.py +++ b/core/platform/taskqueue/dev_mode_taskqueue_services_test.py @@ -46,9 +46,9 @@ def test_creating_dev_mode_task_will_create_the_correct_post_request( correct_task_name = 'task1' - # In the type annotation below, payload is of type Dict[str, Any] - # because it mocks the behaviour of - # dev_mode_taskqueue_services.CLIENT.create_task. + # Here we use type Any because this method mocks the behavior of + # dev_mode_taskqueue_services.CLIENT.create_task. and in 'create_task' + # payload is defined as Dict[str, Any]. def mock_create_task( queue_name: str, url: str, @@ -89,9 +89,9 @@ def test_task_handler_will_create_the_correct_post_request(self) -> None: 'X-AppEngine-Fake-Is-Admin': '1', 'method': 'POST' } - # In the type annotation below, we have used Dict[str, Any] for JSON. - # This is because this function mocks requests.post function where the - # type of JSON has been defined Any, hence using Dict[str, Any] here. + # Here we use type Any because this function mocks requests.post + # function where the type of JSON has been defined as Any, hence using + # Dict[str, Any] here. 
# https://github.com/python/typeshed/blob/5e0fc4607323a4657b587bf70e3c26becf1c88d0/stubs/requests/requests/api.pyi#L78 def mock_post( url: str, diff --git a/core/platform/transactions/cloud_transaction_services.py b/core/platform/transactions/cloud_transaction_services.py index c298a0ed3ca0..ceaa97270e99 100644 --- a/core/platform/transactions/cloud_transaction_services.py +++ b/core/platform/transactions/cloud_transaction_services.py @@ -27,8 +27,8 @@ CLIENT = datastore.Client() -# Any is used here because the method `wrapper` is used as a decorator for other -# functions, and these functions can have almost any types of arguments. +# Here we use type Any because the method `wrapper` is used as a decorator for +# other functions, and these functions can have almost any types of arguments. def run_in_transaction_wrapper(fn: Callable[..., Any]) -> Callable[..., Any]: """Runs a decorated function in a transaction. Either all of the operations in the transaction are applied, or none of them are applied. @@ -43,8 +43,9 @@ def run_in_transaction_wrapper(fn: Callable[..., Any]) -> Callable[..., Any]: Exception. Whatever fn() raises. datastore_errors.TransactionFailedError. The transaction failed. """ - # Any is used here because this function is used as a decorator for other - # functions, and these functions can have almost any types of arguments. + # Here we use type Any because this function is used as a decorator for + # other functions, and these functions can have almost any types of + # arguments. @functools.wraps(fn) def wrapper(*args: Any, **kwargs: Any) -> Any: """Wrapper for the transaction.""" diff --git a/core/platform/transactions/cloud_transaction_services_test.py b/core/platform/transactions/cloud_transaction_services_test.py new file mode 100644 index 000000000000..09cf424cc4ef --- /dev/null +++ b/core/platform/transactions/cloud_transaction_services_test.py @@ -0,0 +1,54 @@ +# Copyright 2022 The Oppia Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests for the cloud_transaction_services.py""" + +from __future__ import annotations + +from core.platform.transactions import cloud_transaction_services +from core.tests import test_utils + + +class CloudTransactionServicesTests(test_utils.GenericTestBase): + """Unit tests for the cloud_transaction_services.py""" + + def test_run_in_transaction_wrapper(self) -> None: + calls_made = { + 'enter_context': False, + 'exit_context': False, + } + class MockTransaction: + def __enter__(self) -> None: + calls_made['enter_context'] = True + + def __exit__(self, *unused_args: str) -> None: + calls_made['exit_context'] = True + + class MockClient: + def transaction(self) -> MockTransaction: # pylint: disable=missing-docstring + return MockTransaction() + + swap_client = self.swap( + cloud_transaction_services, 'CLIENT', MockClient()) + + def add(x: int, y: int) -> int: + return x + y + with swap_client: + wrapper_fn = cloud_transaction_services.run_in_transaction_wrapper( + add) + result = wrapper_fn(1, 2) + + self.assertEqual(result, 3) + self.assertTrue(calls_made['enter_context']) + self.assertTrue(calls_made['exit_context']) diff --git a/core/platform/translate/cloud_translate_emulator_test.py b/core/platform/translate/cloud_translate_emulator_test.py index c2075a5ef6a0..23c7da0b441a 100644 --- a/core/platform/translate/cloud_translate_emulator_test.py +++ b/core/platform/translate/cloud_translate_emulator_test.py @@ 
-26,7 +26,7 @@ class CloudTranslateEmulatorUnitTests(test_utils.TestBase): """Tests for cloud_translate_emulator.""" def setUp(self) -> None: - super(CloudTranslateEmulatorUnitTests, self).setUp() + super().setUp() self.emulator = cloud_translate_emulator.CloudTranslateEmulator() def test_init_prepopulates_responses(self) -> None: @@ -35,12 +35,12 @@ def test_init_prepopulates_responses(self) -> None: self.emulator.PREGENERATED_TRANSLATIONS) def test_translate_with_invalid_source_language_raises_error(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( ValueError, 'Invalid source language code: invalid'): self.emulator.translate('hello world', 'invalid', 'es') def test_translate_with_invalid_target_language_raises_error(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( ValueError, 'Invalid target language code: invalid'): self.emulator.translate('hello world', 'en', 'invalid') diff --git a/core/platform/translate/cloud_translate_services.py b/core/platform/translate/cloud_translate_services.py index 01942dc9a795..f09e322d58f1 100644 --- a/core/platform/translate/cloud_translate_services.py +++ b/core/platform/translate/cloud_translate_services.py @@ -25,6 +25,8 @@ from google import auth from google.cloud import translate_v2 as translate +# The 'auth.default()' returns tuple of credentials and project ID. As we are +# only interested in credentials, we are using '[0]' to access it. 
CLIENT = translate.Client( credentials=( auth.credentials.AnonymousCredentials() diff --git a/core/platform/translate/cloud_translate_services_test.py b/core/platform/translate/cloud_translate_services_test.py index 743ac5900ecd..24edb4499437 100644 --- a/core/platform/translate/cloud_translate_services_test.py +++ b/core/platform/translate/cloud_translate_services_test.py @@ -28,7 +28,7 @@ class CloudTranslateServicesUnitTests(test_utils.TestBase): def test_translate_text_with_invalid_source_language_raises_error( self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( # Hindi (hi) is not a allowlisted language code. ValueError, 'Invalid source language code: hi'): cloud_translate_services.translate_text( @@ -37,7 +37,7 @@ def test_translate_text_with_invalid_source_language_raises_error( def test_translate_text_with_invalid_target_language_raises_error( self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( # Hindi (hi) is not a allowlisted language code. ValueError, 'Invalid target language code: hi'): cloud_translate_services.translate_text( diff --git a/core/platform/translate/dev_mode_translate_services_test.py b/core/platform/translate/dev_mode_translate_services_test.py index ee7629e67737..cf389c8dc7e4 100644 --- a/core/platform/translate/dev_mode_translate_services_test.py +++ b/core/platform/translate/dev_mode_translate_services_test.py @@ -28,7 +28,7 @@ class DevModeCloudTranslateServicesUnitTests(test_utils.TestBase): def test_translate_text_with_invalid_source_language_raises_error( self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( # Hindi (hi) is not a allowlisted language code. 
ValueError, 'Invalid source language code: hi'): dev_mode_translate_services.translate_text( @@ -37,7 +37,7 @@ def test_translate_text_with_invalid_source_language_raises_error( def test_translate_text_with_invalid_target_language_raises_error( self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( # Hindi (hi) is not a allowlisted language code. ValueError, 'Invalid target language code: hi'): dev_mode_translate_services.translate_text( diff --git a/core/platform_feature_list.py b/core/platform_feature_list.py index 9855d7a02a13..58860f71be1d 100644 --- a/core/platform_feature_list.py +++ b/core/platform_feature_list.py @@ -20,8 +20,9 @@ from core.domain import platform_parameter_list as params +from typing import List -PARAM_NAMES = params.PARAM_NAMES # pylint: disable=invalid-name +ParamNames = params.ParamNames # Names of feature objects defined in domain/platform_parameter_list.py # should be added to one of the following lists: @@ -42,21 +43,26 @@ # Names of features in dev stage, the corresponding feature flag instances must # be in dev stage otherwise it will cause a test error in the backend test. DEV_FEATURES_LIST = [ - params.PARAM_NAMES.dummy_feature + params.ParamNames.DUMMY_FEATURE, ] # Names of features in test stage, the corresponding feature flag instances must # be in test stage otherwise it will cause a test error in the backend test. -TEST_FEATURES_LIST = [ +TEST_FEATURES_LIST: List[ParamNames] = [ ] # Names of features in prod stage, the corresponding feature flag instances must # be in prod stage otherwise it will cause a test error in the backend test. 
-PROD_FEATURES_LIST = [ +PROD_FEATURES_LIST: List[ParamNames] = [ + params.ParamNames.END_CHAPTER_CELEBRATION, + params.ParamNames.CHECKPOINT_CELEBRATION, + params.ParamNames.ANDROID_BETA_LANDING_PAGE, + params.ParamNames.BLOG_PAGES, + params.ParamNames.CONTRIBUTOR_DASHBOARD_ACCOMPLISHMENTS, ] # Names of features that should not be used anymore, e.g. features that are # completed and no longer gated because their functionality is permanently # built into the codebase. -DEPRECATED_FEATURE_NAMES = [ +DEPRECATED_FEATURE_NAMES: List[ParamNames] = [ ] diff --git a/core/platform_feature_list_test.py b/core/platform_feature_list_test.py index 86af1edee391..57e21bdae252 100644 --- a/core/platform_feature_list_test.py +++ b/core/platform_feature_list_test.py @@ -22,25 +22,31 @@ import re from core import platform_feature_list -from core import python_utils +from core import utils from core.domain import platform_parameter_domain from core.domain import platform_parameter_registry as registry from core.tests import test_utils -FRONTEND_FEATURE_NAMES_PATH = os.path.join( +from typing import Final, List + +FRONTEND_FEATURE_NAMES_PATH: Final = os.path.join( os.getcwd(), 'core/templates/domain/platform_feature', 'feature-status-summary.model.ts') -ENUM_BODY_REGEXP = re.compile(r'enum FeatureNames \{(.+?)\}', flags=re.DOTALL) -ENUM_MEMBER_REGEXP = re.compile(r'([a-zA-Z0-9_]+?)\s+=\s+\'([a-zA-Z0-9_]+?)\'') +ENUM_BODY_REGEXP: Final = re.compile( + r'enum FeatureNames \{(.+?)\}', flags=re.DOTALL +) +ENUM_MEMBER_REGEXP: Final = re.compile( + r'([a-zA-Z0-9_]+?)\s+=\s+\'([a-zA-Z0-9_]+?)\'' +) class PlatformFeatureListTest(test_utils.GenericTestBase): """Tests for feature flags listed in platform_feature_list.py.""" - def setUp(self): - super(PlatformFeatureListTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.all_features_list = ( platform_feature_list.DEV_FEATURES_LIST + @@ -48,15 +54,18 @@ def setUp(self): platform_feature_list.PROD_FEATURES_LIST) 
self.all_features_set = set(self.all_features_list) - def _parse_feature_names_in_frontend(self): + def _parse_feature_names_in_frontend(self) -> List[str]: """Reads and parses feature flag definition in frontend.""" - with python_utils.open_file(FRONTEND_FEATURE_NAMES_PATH, 'r') as f: + with utils.open_file(FRONTEND_FEATURE_NAMES_PATH, 'r') as f: content = f.read() - body = ENUM_BODY_REGEXP.search(content).group(1) + body_content = ENUM_BODY_REGEXP.search(content) + # Ruling out the possibility of None for mypy type checking. + assert body_content is not None + body = body_content.group(1) return [name for _, name in ENUM_MEMBER_REGEXP.findall(body)] - def test_all_names_in_features_lists_exist(self): + def test_all_names_in_features_lists_exist(self) -> None: missing_names = [] for feature in self.all_features_set: if feature.value not in registry.Registry.parameter_registry: @@ -67,7 +76,7 @@ def test_all_names_in_features_lists_exist(self): missing_names) ) - def test_no_duplicated_names_in_features_lists(self): + def test_no_duplicated_names_in_features_lists(self) -> None: duplicate_names = [] for feature in self.all_features_set: if self.all_features_list.count(feature) > 1: @@ -78,7 +87,7 @@ def test_no_duplicated_names_in_features_lists(self): ': %s.' % (duplicate_names) ) - def test_no_duplicate_names_in_deprecated_names_list(self): + def test_no_duplicate_names_in_deprecated_names_list(self) -> None: duplicate_names = [] deprecated_features = platform_feature_list.DEPRECATED_FEATURE_NAMES for feature in set(deprecated_features): @@ -90,7 +99,7 @@ def test_no_duplicate_names_in_deprecated_names_list(self): 'list: %s.' % (duplicate_names) ) - def test_no_deprecated_names_in_features_lists(self): + def test_no_deprecated_names_in_features_lists(self) -> None: deprecated_names_set = set( platform_feature_list.DEPRECATED_FEATURE_NAMES) found_deprecated_names = [] @@ -103,7 +112,7 @@ def test_no_deprecated_names_in_features_lists(self): 'not be used: %s.' 
% (found_deprecated_names) ) - def test_all_entries_in_features_lists_are_features(self): + def test_all_entries_in_features_lists_are_features(self) -> None: non_feature_names = [] for feature in self.all_features_set: feature_flag = ( @@ -116,13 +125,13 @@ def test_all_entries_in_features_lists_are_features(self): non_feature_names) ) - def test_all_entries_in_dev_features_list_are_in_dev_stage(self): + def test_all_entries_in_dev_features_list_are_in_dev_stage(self) -> None: invalid_feature_names = [] for feature in platform_feature_list.DEV_FEATURES_LIST: feature_flag = ( registry.Registry.get_platform_parameter(feature.value)) if (feature_flag.feature_stage != - platform_parameter_domain.FEATURE_STAGES.dev.value): + platform_parameter_domain.FeatureStages.DEV.value): invalid_feature_names.append(feature.value) self.assertTrue( len(invalid_feature_names) == 0, @@ -130,13 +139,13 @@ def test_all_entries_in_dev_features_list_are_in_dev_stage(self): '\'dev\' stage: %s.' % (invalid_feature_names) ) - def test_all_entries_in_test_features_list_are_in_test_stage(self): + def test_all_entries_in_test_features_list_are_in_test_stage(self) -> None: invalid_feature_names = [] for feature in platform_feature_list.TEST_FEATURES_LIST: feature_flag = ( registry.Registry.get_platform_parameter(feature.name)) if (feature_flag.feature_stage != - platform_parameter_domain.FEATURE_STAGES.test.value): + platform_parameter_domain.FeatureStages.TEST.value): invalid_feature_names.append(feature.name) self.assertTrue( len(invalid_feature_names) == 0, @@ -144,13 +153,13 @@ def test_all_entries_in_test_features_list_are_in_test_stage(self): '\'test\' stage: %s.' 
% (invalid_feature_names) ) - def test_all_entries_in_prod_features_list_are_in_prod_stage(self): + def test_all_entries_in_prod_features_list_are_in_prod_stage(self) -> None: invalid_feature_names = [] for feature in platform_feature_list.PROD_FEATURES_LIST: feature_flag = ( registry.Registry.get_platform_parameter(feature.value)) if (feature_flag.feature_stage != - platform_parameter_domain.FEATURE_STAGES.prod.value): + platform_parameter_domain.FeatureStages.PROD.value): invalid_feature_names.append(feature.value) self.assertTrue( len(invalid_feature_names) == 0, @@ -158,7 +167,7 @@ def test_all_entries_in_prod_features_list_are_in_prod_stage(self): '\'prod\' stage: %s.' % (invalid_feature_names) ) - def test_all_names_in_features_lists_exist_in_frontend(self): + def test_all_names_in_features_lists_exist_in_frontend(self) -> None: feature_names_in_frontend = self._parse_feature_names_in_frontend() all_feature_names_set = [ feature.value for feature in self.all_features_set] @@ -170,7 +179,7 @@ def test_all_names_in_features_lists_exist_in_frontend(self): list(missing_features)) ) - def test_all_names_in_frontend_are_known(self): + def test_all_names_in_frontend_are_known(self) -> None: feature_names_in_frontend = self._parse_feature_names_in_frontend() all_feature_names_set = [ feature.value for feature in self.all_features_set] diff --git a/core/python_utils.py b/core/python_utils.py deleted file mode 100644 index fff871848002..000000000000 --- a/core/python_utils.py +++ /dev/null @@ -1,454 +0,0 @@ -# coding: utf-8 -# -# Copyright 2019 The Oppia Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS-IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Feature detection utilities for Python 2 and Python 3.""" - -from __future__ import annotations - -import io -import itertools -import os -import pkgutil -import sys - -_THIRD_PARTY_PATH = os.path.join(os.getcwd(), 'third_party', 'python_libs') -sys.path.insert(0, _THIRD_PARTY_PATH) - -_YAML_PATH = os.path.join(os.getcwd(), '..', 'oppia_tools', 'pyyaml-5.4.1') -sys.path.insert(0, _YAML_PATH) - -_CERTIFI_PATH = os.path.join( - os.getcwd(), '..', 'oppia_tools', 'certifi-2021.5.30') -sys.path.insert(0, _CERTIFI_PATH) - -import yaml # isort:skip pylint: disable=wrong-import-position, wrong-import-order - -import builtins # isort:skip pylint: disable=wrong-import-position, wrong-import-order -import past.builtins # isort:skip pylint: disable=wrong-import-position, wrong-import-order -import past.utils # isort:skip pylint: disable=wrong-import-position, wrong-import-order - -import certifi # isort:skip pylint: disable=wrong-import-position, wrong-import-order -import ssl # isort:skip pylint: disable=wrong-import-position, wrong-import-order - - -MAP = builtins.map -NEXT = builtins.next -OBJECT = builtins.object -PRINT = print -ZIP = builtins.zip - - -def SimpleXMLRPCServer( # pylint: disable=invalid-name - addr, requestHandler=None, logRequests=True, allow_none=False, - encoding=None, bind_and_activate=True): - """Returns SimpleXMLRPCServer from SimpleXMLRPCServer module if run under - Python 2 and from xmlrpc module if run under Python 3. - - Args: - addr: tuple(str, int). The host and port of the server. - requestHandler: callable. 
A factory for request handler instances. - Defaults to SimpleXMLRPCRequestHandler. - logRequests: bool. Whether to log the requests sent to the server. - allow_none: bool. Permits None in the XML-RPC responses that will be - returned from the server. - encoding: str|None. The encoding used by the XML-RPC responses that will - be returned from the server. - bind_and_activate: bool. Whether server_bind() and server_activate() are - called immediately by the constructor; defaults to true. Setting it - to false allows code to manipulate the allow_reuse_address class - variable before the address is bound. - - Returns: - SimpleXMLRPCServer. The SimpleXMLRPCServer object. - """ - try: - from xmlrpc.server import SimpleXMLRPCServer as impl # pylint: disable=import-only-modules - except ImportError: - from SimpleXMLRPCServer import SimpleXMLRPCServer as impl # pylint: disable=import-only-modules - if requestHandler is None: - try: - from xmlrpc.server import SimpleXMLRPCRequestHandler # isort:skip pylint: disable=import-only-modules - except ImportError: - from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler # isort:skip pylint: disable=import-only-modules - requestHandler = SimpleXMLRPCRequestHandler - return impl( - addr, requestHandler=requestHandler, logRequests=logRequests, - allow_none=allow_none, encoding=encoding, - bind_and_activate=bind_and_activate) - - -def redirect_stdout(new_target): - """Returns redirect_stdout from contextlib2 if run under Python 2 and from - contextlib if run under Python 3. - - Args: - new_target: FileLike. The file-like object all messages printed to - stdout will be redirected to. - - Returns: - contextlib.redirect_stdout or contextlib2.redirect_stdout. The - redirect_stdout object. 
- """ - try: - from contextlib import redirect_stdout as impl # pylint: disable=import-only-modules - except ImportError: - from contextlib2 import redirect_stdout as impl # pylint: disable=import-only-modules - return impl(new_target) - - -def string_io(buffer_value=''): - """Returns StringIO from StringIO module if run under Python 2 and from io - module if run under Python 3. - - Args: - buffer_value: str. A string that is to be converted to in-memory text - stream. - - Returns: - StringIO.StringIO or io.StringIO. The StringIO object. - """ - try: - from StringIO import StringIO # pylint: disable=import-only-modules - except ImportError: - from io import StringIO # pylint: disable=import-only-modules - return StringIO(buffer_value) # pylint: disable=disallowed-function-calls - - -def get_args_of_function_node(function_node, args_to_ignore): - """Extracts the arguments from a function definition. - - Args: - function_node: ast.FunctionDef. Represents a function. - args_to_ignore: list(str). Ignore these arguments in a function - definition. - - Returns: - list(str). The args for a function as listed in the function - definition. - """ - try: - return [ - a.arg - for a in function_node.args.args - if a.arg not in args_to_ignore - ] - except AttributeError: - return [ - a.id for a in function_node.args.args if a.id not in args_to_ignore - ] - - -def open_file(filename, mode, encoding='utf-8', newline=None): - """Open file and return a corresponding file object. - - Args: - filename: str. The file to be opened. - mode: str. Mode in which the file is opened. - encoding: str. Encoding in which the file is opened. - newline: None|str. Controls how universal newlines work. - - Returns: - _io.TextIOWrapper. The file object. - - Raises: - IOError. The file cannot be opened. - """ - # The try/except is needed here to unify the errors because io.open in - # Python 3 throws FileNotFoundError while in Python 2 it throws an IOError. 
- # This should be removed after we fully migrate to Python 3. - try: - return io.open(filename, mode, encoding=encoding, newline=newline) - except: - raise IOError('Unable to open file: %s' % filename) - - -def get_package_file_contents(package: str, filepath: str) -> str: - """Open file and return its contents. This needs to be used for files that - are loaded by the Python code directly, like constants.ts or - rich_text_components.json. This function is needed to make loading these - files work even when Oppia is packaged. - - Args: - package: str. The package where the file is located. - For Oppia the package is usually the folder in the root folder, - like 'core' or 'extensions'. - filepath: str. The path to the file in the package. - - Returns: - str. The contents of the file. - """ - try: - file = io.open(os.path.join(package, filepath), 'r', encoding='utf-8') - return file.read() - except FileNotFoundError: - return pkgutil.get_data(package, filepath).decode('utf-8') - - -def url_parse(urlstring): - """Parse a URL into six components using urlparse.urlparse if run under - Python 2 and urllib.parse.urlparse if run under Python 3. This corresponds - to the general structure of a URL: - scheme://netloc/path;parameters?query#fragment. - - Args: - urlstring: str. The URL. - - Returns: - tuple(str). The components of a URL. - """ - try: - import urllib.parse as urlparse - except ImportError: - import urlparse - return urlparse.urlparse(urlstring) # pylint: disable=disallowed-function-calls - - -def url_unsplit(url_parts): - """Combine the elements of a tuple as returned by urlsplit() into a complete - URL as a string using urlparse.urlunsplit if run under Python 2 and - urllib.parse.urlunsplit if run under Python 3. - - Args: - url_parts: tuple(str). The components of a URL. - - Returns: - str. The complete URL. 
- """ - try: - import urllib.parse as urlparse - except ImportError: - import urlparse - return urlparse.urlunsplit(url_parts) # pylint: disable=disallowed-function-calls - - -def parse_query_string(query_string): - """Parse a query string given as a string argument - (data of type application/x-www-form-urlencoded) using urlparse.parse_qs if - run under Python 2 and urllib.parse.parse_qs if run under Python 3. - - Args: - query_string: str. The query string. - - Returns: - dict. The keys are the unique query variable names and the values are - lists of values for each name. - """ - try: - import urllib.parse as urlparse - except ImportError: - import urlparse - return urlparse.parse_qs(query_string) # pylint: disable=disallowed-function-calls - - -def urllib_unquote(content) -> str: - """Replace %xx escapes by their single-character equivalent using - urllib.unquote if run under Python 2 and urllib.parse.unquote if run under - Python 3. - - Args: - content: str. The string to be unquoted. - - Returns: - str. The unquoted string. - """ - try: - import urllib.parse as urlparse - except ImportError: - import urllib as urlparse - return urlparse.unquote(content) - - -def url_quote(content): - """Quotes a string using urllib.quote if run under Python 2 and - urllib.parse.quote if run under Python 3. - - Args: - content: str. The string to be quoted. - - Returns: - str. The quoted string. - """ - try: - import urllib.parse as urlparse - except ImportError: - import urllib as urlparse - return urlparse.quote(content) - - -def url_encode(query, doseq=False): - """Convert a mapping object or a sequence of two-element tuples to a - 'url-encoded' string using urllib.urlencode if run under Python 2 and - urllib.parse.urlencode if run under Python 3. - - Args: - query: dict or tuple. The query to be encoded. - doseq: bool. If true, individual key=value pairs separated by '&' are - generated for each element of the value sequence for the key. - - Returns: - str. 
The 'url-encoded' string. - """ - try: - import urllib.parse as urlparse - except ImportError: - import urllib as urlparse - return urlparse.urlencode(query, doseq=doseq) - - -def url_open(source_url): - """Open a network object denoted by a URL for reading using - urllib2.urlopen if run under Python 2 and urllib.request.urlopen if - run under Python 3. - - Args: - source_url: str. The URL. - - Returns: - urlopen. The 'urlopen' object. - """ - # TODO(#12912): Remove pylint disable after the arg-name-for-non-keyword-arg - # check is refactored. - context = ssl.create_default_context(cafile=certifi.where()) # pylint: disable=arg-name-for-non-keyword-arg - try: - import urllib.request as urlrequest - except ImportError: - import urllib2 as urlrequest - return urlrequest.urlopen(source_url, context=context) - - -def url_request(source_url, data, headers): - """This function provides an abstraction of a URL request. It uses - urllib2.Request if run under Python 2 and urllib.request.Request if - run under Python 3. - - Args: - source_url: str. The URL. - data: str. Additional data to send to the server. - headers: dict. The request headers. - - Returns: - Request. The 'Request' object. - """ - try: - import urllib.request as urlrequest - except ImportError: - import urllib2 as urlrequest - return urlrequest.Request(source_url, data, headers) - - -def divide(number1, number2): - """This function divides number1 by number2 in the Python 2 way, i.e it - performs an integer division. - - Args: - number1: int. The dividend. - number2: int. The divisor. - - Returns: - int. The quotent. - """ - return past.utils.old_div(number1, number2) - - -def _recursively_convert_to_str(value): - """Convert all builtins.bytes and builtins.str elements in a data structure - to bytes and unicode respectively. This is required for the - yaml.safe_dump() function to work as it only works for unicode and bytes and - not builtins.bytes nor builtins.str(UNICODE). 
See: - https://stackoverflow.com/a/1950399/11755830 - - Args: - value: list|dict|BASESTRING. The data structure to convert. - - Returns: - list|dict|bytes|unicode. The data structure in bytes and unicode. - """ - if isinstance(value, list): - return [_recursively_convert_to_str(e) for e in value] - elif isinstance(value, dict): - return { - _recursively_convert_to_str(k): _recursively_convert_to_str(v) - for k, v in value.items() - } - # We are using 'type' here instead of 'isinstance' because we need to - # clearly distinguish the builtins.str and builtins.bytes strings. - elif type(value) == str: # pylint: disable=unidiomatic-typecheck - return value - elif type(value) == builtins.bytes: # pylint: disable=unidiomatic-typecheck - return value.decode('utf-8') - else: - return value - - -def yaml_from_dict(dictionary, width=80): - """Gets the YAML representation of a dict. - - Args: - dictionary: dict. Dictionary for conversion into yaml. - width: int. Width for the yaml representation, default value - is set to be of 80. - - Returns: - str. Converted yaml of the passed dictionary. - """ - dictionary = _recursively_convert_to_str(dictionary) - return yaml.safe_dump(dictionary, default_flow_style=False, width=width) - - -def create_enum(*sequential): - """Creates a enumerated constant. - - Args: - *sequential: *. Sequence List to generate the enumerations. - - Returns: - dict. Dictionary containing the enumerated constants. - """ - enum_values = dict(ZIP(sequential, sequential)) - try: - from enum import Enum # pylint: disable=import-only-modules - - # The type() of argument 1 in Enum must be str, not unicode. 
- return Enum(str('Enum'), enum_values) # pylint: disable=disallowed-function-calls - except ImportError: - _enums = {} - for name, value in enum_values.items(): - _value = { - 'name': name, - 'value': value - } - _enums[name] = type('Enum', (), _value) - return type('Enum', (), _enums) - - -def zip_longest(*args, **kwargs): - """Creates an iterator that aggregates elements from each of the iterables. - If the iterables are of uneven length, missing values are - filled-in with fillvalue. - - Args: - *args: list(*). Iterables that needs to be aggregated into an iterable. - **kwargs: dict. It contains fillvalue. - - Returns: - iterable(iterable). A sequence of aggregates elements - from each of the iterables. - """ - fillvalue = kwargs.get('fillvalue') - try: - return itertools.zip_longest(*args, fillvalue=fillvalue) - except AttributeError: - return itertools.izip_longest(*args, fillvalue=fillvalue) diff --git a/core/python_utils_test.py b/core/python_utils_test.py deleted file mode 100644 index dcc0fca3d1ea..000000000000 --- a/core/python_utils_test.py +++ /dev/null @@ -1,209 +0,0 @@ -# coding: utf-8 -# -# Copyright 2019 The Oppia Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS-IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Tests for feature detection utilities for Python 2 and Python 3.""" - -from __future__ import annotations - -import ast -import builtins -import io -import os -import sys -import unittest -import urllib - -from core import python_utils -from core.tests import test_utils -from core.tests.data import unicode_and_str_handler - - -class PythonUtilsTests(test_utils.GenericTestBase): - """Tests for feature detection utilities that are common for Python 2 and - Python 3. - """ - - def test_get_args_of_function_node(self): - function_txt = b"""def _mock_function(arg1, arg2): - pass""" - - ast_node = ast.walk(ast.parse(function_txt)) - function_node = [n for n in ast_node if isinstance(n, ast.FunctionDef)] - args_list = python_utils.get_args_of_function_node(function_node[0], []) - self.assertEqual(args_list, ['arg1', 'arg2']) - - def test_open_file(self): - with python_utils.open_file( - os.path.join('core', 'python_utils.py'), 'r' - ) as f: - file_content = f.readlines() - self.assertIsNotNone(file_content) - - def test_can_not_open_file(self): - with self.assertRaisesRegexp( - IOError, 'Unable to open file: invalid_file.py'): - with python_utils.open_file('invalid_file.py', 'r') as f: - f.readlines() - - def test_url_open(self): - response = python_utils.url_open('http://www.google.com') - self.assertEqual(response.getcode(), 200) - self.assertEqual(response.url, 'http://www.google.com') - - def test_url_request(self): - response = python_utils.url_request('http://www.google.com', None, {}) - self.assertEqual(response.get_full_url(), 'http://www.google.com') - - def test_divide(self): - self.assertEqual(python_utils.divide(4, 2), 2) - self.assertEqual(python_utils.divide(5, 2), 2) - - def test_url_unsplit(self): - response = urllib.parse.urlsplit('http://www.google.com') - self.assertEqual( - python_utils.url_unsplit(response), 'http://www.google.com') - - def test_parse_query_string(self): - response = python_utils.parse_query_string( - 
'http://www.google.com?search=oppia') - self.assertEqual(response, {'http://www.google.com?search': ['oppia']}) - - def test_urllib_unquote(self): - response = python_utils.urllib_unquote('/El%20Ni%C3%B1o/') - self.assertEqual(response, '/El Niño/') - - def test_url_parse(self): - response = python_utils.url_parse('http://www.google.com') - self.assertEqual(response.geturl(), 'http://www.google.com') - - def test_recursively_convert_to_str_with_dict(self): - test_var_1_in_unicode = str('test_var_1') - test_var_2_in_unicode = str('test_var_2') - test_var_3_in_bytes = test_var_1_in_unicode.encode(encoding='utf-8') - test_var_4_in_bytes = test_var_2_in_unicode.encode(encoding='utf-8') - test_dict = { - test_var_1_in_unicode: test_var_3_in_bytes, - test_var_2_in_unicode: test_var_4_in_bytes - } - self.assertEqual( - test_dict, - {'test_var_1': b'test_var_1', 'test_var_2': b'test_var_2'}) - - for key, val in test_dict.items(): - self.assertEqual(type(key), str) - self.assertEqual(type(val), builtins.bytes) - - dict_in_str = python_utils._recursively_convert_to_str(test_dict) # pylint: disable=protected-access - self.assertEqual( - dict_in_str, - {'test_var_1': 'test_var_1', 'test_var_2': 'test_var_2'}) - - for key, val in dict_in_str.items(): - self.assertEqual(type(key), str) - self.assertEqual(type(val), str) - - def test_recursively_convert_to_str_with_nested_structure(self): - test_var_1_in_unicode = str('test_var_1') - test_list_1 = [ - test_var_1_in_unicode, - test_var_1_in_unicode.encode(encoding='utf-8'), - 'test_var_2', - b'test_var_3', - {'test_var_4': b'test_var_5'} - ] - test_dict = {test_var_1_in_unicode: test_list_1} - self.assertEqual( - test_dict, - { - 'test_var_1': [ - 'test_var_1', b'test_var_1', 'test_var_2', b'test_var_3', - {'test_var_4': b'test_var_5'}] - } - ) - - dict_in_str = python_utils._recursively_convert_to_str(test_dict) # pylint: disable=protected-access - self.assertEqual( - dict_in_str, - { - 'test_var_1': [ - 'test_var_1', 
'test_var_1', 'test_var_2', 'test_var_3', - {'test_var_4': 'test_var_5'}] - } - ) - - for key, value in dict_in_str.items(): - self.assertNotEqual(type(key), builtins.bytes) - self.assertTrue(isinstance(key, str)) - - for item in value: - self.assertNotEqual(type(item), builtins.bytes) - self.assertTrue(isinstance(item, (str, bytes, dict))) - - for k, v in value[-1].items(): - self.assertEqual(type(k), str) - self.assertEqual(type(v), str) - - def test_create_enum_method_and_check_its_values(self): - """Test create_enum method.""" - enums = python_utils.create_enum('first', 'second', 'third') - self.assertEqual(enums.first.value, 'first') - self.assertEqual(enums.second.value, 'second') - self.assertEqual(enums.third.value, 'third') - - def test_create_enum_method_and_check_its_names(self): - """Test create_enum method.""" - enums = python_utils.create_enum('first', 'second', 'third') - self.assertEqual(enums.first.name, 'first') - self.assertEqual(enums.second.name, 'second') - self.assertEqual(enums.third.name, 'third') - - def test_enum_for_invalid_attribute(self): - enums = python_utils.create_enum('first', 'second', 'third') - with self.assertRaisesRegexp(AttributeError, 'fourth'): - getattr(enums, 'fourth') - - def test_zip_longest(self): - self.assertEqual( - [list(g) for g in python_utils.zip_longest( - [0, 1, 2, 3], [4, 5, 6], [7, 8])], - [[0, 4, 7], [1, 5, 8], [2, 6, None], [3, None, None]]) - # Zip longest with fillvalue. 
- self.assertEqual( - [''.join(g) for g in python_utils.zip_longest( - 'ABC', 'DE', 'F', fillvalue='x')], - ['ADF', 'BEx', 'Cxx']) - - -@unittest.skipUnless( - sys.version[0] == '3', 'Test cases for ensuring Python 3 behavior only') -class PythonUtilsForPython3Tests(test_utils.GenericTestBase): - """Tests for feature detection utilities for Python 3.""" - - def test_string_io(self): - stdout = python_utils.string_io() - self.assertIsInstance(stdout, io.StringIO) - - def test_unicode_and_str_chars_in_file(self): - self.assertIsInstance(unicode_and_str_handler.SOME_STR_TEXT, str) - self.assertIsInstance( - unicode_and_str_handler.SOME_UNICODE_TEXT, str) - self.assertIsInstance( - unicode_and_str_handler.SOME_BINARY_TEXT, bytes) - - with python_utils.open_file( - 'core/tests/data/unicode_and_str_handler.py', 'r') as f: - file_content = f.read() - self.assertIsInstance(file_content, str) diff --git a/core/schema_utils.py b/core/schema_utils.py index ad7446064272..0b070a7b2218 100644 --- a/core/schema_utils.py +++ b/core/schema_utils.py @@ -26,17 +26,20 @@ from __future__ import annotations +import io import numbers import re import urllib from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.domain import expression_parser from core.domain import html_cleaner from core.domain import user_domain +from extensions.objects.models import objects + +import mutagen from typing import Any, Callable, Dict, List, Optional, cast @@ -47,16 +50,20 @@ SCHEMA_KEY_POST_NORMALIZERS = 'post_normalizers' SCHEMA_KEY_CHOICES = 'choices' SCHEMA_KEY_NAME = 'name' +SCHEMA_KEY_KEYS = 'keys' +SCHEMA_KEY_VALUES = 'values' SCHEMA_KEY_SCHEMA = 'schema' SCHEMA_KEY_OBJ_TYPE = 'obj_type' SCHEMA_KEY_VALIDATORS = 'validators' SCHEMA_KEY_DEFAULT_VALUE = 'default_value' SCHEMA_KEY_OBJECT_CLASS = 'object_class' SCHEMA_KEY_VALIDATION_METHOD = 'validation_method' +SCHEMA_KEY_OPTIONS = 'options' SCHEMA_TYPE_BOOL = 'bool' 
SCHEMA_TYPE_CUSTOM = 'custom' SCHEMA_TYPE_DICT = 'dict' +SCHEMA_TYPE_DICT_WITH_VARIABLE_NO_OF_KEYS = 'variable_keys_dict' SCHEMA_TYPE_FLOAT = 'float' SCHEMA_TYPE_HTML = 'html' SCHEMA_TYPE_INT = 'int' @@ -65,15 +72,28 @@ SCHEMA_TYPE_BASESTRING = 'basestring' SCHEMA_TYPE_UNICODE_OR_NONE = 'unicode_or_none' SCHEMA_TYPE_OBJECT_DICT = 'object_dict' +SCHEMA_TYPE_WEAK_MULTIPLE = 'weak_multiple' SCHEMA_OBJ_TYPE_SUBTITLED_HTML = 'SubtitledHtml' SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE = 'SubtitledUnicode' +ALL_SCHEMAS: Dict[str, type] = { + SCHEMA_TYPE_BOOL: bool, + SCHEMA_TYPE_DICT: dict, + SCHEMA_TYPE_DICT_WITH_VARIABLE_NO_OF_KEYS: dict, + SCHEMA_TYPE_FLOAT: float, + SCHEMA_TYPE_HTML: str, + SCHEMA_TYPE_INT: int, + SCHEMA_TYPE_LIST: list, + SCHEMA_TYPE_UNICODE: str, + SCHEMA_TYPE_BASESTRING: str, + SCHEMA_TYPE_UNICODE_OR_NONE: str +} EMAIL_REGEX = r'[\w\.\+\-]+\@[\w]+\.[a-z]{2,3}' -# Using Dict[str, Any] here for schema because the following schema can have a -# recursive structure and mypy doesn't support recursive type currently. +# Here we use type Any because the following schema can have a recursive +# structure and mypy doesn't support recursive type currently. # See: https://github.com/python/mypy/issues/731 def normalize_against_schema( obj: Any, @@ -96,11 +116,22 @@ def normalize_against_schema( *. The normalized object. Raises: - AssertionError. The object fails to validate against the schema. + Exception. The object fails to validate against the schema. + AssertionError. The validation for schema validators fails. """ + # Here we use type Any because 'normalized_obj' can be of type int, str, + # Dict, List and other types too. 
normalized_obj: Any = None - if schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_BOOL: + if schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_WEAK_MULTIPLE: + for i in schema[SCHEMA_KEY_OPTIONS]: + if isinstance(obj, ALL_SCHEMAS[i]): + normalized_obj = obj + break + if normalized_obj is None: + raise Exception( + 'Type of %s is not present in options' % obj) + elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_BOOL: assert isinstance(obj, bool), ('Expected bool, received %s' % obj) normalized_obj = obj elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_CUSTOM: @@ -108,7 +139,7 @@ def normalize_against_schema( # TODO(sll): Either get rid of custom objects or find a way to merge # them into the schema framework -- probably the latter. from core.domain import object_registry - obj_class = object_registry.Registry.get_object_class_by_type( # type: ignore[no-untyped-call] + obj_class = object_registry.Registry.get_object_class_by_type( schema[SCHEMA_KEY_OBJ_TYPE]) if not apply_custom_validators: normalized_obj = normalize_against_schema( @@ -134,21 +165,35 @@ def normalize_against_schema( prop[SCHEMA_KEY_SCHEMA], global_validators=global_validators ) + elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_DICT_WITH_VARIABLE_NO_OF_KEYS: + assert isinstance(obj, dict), ('Expected dict, received %s' % obj) + normalized_obj = {} + for key, value in obj.items(): + normalized_key = normalize_against_schema( + key, schema[SCHEMA_KEY_KEYS][SCHEMA_KEY_SCHEMA], + global_validators=global_validators + ) + normalized_obj[normalized_key] = normalize_against_schema( + value, schema[SCHEMA_KEY_VALUES][SCHEMA_KEY_SCHEMA], + global_validators=global_validators + ) elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_FLOAT: + if isinstance(obj, bool): + raise Exception('Expected float, received %s' % obj) try: obj = float(obj) - except Exception: + except Exception as e: raise Exception('Could not convert %s to float: %s' % ( - type(obj).__name__, obj)) + type(obj).__name__, obj)) from e assert isinstance(obj, numbers.Real), ( 'Expected 
float, received %s' % obj) normalized_obj = obj elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_INT: try: obj = int(obj) - except Exception: + except Exception as e: raise Exception('Could not convert %s to int: %s' % ( - type(obj).__name__, obj)) + type(obj).__name__, obj)) from e assert isinstance(obj, numbers.Integral), ( 'Expected int, received %s' % obj) assert isinstance(obj, int), ('Expected int, received %s' % obj) @@ -163,7 +208,7 @@ def normalize_against_schema( obj = str(obj) assert isinstance(obj, str), ( 'Expected unicode, received %s' % obj) - normalized_obj = html_cleaner.clean(obj) # type: ignore[no-untyped-call] + normalized_obj = html_cleaner.clean(obj) elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_LIST: assert isinstance(obj, list), ('Expected list, received %s' % obj) item_schema = schema[SCHEMA_KEY_ITEMS] @@ -173,7 +218,9 @@ def normalize_against_schema( schema[SCHEMA_KEY_LEN], len(obj))) normalized_obj = [ normalize_against_schema( - item, item_schema, global_validators=global_validators + item, + item_schema, + global_validators=global_validators ) for item in obj ] elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_BASESTRING: @@ -219,11 +266,12 @@ def normalize_against_schema( validate_class = schema[SCHEMA_KEY_OBJECT_CLASS] domain_object = validate_class.from_dict(obj) domain_object.validate() + normalized_obj = domain_object else: validation_method = schema[SCHEMA_KEY_VALIDATION_METHOD] - validation_method(obj) + normalized_obj = validation_method(obj) - normalized_obj = obj + return normalized_obj else: raise Exception('Invalid schema type: %s' % schema[SCHEMA_KEY_TYPE]) @@ -246,11 +294,21 @@ def normalize_against_schema( if SCHEMA_KEY_VALIDATORS in schema: for validator in schema[SCHEMA_KEY_VALIDATORS]: kwargs = dict(validator) + expect_invalid_default_value = False + if 'expect_invalid_default_value' in kwargs: + expect_invalid_default_value = kwargs[ + 'expect_invalid_default_value'] + del kwargs['expect_invalid_default_value'] del kwargs['id'] - 
assert get_validator( - validator['id'])(normalized_obj, **kwargs), ( + validator_func = get_validator(validator['id']) + if ( + not validator_func(normalized_obj, **kwargs) and + not expect_invalid_default_value + ): + raise AssertionError( 'Validation failed: %s (%s) for object %s' % ( - validator['id'], kwargs, normalized_obj)) + validator['id'], kwargs, normalized_obj) + ) if global_validators is not None: for validator in global_validators: @@ -311,7 +369,7 @@ def get(cls, normalizer_id: str) -> Callable[..., str]: """ if not hasattr(cls, normalizer_id): raise Exception('Invalid normalizer id: %s' % normalizer_id) - # Using a cast here because the return value of getattr() method is + # Here we use cast because the return value of getattr() method is # dynamic and mypy will assume it to be Any otherwise. return cast(Callable[..., str], getattr(cls, normalizer_id)) @@ -334,15 +392,15 @@ def sanitize_url(obj: str) -> str: if obj == '': return obj url_components = urllib.parse.urlsplit(obj) - quoted_url_components = ( - urllib.parse.quote(component) for component in url_components) - raw = python_utils.url_unsplit(quoted_url_components) # type: ignore[no-untyped-call] + quoted_url_components = [ + urllib.parse.quote(component) for component in url_components] + raw = urllib.parse.urlunsplit(quoted_url_components) - acceptable = html_cleaner.filter_a('a', 'href', obj) # type: ignore[no-untyped-call] + acceptable = html_cleaner.filter_a('a', 'href', obj) assert acceptable, ( 'Invalid URL: Sanitized URL should start with ' '\'http://\' or \'https://\'; received %s' % raw) - return raw # type: ignore[no-any-return] + return raw @staticmethod def normalize_spaces(obj: str) -> str: @@ -383,10 +441,13 @@ def get(cls, validator_id: str) -> Callable[..., bool]: Returns: function. The validator method corresponding to the specified validator_id. + + Raises: + Exception. Given validator method is invalid. 
""" if not hasattr(cls, validator_id): raise Exception('Invalid validator id: %s' % validator_id) - # Using a cast here because the return value of getattr() method is + # Here we use cast because the return value of getattr() method is # dynamic and mypy will assume it to be Any otherwise. return cast(Callable[..., bool], getattr(cls, validator_id)) @@ -499,11 +560,11 @@ def is_at_most(obj: float, max_value: int) -> bool: return obj <= max_value @staticmethod - def does_not_contain_email(obj: object) -> bool: + def does_not_contain_email(obj: str) -> bool: """Ensures that obj doesn't contain a valid email. Args: - obj: object. The object to validate. + obj: str. The object to validate. Returns: bool. Whether the given object doesn't contain a valid email. @@ -541,14 +602,13 @@ def is_valid_math_expression(obj: str, algebraic: bool) -> bool: if len(obj) == 0: return True - if not expression_parser.is_valid_expression(obj): # type: ignore[no-untyped-call] + if not expression_parser.is_valid_expression(obj): return False - expression_is_algebraic = expression_parser.is_algebraic(obj) # type: ignore[no-untyped-call] - # If the algebraic flag is true, expression_is_algebraic should - # also be true, otherwise both should be false which would imply - # that the expression is numeric. - return not algebraic ^ expression_is_algebraic + expression_contains_at_least_one_variable = ( + expression_parser.contains_at_least_one_variable(obj)) + # This ensures that numeric expressions don't contain variables. 
+ return algebraic or not expression_contains_at_least_one_variable @staticmethod def is_valid_algebraic_expression(obj: str) -> bool: @@ -593,25 +653,24 @@ def is_valid_math_equation(obj: str) -> bool: is_valid_algebraic_expression = get_validator( 'is_valid_algebraic_expression') - is_valid_numeric_expression = get_validator( - 'is_valid_numeric_expression') lhs, rhs = obj.split('=') # Both sides have to be valid expressions and at least one of them has - # to be a valid algebraic expression. - lhs_is_algebraically_valid = is_valid_algebraic_expression(lhs) - rhs_is_algebraically_valid = is_valid_algebraic_expression(rhs) + # to have at least one variable. + lhs_is_valid = is_valid_algebraic_expression(lhs) + rhs_is_valid = is_valid_algebraic_expression(rhs) - lhs_is_numerically_valid = is_valid_numeric_expression(lhs) - rhs_is_numerically_valid = is_valid_numeric_expression(rhs) + if not lhs_is_valid or not rhs_is_valid: + return False - if lhs_is_algebraically_valid and rhs_is_algebraically_valid: - return True - if lhs_is_algebraically_valid and rhs_is_numerically_valid: - return True - if lhs_is_numerically_valid and rhs_is_algebraically_valid: - return True - return False + lhs_contains_variable = ( + expression_parser.contains_at_least_one_variable(lhs)) + rhs_contains_variable = ( + expression_parser.contains_at_least_one_variable(rhs)) + + if not lhs_contains_variable and not rhs_contains_variable: + return False + return True @staticmethod def is_supported_audio_language_code(obj: str) -> bool: @@ -681,3 +740,92 @@ def is_valid_username_string(obj: str) -> bool: return True except utils.ValidationError: return False + + @staticmethod + def has_expected_subtitled_content_length( + obj: objects.SubtitledUnicode, max_value: int + ) -> bool: + """Checks if the given subtitled content length is within max value. + + Args: + obj: objects.SubtitledUnicode. The object to verify. + max_value: int. The maximum allowed value for the obj. + + Returns: + bool. 
Whether the given object has length atmost the max_value. + """ + # Ruling out the possibility of different types for mypy type checking. + assert isinstance(obj, dict) + return len(obj['unicode_str']) <= max_value + + @staticmethod + def has_subtitled_html_non_empty(obj: objects.SubtitledHtml) -> bool: + """Checks if the given subtitled html content is empty + + Args: + obj: objects.SubtitledHtml. The object to verify. + + Returns: + bool. Whether the given object is empty. + """ + # Ruling out the possibility of different types for mypy type checking. + assert isinstance(obj, dict) + return obj['html'] not in ('', '

    ') + + @staticmethod + def has_unique_subtitled_contents( + obj: List[objects.SubtitledHtml] + ) -> bool: + """Checks if the given subtitled html content is uniquified. + + Args: + obj: List[objects.SubtitledHtml]. The list of SubtitledHtml + content. + + Returns: + bool. Returns True if the content inside the list is uniquified. + """ + seen_choices = [] + for choice in obj: + # Ruling out the possibility of different types for mypy type + # checking. + assert isinstance(choice, dict) + if choice['html'] in seen_choices: + return False + seen_choices.append(choice['html']) + return True + + @staticmethod + def is_valid_audio_file(obj: bytes) -> bool: + """Checks if given audio file is a valid audio file. + + Args: + obj: str. The raw audio file to validate. + + Returns: + bool. Returns True if obj is a valid audio file. + + Raises: + Exception. The obj is not a valid audio file. + """ + if not obj: + raise Exception('No audio supplied') + tempbuffer = io.BytesIO() + tempbuffer.write(obj) + tempbuffer.seek(0) + + # .mp3 is the only allowed extension. + extension = 'mp3' + try: + audio = mutagen.mp3.MP3(tempbuffer) + except mutagen.MutagenError as e: + raise Exception( + 'Audio not recognized as a %s file' % extension + ) from e + + if audio.info.length > feconf.MAX_AUDIO_FILE_LENGTH_SEC: + raise Exception( + 'Audio files must be under %s seconds in length. The uploaded ' + 'file is %.2f seconds long.' 
% ( + feconf.MAX_AUDIO_FILE_LENGTH_SEC, audio.info.length)) + return True diff --git a/core/schema_utils_test.py b/core/schema_utils_test.py index 9bf05dda67a8..09b0e7bd62fd 100644 --- a/core/schema_utils_test.py +++ b/core/schema_utils_test.py @@ -32,6 +32,8 @@ SCHEMA_KEY_LEN = schema_utils.SCHEMA_KEY_LEN SCHEMA_KEY_PROPERTIES = schema_utils.SCHEMA_KEY_PROPERTIES SCHEMA_KEY_TYPE = schema_utils.SCHEMA_KEY_TYPE +SCHEMA_KEY_KEYS = schema_utils.SCHEMA_KEY_KEYS +SCHEMA_KEY_VALUES = schema_utils.SCHEMA_KEY_VALUES SCHEMA_KEY_POST_NORMALIZERS = schema_utils.SCHEMA_KEY_POST_NORMALIZERS SCHEMA_KEY_CHOICES = schema_utils.SCHEMA_KEY_CHOICES SCHEMA_KEY_NAME = schema_utils.SCHEMA_KEY_NAME @@ -62,6 +64,8 @@ # in the relevant extensions/objects/models/objects.py class. SCHEMA_TYPE_CUSTOM = schema_utils.SCHEMA_TYPE_CUSTOM SCHEMA_TYPE_DICT = schema_utils.SCHEMA_TYPE_DICT +SCHEMA_TYPE_DICT_WITH_VARIABLE_NO_OF_KEYS = ( + schema_utils.SCHEMA_TYPE_DICT_WITH_VARIABLE_NO_OF_KEYS) SCHEMA_TYPE_FLOAT = schema_utils.SCHEMA_TYPE_FLOAT SCHEMA_TYPE_HTML = schema_utils.SCHEMA_TYPE_HTML SCHEMA_TYPE_INT = schema_utils.SCHEMA_TYPE_INT @@ -73,20 +77,22 @@ ALLOWED_SCHEMA_TYPES = [ SCHEMA_TYPE_BOOL, SCHEMA_TYPE_CUSTOM, SCHEMA_TYPE_DICT, SCHEMA_TYPE_FLOAT, SCHEMA_TYPE_HTML, SCHEMA_TYPE_INT, SCHEMA_TYPE_LIST, SCHEMA_TYPE_BASESTRING, - SCHEMA_TYPE_UNICODE, SCHEMA_TYPE_UNICODE_OR_NONE, SCHEMA_TYPE_OBJECT_DICT] + SCHEMA_TYPE_UNICODE, SCHEMA_TYPE_UNICODE_OR_NONE, SCHEMA_TYPE_OBJECT_DICT, + SCHEMA_TYPE_DICT_WITH_VARIABLE_NO_OF_KEYS] ALLOWED_CUSTOM_OBJ_TYPES = [ 'Filepath', 'MathExpressionContent', 'MusicPhrase', 'ParameterName', 'SanitizedUrl', 'Graph', 'ImageWithRegions', 'ListOfTabs', 'SkillSelector', 'SubtitledHtml', 'SubtitledUnicode', - 'SvgFilename', 'CustomOskLetters', 'PositiveInt'] + 'SvgFilename', 'AllowedVariables', 'PositiveInt'] # Schemas for the UI config for the various types. 
All of these configuration # options are optional additions to the schema, and, if omitted, should not # result in any errors. # Note to developers: please keep this in sync with # https://github.com/oppia/oppia/wiki/Schema-Based-Forms -# The following types have recurive type definition, and currently mypy does -# not support it, hence we are using type Any here. +# Here we use type Any because the following types have recursive type +# definition, and currently mypy does not support it, hence we are using +# type Any here. # See - https://github.com/python/mypy/issues/731 UI_CONFIG_SPECS: Dict[str, Dict[str, Any]] = { SCHEMA_TYPE_BOOL: {}, @@ -121,16 +127,18 @@ 'placeholder': { 'type': SCHEMA_TYPE_UNICODE, }, - }, + } } # Schemas for validators for the various types. -# The following types have recurive type definition, and currently mypy does -# not support it, hence we are using type Any here. +# Here we use type Any because the following types have recursive type +# definition, and currently mypy does not support it, hence we are +# using type Any here. # See - https://github.com/python/mypy/issues/731 VALIDATOR_SPECS: Dict[str, Dict[str, Any]] = { SCHEMA_TYPE_BOOL: {}, SCHEMA_TYPE_DICT: {}, + SCHEMA_TYPE_DICT_WITH_VARIABLE_NO_OF_KEYS: {}, SCHEMA_TYPE_FLOAT: { 'is_at_least': { 'min_value': { @@ -175,7 +183,8 @@ }], } }, - 'is_uniquified': {} + 'is_uniquified': {}, + 'has_unique_subtitled_contents': {} }, SCHEMA_TYPE_UNICODE: { 'matches_regex': { @@ -211,11 +220,19 @@ } } }, + SCHEMA_TYPE_CUSTOM: { + 'has_subtitled_html_non_empty': {}, + 'has_expected_subtitled_content_length': { + 'max_value': { + 'type': SCHEMA_TYPE_INT + } + } + } } -# The type of `validator` is Dict[str, Any] here because its values represent -# objects for normalization, and they can have any type. +# Here we use type Any because the argument `validator` represents the object +# to be normalized, and that object can be of any type. 
def _validate_ui_config(obj_type: str, ui_config: Dict[str, Any]) -> None: """Validates the value of a UI configuration. @@ -236,8 +253,8 @@ def _validate_ui_config(obj_type: str, ui_config: Dict[str, Any]) -> None: value, reference_dict[key]) -# The type of `validator` is Dict[str, Any] here because its values represent -# objects for normalization, and they can have any type. +# Here we use type Any because the argument `validator` represents the object +# to be normalized, and that object can be of any type. def _validate_validator(obj_type: str, validator: Dict[str, Any]) -> None: """Validates the value of a 'validator' field. @@ -271,7 +288,7 @@ def _validate_validator(obj_type: str, validator: Dict[str, Any]) -> None: try: schema_utils.normalize_against_schema(value, schema) except Exception as e: - raise AssertionError(e) + raise AssertionError(e) from e # Check that the id corresponds to a valid normalizer function. validator_fn = schema_utils.get_validator(validator['id']) @@ -286,9 +303,9 @@ def _validate_validator(obj_type: str, validator: Dict[str, Any]) -> None: set(customization_keys + ['obj'])))) -# Here the type chosen for `dict_to_check` is Dict[str, Any] because here we are -# only concerned about the keys of the dictionary, not its values. Using Any for -# dictionary values helps us achieve that. +# Here we use type Any because here we are only concerned about the keys +# of the 'dict_to_check' dictionary, not its values. Using Any for dictionary +# values helps us achieve that. def _validate_dict_keys( dict_to_check: Dict[str, Any], required_keys: List[str], @@ -312,8 +329,8 @@ def _validate_dict_keys( 'Extra keys: %s' % dict_to_check) -# The type Dict[str, Any] is used as type for schema because it can have a -# recursive structure and mypy doesn't support recursive type currently. +# Here we use type Any because schema can have a recursive structure and +# mypy doesn't support recursive type currently. 
# See: https://github.com/python/mypy/issues/731 def validate_schema(schema: Dict[str, Any]) -> None: """Validates a schema. @@ -352,7 +369,7 @@ def validate_schema(schema: Dict[str, Any]) -> None: _validate_dict_keys( schema, [SCHEMA_KEY_TYPE, SCHEMA_KEY_OBJ_TYPE], - [SCHEMA_KEY_REPLACEMENT_UI_CONFIG]) + [SCHEMA_KEY_REPLACEMENT_UI_CONFIG, SCHEMA_KEY_VALIDATORS]) assert schema[SCHEMA_KEY_OBJ_TYPE] in ALLOWED_CUSTOM_OBJ_TYPES, schema elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_LIST: _validate_dict_keys( @@ -388,6 +405,24 @@ def validate_schema(schema: Dict[str, Any]) -> None: prop[SCHEMA_KEY_DESCRIPTION], str), ( 'Expected %s, got %s' % ( str, prop[SCHEMA_KEY_DESCRIPTION])) + elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_DICT_WITH_VARIABLE_NO_OF_KEYS: + _validate_dict_keys( + schema, + [SCHEMA_KEY_TYPE, SCHEMA_KEY_KEYS, SCHEMA_KEY_VALUES], + OPTIONAL_SCHEMA_KEYS + ) + items = [SCHEMA_KEY_VALUES, SCHEMA_KEY_KEYS] + for item in items: + assert isinstance(schema[item], dict), ( + 'Expected dict, got %s' % (schema[item]) + ) + _validate_dict_keys( + schema[item], + [SCHEMA_KEY_SCHEMA], + OPTIONAL_SCHEMA_KEYS + ) + schema_item = schema[item] + validate_schema(schema_item[SCHEMA_KEY_SCHEMA]) elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_OBJECT_DICT: _validate_dict_keys( schema, @@ -446,20 +481,18 @@ class SchemaValidationUnitTests(test_utils.GenericTestBase): }] } - # The following types have recurive type definition, and currently mypy does - # not support it, hence we are using type Any here. - # See - https://github.com/python/mypy/issues/731 - GLOBAL_VALIDATORS: List[Dict[str, Any]] = [{ + GLOBAL_VALIDATORS: List[Dict[str, str]] = [{ 'id': 'does_not_contain_email' }] - # We are only concerned with dictionary keys here and the method should work - # regardless of dictionary value type, hence using type Any for it. - def arbitary_method(self, obj: Dict[str, Any]) -> None: + def arbitary_method(self, obj: Dict[str, str]) -> None: """Only required for testing. 
Args: obj: dict. Argument which needs to be validated. + + Raises: + Exception. Given argument is missing 'any_arg'. """ if 'any_arg' not in obj: raise Exception('Missing \'any_arg\'.') @@ -536,6 +569,32 @@ def test_schemas_are_correctly_validated(self) -> None: }, re.escape('\'len\'') ), + ( + { + 'type': 'variable_keys_dict', + 'keys': 1, + 'values': { + 'schema': { + 'type': 'basestring' + } + } + }, + 'Expected dict, got 1' + ), + ( + { + 'type': 'variable_keys_dict', + 'fake_arg': 'value', + 'values': { + 'schema': { + 'type': 'basestring' + } + } + }, + 'Missing keys: {\'type\': \'variable_keys_dict\', ' + '\'fake_arg\': \'value\', \'values\': {\'schema\': ' + '{\'type\': \'basestring\'}}}' + ), ( { 'type': 'unicode', @@ -599,8 +658,9 @@ def test_schemas_are_correctly_validated(self) -> None: ) ] - # The following types have recurive type definition, and currently mypy - # does not support it, hence we are using type Any here. + # Here we use type Any because the following types have recursive type + # definition, and currently mypy does not support it, hence we are using + # type Any here. # See - https://github.com/python/mypy/issues/731 valid_schemas: List[Dict[str, Any]] = [{ 'type': 'float' @@ -614,6 +674,18 @@ def test_schemas_are_correctly_validated(self) -> None: 'type': 'unicode' } }] + }, { + 'type': 'variable_keys_dict', + 'keys': { + 'schema': { + 'type': 'basestring' + } + }, + 'values': { + 'schema': { + 'type': 'float' + } + } }, { 'type': 'list', 'items': { @@ -659,14 +731,17 @@ def test_schemas_are_correctly_validated(self) -> None: for schema in valid_schemas: validate_schema(schema) for schemas, error_msg in invalid_schemas_with_error_messages: - with self.assertRaisesRegexp((AssertionError, KeyError), error_msg): # type: ignore[no-untyped-call] + # TODO(#13059): Here we use MyPy ignore because after we fully type + # the codebase we plan to get rid of the tests that intentionally + # test wrong inputs that we can normally catch by typing. 
+ with self.assertRaisesRegex((AssertionError, KeyError), error_msg): validate_schema(schemas) # type: ignore[arg-type] def test_normalize_against_schema_raises_exception(self) -> None: """Tests if normalize against schema raises exception for invalid key. """ - with self.assertRaisesRegexp(Exception, 'Invalid schema type: invalid'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, 'Invalid schema type: invalid'): schema = {SCHEMA_KEY_TYPE: 'invalid'} schema_utils.normalize_against_schema('obj', schema) @@ -704,7 +779,7 @@ def test_get_raises_invalid_validator_id(self) -> None: """Tests if class method 'get' in _Validator raises exception for invalid validator id. """ - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Invalid validator id: some invalid validator method name'): schema_utils.get_validator('some invalid validator method name') @@ -717,7 +792,8 @@ def test_is_valid_algebraic_expression_validator(self) -> None: 'is_valid_algebraic_expression') self.assertTrue(is_valid_algebraic_expression('a+b*2')) - self.assertFalse(is_valid_algebraic_expression('3+4/2')) + self.assertTrue(is_valid_algebraic_expression('3+4/2')) + self.assertFalse(is_valid_algebraic_expression('3+4/a*')) def test_is_valid_numeric_expression_validator(self) -> None: """Tests for the is_valid_numeric_expression static method with @@ -792,7 +868,7 @@ def test_is_url_fragment(self) -> None: self.assertFalse(validate_url_fragment('!@#$%^&*()_+=')) def test_global_validators_raise_exception_when_error_in_dict(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( AssertionError, r'^Validation failed: does_not_contain_email .* email@email.com$' ): @@ -806,7 +882,7 @@ def test_global_validators_raise_exception_when_error_in_dict(self) -> None: ) def test_global_validators_raise_exception_when_error_in_list(self) -> None: - with self.assertRaisesRegexp( # type: 
ignore[no-untyped-call] + with self.assertRaisesRegex( AssertionError, r'^Validation failed: does_not_contain_email .* email2@email.com$' ): @@ -879,6 +955,52 @@ def test_is_valid_username_string(self) -> None: self.assertFalse(is_valid_username_string('oppia')) self.assertFalse(is_valid_username_string('')) + def test_has_expected_subtitled_content_length(self) -> None: + """Checks whether the given subtitled content does not exceed the + given length. + + Returns: + bool. A boolean value representing whether the content matches + the given max length. + """ + has_expected_subtitled_content_length = ( + schema_utils.get_validator('has_expected_subtitled_content_length')) + + obj = { + 'content_id': 'id', + 'unicode_str': 'Continueeeeeeeeeeeeeeee' + } + self.assertFalse(has_expected_subtitled_content_length(obj, 20)) + + obj['unicode_str'] = 'Continue' + self.assertTrue(has_expected_subtitled_content_length(obj, 20)) + + def test_has_unique_subtitled_contents(self) -> None: + """Checks whether the subtitled html content has unique value or not. + + Returns: + bool. A boolean value representing whether the content has unique + value. + """ + has_unique_subtitled_contents = ( + schema_utils.get_validator('has_unique_subtitled_contents') + ) + + obj_list = [ + { + 'content_id': 'id_1', + 'html': '

    1

    ' + }, + { + 'content_id': 'id_2', + 'html': '

    1

    ' + } + ] + self.assertFalse(has_unique_subtitled_contents(obj_list)) + + obj_list[1]['html'] = '

    2

    ' + self.assertTrue(has_unique_subtitled_contents(obj_list)) + def test_has_length(self) -> None: """Tests if static method has_length returns true iff given list has length of the given value. @@ -894,8 +1016,8 @@ def test_has_length(self) -> None: class SchemaNormalizationUnitTests(test_utils.GenericTestBase): """Test schema-based normalization of objects.""" - # `schema` has recursive type definition, and currently mypy does not - # support it, hence we are using type Any here. + # Here we use type Any because `schema` has recursive type definition, + # and currently mypy does not support it, hence we are using type Any here. # See - https://github.com/python/mypy/issues/731 # `mappings` has type Tuple[Any, Any] because objects for normalization and # normalized objects can have any type. @@ -928,7 +1050,7 @@ def check_normalization( schema_utils.normalize_against_schema(raw_value, schema), expected_value) for value, error_msg in invalid_items_with_error_messages: - with self.assertRaisesRegexp(Exception, error_msg): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, error_msg): schema_utils.normalize_against_schema(value, schema) def test_float_schema(self) -> None: @@ -960,9 +1082,7 @@ def test_unicode_or_none_schema(self) -> None: 'type': schema_utils.SCHEMA_TYPE_UNICODE_OR_NONE, } mappings = [('a', 'a'), ('', ''), (b'bytes', 'bytes'), (None, None)] - # Type Any used because its passed as an argument to check_normalization - # method defined above. 
- invalid_values_with_error_messages: List[Tuple[List[Any], str]] = [ + invalid_values_with_error_messages: List[Tuple[List[str], str]] = [ ([], r'Expected unicode string or None, received'), ] self.check_normalization( @@ -1163,6 +1283,55 @@ def test_dict_schema(self) -> None: self.check_normalization( schema, mappings, invalid_values_with_error_messages) + def test_dict_with_variable_key_schema(self) -> None: + schema = { + 'type': schema_utils.SCHEMA_TYPE_DICT_WITH_VARIABLE_NO_OF_KEYS, + 'keys': { + 'schema': { + 'type': 'basestring' + } + }, + 'values': { + 'schema': { + 'type': schema_utils.SCHEMA_TYPE_LIST, + 'items': { + 'type': schema_utils.SCHEMA_TYPE_INT + }, + 'len': 2 + } + } + } + + mappings = [({ + 'skills_id1': [1.2, 3], + 'skills_id2': [2.0, 0] + }, { + 'skills_id1': [1, 3], + 'skills_id2': [2, 0] + }), ({ + 'skills_id1': ['45', 2], + 'skills_id2': [23, 3] + }, { + 'skills_id1': [45, 2], + 'skills_id2': [23, 3] + }), ({ + 'skills_id1': [1, 2], + 'skills_id2': [2, 3] + }, { + 'skills_id1': [1, 2], + 'skills_id2': [2, 3] + })] + invalid_values_with_error_messages = [ + ([1, 2], re.escape('Expected dict, received [1, 2]')), + ({1: 2, 'topic_id1': 3}, 'Expected string, received 1'), + ({'topics_id1': 1}, 'Expected list, received 1'), + (None, 'Expected dict, received None'), + ({'skill_id1': [45, 2, 34]}, 'Expected length of 2 got 3') + ] + + self.check_normalization( + schema, mappings, invalid_values_with_error_messages) + def test_string_schema(self) -> None: schema = { 'type': schema_utils.SCHEMA_TYPE_BASESTRING, @@ -1200,29 +1369,6 @@ def test_object_dict_schema_with_validation_method_key(self) -> None: self.check_normalization( schema, mappings, invalid_values_with_error_messages) - def test_object_dict_schema_with_object_class_key(self) -> None: - schema = { - 'type': SCHEMA_TYPE_OBJECT_DICT, - 'object_class': ValidateClassForTesting - } - - mappings = [ - ({ - 'arg_a': 'arbitary_argument_a', - 'arg_b': 'arbitary_argument_b' - }, { - 'arg_a': 
'arbitary_argument_a', - 'arg_b': 'arbitary_argument_b' - }) - ] - - # Type Any used because its passed as an argument to check_normalization - # method defined above. - invalid_values_with_error_messages: List[Tuple[Any, str]] = [] - - self.check_normalization( - schema, mappings, invalid_values_with_error_messages) - def test_notification_user_ids_list_validator(self) -> None: schema = email_manager.NOTIFICATION_USER_IDS_LIST_SCHEMA valid_user_id_list = [ @@ -1279,12 +1425,12 @@ def test_normalizer_get_raises_exception_for_invalid_id(self) -> None: """Tests if class method get of Normalizers raises exception when given an invalid normalizer id. """ - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Invalid normalizer id: some invalid normalizer method name'): schema_utils.Normalizers.get('some invalid normalizer method name') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Invalid normalizer id: normalize_space'): # Test substring of an actual id. schema_utils.Normalizers.get('normalize_space') @@ -1318,31 +1464,57 @@ def test_normalizer_sanitize_url(self) -> None: # Raise AssertionError if string does not start with http:// or # https://. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( AssertionError, 'Invalid URL: Sanitized URL should start with \'http://\' or' ' \'https://\'; received oppia.org'): sanitize_url('oppia.org') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( AssertionError, 'Invalid URL: Sanitized URL should start with \'http://\' or' ' \'https://\'; received www.oppia.org'): sanitize_url('www.oppia.org') -# We are only concerned with dictionary keys here and the method should work -# regardless of dictionary value type, hence using type Any for it. 
-def validation_method_for_testing(obj: Dict[str, Any]) -> None: +class ValidateArgumentHavingSpecificClass(test_utils.GenericTestBase): + """Test class is to validate the arguments which have a corresponding domain + class representation in the codebase. This test class is written uniquely + because it returns an object which is different from all other cases. + """ + + def test_object_dict_schema_with_object_class_key(self) -> None: + schema = { + 'type': SCHEMA_TYPE_OBJECT_DICT, + 'object_class': ValidateClassForTesting + } + + sample_dict = { + 'arg_a': 'arbitary_argument_a', + 'arg_b': 'arbitary_argument_b' + } + arg1 = schema_utils.normalize_against_schema(sample_dict, schema) + arg2 = ValidateClassForTesting.from_dict(sample_dict) + self.assertEqual(arg1.arg_a, arg2.arg_a) + + +def validation_method_for_testing(obj: Dict[str, str]) -> Dict[str, str]: """Method to test 'validation_method' key of schema. Args: obj: dict. Dictionary form of the argument. + + Returns: + dict(str, str). Returns a dict value after validation. + + Raises: + Exception. If any one argument is missing. 
""" if 'arg_a' not in obj: raise Exception('Missing arg_a in argument.') if 'arg_b' not in obj: raise Exception('Missing arg_b in argument.') + return obj class ValidateClassForTesting: diff --git a/core/storage/activity/gae_models_test.py b/core/storage/activity/gae_models_test.py index 1a376a6e0d94..5527811edd02 100644 --- a/core/storage/activity/gae_models_test.py +++ b/core/storage/activity/gae_models_test.py @@ -27,8 +27,9 @@ from mypy_imports import activity_models from mypy_imports import base_models -(base_models, activity_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.activity]) +(base_models, activity_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.ACTIVITY +]) class ActivityListModelTest(test_utils.GenericTestBase): @@ -39,6 +40,20 @@ def test_get_deletion_policy(self) -> None: activity_models.ActivityReferencesModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + activity_models.ActivityReferencesModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + sample_dict = base_models.BaseModel.get_export_policy() + sample_dict.update( + {'activity_references': base_models.EXPORT_POLICY.NOT_APPLICABLE}) + self.assertEqual( + activity_models.ActivityReferencesModel.get_export_policy(), + sample_dict) + def test_featured_activity_list_always_exists(self) -> None: featured_model_instance = ( activity_models.ActivityReferencesModel.get_or_create('featured')) @@ -47,7 +62,7 @@ def test_featured_activity_list_always_exists(self) -> None: self.assertEqual(featured_model_instance.activity_references, []) def test_retrieving_non_existent_list(self) -> None: - with self.assertRaisesRegexp(Exception, 'Invalid ActivityListModel'): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, 'Invalid ActivityListModel'): activity_models.ActivityReferencesModel.get_or_create( 'nonexistent_key') diff --git a/core/storage/app_feedback_report/gae_models.py b/core/storage/app_feedback_report/gae_models.py index 0dc8f9ba5ab8..af8e42e6d3be 100644 --- a/core/storage/app_feedback_report/gae_models.py +++ b/core/storage/app_feedback_report/gae_models.py @@ -17,41 +17,76 @@ from __future__ import annotations import datetime +import enum from core import feconf -from core import python_utils from core import utils from core.platform import models -from typing import Any, Dict, List, Optional, Sequence, TypeVar +from typing import ( + Dict, Final, List, Literal, Optional, Sequence, TypedDict, TypeVar) SELF_REPORT_MODEL = TypeVar( # pylint: disable=invalid-name - 'SELF_REPORT_MODEL', bound='AppFeedbackReportModel') + 'SELF_REPORT_MODEL', bound='AppFeedbackReportModel' +) MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import datastore_services + from mypy_imports import user_models -(base_models,) = 
models.Registry.import_models([models.NAMES.base_model]) +(base_models, user_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.USER +]) datastore_services = models.Registry.import_datastore_services() -PLATFORM_CHOICE_ANDROID = 'android' -PLATFORM_CHOICE_WEB = 'web' -PLATFORM_CHOICES = [PLATFORM_CHOICE_ANDROID, PLATFORM_CHOICE_WEB] -GITHUB_REPO_CHOICES = PLATFORM_CHOICES +PLATFORM_CHOICE_ANDROID: Final = 'android' +PLATFORM_CHOICE_WEB: Final = 'web' +PLATFORM_CHOICES: Final = [PLATFORM_CHOICE_ANDROID, PLATFORM_CHOICE_WEB] +GITHUB_REPO_CHOICES: Final = PLATFORM_CHOICES + + +class ReportInfoDict(TypedDict): + """Type for the report_info dictionary.""" + + user_feedback_selected_items: List[str] + user_feedback_other_text_input: str + build_fingerprint: str + event_logs: List[str] + logcat_logs: List[str] + package_version_code: int + language_locale_code: str + entry_point_info: Dict[str, str] + text_size: str + only_allows_wifi_download_and_update: bool + automatically_update_topics: bool + is_curriculum_admin: bool + android_device_language_locale_code: str + account_is_profile_admin: bool + network_type: str + # The model field names that can be filtered / sorted for when maintainers # triage feedback reports. 
-FILTER_FIELD_NAMES = python_utils.create_enum( # type: ignore[no-untyped-call] - 'platform', 'report_type', 'entry_point', 'submitted_on', - 'android_device_model', 'android_sdk_version', 'text_language_code', - 'audio_language_code', 'platform_version', - 'android_device_country_locale_code') +class FilterFieldNames(enum.Enum): + """Enum for the model field names that can be filtered""" + + PLATFORM = 'platform' + REPORT_TYPE = 'report_type' + ENTRY_POINT = 'entry_point' + SUBMITTED_ON = 'submitted_on' + ANDROID_DEVICE_MODEL = 'android_device_model' + ANDROID_SDK_VERSION = 'android_sdk_version' + TEXT_LANGUAGE_CODE = 'text_language_code' + AUDIO_LANGUAGE_CODE = 'audio_language_code' + PLATFORM_VERSION = 'platform_version' + ANDROID_DEVICE_COUNTRY_LOCALE_CODE = 'android_device_country_locale_code' + # An ID used for stats model entities tracking all unticketed reports. -UNTICKETED_ANDROID_REPORTS_STATS_TICKET_ID = ( +UNTICKETED_ANDROID_REPORTS_STATS_TICKET_ID: Final = ( 'unticketed_android_reports_stats_ticket_id') @@ -68,7 +103,7 @@ class AppFeedbackReportModel(base_models.BaseModel): """ # We use the model id as a key in the Takeout dict. - ID_IS_USED_AS_TAKEOUT_KEY = True + ID_IS_USED_AS_TAKEOUT_KEY: Literal[True] = True # The platform (web or Android) that the report is sent from and that the # feedback corresponds to. @@ -158,26 +193,26 @@ class AppFeedbackReportModel(base_models.BaseModel): # objects/TypedDict to remove Any from type-annotation below. 
@classmethod def create( - cls, - entity_id: str, - platform: str, - submitted_on: datetime.datetime, - local_timezone_offset_hrs: int, - report_type: str, - category: str, - platform_version: str, - android_device_country_locale_code: Optional[str], - android_sdk_version: Optional[int], - android_device_model: Optional[str], - entry_point: str, - entry_point_topic_id: Optional[str], - entry_point_story_id: Optional[str], - entry_point_exploration_id: Optional[str], - entry_point_subtopic_id: Optional[str], - text_language_code: str, - audio_language_code: str, - android_report_info: Optional[Dict[str, Any]], - web_report_info: Optional[Dict[str, Any]] + cls, + entity_id: str, + platform: str, + submitted_on: datetime.datetime, + local_timezone_offset_hrs: int, + report_type: str, + category: str, + platform_version: str, + android_device_country_locale_code: Optional[str], + android_sdk_version: Optional[int], + android_device_model: Optional[str], + entry_point: str, + entry_point_topic_id: Optional[str], + entry_point_story_id: Optional[str], + entry_point_exploration_id: Optional[str], + entry_point_subtopic_id: Optional[str], + text_language_code: str, + audio_language_code: str, + android_report_info: Optional[ReportInfoDict], + web_report_info: Optional[ReportInfoDict] ) -> str: """Creates a new AppFeedbackReportModel instance and returns its ID. @@ -254,9 +289,9 @@ def create( @classmethod def generate_id( - cls, - platform: str, - submitted_on_datetime: datetime.datetime + cls, + platform: str, + submitted_on_datetime: datetime.datetime ) -> str: """Generates key for the instance of AppFeedbackReportModel class in the required format with the arguments provided. @@ -270,6 +305,9 @@ def generate_id( str. The generated ID for this entity using platform, submitted_on_sec, and a random string, of the form '[platform].[submitted_on_msec].[random hash]'. + + Raises: + Exception. If the id generator is producing too many collisions. 
""" submitted_datetime_in_msec = utils.get_time_in_millisecs( submitted_on_datetime) @@ -309,7 +347,9 @@ def get_all_unscrubbed_expiring_report_models( return report_models @classmethod - def get_filter_options_for_field(cls, filter_field: str) -> List[str]: + def get_filter_options_for_field( + cls, filter_field: FilterFieldNames + ) -> List[str]: """Fetches values that can be used to filter reports by. Args: @@ -319,34 +359,34 @@ def get_filter_options_for_field(cls, filter_field: str) -> List[str]: Returns: list(str). The possible values that the field name can have. """ - query = cls.query(projection=[filter_field.name], distinct=True) # type: ignore[attr-defined] + query = cls.query(projection=[filter_field.value], distinct=True) filter_values = [] - if filter_field == FILTER_FIELD_NAMES.report_type: + if filter_field == FilterFieldNames.REPORT_TYPE: filter_values = [model.report_type for model in query] - elif filter_field == FILTER_FIELD_NAMES.platform: + elif filter_field == FilterFieldNames.PLATFORM: filter_values = [model.platform for model in query] - elif filter_field == FILTER_FIELD_NAMES.entry_point: + elif filter_field == FilterFieldNames.ENTRY_POINT: filter_values = [model.entry_point for model in query] - elif filter_field == FILTER_FIELD_NAMES.submitted_on: + elif filter_field == FilterFieldNames.SUBMITTED_ON: filter_values = [model.submitted_on.date() for model in query] - elif filter_field == FILTER_FIELD_NAMES.android_device_model: + elif filter_field == FilterFieldNames.ANDROID_DEVICE_MODEL: filter_values = [model.android_device_model for model in query] - elif filter_field == FILTER_FIELD_NAMES.android_sdk_version: + elif filter_field == FilterFieldNames.ANDROID_SDK_VERSION: filter_values = [model.android_sdk_version for model in query] - elif filter_field == FILTER_FIELD_NAMES.text_language_code: + elif filter_field == FilterFieldNames.TEXT_LANGUAGE_CODE: filter_values = [model.text_language_code for model in query] - elif filter_field == 
FILTER_FIELD_NAMES.audio_language_code: + elif filter_field == FilterFieldNames.AUDIO_LANGUAGE_CODE: filter_values = [model.audio_language_code for model in query] - elif filter_field == FILTER_FIELD_NAMES.platform_version: + elif filter_field == FilterFieldNames.PLATFORM_VERSION: filter_values = [model.platform_version for model in query] elif filter_field == ( - FILTER_FIELD_NAMES.android_device_country_locale_code): + FilterFieldNames.ANDROID_DEVICE_COUNTRY_LOCALE_CODE): filter_values = [ model.android_device_country_locale_code for model in query] else: raise utils.InvalidInputException( 'The field %s is not a valid field to filter reports on' % ( - filter_field.name)) # type: ignore[attr-defined] + filter_field.name)) return filter_values @staticmethod @@ -407,8 +447,14 @@ def export_data(cls, user_id: str) -> Dict[str, Dict[str, str]]: for report_model in report_models: submitted_on_msec = utils.get_time_in_millisecs( report_model.submitted_on) + if utils.is_user_id_valid(report_model.scrubbed_by): + scrubbed_by_user_model = user_models.UserSettingsModel.get( + report_model.scrubbed_by) + scrubbed_by_username = scrubbed_by_user_model.username + else: + scrubbed_by_username = None user_data[report_model.id] = { - 'scrubbed_by': report_model.scrubbed_by, + 'scrubbed_by': scrubbed_by_username, 'platform': report_model.platform, 'ticket_id': report_model.ticket_id, 'submitted_on': utils.get_human_readable_time_string( @@ -485,14 +531,14 @@ class AppFeedbackReportTicketModel(base_models.BaseModel): @classmethod def create( - cls, - entity_id: str, - ticket_name: str, - platform: str, - github_issue_repo_name: Optional[str], - github_issue_number: Optional[int], - newest_report_timestamp: datetime.datetime, - report_ids: List[str] + cls, + entity_id: str, + ticket_name: str, + platform: str, + github_issue_repo_name: Optional[str], + github_issue_number: Optional[int], + newest_report_timestamp: datetime.datetime, + report_ids: List[str] ) -> str: """Creates a new 
AppFeedbackReportTicketModel instance and returns its ID. @@ -539,6 +585,9 @@ class in the required format with the arguments provided. milliseconds (as the entity's creation timestamp), a SHA1 hash of the ticket_name, and a random string, of the form '[creation_datetime_msec]:[hash(ticket_name)]:[random hash]'. + + Raises: + Exception. If the id generator is producing too many collisions. """ current_datetime_in_msec = utils.get_time_in_millisecs( datetime.datetime.utcnow()) @@ -633,13 +682,13 @@ class AppFeedbackReportStatsModel(base_models.BaseModel): @classmethod def create( - cls, - entity_id: str, - platform: str, - ticket_id: str, - stats_tracking_date: datetime.date, - total_reports_submitted: int, - daily_param_stats: Dict[str, Dict[str, int]] + cls, + entity_id: str, + platform: str, + ticket_id: str, + stats_tracking_date: datetime.date, + total_reports_submitted: int, + daily_param_stats: Dict[str, Dict[str, int]] ) -> str: """Creates a new AppFeedbackReportStatsModel instance and returns its ID. @@ -673,10 +722,10 @@ def create( @classmethod def calculate_id( - cls, - platform: str, - ticket_id: Optional[str], - stats_tracking_date: datetime.date + cls, + platform: str, + ticket_id: Optional[str], + stats_tracking_date: datetime.date ) -> str: """Generates key for the instance of AppFeedbackReportStatsModel class in the required format with the arguments provided. 
diff --git a/core/storage/app_feedback_report/gae_models_test.py b/core/storage/app_feedback_report/gae_models_test.py index b4b9fcfaf80b..651dab48c536 100644 --- a/core/storage/app_feedback_report/gae_models_test.py +++ b/core/storage/app_feedback_report/gae_models_test.py @@ -17,57 +17,70 @@ from __future__ import annotations import datetime +import enum import types from core import feconf -from core import python_utils from core import utils from core.platform import models from core.tests import test_utils -from mypy_imports import app_feedback_report_models, base_models # isort:skip +from typing import Final, List -from typing import List, Any # isort:skip # pylint: disable=unused-import +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import app_feedback_report_models + from mypy_imports import base_models -(base_models, app_feedback_report_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.app_feedback_report]) +(base_models, app_feedback_report_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.APP_FEEDBACK_REPORT +]) class AppFeedbackReportModelTests(test_utils.GenericTestBase): """Tests for the AppFeedbackReportModel class.""" - PLATFORM_ANDROID = 'android' - PLATFORM_WEB = 'web' + PLATFORM_ANDROID: Final = 'android' + PLATFORM_WEB: Final = 'web' # Timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC. - REPORT_SUBMITTED_TIMESTAMP_1 = datetime.datetime.fromtimestamp(1615151836) - REPORT_SUBMITTED_TIMESTAMP_1_MSEC = ( + REPORT_SUBMITTED_TIMESTAMP_1: Final = datetime.datetime.fromtimestamp( + 1615151836 + ) + REPORT_SUBMITTED_TIMESTAMP_1_MSEC: Final = ( utils.get_time_in_millisecs(REPORT_SUBMITTED_TIMESTAMP_1)) # Timestamp in sec since epoch for Mar 12 2021 3:22:17 UTC. 
- REPORT_SUBMITTED_TIMESTAMP_2 = datetime.datetime.fromtimestamp(1615519337) - REPORT_SUBMITTED_TIMESTAMP_2_MSEC = ( + REPORT_SUBMITTED_TIMESTAMP_2: Final = datetime.datetime.fromtimestamp( + 1615519337 + ) + REPORT_SUBMITTED_TIMESTAMP_2_MSEC: Final = ( utils.get_time_in_millisecs(REPORT_SUBMITTED_TIMESTAMP_2)) # Timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC. - TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836) - TICKET_CREATION_TIMESTAMP_MSEC = ( + TICKET_CREATION_TIMESTAMP: Final = datetime.datetime.fromtimestamp( + 1616173836 + ) + TICKET_CREATION_TIMESTAMP_MSEC: Final = ( utils.get_time_in_millisecs(TICKET_CREATION_TIMESTAMP)) - TICKET_ID = '%s.%s.%s' % ( + TICKET_ID: Final = '%s.%s.%s' % ( 'random_hash', int(TICKET_CREATION_TIMESTAMP_MSEC), '16CharString1234') - USER_ID = 'user_1' - REPORT_TYPE_SUGGESTION = 'suggestion' - CATEGORY_OTHER = 'other' - PLATFORM_VERSION = '0.1-alpha-abcdef1234' - DEVICE_COUNTRY_LOCALE_CODE_INDIA = 'in' - ANDROID_DEVICE_MODEL = 'Pixel 4a' - ANDROID_SDK_VERSION = 28 - ENTRY_POINT_NAVIGATION_DRAWER = 'navigation_drawer' - TEXT_LANGUAGE_CODE_ENGLISH = 'en' - AUDIO_LANGUAGE_CODE_ENGLISH = 'en' - ANDROID_REPORT_INFO = { + REPORT_TYPE_SUGGESTION: Final = 'suggestion' + CATEGORY_OTHER: Final = 'other' + PLATFORM_VERSION: Final = '0.1-alpha-abcdef1234' + DEVICE_COUNTRY_LOCALE_CODE_INDIA: Final = 'in' + ANDROID_DEVICE_MODEL: Final = 'Pixel 4a' + ANDROID_SDK_VERSION: Final = 28 + ENTRY_POINT_NAVIGATION_DRAWER: Final = 'navigation_drawer' + TEXT_LANGUAGE_CODE_ENGLISH: Final = 'en' + AUDIO_LANGUAGE_CODE_ENGLISH: Final = 'en' + ANDROID_REPORT_INFO: app_feedback_report_models.ReportInfoDict = { + 'user_feedback_selected_items': [], 'user_feedback_other_text_input': 'add an admin', 'event_logs': ['event1', 'event2'], 'logcat_logs': ['logcat1', 'logcat2'], 'package_version_code': 1, + 'build_fingerprint': 'example_fingerprint_id', + 'network_type': 'wifi', + 'android_device_language_locale_code': 'en', 
'language_locale_code': 'en', 'entry_point_info': { 'entry_point_name': 'crash', @@ -75,17 +88,37 @@ class AppFeedbackReportModelTests(test_utils.GenericTestBase): 'text_size': 'MEDIUM_TEXT_SIZE', 'only_allows_wifi_download_and_update': True, 'automatically_update_topics': False, - 'is_curriculum_admin': False + 'is_curriculum_admin': False, + 'account_is_profile_admin': False } - WEB_REPORT_INFO = { - 'user_feedback_other_text_input': 'add an admin' + WEB_REPORT_INFO: app_feedback_report_models.ReportInfoDict = { + 'user_feedback_selected_items': [], + 'user_feedback_other_text_input': 'add an admin', + 'event_logs': ['event1', 'event2'], + 'logcat_logs': ['logcat1', 'logcat2'], + 'package_version_code': 1, + 'build_fingerprint': 'example_fingerprint_id', + 'network_type': 'wifi', + 'android_device_language_locale_code': 'en', + 'language_locale_code': 'en', + 'entry_point_info': { + 'entry_point_name': 'crash', + }, + 'text_size': 'MEDIUM_TEXT_SIZE', + 'only_allows_wifi_download_and_update': True, + 'automatically_update_topics': False, + 'is_curriculum_admin': False, + 'account_is_profile_admin': False } - ANDROID_REPORT_INFO_SCHEMA_VERSION = 1 - WEB_REPORT_INFO_SCHEMA_VERSION = 1 + ANDROID_REPORT_INFO_SCHEMA_VERSION: Final = 1 + WEB_REPORT_INFO_SCHEMA_VERSION: Final = 1 def setUp(self) -> None: """Set up models in datastore for use in testing.""" - super(AppFeedbackReportModelTests, self).setUp() + super().setUp() + + self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME) + self.user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL) self.feedback_report_model = ( app_feedback_report_models.AppFeedbackReportModel( @@ -94,7 +127,7 @@ def setUp(self) -> None: int(self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC), 'randomInteger123'), platform=self.PLATFORM_ANDROID, - scrubbed_by=self.USER_ID, + scrubbed_by=self.user_id, ticket_id='%s.%s.%s' % ( 'random_hash', int(self.TICKET_CREATION_TIMESTAMP_MSEC), @@ -135,8 +168,6 @@ def 
test_create_and_get_android_report_model(self) -> None: report_model = app_feedback_report_models.AppFeedbackReportModel.get( report_id) - # Ruling out the possibility of None for mypy type checking. - assert report_model is not None self.assertEqual(report_model.platform, self.PLATFORM_ANDROID) self.assertEqual( @@ -160,8 +191,6 @@ def test_create_and_get_web_report_model(self) -> None: report_model = app_feedback_report_models.AppFeedbackReportModel.get( report_id) - # Ruling out the possibility of None for mypy type checking. - assert report_model is not None self.assertEqual(report_model.platform, self.PLATFORM_WEB) self.assertEqual( @@ -173,7 +202,7 @@ def test_create_and_get_web_report_model(self) -> None: def test_create_raises_exception_by_mocking_collision(self) -> None: model_class = app_feedback_report_models.AppFeedbackReportModel # Test Exception for AppFeedbackReportModel. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'The id generator for AppFeedbackReportModel is ' 'producing too many collisions.'): # Swap dependent method get_by_id to simulate collision every time. 
@@ -202,10 +231,37 @@ def test_get_deletion_policy(self) -> None: model.get_deletion_policy(), base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE) - def test_export_data_nontrivial(self) -> None: + def test_export_data_without_scrubber(self) -> None: + self.feedback_report_model.scrubbed_by = 'id' + self.feedback_report_model.update_timestamps() + self.feedback_report_model.put() + + exported_data = ( + app_feedback_report_models.AppFeedbackReportModel.export_data('id')) + + report_id = '%s.%s.%s' % ( + self.PLATFORM_ANDROID, + int(self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC), + 'randomInteger123') + expected_data = { + report_id: { + 'scrubbed_by': None, + 'platform': self.PLATFORM_ANDROID, + 'ticket_id': self.TICKET_ID, + 'submitted_on': utils.get_human_readable_time_string( + self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC), + 'local_timezone_offset_hrs': 0, + 'report_type': self.REPORT_TYPE_SUGGESTION, + 'category': self.CATEGORY_OTHER, + 'platform_version': self.PLATFORM_VERSION + } + } + self.assertEqual(exported_data, expected_data) + + def test_export_data_with_scrubber(self) -> None: exported_data = ( app_feedback_report_models.AppFeedbackReportModel.export_data( - self.USER_ID)) + self.user_id)) report_id = '%s.%s.%s' % ( self.PLATFORM_ANDROID, @@ -213,7 +269,7 @@ def test_export_data_nontrivial(self) -> None: 'randomInteger123') expected_data = { report_id: { - 'scrubbed_by': self.USER_ID, + 'scrubbed_by': self.NEW_USER_USERNAME, 'platform': self.PLATFORM_ANDROID, 'ticket_id': self.TICKET_ID, 'submitted_on': utils.get_human_readable_time_string( @@ -279,8 +335,6 @@ def test_has_reference_to_user_id(self) -> None: int(self.REPORT_SUBMITTED_TIMESTAMP_1_MSEC), 'randomInteger123') model_entity = model_class.get(report_id) - # Ruling out the possibility of None for mypy type checking. 
- assert model_entity is not None model_entity.scrubbed_by = 'scrubber_user' model_entity.update_timestamps() model_entity.put() @@ -289,22 +343,119 @@ def test_has_reference_to_user_id(self) -> None: self.assertFalse(model_class.has_reference_to_user_id('id_x')) def test_get_filter_options_with_invalid_field_throws_exception( - self) -> None: + self + ) -> None: model_class = app_feedback_report_models.AppFeedbackReportModel - invalid_filter = python_utils.create_enum('invalid_field') # type: ignore[no-untyped-call] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + + class InvalidFilter(enum.Enum): + """Invalid filter.""" + + INVALID_FIELD = 'invalid_field' + with self.assertRaisesRegex( utils.InvalidInputException, 'The field %s is not a valid field to filter reports on' % ( - invalid_filter.invalid_field.name) + InvalidFilter.INVALID_FIELD.name) ): with self.swap( model_class, 'query', - self._mock_query_filters_returns_empy_list): + self._mock_query_filters_returns_empty_list): + # Here we use MyPy ignore because we passes arg of type + # InvalidFilter to type class filter_field_names. This is done + # to ensure that InvalidInputException is thrown. 
model_class.get_filter_options_for_field( - invalid_filter.invalid_field) + InvalidFilter.INVALID_FIELD) # type: ignore[arg-type] + + def test_get_filter_options_returns_correctly(self) -> None: + model = app_feedback_report_models.AppFeedbackReportModel + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames.REPORT_TYPE), + [self.REPORT_TYPE_SUGGESTION]) + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames.PLATFORM), + [self.PLATFORM_ANDROID]) + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames.ENTRY_POINT), + [self.ENTRY_POINT_NAVIGATION_DRAWER]) + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames.SUBMITTED_ON), + [self.REPORT_SUBMITTED_TIMESTAMP_1.date()]) + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames + .ANDROID_DEVICE_MODEL), + [self.ANDROID_DEVICE_MODEL]) + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames.TEXT_LANGUAGE_CODE), + [self.TEXT_LANGUAGE_CODE_ENGLISH]) + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames + .ANDROID_SDK_VERSION), + [self.ANDROID_SDK_VERSION]) + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames + .AUDIO_LANGUAGE_CODE), + [self.AUDIO_LANGUAGE_CODE_ENGLISH]) + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames.PLATFORM_VERSION), + [self.PLATFORM_VERSION]) + self.assertEqual( + model.get_filter_options_for_field( + app_feedback_report_models.FilterFieldNames + .ANDROID_DEVICE_COUNTRY_LOCALE_CODE), + [self.DEVICE_COUNTRY_LOCALE_CODE_INDIA]) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'platform': base_models.EXPORT_POLICY.EXPORTED, + 'scrubbed_by': base_models.EXPORT_POLICY.EXPORTED, + 'ticket_id': base_models.EXPORT_POLICY.EXPORTED, + 'submitted_on': base_models.EXPORT_POLICY.EXPORTED, + 'local_timezone_offset_hrs': base_models.EXPORT_POLICY.EXPORTED, + 'report_type': base_models.EXPORT_POLICY.EXPORTED, + 'category': base_models.EXPORT_POLICY.EXPORTED, + 'platform_version': base_models.EXPORT_POLICY.EXPORTED, + 'android_device_country_locale_code': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'android_device_model': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'android_sdk_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entry_point': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entry_point_topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entry_point_story_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entry_point_exploration_id': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'entry_point_subtopic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'text_language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'audio_language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'android_report_info': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'android_report_info_schema_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'web_report_info': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'web_report_info_schema_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = app_feedback_report_models.AppFeedbackReportModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = app_feedback_report_models.AppFeedbackReportModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) - def _mock_query_filters_returns_empy_list( - self, projection: bool, distinct: bool) -> List[Any]: 
# pylint: disable=unused-argument + def _mock_query_filters_returns_empty_list( + self, projection: bool, distinct: bool # pylint: disable=unused-argument + ) -> List[str]: """Mock the model query to test for an invalid filter field. Named parameters 'projection' and 'distinct' are required to mock the query function. @@ -316,23 +467,27 @@ class AppFeedbackReportTicketModelTests(test_utils.GenericTestBase): """Tests for the AppFeedbackReportTicketModel class.""" # Timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC. - REPORT_SUBMITTED_TIMESTAMP = datetime.datetime.fromtimestamp(1615151836) - REPORT_SUBMITTED_TIMESTAMP_MSEC = utils.get_time_in_millisecs( + REPORT_SUBMITTED_TIMESTAMP: Final = datetime.datetime.fromtimestamp( + 1615151836 + ) + REPORT_SUBMITTED_TIMESTAMP_MSEC: Final = utils.get_time_in_millisecs( REPORT_SUBMITTED_TIMESTAMP) # Timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC. - NEWEST_REPORT_TIMESTAMP = datetime.datetime.fromtimestamp(1615151836) + NEWEST_REPORT_TIMESTAMP: Final = datetime.datetime.fromtimestamp(1615151836) # Timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC. 
- TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836) - TICKET_CREATION_TIMESTAMP_MSEC = utils.get_time_in_millisecs( + TICKET_CREATION_TIMESTAMP: Final = datetime.datetime.fromtimestamp( + 1616173836 + ) + TICKET_CREATION_TIMESTAMP_MSEC: Final = utils.get_time_in_millisecs( TICKET_CREATION_TIMESTAMP) - PLATFORM = 'android' - PLATFORM_VERSION = '0.1-alpha-abcdef1234' - TICKET_NAME = 'example ticket name' - TICKET_ID = '%s.%s.%s' % ( + PLATFORM: Final = 'android' + PLATFORM_VERSION: Final = '0.1-alpha-abcdef1234' + TICKET_NAME: Final = 'example ticket name' + TICKET_ID: Final = '%s.%s.%s' % ( 'random_hash', int(TICKET_CREATION_TIMESTAMP_MSEC), '16CharString1234') - REPORT_IDS = ['%s.%s.%s' % ( + REPORT_IDS: Final = ['%s.%s.%s' % ( PLATFORM, int(REPORT_SUBMITTED_TIMESTAMP_MSEC), 'randomInteger123')] @@ -350,8 +505,6 @@ def test_create_and_get_ticket_model(self) -> None: ticket_model = ( app_feedback_report_models.AppFeedbackReportTicketModel.get( ticket_id)) - # Ruling out the possibility of None for mypy type checking. - assert ticket_model is not None self.assertEqual(ticket_model.id, ticket_id) self.assertEqual(ticket_model.platform, self.PLATFORM) @@ -363,7 +516,7 @@ def test_create_and_get_ticket_model(self) -> None: def test_create_raises_exception_by_mocking_collision(self) -> None: model_class = app_feedback_report_models.AppFeedbackReportTicketModel # Test Exception for AppFeedbackReportTicketModel. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'The id generator for AppFeedbackReportTicketModel is producing too' 'many collisions.' 
@@ -390,23 +543,47 @@ def test_get_lowest_supported_role(self) -> None: self.assertEqual( model.get_lowest_supported_role(), feconf.ROLE_ID_MODERATOR) + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'ticket_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'platform': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'github_issue_repo_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'github_issue_number': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'archived': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'newest_report_timestamp': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'report_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = app_feedback_report_models.AppFeedbackReportTicketModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = app_feedback_report_models.AppFeedbackReportTicketModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + class AppFeedbackReportStatsModelTests(test_utils.GenericTestBase): """Tests for the AppFeedbackReportStatsModel class.""" # Timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC. - TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836) - TICKET_CREATION_TIMESTAMP_MSEC = ( + TICKET_CREATION_TIMESTAMP: Final = datetime.datetime.fromtimestamp( + 1616173836 + ) + TICKET_CREATION_TIMESTAMP_MSEC: Final = ( utils.get_time_in_millisecs(TICKET_CREATION_TIMESTAMP)) - TICKET_ID = '%s.%s.%s' % ( + TICKET_ID: Final = '%s.%s.%s' % ( 'random_hash', int(TICKET_CREATION_TIMESTAMP_MSEC), '16CharString1234') # Timestamp date in sec since epoch for Mar 19 2021 UTC. 
- STATS_DATE = datetime.date.fromtimestamp(1616173836) - DAILY_STATS = { + STATS_DATE: Final = datetime.date.fromtimestamp(1616173836) + DAILY_STATS: Final = { 'report_type': { 'suggestion': 1, 'issue': 1, 'crash': 1}} - TOTAL_REPORTS_SUBMITTED = 3 + TOTAL_REPORTS_SUBMITTED: Final = 3 def test_create_and_get_stats_model(self) -> None: entity_id = ( @@ -445,6 +622,40 @@ def test_get_id_on_same_ticket_produces_same_id(self) -> None: self.assertEqual(entity_id, entity_id_copy) + def test_null_ticket_id_is_handled(self) -> None: + model_class = ( + app_feedback_report_models.AppFeedbackReportStatsModel) + entity_id = model_class.calculate_id( + 'android', None, self.STATS_DATE) + self.assertEqual( + entity_id, + '%s:%s:%s' % ( + 'android', 'unticketed_android_reports_stats_ticket_id', + self.STATS_DATE.isoformat()) + ) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'ticket_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'platform': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'stats_tracking_date': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'total_reports_submitted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'daily_param_stats_schema_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'daily_param_stats': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = app_feedback_report_models.AppFeedbackReportStatsModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = app_feedback_report_models.AppFeedbackReportStatsModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + def test_get_stats_for_ticket(self) -> None: entity_id = ( app_feedback_report_models.AppFeedbackReportStatsModel.calculate_id( diff --git 
a/core/storage/audit/gae_models.py b/core/storage/audit/gae_models.py index b38d6838d655..95b51a6b0628 100644 --- a/core/storage/audit/gae_models.py +++ b/core/storage/audit/gae_models.py @@ -28,7 +28,7 @@ from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() diff --git a/core/storage/audit/gae_models_test.py b/core/storage/audit/gae_models_test.py index 502cf078de20..47f92e1a8970 100644 --- a/core/storage/audit/gae_models_test.py +++ b/core/storage/audit/gae_models_test.py @@ -22,27 +22,29 @@ from core.platform import models from core.tests import test_utils +from typing import Final + MYPY = False if MYPY: # pragma: no cover from mypy_imports import audit_models from mypy_imports import base_models (audit_models, base_models) = models.Registry.import_models( - [models.NAMES.audit, models.NAMES.base_model]) + [models.Names.AUDIT, models.Names.BASE_MODEL]) class RoleQueryAuditModelUnitTests(test_utils.GenericTestBase): """Unit tests for the RoleQueryAuditModel class.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID = 'user_id' - ID = 'user_id.111.update.111' - USERNAME = 'username' - ROLE = 'role' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID: Final = 'user_id' + ID: Final = 'user_id.111.update.111' + USERNAME: Final = 'username' + ROLE: Final = 'role' def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(RoleQueryAuditModelUnitTests, self).setUp() + super().setUp() audit_models.RoleQueryAuditModel( id=self.ID, @@ -57,6 +59,26 @@ def test_get_deletion_policy(self) -> None: audit_models.RoleQueryAuditModel.get_deletion_policy(), base_models.DELETION_POLICY.KEEP) + def test_get_export_policy(self) -> None: + sample_dict = { + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'intent': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'role': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'username': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + audit_models.RoleQueryAuditModel.get_export_policy(), + sample_dict) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + audit_models.RoleQueryAuditModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + def test_has_reference_to_user_id(self) -> None: self.assertTrue( audit_models.RoleQueryAuditModel @@ -69,8 +91,6 @@ def test_has_reference_to_user_id(self) -> None: def test_get_model(self) -> None: audit_model = audit_models.RoleQueryAuditModel.get(self.ID) - # Ruling out the possibility of None for mypy type checking. - assert audit_model is not None self.assertEqual(audit_model.id, self.ID) self.assertEqual(audit_model.intent, feconf.ROLE_ACTION_ADD) @@ -82,15 +102,15 @@ def test_get_model(self) -> None: class UsernameChangeAuditModelUnitTests(test_utils.GenericTestBase): """Unit tests for the UsernameChangeAuditModel class.""" - NONEXISTENT_COMMITTER_ID = 'id_x' - COMMITTER_ID = 'committer_id' - ID = 'committer_id.111.222' - OLD_USERNAME = 'old_username' - NEW_USERNAME = 'new_username' + NONEXISTENT_COMMITTER_ID: Final = 'id_x' + COMMITTER_ID: Final = 'committer_id' + ID: Final = 'committer_id.111.222' + OLD_USERNAME: Final = 'old_username' + NEW_USERNAME: Final = 'new_username' def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(UsernameChangeAuditModelUnitTests, self).setUp() + super().setUp() audit_models.UsernameChangeAuditModel( id=self.ID, @@ -104,6 +124,25 @@ def test_get_deletion_policy(self) -> None: audit_models.UsernameChangeAuditModel.get_deletion_policy(), 
base_models.DELETION_POLICY.KEEP) + def test_get_export_policy(self) -> None: + sample_dict = { + 'committer_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'old_username': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'new_username': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + audit_models.UsernameChangeAuditModel.get_export_policy(), + sample_dict) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + audit_models.UsernameChangeAuditModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + def test_has_reference_to_user_id(self) -> None: self.assertTrue( audit_models.UsernameChangeAuditModel @@ -116,8 +155,6 @@ def test_has_reference_to_user_id(self) -> None: def test_get_model(self) -> None: audit_model = audit_models.UsernameChangeAuditModel.get(self.ID) - # Ruling out the possibility of None for mypy type checking. 
- assert audit_model is not None self.assertEqual(audit_model.id, self.ID) self.assertEqual(audit_model.committer_id, self.COMMITTER_ID) diff --git a/core/storage/auth/gae_models.py b/core/storage/auth/gae_models.py index 6a1d3e4d984a..b2b32ed3c211 100644 --- a/core/storage/auth/gae_models.py +++ b/core/storage/auth/gae_models.py @@ -21,7 +21,7 @@ from core import feconf from core.platform import models -from typing import Dict, Optional +from typing import Dict, Final, Optional MYPY = False if MYPY: # pragma: no cover @@ -29,11 +29,12 @@ from mypy_imports import datastore_services from mypy_imports import user_models -base_models, user_models = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.user]) +base_models, user_models = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.USER +]) datastore_services = models.Registry.import_datastore_services() -ONLY_FIREBASE_SEED_MODEL_ID = '1' +ONLY_FIREBASE_SEED_MODEL_ID: Final = '1' class UserAuthDetailsModel(base_models.BaseModel): @@ -97,11 +98,9 @@ def export_data(cls, user_id: str) -> Dict[str, str]: """Exports the username of the parent.""" user_auth_model = cls.get(user_id, strict=False) if user_auth_model and user_auth_model.parent_user_id: - parent_data = user_models.UserSettingsModel.get( + parent_model = user_models.UserSettingsModel.get( user_auth_model.parent_user_id) - # Ruling out the possibility of None for mypy type checking. 
- assert parent_data is not None - parent_username = parent_data.username + parent_username = parent_model.username return {'parent_username': parent_username} else: return {} diff --git a/core/storage/auth/gae_models_test.py b/core/storage/auth/gae_models_test.py index 68e55353cd2c..47c6294e6bde 100644 --- a/core/storage/auth/gae_models_test.py +++ b/core/storage/auth/gae_models_test.py @@ -22,31 +22,39 @@ from core.platform import models from core.tests import test_utils +from typing import Final + MYPY = False if MYPY: # pragma: no cover from mypy_imports import auth_models from mypy_imports import base_models + from mypy_imports import user_models -(auth_models, base_models) = ( - models.Registry.import_models([models.NAMES.auth, models.NAMES.base_model])) +(auth_models, base_models, user_models) = ( + models.Registry.import_models([ + models.Names.AUTH, + models.Names.BASE_MODEL, + models.Names.USER + ]) +) class UserAuthDetailsModelTests(test_utils.GenericTestBase): """Tests for UserAuthDetailsModel.""" - NONEXISTENT_AUTH_METHOD_NAME = 'auth_method_x' - NONEXISTENT_USER_ID = 'id_x' - NONREGISTERED_GAE_ID = 'auth_id_x' - USER_ID = 'user_id' - USER_GAE_ID = 'auth_id' - FIREBASE_USER_ID = 'firebase_user_id' - FIREBASE_AUTH_ID = 'firebase_auth_id' - PROFILE_ID = 'profile_id' - PROFILE_2_ID = 'profile_2_id' + NONEXISTENT_AUTH_METHOD_NAME: Final = 'auth_method_x' + NONEXISTENT_USER_ID: Final = 'id_x' + NONREGISTERED_GAE_ID: Final = 'auth_id_x' + USER_ID: Final = 'user_id' + USER_GAE_ID: Final = 'auth_id' + FIREBASE_USER_ID: Final = 'firebase_user_id' + FIREBASE_AUTH_ID: Final = 'firebase_auth_id' + PROFILE_ID: Final = 'profile_id' + PROFILE_2_ID: Final = 'profile_2_id' def setUp(self) -> None: """Set up user models in storage for use in testing.""" - super(UserAuthDetailsModelTests, self).setUp() + super().setUp() auth_models.UserAuthDetailsModel( id=self.USER_ID, @@ -72,8 +80,76 @@ def test_get_deletion_policy_is_delete_at_end(self) -> None: 
auth_models.UserAuthDetailsModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE_AT_END) + def test_get_field_names_for_takeout(self) -> None: + expected_dict = { + 'parent_user_id': 'parent_username' + } + self.assertEqual( + auth_models.UserAuthDetailsModel + .get_field_names_for_takeout(), + expected_dict) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + auth_models.UserAuthDetailsModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'gae_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'firebase_auth_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'parent_user_id': base_models.EXPORT_POLICY.EXPORTED, + } + self.assertEqual( + auth_models.UserAuthDetailsModel + .get_export_policy(), + expected_export_policy_dict) + + def test_export_data_trivial(self) -> None: + """Trivial test of export_data functionality.""" + + exported_dict = ( + auth_models.UserAuthDetailsModel.export_data( + self.NONEXISTENT_USER_ID)) + + self.assertEqual(exported_dict, {}) + + def test_export_data_nontrivial(self) -> None: + user_auth_model = ( + auth_models.UserAuthDetailsModel + .get_by_id(self.PROFILE_2_ID)) + self.assertIsNotNone(user_auth_model) + + # The parent_user_id should exist to fetch the + # parent_model further on in the test. + self.assertIsNotNone(user_auth_model.parent_user_id) + self.assertEqual(user_auth_model.parent_user_id, self.USER_ID) + + # Create the model instance to be fetched using + # user_auth_model.parent_user_id. The fetched + # parent_model will provide the username to be + # returned by .export_data(). 
+ user_models.UserSettingsModel( + id=self.USER_ID, + email='user@example.com', + roles=[feconf.ROLE_ID_CURRICULUM_ADMIN], + banned=False, + username='user' + ).put() + + exported_dict = ( + auth_models.UserAuthDetailsModel.export_data( + self.PROFILE_2_ID)) + expected_dict = {'parent_username': 'user'} + + self.assertEqual(expected_dict, exported_dict) + def test_apply_deletion_policy_for_registered_user_deletes_them( - self + self ) -> None: # Deleting a full user. auth_models.UserAuthDetailsModel.apply_deletion_policy(self.USER_ID) @@ -86,7 +162,7 @@ def test_apply_deletion_policy_for_registered_user_deletes_them( self.PROFILE_ID)) def test_apply_deletion_policy_nonexistent_user_raises_no_exception( - self + self ) -> None: self.assertIsNone(auth_models.UserAuthDetailsModel.get_by_id( self.NONEXISTENT_USER_ID)) @@ -138,7 +214,7 @@ def test_get_by_auth_id_for_correct_user_id_auth_id_mapping(self) -> None: ) def test_get_by_auth_id_registered_auth_id_returns_no_profile_user( - self + self ) -> None: self.assertNotEqual( auth_models.UserAuthDetailsModel.get_by_id(self.PROFILE_ID), @@ -156,17 +232,17 @@ def test_get_by_firebase_auth_id_returns_correct_profile_user(self) -> None: class UserIdentifiersModelTests(test_utils.GenericTestBase): """Tests for UserIdentifiersModel.""" - NONEXISTENT_AUTH_METHOD_NAME = 'auth_method_x' - NONEXISTENT_USER_ID = 'id_x' - NONREGISTERED_GAE_ID = 'auth_id_x' - USER_ID = 'user_id' - USER_GAE_ID = 'auth_id' - PROFILE_ID = 'profile_id' - PROFILE_2_ID = 'profile_2_id' + NONEXISTENT_AUTH_METHOD_NAME: Final = 'auth_method_x' + NONEXISTENT_USER_ID: Final = 'id_x' + NONREGISTERED_GAE_ID: Final = 'auth_id_x' + USER_ID: Final = 'user_id' + USER_GAE_ID: Final = 'auth_id' + PROFILE_ID: Final = 'profile_id' + PROFILE_2_ID: Final = 'profile_2_id' def setUp(self) -> None: """Set up user models in storage for use in testing.""" - super(UserIdentifiersModelTests, self).setUp() + super().setUp() auth_models.UserIdentifiersModel( id=self.USER_GAE_ID, 
@@ -178,8 +254,25 @@ def test_get_deletion_policy_is_delete_at_end(self) -> None: auth_models.UserIdentifiersModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE_AT_END) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + auth_models.UserIdentifiersModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + auth_models.UserIdentifiersModel.get_export_policy(), + expected_export_policy_dict) + def test_apply_deletion_policy_for_registered_user_deletes_them( - self + self ) -> None: # Deleting a full user. auth_models.UserIdentifiersModel.apply_deletion_policy(self.USER_ID) @@ -187,7 +280,7 @@ def test_apply_deletion_policy_for_registered_user_deletes_them( self.USER_ID)) def test_apply_deletion_policy_nonexistent_user_raises_no_exception( - self + self ) -> None: self.assertIsNone(auth_models.UserIdentifiersModel.get_by_id( self.NONEXISTENT_USER_ID)) @@ -234,17 +327,17 @@ def test_get_by_user_id_for_correct_user_id(self) -> None: class UserIdByFirebaseAuthIdModelTests(test_utils.GenericTestBase): """Tests for auth_models.UserIdByFirebaseAuthIdModel.""" - NONEXISTENT_AUTH_METHOD_NAME = 'auth_method_x' - NONEXISTENT_USER_ID = 'id_x' - NONREGISTERED_AUTH_ID = 'auth_id_x' - USER_ID = 'user_id' - USER_AUTH_ID = 'auth_id' - PROFILE_ID = 'profile_id' - PROFILE_2_ID = 'profile_2_id' + NONEXISTENT_AUTH_METHOD_NAME: Final = 'auth_method_x' + NONEXISTENT_USER_ID: Final = 'id_x' + NONREGISTERED_AUTH_ID: Final = 'auth_id_x' + USER_ID: Final = 'user_id' + USER_AUTH_ID: Final = 'auth_id' + PROFILE_ID: Final = 'profile_id' + PROFILE_2_ID: Final = 'profile_2_id' def 
setUp(self) -> None: """Set up user models in storage for use in testing.""" - super(UserIdByFirebaseAuthIdModelTests, self).setUp() + super().setUp() auth_models.UserIdByFirebaseAuthIdModel( id=self.USER_AUTH_ID, user_id=self.USER_ID).put() @@ -255,7 +348,7 @@ def test_get_deletion_policy_is_delete_at_end(self) -> None: base_models.DELETION_POLICY.DELETE_AT_END) def test_apply_deletion_policy_for_registered_user_deletes_them( - self + self ) -> None: # Deleting a full user. auth_models.UserIdByFirebaseAuthIdModel.apply_deletion_policy( @@ -265,7 +358,7 @@ def test_apply_deletion_policy_for_registered_user_deletes_them( self.USER_ID, strict=False)) def test_apply_deletion_policy_nonexistent_user_raises_no_exception( - self + self ) -> None: self.assertIsNone( auth_models.UserIdByFirebaseAuthIdModel.get( @@ -309,7 +402,7 @@ def test_get_export_policy(self) -> None: class FirebaseSeedModelTests(test_utils.GenericTestBase): """Tests for auth_models.FirebaseSeedModel.""" - USER_ID = 'user_id' + USER_ID: Final = 'user_id' def test_get_deletion_policy(self) -> None: self.assertEqual( diff --git a/core/storage/base_model/gae_models.py b/core/storage/base_model/gae_models.py index 55e765cdeb06..97124811c386 100644 --- a/core/storage/base_model/gae_models.py +++ b/core/storage/base_model/gae_models.py @@ -18,14 +18,28 @@ import datetime import enum +import re from core import feconf from core import utils from core.constants import constants from core.platform import models +from typing import Final, Literal, TypedDict + from typing import ( # isort:skip - Any, Dict, List, Optional, Sequence, Tuple, Type, Union, TypeVar, cast + Any, + Dict, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, + TypeVar, + cast, + overload ) SELF_BASE_MODEL = TypeVar( # pylint: disable=invalid-name @@ -43,24 +57,41 @@ MYPY = False if MYPY: # pragma: no cover + # Here, 'change_domain' is imported only for type checking. 
+ from core.domain import change_domain # pylint: disable=invalid-import # isort:skip from mypy_imports import datastore_services from mypy_imports import transaction_services + AllowedCommitCmdsListType = Sequence[ + Mapping[str, change_domain.AcceptableChangeDictTypes] + ] + transaction_services = models.Registry.import_transaction_services() datastore_services = models.Registry.import_datastore_services() # The delimiter used to separate the version number from the model instance # id. To get the instance id from a snapshot id, use Python's rfind() # method to find the location of this delimiter. -VERSION_DELIMITER = '-' +VERSION_DELIMITER: Final = '-' # Constant used when retrieving big number of models. -FETCH_BATCH_SIZE = 1000 +FETCH_BATCH_SIZE: Final = 1000 # Constants used for generating ids. -MAX_RETRIES = 10 -RAND_RANGE = (1 << 30) - 1 -ID_LENGTH = 12 +MAX_RETRIES: Final = 10 +RAND_RANGE: Final = (1 << 30) - 1 +ID_LENGTH: Final = 12 + + +class SnapshotsMetadataDict(TypedDict): + """Dictionary representing the snapshot metadata for versioned models.""" + + committer_id: str + commit_message: str + commit_cmds: List[Dict[str, change_domain.AcceptableChangeDictTypes]] + commit_type: str + version_number: int + created_on_ms: float # Types of deletion policies. The pragma comment is needed because Enums are @@ -111,12 +142,21 @@ class MODEL_ASSOCIATION_TO_USER(enum.Enum): # pylint: disable=invalid-name NOT_CORRESPONDING_TO_USER = 'NOT_CORRESPONDING_TO_USER' +class ModelsToPutDict(TypedDict, total=False): + """Dict representing models to be put into the datastore.""" + + versioned_model: VersionedModel + snapshot_metadata_model: BaseSnapshotMetadataModel + snapshot_content_model: BaseSnapshotContentModel + commit_log_model: BaseCommitLogEntryModel + + class BaseModel(datastore_services.Model): """Base model for all persistent object storage classes.""" # Specifies whether the model's id is used as a key in Takeout. 
By default, # the model's id is not used as the key for the Takeout dict. - ID_IS_USED_AS_TAKEOUT_KEY = False + ID_IS_USED_AS_TAKEOUT_KEY: bool = False # When this entity was first created. This value should only be modified by # the update_timestamps method. @@ -129,11 +169,11 @@ class BaseModel(datastore_services.Model): # Whether the current version of the model instance is deleted. deleted = datastore_services.BooleanProperty(indexed=True, default=False) - # We use type Any for *args and **kwargs to denote compatibility with the + # Here we use type Any because we need to denote the compatibility with the # overridden constructor of the parent class i.e datastore_services.Model # here. def __init__(self, *args: Any, **kwargs: Any) -> None: - super(BaseModel, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self._last_updated_timestamp_is_fresh = False def _pre_put_hook(self) -> None: @@ -142,7 +182,7 @@ def _pre_put_hook(self) -> None: Raises: Exception. The model has not refreshed the value of last_updated. """ - super(BaseModel, self)._pre_put_hook() + super()._pre_put_hook() if self.created_on is None: self.created_on = datetime.datetime.utcnow() @@ -179,6 +219,18 @@ def get_deletion_policy() -> DELETION_POLICY: 'The get_deletion_policy() method is missing from the ' 'derived class. It should be implemented in the derived class.') + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """This method should be implemented by subclasses. + + Raises: + NotImplementedError. The method is not overwritten in a derived + class. + """ + raise NotImplementedError( + 'The apply_deletion_policy() method is missing from the ' + 'derived class. It should be implemented in the derived class.') + @classmethod def has_reference_to_user_id(cls, user_id: str) -> bool: """This method should be implemented by subclasses. 
@@ -194,7 +246,7 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: 'The has_reference_to_user_id() method is missing from the ' 'derived class. It should be implemented in the derived class.') - # Using Dict[str, Any] here so that the return type of all the export_data + # Here we use type Any because the return type of all the export_data # methods in BaseModel's subclasses is a subclass of Dict[str, Any]. # Otherwise subclass methods will throw [override] error. @staticmethod @@ -241,11 +293,45 @@ def get_field_names_for_takeout(cls) -> Dict[str, str]: """ return {} + @overload @classmethod def get( - cls: Type[SELF_BASE_MODEL], - entity_id: str, - strict: bool = True + cls: Type[SELF_BASE_MODEL], + entity_id: str, + ) -> SELF_BASE_MODEL: ... + + @overload + @classmethod + def get( + cls: Type[SELF_BASE_MODEL], + entity_id: str, + *, + strict: Literal[True] + ) -> SELF_BASE_MODEL: ... + + @overload + @classmethod + def get( + cls: Type[SELF_BASE_MODEL], + entity_id: str, + *, + strict: Literal[False] + ) -> Optional[SELF_BASE_MODEL]: ... + + @overload + @classmethod + def get( + cls: Type[SELF_BASE_MODEL], + entity_id: str, + *, + strict: bool = ... + ) -> Optional[SELF_BASE_MODEL]: ... + + @classmethod + def get( + cls: Type[SELF_BASE_MODEL], + entity_id: str, + strict: bool = True ) -> Optional[SELF_BASE_MODEL]: """Gets an entity by id. @@ -276,9 +362,9 @@ def get( @classmethod def get_multi( - cls: Type[SELF_BASE_MODEL], - entity_ids: Sequence[Optional[str]], - include_deleted: bool = False + cls: Type[SELF_BASE_MODEL], + entity_ids: Sequence[Optional[str]], + include_deleted: bool = False ) -> List[Optional[SELF_BASE_MODEL]]: """Gets list of entities by list of ids. 
@@ -332,9 +418,9 @@ def update_timestamps(self, update_last_updated_time: bool = True) -> None: @classmethod def update_timestamps_multi( - cls, - entities: List[SELF_BASE_MODEL], - update_last_updated_time: bool = True + cls, + entities: List[SELF_BASE_MODEL], + update_last_updated_time: bool = True ) -> None: """Update the created_on and last_updated fields of all given entities. @@ -395,7 +481,7 @@ def delete(self) -> None: @classmethod def get_all( - cls: Type[SELF_BASE_MODEL], include_deleted: bool = False + cls: Type[SELF_BASE_MODEL], include_deleted: bool = False ) -> datastore_services.Query: """Gets iterable of all entities of this class. @@ -440,10 +526,10 @@ def get_new_id(cls, entity_name: str) -> str: @classmethod def _fetch_page_sorted_by_last_updated( - cls: Type[SELF_BASE_MODEL], - query: datastore_services.Query, - page_size: int, - urlsafe_start_cursor: Optional[str] + cls: Type[SELF_BASE_MODEL], + query: datastore_services.Query, + page_size: int, + urlsafe_start_cursor: Optional[str] ) -> Tuple[Sequence[SELF_BASE_MODEL], Optional[str], bool]: """Fetches a page of entities sorted by their last_updated attribute in descending order (newly updated first). @@ -508,7 +594,7 @@ class BaseHumanMaintainedModel(BaseModel): last_updated_by_human = ( datastore_services.DateTimeProperty(indexed=True, required=True)) - # We use type Any for *args and **kwargs to denote compatibility with the + # Here we use type Any because we need to denote the compatibility with the # overridden put method of the parent class i.e. BaseModel here. 
def put(self, *args: Any, **kwargs: Any) -> None: """Unsupported operation on human-maintained models.""" @@ -517,11 +603,11 @@ def put(self, *args: Any, **kwargs: Any) -> None: def put_for_human(self) -> None: """Stores the model instance on behalf of a human.""" self.last_updated_by_human = datetime.datetime.utcnow() - return super(BaseHumanMaintainedModel, self).put() + return super().put() def put_for_bot(self) -> None: """Stores the model instance on behalf of a non-human.""" - return super(BaseHumanMaintainedModel, self).put() + return super().put() @classmethod def put_multi(cls, unused_instances: List[SELF_BASE_MODEL]) -> None: @@ -531,7 +617,7 @@ def put_multi(cls, unused_instances: List[SELF_BASE_MODEL]) -> None: @classmethod def put_multi_for_human( - cls, instances: List[SELF_BASE_HUMAN_MAINTAINED_MODEL] + cls, instances: List[SELF_BASE_HUMAN_MAINTAINED_MODEL] ) -> None: """Stores the given model instances on behalf of a human. @@ -548,7 +634,7 @@ def put_multi_for_human( @classmethod def put_multi_for_bot( - cls, instances: List[SELF_BASE_HUMAN_MAINTAINED_MODEL] + cls, instances: List[SELF_BASE_HUMAN_MAINTAINED_MODEL] ) -> None: """Stores the given model instances on behalf of a non-human. @@ -604,10 +690,9 @@ def get_deletion_policy() -> DELETION_POLICY: @classmethod def get_export_policy(cls) -> Dict[str, EXPORT_POLICY]: - """Model contains data corresponding to a user, - but this isn't exported because the history of commits is not - relevant to a user for the purposes of Takeout, since commits do not - contain any personal user data. + """Model contains data corresponding to a user, but this isn't exported + because all user-related data for a commit is already being exported + as part of the corresponding SnapshotMetadataModel. 
""" return dict(BaseModel.get_export_policy(), **{ 'user_id': EXPORT_POLICY.NOT_APPLICABLE, @@ -633,19 +718,17 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: """ return cls.query(cls.user_id == user_id).get(keys_only=True) is not None - # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to - # remove Any from type-annotation below. @classmethod def create( - cls: Type[SELF_BASE_COMMIT_LOG_ENTRY_MODEL], - entity_id: str, - version: int, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: Union[Dict[str, Any], List[Dict[str, Any]], None], - status: str, - community_owned: bool + cls: Type[SELF_BASE_COMMIT_LOG_ENTRY_MODEL], + entity_id: str, + version: int, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: AllowedCommitCmdsListType, + status: str, + community_owned: bool ) -> SELF_BASE_COMMIT_LOG_ENTRY_MODEL: """This method returns an instance of the CommitLogEntryModel for a construct with the common fields filled. @@ -659,7 +742,8 @@ def create( change. commit_type: str. The type of commit. Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. - commit_message: str. The commit description message. + commit_message: str|None. The commit description message, or None if + draft (or unpublished) model is provided. commit_cmds: list(dict). A list of commands, describing changes made in this model, which should give sufficient information to reconstruct the commit. Each dict always contains: @@ -706,9 +790,9 @@ def get_instance_id(cls, target_entity_id: str, version: int) -> str: @classmethod def get_all_commits( - cls: Type[SELF_BASE_COMMIT_LOG_ENTRY_MODEL], - page_size: int, - urlsafe_start_cursor: Optional[str] + cls: Type[SELF_BASE_COMMIT_LOG_ENTRY_MODEL], + page_size: int, + urlsafe_start_cursor: Optional[str] ) -> Tuple[Sequence[SELF_BASE_COMMIT_LOG_ENTRY_MODEL], Optional[str], bool]: """Fetches a list of all the commits sorted by their last updated attribute. 
@@ -737,9 +821,9 @@ def get_all_commits( @classmethod def get_commit( - cls: Type[SELF_BASE_COMMIT_LOG_ENTRY_MODEL], - target_entity_id: str, - version: int + cls: Type[SELF_BASE_COMMIT_LOG_ENTRY_MODEL], + target_entity_id: str, + version: int ) -> Optional[SELF_BASE_COMMIT_LOG_ENTRY_MODEL]: """Returns the commit corresponding to an instance id and version number. @@ -784,29 +868,26 @@ class VersionedModel(BaseModel): # to log the commits it can be None. COMMIT_LOG_ENTRY_CLASS: Optional[Type[BaseCommitLogEntryModel]] = None # Whether reverting is allowed. Default is False. - ALLOW_REVERT = False + ALLOW_REVERT: bool = False # IMPORTANT: Subclasses should only overwrite things above this line. - # The possible commit types. - _COMMIT_TYPE_CREATE = 'create' - _COMMIT_TYPE_REVERT = 'revert' - _COMMIT_TYPE_EDIT = 'edit' - _COMMIT_TYPE_DELETE = 'delete' # A list containing the possible commit types. - COMMIT_TYPE_CHOICES = [ - _COMMIT_TYPE_CREATE, _COMMIT_TYPE_REVERT, _COMMIT_TYPE_EDIT, - _COMMIT_TYPE_DELETE + COMMIT_TYPE_CHOICES: List[str] = [ + feconf.COMMIT_TYPE_CREATE, + feconf.COMMIT_TYPE_REVERT, + feconf.COMMIT_TYPE_EDIT, + feconf.COMMIT_TYPE_DELETE ] # The reserved prefix for keys that are automatically inserted into a # commit_cmd dict by this model. - _AUTOGENERATED_PREFIX = feconf.AUTOGENERATED_PREFIX + _AUTOGENERATED_PREFIX: Final = feconf.AUTOGENERATED_PREFIX # The command string for a revert commit. - CMD_REVERT_COMMIT = feconf.CMD_REVERT_COMMIT + CMD_REVERT_COMMIT: Final = feconf.CMD_REVERT_COMMIT # The command string for a delete commit. - CMD_DELETE_COMMIT = feconf.CMD_DELETE_COMMIT + CMD_DELETE_COMMIT: Final = feconf.CMD_DELETE_COMMIT # The current version number of this instance. 
In each PUT operation, # this number is incremented and a snapshot of the modified instance is @@ -821,17 +902,19 @@ def _require_not_marked_deleted(self) -> None: if self.deleted: raise Exception('This model instance has been deleted.') - # TODO(#13523): Change 'snapshot' to domain object/TypedDict to - # remove Any from type-annotation below. + # Here we use type Any because the method 'compute_snapshot' defined in + # subclasses of VersionedModel can return different Dict/TypedDict types. + # So, to allow every Dict/TypedDict type we used Any here. def compute_snapshot(self) -> Dict[str, Any]: """Generates a snapshot (dict) from the model property values.""" return self.to_dict(exclude=['created_on', 'last_updated']) - # TODO(#13523): Change 'snapshot_dict' to domain object/Typed Dict to - # remove Any from type-annotation below. + # Here we use type Any because the method '_reconstitute' defined in + # subclasses of VersionedModel can accept different Dict/TypedDict types. + # So, to allow every Dict/TypedDict type we used Any here. def _reconstitute( - self: SELF_VERSIONED_MODEL, - snapshot_dict: Dict[str, Any] + self: SELF_VERSIONED_MODEL, + snapshot_dict: Dict[str, Any] ) -> SELF_VERSIONED_MODEL: """Populates the model instance with the snapshot. @@ -847,8 +930,8 @@ def _reconstitute( return self def _reconstitute_from_snapshot_id( - self: SELF_VERSIONED_MODEL, - snapshot_id: str + self: SELF_VERSIONED_MODEL, + snapshot_id: str ) -> SELF_VERSIONED_MODEL: """Gets a reconstituted instance of this model class, based on the given snapshot id. @@ -861,8 +944,6 @@ def _reconstitute_from_snapshot_id( """ assert self.SNAPSHOT_CONTENT_CLASS is not None snapshot_model = self.SNAPSHOT_CONTENT_CLASS.get(snapshot_id) - # Ruling out the possibility of None for mypy type checking. 
- assert snapshot_model is not None snapshot_dict = snapshot_model.content reconstituted_model = self._reconstitute(snapshot_dict) # TODO(sll): The 'created_on' and 'last_updated' values here will be @@ -891,30 +972,52 @@ def get_snapshot_id(cls, instance_id: str, version_number: int) -> str: return '%s%s%s' % ( instance_id, VERSION_DELIMITER, version_number) - # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to - # remove Any from type-annotation below. - def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't be allowed. + def _prepare_additional_models(self) -> Mapping[str, BaseModel]: + """Prepares additional models needed for the commit process. + The default return value is an empty dict; however, this method should + be overridden in models that inherit from VersionedModel. + + Returns: + dict(str, BaseModel). Additional models needed for + the commit process. + """ + return {} + + def compute_models_to_commit( + self, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: AllowedCommitCmdsListType, + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't + # be allowed. + unused_additional_models: Mapping[str, BaseModel] + ) -> ModelsToPutDict: """Evaluates and executes commit. Main function for all commit types. Args: committer_id: str. The user_id of the user who committed the change. commit_type: str. Unique identifier of commit type. Possible values are in COMMIT_TYPE_CHOICES. - commit_message: str. The commit description message. + commit_message: str|None. The commit description message, or None if + draft (or unpublished) model is provided. commit_cmds: list(dict). 
A list of commands, describing changes made in this model, should give sufficient information to reconstruct the commit. Dict always contains: cmd: str. Unique command. - And then additional arguments for that command. For example: + And then additional arguments for that command. For example: { + 'cmd': 'AUTO_revert_version_number', + 'version_number': 4 + } + unused_additional_models: dict(str, BaseModel). Additional models + that are needed for the commit process. - {'cmd': 'AUTO_revert_version_number' - 'version_number': 4} + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. Raises: Exception. No snapshot metadata class has been defined. @@ -935,28 +1038,31 @@ def _trusted_commit( snapshot = self.compute_snapshot() snapshot_id = self.get_snapshot_id(self.id, self.version) - snapshot_metadata_instance: BaseSnapshotMetadataModel = ( - self.SNAPSHOT_METADATA_CLASS.create( - snapshot_id, committer_id, commit_type, commit_message, - commit_cmds) + snapshot_metadata_instance = self.SNAPSHOT_METADATA_CLASS.create( + snapshot_id, + committer_id, + commit_type, + commit_message, + commit_cmds ) - snapshot_content_instance: BaseSnapshotContentModel = ( - self.SNAPSHOT_CONTENT_CLASS.create(snapshot_id, snapshot) + snapshot_content_instance = self.SNAPSHOT_CONTENT_CLASS.create( + snapshot_id, snapshot ) - entities: List[BaseModel] = [ - snapshot_metadata_instance, snapshot_content_instance, self] - BaseModel.update_timestamps_multi(entities) - BaseModel.put_multi_transactional(entities) + return { + 'snapshot_metadata_model': snapshot_metadata_instance, + 'snapshot_content_model': snapshot_content_instance, + 'versioned_model': self, + } - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.delete(). 
# https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override def delete( # type: ignore[override] - self, - committer_id: str, - commit_message: str, - force_deletion: bool = False + self, + committer_id: str, + commit_message: str, + force_deletion: bool = False, ) -> None: """Deletes this model instance. @@ -1002,7 +1108,7 @@ def delete( # type: ignore[override] datastore_services.delete_multi( content_keys + metadata_keys + commit_log_keys) - super(VersionedModel, self).delete() + super().delete() else: self._require_not_marked_deleted() # pylint: disable=protected-access self.deleted = True @@ -1010,21 +1116,31 @@ def delete( # type: ignore[override] commit_cmds = [{ 'cmd': self.CMD_DELETE_COMMIT }] - - self._trusted_commit( - committer_id, self._COMMIT_TYPE_DELETE, commit_message, - commit_cmds) - - # We have ignored [override] here because the signature of this method + models_to_put = self.compute_models_to_commit( + committer_id, + feconf.COMMIT_TYPE_DELETE, + commit_message, + commit_cmds, + self._prepare_additional_models() + ) + models_to_put_values = [] + for model_to_put in models_to_put.values(): + # Here, we are narrowing down the type from object to BaseModel. + assert isinstance(model_to_put, BaseModel) + models_to_put_values.append(model_to_put) + BaseModel.update_timestamps_multi(models_to_put_values) + BaseModel.put_multi(models_to_put_values) + + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.delete_multi(). # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override @classmethod def delete_multi( # type: ignore[override] - cls, - entity_ids: List[str], - committer_id: str, - commit_message: str, - force_deletion: bool = False + cls, + entity_ids: List[str], + committer_id: str, + commit_message: str, + force_deletion: bool = False ) -> None: """Deletes the given cls instancies with the given entity_ids. 
@@ -1111,8 +1227,13 @@ def delete_multi( # type: ignore[override] assert model.SNAPSHOT_METADATA_CLASS is not None snapshot_metadata_models.append( model.SNAPSHOT_METADATA_CLASS.create( - snapshot_id, committer_id, cls._COMMIT_TYPE_DELETE, - commit_message, commit_cmds)) + snapshot_id, + committer_id, + feconf.COMMIT_TYPE_DELETE, + commit_message, + commit_cmds + ) + ) # This assert is used to eliminate the possibility of None # during mypy type checking. @@ -1120,18 +1241,17 @@ def delete_multi( # type: ignore[override] snapshot_content_models.append( model.SNAPSHOT_CONTENT_CLASS.create(snapshot_id, snapshot)) - # We need the cast here in order to convey to MyPy that all these - # lists can be merged together for the purpose of putting them into - # the datastore. - entities = ( - cast(List[BaseModel], snapshot_metadata_models) + - cast(List[BaseModel], snapshot_content_models) + - cast(List[BaseModel], versioned_models) - ) + entities: List[BaseModel] = [] + for snapshot_model in snapshot_metadata_models: + entities.append(snapshot_model) + for content_model in snapshot_content_models: + entities.append(content_model) + for versioned_model in versioned_models: + entities.append(versioned_model) cls.update_timestamps_multi(entities) BaseModel.put_multi_transactional(entities) - # We use type Any for *args and **kwargs to denote compatibility with the + # Here we use type Any because we need to denote the compatibility with the # overridden constructor of the parent class i.e BaseModel here. def put(self, *args: Any, **kwargs: Any) -> None: """For VersionedModels, this method is replaced with commit().""" @@ -1139,19 +1259,63 @@ def put(self, *args: Any, **kwargs: Any) -> None: 'The put() method is missing from the ' 'derived class. It should be implemented in the derived class.') - # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to - # remove Any from type-annotation below. 
+ def get_models_to_put_values( + self, + committer_id: str, + commit_message: Optional[str], + commit_cmds: Sequence[ + Mapping[str, change_domain.AcceptableChangeDictTypes] + ] + ) -> List[BaseModel]: + """Returns the models which should be put. + + Args: + committer_id: str. The user_id of the user who committed the change. + commit_message: str. The commit description message. + commit_cmds: list(dict). A list of commands, describing changes + made in this model, should give sufficient information to + reconstruct the commit. Dict always contains: + cmd: str. Unique command. + And then additional arguments for that command. For example: + + {'cmd': 'AUTO_revert_version_number' + 'version_number': 4} + + Returns: + list(BaseModel). The models which should be put. + """ + commit_type = ( + feconf.COMMIT_TYPE_CREATE + if self.version == 0 + else feconf.COMMIT_TYPE_EDIT + ) + + models_to_put = self.compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + self._prepare_additional_models() + ).values() + models_to_put_values = [] + for model_to_put in models_to_put: + # Here, we are narrowing down the type from object to BaseModel. + assert isinstance(model_to_put, BaseModel) + models_to_put_values.append(model_to_put) + return models_to_put_values + def commit( - self, - committer_id: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] + self, + committer_id: str, + commit_message: Optional[str], + commit_cmds: AllowedCommitCmdsListType ) -> None: """Saves a version snapshot and updates the model. Args: committer_id: str. The user_id of the user who committed the change. - commit_message: str. The commit description message. + commit_message: str|None. The commit description message, or None if + draft (or unpublished) model is provided. commit_cmds: list(dict). A list of commands, describing changes made in this model, should give sufficient information to reconstruct the commit. 
Dict always contains: @@ -1167,13 +1331,11 @@ def commit( """ self._require_not_marked_deleted() - for item in commit_cmds: - if not isinstance(item, dict): + for commit_cmd in commit_cmds: + if not isinstance(commit_cmd, dict): raise Exception( 'Expected commit_cmds to be a list of dicts, received %s' % commit_cmds) - - for commit_cmd in commit_cmds: if 'cmd' not in commit_cmd: raise Exception( 'Invalid commit_cmd: %s. Expected a \'cmd\' key.' @@ -1182,20 +1344,21 @@ def commit( raise Exception( 'Invalid change list command: %s' % commit_cmd['cmd']) - commit_type = ( - self._COMMIT_TYPE_CREATE if self.version == 0 else - self._COMMIT_TYPE_EDIT) - - self._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + models_to_put = self.get_models_to_put_values( + committer_id, + commit_message, + commit_cmds + ) + BaseModel.update_timestamps_multi(models_to_put) + BaseModel.put_multi_transactional(models_to_put) @classmethod def revert( - cls: Type[SELF_VERSIONED_MODEL], - model: SELF_VERSIONED_MODEL, - committer_id: str, - commit_message: str, - version_number: int + cls: Type[SELF_VERSIONED_MODEL], + model: SELF_VERSIONED_MODEL, + committer_id: str, + commit_message: str, + version_number: int ) -> None: """Reverts model to previous version. @@ -1216,7 +1379,7 @@ def revert( 'Reverting objects of type %s is not allowed.' % model.__class__.__name__) - commit_cmds = [{ + commit_cmds: List[Dict[str, Union[str, int]]] = [{ 'cmd': model.CMD_REVERT_COMMIT, 'version_number': version_number }] @@ -1232,22 +1395,70 @@ def revert( # not have a states_schema_version property, it should revert to the # default states_schema_version value rather than taking the # states_schema_version value from the latest exploration version. 
- snapshot_id = model.get_snapshot_id(model.id, version_number) new_model = cls(id=model.id) new_model._reconstitute_from_snapshot_id(snapshot_id) # pylint: disable=protected-access new_model.version = current_version - new_model._trusted_commit( # pylint: disable=protected-access - committer_id, cls._COMMIT_TYPE_REVERT, commit_message, - commit_cmds) + models_to_put = new_model.compute_models_to_commit( + committer_id, + feconf.COMMIT_TYPE_REVERT, + commit_message, + commit_cmds, + new_model._prepare_additional_models() + ) + models_to_put_values = [] + for model_to_put in models_to_put.values(): + # Here, we are narrowing down the type from object to BaseModel. + assert isinstance(model_to_put, BaseModel) + models_to_put_values.append(model_to_put) + BaseModel.update_timestamps_multi(models_to_put_values) + BaseModel.put_multi_transactional(models_to_put_values) + + @overload + @classmethod + def get_version( + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + version_number: int, + ) -> SELF_VERSIONED_MODEL: ... + + @overload + @classmethod + def get_version( + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + version_number: int, + *, + strict: Literal[True] + ) -> SELF_VERSIONED_MODEL: ... + + @overload + @classmethod + def get_version( + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + version_number: int, + *, + strict: Literal[False] + ) -> Optional[SELF_VERSIONED_MODEL]: ... + + @overload + @classmethod + def get_version( + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + version_number: int, + *, + strict: bool = ... + ) -> Optional[SELF_VERSIONED_MODEL]: ... @classmethod def get_version( - cls: Type[SELF_VERSIONED_MODEL], - entity_id: str, - version_number: int, - strict: bool = True + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + version_number: int, + strict: bool = True ) -> Optional[SELF_VERSIONED_MODEL]: """Gets model instance representing the given version. @@ -1264,7 +1475,7 @@ def get_version( VersionedModel. 
Model instance representing given version. Raises: - Exception. This model instance has been deleted. + EntityNotFoundError. This model instance has been deleted. """ current_version_model = cls.get(entity_id, strict=strict) @@ -1287,9 +1498,9 @@ def get_version( @classmethod def get_multi_versions( - cls: Type[SELF_VERSIONED_MODEL], - entity_id: str, - version_numbers: List[int] + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + version_numbers: List[int] ) -> List[SELF_VERSIONED_MODEL]: """Gets model instances for each version specified in version_numbers. @@ -1341,12 +1552,51 @@ def get_multi_versions( instances.append(reconstituted_model) return instances + @overload + @classmethod + def get( + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + ) -> SELF_VERSIONED_MODEL: ... + + @overload + @classmethod + def get( + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + *, + strict: Literal[True], + version: Optional[int] = None + ) -> SELF_VERSIONED_MODEL: ... + + @overload @classmethod def get( - cls: Type[SELF_VERSIONED_MODEL], - entity_id: str, - strict: bool = True, - version: Optional[int] = None + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + *, + strict: Literal[False], + version: Optional[int] = None + ) -> Optional[SELF_VERSIONED_MODEL]: ... + + @overload + @classmethod + def get( + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + *, + strict: bool = ..., + version: Optional[int] = None + ) -> Optional[SELF_VERSIONED_MODEL]: ... + + # Here we use MyPy ignore because the signature of this method + # doesn't match with BaseModel.get(). + @classmethod + def get( # type: ignore[override] + cls: Type[SELF_VERSIONED_MODEL], + entity_id: str, + strict: bool = True, + version: Optional[int] = None ) -> Optional[SELF_VERSIONED_MODEL]: """Gets model instance. 
@@ -1365,15 +1615,13 @@ def get( else: return cls.get_version(entity_id, version, strict=strict) - # TODO(#13523): Change 'snapshot' to domain object/TypedDict to - # remove Any from type-annotation below. @classmethod def get_snapshots_metadata( - cls, - model_instance_id: str, - version_numbers: List[int], - allow_deleted: bool = False - ) -> List[Dict[str, Any]]: + cls, + model_instance_id: str, + version_numbers: List[int], + allow_deleted: bool = False + ) -> List[SnapshotsMetadataDict]: """Gets a list of dicts, each representing a model snapshot. One dict is returned for each version number in the list of version @@ -1413,8 +1661,6 @@ def get_snapshots_metadata( """ if not allow_deleted: model = cls.get(model_instance_id) - # Ruling out the possibility of None for mypy type checking. - assert model is not None model._require_not_marked_deleted() # pylint: disable=protected-access snapshot_ids = [ @@ -1425,14 +1671,15 @@ def get_snapshots_metadata( for snapshot_id in snapshot_ids] returned_models = datastore_services.get_multi(metadata_keys) - for ind, model in enumerate(returned_models): - if model is None: + for ind, returned_model in enumerate(returned_models): + if returned_model is None: raise Exception( 'Invalid version number %s for model %s with id %s' % (version_numbers[ind], cls.__name__, model_instance_id)) - # We need the cast here to make sure that returned_models only contains - # BaseSnapshotMetadataModel and not None. + # Here we use cast because we need to make sure that returned_models + # only contains BaseSnapshotMetadataModel and not None. In above code, + # we are already throwing an error if None value is encountered. returned_models_without_none = cast( List[BaseSnapshotMetadataModel], returned_models) return [{ @@ -1469,7 +1716,7 @@ class BaseSnapshotMetadataModel(BaseModel): """ # The ids of SnapshotMetadataModels are used as Takeout keys. 
- ID_IS_USED_AS_TAKEOUT_KEY = True + ID_IS_USED_AS_TAKEOUT_KEY: bool = True # The id of the user who committed this revision. committer_id = ( @@ -1530,16 +1777,14 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: cls.content_user_ids == user_id, )).get(keys_only=True) is not None - # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to - # remove Any from type-annotation below. @classmethod def create( - cls: Type[SELF_BASE_SNAPSHOT_METADATA_MODEL], - snapshot_id: str, - committer_id: str, - commit_type: str, - commit_message: Optional[str], - commit_cmds: Union[Dict[str, Any], List[Dict[str, Any]], None] + cls: Type[SELF_BASE_SNAPSHOT_METADATA_MODEL], + snapshot_id: str, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: AllowedCommitCmdsListType ) -> SELF_BASE_SNAPSHOT_METADATA_MODEL: """This method returns an instance of the BaseSnapshotMetadataModel for a construct with the common fields filled. @@ -1589,11 +1834,20 @@ def export_data(cls, user_id: str) -> Dict[str, Dict[str, str]]: metadata_models: Sequence[BaseSnapshotMetadataModel] = cls.query( cls.committer_id == user_id ).fetch(projection=[cls.commit_type, cls.commit_message]) + user_data = {} for metadata_model in metadata_models: + message_without_user_ids = None + if metadata_model.commit_message is not None: + message_without_user_ids = re.sub( + feconf.USER_ID_REGEX, + '', + metadata_model.commit_message + ) + user_data[metadata_model.id] = { 'commit_type': metadata_model.commit_type, - 'commit_message': metadata_model.commit_message, + 'commit_message': message_without_user_ids, } return user_data @@ -1624,13 +1878,14 @@ def get_export_policy(cls) -> Dict[str, EXPORT_POLICY]: 'content': EXPORT_POLICY.NOT_APPLICABLE }) - # TODO(#13523): Change 'content' to domain object/TypedDict to - # remove Any from type-annotation below. 
+ # Here we use type Any because the method 'create' defined in subclasses + # of BaseSnapshotContentModel can accept different Dict/TypedDict types. + # So, to allow every Dict/TypedDict type we used Any here. @classmethod def create( - cls: Type[SELF_BASE_SNAPSHOT_CONTENT_MODEL], - snapshot_id: str, - content: Dict[str, Any] + cls: Type[SELF_BASE_SNAPSHOT_CONTENT_MODEL], + snapshot_id: str, + content: Dict[str, Any] ) -> SELF_BASE_SNAPSHOT_CONTENT_MODEL: """This method returns an instance of the BaseSnapshotContentModel for a construct with the common fields filled. @@ -1672,5 +1927,5 @@ class BaseMapReduceBatchResultsModel(BaseModel): store its batch results should subclass this class. """ - _use_cache = False - _use_memcache = False + _use_cache: bool = False + _use_memcache: bool = False diff --git a/core/storage/base_model/gae_models_test.py b/core/storage/base_model/gae_models_test.py index 140759c53286..d56a392f4706 100644 --- a/core/storage/base_model/gae_models_test.py +++ b/core/storage/base_model/gae_models_test.py @@ -23,7 +23,6 @@ import types from core import feconf -from core import python_utils from core.constants import constants from core.platform import models from core.tests import test_utils @@ -34,7 +33,7 @@ if MYPY: # pragma: no cover from mypy_imports import base_models -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) class BaseModelUnitTests(test_utils.GenericTestBase): @@ -45,10 +44,10 @@ def tearDown(self) -> None: for entity in base_models.BaseModel.get_all(): entity.delete() - super(BaseModelUnitTests, self).tearDown() + super().tearDown() def test_get_deletion_policy(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The get_deletion_policy() method is missing from the ' @@ -56,8 +55,17 @@ def test_get_deletion_policy(self) -> None: 'derived 
class.')): base_models.BaseModel.get_deletion_policy() + def test_apply_deletion_policy(self) -> None: + with self.assertRaisesRegex( + NotImplementedError, + re.escape( + 'The apply_deletion_policy() method is missing from the ' + 'derived class. It should be implemented in the ' + 'derived class.')): + base_models.BaseModel.apply_deletion_policy('test_user_id') + def test_has_reference_to_user_id(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The has_reference_to_user_id() method is missing from the ' @@ -66,11 +74,11 @@ def test_has_reference_to_user_id(self) -> None: base_models.BaseModel.has_reference_to_user_id('user_id') def test_error_cases_for_get_method(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( base_models.BaseModel.EntityNotFoundError, 'Entity for class BaseModel with id Invalid id not found'): base_models.BaseModel.get('Invalid id') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( base_models.BaseModel.EntityNotFoundError, 'Entity for class BaseModel with id Invalid id not found'): base_models.BaseModel.get('Invalid id', strict=True) @@ -79,7 +87,7 @@ def test_error_cases_for_get_method(self) -> None: base_models.BaseModel.get('Invalid id', strict=False)) def test_base_model_export_data_raises_not_implemented_error(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The export_data() method is missing from the ' @@ -90,7 +98,7 @@ def test_base_model_export_data_raises_not_implemented_error(self) -> None: def test_get_model_association_to_user_raises_not_implemented_error( self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The get_model_association_to_user() method is 
missing from ' @@ -99,7 +107,7 @@ def test_get_model_association_to_user_raises_not_implemented_error( base_models.BaseModel.get_model_association_to_user() def test_export_data(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The export_data() method is missing from the derived ' @@ -128,7 +136,8 @@ def test_generic_query_put_get_and_delete_operations(self) -> None: model.delete() all_models = base_models.BaseModel.get_all() self.assertEqual(all_models.count(), 0) - with self.assertRaisesRegexp( + self.assertEqual(model_id, 4) + with self.assertRaisesRegex( base_models.BaseModel.EntityNotFoundError, 'Entity for class BaseModel with id 4 not found' ): @@ -176,7 +185,7 @@ def test_put_without_update_timestamps(self) -> None: # Immediately calling `put` again fails, because update_timestamps needs # to be called first. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, re.escape('did not call update_timestamps()') ): model.put() @@ -184,7 +193,7 @@ def test_put_without_update_timestamps(self) -> None: model = base_models.BaseModel.get_by_id(model.id) # Getting a fresh model requires update_timestamps too. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, re.escape('did not call update_timestamps()') ): model.put() @@ -213,6 +222,8 @@ def test_put_multi(self) -> None: # Field last_updated won't get updated because update_last_updated_time # is set to False and last_updated already has some value. + # Here we use cast because we are narrowing down the type from + # List[Optional[BaseModel]] to List[BaseModel]. 
models_2_without_none = cast( List[base_models.BaseModel], base_models.BaseModel.get_multi(model_ids) @@ -220,21 +231,21 @@ def test_put_multi(self) -> None: base_models.BaseModel.update_timestamps_multi( models_2_without_none, update_last_updated_time=False) base_models.BaseModel.put_multi(models_2_without_none) - for model_id, last_updated in python_utils.ZIP( - model_ids, last_updated_values): + for model_id, last_updated in zip(model_ids, last_updated_values): model = base_models.BaseModel.get_by_id(model_id) self.assertEqual(model.last_updated, last_updated) # Field last_updated will get updated because update_last_updated_time # is set to True (by default). + # Here we use cast because we are narrowing down the type from + # List[Optional[BaseModel]] to List[BaseModel]. models_3_without_none = cast( List[base_models.BaseModel], base_models.BaseModel.get_multi(model_ids) ) base_models.BaseModel.update_timestamps_multi(models_3_without_none) base_models.BaseModel.put_multi(models_3_without_none) - for model_id, last_updated in python_utils.ZIP( - model_ids, last_updated_values): + for model_id, last_updated in zip(model_ids, last_updated_values): model = base_models.BaseModel.get_by_id(model_id) self.assertNotEqual(model.last_updated, last_updated) @@ -308,7 +319,7 @@ class BaseHumanMaintainedModelTests(test_utils.GenericTestBase): MODEL_ID = 'model1' def setUp(self) -> None: - super(BaseHumanMaintainedModelTests, self).setUp() + super().setUp() self.model_instance = TestBaseHumanMaintainedModel(id=self.MODEL_ID) def mock_put(self: base_models.BaseHumanMaintainedModel) -> None: """Function to modify and save the entities used for testing @@ -335,7 +346,7 @@ def mock_put(self: base_models.BaseHumanMaintainedModel) -> None: self.model_instance.put() def test_put(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, 'Use put_for_human or put_for_bot instead'): self.model_instance.put() @@ 
-360,7 +371,7 @@ def test_put_for_bot(self) -> None: self.model_instance.last_updated_by_human) def test_put_multi(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, 'Use put_multi_for_human or put_multi_for_bot instead'): TestBaseHumanMaintainedModel.put_multi([]) @@ -450,7 +461,7 @@ def test_base_class_get_instance_id_raises_not_implemented_error( ) -> None: # Raise NotImplementedError as _get_instance_id is to be overwritten # in child classes of BaseCommitLogEntryModel. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The get_instance_id() method is missing from the derived ' @@ -512,7 +523,7 @@ def test_export_data_trivial(self) -> None: def test_export_data_nontrivial(self) -> None: version_model = TestVersionedModel(id='version_model') model1 = version_model.SNAPSHOT_METADATA_CLASS.create( - 'model_id-1', 'committer_id', 'create', None, None) + 'model_id-1', 'committer_id', 'create', None, []) model1.update_timestamps() model1.put() model2 = version_model.SNAPSHOT_METADATA_CLASS.create( @@ -534,6 +545,32 @@ def test_export_data_nontrivial(self) -> None: } self.assertEqual(user_data, expected_data) + def test_export_when_commit_message_contains_user_id(self) -> None: + version_model = TestVersionedModel(id='version_model') + model1 = version_model.SNAPSHOT_METADATA_CLASS.create( + 'model_id-1', 'committer_id', 'create', + 'Test uid_abcdefghijabcdefghijabcdefghijab', []) + model1.update_timestamps() + model1.put() + model2 = version_model.SNAPSHOT_METADATA_CLASS.create( + 'model_id-2', 'committer_id', 'create', 'Hi this is a commit.', + [{'cmd': 'some_command'}, {'cmd2': 'another_command'}]) + model2.update_timestamps() + model2.put() + user_data = ( + version_model.SNAPSHOT_METADATA_CLASS.export_data('committer_id')) + expected_data = { + 'model_id-1': { + 'commit_type': 'create', + 'commit_message': 
'Test ', + }, + 'model_id-2': { + 'commit_type': 'create', + 'commit_message': 'Hi this is a commit.', + } + } + self.assertEqual(user_data, expected_data) + class BaseSnapshotContentModelTests(test_utils.GenericTestBase): @@ -556,7 +593,7 @@ class CommitLogEntryModelTests(test_utils.GenericTestBase): def test_get_commit(self) -> None: model1 = TestCommitLogEntryModel.create( entity_id='id', committer_id='user', - commit_cmds={}, commit_type='create', + commit_cmds=[], commit_type='create', commit_message='New commit created.', version=1, status=constants.ACTIVITY_STATUS_PUBLIC, community_owned=False ) @@ -577,13 +614,13 @@ def test_get_commit(self) -> None: def test_get_all_commits(self) -> None: model1 = TestCommitLogEntryModel.create( entity_id='id', committer_id='user', - commit_cmds={}, commit_type='create', + commit_cmds=[], commit_type='create', commit_message='New commit created.', version=1, status=constants.ACTIVITY_STATUS_PUBLIC, community_owned=False ) model2 = TestCommitLogEntryModel.create( entity_id='id', committer_id='user', - commit_cmds={}, commit_type='edit', + commit_cmds=[], commit_type='edit', commit_message='New commit created.', version=2, status=constants.ACTIVITY_STATUS_PUBLIC, community_owned=False ) @@ -603,7 +640,7 @@ class VersionedModelTests(test_utils.GenericTestBase): """Test methods for VersionedModel.""" def test_retrieval_of_multiple_version_models_for_fake_id(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( ValueError, 'The given entity_id fake_id is invalid'): TestVersionedModel.get_multi_versions( 'fake_id', [1, 2, 3]) @@ -613,7 +650,7 @@ def test_commit_with_model_instance_deleted_raises_error(self) -> None: model1.commit(feconf.SYSTEM_COMMITTER_ID, '', []) model1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'This model instance has been deleted.'): 
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', []) @@ -621,37 +658,37 @@ def test_trusted_commit_with_no_snapshot_metadata_raises_error( self ) -> None: model1 = TestVersionedModel(id='model_id1') - # TODO(#13528): Remove this test after the backend is fully - # type-annotated. Here ignore[assignment] is used to test method - # commit() for invalid SNAPSHOT_METADATA_CLASS. + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[assignment] + # is used to test method commit() for invalid SNAPSHOT_METADATA_CLASS. model1.SNAPSHOT_METADATA_CLASS = None # type: ignore[assignment] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'No snapshot metadata class defined.'): model1.commit(feconf.SYSTEM_COMMITTER_ID, '', []) model1 = TestVersionedModel(id='model_id1') - # TODO(#13528): Remove this test after the backend is fully - # type-annotated. Here ignore[assignment] is used to test method - # commit() for invalid SNAPSHOT_CONTENT_CLASS. + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[assignment] + # is used to test method commit() for invalid SNAPSHOT_CONTENT_CLASS. model1.SNAPSHOT_CONTENT_CLASS = None # type: ignore[assignment] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'No snapshot content class defined.'): model1.commit(feconf.SYSTEM_COMMITTER_ID, '', []) model1 = TestVersionedModel(id='model_id1') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Expected commit_cmds to be a list of dicts, received'): - # TODO(#13528): Remove this test after the backend is fully - # type-annotated. Here ignore[arg-type] is used to test method - # commit() for invalid input type. 
+ # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[arg-type] + # is used to test method commit() for invalid input type. model1.commit(feconf.SYSTEM_COMMITTER_ID, '', {}) # type: ignore[arg-type] model1 = TestVersionedModel(id='model_id1') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Expected commit_cmds to be a list of dicts, received'): - # TODO(#13528): Remove this test after the backend is fully - # type-annotated. Here ignore[list-item] is used to test method - # commit() for invalid input type. + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[list-item] + # is used to test method commit() for invalid input type. model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [[]]) # type: ignore[list-item] def test_put_raises_not_implemented_error_for_versioned_models( @@ -659,7 +696,7 @@ def test_put_raises_not_implemented_error_for_versioned_models( ) -> None: model1 = TestVersionedModel(id='model_id1') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The put() method is missing from the derived ' @@ -733,19 +770,19 @@ def test_commit_with_invalid_change_list_raises_error(self) -> None: model1 = TestVersionedModel(id='model_id1') # Test for invalid commit command. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Invalid commit_cmd:'): model1.commit( feconf.SYSTEM_COMMITTER_ID, '', [{'invalid_cmd': 'value'}]) # Test for invalid change list command. 
- with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Invalid change list command:'): model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [{'cmd': 'AUTO'}]) def test_revert_raises_error_when_not_allowed(self) -> None: model1 = TestVersionedModel(id='model_id1') - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Reverting objects of type TestVersionedModel is not allowed.'): model1.revert(model1, feconf.SYSTEM_COMMITTER_ID, '', 1) @@ -756,7 +793,7 @@ def test_get_snapshots_metadata_with_invalid_model_raises_error( model1 = TestVersionedModel(id='model_id1') model1.commit(feconf.SYSTEM_COMMITTER_ID, '', []) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Invalid version number 10 for model TestVersionedModel with id ' 'model_id1'): @@ -768,25 +805,23 @@ def test_get_version(self) -> None: model1.commit(feconf.SYSTEM_COMMITTER_ID, '', []) version_model = TestVersionedModel.get_version('model_id1', 2) - # Ruling out the possibility of None for mypy type checking. 
- assert version_model is not None self.assertEqual(version_model.version, 2) - version_model = ( + test_version_model = ( TestVersionedModel.get_version('nonexistent_id1', 4, strict=False)) - self.assertIsNone(version_model) + self.assertIsNone(test_version_model) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( base_models.BaseModel.EntityNotFoundError, 'Entity for class TestVersionedModel with id nonexistent_id1 ' 'not found'): TestVersionedModel.get_version('nonexistent_id1', 4, strict=True) - version_model = ( + test_version_model = ( TestVersionedModel.get_version('model_id1', 4, strict=False)) - self.assertIsNone(version_model) + self.assertIsNone(test_version_model) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( base_models.BaseModel.EntityNotFoundError, 'Entity for class TestSnapshotContentModel with id model_id1-4 ' 'not found'): @@ -808,18 +843,19 @@ def test_get_multi_versions_errors(self) -> None: model1.commit(feconf.SYSTEM_COMMITTER_ID, '', []) model1.commit(feconf.SYSTEM_COMMITTER_ID, '', []) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( ValueError, 'Requested version number 3 cannot be higher than the current ' 'version number 2.'): TestVersionedModel.get_multi_versions('model_id1', [1, 2, 3]) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( ValueError, 'At least one version number is invalid'): - # TODO(#13528): Remove this test after the backend is fully - # type-annotated. Here ignore[list-item] is used to test method - # get_multi_versions() for invalid input type. + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[list-item] + # is used to test method get_multi_versions() for invalid input + # type. 
TestVersionedModel.get_multi_versions('model_id1', [1, 1.5, 2]) # type: ignore[list-item] @@ -840,7 +876,7 @@ def test_create_raises_error_when_many_id_collisions_occur(self) -> None: TestBaseModel, 'get_by_id', types.MethodType( lambda _, __: True, TestBaseModel)) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( # type: ignore[no-untyped-call] + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'New id generator is producing too many collisions.') with assert_raises_regexp_context_manager, get_by_id_swap: diff --git a/core/storage/beam_job/gae_models.py b/core/storage/beam_job/gae_models.py index 3e8a7030d8e7..4a5046b2627e 100644 --- a/core/storage/beam_job/gae_models.py +++ b/core/storage/beam_job/gae_models.py @@ -24,18 +24,28 @@ from core import utils from core.platform import models -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +from typing import Dict, Final, Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() -_MAX_ID_GENERATION_ATTEMPTS = 5 +_MAX_ID_GENERATION_ATTEMPTS: Final = 5 -def _get_new_model_id(model_class: base_models.BaseModel) -> str: +def _get_new_model_id(model_class: Type[base_models.BaseModel]) -> str: """Generates an ID for a new model. Returns: str. The new ID. + + Raises: + RuntimeError. The function failed to generate a new ID. """ for _ in range(_MAX_ID_GENERATION_ATTEMPTS): new_id = utils.convert_to_hash(uuid.uuid4().hex, 22) @@ -126,8 +136,10 @@ class BeamJobRunModel(base_models.BaseModel): BeamJobState.UNKNOWN.value, ]) + # Here we use MyPy ignore because the signature of this method + # doesn't match with signature of super class's get_new_id() method. 
@classmethod - def get_new_id(cls) -> str: + def get_new_id(cls) -> str: # type: ignore[override] """Generates an ID for a new BeamJobRunModel. Returns: @@ -136,17 +148,27 @@ def get_new_id(cls) -> str: return _get_new_model_id(cls) @staticmethod - def get_deletion_policy(): - """Model doesn't contain any data directly corresponding to a user.""" + def get_deletion_policy() -> base_models.DELETION_POLICY: + """Model doesn't contain any data directly corresponding to a user. + + This model is marked as deleted after a period of time after its + creation. See MODEL_CLASSES_TO_MARK_AS_DELETED and + mark_outdated_models_as_deleted() in cron_services.py. + + This model is being deleted because it has no significance without these + other models, but note that this is being done only for data consistency + and is not a requirement of the wipeout process. + """ return base_models.DELETION_POLICY.NOT_APPLICABLE @staticmethod - def get_model_association_to_user(): + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: """Model doesn't contain user data.""" return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER @classmethod - def get_export_policy(cls): + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: """Model doesn't contain any data directly corresponding to a user.""" return dict(super(BeamJobRunModel, cls).get_export_policy(), **{ 'dataflow_job_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, @@ -175,8 +197,10 @@ class BeamJobRunResultModel(base_models.BaseModel): # The error output generated by the corresponding Apache Beam job. stderr = datastore_services.TextProperty(required=False, indexed=False) + # Here we use MyPy ignore because the signature of this method + # doesn't match with signature of super class's get_new_id() method. @classmethod - def get_new_id(cls) -> str: + def get_new_id(cls) -> str: # type: ignore[override] """Generates an ID for a new BeamJobRunResultModel. 
Returns: @@ -185,17 +209,23 @@ def get_new_id(cls) -> str: return _get_new_model_id(cls) @staticmethod - def get_deletion_policy(): - """Model doesn't contain any data directly corresponding to a user.""" + def get_deletion_policy() -> base_models.DELETION_POLICY: + """Model doesn't contain any data directly corresponding to a user. + + This model is marked as deleted after a period of time after its + creation. See MODEL_CLASSES_TO_MARK_AS_DELETED and + mark_outdated_models_as_deleted() in cron_services.py. + """ return base_models.DELETION_POLICY.NOT_APPLICABLE @staticmethod - def get_model_association_to_user(): + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: """Model doesn't contain user data.""" return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER @classmethod - def get_export_policy(cls): + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: """Model doesn't contain any data directly corresponding to a user.""" return dict(super(BeamJobRunResultModel, cls).get_export_policy(), **{ 'job_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, diff --git a/core/storage/beam_job/gae_models_test.py b/core/storage/beam_job/gae_models_test.py index b47df029287c..4172bf5391c7 100644 --- a/core/storage/beam_job/gae_models_test.py +++ b/core/storage/beam_job/gae_models_test.py @@ -20,14 +20,22 @@ from core.platform import models from core.tests import test_utils -(base_models, beam_job_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.beam_job]) +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import beam_job_models + +(base_models, beam_job_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.BEAM_JOB +]) class BeamJobRunModelTest(test_utils.GenericTestBase): """Tests for BeamJobRunModel.""" - def test_get_new_id_raises_error_after_too_many_failed_attempts(self): + def 
test_get_new_id_raises_error_after_too_many_failed_attempts( + self + ) -> None: model = beam_job_models.BeamJobRunModel( id=beam_job_models.BeamJobRunModel.get_new_id(), job_name='FooJob', latest_job_state=beam_job_models.BeamJobState.RUNNING.value) @@ -38,22 +46,23 @@ def test_get_new_id_raises_error_after_too_many_failed_attempts(self): utils, 'convert_to_hash', value=model.id) with collision_context: - self.assertRaisesRegexp( + with self.assertRaisesRegex( RuntimeError, - r'Failed to generate a unique ID after \d+ attempts', - beam_job_models.BeamJobRunModel.get_new_id) + r'Failed to generate a unique ID after \d+ attempts' + ): + beam_job_models.BeamJobRunModel.get_new_id() - def test_get_deletion_policy(self): + def test_get_deletion_policy(self) -> None: self.assertEqual( beam_job_models.BeamJobRunModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) - def test_get_model_association_to_user(self): + def test_get_model_association_to_user(self) -> None: self.assertEqual( beam_job_models.BeamJobRunModel.get_model_association_to_user(), base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) - def test_get_export_policy(self): + def test_get_export_policy(self) -> None: export_policy = beam_job_models.BeamJobRunModel.get_export_policy() self.assertEqual( export_policy['dataflow_job_id'], @@ -69,7 +78,9 @@ def test_get_export_policy(self): class BeamJobRunResultModelTest(test_utils.GenericTestBase): """Tests for BeamJobRunResultModel.""" - def test_get_new_id_raises_error_after_too_many_failed_attempts(self): + def test_get_new_id_raises_error_after_too_many_failed_attempts( + self + ) -> None: model = beam_job_models.BeamJobRunResultModel( id=beam_job_models.BeamJobRunResultModel.get_new_id(), job_id='123') model.update_timestamps() @@ -79,23 +90,24 @@ def test_get_new_id_raises_error_after_too_many_failed_attempts(self): utils, 'convert_to_hash', value=model.id) with collision_context: - self.assertRaisesRegexp( + with 
self.assertRaisesRegex( RuntimeError, - r'Failed to generate a unique ID after \d+ attempts', - beam_job_models.BeamJobRunResultModel.get_new_id) + r'Failed to generate a unique ID after \d+ attempts' + ): + beam_job_models.BeamJobRunResultModel.get_new_id() - def test_get_deletion_policy(self): + def test_get_deletion_policy(self) -> None: self.assertEqual( beam_job_models.BeamJobRunResultModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) - def test_get_model_association_to_user(self): + def test_get_model_association_to_user(self) -> None: self.assertEqual( beam_job_models.BeamJobRunResultModel .get_model_association_to_user(), base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) - def test_get_export_policy(self): + def test_get_export_policy(self) -> None: export_policy = ( beam_job_models.BeamJobRunResultModel.get_export_policy()) self.assertEqual( diff --git a/core/storage/blog/gae_models.py b/core/storage/blog/gae_models.py index d06eadc8b3c5..91f3fe21a939 100644 --- a/core/storage/blog/gae_models.py +++ b/core/storage/blog/gae_models.py @@ -21,21 +21,48 @@ from core import utils from core.platform import models +from typing import Dict, List, Literal, Optional, Sequence, TypedDict + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + (base_models, user_models) = models.Registry.import_models([ - models.NAMES.base_model, models.NAMES.user]) + models.Names.BASE_MODEL, models.Names.USER +]) datastore_services = models.Registry.import_datastore_services() +class BlogPostModelDataDict(TypedDict): + """Dictionary representing the export data of BlogPostModel.""" + + title: str + content: str + url_fragment: str + tags: List[str] + thumbnail_filename: str + published_on: float + + +class BlogAuthorDetailsModelDict(TypedDict): + """Dictionary representing the export data of BlogAuthorDetailsModel.""" + + displayed_author_name: str + author_bio: str + + 
class BlogPostModel(base_models.BaseModel): - """Model to store blog post data. + """Model to store blog post data. Functionality to allow authors to revert + back to earlier versions is not being built in as we do not want to maintain + commit history for blog post models. All models are therefore not versioned. - The id of instances of this class is in the form of random hash of - 12 chars. + The id of instances of this class is in the form of random hash of 12 chars. """ # We use the model id as a key in the Takeout dict. - ID_IS_USED_AS_TAKEOUT_KEY = True + ID_IS_USED_AS_TAKEOUT_KEY: Literal[True] = True # The ID of the user the blog post is authored by. author_id = datastore_services.StringProperty(indexed=True, required=True) @@ -56,19 +83,19 @@ class BlogPostModel(base_models.BaseModel): # post is a draft. thumbnail_filename = datastore_services.StringProperty(indexed=True) # Time when the blog post model was last published. Value will be None - # if the blog has never been published. + # if the blog post is not currently published. published_on = ( datastore_services.DateTimeProperty(indexed=True)) @staticmethod - def get_deletion_policy(): + def get_deletion_policy() -> base_models.DELETION_POLICY: """Model contains data to pseudonymize corresponding to a user: author_id field. """ return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE @classmethod - def has_reference_to_user_id(cls, user_id): + def has_reference_to_user_id(cls, user_id: str) -> bool: """Check whether BlogPostModel references user. Args: @@ -82,14 +109,15 @@ def has_reference_to_user_id(cls, user_id): ).get(keys_only=True) is not None @staticmethod - def get_model_association_to_user(): + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: """Model is exported as multiple instances per user since there can be multiple blog post models relevant to a user. 
""" return base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER @classmethod - def get_export_policy(cls): + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: """Model contains data corresponding to a user to export.""" return dict(super(BlogPostModel, cls).get_export_policy(), **{ # We do not export the author_id because we should not @@ -104,7 +132,7 @@ def get_export_policy(cls): }) @classmethod - def generate_new_blog_post_id(cls): + def generate_new_blog_post_id(cls) -> str: """Generates a new blog post ID which is unique and is in the form of random hash of 12 chars. @@ -126,7 +154,7 @@ def generate_new_blog_post_id(cls): 'New blog post id generator is producing too many collisions.') @classmethod - def create(cls, blog_post_id, author_id): + def create(cls, blog_post_id: str, author_id: str) -> BlogPostModel: """Creates a new BlogPostModel entry. Args: @@ -159,7 +187,7 @@ def create(cls, blog_post_id, author_id): return entity @classmethod - def get_by_url_fragment(cls, url_fragment): + def get_by_url_fragment(cls, url_fragment: str) -> Optional[BlogPostModel]: """Gets BlogPostModel by url_fragment. Returns None if the blog post with the given url_fragment doesn't exist. @@ -176,7 +204,7 @@ def get_by_url_fragment(cls, url_fragment): ).get() @classmethod - def export_data(cls, user_id): + def export_data(cls, user_id: str) -> Dict[str, BlogPostModelDataDict]: """Exports the data from BlogPostModel into dict format for Takeout. Args: @@ -185,10 +213,9 @@ def export_data(cls, user_id): Returns: dict. Dictionary of the data from BlogPostModel. 
""" - user_data = {} - blog_post_models = cls.get_all().filter( + user_data: Dict[str, BlogPostModelDataDict] = {} + blog_post_models: Sequence[BlogPostModel] = cls.get_all().filter( cls.author_id == user_id).fetch() - for blog_post_model in blog_post_models: user_data[blog_post_model.id] = { 'title': blog_post_model.title, @@ -228,18 +255,18 @@ class BlogPostSummaryModel(base_models.BaseModel): # post is a draft. thumbnail_filename = datastore_services.StringProperty(indexed=True) # Time when the blog post model was last published. Value will be None - # if the blog post has never been published. + # if the blog post is currently not published. published_on = (datastore_services.DateTimeProperty(indexed=True)) @staticmethod - def get_deletion_policy(): + def get_deletion_policy() -> base_models.DELETION_POLICY: """Model contains data to pseudonymize corresponding to a user: author_id field. """ return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE @classmethod - def has_reference_to_user_id(cls, user_id): + def has_reference_to_user_id(cls, user_id: str) -> bool: """Check whether BlogPostSummaryModel exists for user. Args: @@ -253,7 +280,8 @@ def has_reference_to_user_id(cls, user_id): ).get(keys_only=True) is not None @staticmethod - def get_model_association_to_user(): + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: """Model data has already been associated as a part of the BlogPostModel to the user and thus does not need a separate user association. @@ -261,7 +289,7 @@ def get_model_association_to_user(): return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER @classmethod - def get_export_policy(cls): + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: """Model contains data corresponding to a user (author_id), but this isn't exported because noteworthy details that belong to this model have already been exported as a part of the BlogPostModel. 
@@ -292,7 +320,7 @@ class BlogPostRightsModel(base_models.BaseModel): indexed=True, required=True, default=False) @staticmethod - def get_deletion_policy(): + def get_deletion_policy() -> base_models.DELETION_POLICY: """Model contains data to be deleted corresponding to a user: editor_ids field. It does not delete the model but removes the user id from the list of editor IDs corresponding to a blog post rights model. @@ -300,7 +328,7 @@ def get_deletion_policy(): return base_models.DELETION_POLICY.DELETE @classmethod - def deassign_user_from_all_blog_posts(cls, user_id): + def deassign_user_from_all_blog_posts(cls, user_id: str) -> None: """Removes user_id from the list of editor_ids from all the blog post rights models. @@ -315,7 +343,7 @@ def deassign_user_from_all_blog_posts(cls, user_id): cls.put_multi(blog_post_rights_models) @classmethod - def has_reference_to_user_id(cls, user_id): + def has_reference_to_user_id(cls, user_id: str) -> bool: """Check whether BlogPostRightsModel references to the given user. Args: @@ -331,12 +359,19 @@ def has_reference_to_user_id(cls, user_id): cls.editor_ids == user_id).get(keys_only=True) is not None @classmethod - def get_published_models_by_user(cls, user_id, limit=None): + def get_published_models_by_user( + cls, + user_id: str, + offset: int = 0, + limit: Optional[int] = None, + ) -> List[BlogPostRightsModel]: """Retrieves the blog post rights objects for published blog posts for which the given user is an editor. Args: user_id: str. ID of the author of the blog post. + offset: int. Number of query results to skip from top. Defaults to + 0, in which case no results are skipped. limit: int|None. The maximum number of BlogPostRightsModels to be fetched. If None, all existing published models by user will be fetched.
@@ -349,10 +384,18 @@ def get_published_models_by_user(cls, user_id, limit=None): query = cls.query( cls.editor_ids == user_id, cls.blog_post_is_published == True # pylint: disable=singleton-comparison ).order(-cls.last_updated) - return query.fetch(limit) if limit is not None else query.fetch() + return list( + query.fetch( + limit, offset=offset + ) if limit is not None else query.fetch(offset=offset) + ) @classmethod - def get_draft_models_by_user(cls, user_id, limit=None): + def get_draft_models_by_user( + cls, + user_id: str, + limit: Optional[int] = None + ) -> List[BlogPostRightsModel]: """Retrieves the blog post rights objects for draft blog posts for which the given user is an editor. @@ -370,10 +413,12 @@ def get_draft_models_by_user(cls, user_id, limit=None): query = cls.query( cls.editor_ids == user_id, cls.blog_post_is_published == False # pylint: disable=singleton-comparison ).order(-cls.last_updated) - return query.fetch(limit) if limit is not None else query.fetch() + return list( + query.fetch(limit) if limit is not None else query.fetch() + ) @classmethod - def get_all_by_user(cls, user_id): + def get_all_by_user(cls, user_id: str) -> List[BlogPostRightsModel]: """Retrieves the blog post rights objects for all blog posts for which the given user is an editor. @@ -384,10 +429,11 @@ def get_all_by_user(cls, user_id): list(BlogPostRightsModel). The list of BlogPostRightsModel objects in which the given user is an editor. """ - return cls.query(cls.editor_ids == user_id).fetch() + return list(cls.query(cls.editor_ids == user_id).fetch()) @staticmethod - def get_model_association_to_user(): + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: """Model is exported as one instance shared across users since multiple users can edit the blog post. 
""" @@ -397,7 +443,7 @@ def get_model_association_to_user(): ) @classmethod - def get_export_policy(cls): + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: """Model contains data to export corresponding to a user.""" return dict(super(BlogPostRightsModel, cls).get_export_policy(), **{ 'editor_ids': base_models.EXPORT_POLICY.EXPORTED, @@ -405,7 +451,7 @@ def get_export_policy(cls): }) @classmethod - def export_data(cls, user_id): + def export_data(cls, user_id: str) -> Dict[str, List[str]]: """(Takeout) Export user-relevant properties of BlogPostsRightsModel. Args: @@ -416,7 +462,8 @@ def export_data(cls, user_id): in a python dict format. In this case, we are returning all the ids of blog posts for which the user is an editor. """ - editable_blog_posts = cls.query(cls.editor_ids == user_id).fetch() + editable_blog_posts: Sequence[BlogPostRightsModel] = ( + cls.query(cls.editor_ids == user_id).fetch()) editable_blog_post_ids = [blog.id for blog in editable_blog_posts] return { @@ -424,7 +471,7 @@ def export_data(cls, user_id): } @classmethod - def get_field_name_mapping_to_takeout_keys(cls): + def get_field_name_mapping_to_takeout_keys(cls) -> Dict[str, str]: """Defines the mapping of field names to takeout keys since this model is exported as one instance shared across users. """ @@ -433,7 +480,7 @@ def get_field_name_mapping_to_takeout_keys(cls): } @classmethod - def create(cls, blog_post_id, author_id): + def create(cls, blog_post_id: str, author_id: str) -> BlogPostRightsModel: """Creates a new BlogPostRightsModel entry. Args: @@ -460,3 +507,146 @@ def create(cls, blog_post_id, author_id): entity.put() return entity + + +class BlogAuthorDetailsModel(base_models.BaseModel): + """Model for storing user's blog author details. + + The id/key of instances of this model is randomly generated string of + length 12. + """ + + # We use the model id as a key in the Takeout dict. 
+ ID_IS_USED_AS_TAKEOUT_KEY: Literal[True] = True + # The user ID of the blog author. + author_id = datastore_services.StringProperty(indexed=True, required=True) + # The publicly viewable name of the user to display as author name in blog + # posts. + displayed_author_name = ( + datastore_services.StringProperty(indexed=True, required=True)) + # User specified biography to be shown on their blog author page. + author_bio = datastore_services.TextProperty(indexed=False) + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """Model is exported as one instance per user.""" + return base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER + + @staticmethod + def get_deletion_policy() -> base_models.DELETION_POLICY: + """Model contains data to pseudonymize corresponding to a user: + id field. + """ + return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE + + @classmethod + def has_reference_to_user_id(cls, user_id: str) -> bool: + """Check whether BlogAuthorDetailsModel exists for user. + + Args: + user_id: str. The ID of the user whose data should be checked. + + Returns: + bool. Whether any models refer to the given user ID. + """ + return cls.query( + cls.author_id == user_id + ).get(keys_only=True) is not None + + @classmethod + def export_data(cls, user_id: str) -> Dict[str, BlogAuthorDetailsModelDict]: + """Exports the data from BlogAuthorDetailsModel into dict format for + Takeout. + + Args: + user_id: str. The ID of the user whose data should be exported. + + Returns: + dict. Dictionary of the data from BlogAuthorDetailsModel.
+ """ + + author_model = cls.query(cls.author_id == user_id).get() + if author_model: + return { + 'displayed_author_name': author_model.displayed_author_name, + 'author_bio': author_model.author_bio + } + else: + return {} + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model contains data corresponding to a user to export.""" + return dict(super(BlogAuthorDetailsModel, cls).get_export_policy(), **{ + # We do not export the author id of the model because we should not + # export internal user ids. + 'author_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'displayed_author_name': base_models.EXPORT_POLICY.EXPORTED, + 'author_bio': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + }) + + @classmethod + def generate_new_instance_id(cls) -> str: + """Generates an ID which is unique and is in the form of random hash of + 12 chars. + + Returns: + str. An instance ID that is different from the IDs of all the + existing models. + + Raises: + Exception. There were too many collisions with existing model IDs + when attempting to generate a new ID. + """ + for _ in range(base_models.MAX_RETRIES): + instance_id = utils.convert_to_hash( + str(utils.get_random_int(base_models.RAND_RANGE)), + base_models.ID_LENGTH) + if not cls.get_by_id(instance_id): + return instance_id + raise Exception( + 'New instance id generator is producing too many collisions.') + + @classmethod + def create( + cls, author_id: str, displayed_author_name: str, author_bio: str + ) -> None: + """Creates a new BlogAuthorDetailsModel entry. + + Args: + author_id: str. The user ID of the author. + displayed_author_name: str. The author name of the user. + author_bio: str. The author bio of the user. + + Raises: + Exception. A blog author details model with the given ID exists + already.
+ """ + if cls.get_by_author(author_id): + raise Exception( + 'A blog author details model for given user already exists.') + model_id = cls.generate_new_instance_id() + entity = cls( + id=model_id, + author_id=author_id, + displayed_author_name=displayed_author_name, + author_bio=author_bio) + entity.update_timestamps() + entity.put() + + @classmethod + def get_by_author(cls, author_id: str) -> Optional[BlogAuthorDetailsModel]: + """Retrieves the author details objects for the given author id. + + Args: + author_id: str. User ID of the author. + + Returns: + BlogAuthorDetailsModel. BlogAuthorDetailsModel corresponding to the + given author_id. + """ + return cls.query(cls.author_id == author_id).get() diff --git a/core/storage/blog/gae_models_test.py b/core/storage/blog/gae_models_test.py index 2b78d149b00f..dd42d076a55d 100644 --- a/core/storage/blog/gae_models_test.py +++ b/core/storage/blog/gae_models_test.py @@ -21,27 +21,36 @@ import datetime import types +from core import feconf from core import utils from core.platform import models from core.tests import test_utils -(base_models, blog_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.blog, models.NAMES.user]) +from typing import Dict, Final, List + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import blog_models + +(base_models, blog_models, user_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.BLOG, models.Names.USER +]) class BlogPostModelTest(test_utils.GenericTestBase): """Tests for the BlogPostModel class.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID = 'user_1' - CONTENT = 'Dummy Content' - TITLE = 'Dummy Title' - TAGS = ['tag1', 'tag2', 'tag3'] - THUMBNAIL = 'xyzabc' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID: Final = 'user_1' + CONTENT: Final = 'Dummy Content' + TITLE: Final = 'Dummy Title' + TAGS: Final = ['tag1', 'tag2', 'tag3'] + THUMBNAIL: Final = 'xyzabc' - 
def setUp(self): + def setUp(self) -> None: """Set up blog post models in datastore for use in testing.""" - super(BlogPostModelTest, self).setUp() + super().setUp() self.blog_post_model = blog_models.BlogPostModel( id='blog_one', @@ -56,12 +65,35 @@ def setUp(self): self.blog_post_model.update_timestamps() self.blog_post_model.put() - def test_get_deletion_policy(self): + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + blog_models.BlogPostModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'author_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'title': base_models.EXPORT_POLICY.EXPORTED, + 'content': base_models.EXPORT_POLICY.EXPORTED, + 'url_fragment': base_models.EXPORT_POLICY.EXPORTED, + 'tags': base_models.EXPORT_POLICY.EXPORTED, + 'thumbnail_filename': base_models.EXPORT_POLICY.EXPORTED, + 'published_on': base_models.EXPORT_POLICY.EXPORTED, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + blog_models.BlogPostModel.get_export_policy(), + expected_export_policy_dict) + + def test_get_deletion_policy(self) -> None: self.assertEqual( blog_models.BlogPostModel.get_deletion_policy(), base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE) - def test_has_reference_to_user_id(self): + def test_has_reference_to_user_id(self) -> None: self.assertTrue( blog_models.BlogPostModel .has_reference_to_user_id(self.USER_ID)) @@ -69,16 +101,16 @@ def test_has_reference_to_user_id(self): blog_models.BlogPostModel .has_reference_to_user_id(self.NONEXISTENT_USER_ID)) - def test_raise_exception_by_mocking_collision(self): + def test_raise_exception_by_mocking_collision(self) -> None: """Tests create and generate_new_blog_post_id methods for raising exception. 
""" blog_post_model_cls = blog_models.BlogPostModel # Test create method. - with self.assertRaisesRegexp( - Exception, 'A blog post with the given blog post ID exists' - ' already.'): + with self.assertRaisesRegex( + Exception, + 'A blog post with the given blog post ID exists already.'): # Swap dependent method get_by_id to simulate collision every time. with self.swap( @@ -90,7 +122,7 @@ def test_raise_exception_by_mocking_collision(self): 'blog_post_id', self.USER_ID) # Test generate_new_blog_post_id method. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'New blog post id generator is producing too many collisions.'): # Swap dependent method get_by_id to simulate collision every time. @@ -101,14 +133,14 @@ def test_raise_exception_by_mocking_collision(self): blog_post_model_cls)): blog_post_model_cls.generate_new_blog_post_id() - def test_get_by_url_fragment(self): + def test_get_by_url_fragment(self) -> None: self.assertEqual( blog_models.BlogPostModel.get_by_url_fragment( 'sample-url-fragment'), self.blog_post_model ) - def test_creating_new_blog_post_model_instance(self): + def test_creating_new_blog_post_model_instance(self) -> None: blog_post_model_id = ( blog_models.BlogPostModel.generate_new_blog_post_id()) blog_post_model_instance = ( @@ -117,14 +149,14 @@ def test_creating_new_blog_post_model_instance(self): self.assertEqual(blog_post_model_instance.id, blog_post_model_id) self.assertEqual(blog_post_model_instance.author_id, self.USER_ID) - def test_export_data_trivial(self): + def test_export_data_trivial(self) -> None: user_data = blog_models.BlogPostModel.export_data( self.NONEXISTENT_USER_ID ) - test_data = {} + test_data: Dict[str, blog_models.BlogPostModelDataDict] = {} self.assertEqual(user_data, test_data) - def test_export_data_nontrivial(self): + def test_export_data_nontrivial(self) -> None: user_data = blog_models.BlogPostModel.export_data(self.USER_ID) blog_post_id = 'blog_one' test_data = { @@ -144,16 +176,16 @@ 
def test_export_data_nontrivial(self): class BlogPostSummaryModelTest(test_utils.GenericTestBase): """Tests for the BlogPostSummaryModel class.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID = 'user_1' - SUMMARY = 'Dummy Summary' - TITLE = 'Dummy Title' - TAGS = ['tag1', 'tag2', 'tag3'] - THUMBNAIL = 'xyzabc' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID: Final = 'user_1' + SUMMARY: Final = 'Dummy Summary' + TITLE: Final = 'Dummy Title' + TAGS: Final = ['tag1', 'tag2', 'tag3'] + THUMBNAIL: Final = 'xyzabc' - def setUp(self): + def setUp(self) -> None: """Set up models in datastore for use in testing.""" - super(BlogPostSummaryModelTest, self).setUp() + super().setUp() self.blog_post_summary_model_old = ( blog_models.BlogPostSummaryModel( @@ -183,12 +215,35 @@ def setUp(self): self.blog_post_summary_model_new.update_timestamps() self.blog_post_summary_model_new.put() - def test_get_deletion_policy(self): + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'author_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'title': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'summary': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'tags': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'published_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + blog_models.BlogPostSummaryModel.get_export_policy(), + expected_export_policy_dict) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + blog_models.BlogPostSummaryModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_deletion_policy(self) -> None: self.assertEqual( blog_models.BlogPostSummaryModel.get_deletion_policy(), base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE) - def test_has_reference_to_user_id(self): + def test_has_reference_to_user_id(self) -> None: self.assertTrue( blog_models.BlogPostSummaryModel .has_reference_to_user_id(self.USER_ID)) @@ -196,7 +251,7 @@ def test_has_reference_to_user_id(self): blog_models.BlogPostSummaryModel .has_reference_to_user_id(self.NONEXISTENT_USER_ID)) - def test_get_blog_post_summary_models(self): + def test_get_blog_post_summary_models(self) -> None: blog_post_ids = ['blog_two', 'blog_one'] blog_post_summary_models = ( blog_models.BlogPostSummaryModel.get_multi(blog_post_ids)) @@ -210,15 +265,15 @@ def test_get_blog_post_summary_models(self): class BlogPostRightsModelTest(test_utils.GenericTestBase): """Tests for the BlogPostRightsModel class.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID = 'user_1' - USER_ID_NEW = 'user_2' - USER_ID_OLD = 'user_3' - BLOG_POST_ID_NEW = 'blog_post_id' - BLOG_POST_ID_OLD = 'blog_post_old_id' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID: Final = 'user_1' + USER_ID_NEW: Final = 'user_2' + USER_ID_OLD: Final = 'user_3' + BLOG_POST_ID_NEW: Final = 'blog_post_id' + BLOG_POST_ID_OLD: Final = 'blog_post_old_id' - def setUp(self): - super(BlogPostRightsModelTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.blog_post_rights_model = blog_models.BlogPostRightsModel( id=self.BLOG_POST_ID_NEW, editor_ids=[self.USER_ID_NEW], @@ -235,12 +290,39 @@ def setUp(self): self.blog_post_rights_draft_model.update_timestamps() self.blog_post_rights_draft_model.put() - def test_get_deletion_policy(self): + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'editor_ids': base_models.EXPORT_POLICY.EXPORTED, + 'blog_post_is_published': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + blog_models.BlogPostRightsModel.get_export_policy(), + expected_export_policy_dict) + + def test_get_field_name_mapping_to_takeout_keys(self) -> None: + self.assertEqual( + blog_models.BlogPostRightsModel. + get_field_name_mapping_to_takeout_keys(), + { + 'editor_ids': 'editable_blog_post_ids' + }) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + blog_models.BlogPostRightsModel. + get_model_association_to_user(), + base_models. + MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_SHARED_ACROSS_USERS) + + def test_get_deletion_policy(self) -> None: self.assertEqual( blog_models.BlogPostRightsModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) - def test_has_reference_to_user_id(self): + def test_has_reference_to_user_id(self) -> None: self.assertTrue( blog_models.BlogPostRightsModel .has_reference_to_user_id(self.USER_ID)) @@ -251,7 +333,7 @@ def test_has_reference_to_user_id(self): blog_models.BlogPostRightsModel .has_reference_to_user_id(self.NONEXISTENT_USER_ID)) - def test_get_all_by_user_for_fetching_all_rights_model(self): + def test_get_all_by_user_for_fetching_all_rights_model(self) -> None: self.assertEqual( blog_models.BlogPostRightsModel.get_all_by_user(self.USER_ID_NEW), [self.blog_post_rights_model, self.blog_post_rights_draft_model]) @@ -259,7 +341,7 @@ def test_get_all_by_user_for_fetching_all_rights_model(self): blog_models.BlogPostRightsModel.get_all_by_user(self.USER_ID), [self.blog_post_rights_draft_model]) - def test_get_published_models_by_user_when_limit_is_set(self): + def test_get_published_models_by_user_when_limit_is_set(self) -> None: blog_post_rights_draft_model = blog_models.BlogPostRightsModel( id='blog_post_two', editor_ids=[self.USER_ID_NEW], @@ -268,40 +350,45 @@ def 
test_get_published_models_by_user_when_limit_is_set(self): blog_post_rights_draft_model.update_timestamps() blog_post_rights_draft_model.put() - blog_post_rights_punlished_model = blog_models.BlogPostRightsModel( + blog_post_rights_published_model = blog_models.BlogPostRightsModel( id='blog_post_one', editor_ids=[self.USER_ID_NEW], blog_post_is_published=True, ) - blog_post_rights_punlished_model.update_timestamps() - blog_post_rights_punlished_model.put() + blog_post_rights_published_model.update_timestamps() + blog_post_rights_published_model.put() # The latest two published blog post rights models should be fetched. self.assertEqual( blog_models.BlogPostRightsModel.get_published_models_by_user( - self.USER_ID_NEW, 2), - [blog_post_rights_punlished_model, self.blog_post_rights_model]) + self.USER_ID_NEW, 0, 2), + [blog_post_rights_published_model, self.blog_post_rights_model]) # The latest published blog post rights model should be fetched. self.assertEqual( blog_models.BlogPostRightsModel.get_published_models_by_user( - self.USER_ID_NEW, 1), [blog_post_rights_punlished_model]) + self.USER_ID_NEW, 0, 1), [blog_post_rights_published_model]) + + # The second latest published blog post rights model should be fetched. 
+ self.assertEqual( + blog_models.BlogPostRightsModel.get_published_models_by_user( + self.USER_ID_NEW, 1, 1), [self.blog_post_rights_model]) - def test_get_published_models_by_user_when_no_limit(self): - blog_post_rights_punlished_model = blog_models.BlogPostRightsModel( + def test_get_published_models_by_user_when_no_limit(self) -> None: + blog_post_rights_published_model = blog_models.BlogPostRightsModel( id='blog_post_one', editor_ids=[self.USER_ID_NEW], blog_post_is_published=True, ) - blog_post_rights_punlished_model.update_timestamps() - blog_post_rights_punlished_model.put() + blog_post_rights_published_model.update_timestamps() + blog_post_rights_published_model.put() self.assertEqual( len( blog_models.BlogPostRightsModel .get_published_models_by_user(self.USER_ID_NEW)), 2) - def test_get_draft_models_by_user_when_limit_is_set(self): + def test_get_draft_models_by_user_when_limit_is_set(self) -> None: blog_post_rights_draft_model = blog_models.BlogPostRightsModel( id='blog_post_two', editor_ids=[self.USER_ID_NEW], @@ -329,7 +416,7 @@ def test_get_draft_models_by_user_when_limit_is_set(self): blog_models.BlogPostRightsModel.get_draft_models_by_user( self.USER_ID_NEW, 1), [blog_post_rights_draft_model]) - def test_get_draft_models_by_user_when_no_limit_is_set(self): + def test_get_draft_models_by_user_when_no_limit_is_set(self) -> None: blog_post_rights_draft_model = blog_models.BlogPostRightsModel( id='blog_post_two', editor_ids=[self.USER_ID_NEW], @@ -342,7 +429,7 @@ def test_get_draft_models_by_user_when_no_limit_is_set(self): len(blog_models.BlogPostRightsModel.get_draft_models_by_user( self.USER_ID_NEW)), 2) - def test_export_data_on_editor(self): + def test_export_data_on_editor(self) -> None: """Test export data on user who is editor of the blog post.""" blog_post_ids = ( @@ -356,7 +443,7 @@ def test_export_data_on_editor(self): } self.assertEqual(expected_blog_post_ids, blog_post_ids) - def test_export_data_on_uninvolved_user(self): + def 
test_export_data_on_uninvolved_user(self) -> None: """Test for empty lists when user has no editor rights on existing blog posts. """ @@ -364,18 +451,18 @@ def test_export_data_on_uninvolved_user(self): blog_post_ids = ( blog_models.BlogPostRightsModel.export_data( self.NONEXISTENT_USER_ID)) - expected_blog_post_ids = { + expected_blog_post_ids: Dict[str, List[str]] = { 'editable_blog_post_ids': [], } self.assertEqual(expected_blog_post_ids, blog_post_ids) - def test_raise_exception_by_mocking_collision(self): + def test_raise_exception_by_mocking_collision(self) -> None: """Tests create methods for raising exception.""" blog_post_rights_model_cls = blog_models.BlogPostRightsModel # Test create method. - with self.assertRaisesRegexp( + with self.assertRaisesRegex( Exception, 'Blog Post ID conflict on creating new blog post rights model.'): # Swap dependent method get_by_id to simulate collision every time. @@ -387,7 +474,7 @@ def test_raise_exception_by_mocking_collision(self): blog_post_rights_model_cls.create( 'blog_one', self.USER_ID) - def test_creating_new_blog_post_rights_model(self): + def test_creating_new_blog_post_rights_model(self) -> None: blog_post_model_id = ( blog_models.BlogPostModel.generate_new_blog_post_id()) blog_post_rights_model_instance = ( @@ -398,7 +485,7 @@ def test_creating_new_blog_post_rights_model(self): self.assertEqual( blog_post_rights_model_instance.editor_ids, [self.USER_ID]) - def test_deassign_user_from_all_blog_posts(self): + def test_deassign_user_from_all_blog_posts(self) -> None: """Tests removing user id from the list of editor ids for blog post assigned to a user. 
""" @@ -408,3 +495,144 @@ def test_deassign_user_from_all_blog_posts(self): blog_post_rights_models = blog_models.BlogPostRightsModel.get_all() for model in blog_post_rights_models: self.assertTrue(self.USER_ID_NEW not in model.editor_ids) + + def test_deassign_user_from_blog_post_handles_invalid_user_id(self) -> None: + # If the user is not in the editor list of any blog post, the + # method 'BlogPostRightsModel.deassign_user_from_all_blog_posts()' + # should do nothing and exit. + blog_models.BlogPostRightsModel.deassign_user_from_all_blog_posts( + self.NONEXISTENT_USER_ID) + blog_post_rights_models = blog_models.BlogPostRightsModel.get_all() + for model in blog_post_rights_models: + self.assertTrue(self.NONEXISTENT_USER_ID not in model.editor_ids) + + +class BlogAuthorDetailsModelTest(test_utils.GenericTestBase): + """Tests for BlogAuthorDetailsModel class.""" + + NONEXISTENT_USER_ID: Final = 'id_x' + USER_1_ID: Final = 'user_id' + USER_1_ROLE: Final = feconf.ROLE_ID_BLOG_ADMIN + USER_1_NAME: Final = 'user' + USER_2_ID: Final = 'user_two_id' + USER_2_ROLE: Final = feconf.ROLE_ID_BLOG_POST_EDITOR + USER_2_NAME: Final = 'user two' + GENERIC_DATE: Final = datetime.datetime(2019, 5, 20) + GENERIC_EPOCH: Final = utils.get_time_in_millisecs( + datetime.datetime(2019, 5, 20) + ) + GENERIC_IMAGE_URL: Final = 'www.example.com/example.png' + GENERIC_USER_BIO: Final = 'I am a user of Oppia!' + + def setUp(self) -> None: + """Set up author details models in datastore for use in testing.""" + super().setUp() + author_model_one = blog_models.BlogAuthorDetailsModel( + id='author_model', + author_id=self.USER_1_ID, + displayed_author_name=self.USER_1_NAME, + author_bio=self.GENERIC_USER_BIO + ) + author_model_one.update_timestamps() + author_model_one.put() + + def test_raise_exception_by_mocking_collision(self) -> None: + """Tests create and generate_new_instance_id methods for raising + exception. 
+ """ + blog_author_details_model_cls = blog_models.BlogAuthorDetailsModel + + # Test create method. + with self.assertRaisesRegex( + Exception, + 'A blog author details model for given user already exists.'): + + # Swap dependent method get_by_author to simulate collision every + # time. + with self.swap( + blog_author_details_model_cls, 'get_by_author', + types.MethodType( + lambda x, y: True, + blog_author_details_model_cls)): + blog_author_details_model_cls.create( + self.USER_1_ID, 'displayed_author_name', '') + + # Test generate_new_blog_post_id method. + with self.assertRaisesRegex( + Exception, + 'New instance id generator is producing too many collisions.'): + # Swap dependent method get_by_id to simulate collision every time. + with self.swap( + blog_author_details_model_cls, 'get_by_id', + types.MethodType( + lambda x, y: True, + blog_author_details_model_cls)): + blog_author_details_model_cls.generate_new_instance_id() + + def test_get_deletion_policy_is_delete(self) -> None: + self.assertEqual( + blog_models.BlogAuthorDetailsModel.get_deletion_policy(), + base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + blog_models.BlogAuthorDetailsModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + blog_models.BlogAuthorDetailsModel.get_export_policy(), { + 'author_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'displayed_author_name': base_models.EXPORT_POLICY.EXPORTED, + 'author_bio': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + ) + + def test_export_data_on_nonexistent_author(self) -> None: + """Test if export_data returns None when user's author detail model is + not in datastore. 
+ """ + self.assertEqual( + blog_models.BlogAuthorDetailsModel.export_data( + self.NONEXISTENT_USER_ID + ), {} + ) + + def test_export_data_on_existent_author(self) -> None: + """Test if export_data works as intended on a user's author detail model + in datastore. + """ + user_data = ( + blog_models.BlogAuthorDetailsModel.export_data(self.USER_1_ID)) + expected_data = { + 'displayed_author_name': self.USER_1_NAME, + 'author_bio': self.GENERIC_USER_BIO + } + self.assertEqual(expected_data, user_data) + + def test_has_reference_to_user_id(self) -> None: + # Case for blog post author. + self.assertTrue( + blog_models.BlogAuthorDetailsModel.has_reference_to_user_id( + self.USER_1_ID) + ) + + # Case for a non existing user. + self.assertFalse( + blog_models.BlogAuthorDetailsModel.has_reference_to_user_id( + self.NONEXISTENT_USER_ID) + ) + + def test_creating_new_author_detail_model_instance(self) -> None: + blog_models.BlogAuthorDetailsModel.create( + self.USER_2_ID, self.USER_2_NAME, self.GENERIC_USER_BIO) + model_instance = blog_models.BlogAuthorDetailsModel.get_by_author( + self.USER_2_ID) + # Ruling out the possibility of None for mypy type checking. + assert model_instance is not None + self.assertEqual(model_instance.author_id, self.USER_2_ID) + self.assertEqual(model_instance.displayed_author_name, self.USER_2_NAME) + self.assertEqual(model_instance.author_bio, self.GENERIC_USER_BIO) diff --git a/core/storage/classifier/gae_models.py b/core/storage/classifier/gae_models.py index 1ba182ba7fcd..69998dc41213 100644 --- a/core/storage/classifier/gae_models.py +++ b/core/storage/classifier/gae_models.py @@ -22,17 +22,21 @@ from core import utils from core.platform import models -from typing import Any, Dict, List, Optional, Sequence, Tuple, Union +from typing import Dict, Final, List, Optional, Sequence, Tuple, Union MYPY = False if MYPY: # pragma: no cover + # Here, we are importing 'classifier_domain' and 'classifier_services' + # only for type checking. 
+ from core.domain import classifier_domain # pylint: disable=invalid-import # isort:skip + from core.domain import classifier_services # pylint: disable=invalid-import # isort:skip from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() -NEW_AND_PENDING_TRAINING_JOBS_FETCH_LIMIT = 100 +NEW_AND_PENDING_TRAINING_JOBS_FETCH_LIMIT: Final = 100 TrainingDataUnionType = Union[ Dict[str, Union[int, List[str]]], @@ -107,7 +111,7 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: }) @classmethod - def _generate_id(cls, exp_id: str) -> str: + def generate_id(cls, exp_id: str) -> str: """Generates a unique id for the training job of the form '[exp_id].[random hash of 16 chars]'. @@ -137,16 +141,16 @@ def _generate_id(cls, exp_id: str) -> str: @classmethod def create( - cls, - algorithm_id: str, - interaction_id: str, - exp_id: str, - exp_version: int, - next_scheduled_check_time: datetime.datetime, - training_data: TrainingDataUnionType, - state_name: str, - status: str, - algorithm_version: int + cls, + algorithm_id: str, + interaction_id: str, + exp_id: str, + exp_version: int, + next_scheduled_check_time: datetime.datetime, + training_data: TrainingDataUnionType, + state_name: str, + status: str, + algorithm_version: int ) -> str: """Creates a new ClassifierTrainingJobModel entry. @@ -173,7 +177,7 @@ def create( Exception. A model with the same ID already exists. 
""" - instance_id = cls._generate_id(exp_id) + instance_id = cls.generate_id(exp_id) training_job_instance = cls( id=instance_id, algorithm_id=algorithm_id, interaction_id=interaction_id, @@ -191,7 +195,7 @@ def create( @classmethod def query_new_and_pending_training_jobs( - cls, offset: int + cls, offset: int ) -> Tuple[Sequence[ClassifierTrainingJobModel], int]: """Gets the next 10 jobs which are either in status "new" or "pending", ordered by their next_scheduled_check_time attribute. @@ -224,10 +228,10 @@ def query_new_and_pending_training_jobs( offset = offset + len(classifier_job_models) return classifier_job_models, offset - # TODO(#13523): Change 'job_dict' to domain object/TypedDict to - # remove Any from type-annotation below. @classmethod - def create_multi(cls, job_dicts_list: List[Dict[str, Any]]) -> List[str]: + def create_multi( + cls, job_dicts_list: List[classifier_services.JobInfoDict] + ) -> List[str]: """Creates multiple new ClassifierTrainingJobModel entries. Args: @@ -240,7 +244,7 @@ def create_multi(cls, job_dicts_list: List[Dict[str, Any]]) -> List[str]: job_models = [] job_ids = [] for job_dict in job_dicts_list: - instance_id = cls._generate_id(job_dict['exp_id']) + instance_id = cls.generate_id(job_dict['exp_id']) training_job_instance = cls( id=instance_id, algorithm_id=job_dict['algorithm_id'], interaction_id=job_dict['interaction_id'], @@ -301,11 +305,11 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: }) @classmethod - def _generate_id( - cls, - exp_id: str, - exp_version: int, - state_name: str + def get_entity_id( + cls, + exp_id: str, + exp_version: int, + state_name: str ) -> str: """Generates a unique ID for the Classifier Exploration Mapping of the form [exp_id].[exp_version].[state_name]. 
@@ -342,7 +346,7 @@ def get_models( """ mapping_ids = [] for state_name in state_names: - mapping_id = cls._generate_id(exp_id, exp_version, state_name) + mapping_id = cls.get_entity_id(exp_id, exp_version, state_name) mapping_ids.append(mapping_id) mapping_instances = cls.get_multi(mapping_ids) return mapping_instances @@ -366,17 +370,17 @@ def get_model( for the classifier exploration mapping. It returns None if the no entry for given is found. """ - mapping_id = cls._generate_id(exp_id, exp_version, state_name) + mapping_id = cls.get_entity_id(exp_id, exp_version, state_name) model = cls.get_by_id(mapping_id) return model @classmethod def create( - cls, - exp_id: str, - exp_version: int, - state_name: str, - algorithm_ids_to_job_ids: Dict[str, str] + cls, + exp_id: str, + exp_version: int, + state_name: str, + algorithm_ids_to_job_ids: Dict[str, str] ) -> str: """Creates a new ClassifierExplorationMappingModel entry. @@ -397,7 +401,7 @@ def create( Exception. A model with the same ID already exists. """ - instance_id = cls._generate_id(exp_id, exp_version, state_name) + instance_id = cls.get_entity_id(exp_id, exp_version, state_name) if not cls.get_by_id(instance_id): mapping_instance = cls( id=instance_id, exp_id=exp_id, exp_version=exp_version, @@ -411,8 +415,10 @@ def create( @classmethod def create_multi( - cls, - state_training_jobs_mappings: List[StateTrainingJobsMappingModel] + cls, + state_training_jobs_mappings: List[ + classifier_domain.StateTrainingJobsMapping + ] ) -> List[str]: """Creates multiple new StateTrainingJobsMappingModel entries. 
@@ -426,7 +432,7 @@ def create_multi( mapping_models = [] mapping_ids = [] for state_training_job_mapping in state_training_jobs_mappings: - instance_id = cls._generate_id( + instance_id = cls.get_entity_id( state_training_job_mapping.exp_id, state_training_job_mapping.exp_version, state_training_job_mapping.state_name) diff --git a/core/storage/classifier/gae_models_test.py b/core/storage/classifier/gae_models_test.py index 92e333909f5b..9d00148bf49e 100644 --- a/core/storage/classifier/gae_models_test.py +++ b/core/storage/classifier/gae_models_test.py @@ -26,15 +26,18 @@ from core.platform import models from core.tests import test_utils -from typing import List, cast +from typing import List MYPY = False if MYPY: # pragma: no cover + # Here, we are importing 'classifier_services' only for type checking. + from core.domain import classifier_services from mypy_imports import base_models from mypy_imports import classifier_models -(base_models, classifier_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.classifier]) +(base_models, classifier_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.CLASSIFIER +]) class ClassifierTrainingJobModelUnitTests(test_utils.GenericTestBase): @@ -45,6 +48,32 @@ def test_get_deletion_policy(self) -> None: classifier_models.ClassifierTrainingJobModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'algorithm_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'interaction_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'status': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'training_data': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'next_scheduled_check_time': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 
'algorithm_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + classifier_models.ClassifierTrainingJobModel.get_export_policy(), + expected_export_policy_dict) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + classifier_models.ClassifierTrainingJobModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + def test_create_and_get_new_training_job_runs_successfully(self) -> None: next_scheduled_check_time = datetime.datetime.utcnow() job_id = classifier_models.ClassifierTrainingJobModel.create( @@ -56,8 +85,6 @@ def test_create_and_get_new_training_job_runs_successfully(self) -> None: training_job = ( classifier_models.ClassifierTrainingJobModel.get(job_id) ) - # Ruling out the possibility of None for mypy type checking. - assert training_job is not None self.assertEqual(training_job.algorithm_id, 'TextClassifier') self.assertEqual(training_job.interaction_id, 'TextInput') @@ -121,7 +148,7 @@ def test_query_new_and_pending_training_jobs(self) -> None: self.assertEqual(offset, 2) def test_query_new_and_pending_training_jobs_with_non_zero_offset( - self + self ) -> None: with self.swap( classifier_models, 'NEW_AND_PENDING_TRAINING_JOBS_FETCH_LIMIT', 2): @@ -210,7 +237,7 @@ def test_query_new_and_pending_training_jobs_with_non_zero_offset( def test_create_multi_jobs(self) -> None: next_scheduled_check_time = datetime.datetime.utcnow() - job_dicts_list = [] + job_dicts_list: List[classifier_services.JobInfoDict] = [] job_dicts_list.append({ 'exp_id': u'1', 'exp_version': 1, @@ -243,8 +270,6 @@ def test_create_multi_jobs(self) -> None: training_job1 = ( classifier_models.ClassifierTrainingJobModel.get(job_ids[0]) ) - # Ruling out the possibility of None for mypy type checking. 
- assert training_job1 is not None self.assertEqual( training_job1.algorithm_id, @@ -265,8 +290,6 @@ def test_create_multi_jobs(self) -> None: training_job2 = ( classifier_models.ClassifierTrainingJobModel.get(job_ids[1]) ) - # Ruling out the possibility of None for mypy type checking. - assert training_job2 is not None self.assertEqual( training_job2.algorithm_id, @@ -287,7 +310,7 @@ def test_create_multi_jobs(self) -> None: def test_raise_exception_by_mocking_collision(self) -> None: next_scheduled_check_time = datetime.datetime.utcnow() - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'The id generator for ClassifierTrainingJobModel is ' 'producing too many collisions.' ): @@ -313,6 +336,47 @@ def test_get_deletion_policy(self) -> None: .get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'algorithm_ids_to_job_ids': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + classifier_models.StateTrainingJobsMappingModel.get_export_policy(), + expected_export_policy_dict) + + def test_get_model_from_exploration_attributes(self) -> None: + exp_id = 'exp_id1' + exp_version = 1 + state_name = 'state_name1' + job_id = 'job_id1' + classifier_models.StateTrainingJobsMappingModel.create( + exp_id, exp_version, state_name, {'algorithm_id': job_id}) + + mapping = ( + classifier_models.StateTrainingJobsMappingModel.get_model( + exp_id, exp_version, state_name)) + # Ruling out the possibility of None for mypy type checking. 
+ assert mapping is not None + + self.assertEqual(mapping.exp_id, exp_id) + self.assertEqual(mapping.exp_version, 1) + self.assertEqual(mapping.state_name, state_name) + self.assertDictEqual( + mapping.algorithm_ids_to_job_ids, {'algorithm_id': job_id}) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + classifier_models.StateTrainingJobsMappingModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + def test_create_and_get_new_mapping_runs_successfully(self) -> None: mapping_id = ( classifier_models.StateTrainingJobsMappingModel.create( @@ -320,8 +384,6 @@ def test_create_and_get_new_mapping_runs_successfully(self) -> None: mapping = classifier_models.StateTrainingJobsMappingModel.get( mapping_id) - # Ruling out the possibility of None for mypy type checking. - assert mapping is not None self.assertEqual(mapping.exp_id, 'exp_id1') self.assertEqual(mapping.exp_version, 2) @@ -330,7 +392,7 @@ def test_create_and_get_new_mapping_runs_successfully(self) -> None: mapping.algorithm_ids_to_job_ids, {'algorithm_id': 'job_id4'}) # Test that exception is raised when creating mapping with same id. 
- with self.assertRaisesRegexp(Exception, ( # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, ( 'A model with the same ID already exists.')): mapping_id = ( classifier_models.StateTrainingJobsMappingModel.create( @@ -358,7 +420,7 @@ def test_create_and_get_new_mapping_runs_successfully(self) -> None: self.assertEqual(mapping_id, 'exp_id1.2.%s' % state_name2) - def test_get_model_from_exploration_attributes(self) -> None: + def test_get_models_from_exploration_attributes(self) -> None: exp_id = 'exp_id1' exp_version = 1 state_name = 'state_name1' @@ -383,25 +445,22 @@ def test_create_multi_mappings(self) -> None: state_training_jobs_mappings: List[ classifier_domain.StateTrainingJobsMapping] = [] state_training_jobs_mappings.append( - classifier_domain.StateTrainingJobsMapping( # type: ignore[no-untyped-call] + classifier_domain.StateTrainingJobsMapping( u'1', 1, 'Home', {'algorithm_id': 'job_id1'})) state_training_jobs_mappings.append( - classifier_domain.StateTrainingJobsMapping( # type: ignore[no-untyped-call] + classifier_domain.StateTrainingJobsMapping( u'1', 2, 'Home', {'algorithm_id': 'job_id2'})) - state_training_jobs_mappings_model = cast( - List[classifier_models.StateTrainingJobsMappingModel], - state_training_jobs_mappings) mapping_ids = ( classifier_models.StateTrainingJobsMappingModel.create_multi( - state_training_jobs_mappings_model)) + state_training_jobs_mappings + ) + ) self.assertEqual(len(mapping_ids), 2) mapping1 = ( classifier_models.StateTrainingJobsMappingModel.get( mapping_ids[0])) - # Ruling out the possibility of None for mypy type checking. - assert mapping1 is not None self.assertEqual(mapping1.exp_id, '1') self.assertEqual(mapping1.exp_version, 1) @@ -412,8 +471,6 @@ def test_create_multi_mappings(self) -> None: mapping2 = ( classifier_models.StateTrainingJobsMappingModel.get( mapping_ids[1])) - # Ruling out the possibility of None for mypy type checking. 
- assert mapping2 is not None self.assertEqual(mapping2.exp_id, '1') self.assertEqual(mapping2.exp_version, 2) diff --git a/core/storage/classroom/__init__.py b/core/storage/classroom/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/core/storage/classroom/gae_models.py b/core/storage/classroom/gae_models.py new file mode 100644 index 000000000000..24e3464c590f --- /dev/null +++ b/core/storage/classroom/gae_models.py @@ -0,0 +1,181 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Models for storing classroom data.""" + +from __future__ import annotations + +from core import utils +from core.platform import models + +from typing import Dict, List, Optional + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + +datastore_services = models.Registry.import_datastore_services() +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) + + +class ClassroomModel(base_models.BaseModel): + """Model to store the classroom data. + + The id of instances of this class is in the form of random hash of + 12 chars. + """ + + # The name of the classroom. + name = datastore_services.StringProperty(required=True, indexed=True) + # The url fragment of the classroom. 
+ url_fragment = datastore_services.StringProperty( + required=True, indexed=True) + # A text to provide course details present in the classroom. + course_details = datastore_services.StringProperty( + indexed=True, required=True) + # A text to provide an introduction for all the topics in the classroom. + topic_list_intro = datastore_services.StringProperty( + indexed=True, required=True) + # A property that is used to establish dependencies among the topics in the + # classroom. This field contains a dict with topic ID as key and a list of + # prerequisite topic IDs as value. + topic_id_to_prerequisite_topic_ids = datastore_services.JsonProperty( + indexed=False, required=False) + + @staticmethod + def get_deletion_policy() -> base_models.DELETION_POLICY: + """Model doesn't contain any data directly corresponding to a user.""" + return base_models.DELETION_POLICY.NOT_APPLICABLE + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """Model does not contain user data.""" + return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model doesn't contain any data directly corresponding to a user.""" + return dict(super(cls, cls).get_export_policy(), **{ + 'name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'course_details': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_list_intro': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id_to_prerequisite_topic_ids': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE) + }) + + @classmethod + def generate_new_classroom_id(cls) -> str: + """Generates a new classroom ID which is unique and is in the form of + random hash of 12 chars. + + Returns: + str. A classroom ID that is different from the IDs of all + the existing classroom. + + Raises: + Exception. 
There were too many collisions with existing classroom + IDs when attempting to generate a new classroom ID. + """ + for _ in range(base_models.MAX_RETRIES): + classroom_id = utils.convert_to_hash( + str(utils.get_random_int(base_models.RAND_RANGE)), + base_models.ID_LENGTH) + if not cls.get_by_id(classroom_id): + return classroom_id + raise Exception( + 'New classroom id generator is producing too many collisions.') + + @classmethod + def create( + cls, classroom_id: str, name: str, url_fragment: str, + course_details: str, topic_list_intro: str, + topic_id_to_prerequisite_topic_ids: Dict[str, List[str]] + ) -> ClassroomModel: + """Creates a new ClassroomModel entry. + + Args: + classroom_id: str. Classroom ID of the newly-created classroom. + name: str. The name of the classroom. + url_fragment: str. The url fragment of the classroom. + course_details: str. A text to provide course details present in + the classroom. + topic_list_intro: str. A text to provide an introduction for all + the topics in the classroom. + topic_id_to_prerequisite_topic_ids: dict(str, list(str)). A dict + with topic ID as key and list of topic IDs as value. + + Returns: + ClassroomModel. The newly created ClassroomModel instance. + + Raises: + Exception. A classroom with the given ID already exists. + """ + if cls.get_by_id(classroom_id): + raise Exception( + 'A classroom with the given classroom ID already exists.') + + entity = cls( + id=classroom_id, + name=name, + url_fragment=url_fragment, + course_details=course_details, + topic_list_intro=topic_list_intro, + topic_id_to_prerequisite_topic_ids=( + topic_id_to_prerequisite_topic_ids) + ) + entity.update_timestamps() + entity.put() + + return entity + + @classmethod + def get_by_url_fragment(cls, url_fragment: str) -> Optional[ClassroomModel]: + """Gets ClassroomModel by url_fragment. Returns None if the classroom + with the given url_fragment doesn't exist. + + Args: + url_fragment: str. The url fragment of the classroom. 
+ + Returns: + ClassroomModel | None. The Classroom model or None if not found. + """ + return ClassroomModel.query( + datastore_services.all_of( + cls.url_fragment == url_fragment, + cls.deleted == False # pylint: disable=singleton-comparison + ) + ).get() + + @classmethod + def get_by_name(cls, classroom_name: str) -> Optional[ClassroomModel]: + """Gets ClassroomModel by name. Returns None if the classroom + with the given name doesn't exist. + + Args: + classroom_name: str. The name of the classroom. + + Returns: + ClassroomModel | None. The Classroom model or None if not found. + """ + return ClassroomModel.query( + datastore_services.all_of( + cls.name == classroom_name, + cls.deleted == False # pylint: disable=singleton-comparison + ) + ).get() diff --git a/core/storage/classroom/gae_models_test.py b/core/storage/classroom/gae_models_test.py new file mode 100644 index 000000000000..539028fbbac9 --- /dev/null +++ b/core/storage/classroom/gae_models_test.py @@ -0,0 +1,157 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for classroom models.""" + +from __future__ import annotations + +import types + +from core.platform import models +from core.tests import test_utils + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import classroom_models + +(base_models, classroom_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.CLASSROOM +]) + + +class ClassroomModelUnitTest(test_utils.GenericTestBase): + """Test the ClassroomModel class.""" + + def setUp(self) -> None: + super().setUp() + self.classroom_model = classroom_models.ClassroomModel( + id='id', + name='math', + url_fragment='math', + course_details='Curated math foundations course.', + topic_list_intro='Start from the basics with our first topic.', + topic_id_to_prerequisite_topic_ids={} + ) + self.classroom_model.update_timestamps() + self.classroom_model.put() + + def test_create_new_model(self) -> None: + classroom_id = ( + classroom_models.ClassroomModel.generate_new_classroom_id()) + classroom_model_instance = (classroom_models.ClassroomModel.create( + classroom_id, 'physics', 'physics', 'Curated physics course.', + 'Start from the basic physics.', {})) + + self.assertEqual(classroom_model_instance.name, 'physics') + self.assertEqual(classroom_model_instance.url_fragment, 'physics') + self.assertEqual( + classroom_model_instance.course_details, 'Curated physics course.') + self.assertEqual( + classroom_model_instance.topic_list_intro, + 'Start from the basic physics.') + + def test_get_export_policy_not_applicable(self) -> None: + self.assertEqual( + classroom_models.ClassroomModel.get_export_policy(), + { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'course_details': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_list_intro': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id_to_prerequisite_topic_ids': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE) + } + ) + + def test_get_model_association_to_user_not_corresponding_to_user( + self + ) -> None: + self.assertEqual( + classroom_models.ClassroomModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_deletion_policy_not_applicable(self) -> None: + self.assertEqual( + classroom_models.ClassroomModel.get_deletion_policy(), + base_models.DELETION_POLICY.NOT_APPLICABLE) + + def test_get_model_by_url_fragment(self) -> None: + self.assertEqual( + classroom_models.ClassroomModel.get_by_url_fragment('math'), + self.classroom_model) + self.assertEqual( + classroom_models.ClassroomModel.get_by_url_fragment( + 'incorrect_url_fragment'), None) + + def test_get_model_by_name(self) -> None: + self.assertEqual( + classroom_models.ClassroomModel.get_by_name('math'), + self.classroom_model) + self.assertEqual( + classroom_models.ClassroomModel.get_by_name('incorrect_name'), + None) + + def test_get_model_by_id(self) -> None: + self.assertEqual( + classroom_models.ClassroomModel.get_by_id('id'), + self.classroom_model) + self.assertEqual( + classroom_models.ClassroomModel.get_by_id('incorrect_id'), + None) + + def test_raise_exception_by_mocking_collision(self) -> None: + """Tests create and generate_new_classroom_id methods for raising + exception. + """ + classroom_model_cls = classroom_models.ClassroomModel + + # Test create method. + with self.assertRaisesRegex( + Exception, + 'A classroom with the given classroom ID already exists.' + ): + # Swap dependent method get_by_id to simulate collision every time. 
+ with self.swap( + classroom_model_cls, 'get_by_id', + types.MethodType( + lambda x, y: True, + classroom_model_cls + ) + ): + classroom_model_cls.create( + 'classroom_id', 'math', 'math', + 'Curated math foundations course.', + 'Start from the basic math.', {} + ) + + # Test generate_new_classroom_id method. + with self.assertRaisesRegex( + Exception, + 'New classroom id generator is producing too many collisions.' + ): + # Swap dependent method get_by_id to simulate collision every time. + with self.swap( + classroom_model_cls, 'get_by_id', + types.MethodType( + lambda x, y: True, + classroom_model_cls + ) + ): + classroom_model_cls.generate_new_classroom_id() diff --git a/core/storage/collection/gae_models.py b/core/storage/collection/gae_models.py index 43d6faa0116c..66733fde85ea 100644 --- a/core/storage/collection/gae_models.py +++ b/core/storage/collection/gae_models.py @@ -22,13 +22,12 @@ import datetime from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.platform import models import core.storage.base_model.gae_models as base_models -from typing import Any, Dict, List, Optional, Sequence, Tuple +from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple MYPY = False if MYPY: # pragma: no cover @@ -216,8 +215,10 @@ def get_collection_count(cls) -> int: """Returns the total number of collections.""" return cls.get_all().count() - # TODO(#13523): Change 'model_dict' to domain object/TypedDict to - # remove Any from type-annotation below. + # TODO(#15911): Here we use type Any because 'convert_to_valid_dict' method + # accepts content NDB JSON properties and those NDB JSON properties have + # loose typing. So, once we explicitly type those NDB JSON properties, we + # can remove Any type from here. @staticmethod def convert_to_valid_dict(model_dict: Dict[str, Any]) -> Dict[str, Any]: """Replace invalid fields and values in the CollectionModel dict. 
@@ -247,8 +248,10 @@ def convert_to_valid_dict(model_dict: Dict[str, Any]) -> Dict[str, Any]: return model_dict - # TODO(#13523): Change 'snapshot_dict' to domain object/TypedDict to - # remove Any from type-annotation below. + # TODO(#15911): Here we use type Any because this '_reconstitute' method + # accepts content NDB JSON properties and those NDB JSON properties have + # loose typing. So, once we explicitly type those NDB JSON properties, we + # can remove Any type from the argument of '_reconstitute' method. def _reconstitute(self, snapshot_dict: Dict[str, Any]) -> CollectionModel: """Populates the model instance with the snapshot. @@ -269,15 +272,34 @@ def _reconstitute(self, snapshot_dict: Dict[str, Any]) -> CollectionModel: **CollectionModel.convert_to_valid_dict(snapshot_dict)) return self - # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to - # remove Any from type-annotation below. - def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't be allowed. + def _prepare_additional_models(self) -> Mapping[str, base_models.BaseModel]: + """Prepares additional models needed for the commit process. + + Returns: + dict(str, BaseModel). Additional models needed for + the commit process. Contains the CollectionRightsModel. + """ + return { + 'rights_model': CollectionRightsModel.get_by_id(self.id) + } + + # Here we use MyPy ignore because super class (VersionedModel) + # defines this 'additional_models' argument as broader type but here + # we are sure that in this sub-class (CollectionModel) argument + # 'additional_models' is always going to be of type Dict[str, + # CollectionRightsModel]. So, due to this conflict in argument types, + # a conflict in signatures occurred which causes MyPy to throw an + # error. 
Thus, to avoid the error, we used ignore here. + def compute_models_to_commit( # type: ignore[override] + self, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: base_models.AllowedCommitCmdsListType, + additional_models: Dict[str, CollectionRightsModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this extends the superclass method. @@ -287,37 +309,57 @@ def _trusted_commit( change. commit_type: str. The type of commit. Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. - commit_message: str. The commit description message. + commit_message: str|None. The commit message or None if unpublished + collection is provided. commit_cmds: list(dict). A list of commands, describing changes made in this model, which should give sufficient information to reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. - """ - super(CollectionModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. - collection_rights = CollectionRightsModel.get_by_id(self.id) + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. 
+ """ + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) + collection_rights_model = additional_models['rights_model'] collection_commit_log = CollectionCommitLogEntryModel.create( - self.id, self.version, committer_id, commit_type, commit_message, - commit_cmds, collection_rights.status, - collection_rights.community_owned + self.id, + self.version, + committer_id, + commit_type, + commit_message, + commit_cmds, + collection_rights_model.status, + collection_rights_model.community_owned ) collection_commit_log.collection_id = self.id - collection_commit_log.update_timestamps() - collection_commit_log.put() + return { + 'snapshot_metadata_model': models_to_put['snapshot_metadata_model'], + 'snapshot_content_model': models_to_put['snapshot_content_model'], + 'commit_log_model': collection_commit_log, + 'versioned_model': models_to_put['versioned_model'], + } - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.delete_multi(). # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override @classmethod def delete_multi( # type: ignore[override] - cls, - entity_ids: List[str], - committer_id: str, - commit_message: str, - force_deletion: bool = False + cls, + entity_ids: List[str], + committer_id: str, + commit_message: str, + force_deletion: bool = False ) -> None: """Deletes the given cls instances with the given entity_ids. 
@@ -340,14 +382,14 @@ def delete_multi( # type: ignore[override] collection_rights_models = CollectionRightsModel.get_multi( entity_ids, include_deleted=True) versioned_models = cls.get_multi(entity_ids, include_deleted=True) - for model, rights_model in python_utils.ZIP( - versioned_models, collection_rights_models): + for model, rights_model in zip( + versioned_models, collection_rights_models): # Ruling out the possibility of None for mypy type checking. assert model is not None assert rights_model is not None collection_commit_log = CollectionCommitLogEntryModel.create( model.id, model.version, committer_id, - cls._COMMIT_TYPE_DELETE, + feconf.COMMIT_TYPE_DELETE, commit_message, [{'cmd': cls.CMD_DELETE_COMMIT}], rights_model.status, rights_model.community_owned ) @@ -505,13 +547,11 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: cls.viewer_ids == user_id )).get(keys_only=True) is not None - # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to - # remove Any from type-annotation below. def save( - self, - committer_id: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] + self, + committer_id: str, + commit_message: str, + commit_cmds: base_models.AllowedCommitCmdsListType ) -> None: """Updates the collection rights model by applying the given commit_cmds, then saves it. @@ -526,11 +566,13 @@ def save( cmd: str. Unique command. and additional arguments for that command. """ - super(CollectionRightsModel, self).commit( + super().commit( committer_id, commit_message, commit_cmds) - # TODO(#13523): Change 'model_dict' to domain object/TypedDict to - # remove Any from type-annotation below. + # TODO(#15911): Here we use type Any because 'convert_to_valid_dict' method + # accepts content NDB JSON properties and those NDB JSON properties have + # loose typing. So, once we explicitly type those NDB JSON properties, we + # can remove Any type from here. 
@staticmethod def convert_to_valid_dict(model_dict: Dict[str, Any]) -> Dict[str, Any]: """Replace invalid fields and values in the CollectionRightsModel dict. @@ -572,10 +614,12 @@ def convert_to_valid_dict(model_dict: Dict[str, Any]) -> Dict[str, Any]: return model_dict - # TODO(#13523): Change 'snapshot_dict' to domain object/TypedDict to - # remove Any from type-annotation below. + # TODO(#15911): Here we use type Any because this '_reconstitute' method + # accepts content NDB JSON properties and those NDB JSON properties have + # loose typing. So, once we explicitly type those NDB JSON properties, we + # can remove Any type from the argument of '_reconstitute' method. def _reconstitute( - self, snapshot_dict: Dict[str, Any] + self, snapshot_dict: Dict[str, Any] ) -> CollectionRightsModel: """Populates the model instance with the snapshot. @@ -596,15 +640,17 @@ def _reconstitute( **CollectionRightsModel.convert_to_valid_dict(snapshot_dict)) return self - # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to - # remove Any from type-annotation below. - def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + def compute_models_to_commit( + self, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: base_models.AllowedCommitCmdsListType, + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't + # be allowed. + additional_models: Mapping[str, base_models.BaseModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this overrides the superclass method. @@ -614,38 +660,29 @@ def _trusted_commit( change. commit_type: str. The type of commit. Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. - commit_message: str. The commit description message. + commit_message: str|None. 
The commit message or None if unpublished + collection is provided. commit_cmds: list(dict). A list of commands, describing changes made in this model, should give sufficient information to reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. - """ - super(CollectionRightsModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. - # Create and delete events will already be recorded in the - # CollectionModel. - if commit_type not in ['create', 'delete']: - CollectionCommitLogEntryModel( - id=('rights-%s-%s' % (self.id, self.version)), - user_id=committer_id, - collection_id=self.id, - commit_type=commit_type, - commit_message=commit_message, - commit_cmds=commit_cmds, - version=None, - post_commit_status=self.status, - post_commit_community_owned=self.community_owned, - post_commit_is_private=( - self.status == constants.ACTIVITY_STATUS_PRIVATE) - ).put() - - snapshot_metadata_model = self.SNAPSHOT_METADATA_CLASS.get( - self.get_snapshot_id(self.id, self.version)) - # Ruling out the possibility of None for mypy type checking. - assert snapshot_metadata_model is not None + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. 
+ """ + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) + snapshot_metadata_model = models_to_put['snapshot_metadata_model'] snapshot_metadata_model.content_user_ids = list(sorted( set(self.owner_ids) | set(self.editor_ids) | @@ -661,12 +698,40 @@ def _trusted_commit( if cmd['name'] == commit_cmd['cmd'] ) for user_id_attribute_name in user_id_attribute_names: - commit_cmds_user_ids.add(commit_cmd[user_id_attribute_name]) + user_id_name_value = commit_cmd[user_id_attribute_name] + # # Ruling out the possibility of any other type for mypy type + # checking. + assert isinstance(user_id_name_value, str) + commit_cmds_user_ids.add(user_id_name_value) snapshot_metadata_model.commit_cmds_user_ids = list( sorted(commit_cmds_user_ids)) - snapshot_metadata_model.update_timestamps() - snapshot_metadata_model.put() + # Create and delete events will already be recorded in the + # CollectionModel. + if commit_type not in ['create', 'delete']: + collection_commit_log = CollectionCommitLogEntryModel( + id=('rights-%s-%s' % (self.id, self.version)), + user_id=committer_id, + collection_id=self.id, + commit_type=commit_type, + commit_message=commit_message, + commit_cmds=commit_cmds, + version=None, + post_commit_status=self.status, + post_commit_community_owned=self.community_owned, + post_commit_is_private=( + self.status == constants.ACTIVITY_STATUS_PRIVATE) + ) + return { + 'snapshot_metadata_model': ( + models_to_put['snapshot_metadata_model']), + 'snapshot_content_model': ( + models_to_put['snapshot_content_model']), + 'commit_log_model': collection_commit_log, + 'versioned_model': models_to_put['versioned_model'], + } + + return models_to_put @classmethod def export_data(cls, user_id: str) -> Dict[str, List[str]]: diff --git a/core/storage/collection/gae_models_test.py b/core/storage/collection/gae_models_test.py index 6efc31b7b3a1..aa6e64351e62 100644 --- 
a/core/storage/collection/gae_models_test.py +++ b/core/storage/collection/gae_models_test.py @@ -29,7 +29,7 @@ from core.platform import models from core.tests import test_utils -from typing import Any, Dict, List +from typing import Dict, Final, List MYPY = False if MYPY: # pragma: no cover @@ -37,8 +37,9 @@ from mypy_imports import collection_models from mypy_imports import user_models -(base_models, collection_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.collection, models.NAMES.user]) +(base_models, collection_models, user_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.COLLECTION, models.Names.USER +]) class CollectionSnapshotContentModelTests(test_utils.GenericTestBase): @@ -59,20 +60,20 @@ def test_get_deletion_policy(self) -> None: base_models.DELETION_POLICY.NOT_APPLICABLE) def test_get_collection_count(self) -> None: - collection = collection_domain.Collection.create_default_collection( # type: ignore[no-untyped-call] + collection = collection_domain.Collection.create_default_collection( 'id', title='A title', category='A Category', objective='An Objective') - collection_services.save_new_collection('id', collection) # type: ignore[no-untyped-call] + collection_services.save_new_collection('id', collection) num_collections = ( collection_models.CollectionModel.get_collection_count()) self.assertEqual(num_collections, 1) def test_reconstitute(self) -> None: - collection = collection_domain.Collection.create_default_collection( # type: ignore[no-untyped-call] + collection = collection_domain.Collection.create_default_collection( 'id', title='A title', category='A Category', objective='An Objective') - collection_services.save_new_collection('id', collection) # type: ignore[no-untyped-call] + collection_services.save_new_collection('id', collection) collection_model = collection_models.CollectionModel.get_by_id('id') snapshot_dict = collection_model.compute_snapshot() 
snapshot_dict['nodes'] = ['node0', 'node1'] @@ -84,10 +85,10 @@ def test_reconstitute(self) -> None: class CollectionRightsSnapshotContentModelTests(test_utils.GenericTestBase): - COLLECTION_ID_1 = '1' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - USER_ID_COMMITTER = 'id_committer' + COLLECTION_ID_1: Final = '1' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_COMMITTER: Final = 'id_committer' def test_get_deletion_policy_is_locally_pseudonymize(self) -> None: self.assertEqual( @@ -127,29 +128,29 @@ def test_has_reference_to_user_id(self) -> None: class CollectionRightsModelUnitTest(test_utils.GenericTestBase): """Test the CollectionRightsModel class.""" - COLLECTION_ID_1 = '1' - COLLECTION_ID_2 = '2' - COLLECTION_ID_3 = '3' - COLLECTION_ID_4 = '4' + COLLECTION_ID_1: Final = '1' + COLLECTION_ID_2: Final = '2' + COLLECTION_ID_3: Final = '3' + COLLECTION_ID_4: Final = '4' # Related to all three collections. - USER_ID_1 = 'id_1' + USER_ID_1: Final = 'id_1' # Related to a subset of the three collections. - USER_ID_2 = 'id_2' + USER_ID_2: Final = 'id_2' # Related to no collections. - USER_ID_3 = 'id_3' + USER_ID_3: Final = 'id_3' # Related to one collection and then removed from it. - USER_ID_4 = 'id_4' + USER_ID_4: Final = 'id_4' # User id used in commits. 
- USER_ID_COMMITTER = 'id_5' - USER_ID_4_OLD = 'id_4_old' - USER_ID_4_NEW = 'id_4_new' - USER_ID_5_OLD = 'id_5_old' - USER_ID_5_NEW = 'id_5_new' - USER_ID_6_OLD = 'id_6_old' - USER_ID_6_NEW = 'id_6_new' + USER_ID_COMMITTER: Final = 'id_5' + USER_ID_4_OLD: Final = 'id_4_old' + USER_ID_4_NEW: Final = 'id_4_new' + USER_ID_5_OLD: Final = 'id_5_old' + USER_ID_5_NEW: Final = 'id_5_new' + USER_ID_6_OLD: Final = 'id_6_old' + USER_ID_6_NEW: Final = 'id_6_new' def setUp(self) -> None: - super(CollectionRightsModelUnitTest, self).setUp() + super().setUp() user_models.UserSettingsModel( id=self.USER_ID_1, email='some@email.com', @@ -253,8 +254,6 @@ def test_save(self) -> None: self.USER_ID_COMMITTER, 'Created new collection', [{'cmd': rights_domain.CMD_CREATE_NEW}]) collection_model = collection_models.CollectionRightsModel.get('id') - # Ruling out the possibility of None for mypy type checking. - assert collection_model is not None self.assertEqual('id', collection_model.id) self.assertEqual( @@ -341,8 +340,6 @@ def test_reconstitute(self) -> None: collection_rights_model = ( collection_models.CollectionRightsModel.get('id') ) - # Ruling out the possibility of None for mypy type checking. - assert collection_rights_model is not None snapshot_dict = collection_rights_model.compute_snapshot() snapshot_dict['translator_ids'] = ['tid1', 'tid2'] snapshot_dict = collection_rights_model.convert_to_valid_dict( @@ -360,15 +357,15 @@ def test_reconstitute(self) -> None: class CollectionRightsModelRevertUnitTest(test_utils.GenericTestBase): """Test the revert method on CollectionRightsModel class.""" - COLLECTION_ID_1 = '1' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - USER_ID_3 = 'id_3' + COLLECTION_ID_1: Final = '1' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_3: Final = 'id_3' # User id used in commits. 
- USER_ID_COMMITTER = 'id_4' + USER_ID_COMMITTER: Final = 'id_4' def setUp(self) -> None: - super(CollectionRightsModelRevertUnitTest, self).setUp() + super().setUp() self.collection_model = collection_models.CollectionRightsModel( id=self.COLLECTION_ID_1, owner_ids=[self.USER_ID_1], @@ -408,7 +405,9 @@ def setUp(self) -> None: 'name': feconf.CMD_REVERT_COMMIT, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }) self.allowed_commands_swap = self.swap( feconf, @@ -430,9 +429,7 @@ def test_revert_to_valid_version_is_successful(self) -> None: ) def test_revert_to_version_with_invalid_status_is_successful(self) -> None: - # TODO(#13523): Use of Any in the type annotation below will be - # removed when model_dict is converted to TypedDicts/domain objects. - broken_dict: Dict[str, Any] = dict(**self.original_dict) + broken_dict = dict(**self.original_dict) broken_dict['status'] = 'publicized' snapshot_model = ( @@ -457,11 +454,9 @@ def test_revert_to_version_with_invalid_status_is_successful(self) -> None: ) def test_revert_to_version_with_translator_ids_field_is_successful( - self + self ) -> None: - # TODO(#13523): Use of Any in the type annotation below will be - # removed when model_dict is converted to TypedDicts/domain objects. - broken_dict: Dict[str, Any] = dict(**self.original_dict) + broken_dict = dict(**self.original_dict) del broken_dict['voice_artist_ids'] broken_dict['translator_ids'] = [self.USER_ID_2] @@ -531,12 +526,13 @@ def test_get_all_non_private_commits(self) -> None: self.assertFalse(more) def test_get_all_non_private_commits_with_invalid_max_age(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'max_age must be a datetime.timedelta instance or None.'): - # TODO(#13528): Remove this test after the backend is fully - # type-annotated. 
Here ignore[arg-type] is used to test method - # get_all_non_private_commits() for invalid input type. + # TODO(#13528): Here we use MyPy ignore because we will remove this + # test after the backend is fully type-annotated. Here + # ignore[arg-type] is used to test method + # get_all_non_private_commits() for invalid input type. ( collection_models.CollectionCommitLogEntryModel .get_all_non_private_commits( @@ -572,18 +568,18 @@ def test_get_all_non_private_commits_with_max_age(self) -> None: class CollectionSummaryModelUnitTest(test_utils.GenericTestBase): """Tests for the CollectionSummaryModel.""" - COLLECTION_ID_1 = '1' - COLLECTION_ID_2 = '2' - COLLECTION_ID_3 = '3' - USER_ID_1_OLD = 'id_1_old' - USER_ID_1_NEW = 'id_1_new' - USER_ID_2_OLD = 'id_2_old' - USER_ID_2_NEW = 'id_2_new' - USER_ID_3_OLD = 'id_3_old' - USER_ID_3_NEW = 'id_3_new' + COLLECTION_ID_1: Final = '1' + COLLECTION_ID_2: Final = '2' + COLLECTION_ID_3: Final = '3' + USER_ID_1_OLD: Final = 'id_1_old' + USER_ID_1_NEW: Final = 'id_1_new' + USER_ID_2_OLD: Final = 'id_2_old' + USER_ID_2_NEW: Final = 'id_2_new' + USER_ID_3_OLD: Final = 'id_3_old' + USER_ID_3_NEW: Final = 'id_3_new' def setUp(self) -> None: - super(CollectionSummaryModelUnitTest, self).setUp() + super().setUp() user_models.UserSettingsModel( id=self.USER_ID_1_NEW, email='some@email.com', diff --git a/core/storage/config/gae_models.py b/core/storage/config/gae_models.py index 897b5135da2a..fa229c386887 100644 --- a/core/storage/config/gae_models.py +++ b/core/storage/config/gae_models.py @@ -21,10 +21,13 @@ from core.platform import models import core.storage.base_model.gae_models as base_models -from typing import Any, Dict, List +from typing import Dict, List MYPY = False if MYPY: # pragma: no cover + # Here, we are importing 'platform_parameter_domain' only for type checking. 
+ from core.domain import platform_parameter_domain # pylint: disable=invalid-import # isort:skip + from mypy_imports import base_models from mypy_imports import datastore_services datastore_services = models.Registry.import_datastore_services() @@ -76,17 +79,15 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'value': base_models.EXPORT_POLICY.NOT_APPLICABLE }) - # TODO(#13523): Change 'commit_cmds' to domain object/TypedDict to - # remove Any from type-annotation below. - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with VersionedModel.commit(). # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override def commit( # type: ignore[override] - self, - committer_id: str, - commit_cmds: List[Dict[str, Any]] + self, + committer_id: str, + commit_cmds: base_models.AllowedCommitCmdsListType ) -> None: - super(ConfigPropertyModel, self).commit(committer_id, '', commit_cmds) + super().commit(committer_id, '', commit_cmds) class PlatformParameterSnapshotMetadataModel( @@ -139,13 +140,11 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'rule_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE }) - # TODO(#13523): Change 'rule_dicts' to domain object/TypedDict to - # remove Any from type-annotation below. @classmethod def create( cls, param_name: str, - rule_dicts: List[Dict[str, Any]], + rule_dicts: List[platform_parameter_domain.PlatformParameterRuleDict], rule_schema_version: int ) -> PlatformParameterModel: """Creates a PlatformParameterModel instance. @@ -161,8 +160,10 @@ def create( PlatformParameterFilter objects, having the following structure: - type: str. The type of the filter. - - value: *. The value of the filter to match - against. + - conditions: list((str, str)). 
Each element of the + list is a 2-tuple (op, value), where op is the + operator for comparison and value is the value + used for comparison. rule_schema_version: int. The schema version for the rule dicts. Returns: diff --git a/core/storage/config/gae_models_test.py b/core/storage/config/gae_models_test.py index 2c45a1376b44..08fb8d9d409e 100644 --- a/core/storage/config/gae_models_test.py +++ b/core/storage/config/gae_models_test.py @@ -22,13 +22,18 @@ from core.platform import models from core.tests import test_utils +from typing import List + MYPY = False if MYPY: # pragma: no cover + # Here, we are importing 'platform_parameter_domain' only for type checking. + from core.domain import platform_parameter_domain from mypy_imports import base_models from mypy_imports import config_models -(base_models, config_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.config]) +(base_models, config_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.CONFIG +]) class ConfigPropertySnapshotContentModelTests(test_utils.GenericTestBase): @@ -72,6 +77,25 @@ def test_commit(self) -> None: self.assertEqual(retrieved_model2.value, 'd') + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + config_models.ConfigPropertyModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'value': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + config_models.ConfigPropertyModel.get_export_policy(), + expected_export_policy_dict + ) + class PlatformParameterSnapshotContentModelTests(test_utils.GenericTestBase): @@ -107,7 +131,9 @@ def 
test_create_model(self) -> None: def test_commit(self) -> None: parameter_name = 'parameter_name' - rule_dicts = [{'filters': [], 'value_when_matched': False}] + rule_dicts: List[ + platform_parameter_domain.PlatformParameterRuleDict + ] = [{'filters': [], 'value_when_matched': False}] param_model = config_models.PlatformParameterModel.create( param_name=parameter_name, @@ -125,10 +151,10 @@ def test_commit(self) -> None: self.assertEqual(retrieved_model1.rules, rule_dicts) - new_rules = [ + new_rules: List[platform_parameter_domain.PlatformParameterRuleDict] = [ { 'filters': [ - {'type': 'app_version', 'value': '>1.2.3'} + {'type': 'app_version', 'conditions': [['>', '1.2.3']]} ], 'value_when_matched': True }, @@ -147,7 +173,9 @@ def test_commit(self) -> None: def test_commit_is_persistent_in_storage(self) -> None: parameter_name = 'parameter_name' - rule_dicts = [{'filters': [], 'value_when_matched': False}] + rule_dicts: List[ + platform_parameter_domain.PlatformParameterRuleDict + ] = [{'filters': [], 'value_when_matched': False}] param_model = config_models.PlatformParameterModel.create( param_name=parameter_name, @@ -166,7 +194,9 @@ def test_commit_is_persistent_in_storage(self) -> None: def test_commit_with_updated_rules(self) -> None: parameter_name = 'parameter_name' - rule_dicts = [{'filters': [], 'value_when_matched': False}] + rule_dicts: List[ + platform_parameter_domain.PlatformParameterRuleDict + ] = [{'filters': [], 'value_when_matched': False}] param_model = config_models.PlatformParameterModel.create( param_name=parameter_name, @@ -176,10 +206,10 @@ def test_commit_with_updated_rules(self) -> None: ) param_model.commit(feconf.SYSTEM_COMMITTER_ID, 'commit message', []) - new_rules = [ + new_rules: List[platform_parameter_domain.PlatformParameterRuleDict] = [ { 'filters': [ - {'type': 'app_version', 'value': '>1.2.3'} + {'type': 'app_version', 'conditions': [['>', '1.2.3']]} ], 'value_when_matched': True }, @@ -194,3 +224,23 @@ def 
test_commit_with_updated_rules(self) -> None: assert retrieved_model is not None self.assertEqual(retrieved_model.rules, new_rules) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + config_models.PlatformParameterModel.get_model_association_to_user(), # pylint: disable=line-too-long + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'rules': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'rule_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + config_models.PlatformParameterModel.get_export_policy(), + expected_export_policy_dict + ) diff --git a/core/storage/email/gae_models.py b/core/storage/email/gae_models.py index bb55f5613cfa..67792785013a 100644 --- a/core/storage/email/gae_models.py +++ b/core/storage/email/gae_models.py @@ -24,14 +24,14 @@ from core import utils from core.platform import models -from typing import Dict, List, Optional, Sequence +from typing import Dict, Optional, Sequence MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() @@ -77,9 +77,11 @@ class SentEmailModel(base_models.BaseModel): feconf.EMAIL_INTENT_REVIEW_CREATOR_DASHBOARD_SUGGESTIONS, feconf.EMAIL_INTENT_REVIEW_CONTRIBUTOR_DASHBOARD_SUGGESTIONS, feconf.EMAIL_INTENT_ADD_CONTRIBUTOR_DASHBOARD_REVIEWERS, - feconf.EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES, feconf.EMAIL_INTENT_ACCOUNT_DELETED, - feconf.BULK_EMAIL_INTENT_TEST 
+ feconf.BULK_EMAIL_INTENT_TEST, + ( + feconf + .EMAIL_INTENT_NOTIFY_CONTRIBUTOR_DASHBOARD_ACHIEVEMENTS) ]) # The subject line of the email. subject = datastore_services.TextProperty(required=True) @@ -94,10 +96,9 @@ class SentEmailModel(base_models.BaseModel): @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: """Model contains data corresponding to a user: recipient_id, - recipient_email, sender_id, and sender_email, but this isn't deleted - because this model is needed for auditing purposes. + recipient_email, sender_id, and sender_email. """ - return base_models.DELETION_POLICY.KEEP + return base_models.DELETION_POLICY.DELETE @staticmethod def get_model_association_to_user( @@ -125,6 +126,19 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'email_hash': base_models.EXPORT_POLICY.NOT_APPLICABLE }) + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """Delete instances of SentEmailModel for the user. + + Args: + user_id: str. The ID of the user whose data should be deleted. + """ + keys = cls.query(datastore_services.any_of( + cls.recipient_id == user_id, + cls.sender_id == user_id, + )).fetch(keys_only=True) + datastore_services.delete_multi(keys) + @classmethod def has_reference_to_user_id(cls, user_id: str) -> bool: """Check whether SentEmailModel exists for user. @@ -172,15 +186,15 @@ def _generate_id(cls, intent: str) -> str: @classmethod def create( - cls, - recipient_id: str, - recipient_email: str, - sender_id: str, - sender_email: str, - intent: str, - subject: str, - html_body: str, - sent_datetime: datetime.datetime + cls, + recipient_id: str, + recipient_email: str, + sender_id: str, + sender_email: str, + intent: str, + subject: str, + html_body: str, + sent_datetime: datetime.datetime ) -> None: """Creates a new SentEmailModel entry. 
@@ -207,7 +221,7 @@ def create( def _pre_put_hook(self) -> None: """Operations to perform just before the model is `put` into storage.""" - super(SentEmailModel, self)._pre_put_hook() + super()._pre_put_hook() self.email_hash = self._generate_hash( self.recipient_id, self.subject, self.html_body) @@ -255,10 +269,10 @@ def get_by_hash( @classmethod def _generate_hash( - cls, - recipient_id: str, - email_subject: str, - email_body: str + cls, + recipient_id: str, + email_subject: str, + email_body: str ) -> str: """Generate hash for a given recipient_id, email_subject and cleaned email_body. @@ -279,10 +293,10 @@ def _generate_hash( @classmethod def check_duplicate_message( - cls, - recipient_id: str, - email_subject: str, - email_body: str + cls, + recipient_id: str, + email_subject: str, + email_body: str ) -> bool: """Check for a given recipient_id, email_subject and cleaned email_body, whether a similar message has been sent in the last @@ -325,10 +339,11 @@ class BulkEmailModel(base_models.BaseModel): This model is read-only; entries cannot be modified once created. The id/key of instances of this model is randomly generated string of length 12. + + The recipient IDs are not stored in this model. But, we store all + bulk emails that are sent to a particular user in UserBulkEmailsModel. """ - # The user IDs of the email recipients. - recipient_ids = datastore_services.JsonProperty(default=[], compressed=True) # The user ID of the email sender. For site-generated emails this is equal # to SYSTEM_COMMITTER_ID. sender_id = datastore_services.StringProperty(required=True, indexed=True) @@ -352,13 +367,15 @@ class BulkEmailModel(base_models.BaseModel): sent_datetime = ( datastore_services.DateTimeProperty(required=True, indexed=True)) + # DEPRECATED in v3.2.1. Do not use. 
+ recipient_ids = datastore_services.JsonProperty(default=[], compressed=True) + @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: - """Model contains data corresponding to a user: recipient_ids, - sender_id, and sender_email, but this isn't deleted because this model - is needed for auditing purposes. + """Model contains data corresponding to a user: sender_id, and + sender_email. """ - return base_models.DELETION_POLICY.KEEP + return base_models.DELETION_POLICY.DELETE @staticmethod def get_model_association_to_user( @@ -375,21 +392,30 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: (since emails were sent to them). """ return dict(super(cls, cls).get_export_policy(), **{ - 'recipient_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'sender_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'sender_email': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'recipient_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'intent': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'subject': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'html_body': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'sent_datetime': base_models.EXPORT_POLICY.NOT_APPLICABLE }) + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """Delete instances of BulkEmailModel for the user. + + Args: + user_id: str. The ID of the user whose data should be deleted. + """ + keys = cls.query(datastore_services.any_of( + cls.sender_id == user_id, + )).fetch(keys_only=True) + datastore_services.delete_multi(keys) + @classmethod def has_reference_to_user_id(cls, user_id: str) -> bool: - """Check whether BulkEmailModel exists for user. Since recipient_ids - can't be indexed it also can't be checked by this method, we can allow - this because the deletion policy for this model is keep , thus even the - deleted user's id will remain here. + """Check whether BulkEmailModel exists for user. Args: user_id: str. The ID of the user whose data should be checked. 
@@ -398,25 +424,24 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: bool. Whether any models refer to the given user ID. """ return ( - cls.query(cls.sender_id == user_id).get(keys_only=True) is not None) + cls.query(cls.sender_id == user_id).get(keys_only=True) is not None + ) @classmethod def create( - cls, - instance_id: str, - recipient_ids: List[str], - sender_id: str, - sender_email: str, - intent: str, - subject: str, - html_body: str, - sent_datetime: datetime.datetime + cls, + instance_id: str, + sender_id: str, + sender_email: str, + intent: str, + subject: str, + html_body: str, + sent_datetime: datetime.datetime ) -> None: """Creates a new BulkEmailModel entry. Args: instance_id: str. The ID of the instance. - recipient_ids: list(str). The user IDs of the email recipients. sender_id: str. The user ID of the email sender. sender_email: str. The email address used to send the notification. intent: str. The intent string, i.e. the purpose of the email. @@ -426,7 +451,7 @@ def create( was sent, in UTC. 
""" email_model_instance = cls( - id=instance_id, recipient_ids=recipient_ids, sender_id=sender_id, + id=instance_id, sender_id=sender_id, sender_email=sender_email, intent=intent, subject=subject, html_body=html_body, sent_datetime=sent_datetime) email_model_instance.update_timestamps() diff --git a/core/storage/email/gae_models_test.py b/core/storage/email/gae_models_test.py index 40a0f20c30ec..166ce3a65633 100644 --- a/core/storage/email/gae_models_test.py +++ b/core/storage/email/gae_models_test.py @@ -22,10 +22,11 @@ import types from core import feconf +from core import utils from core.platform import models from core.tests import test_utils -from typing import Sequence +from typing import Final, Sequence MYPY = False if MYPY: # pragma: no cover @@ -33,96 +34,110 @@ from mypy_imports import email_models from mypy_imports import user_models # pylint: disable=unused-import -(base_models, email_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.email, models.NAMES.user]) +(base_models, email_models, user_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.EMAIL, models.Names.USER +]) class SentEmailModelUnitTests(test_utils.GenericTestBase): """Test the SentEmailModel class.""" + SENDER_ID: Final = 'sender_id' + RECIPIENT_ID: Final = 'recipient_id' + NONEXISTENT_USER_ID: Final = 'id_x' + def setUp(self) -> None: - super(SentEmailModelUnitTests, self).setUp() + super().setUp() def mock_generate_hash( - unused_cls: email_models.SentEmailModel, - unused_recipient_id: str, - unused_email_subject: str, - unused_email_body: str + unused_cls: email_models.SentEmailModel, + unused_recipient_id: str, + unused_email_subject: str, + unused_email_body: str ) -> str: return 'Email Hash' self.generate_constant_hash_ctx = self.swap( - email_models.SentEmailModel, '_generate_hash', - types.MethodType(mock_generate_hash, email_models.SentEmailModel)) + email_models.SentEmailModel, + '_generate_hash', + 
types.MethodType(mock_generate_hash, email_models.SentEmailModel) + ) + # Since we cannot reuse swap, we need to duplicate the code so that + # we can create the intitial model here. + with self.swap( + email_models.SentEmailModel, + '_generate_hash', + types.MethodType(mock_generate_hash, email_models.SentEmailModel) + ): + email_models.SentEmailModel.create( + 'recipient_id', 'recipient@email.com', self.SENDER_ID, + 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, + 'Email Subject', 'Email Body', datetime.datetime.utcnow()) def test_get_deletion_policy(self) -> None: self.assertEqual( email_models.SentEmailModel.get_deletion_policy(), - base_models.DELETION_POLICY.KEEP) + base_models.DELETION_POLICY.DELETE) def test_has_reference_to_user_id(self) -> None: - with self.generate_constant_hash_ctx: - email_models.SentEmailModel.create( - 'recipient_id', 'recipient@email.com', 'sender_id', - 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, - 'Email Subject', 'Email Body', datetime.datetime.utcnow()) - - self.assertTrue( - email_models.SentEmailModel.has_reference_to_user_id( - 'recipient_id')) - self.assertTrue( - email_models.SentEmailModel.has_reference_to_user_id( - 'sender_id')) - self.assertFalse( - email_models.SentEmailModel.has_reference_to_user_id('id_x')) + self.assertTrue( + email_models.SentEmailModel.has_reference_to_user_id( + 'recipient_id')) + self.assertTrue( + email_models.SentEmailModel.has_reference_to_user_id( + self.SENDER_ID)) + self.assertFalse( + email_models.SentEmailModel.has_reference_to_user_id( + self.NONEXISTENT_USER_ID)) + + def test_apply_deletion_policy_deletes_model_for_user_who_is_sender( + self + ) -> None: + email_models.SentEmailModel.apply_deletion_policy(self.SENDER_ID) + self.assertIsNone( + email_models.SentEmailModel.get_by_id(self.SENDER_ID)) + + def test_apply_deletion_policy_deletes_model_for_user_who_is_recipient( + self + ) -> None: + email_models.SentEmailModel.apply_deletion_policy(self.RECIPIENT_ID) + 
self.assertIsNone( + email_models.SentEmailModel.get_by_id(self.RECIPIENT_ID)) + + def test_apply_deletion_policy_raises_no_exception_for_nonexistent_user( + self + ) -> None: + email_models.SentEmailModel.apply_deletion_policy( + self.NONEXISTENT_USER_ID) def test_saved_model_can_be_retrieved_with_same_hash(self) -> None: - with self.generate_constant_hash_ctx: - email_models.SentEmailModel.create( - 'recipient_id', 'recipient@email.com', 'sender_id', - 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, - 'Email Subject', 'Email Body', datetime.datetime.utcnow()) + query = email_models.SentEmailModel.query() + query = query.filter( + email_models.SentEmailModel.email_hash == 'Email Hash') - query = email_models.SentEmailModel.query() - query = query.filter( - email_models.SentEmailModel.email_hash == 'Email Hash') + results: Sequence[email_models.SentEmailModel] = query.fetch(2) - results: Sequence[email_models.SentEmailModel] = query.fetch(2) + self.assertEqual(len(results), 1) - self.assertEqual(len(results), 1) + query = email_models.SentEmailModel.query() + query = query.filter( + email_models.SentEmailModel.email_hash == 'Bad Email Hash') - query = email_models.SentEmailModel.query() - query = query.filter( - email_models.SentEmailModel.email_hash == 'Bad Email Hash') + results = query.fetch(2) - results = query.fetch(2) - - self.assertEqual(len(results), 0) + self.assertEqual(len(results), 0) def test_get_by_hash_works_correctly(self) -> None: - with self.generate_constant_hash_ctx: - email_models.SentEmailModel.create( - 'recipient_id', 'recipient@email.com', 'sender_id', - 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, - 'Email Subject', 'Email Body', datetime.datetime.utcnow()) - - results = email_models.SentEmailModel.get_by_hash('Email Hash') - - self.assertEqual(len(results), 1) - - results = email_models.SentEmailModel.get_by_hash('Bad Email Hash') + results = email_models.SentEmailModel.get_by_hash('Email Hash') + self.assertEqual(len(results), 1) - 
self.assertEqual(len(results), 0) + results = email_models.SentEmailModel.get_by_hash('Bad Email Hash') + self.assertEqual(len(results), 0) def test_get_by_hash_returns_multiple_models_with_same_hash(self) -> None: with self.generate_constant_hash_ctx: email_models.SentEmailModel.create( - 'recipient_id', 'recipient@email.com', 'sender_id', - 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, - 'Email Subject', 'Email Body', datetime.datetime.utcnow()) - - email_models.SentEmailModel.create( - 'recipient_id', 'recipient@email.com', 'sender_id', + 'recipient_id', 'recipient@email.com', self.SENDER_ID, 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body', datetime.datetime.utcnow()) @@ -133,45 +148,105 @@ def test_get_by_hash_returns_multiple_models_with_same_hash(self) -> None: def test_get_by_hash_behavior_with_sent_datetime_lower_bound(self) -> None: with self.generate_constant_hash_ctx: time_now = datetime.datetime.utcnow() - email_models.SentEmailModel.create( - 'recipient_id', 'recipient@email.com', 'sender_id', + 'recipient_id', 'recipient@email.com', self.SENDER_ID, 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body', datetime.datetime.utcnow()) - results = email_models.SentEmailModel.get_by_hash( - 'Email Hash', sent_datetime_lower_bound=time_now) - self.assertEqual(len(results), 1) + results = email_models.SentEmailModel.get_by_hash( + 'Email Hash', sent_datetime_lower_bound=time_now) + self.assertEqual(len(results), 1) + + time_now1 = datetime.datetime.utcnow() + + results = email_models.SentEmailModel.get_by_hash( + 'Email Hash', sent_datetime_lower_bound=time_now1) + self.assertEqual(len(results), 0) - time_now1 = datetime.datetime.utcnow() + time_before = ( + datetime.datetime.utcnow() - datetime.timedelta(minutes=10)) - results = email_models.SentEmailModel.get_by_hash( - 'Email Hash', sent_datetime_lower_bound=time_now1) - self.assertEqual(len(results), 0) + results = 
email_models.SentEmailModel.get_by_hash( + 'Email Hash', sent_datetime_lower_bound=time_before) + self.assertEqual(len(results), 2) - time_before = ( - datetime.datetime.utcnow() - datetime.timedelta(minutes=10)) + # Check that it accepts only DateTime objects. + with self.assertRaisesRegex( + Exception, + 'Expected datetime, received Not a datetime object of type ' + '' + ): + # TODO(#13528): Here we use MyPy ignore because we remove this + # test after the backend is fully type-annotated. Here + # ignore[arg-type] is used to test method get_by_hash() + # for invalid input type. + email_models.SentEmailModel.get_by_hash( + 'Email Hash', + sent_datetime_lower_bound='Not a datetime object') # type: ignore[arg-type] + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'recipient_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'recipient_email': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'sender_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'sender_email': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'intent': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'subject': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'html_body': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'sent_datetime': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'email_hash': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = email_models.SentEmailModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = email_models.SentEmailModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) - results = email_models.SentEmailModel.get_by_hash( - 'Email Hash', sent_datetime_lower_bound=time_before) - self.assertEqual(len(results), 1) + def test_check_duplicate_message(self) 
-> None: + email_models.SentEmailModel.create( + 'recipient_id', 'recipient@email.com', self.SENDER_ID, + 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, + 'Email Subject', 'Email Body', datetime.datetime.utcnow()) - # Check that it accepts only DateTime objects. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] - Exception, - 'Expected datetime, received Not a datetime object of type ' - '' - ): - # TODO(#13528): Remove this test after the backend is fully - # type-annotated. Here ignore[arg-type] is used to test method - # get_by_hash() for invalid input type. - email_models.SentEmailModel.get_by_hash( - 'Email Hash', - sent_datetime_lower_bound='Not a datetime object') # type: ignore[arg-type] + self.assertTrue( + email_models.SentEmailModel.check_duplicate_message( + 'recipient_id', 'Email Subject', 'Email Body')) + + email_models.SentEmailModel.create( + 'recipient_id2', 'recipient@email.com', self.SENDER_ID, + 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, + 'Email Subject', 'Email Body', + datetime.datetime.utcnow() - datetime.timedelta( + minutes=feconf.DUPLICATE_EMAIL_INTERVAL_MINS)) + + self.assertFalse( + email_models.SentEmailModel.check_duplicate_message( + 'recipient_id2', 'Email Subject', 'Email Body')) + + def test_check_duplicate_messages_with_same_hash(self) -> None: + def mock_convert_to_hash(input_string: str, max_length: int) -> str: # pylint: disable=unused-argument + return 'some_poor_hash' + swap_generate_hash = self.swap( + utils, 'convert_to_hash', mock_convert_to_hash) + with swap_generate_hash: + email_models.SentEmailModel.create( + 'recipient_id', 'recipient@email.com', self.SENDER_ID, + 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP, + 'Email Subject', 'Email Body', datetime.datetime.utcnow()) + + self.assertFalse( + email_models.SentEmailModel.check_duplicate_message( + 'recipient_id2', 'Email Subject2', 'Email Body2')) def test_raise_exception_by_mocking_collision(self) -> None: # Test Exception for SentEmailModel. 
- with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'The id generator for SentEmailModel is ' 'producing too many collisions.' ): @@ -190,22 +265,63 @@ def test_raise_exception_by_mocking_collision(self) -> None: class BulkEmailModelUnitTests(test_utils.GenericTestBase): """Test the BulkEmailModel class.""" + SENDER_ID: Final = 'sender_id' + NONEXISTENT_USER_ID: Final = 'id_x' + + def setUp(self) -> None: + super().setUp() + email_models.BulkEmailModel.create( + 'instance_id', self.SENDER_ID, 'sender@email.com', + feconf.BULK_EMAIL_INTENT_MARKETING, 'Email Subject', 'Email Body', + datetime.datetime.utcnow()) + def test_get_deletion_policy(self) -> None: self.assertEqual( email_models.BulkEmailModel.get_deletion_policy(), - base_models.DELETION_POLICY.KEEP) + base_models.DELETION_POLICY.DELETE) def test_has_reference_to_user_id(self) -> None: - email_models.BulkEmailModel.create( - 'instance_id', ['recipient_1_id', 'recipient_2_id'], 'sender_id', - 'sender@email.com', feconf.BULK_EMAIL_INTENT_MARKETING, - 'Email Subject', 'Email Body', datetime.datetime.utcnow()) - self.assertTrue( email_models.BulkEmailModel.has_reference_to_user_id( - 'sender_id')) + self.SENDER_ID)) self.assertFalse( - email_models.BulkEmailModel.has_reference_to_user_id('id_x')) + email_models.BulkEmailModel.has_reference_to_user_id( + self.NONEXISTENT_USER_ID)) + + def test_apply_deletion_policy_deletes_model_for_user_who_is_sender( + self + ) -> None: + email_models.BulkEmailModel.apply_deletion_policy(self.SENDER_ID) + self.assertIsNone( + email_models.BulkEmailModel.get_by_id(self.SENDER_ID)) + + def test_apply_deletion_policy_raises_no_exception_for_nonexistent_user( + self + ) -> None: + email_models.BulkEmailModel.apply_deletion_policy( + self.NONEXISTENT_USER_ID) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'sender_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'sender_email': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'recipient_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'intent': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'subject': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'html_body': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'sent_datetime': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = email_models.BulkEmailModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = email_models.BulkEmailModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) class GenerateHashTests(test_utils.GenericTestBase): diff --git a/core/storage/exploration/gae_models.py b/core/storage/exploration/gae_models.py index b3ea59ba29ab..ace75d4beb88 100644 --- a/core/storage/exploration/gae_models.py +++ b/core/storage/exploration/gae_models.py @@ -19,19 +19,20 @@ from __future__ import annotations import datetime +import random +import string from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.platform import models import core.storage.base_model.gae_models as base_models -from typing import Any, Dict, List, Optional, Sequence, Tuple +from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple MYPY = False if MYPY: # pragma: no cover - from mypy_imports import datastore_services # pylint: disable=unused-import + from mypy_imports import datastore_services datastore_services = models.Registry.import_datastore_services() @@ -92,7 +93,7 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE }) - # We have ignored [override] here because the signature of this method + # 
Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get_multi(). @classmethod def get_multi( # type: ignore[override] @@ -161,6 +162,10 @@ def get_all_non_private_commits( more: bool. If True, there are (probably) more results after this batch. If False, there are no further results after this batch. + + Raises: + ValueError. If the max age is other than datetime.timedelta + instance or None. """ if not isinstance(max_age, datetime.timedelta) and max_age is not None: @@ -229,6 +234,13 @@ class ExplorationModel(base_models.VersionedModel): # exploration. correctness_feedback_enabled = datastore_services.BooleanProperty( default=False, indexed=True) + # The next_content_id index to use for generation of new content ids. + next_content_id_index = datastore_services.IntegerProperty( + required=True, default=0, indexed=True) + # An boolean indicating whether further edits can be made to the + # exploration. + edits_allowed = datastore_services.BooleanProperty( + default=True, indexed=True) @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: @@ -259,7 +271,9 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'param_changes': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'auto_tts_enabled': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'correctness_feedback_enabled': - base_models.EXPORT_POLICY.NOT_APPLICABLE + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'next_content_id_index': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'edits_allowed': base_models.EXPORT_POLICY.NOT_APPLICABLE }) @classmethod @@ -267,15 +281,34 @@ def get_exploration_count(cls) -> int: """Returns the total number of explorations.""" return cls.get_all().count() - # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object - # to remove Any used below. 
- def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't be allowed. + def _prepare_additional_models(self) -> Mapping[str, base_models.BaseModel]: + """Prepares additional models needed for the commit process. + + Returns: + dict(str, BaseModel). Additional models needed for + the commit process. Contains the ExplorationRightsModel. + """ + return { + 'rights_model': ExplorationRightsModel.get_by_id(self.id) + } + + # Here we use MyPy ignore because super class (VersionedModel) + # defines this 'additional_models' argument as broader type but + # here we are sure that in this sub-class (ExplorationModel) argument + # 'additional_models' is always going to be of type Dict[str, + # ExplorationRightsModel]. So, due to this conflict in argument types, + # a conflict in signatures occurred which causes MyPy to throw an + # error. Thus, to avoid the error, we used ignore here. + def compute_models_to_commit( # type: ignore[override] + self, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: base_models.AllowedCommitCmdsListType, + additional_models: Mapping[str, ExplorationRightsModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this extends the superclass method. @@ -285,35 +318,55 @@ def _trusted_commit( change. commit_type: str. The type of commit. Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. - commit_message: str. The commit description message. + commit_message: str|None. The commit description message or None for + unpublished explorations. commit_cmds: list(dict). A list of commands, describing changes made in this model, which should give sufficient information to reconstruct the commit. Each dict always contains: cmd: str. 
Unique command. and then additional arguments for that command. - """ - super(ExplorationModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. - exp_rights = ExplorationRightsModel.get_by_id(self.id) + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. + """ + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) + exploration_rights_model = additional_models['rights_model'] exploration_commit_log = ExplorationCommitLogEntryModel.create( - self.id, self.version, committer_id, commit_type, commit_message, - commit_cmds, exp_rights.status, exp_rights.community_owned + self.id, self.version, + committer_id, + commit_type, + commit_message, + commit_cmds, + exploration_rights_model.status, + exploration_rights_model.community_owned ) exploration_commit_log.exploration_id = self.id - exploration_commit_log.update_timestamps() - exploration_commit_log.put() + return { + 'snapshot_metadata_model': models_to_put['snapshot_metadata_model'], + 'snapshot_content_model': models_to_put['snapshot_content_model'], + 'commit_log_model': exploration_commit_log, + 'versioned_model': models_to_put['versioned_model'], + } - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.delete_multi(). @classmethod def delete_multi( # type: ignore[override] - cls, - entity_ids: List[str], - committer_id: str, - commit_message: str, - force_deletion: bool = False + cls, + entity_ids: List[str], + committer_id: str, + commit_message: str, + force_deletion: bool = False ) -> None: """Deletes the given cls instances with the given entity_ids. 
@@ -327,6 +380,8 @@ def delete_multi( # type: ignore[override] from storage, otherwise there are only marked as deleted. Default is False. """ + versioned_exp_models = cls.get_multi(entity_ids, include_deleted=True) + super(ExplorationModel, cls).delete_multi( entity_ids, committer_id, commit_message, force_deletion=force_deletion) @@ -335,17 +390,16 @@ def delete_multi( # type: ignore[override] commit_log_models = [] exp_rights_models = ExplorationRightsModel.get_multi( entity_ids, include_deleted=True) - versioned_models = cls.get_multi(entity_ids, include_deleted=True) - versioned_and_exp_rights_models = python_utils.ZIP( - versioned_models, exp_rights_models) + versioned_and_exp_rights_models = zip( + versioned_exp_models, exp_rights_models) for model, rights_model in versioned_and_exp_rights_models: # Ruling out the possibility of None for mypy type checking. assert model is not None assert rights_model is not None exploration_commit_log = ExplorationCommitLogEntryModel.create( model.id, model.version, committer_id, - cls._COMMIT_TYPE_DELETE, + feconf.COMMIT_TYPE_DELETE, commit_message, [{'cmd': cls.CMD_DELETE_COMMIT}], rights_model.status, rights_model.community_owned ) @@ -354,9 +408,29 @@ def delete_multi( # type: ignore[override] ExplorationCommitLogEntryModel.update_timestamps_multi( commit_log_models) datastore_services.put_multi(commit_log_models) - - # TODO(#13523): Change snapshot of this model to TypedDict/Domain Object - # to remove Any used below. + else: + # Delete the ExplorationVersionHistoryModels if force_deletion is + # True. 
+ versioned_exp_models_without_none = [ + model for model in versioned_exp_models + if model is not None + ] + version_history_keys = [] + for model in versioned_exp_models_without_none: + for version in range(1, model.version + 1): + version_history_id = ( + ExplorationVersionHistoryModel.get_instance_id( + model.id, version + ) + ) + version_history_keys.append(datastore_services.Key( + ExplorationVersionHistoryModel, version_history_id)) + datastore_services.delete_multi(version_history_keys) + + # TODO(#15911): Here we use type Any because 'convert_to_valid_dict' method + # accepts content NDB JSON properties and those NDB JSON properties have + # loose typing. So, once we explicitly type those NDB JSON properties, we + # can remove Any type from here. @staticmethod def convert_to_valid_dict(snapshot_dict: Dict[str, Any]) -> Dict[str, Any]: """Replace invalid fields and values in the ExplorationModel dict. @@ -384,11 +458,13 @@ def convert_to_valid_dict(snapshot_dict: Dict[str, Any]) -> Dict[str, Any]: return snapshot_dict - # TODO(#13523): Change 'snapshot_dict' to TypedDict/Domain Object - # to remove Any used below. + # TODO(#15911): Here we use type Any because this '_reconstitute' method + # accepts content NDB JSON properties and those NDB JSON properties have + # loose typing. So, once we explicitly type those NDB JSON properties, we + # can remove Any type from the argument of '_reconstitute' method. def _reconstitute( - self, - snapshot_dict: Dict[str, Any] + self, + snapshot_dict: Dict[str, Any] ) -> ExplorationModel: """Populates the model instance with the snapshot. Some old ExplorationSnapshotContentModels can contain fields @@ -589,13 +665,11 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: cls.viewer_ids == user_id )).get(keys_only=True) is not None - # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object - # to remove Any used below. 
def save( - self, - committer_id: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] + self, + committer_id: str, + commit_message: str, + commit_cmds: base_models.AllowedCommitCmdsListType ) -> None: """Saves a new version of the exploration, updating the Exploration datastore model. @@ -614,11 +688,12 @@ def save( {'cmd': 'AUTO_revert_version_number', 'version_number': 4} """ - super(ExplorationRightsModel, self).commit( - committer_id, commit_message, commit_cmds) + super().commit(committer_id, commit_message, commit_cmds) - # TODO(#13523): Change snapshot of this model to TypedDict/Domain Object - # to remove Any used below. + # TODO(#15911): Here we use type Any because 'convert_to_valid_dict' method + # accepts content NDB JSON properties and those NDB JSON properties have + # loose typing. So, once we explicitly type those NDB JSON properties, we + # can remove Any type from here. @staticmethod def convert_to_valid_dict(model_dict: Dict[str, Any]) -> Dict[str, Any]: """Replace invalid fields and values in the ExplorationRightsModel dict. @@ -668,11 +743,13 @@ def convert_to_valid_dict(model_dict: Dict[str, Any]) -> Dict[str, Any]: return model_dict - # TODO(#13523): Change 'snapshot_dict' to TypedDict/Domain Object - # to remove Any used below. + # TODO(#15911): Here we use type Any because this '_reconstitute' method + # accepts content NDB JSON properties and those NDB JSON properties have + # loose typing. So, once we explicitly type those NDB JSON properties, we + # can remove Any type from the argument of '_reconstitute' method. def _reconstitute( - self, - snapshot_dict: Dict[str, Any] + self, + snapshot_dict: Dict[str, Any] ) -> ExplorationRightsModel: """Populates the model instance with the snapshot. @@ -693,15 +770,17 @@ def _reconstitute( **ExplorationRightsModel.convert_to_valid_dict(snapshot_dict)) return self - # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object - # to remove Any used below. 
- def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + def compute_models_to_commit( + self, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: base_models.AllowedCommitCmdsListType, + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't + # be allowed. + additional_models: Mapping[str, base_models.BaseModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this extends the superclass method. @@ -711,38 +790,29 @@ def _trusted_commit( change. commit_type: str. The type of commit. Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. - commit_message: str. The commit description message. + commit_message: str|None. The commit description message or None for + unpublished explorations. commit_cmds: list(dict). A list of commands, describing changes made in this model, should give sufficient information to reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. - """ - - super(ExplorationRightsModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. - # Create and delete events will already be recorded in the - # ExplorationModel. 
- if commit_type not in ['create', 'delete']: - ExplorationCommitLogEntryModel( - id=('rights-%s-%s' % (self.id, self.version)), - user_id=committer_id, - exploration_id=self.id, - commit_type=commit_type, - commit_message=commit_message, - commit_cmds=commit_cmds, - version=None, - post_commit_status=self.status, - post_commit_community_owned=self.community_owned, - post_commit_is_private=( - self.status == constants.ACTIVITY_STATUS_PRIVATE) - ).put() + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. + """ + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) - snapshot_metadata_model = self.SNAPSHOT_METADATA_CLASS.get( - self.get_snapshot_id(self.id, self.version)) - # Ruling out the possibility of None for mypy type checking. - assert snapshot_metadata_model is not None + snapshot_metadata_model = models_to_put['snapshot_metadata_model'] snapshot_metadata_model.content_user_ids = list(sorted( set(self.owner_ids) | set(self.editor_ids) | @@ -758,12 +828,41 @@ def _trusted_commit( if cmd['name'] == commit_cmd['cmd'] ) for user_id_attribute_name in user_id_attribute_names: - commit_cmds_user_ids.add(commit_cmd[user_id_attribute_name]) + user_id_name_value = commit_cmd[user_id_attribute_name] + # Ruling out the possibility of any other type for mypy type + # checking. + assert isinstance(user_id_name_value, str) + commit_cmds_user_ids.add(user_id_name_value) snapshot_metadata_model.commit_cmds_user_ids = list( sorted(commit_cmds_user_ids)) - snapshot_metadata_model.update_timestamps() - snapshot_metadata_model.put() + # Create and delete events will already be recorded in the + # ExplorationModel. 
+ if commit_type not in ['create', 'delete']: + exploration_commit_log = ExplorationCommitLogEntryModel( + id=('rights-%s-%s' % (self.id, self.version)), + user_id=committer_id, + exploration_id=self.id, + commit_type=commit_type, + commit_message=commit_message, + commit_cmds=commit_cmds, + version=None, + post_commit_status=self.status, + post_commit_community_owned=self.community_owned, + post_commit_is_private=( + self.status == constants.ACTIVITY_STATUS_PRIVATE) + ) + + return { + 'snapshot_metadata_model': ( + models_to_put['snapshot_metadata_model']), + 'snapshot_content_model': ( + models_to_put['snapshot_content_model']), + 'commit_log_model': exploration_commit_log, + 'versioned_model': models_to_put['versioned_model'], + } + + return models_to_put @classmethod def export_data(cls, user_id: str) -> Dict[str, List[str]]: @@ -799,6 +898,104 @@ def export_data(cls, user_id: str) -> Dict[str, List[str]]: } +class TransientCheckpointUrlModel(base_models.BaseModel): + """Model for storing the progress of a logged-out user.""" + + # The exploration id. + exploration_id = ( + datastore_services.StringProperty(required=True, indexed=True)) + # The state name of the furthest reached checkpoint. + furthest_reached_checkpoint_state_name = datastore_services.StringProperty( + default=None) + # The exploration version of the furthest reached checkpoint. + furthest_reached_checkpoint_exp_version = ( + datastore_services.IntegerProperty(default=None)) + # The state name of the most recently reached checkpoint. + most_recently_reached_checkpoint_state_name = ( + datastore_services.StringProperty(default=None)) + # The exploration version of the most recently reached checkpoint. 
+ most_recently_reached_checkpoint_exp_version = ( + datastore_services.IntegerProperty(default=None)) + + @staticmethod + def get_deletion_policy() -> base_models.DELETION_POLICY: + """Model doesn't contain any data directly corresponding to a user.""" + return base_models.DELETION_POLICY.NOT_APPLICABLE + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """Model does not contain user data.""" + return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model doesn't contain any data directly corresponding to a user.""" + return dict(super(cls, cls).get_export_policy(), **{ + 'exploration_id': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'furthest_reached_checkpoint_state_name': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'furthest_reached_checkpoint_exp_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'most_recently_reached_checkpoint_state_name': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'most_recently_reached_checkpoint_exp_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE + }) + + @classmethod + def create( + cls, + exploration_id: str, + unique_progress_url_id: str + ) -> TransientCheckpointUrlModel: + """Creates a new TransientCheckpointUrlModel instance and returns it. + + Note that the client is responsible for actually saving this entity to + the datastore. + + Args: + exploration_id: str. The ID of the exploration. + unique_progress_url_id: str. The 6 digit long unique id + assigned to the progress made by a logged-out user. + + Returns: + TransientCheckpointUrlModel. The newly created + TransientCheckpointUrlModel instance. + """ + entity = cls( + id=unique_progress_url_id, + exploration_id=exploration_id) + + entity.update_timestamps() + entity.put() + return entity + + @classmethod + def get_new_progress_id(cls) -> str: + """Gets a new unique progress url id for the logged-out user. 
+ + The returned id is guaranteed to be unique among all instances of this + entity. + + Returns: + str. New unique progress url id. + + Raises: + Exception. An ID cannot be generated within a reasonable number + of attempts. + """ + for _ in range(base_models.MAX_RETRIES): + new_id = '%s' % ''.join( + random.choice(string.ascii_letters) + for _ in range(constants.MAX_PROGRESS_URL_ID_LENGTH)) + if not cls.get_by_id(new_id): + return new_id + + raise Exception('New id generator is producing too many collisions.') + + class ExpSummaryModel(base_models.BaseModel): """Summary model for an Oppia exploration. @@ -1021,7 +1218,7 @@ def get_model_association_to_user( @classmethod def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: """Model contains data corresponding to a user, but this isn't exported - because because noteworthy details that belong to this model have + because noteworthy details that belong to this model have already been exported as a part of the ExplorationModel. """ return dict(super(cls, cls).get_export_policy(), **{ @@ -1047,3 +1244,126 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'contributors_summary': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE }) + + +class ExplorationVersionHistoryModel(base_models.BaseModel): + """Version history model for an oppia exploration. + + Version history means some information about the previous commit on each + state and the exploration metadata at a particular version of the + exploration. The information about each state includes the version + number of the exploration on which the state was previously edited, + the name of the state at the previous version and the id of the user who + committed those changes. For metadata, the information includes the + version number of the exploration on which the metadata was previously + edited and the id of the user who committed those changes. 
+ + A new instance of this model is created each time a new exploration + is created or some changes are saved in an exploration. + + The id of the model is generated as follows: + {exploration_id}-{exploration_version} + """ + + # The id of the corresponding exploration. + exploration_id = datastore_services.StringProperty( + required=True, indexed=True) + # The version of the corresponding exploration. + exploration_version = datastore_services.IntegerProperty( + required=True, indexed=True) + # The details of the previous commit on each state at a particular + # version of the exploration. The json structure will look like the + # following: + # { + # [state_name: str]: { + # "previously_edited_in_version": int, + # "state_name_in_previous_version": str, + # "committer_id": str + # } + # } + # The json object can have multiple keys in this case depending on + # the number of states. + state_version_history = datastore_services.JsonProperty( + default={}, indexed=False) + # The exploration version on which the metadata was previously edited. + # If its value is v, then it will indicate that the metadata was modified + # when the exploration was updated from version v -> v + 1. + # Its value will be None during the creation of an exploration. The value + # None indicates that the metadata was not modified after the creation of + # the exploration. + metadata_last_edited_version_number = datastore_services.IntegerProperty( + indexed=True) + # The user id of the user who committed the latest changes to the + # exploration metadata. + metadata_last_edited_committer_id = datastore_services.StringProperty( + required=True, indexed=True) + # The user ids of the users who did the 'previous commit' on each state + # in this version of the exploration. It is required during the + # wipeout process to query for the models efficiently. 
+ committer_ids = datastore_services.StringProperty( + indexed=True, repeated=True) + + @classmethod + def get_instance_id(cls, exp_id: str, exp_version: int) -> str: + """Returns ID of the exploration version history model. + + Args: + exp_id: str. The ID of the exploration. + exp_version: int. The version of the exploration. + + Returns: + str. A string containing exploration ID and + exploration version. + """ + return '%s.%s' % (exp_id, exp_version) + + @classmethod + def has_reference_to_user_id(cls, user_id: str) -> bool: + """Check whether ExplorationVersionHistoryModel references + the given user. + + Args: + user_id: str. The ID of the user whose data should be checked. + + Returns: + bool. Whether any models refer to the given user ID. + """ + return ExplorationVersionHistoryModel.query( + ExplorationVersionHistoryModel.committer_ids == user_id + ).get(keys_only=True) is not None + + @staticmethod + def get_deletion_policy() -> base_models.DELETION_POLICY: + """Model contains data to pseudonymize corresponding to a user: + committer_ids field, metadata_last_edited_committer_id field and the + user ids stored in state_version_history field. + """ + return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """All the noteworthy data in this model which are related to a + user's contributions to an exploration is already contained in various + user models such as UserContributionsModel which fall under Takeout. + Hence, this model will not export any data. 
+ """ + return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model contains data corresponding to a user, but this isn't exported + because the all the noteworthy data related to a user's contributions + to an exploration is already contained in various user models such + as UserContributionsModel which fall under Takeout. + """ + return dict(super(cls, cls).get_export_policy(), **{ + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_version_history': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'metadata_last_edited_version_number': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'metadata_last_edited_committer_id': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'committer_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE + }) diff --git a/core/storage/exploration/gae_models_test.py b/core/storage/exploration/gae_models_test.py index ad1ce59478af..fe0199a6d531 100644 --- a/core/storage/exploration/gae_models_test.py +++ b/core/storage/exploration/gae_models_test.py @@ -20,6 +20,7 @@ import copy import datetime +import types from core import feconf from core.constants import constants @@ -29,7 +30,7 @@ from core.platform import models from core.tests import test_utils -from typing import Any, Dict, List +from typing import Dict, Final, List MYPY = False if MYPY: # pragma: no cover @@ -37,8 +38,9 @@ from mypy_imports import exp_models from mypy_imports import user_models -(base_models, exp_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.exploration, models.NAMES.user]) +(base_models, exp_models, user_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.EXPLORATION, models.Names.USER +]) class ExplorationSnapshotContentModelTests(test_utils.GenericTestBase): @@ -58,10 +60,10 @@ def 
test_get_deletion_policy(self) -> None: base_models.DELETION_POLICY.NOT_APPLICABLE) def test_get_exploration_count(self) -> None: - exploration = exp_domain.Exploration.create_default_exploration( # type: ignore[no-untyped-call] + exploration = exp_domain.Exploration.create_default_exploration( 'id', title='A Title', category='A Category', objective='An Objective') - exp_services.save_new_exploration('id', exploration) # type: ignore[no-untyped-call] + exp_services.save_new_exploration('id', exploration) self.assertEqual( exp_models.ExplorationModel.get_exploration_count(), 1) @@ -72,10 +74,10 @@ def test_get_exploration_count(self) -> None: self.assertEqual(saved_exploration.objective, 'An Objective') def test_reconstitute(self) -> None: - exploration = exp_domain.Exploration.create_default_exploration( # type: ignore[no-untyped-call] + exploration = exp_domain.Exploration.create_default_exploration( 'id', title='A Title', category='A Category', objective='An Objective') - exp_services.save_new_exploration('id', exploration) # type: ignore[no-untyped-call] + exp_services.save_new_exploration('id', exploration) exp_model = exp_models.ExplorationModel.get_by_id('id') snapshot_dict = exp_model.compute_snapshot() snapshot_dict['skill_tags'] = ['tag1', 'tag2'] @@ -100,10 +102,10 @@ def test_get_deletion_policy(self) -> None: class ExplorationRightsSnapshotContentModelTests(test_utils.GenericTestBase): - EXP_ID_1 = '1' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - USER_ID_COMMITTER = 'id_committer' + EXP_ID_1: Final = '1' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_COMMITTER: Final = 'id_committer' def test_get_deletion_policy_is_locally_pseudonymize(self) -> None: self.assertEqual( @@ -143,29 +145,29 @@ def test_has_reference_to_user_id(self) -> None: class ExplorationRightsModelUnitTest(test_utils.GenericTestBase): """Test the ExplorationRightsModel class.""" - EXPLORATION_ID_1 = '1' - EXPLORATION_ID_2 = '2' - EXPLORATION_ID_3 = '3' - 
EXPLORATION_ID_4 = '4' + EXPLORATION_ID_1: Final = '1' + EXPLORATION_ID_2: Final = '2' + EXPLORATION_ID_3: Final = '3' + EXPLORATION_ID_4: Final = '4' # Related to all three explorations. - USER_ID_1 = 'id_1' + USER_ID_1: Final = 'id_1' # Related to a subset of the three explorations. - USER_ID_2 = 'id_2' + USER_ID_2: Final = 'id_2' # Related to no explorations. - USER_ID_3 = 'id_3' + USER_ID_3: Final = 'id_3' # Related to one collection and then removed from it. - USER_ID_4 = 'id_4' + USER_ID_4: Final = 'id_4' # User id used in commits. - USER_ID_COMMITTER = 'id_5' - USER_ID_4_OLD = 'id_4_old' - USER_ID_4_NEW = 'id_4_new' - USER_ID_5_OLD = 'id_5_old' - USER_ID_5_NEW = 'id_5_new' - USER_ID_6_OLD = 'id_6_old' - USER_ID_6_NEW = 'id_6_new' + USER_ID_COMMITTER: Final = 'id_5' + USER_ID_4_OLD: Final = 'id_4_old' + USER_ID_4_NEW: Final = 'id_4_new' + USER_ID_5_OLD: Final = 'id_5_old' + USER_ID_5_NEW: Final = 'id_5_new' + USER_ID_6_OLD: Final = 'id_6_old' + USER_ID_6_NEW: Final = 'id_6_new' def setUp(self) -> None: - super(ExplorationRightsModelUnitTest, self).setUp() + super().setUp() user_models.UserSettingsModel( id=self.USER_ID_1, email='some@email.com', @@ -269,8 +271,6 @@ def test_save(self) -> None: 'cid', 'Created new exploration right', [{'cmd': rights_domain.CMD_CREATE_NEW}]) saved_model = exp_models.ExplorationRightsModel.get('id_0') - # Ruling out the possibility of None for mypy type checking. - assert saved_model is not None self.assertEqual(saved_model.id, 'id_0') self.assertEqual(saved_model.owner_ids, ['owner_id']) self.assertEqual(saved_model.voice_artist_ids, ['voice_artist_id']) @@ -356,8 +356,6 @@ def test_reconstitute_excludes_deprecated_properties(self) -> None: 'cid', 'Created new exploration right', [{'cmd': rights_domain.CMD_CREATE_NEW}]) saved_model = exp_models.ExplorationRightsModel.get('id_0') - # Ruling out the possibility of None for mypy type checking. 
- assert saved_model is not None snapshot_dict = saved_model.compute_snapshot() snapshot_dict['translator_ids'] = ['owner_id'] @@ -376,15 +374,15 @@ def test_reconstitute_excludes_deprecated_properties(self) -> None: class ExplorationRightsModelRevertUnitTest(test_utils.GenericTestBase): """Test the revert method on ExplorationRightsModel class.""" - EXPLORATION_ID_1 = '1' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - USER_ID_3 = 'id_3' + EXPLORATION_ID_1: Final = '1' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_3: Final = 'id_3' # User id used in commits. - USER_ID_COMMITTER = 'id_4' + USER_ID_COMMITTER: Final = 'id_4' def setUp(self) -> None: - super(ExplorationRightsModelRevertUnitTest, self).setUp() + super().setUp() self.exploration_model = exp_models.ExplorationRightsModel( id=self.EXPLORATION_ID_1, owner_ids=[self.USER_ID_1], @@ -424,7 +422,9 @@ def setUp(self) -> None: 'name': feconf.CMD_REVERT_COMMIT, 'required_attribute_names': [], 'optional_attribute_names': [], - 'user_id_attribute_names': [] + 'user_id_attribute_names': [], + 'allowed_values': {}, + 'deprecated_values': {} }) self.allowed_commands_swap = self.swap( feconf, @@ -445,12 +445,9 @@ def test_revert_to_valid_version_is_successful(self) -> None: ) def test_revert_to_version_with_all_viewer_ids_field_successful( - self + self ) -> None: - # TODO(#13523): Use of Any in the type-annotation below will be - # removed when the snapshot of ExplorationRightsModel - # is converted to TypedDict/Domain Object. 
- broken_dict: Dict[str, Any] = dict(**self.original_dict) + broken_dict = dict(**self.original_dict) broken_dict['all_viewer_ids'] = [ self.USER_ID_1, self.USER_ID_2, self.USER_ID_3] @@ -476,10 +473,7 @@ def test_revert_to_version_with_all_viewer_ids_field_successful( ) def test_revert_to_version_with_invalid_status_is_successful(self) -> None: - # TODO(#13523): Use of Any in the type-annotation below will be - # removed when the snapshot of ExplorationRightsModel - # is converted to TypedDict/Domain Object. - broken_dict: Dict[str, Any] = dict(**self.original_dict) + broken_dict = dict(**self.original_dict) broken_dict['status'] = 'publicized' snapshot_model = ( @@ -563,12 +557,13 @@ def test_get_all_non_private_commits(self) -> None: self.assertFalse(more) self.assertEqual(len(results), 1) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'max_age must be a datetime.timedelta instance or None.' ): - # TODO(#13528): Remove this test after the backend is fully - # type-annotated. Here ignore[arg-type] is used to test method - # get_all_non_private_commits() for invalid input type. + # TODO(#13528): Here we use MyPy ignore because we remove this test + # after the backend is fully type-annotated. Here ignore[arg-type] + # is used to test method get_all_non_private_commits() for invalid + # input type. 
results, _, _ = ( exp_models.ExplorationCommitLogEntryModel .get_all_non_private_commits(2, None, max_age=1)) # type: ignore[arg-type] @@ -610,18 +605,18 @@ def test_get_multi(self) -> None: class ExpSummaryModelUnitTest(test_utils.GenericTestBase): """Tests for the ExpSummaryModel.""" - EXPLORATION_ID_1 = '1' - EXPLORATION_ID_2 = '2' - EXPLORATION_ID_3 = '3' - USER_ID_1_OLD = 'id_1_old' - USER_ID_1_NEW = 'id_1_new' - USER_ID_2_OLD = 'id_2_old' - USER_ID_2_NEW = 'id_2_new' - USER_ID_3_OLD = 'id_3_old' - USER_ID_3_NEW = 'id_3_new' + EXPLORATION_ID_1: Final = '1' + EXPLORATION_ID_2: Final = '2' + EXPLORATION_ID_3: Final = '3' + USER_ID_1_OLD: Final = 'id_1_old' + USER_ID_1_NEW: Final = 'id_1_new' + USER_ID_2_OLD: Final = 'id_2_old' + USER_ID_2_NEW: Final = 'id_2_new' + USER_ID_3_OLD: Final = 'id_3_old' + USER_ID_3_NEW: Final = 'id_3_new' def setUp(self) -> None: - super(ExpSummaryModelUnitTest, self).setUp() + super().setUp() user_models.UserSettingsModel( id=self.USER_ID_1_NEW, email='some@email.com', @@ -896,3 +891,191 @@ def test_get_at_least_editable(self) -> None: exp_models.ExpSummaryModel .get_at_least_editable('nonexistent_id')) self.assertEqual(0, len(exploration_summary_models)) + + +class ExplorationVersionHistoryModelUnitTest(test_utils.GenericTestBase): + """Unit tests for ExplorationVersionHistoryModel.""" + + def test_get_deletion_policy(self) -> None: + self.assertEqual( + exp_models.ExplorationVersionHistoryModel + .get_deletion_policy(), + base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE + ) + + def test_get_export_policy(self) -> None: + export_policy_dict = base_models.BaseModel.get_export_policy() + export_policy_dict.update({ + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_version_history': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'metadata_last_edited_version_number': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'metadata_last_edited_committer_id': 
( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'committer_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE + }) + + self.assertEqual( + exp_models.ExplorationVersionHistoryModel.get_export_policy(), + export_policy_dict) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + exp_models.ExplorationVersionHistoryModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_instance_id(self) -> None: + expected_instance_id = 'exp1.2' + actual_instance_id = ( + exp_models.ExplorationVersionHistoryModel.get_instance_id( + 'exp1', 2)) + + self.assertEqual(actual_instance_id, expected_instance_id) + + def test_has_reference_to_user_id(self) -> None: + exp_models.ExplorationVersionHistoryModel( + exploration_id='exp1', + exploration_version=2, + state_version_history={ + feconf.DEFAULT_INIT_STATE_NAME: { + 'previously_edited_in_version': 1, + 'state_name_in_previous_version': ( + feconf.DEFAULT_INIT_STATE_NAME), + 'committer_id': 'user_1' + } + }, + metadata_last_edited_version_number=1, + metadata_last_edited_committer_id='user_1', + committer_ids=['user_1'] + ).put() + + self.assertTrue( + exp_models.ExplorationVersionHistoryModel + .has_reference_to_user_id('user_1')) + self.assertFalse( + exp_models.ExplorationVersionHistoryModel + .has_reference_to_user_id('user_2')) + + +class TransientCheckpointUrlModelUnitTest(test_utils.GenericTestBase): + """Tests for the TransientCheckpointUrl model.""" + + def test_get_deletion_policy(self) -> None: + self.assertEqual( + exp_models.TransientCheckpointUrlModel.get_deletion_policy(), + base_models.DELETION_POLICY.NOT_APPLICABLE) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + exp_models.TransientCheckpointUrlModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'exploration_id': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'furthest_reached_checkpoint_exp_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'furthest_reached_checkpoint_state_name': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'most_recently_reached_checkpoint_exp_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'most_recently_reached_checkpoint_state_name': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + fetched_dict = ( + exp_models.TransientCheckpointUrlModel.get_export_policy()) + self.assertEqual( + expected_dict['exploration_id'], + fetched_dict['exploration_id']) + self.assertEqual( + expected_dict['furthest_reached_checkpoint_exp_version'], + fetched_dict['furthest_reached_checkpoint_exp_version']) + self.assertEqual( + expected_dict['furthest_reached_checkpoint_state_name'], + fetched_dict['furthest_reached_checkpoint_state_name']) + self.assertEqual( + expected_dict['most_recently_reached_checkpoint_exp_version'], + fetched_dict['most_recently_reached_checkpoint_exp_version']) + self.assertEqual( + expected_dict['most_recently_reached_checkpoint_state_name'], + fetched_dict['most_recently_reached_checkpoint_state_name']) + + def test_create_new_object(self) -> None: + exp_models.TransientCheckpointUrlModel.create( + 'exp_id', 'progress_id') + transient_checkpoint_url_model = ( + exp_models.TransientCheckpointUrlModel.get( + 'progress_id', strict=True)) + + # Ruling out the possibility of None for mypy type checking. + assert transient_checkpoint_url_model is not None + self.assertEqual( + transient_checkpoint_url_model.exploration_id, + 'exp_id') + self.assertIsNone( + transient_checkpoint_url_model. + most_recently_reached_checkpoint_exp_version) + self.assertIsNone( + transient_checkpoint_url_model. 
+ most_recently_reached_checkpoint_state_name) + self.assertIsNone( + transient_checkpoint_url_model. + furthest_reached_checkpoint_exp_version) + self.assertIsNone( + transient_checkpoint_url_model. + furthest_reached_checkpoint_state_name) + + def test_get_object(self) -> None: + exp_models.TransientCheckpointUrlModel.create( + 'exp_id', 'progress_id') + expected_model = exp_models.TransientCheckpointUrlModel( + exploration_id='exp_id', + most_recently_reached_checkpoint_exp_version=None, + most_recently_reached_checkpoint_state_name=None, + furthest_reached_checkpoint_exp_version=None, + furthest_reached_checkpoint_state_name=None + ) + + actual_model = ( + exp_models.TransientCheckpointUrlModel.get( + 'progress_id', strict=True)) + + self.assertEqual( + actual_model.exploration_id, + expected_model.exploration_id) + self.assertEqual( + actual_model.most_recently_reached_checkpoint_exp_version, + expected_model.most_recently_reached_checkpoint_exp_version) + self.assertEqual( + actual_model.most_recently_reached_checkpoint_state_name, + expected_model.most_recently_reached_checkpoint_state_name) + self.assertEqual( + actual_model.furthest_reached_checkpoint_exp_version, + expected_model.furthest_reached_checkpoint_exp_version) + self.assertEqual( + actual_model.furthest_reached_checkpoint_state_name, + expected_model.furthest_reached_checkpoint_state_name) + + def test_raise_exception_by_mocking_collision(self) -> None: + """Tests get_new_progress_id method for raising exception.""" + transient_checkpoint_progress_model_cls = ( + exp_models.TransientCheckpointUrlModel) + + # Test get_new_progress_id method. + with self.assertRaisesRegex( + Exception, + 'New id generator is producing too many collisions.' + ): + # Swap dependent method get_by_id to simulate collision every time. 
+ with self.swap( + transient_checkpoint_progress_model_cls, 'get_by_id', + types.MethodType( + lambda x, y: True, + transient_checkpoint_progress_model_cls + ) + ): + transient_checkpoint_progress_model_cls.get_new_progress_id() diff --git a/core/storage/feedback/gae_models.py b/core/storage/feedback/gae_models.py index 21c078a6e61a..6a626176a74c 100644 --- a/core/storage/feedback/gae_models.py +++ b/core/storage/feedback/gae_models.py @@ -19,7 +19,6 @@ from __future__ import annotations from core import feconf -from core import python_utils from core import utils # TODO(#13594): After the domain layer is refactored to be independent of # the storage layer, the disable=invalid-import will @@ -33,24 +32,25 @@ from core.domain import feedback_domain # pylint: disable=invalid-import from core.platform import models -from typing import Dict, List, Optional, Sequence, Tuple, Union +from typing import ( + Dict, Final, List, Literal, Optional, Sequence, Tuple, Union, overload) MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() # Allowed feedback thread statuses. -STATUS_CHOICES_OPEN = 'open' -STATUS_CHOICES_FIXED = 'fixed' -STATUS_CHOICES_IGNORED = 'ignored' -STATUS_CHOICES_COMPLIMENT = 'compliment' -STATUS_CHOICES_NOT_ACTIONABLE = 'not_actionable' -STATUS_CHOICES = [ +STATUS_CHOICES_OPEN: Final = 'open' +STATUS_CHOICES_FIXED: Final = 'fixed' +STATUS_CHOICES_IGNORED: Final = 'ignored' +STATUS_CHOICES_COMPLIMENT: Final = 'compliment' +STATUS_CHOICES_NOT_ACTIONABLE: Final = 'not_actionable' +STATUS_CHOICES: Final = [ STATUS_CHOICES_OPEN, STATUS_CHOICES_FIXED, STATUS_CHOICES_IGNORED, @@ -59,8 +59,8 @@ ] # Constants used for generating new ids. 
-_MAX_RETRIES = 10 -_RAND_RANGE = 127 * 127 +_MAX_RETRIES: Final = 10 +_RAND_RANGE: Final = 127 * 127 class GeneralFeedbackThreadModel(base_models.BaseModel): @@ -71,7 +71,7 @@ class GeneralFeedbackThreadModel(base_models.BaseModel): """ # We use the model id as a key in the Takeout dict. - ID_IS_USED_AS_TAKEOUT_KEY = True + ID_IS_USED_AS_TAKEOUT_KEY: Literal[True] = True # The type of entity the thread is linked to. entity_type = datastore_services.StringProperty(required=True, indexed=True) @@ -283,7 +283,7 @@ class GeneralFeedbackMessageModel(base_models.BaseModel): """ # We use the model id as a key in the Takeout dict. - ID_IS_USED_AS_TAKEOUT_KEY = True + ID_IS_USED_AS_TAKEOUT_KEY: Literal[True] = True # ID corresponding to an entry of FeedbackThreadModel. thread_id = datastore_services.StringProperty(required=True, indexed=True) @@ -353,8 +353,8 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: @classmethod def export_data( - cls, - user_id: str + cls, + user_id: str ) -> Dict[str, Dict[str, Union[str, int, bool, None]]]: """Exports the data from GeneralFeedbackMessageModel into dict format for Takeout. @@ -440,7 +440,8 @@ def create( def create_multi( cls, message_identifiers: List[ - feedback_domain.FullyQualifiedMessageIdentifier] + feedback_domain.FullyQualifiedMessageIdentifier + ] ) -> List[GeneralFeedbackMessageModel]: """Creates a new GeneralFeedbackMessageModel entry for each (thread_id, message_id) pair. @@ -470,7 +471,7 @@ def create_multi( # Generate the new ids. instance_ids = [ cls._generate_id(thread_id, message_id) for thread_id, message_id - in python_utils.ZIP(thread_ids, message_ids) + in zip(thread_ids, message_ids) ] # Check if the new ids are valid. @@ -487,7 +488,33 @@ def create_multi( return [cls(id=instance_id) for instance_id in instance_ids] - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method + # doesn't match with BaseModel.get(). 
+ @overload # type: ignore[override] + @classmethod + def get( + cls, thread_id: str, message_id: int + ) -> GeneralFeedbackMessageModel: ... + + @overload + @classmethod + def get( + cls, thread_id: str, message_id: int, *, strict: Literal[True] + ) -> GeneralFeedbackMessageModel: ... + + @overload + @classmethod + def get( + cls, thread_id: str, message_id: int, *, strict: Literal[False] + ) -> Optional[GeneralFeedbackMessageModel]: ... + + @overload + @classmethod + def get( + cls, thread_id: str, message_id: int, *, strict: bool = ... + ) -> Optional[GeneralFeedbackMessageModel]: ... + + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get(). @classmethod def get( # type: ignore[override] @@ -553,8 +580,6 @@ def get_most_recent_message( """ thread = GeneralFeedbackThreadModel.get_by_id(thread_id) message = cls.get(thread_id, thread.message_count - 1) - # Ruling out the possibility of None for mypy type checking. - assert message is not None return message @classmethod @@ -572,7 +597,7 @@ def get_message_count(cls, thread_id: str) -> int: @classmethod def get_message_counts( - cls, thread_ids: List[str] + cls, thread_ids: List[str] ) -> List[int]: """Returns a list containing the number of messages in the threads. Includes the deleted entries. @@ -694,7 +719,7 @@ def generate_full_id(cls, user_id: str, thread_id: str) -> str: """ return '%s.%s' % (user_id, thread_id) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get(). @classmethod def get( # type: ignore[override] @@ -758,7 +783,7 @@ def create_multi( GeneralFeedbackThreadUserModel.put_multi(new_instances) return new_instances - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get_multi(). 
@classmethod def get_multi( # type: ignore[override] @@ -864,10 +889,8 @@ class UnsentFeedbackEmailModel(base_models.BaseModel): @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: - """Model contains data corresponding to a user: id field but it isn't - deleted because it is needed for auditing purposes. - """ - return base_models.DELETION_POLICY.KEEP + """Model contains data corresponding to a user: id field.""" + return base_models.DELETION_POLICY.DELETE @staticmethod def get_model_association_to_user( @@ -884,6 +907,15 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'retries': base_models.EXPORT_POLICY.NOT_APPLICABLE }) + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """Delete instance of UnsentFeedbackEmailModel for the user. + + Args: + user_id: str. The ID of the user whose data should be deleted. + """ + cls.delete_by_id(user_id) + @classmethod def has_reference_to_user_id(cls, user_id: str) -> bool: """Check whether UnsentFeedbackEmailModel exists for user. 
diff --git a/core/storage/feedback/gae_models_test.py b/core/storage/feedback/gae_models_test.py index 10a36f537fab..df8b7ad2f5d3 100644 --- a/core/storage/feedback/gae_models_test.py +++ b/core/storage/feedback/gae_models_test.py @@ -36,7 +36,7 @@ from mypy_imports import user_models (base_models, feedback_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.feedback, models.NAMES.user]) + [models.Names.BASE_MODEL, models.Names.FEEDBACK, models.Names.USER]) CREATED_ON_FIELD = 'created_on' LAST_UPDATED_FIELD = 'last_updated' @@ -63,7 +63,7 @@ class FeedbackThreadModelTest(test_utils.GenericTestBase): def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(FeedbackThreadModelTest, self).setUp() + super().setUp() user_models.UserSettingsModel( id=self.NEW_USER_1_ID, @@ -104,7 +104,7 @@ def test_has_reference_to_user_id(self) -> None: def test_raise_exception_by_mocking_collision(self) -> None: feedback_thread_model_cls = feedback_models.GeneralFeedbackThreadModel # Test create method. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Feedback thread ID conflict on create.'): # Swap dependent method get_by_id to simulate collision every time. with self.swap( @@ -116,7 +116,7 @@ def test_raise_exception_by_mocking_collision(self) -> None: 'exploration.exp_id.thread_id') # Test generate_new_thread_id method. - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'New thread id generator is producing too many collisions.'): # Swap dependent method get_by_id to simulate collision every time. 
@@ -161,6 +161,44 @@ def test_message_cache_supports_huge_text(self) -> None: self.feedback_thread_model.update_timestamps() self.feedback_thread_model.put() + def test_get_threads(self) -> None: + self.assertEqual( + feedback_models.GeneralFeedbackThreadModel.get_threads( + self.ENTITY_TYPE, self.ENTITY_ID), + [self.feedback_thread_model]) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_type': base_models.EXPORT_POLICY.EXPORTED, + 'entity_id': base_models.EXPORT_POLICY.EXPORTED, + 'original_author_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'status': base_models.EXPORT_POLICY.EXPORTED, + 'subject': base_models.EXPORT_POLICY.EXPORTED, + 'summary': base_models.EXPORT_POLICY.EXPORTED, + 'has_suggestion': base_models.EXPORT_POLICY.EXPORTED, + 'message_count': base_models.EXPORT_POLICY.EXPORTED, + 'last_nonempty_message_text': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_nonempty_message_author_id': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.EXPORTED + } + model = feedback_models.GeneralFeedbackThreadModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = feedback_models.GeneralFeedbackThreadModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + def test_get_field_names_for_takeout(self) -> None: + expected_dict = {'last_updated': 'last_updated_msec'} + model = feedback_models.GeneralFeedbackThreadModel + self.assertEqual(model.get_field_names_for_takeout(), expected_dict) + class GeneralFeedbackMessageModelTests(test_utils.GenericTestBase): """Tests for the GeneralFeedbackMessageModel class.""" @@ -186,7 +224,7 @@ def test_has_reference_to_user_id(self) -> None: .has_reference_to_user_id('id_x')) def 
test_raise_exception_by_mocking_collision(self) -> None: - thread_id = feedback_services.create_thread( # type: ignore[no-untyped-call] + thread_id = feedback_services.create_thread( 'exploration', '0', 'test_author', 'subject 1', 'text 1') # Simulating the _generate_id function in the # GeneralFeedbackMessageModel class. @@ -196,23 +234,21 @@ def test_raise_exception_by_mocking_collision(self) -> None: r'The following feedback message ID\(s\) conflicted on ' 'create: %s' % (instance_id) ) - with self.assertRaisesRegexp(Exception, expected_exception_regexp): # type: ignore[no-untyped-call] + with self.assertRaisesRegex(Exception, expected_exception_regexp): feedback_models.GeneralFeedbackMessageModel.create( - feedback_domain.FullyQualifiedMessageIdentifier( # type: ignore[no-untyped-call] - thread_id, '0') + feedback_domain.FullyQualifiedMessageIdentifier( + thread_id, 0) ) def test_get_all_messages(self) -> None: - thread_id = feedback_services.create_thread( # type: ignore[no-untyped-call] - 'exploration', '0', None, 'subject 1', 'text 1') + thread_id = feedback_services.create_thread( + 'exploration', '0', 'test_author', 'subject 1', 'text 1') - feedback_services.create_message( # type: ignore[no-untyped-call] - thread_id, None, 'open', 'subject 2', 'text 2') + feedback_services.create_message( + thread_id, 'test_author', 'open', 'subject 2', 'text 2') model = feedback_models.GeneralFeedbackMessageModel.get( thread_id, 0) - # Ruling out the possibility of None for mypy type checking. 
- assert model is not None self.assertEqual(model.entity_type, 'exploration') all_messages = ( @@ -234,16 +270,14 @@ def test_get_all_messages(self) -> None: self.assertEqual(all_messages[0][1].updated_subject, 'subject 1') def test_get_most_recent_message(self) -> None: - thread_id = feedback_services.create_thread( # type: ignore[no-untyped-call] - 'exploration', '0', None, 'subject 1', 'text 1') + thread_id = feedback_services.create_thread( + 'exploration', '0', 'test_author', 'subject 1', 'text 1') - feedback_services.create_message( # type: ignore[no-untyped-call] - thread_id, None, 'open', 'subject 2', 'text 2') + feedback_services.create_message( + thread_id, 'test_author', 'open', 'subject 2', 'text 2') model1 = feedback_models.GeneralFeedbackMessageModel.get( thread_id, 0) - # Ruling out the possibility of None for mypy type checking. - assert model1 is not None self.assertEqual(model1.entity_type, 'exploration') message = ( @@ -274,9 +308,9 @@ def test_export_data_nontrivial(self) -> None: self.signup('export_author_1@example.com', 'exportAuthor1') test_export_author_id = ( - self.get_user_id_from_email('export_author_1@example.com')) # type: ignore[no-untyped-call] + self.get_user_id_from_email('export_author_1@example.com')) - thread_id = feedback_services.create_thread( # type: ignore[no-untyped-call] + thread_id = feedback_services.create_thread( test_export_thread_type, test_export_thread_id, test_export_author_id, @@ -284,7 +318,7 @@ def test_export_data_nontrivial(self) -> None: test_export_text ) - feedback_services.create_message( # type: ignore[no-untyped-call] + feedback_services.create_message( thread_id, test_export_author_id, test_export_updated_status, @@ -317,6 +351,63 @@ def test_export_data_nontrivial(self) -> None: self.assertEqual(test_data, user_data) + def test_get_all_messages_in_a_thread_correctly(self) -> None: + feedback_thread_model = feedback_models.GeneralFeedbackThreadModel( + id='thread_id', + 
entity_type=feconf.ENTITY_TYPE_EXPLORATION, + entity_id='exp_id_2', + original_author_id='user_1', + status='open', + subject='dummy_subject', + has_suggestion=True, + summary='This is a great summary.', + message_count=0 + ) + feedback_thread_model.update_timestamps() + feedback_thread_model.put() + self.assertEqual( + feedback_models.GeneralFeedbackMessageModel.get_message_count( + 'thread_id'), + 0) + self.assertEqual( + feedback_models.GeneralFeedbackMessageModel.get_messages( + 'thread_id'), + []) + feedback_message_model = feedback_models.GeneralFeedbackMessageModel( + id='id', + thread_id='thread_id', + message_id=1, + author_id='user_id', + received_via_email=False + ) + feedback_message_model.put() + self.assertEqual( + feedback_models.GeneralFeedbackMessageModel.get_messages( + 'thread_id'), + [feedback_message_model]) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'thread_id': base_models.EXPORT_POLICY.EXPORTED, + 'message_id': base_models.EXPORT_POLICY.EXPORTED, + 'author_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'updated_status': base_models.EXPORT_POLICY.EXPORTED, + 'updated_subject': base_models.EXPORT_POLICY.EXPORTED, + 'text': base_models.EXPORT_POLICY.EXPORTED, + 'received_via_email': base_models.EXPORT_POLICY.EXPORTED + } + model = feedback_models.GeneralFeedbackMessageModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = feedback_models.GeneralFeedbackMessageModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + class FeedbackThreadUserModelTest(test_utils.GenericTestBase): """Tests for the FeedbackThreadUserModel class.""" @@ -331,7 +422,7 @@ class 
FeedbackThreadUserModelTest(test_utils.GenericTestBase): MESSAGE_IDS_READ_IN_THREAD_C = [5, 6, 7, 8, 9] def setUp(self) -> None: - super(FeedbackThreadUserModelTest, self).setUp() + super().setUp() model = feedback_models.GeneralFeedbackThreadUserModel.create( self.USER_ID_A, self.THREAD_ID_A) model.message_ids_read_by_user = self.MESSAGE_IDS_READ_IN_THREAD_A @@ -420,7 +511,6 @@ def test_get_object(self) -> None: 'user_id', 'exploration.exp_id.thread_id')) # Ruling out the possibility of None for mypy type checking. - assert expected_model is not None assert actual_model is not None self.assertEqual(actual_model.id, expected_model.id) self.assertEqual(actual_model.user_id, expected_model.user_id) @@ -495,6 +585,39 @@ def test_export_data_nonexistent_case(self) -> None: self.USER_ID_B) self.assertEqual({}, user_data) + def test_delete_model_instance_of_user_by_applying_deletion_policy( + self) -> None: + feedback_models.GeneralFeedbackThreadUserModel.create( + 'user_id', 'exploration.exp_id.thread_id') + self.assertIsNotNone( + feedback_models.GeneralFeedbackThreadUserModel.get( + 'user_id', 'exploration.exp_id.thread_id')) + feedback_models.GeneralFeedbackThreadUserModel.apply_deletion_policy( + 'user_id') + self.assertIsNone( + feedback_models.GeneralFeedbackThreadUserModel.get( + 'user_id', 'exploration.exp_id.thread_id')) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'thread_id': + base_models.EXPORT_POLICY.EXPORTED_AS_KEY_FOR_TAKEOUT_DICT, + 'message_ids_read_by_user': + base_models.EXPORT_POLICY.EXPORTED + } + model = feedback_models.GeneralFeedbackThreadUserModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = 
feedback_models.GeneralFeedbackThreadUserModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + class FeedbackAnalyticsModelTests(test_utils.GenericTestBase): """Tests for the FeedbackAnalyticsModelTests class.""" @@ -504,17 +627,40 @@ def test_get_deletion_policy(self) -> None: feedback_models.FeedbackAnalyticsModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_open_threads': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_total_threads': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = feedback_models.FeedbackAnalyticsModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = feedback_models.FeedbackAnalyticsModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + class UnsentFeedbackEmailModelTest(test_utils.GenericTestBase): """Tests for FeedbackMessageEmailDataModel class.""" + NONEXISTENT_USER_ID = 'id_x' + USER_ID_1 = 'id_1' + + def setUp(self) -> None: + super().setUp() + feedback_models.UnsentFeedbackEmailModel(id='user_id').put() + def test_get_deletion_policy(self) -> None: self.assertEqual( feedback_models.UnsentFeedbackEmailModel.get_deletion_policy(), - base_models.DELETION_POLICY.KEEP) + base_models.DELETION_POLICY.DELETE) def test_has_reference_to_user_id(self) -> None: - feedback_models.UnsentFeedbackEmailModel(id='user_id').put() self.assertTrue( feedback_models.UnsentFeedbackEmailModel .has_reference_to_user_id('user_id')) @@ -522,6 +668,18 @@ def test_has_reference_to_user_id(self) -> None: feedback_models.UnsentFeedbackEmailModel 
.has_reference_to_user_id('id_x')) + def test_apply_deletion_policy_deletes_model_for_user(self) -> None: + feedback_models.UnsentFeedbackEmailModel.apply_deletion_policy( + self.USER_ID_1) + self.assertIsNone( + feedback_models.UnsentFeedbackEmailModel.get_by_id(self.USER_ID_1)) + + def test_apply_deletion_policy_raises_no_exception_for_nonexistent_user( + self + ) -> None: + feedback_models.UnsentFeedbackEmailModel.apply_deletion_policy( + self.NONEXISTENT_USER_ID) + def test_new_instances_stores_correct_data(self) -> None: user_id = 'A' message_reference_dict = { @@ -541,3 +699,21 @@ def test_new_instances_stores_correct_data(self) -> None: retrieved_instance.feedback_message_references, [message_reference_dict]) self.assertEqual(retrieved_instance.retries, 0) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'feedback_message_references': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'retries': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = feedback_models.UnsentFeedbackEmailModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = feedback_models.UnsentFeedbackEmailModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) diff --git a/core/storage/improvements/gae_models.py b/core/storage/improvements/gae_models.py index dc71ac16464f..bfd5a0845361 100644 --- a/core/storage/improvements/gae_models.py +++ b/core/storage/improvements/gae_models.py @@ -18,54 +18,46 @@ from __future__ import annotations +import datetime + +from core import feconf from core.constants import constants from core.platform import models -from typing import Dict, List, Optional +from typing import Dict, Final, List, Optional MYPY = False if 
MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() -TASK_ENTITY_TYPE_EXPLORATION = constants.TASK_ENTITY_TYPE_EXPLORATION -TASK_ENTITY_TYPES = ( - TASK_ENTITY_TYPE_EXPLORATION, +TASK_ENTITY_TYPES: Final = ( + constants.TASK_ENTITY_TYPE_EXPLORATION, ) -TASK_STATUS_OPEN = constants.TASK_STATUS_OPEN -TASK_STATUS_OBSOLETE = constants.TASK_STATUS_OBSOLETE -TASK_STATUS_RESOLVED = constants.TASK_STATUS_RESOLVED -TASK_STATUS_CHOICES = ( - TASK_STATUS_OPEN, - TASK_STATUS_OBSOLETE, - TASK_STATUS_RESOLVED, +TASK_STATUS_CHOICES: Final = ( + constants.TASK_STATUS_OPEN, + constants.TASK_STATUS_OBSOLETE, + constants.TASK_STATUS_RESOLVED, ) -TASK_TARGET_TYPE_STATE = constants.TASK_TARGET_TYPE_STATE -TASK_TARGET_TYPES = ( - TASK_TARGET_TYPE_STATE, +TASK_TARGET_TYPES: Final = ( + constants.TASK_TARGET_TYPE_STATE, ) -TASK_TYPE_HIGH_BOUNCE_RATE = constants.TASK_TYPE_HIGH_BOUNCE_RATE -TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP = ( - constants.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP) -TASK_TYPE_NEEDS_GUIDING_RESPONSES = constants.TASK_TYPE_NEEDS_GUIDING_RESPONSES -TASK_TYPE_SUCCESSIVE_INCORRECT_ANSWERS = ( - constants.TASK_TYPE_SUCCESSIVE_INCORRECT_ANSWERS) -TASK_TYPES = ( - TASK_TYPE_HIGH_BOUNCE_RATE, - TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP, - TASK_TYPE_SUCCESSIVE_INCORRECT_ANSWERS, - TASK_TYPE_NEEDS_GUIDING_RESPONSES, +TASK_TYPES: Final = ( + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP, + constants.TASK_TYPE_SUCCESSIVE_INCORRECT_ANSWERS, + constants.TASK_TYPE_NEEDS_GUIDING_RESPONSES, ) -class TaskEntryModel(base_models.BaseModel): +class ExplorationStatsTaskEntryModel(base_models.BaseModel): """Model representation of an actionable task from the improvements tab. 
The ID of a task has the form: "[entity_type].[entity_id].[entity_version]. @@ -114,7 +106,8 @@ class TaskEntryModel(base_models.BaseModel): @classmethod def has_reference_to_user_id(cls, user_id: str) -> bool: - """Check whether any TaskEntryModel references the given user. + """Check whether any ExplorationStatsTaskEntryModel references + the given user. Args: user_id: str. The ID of the user whose data should be checked. @@ -126,18 +119,14 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: - """Model contains data to delete corresponding to a user: + """Model contains data to pseudonymize corresponding to a user: resolver_id field. - - It is okay to delete task entries since, after they are resolved, they - only act as a historical record. The removal just removes the historical - record. """ - return base_models.DELETION_POLICY.DELETE + return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE @classmethod def apply_deletion_policy(cls, user_id: str) -> None: - """Delete instances of TaskEntryModel for the user. + """Delete instances of ExplorationStatsTaskEntryModel for the user. Args: user_id: str. The ID of the user whose data should be deleted. @@ -160,7 +149,8 @@ def get_model_association_to_user( @classmethod def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: """Model contains data to export corresponding to a user: - TaskEntryModel contains the ID of the user that acted on a task. + ExplorationStatsTaskEntryModel contains the ID of the user that acted + on a task. """ return dict(super(cls, cls).get_export_policy(), **{ 'composite_entity_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, @@ -190,18 +180,19 @@ def get_field_name_mapping_to_takeout_keys(cls) -> Dict[str, str]: @staticmethod def export_data(user_id: str) -> Dict[str, List[str]]: - """Returns the user-relevant properties of TaskEntryModels. 
+ """Returns the user-relevant properties of + ExplorationStatsTaskEntryModels. Args: user_id: str. The ID of the user whose data should be exported. Returns: - dict. The user-relevant properties of TaskEntryModel in a dict - format. In this case, we are returning all the ids of the tasks - which were closed by this user. + dict. The user-relevant properties of ExplorationStatsTaskEntryModel + in a dict format. In this case, we are returning all the ids of the + tasks which were closed by this user. """ - task_ids_resolved_by_user = TaskEntryModel.query( - TaskEntryModel.resolver_id == user_id) + task_ids_resolved_by_user = ExplorationStatsTaskEntryModel.query( + ExplorationStatsTaskEntryModel.resolver_id == user_id) return { 'task_ids_resolved_by_user': ( [t.id for t in task_ids_resolved_by_user]), @@ -215,13 +206,13 @@ def export_data(user_id: str) -> Dict[str, List[str]]: @classmethod def generate_task_id( - cls, - entity_type: str, - entity_id: str, - entity_version: int, - task_type: str, - target_type: str, - target_id: str + cls, + entity_type: str, + entity_id: str, + entity_version: int, + task_type: str, + target_type: str, + target_id: str ) -> str: """Generates a new task entry ID. @@ -237,16 +228,16 @@ def generate_task_id( Returns: str. The ID for the given task. """ - return '%s.%s.%d.%s.%s.%s' % ( + return feconf.TASK_ENTRY_ID_TEMPLATE % ( entity_type, entity_id, entity_version, task_type, target_type, target_id) @classmethod def generate_composite_entity_id( - cls, - entity_type: str, - entity_id: str, - entity_version: int + cls, + entity_type: str, + entity_id: str, + entity_version: int ) -> str: """Generates a new composite_entity_id value. @@ -259,21 +250,22 @@ def generate_composite_entity_id( Returns: str. The composite_entity_id for the given task. 
""" - return '%s.%s.%d' % (entity_type, entity_id, entity_version) + return feconf.COMPOSITE_ENTITY_ID_TEMPLATE % ( + entity_type, entity_id, entity_version) @classmethod def create( - cls, - entity_type: str, - entity_id: str, - entity_version: int, - task_type: str, - target_type: str, - target_id: str, - issue_description: Optional[str] = None, - status: str = TASK_STATUS_OBSOLETE, - resolver_id: Optional[str] = None, - resolved_on: Optional[str] = None + cls, + entity_type: str, + entity_id: str, + entity_version: int, + task_type: str, + target_type: str, + target_id: str, + issue_description: Optional[str] = None, + status: str = constants.TASK_STATUS_OBSOLETE, + resolver_id: Optional[str] = None, + resolved_on: Optional[datetime.datetime] = None ) -> str: """Creates a new task entry and puts it in storage. @@ -289,8 +281,8 @@ def create( the task was created. status: str. Tracks the state/progress of a task entry. resolver_id: str. ID of the user who closed the task, if any. - resolved_on: str. The date and time at which a task was closed or - deprecated. + resolved_on: datetime. The date and time at which a task was closed + or deprecated. Returns: str. The ID of the new task. 
diff --git a/core/storage/improvements/gae_models_test.py b/core/storage/improvements/gae_models_test.py index fc80af113d4a..1ae270af1418 100644 --- a/core/storage/improvements/gae_models_test.py +++ b/core/storage/improvements/gae_models_test.py @@ -18,6 +18,7 @@ from __future__ import annotations +from core.constants import constants from core.platform import models from core.tests import test_utils @@ -26,55 +27,103 @@ from mypy_imports import base_models from mypy_imports import improvements_models -base_models, improvements_models = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.improvements]) +base_models, improvements_models = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.IMPROVEMENTS +]) -class TaskEntryModelTests(test_utils.GenericTestBase): - """Unit tests for TaskEntryModel instances.""" +class ExplorationStatsTaskEntryModelTests(test_utils.GenericTestBase): + """Unit tests for ExplorationStatsTaskEntryModel instances.""" + + def test_get_field_name_mapping_to_takeout_keys(self) -> None: + self.assertEqual( + improvements_models.ExplorationStatsTaskEntryModel. 
+ get_field_name_mapping_to_takeout_keys(), + { + 'resolver_id': 'task_ids_resolved_by_user', + 'issue_description': 'issue_descriptions', + 'status': 'statuses', + 'resolved_on': 'resolution_msecs' + } + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'composite_entity_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'task_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'target_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'target_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'issue_description': base_models.EXPORT_POLICY.EXPORTED, + 'status': base_models.EXPORT_POLICY.EXPORTED, + 'resolver_id': base_models.EXPORT_POLICY.EXPORTED, + 'resolved_on': base_models.EXPORT_POLICY.EXPORTED, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + improvements_models.ExplorationStatsTaskEntryModel + .get_export_policy(), + expected_export_policy_dict + ) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + improvements_models.ExplorationStatsTaskEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER. 
+ ONE_INSTANCE_SHARED_ACROSS_USERS + ) def test_has_reference_to_user_id(self) -> None: - improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid', 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, 'state name', 'issue_description', - improvements_models.TASK_STATUS_RESOLVED, + constants.TASK_STATUS_RESOLVED, 'uid') self.assertTrue( - improvements_models.TaskEntryModel.has_reference_to_user_id('uid')) + improvements_models.ExplorationStatsTaskEntryModel + .has_reference_to_user_id('uid')) self.assertFalse( - improvements_models.TaskEntryModel.has_reference_to_user_id('xid')) + improvements_models.ExplorationStatsTaskEntryModel + .has_reference_to_user_id('xid')) def test_get_deletion_policy(self) -> None: self.assertEqual( - improvements_models.TaskEntryModel.get_deletion_policy(), - base_models.DELETION_POLICY.DELETE) + improvements_models.ExplorationStatsTaskEntryModel + .get_deletion_policy(), + base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE) def test_apply_deletion_policy(self) -> None: - improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + model_class = improvements_models.ExplorationStatsTaskEntryModel + model_class.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid', 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, 'state name', 'issue_description', - status=improvements_models.TASK_STATUS_OPEN, + status=constants.TASK_STATUS_OPEN, resolver_id='uid') - self.assertTrue( - improvements_models.TaskEntryModel.has_reference_to_user_id('uid')) + self.assertTrue(model_class.has_reference_to_user_id('uid')) - 
improvements_models.TaskEntryModel.apply_deletion_policy('uid') - self.assertFalse( - improvements_models.TaskEntryModel.has_reference_to_user_id('uid')) + model_class.apply_deletion_policy('uid') + self.assertFalse(model_class.has_reference_to_user_id('uid')) def test_export_data_without_any_tasks(self) -> None: self.assertEqual( - improvements_models.TaskEntryModel.export_data('uid'), + improvements_models.ExplorationStatsTaskEntryModel.export_data( + 'uid' + ), { 'issue_descriptions': [], 'resolution_msecs': [], @@ -83,28 +132,30 @@ def test_export_data_without_any_tasks(self) -> None: }) def test_export_data_with_task(self) -> None: - task_id_1 = improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + task_id_1 = improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid_1', 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, 'state name', 'issue_description_1', - status=improvements_models.TASK_STATUS_RESOLVED, + status=constants.TASK_STATUS_RESOLVED, resolver_id='uid') - task_id_2 = improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + task_id_2 = improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid_2', 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, 'state name', 'issue_description_2', - status=improvements_models.TASK_STATUS_RESOLVED, + status=constants.TASK_STATUS_RESOLVED, resolver_id='uid') self.assertEqual( - improvements_models.TaskEntryModel.export_data('uid'), + improvements_models.ExplorationStatsTaskEntryModel.export_data( + 'uid' + ), { 'issue_descriptions': [ 'issue_description_1', 'issue_description_2'], @@ -115,86 
+166,86 @@ def test_export_data_with_task(self) -> None: def test_generate_new_task_id(self) -> None: self.assertEqual( - improvements_models.TaskEntryModel.generate_task_id( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + improvements_models.ExplorationStatsTaskEntryModel.generate_task_id( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid', 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, 'tid'), 'exploration.eid.1.high_bounce_rate.state.tid') def test_can_create_task_with_unicode_identifiers(self) -> None: - improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid_\U0001F4C8', 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, 'tid_\U0001F4C8') def test_can_create_new_high_bounce_rate_task(self) -> None: - improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid', 1, - improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_HIGH_BOUNCE_RATE, + constants.TASK_TARGET_TYPE_STATE, 'Introduction', 'issue_description', - status=improvements_models.TASK_STATUS_OPEN) + status=constants.TASK_STATUS_OPEN) def test_can_create_new_successive_incorrect_answers_task(self) -> None: - improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid', 1, - improvements_models.TASK_TYPE_SUCCESSIVE_INCORRECT_ANSWERS, - 
improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_SUCCESSIVE_INCORRECT_ANSWERS, + constants.TASK_TARGET_TYPE_STATE, 'Introduction', 'issue_description', - status=improvements_models.TASK_STATUS_OPEN) + status=constants.TASK_STATUS_OPEN) def test_can_create_new_needs_guiding_responses_task(self) -> None: - improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid', 1, - improvements_models.TASK_TYPE_NEEDS_GUIDING_RESPONSES, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_NEEDS_GUIDING_RESPONSES, + constants.TASK_TARGET_TYPE_STATE, 'Introduction', 'issue_description', - status=improvements_models.TASK_STATUS_OPEN) + status=constants.TASK_STATUS_OPEN) def test_can_create_new_ineffective_feedback_loop_task(self) -> None: - improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid', 1, - improvements_models.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP, + constants.TASK_TARGET_TYPE_STATE, 'Introduction', 'issue_description', - status=improvements_models.TASK_STATUS_OPEN) + status=constants.TASK_STATUS_OPEN) def test_can_not_create_duplicate_tasks(self) -> None: - improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid', 1, - improvements_models.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP, + constants.TASK_TARGET_TYPE_STATE, 'Introduction', 'issue_description', - status=improvements_models.TASK_STATUS_OPEN) + 
status=constants.TASK_STATUS_OPEN) - with self.assertRaisesRegexp(Exception, 'Task id .* already exists'): # type: ignore[no-untyped-call] - improvements_models.TaskEntryModel.create( - improvements_models.TASK_ENTITY_TYPE_EXPLORATION, + with self.assertRaisesRegex(Exception, 'Task id .* already exists'): + improvements_models.ExplorationStatsTaskEntryModel.create( + constants.TASK_ENTITY_TYPE_EXPLORATION, 'eid', 1, - improvements_models.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP, - improvements_models.TASK_TARGET_TYPE_STATE, + constants.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP, + constants.TASK_TARGET_TYPE_STATE, 'Introduction', 'issue_description', - status=improvements_models.TASK_STATUS_OPEN) + status=constants.TASK_STATUS_OPEN) diff --git a/core/storage/job/gae_models.py b/core/storage/job/gae_models.py index d48a1093bc81..b478dd2f096e 100644 --- a/core/storage/job/gae_models.py +++ b/core/storage/job/gae_models.py @@ -20,25 +20,25 @@ from core.platform import models -from typing import Dict, Sequence +from typing import Dict, Final, Sequence MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() # These are the possible status codes for a job. 
-STATUS_CODE_NEW = 'new' -STATUS_CODE_QUEUED = 'queued' -STATUS_CODE_STARTED = 'started' -STATUS_CODE_COMPLETED = 'completed' -STATUS_CODE_FAILED = 'failed' -STATUS_CODE_CANCELED = 'canceled' +STATUS_CODE_NEW: Final = 'new' +STATUS_CODE_QUEUED: Final = 'queued' +STATUS_CODE_STARTED: Final = 'started' +STATUS_CODE_COMPLETED: Final = 'completed' +STATUS_CODE_FAILED: Final = 'failed' +STATUS_CODE_CANCELED: Final = 'canceled' class JobModel(base_models.BaseModel): @@ -81,7 +81,12 @@ class JobModel(base_models.BaseModel): @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: - """Model doesn't contain any data directly corresponding to a user.""" + """Model doesn't contain any data directly corresponding to a user. + + This model is marked as deleted after a period of time after its + creation. See MODEL_CLASSES_TO_MARK_AS_DELETED and + mark_outdated_models_as_deleted() in cron_services.py. + """ return base_models.DELETION_POLICY.NOT_APPLICABLE @staticmethod diff --git a/core/storage/job/gae_models_test.py b/core/storage/job/gae_models_test.py index 3853640d8de1..b2175110643e 100644 --- a/core/storage/job/gae_models_test.py +++ b/core/storage/job/gae_models_test.py @@ -24,8 +24,9 @@ from mypy_imports import base_models from mypy_imports import job_models -(base_models, job_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.job]) +(base_models, job_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.JOB +]) class JobModelTest(test_utils.GenericTestBase): @@ -50,12 +51,37 @@ def test_is_cancelable(self) -> None: job.status_code = job_models.STATUS_CODE_CANCELED self.assertFalse(job.is_cancelable) + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'job_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 
'time_queued_msec': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'time_started_msec': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'time_finished_msec': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'status_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'metadata': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'output': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'error': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'has_been_cleaned_up': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'additional_job_params': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = job_models.JobModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = job_models.JobModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + class JobModelSetUpJobsTest(test_utils.GenericTestBase): """Tests for Oppia job models with setUp.""" def setUp(self) -> None: - super(JobModelSetUpJobsTest, self).setUp() + super().setUp() job_models.JobModel( id='MyJobId1', job_type='JobType1', status_code=job_models.STATUS_CODE_FAILED).put() diff --git a/core/storage/learner_group/__init__.py b/core/storage/learner_group/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/core/storage/learner_group/gae_models.py b/core/storage/learner_group/gae_models.py new file mode 100644 index 000000000000..085e699bbd72 --- /dev/null +++ b/core/storage/learner_group/gae_models.py @@ -0,0 +1,360 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Models for learner groups.""" + +from __future__ import annotations + +import random +import string + +from core.platform import models + +from typing import Dict, List, Literal, Sequence, TypedDict + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import datastore_services + +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) + +datastore_services = models.Registry.import_datastore_services() + + +class LearnerGroupDataDict(TypedDict): + """Dictionary for learner group data to export.""" + + title: str + description: str + role_in_group: str + subtopic_page_ids: List[str] + story_ids: List[str] + + +class LearnerGroupModel(base_models.BaseModel): + """Class for storing learner group data. + + Instances of this class are keyed by the group ID. + """ + + # We use the model id as a key in the Takeout dict. + ID_IS_USED_AS_TAKEOUT_KEY: Literal[True] = True + + # The title of the learner group. + title = datastore_services.StringProperty(required=True, indexed=True) + # The description of the learner group. + description = datastore_services.StringProperty(required=True, indexed=True) + # The list of user_ids of facilitators of the learner group. + facilitator_user_ids = datastore_services.StringProperty( + repeated=True, indexed=True) + # The list of user_ids of learners of the learner group. + learner_user_ids = datastore_services.StringProperty(repeated=True) + # The list of user_ids of the learners who are invited to join the + # learner group. 
+ invited_learner_user_ids = datastore_services.StringProperty(repeated=True) + # The list of subtopic page ids that are part of the group syllabus. + # Each subtopic page id is stored as a topicid:subtopicid string. + subtopic_page_ids = datastore_services.StringProperty(repeated=True) + # The list of story ids that are part of the group syllabus. + story_ids = datastore_services.StringProperty(repeated=True) + + @staticmethod + def get_deletion_policy() -> base_models.DELETION_POLICY: + """Model contains data to delete corresponding + to a user: learner_user_ids, invited_learner_user_ids and + facilitator_user_ids fields. + """ + return base_models.DELETION_POLICY.DELETE + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """Model is exported as multiple instances per user as a + user can be part of multiple learner groups. + """ + return ( + base_models.MODEL_ASSOCIATION_TO_USER + .MULTIPLE_INSTANCES_PER_USER + ) + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model contains user data to be exported.""" + return dict(super(cls, cls).get_export_policy(), **{ + 'title': base_models.EXPORT_POLICY.EXPORTED, + 'description': base_models.EXPORT_POLICY.EXPORTED, + 'facilitator_user_ids': base_models.EXPORT_POLICY.EXPORTED, + 'learner_user_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'invited_learner_user_ids': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'subtopic_page_ids': base_models.EXPORT_POLICY.EXPORTED, + 'story_ids': base_models.EXPORT_POLICY.EXPORTED + }) + + # Here we use MyPy ignore because the signature of this method doesn't + # match with signature of super class's get_new_id() method. + @classmethod + def get_new_id(cls) -> str: # type: ignore[override] + """Generates an ID for a new LearnerGroupModel. + + Returns: + str. The new ID. + + Raises: + Exception. An ID cannot be generated within a reasonable number + of attempts. 
+ """ + for _ in range(base_models.MAX_RETRIES): + group_id = ''.join( + random.choice(string.ascii_lowercase + string.ascii_uppercase) + for _ in range(base_models.ID_LENGTH)) + if not cls.get_by_id(group_id): + return group_id + + raise Exception('New id generator is producing too many collisions.') + + @classmethod + def create( + cls, + group_id: str, + title: str, + description: str + ) -> LearnerGroupModel: + """Creates a new LearnerGroupModel instance and returns it. + + Args: + group_id: str. The ID of the learner group. + title: str. The title of the learner group. + description: str. The description of the learner group. + + Returns: + LearnerGroupModel. The newly created LearnerGroupModel instance. + + Raises: + Exception. A learner group with the given group ID exists already. + """ + if cls.get_by_id(group_id): + raise Exception( + 'A learner group with the given group ID exists already.') + + entity = cls(id=group_id, title=title, description=description) + + entity.update_timestamps() + entity.put() + return entity + + @staticmethod + def get_field_names_for_takeout() -> Dict[str, str]: + """We want to takeout the role of the current user in the group. So we + change the field name from 'facilitator_user_ids' to 'role_in_group' + before takeout. + """ + return { + 'facilitator_user_ids': 'role_in_group', + } + + @classmethod + def export_data(cls, user_id: str) -> Dict[str, LearnerGroupDataDict]: + """Takeout: Export LearnerGroupModel user-based properties. + + Args: + user_id: str. The user_id denotes which user's data to extract. + + Returns: + dict. A dict containing the user-relevant properties of + LearnerGroupModel. 
+ """ + found_models = cls.get_all().filter( + datastore_services.any_of( + cls.learner_user_ids == user_id, + cls.invited_learner_user_ids == user_id, + cls.facilitator_user_ids == user_id + )) + user_data = {} + for learner_group_model in found_models: + learner_group_data: LearnerGroupDataDict + if user_id in learner_group_model.learner_user_ids: + learner_group_data = { + 'title': learner_group_model.title, + 'description': learner_group_model.description, + 'role_in_group': 'learner', + 'subtopic_page_ids': + learner_group_model.subtopic_page_ids, + 'story_ids': learner_group_model.story_ids + } + elif user_id in learner_group_model.invited_learner_user_ids: + learner_group_data = { + 'title': learner_group_model.title, + 'description': learner_group_model.description, + 'role_in_group': 'invited_learner', + 'subtopic_page_ids': [], + 'story_ids': [] + } + else: + # To get to this branch, the user_id would need to be in + # facilitator_user_ids. + assert user_id in learner_group_model.facilitator_user_ids + learner_group_data = { + 'title': learner_group_model.title, + 'description': learner_group_model.description, + 'role_in_group': 'facilitator', + 'subtopic_page_ids': + learner_group_model.subtopic_page_ids, + 'story_ids': learner_group_model.story_ids + } + user_data[learner_group_model.id] = learner_group_data + + return user_data + + @classmethod + def has_reference_to_user_id(cls, user_id: str) -> bool: + """Check whether LearnerGroupModel contains data of a given user. + + Args: + user_id: str. The ID of the user whose data should be checked. + + Returns: + bool. Whether any models refer to the given user ID. 
+ """ + return ( + cls.query(datastore_services.any_of( + cls.learner_user_ids == user_id, + cls.invited_learner_user_ids == user_id, + cls.facilitator_user_ids == user_id + )).get(keys_only=True) is not None + ) + + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """Delete all LearnerGroupModel instances associated with the + user. + + Args: + user_id: str. The user_id denotes which user's data to delete. + """ + found_models = cls.get_all().filter( + datastore_services.any_of( + cls.learner_user_ids == user_id, + cls.invited_learner_user_ids == user_id, + cls.facilitator_user_ids == user_id + )) + + learner_group_models_to_put = [] + + for learner_group_model in found_models: + # If the user is the facilitator of the group and there is + # only one facilitator_user_id, delete the group. + if ( + user_id in learner_group_model.facilitator_user_ids and + len(learner_group_model.facilitator_user_ids) == 1 + ): + learner_group_model.delete() + continue + + # If the user is the facilitator of the group and there are + # more than one facilitator_user_ids, delete the user from the + # facilitator_user_ids list. + if ( + user_id in learner_group_model.facilitator_user_ids and + len(learner_group_model.facilitator_user_ids) > 1 + ): + learner_group_model.facilitator_user_ids.remove(user_id) + + # If the user is a learner, delete the user from the + # learner_user_ids list. + elif user_id in learner_group_model.learner_user_ids: + learner_group_model.learner_user_ids.remove(user_id) + + # If the user has been invited to join the group, delete the + # user from the invited_learner_user_ids list. + else: + # To get to this branch, the user_id would need to be in + # invited_learner_user_ids. 
+ assert user_id in learner_group_model.invited_learner_user_ids + learner_group_model.invited_learner_user_ids.remove(user_id) + + learner_group_models_to_put.append(learner_group_model) + + cls.update_timestamps_multi(learner_group_models_to_put) + cls.put_multi(learner_group_models_to_put) + + @classmethod + def get_by_facilitator_id( + cls, facilitator_id: str + ) -> Sequence[LearnerGroupModel]: + """Returns a list of all LearnerGroupModels that have the given + facilitator id. + + Args: + facilitator_id: str. The id of the facilitator. + + Returns: + list(LearnerGroupModel)|None. A list of all LearnerGroupModels that + have the given facilitator id or None if no such learner group + models exist. + """ + found_models: Sequence[LearnerGroupModel] = cls.get_all().filter( + datastore_services.any_of( + cls.facilitator_user_ids == facilitator_id + )).fetch() + + return found_models + + @classmethod + def get_by_learner_user_id( + cls, learner_user_id: str + ) -> Sequence[LearnerGroupModel]: + """Returns a list of all LearnerGroupModels that have the given + user id as a learner. + + Args: + learner_user_id: str. The id of the learner. + + Returns: + list(LearnerGroupModel)|None. A list of all LearnerGroupModels that + the given learner is part of or None if no such learner group + models exist. + """ + found_models: Sequence[LearnerGroupModel] = cls.get_all().filter( + datastore_services.any_of( + cls.learner_user_ids == learner_user_id + )).fetch() + + return found_models + + @classmethod + def get_by_invited_learner_user_id( + cls, invited_learner_user_id: str + ) -> Sequence[LearnerGroupModel]: + """Returns a list of all LearnerGroupModels which the given user has + been invited to join. + + Args: + invited_learner_user_id: str. The id of the learner invited to + join the groups. + + Returns: + list(LearnerGroupModel)|None. A list of all LearnerGroupModels that + the given learner is being invited to join or None if no such + learner group models exist. 
+ """ + found_models: Sequence[LearnerGroupModel] = cls.get_all().filter( + datastore_services.any_of( + cls.invited_learner_user_ids == invited_learner_user_id + )).fetch() + + return found_models diff --git a/core/storage/learner_group/gae_models_test.py b/core/storage/learner_group/gae_models_test.py new file mode 100644 index 000000000000..094903579151 --- /dev/null +++ b/core/storage/learner_group/gae_models_test.py @@ -0,0 +1,306 @@ +# coding: utf-8 +# +# Copyright 2022 The Oppia Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for Learner group models.""" + +from __future__ import annotations + +import types + +from core.platform import models +from core.tests import test_utils + +from typing import Dict, List, Union + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + from mypy_imports import learner_group_models + +(base_models, learner_group_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.LEARNER_GROUP +]) + + +class LearnerGroupModelUnitTest(test_utils.GenericTestBase): + """Test the LearnerGroupModel class.""" + + def setUp(self) -> None: + """Set up learner group model in datastore for use in testing.""" + super().setUp() + + self.learner_group_model = learner_group_models.LearnerGroupModel( + id='learner_group_32', + title='title', + description='description', + facilitator_user_ids=['facilitator_1', 'facilitator_2'], + learner_user_ids=['learner_1', 'learner_2', 'learner_3'], + invited_learner_user_ids=['invited_user_1', 'invited_user_2'], + subtopic_page_ids=['subtopic_1', 'subtopic_2'], + story_ids=['story_1', 'story_2']) + self.learner_group_model.update_timestamps() + self.learner_group_model.put() + + def test_get_deletion_policy(self) -> None: + self.assertEqual( + learner_group_models.LearnerGroupModel.get_deletion_policy(), + base_models.DELETION_POLICY.DELETE) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + learner_group_models.LearnerGroupModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER. 
+ MULTIPLE_INSTANCES_PER_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'title': base_models.EXPORT_POLICY.EXPORTED, + 'description': base_models.EXPORT_POLICY.EXPORTED, + 'facilitator_user_ids': base_models.EXPORT_POLICY.EXPORTED, + 'learner_user_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'invited_learner_user_ids': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'subtopic_page_ids': base_models.EXPORT_POLICY.EXPORTED, + 'story_ids': base_models.EXPORT_POLICY.EXPORTED + } + self.assertEqual( + learner_group_models.LearnerGroupModel.get_export_policy(), + expected_export_policy_dict + ) + + def test_raise_exception_by_mocking_collision(self) -> None: + """Tests get_new_id method for raising exception.""" + + learner_group_model_cls = ( + learner_group_models.LearnerGroupModel) + + # Test create method. + with self.assertRaisesRegex( + Exception, + 'A learner group with the given group ID exists already.' + ): + # Swap dependent method get_by_id to simulate collision every time. + with self.swap( + learner_group_model_cls, 'get_by_id', + types.MethodType( + lambda x, y: True, + learner_group_model_cls)): + learner_group_model_cls.create('Abcd', 'title', 'description') + + # Test get_new_id method. + with self.assertRaisesRegex( + Exception, + 'New id generator is producing too many collisions.' + ): + # Swap dependent method get_by_id to simulate collision every time. 
+ with self.swap( + learner_group_model_cls, 'get_by_id', + types.MethodType( + lambda x, y: True, + learner_group_model_cls)): + learner_group_model_cls.get_new_id() + + def test_creating_new_learner_group_model_instance(self) -> None: + learner_group_model_id = ( + learner_group_models.LearnerGroupModel.get_new_id()) + learner_group_model_instance = ( + learner_group_models.LearnerGroupModel.create( + learner_group_model_id, 'title', 'description')) + self.assertEqual( + learner_group_model_instance.id, learner_group_model_id) + self.assertEqual( + learner_group_model_instance.title, 'title') + self.assertEqual( + learner_group_model_instance.description, 'description') + + def test_get_field_names_for_takeout(self) -> None: + expected_results = { + 'facilitator_user_ids': 'role_in_group', + } + self.assertEqual( + learner_group_models.LearnerGroupModel + .get_field_names_for_takeout(), + expected_results) + + def test_export_data_on_learners(self) -> None: + """Test export data on users that are learners of the learner group.""" + + learner_user_data = ( + learner_group_models.LearnerGroupModel.export_data('learner_1')) + expected_learner_user_data = { + 'learner_group_32': { + 'title': 'title', + 'description': 'description', + 'role_in_group': 'learner', + 'subtopic_page_ids': ['subtopic_1', 'subtopic_2'], + 'story_ids': ['story_1', 'story_2'] + } + } + self.assertEqual(expected_learner_user_data, learner_user_data) + + def test_export_data_on_invited_learners(self) -> None: + """Test export data on learners that have been invited to join the + learner group. 
+ """ + invited_learner_data = ( + learner_group_models.LearnerGroupModel.export_data( + 'invited_user_2')) + expected_invited_learner_data = { + 'learner_group_32': { + 'title': 'title', + 'description': 'description', + 'role_in_group': 'invited_learner', + 'subtopic_page_ids': [], + 'story_ids': [] + } + } + self.assertEqual(expected_invited_learner_data, invited_learner_data) + + def test_export_data_on_facilitators(self) -> None: + """Test export data on users that are facilitators of + the learner group. + """ + facilitator_user_data = ( + learner_group_models.LearnerGroupModel.export_data('facilitator_1') + ) + expected_facilitator_user_data = { + 'learner_group_32': { + 'title': 'title', + 'description': 'description', + 'role_in_group': 'facilitator', + 'subtopic_page_ids': ['subtopic_1', 'subtopic_2'], + 'story_ids': ['story_1', 'story_2'] + } + } + self.assertEqual(expected_facilitator_user_data, facilitator_user_data) + + def test_export_data_on_uninvolved_user(self) -> None: + """Test export data on users who do not have any involvement with + the learner group. + """ + uninvolved_user_data = ( + learner_group_models.LearnerGroupModel.export_data('learner_21')) + expected_uninvolved_user_data: Dict[str, Union[str, List[str]]] = {} + + self.assertEqual( + expected_uninvolved_user_data, + uninvolved_user_data) + + def test_apply_deletion_policy_on_learners(self) -> None: + """Test apply_deletion_policy on users that are learners of + the learner group. + """ + self.assertTrue( + learner_group_models.LearnerGroupModel + .has_reference_to_user_id('learner_1')) + + learner_group_models.LearnerGroupModel.apply_deletion_policy( + 'learner_1') + + self.assertFalse( + learner_group_models.LearnerGroupModel + .has_reference_to_user_id('learner_1')) + + def test_apply_deletion_policy_on_invited_users(self) -> None: + """Test apply_deletion_policy on users that have been + invited to join the learner group. 
+ """ + self.assertTrue( + learner_group_models.LearnerGroupModel + .has_reference_to_user_id('invited_user_1')) + + learner_group_models.LearnerGroupModel.apply_deletion_policy( + 'invited_user_1') + + self.assertFalse( + learner_group_models.LearnerGroupModel + .has_reference_to_user_id('invited_user_1')) + + def test_apply_deletion_policy_on_facilitators(self) -> None: + """Test apply_deletion_policy on users that are facilitators of + the learner group. + """ + self.assertTrue( + learner_group_models.LearnerGroupModel + .has_reference_to_user_id('facilitator_1')) + self.assertTrue( + learner_group_models.LearnerGroupModel + .has_reference_to_user_id('facilitator_2')) + + # Deleting a facilitator when more than 1 facilitators are present. + learner_group_models.LearnerGroupModel.apply_deletion_policy( + 'facilitator_1') + + self.assertFalse( + learner_group_models.LearnerGroupModel + .has_reference_to_user_id('facilitator_1')) + + # Deleting a facilitator when only 1 facilitator is present. 
+ learner_group_models.LearnerGroupModel.apply_deletion_policy( + 'facilitator_2') + + self.assertFalse( + learner_group_models.LearnerGroupModel + .has_reference_to_user_id('facilitator_2')) + + def test_get_by_facilitator_id(self) -> None: + """Test get_by_facilitator_id.""" + learner_group_model = ( + learner_group_models.LearnerGroupModel.get_by_facilitator_id( + 'facilitator_1')) + self.assertEqual(learner_group_model[0].id, 'learner_group_32') + + def test_get_by_invited_learner_user_id(self) -> None: + """Test get_by_invited_learner_user_id.""" + learner_grp_models = ( + learner_group_models.LearnerGroupModel + .get_by_invited_learner_user_id( + 'facilitator_1')) + self.assertEqual(len(learner_grp_models), 0) + + learner_grp_models = ( + learner_group_models.LearnerGroupModel + .get_by_invited_learner_user_id( + 'learner_2')) + self.assertEqual(len(learner_grp_models), 0) + + learner_grp_models = ( + learner_group_models.LearnerGroupModel + .get_by_invited_learner_user_id( + 'invited_user_1')) + self.assertEqual(learner_grp_models[0].id, 'learner_group_32') + + def test_get_by_learner_user_id(self) -> None: + """Test get_by_learner_user_id.""" + learner_grp_models = ( + learner_group_models.LearnerGroupModel.get_by_learner_user_id( + 'facilitator_1')) + self.assertEqual(len(learner_grp_models), 0) + + learner_grp_models = ( + learner_group_models.LearnerGroupModel.get_by_learner_user_id( + 'invited_user_1')) + self.assertEqual(len(learner_grp_models), 0) + + learner_grp_models = ( + learner_group_models.LearnerGroupModel.get_by_learner_user_id( + 'learner_2')) + self.assertEqual(learner_grp_models[0].id, 'learner_group_32') diff --git a/core/storage/opportunity/gae_models.py b/core/storage/opportunity/gae_models.py index 63e0ff5bb83b..64f36d371fd4 100644 --- a/core/storage/opportunity/gae_models.py +++ b/core/storage/opportunity/gae_models.py @@ -27,7 +27,7 @@ from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = 
models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() @@ -89,11 +89,11 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: # tuple(list, str|None, bool) to a domain object. @classmethod def get_all_translation_opportunities( - cls, - page_size: int, - urlsafe_start_cursor: Optional[str], - language_code: str, - topic_name: str + cls, + page_size: int, + urlsafe_start_cursor: Optional[str], + language_code: str, + topic_name: Optional[str] ) -> Tuple[ Sequence[ExplorationOpportunitySummaryModel], Optional[str], bool ]: @@ -159,69 +159,6 @@ def get_all_translation_opportunities( more_results ) - # TODO(#13523): Change the return value of the function below from - # tuple(list, str|None, bool) to a domain object. - @classmethod - def get_all_voiceover_opportunities( - cls, - page_size: int, - urlsafe_start_cursor: Optional[str], - language_code: str - ) -> Tuple[ - Sequence[ExplorationOpportunitySummaryModel], Optional[str], bool - ]: - """Returns a list of opportunities available for voiceover in a - specific language. - - Args: - page_size: int. The maximum number of entities to be returned. - urlsafe_start_cursor: str or None. If provided, the list of - returned entities starts from this datastore cursor. - Otherwise, the returned entities start from the beginning - of the full list of entities. - language_code: str. The language for which voiceover opportunities - to be fetched. - - Returns: - 3-tuple of (results, cursor, more). As described in fetch_page() at: - https://developers.google.com/appengine/docs/python/ndb/queryclass, - where: - results: list(ExplorationOpportunitySummaryModel). A list - of query results. - cursor: str or None. A query cursor pointing to the next - batch of results. If there are no more results, this might - be None. - more: bool. 
If True, there are (probably) more results after - this batch. If False, there are no further results after - this batch. - """ - start_cursor = datastore_services.make_cursor( - urlsafe_cursor=urlsafe_start_cursor) - - language_created_on_query = cls.query( - cls.language_codes_needing_voice_artists == language_code - ).order(cls.created_on) - - fetch_result: Tuple[ - Sequence[ExplorationOpportunitySummaryModel], - datastore_services.Cursor, - bool - ] = language_created_on_query.fetch_page( - page_size, start_cursor=start_cursor) - results, cursor, _ = fetch_result - # TODO(#13462): Refactor this so that we don't do the lookup. - # Do a forward lookup so that we can know if there are more values. - fetch_result = language_created_on_query.fetch_page( - page_size + 1, start_cursor=start_cursor) - plus_one_query_models, _, _ = fetch_result - more_results = len(plus_one_query_models) == page_size + 1 - # The urlsafe returns bytes and we need to decode them to string. - return ( - results, - (cursor.urlsafe().decode('utf-8') if cursor else None), - more_results - ) - @classmethod def get_by_topic( cls, topic_id: str @@ -234,12 +171,6 @@ def get_by_topic( """ return cls.query(cls.topic_id == topic_id).fetch() - @classmethod - def delete_all(cls) -> None: - """Deletes all entities of this class.""" - keys = cls.query().fetch(keys_only=True) - datastore_services.delete_multi(keys) - class SkillOpportunityModel(base_models.BaseModel): """Model for opportunities to add questions to skills. 
@@ -325,9 +256,3 @@ def get_skill_opportunities( (cursor.urlsafe().decode('utf-8') if cursor else None), more_results ) - - @classmethod - def delete_all(cls) -> None: - """Deletes all entities of this class.""" - keys = cls.query().fetch(keys_only=True) - datastore_services.delete_multi(keys) diff --git a/core/storage/opportunity/gae_models_test.py b/core/storage/opportunity/gae_models_test.py index 61acbd41abb7..9631784a004f 100644 --- a/core/storage/opportunity/gae_models_test.py +++ b/core/storage/opportunity/gae_models_test.py @@ -26,15 +26,16 @@ from mypy_imports import base_models from mypy_imports import opportunity_models -(base_models, opportunity_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.opportunity]) +(base_models, opportunity_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.OPPORTUNITY +]) class ExplorationOpportunitySummaryModelUnitTest(test_utils.GenericTestBase): """Test the ExplorationOpportunitySummaryModel class.""" def setUp(self) -> None: - super(ExplorationOpportunitySummaryModelUnitTest, self).setUp() + super().setUp() opportunity_models.ExplorationOpportunitySummaryModel( id='opportunity_id1', @@ -118,13 +119,45 @@ def setUp(self) -> None: def test_get_deletion_policy(self) -> None: self.assertEqual( opportunity_models.ExplorationOpportunitySummaryModel - .get_deletion_policy(), + .get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + opportunity_models.ExplorationOpportunitySummaryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'story_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'story_title': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'chapter_title': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'content_count': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'incomplete_translation_language_codes': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'translation_counts': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_codes_with_assigned_voice_artists': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_codes_needing_voice_artists': + base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + opportunity_models.ExplorationOpportunitySummaryModel + .get_export_policy(), + expected_export_policy_dict + ) + def test_get_all_translation_opportunities(self) -> None: results, cursor, more = ( opportunity_models.ExplorationOpportunitySummaryModel - .get_all_translation_opportunities(6, None, 'hi', '')) + .get_all_translation_opportunities(6, None, 'hi', '')) # Ruling out the possibility of None for mypy type checking. assert results is not None self.assertEqual(len(results), 6) @@ -140,7 +173,7 @@ def test_get_all_translation_opportunities(self) -> None: def test_get_all_translation_opportunities_pagination(self) -> None: results, cursor, more = ( opportunity_models.ExplorationOpportunitySummaryModel - .get_all_translation_opportunities(1, None, 'hi', '')) + .get_all_translation_opportunities(1, None, 'hi', '')) # Ruling out the possibility of None for mypy type checking. assert results is not None self.assertEqual(len(results), 1) @@ -150,7 +183,7 @@ def test_get_all_translation_opportunities_pagination(self) -> None: results, second_cursor, more = ( opportunity_models.ExplorationOpportunitySummaryModel - .get_all_translation_opportunities(1, cursor, 'hi', '')) + .get_all_translation_opportunities(1, cursor, 'hi', '')) # Ruling out the possibility of None for mypy type checking. 
assert results is not None self.assertEqual(len(results), 1) @@ -160,7 +193,7 @@ def test_get_all_translation_opportunities_pagination(self) -> None: results, third_cursor, more = ( opportunity_models.ExplorationOpportunitySummaryModel - .get_all_translation_opportunities(1, second_cursor, 'hi', '')) + .get_all_translation_opportunities(1, second_cursor, 'hi', '')) # Ruling out the possibility of None for mypy type checking. assert results is not None self.assertEqual(len(results), 1) @@ -171,50 +204,18 @@ def test_get_all_translation_opportunities_pagination(self) -> None: def test_get_translation_opportunities_by_topic(self) -> None: results, cursor, more = ( opportunity_models.ExplorationOpportunitySummaryModel - .get_all_translation_opportunities(5, None, 'hi', 'a_topic name')) + .get_all_translation_opportunities( + 5, None, 'hi', 'a_topic name')) self.assertEqual(len(results), 2) self.assertEqual(results[0].id, 'opportunity_id1') self.assertEqual(results[1].id, 'opportunity_id3') self.assertFalse(more) self.assertTrue(isinstance(cursor, str)) - def test_get_all_voiceover_opportunities(self) -> None: - results, cursor, more = ( - opportunity_models.ExplorationOpportunitySummaryModel - .get_all_voiceover_opportunities(5, None, 'en')) - # Ruling out the possibility of None for mypy type checking. - assert results is not None - self.assertEqual(len(results), 2) - self.assertEqual(results[0].id, 'opportunity_id1') - self.assertEqual(results[1].id, 'opportunity_id2') - self.assertFalse(more) - self.assertTrue(isinstance(cursor, str)) - - def test_get_all_voiceover_opportunities_pagination(self) -> None: - results, cursor, more = ( - opportunity_models.ExplorationOpportunitySummaryModel - .get_all_voiceover_opportunities(1, None, 'en')) - # Ruling out the possibility of None for mypy type checking. 
- assert results is not None - self.assertEqual(len(results), 1) - self.assertEqual(results[0].id, 'opportunity_id1') - self.assertTrue(more) - self.assertTrue(isinstance(cursor, str)) - - results, new_cursor, more = ( - opportunity_models.ExplorationOpportunitySummaryModel - .get_all_voiceover_opportunities(1, cursor, 'en')) - # Ruling out the possibility of None for mypy type checking. - assert results is not None - self.assertEqual(len(results), 1) - self.assertEqual(results[0].id, 'opportunity_id2') - self.assertFalse(more) - self.assertTrue(isinstance(new_cursor, str)) - def test_get_by_topic(self) -> None: model_list = ( opportunity_models.ExplorationOpportunitySummaryModel - .get_by_topic('topic_id1')) + .get_by_topic('topic_id1')) # Ruling out the possibility of None for mypy type checking. assert model_list is not None self.assertEqual(len(model_list), 2) @@ -222,7 +223,7 @@ def test_get_by_topic(self) -> None: model_list = ( opportunity_models.ExplorationOpportunitySummaryModel - .get_by_topic('topic_id2')) + .get_by_topic('topic_id2')) # Ruling out the possibility of None for mypy type checking. assert model_list is not None self.assertEqual(len(model_list), 1) @@ -231,36 +232,17 @@ def test_get_by_topic(self) -> None: def test_get_by_topic_for_non_existing_topic(self) -> None: model_list = ( opportunity_models.ExplorationOpportunitySummaryModel - .get_by_topic('non_existing_topic_id')) + .get_by_topic('non_existing_topic_id')) # Ruling out the possibility of None for mypy type checking. assert model_list is not None self.assertEqual(len(model_list), 0) - def test_delete_all(self) -> None: - results, _, more = ( - opportunity_models.ExplorationOpportunitySummaryModel - .get_all_translation_opportunities(1, None, 'hi', '')) - # Ruling out the possibility of None for mypy type checking. 
- assert results is not None - self.assertEqual(len(results), 1) - self.assertTrue(more) - - opportunity_models.ExplorationOpportunitySummaryModel.delete_all() - - results, _, more = ( - opportunity_models.ExplorationOpportunitySummaryModel - .get_all_translation_opportunities(1, None, 'hi', '')) - # Ruling out the possibility of None for mypy type checking. - assert results is not None - self.assertEqual(len(results), 0) - self.assertFalse(more) - class SkillOpportunityModelTest(test_utils.GenericTestBase): """Tests for the SkillOpportunityModel class.""" def setUp(self) -> None: - super(SkillOpportunityModelTest, self).setUp() + super().setUp() opportunity_models.SkillOpportunityModel( id='opportunity_id1', @@ -278,10 +260,30 @@ def test_get_deletion_policy(self) -> None: opportunity_models.SkillOpportunityModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + opportunity_models.SkillOpportunityModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_description': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'question_count': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + opportunity_models.SkillOpportunityModel.get_export_policy(), + expected_export_policy_dict + ) + def test_get_skill_opportunities(self) -> None: results, cursor, more = ( opportunity_models.SkillOpportunityModel - .get_skill_opportunities(5, None)) + .get_skill_opportunities(5, None)) # Ruling out the possibility of None for mypy type checking. 
assert results is not None self.assertEqual(len(results), 2) @@ -310,22 +312,3 @@ def test_get_skill_opportunities_pagination(self) -> None: self.assertEqual(results[0].id, 'opportunity_id2') self.assertFalse(more) self.assertTrue(isinstance(cursor, str)) - - def test_delete_all_skill_opportunities(self) -> None: - results, _, more = ( - opportunity_models.SkillOpportunityModel.get_skill_opportunities( - 1, None)) - # Ruling out the possibility of None for mypy type checking. - assert results is not None - self.assertEqual(len(results), 1) - self.assertTrue(more) - - opportunity_models.SkillOpportunityModel.delete_all() - - results, _, more = ( - opportunity_models.SkillOpportunityModel.get_skill_opportunities( - 1, None)) - # Ruling out the possibility of None for mypy type checking. - assert results is not None - self.assertEqual(len(results), 0) - self.assertFalse(more) diff --git a/core/storage/question/gae_models.py b/core/storage/question/gae_models.py index f1241222b827..5ccd452759ed 100644 --- a/core/storage/question/gae_models.py +++ b/core/storage/question/gae_models.py @@ -20,20 +20,21 @@ import random from core import feconf -from core import python_utils from core import utils from core.constants import constants from core.platform import models -from typing import Any, Dict, List, Sequence +from typing import Dict, List, Mapping, Sequence MYPY = False if MYPY: # pragma: no cover + # Here, we are importing 'state_domain' only for type-checking purpose. + from core.domain import state_domain # pylint: disable=invalid-import # isort:skip from mypy_imports import base_models from mypy_imports import datastore_services (base_models, skill_models) = models.Registry.import_models([ - models.NAMES.base_model, models.NAMES.skill + models.Names.BASE_MODEL, models.Names.SKILL ]) datastore_services = models.Registry.import_datastore_services() @@ -116,6 +117,9 @@ class QuestionModel(base_models.VersionedModel): # The schema version for the question state data. 
question_state_data_schema_version = datastore_services.IntegerProperty( required=True, indexed=True) + # The next_content_id index to use for generation of new content ids. + next_content_id_index = datastore_services.IntegerProperty( + required=True, default=0, indexed=True) # The ISO 639-1 code for the language this question is written in. language_code = ( datastore_services.StringProperty(required=True, indexed=True)) @@ -154,7 +158,8 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'linked_skill_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'inapplicable_skill_misconception_ids': - base_models.EXPORT_POLICY.NOT_APPLICABLE + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'next_content_id_index': base_models.EXPORT_POLICY.NOT_APPLICABLE }) @classmethod @@ -181,15 +186,21 @@ def _get_new_id(cls) -> str: 'The id generator for QuestionModel is producing too many ' 'collisions.') - # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object - # to remove Any used below. - def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + # Here we use MyPy ignore because the signature of this method doesn't + # match with VersionedModel.compute_models_to_commit(). Because argument + # `commit_message` of super class can accept Optional[str] but this method + # can only accept str. + def compute_models_to_commit( # type: ignore[override] + self, + committer_id: str, + commit_type: str, + commit_message: str, + commit_cmds: base_models.AllowedCommitCmdsListType, + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't + # be allowed. + additional_models: Mapping[str, base_models.BaseModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this extends the superclass method. 
@@ -205,28 +216,42 @@ def _trusted_commit( reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. + + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. """ - super(QuestionModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) question_commit_log = QuestionCommitLogEntryModel.create( self.id, self.version, committer_id, commit_type, commit_message, commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False ) question_commit_log.question_id = self.id - question_commit_log.update_timestamps() - question_commit_log.put() + return { + 'snapshot_metadata_model': models_to_put['snapshot_metadata_model'], + 'snapshot_content_model': models_to_put['snapshot_content_model'], + 'commit_log_model': question_commit_log, + 'versioned_model': models_to_put['versioned_model'], + } - # TODO(#13523): Change 'question_state_data' to TypedDict/Domain Object - # to remove Any used below. @classmethod def create( cls, - question_state_data: Dict[str, Any], + question_state_data: state_domain.StateDict, language_code: str, version: int, linked_skill_ids: List[str], - inapplicable_skill_misconception_ids: List[str] + inapplicable_skill_misconception_ids: List[str], + next_content_id_index: int ) -> QuestionModel: """Creates a new QuestionModel entry. @@ -240,6 +265,8 @@ def create( inapplicable_skill_misconception_ids: list(str). The optional skill misconception ids marked as not applicable to the question. + next_content_id_index: int. The next content Id indext to generate + new content Id. Returns: QuestionModel. Instance of the new QuestionModel entry. 
@@ -255,7 +282,8 @@ def create( version=version, linked_skill_ids=linked_skill_ids, inapplicable_skill_misconception_ids=( - inapplicable_skill_misconception_ids)) + inapplicable_skill_misconception_ids), + next_content_id_index=next_content_id_index) return question_model_instance @@ -321,10 +349,10 @@ def get_model_id(cls, question_id: str, skill_id: str) -> str: @classmethod def create( - cls, - question_id: str, - skill_id: str, - skill_difficulty: float + cls, + question_id: str, + skill_id: str, + skill_difficulty: float ) -> QuestionSkillLinkModel: """Creates a new QuestionSkillLinkModel entry. @@ -343,7 +371,8 @@ def create( question_skill_link_id = cls.get_model_id(question_id, skill_id) if cls.get(question_skill_link_id, strict=False) is not None: raise Exception( - 'The given question is already linked to given skill') + 'The question with ID %s is already linked to skill %s' % + (question_id, skill_id)) question_skill_link_model_instance = cls( id=question_skill_link_id, @@ -355,7 +384,7 @@ def create( @classmethod def get_total_question_count_for_skill_ids( - cls, skill_ids: List[str] + cls, skill_ids: List[str] ) -> int: """Returns the number of questions assigned to the given skill_ids. @@ -421,6 +450,9 @@ def get_question_skill_links_based_on_difficulty_equidistributed_by_skill( each skill. If not evenly divisible, it will be rounded up. If not enough questions for a skill, just return all questions it links to. + + Raises: + Exception. The number of skill IDs exceeds 20. 
""" if len(skill_ids) > feconf.MAX_NUMBER_OF_SKILL_IDS: raise Exception('Please keep the number of skill IDs below 20.') @@ -429,8 +461,7 @@ def get_question_skill_links_based_on_difficulty_equidistributed_by_skill( return [] question_count_per_skill = int( - math.ceil(python_utils.divide( # type: ignore[no-untyped-call] - float(total_question_count), float(len(skill_ids))))) + math.ceil(float(total_question_count) / float(len(skill_ids)))) question_skill_link_mapping = {} @@ -563,6 +594,9 @@ def get_question_skill_links_equidistributed_by_skill( each skill. If not evenly divisible, it will be rounded up. If not enough questions for a skill, just return all questions it links to. + + Raises: + Exception. The number of skill IDs exceeds 20. """ if len(skill_ids) > feconf.MAX_NUMBER_OF_SKILL_IDS: raise Exception('Please keep the number of skill IDs below 20.') @@ -572,8 +606,7 @@ def get_question_skill_links_equidistributed_by_skill( question_count_per_skill = int( math.ceil( - python_utils.divide( # type: ignore[no-untyped-call] - float(total_question_count), float(len(skill_ids))))) + float(total_question_count) / float(len(skill_ids)))) question_skill_link_models = [] existing_question_ids = [] @@ -621,7 +654,7 @@ def get_offset(query: datastore_services.Query) -> int: @classmethod def get_all_question_ids_linked_to_skill_id( - cls, skill_id: str + cls, skill_id: str ) -> List[str]: """Returns a list of all question ids corresponding to the given skill id. 
diff --git a/core/storage/question/gae_models_test.py b/core/storage/question/gae_models_test.py index ed9c8ec84ee4..b21a5c060c44 100644 --- a/core/storage/question/gae_models_test.py +++ b/core/storage/question/gae_models_test.py @@ -23,6 +23,7 @@ from core.constants import constants from core.domain import skill_services from core.domain import state_domain +from core.domain import translation_domain from core.platform import models from core.tests import test_utils @@ -33,8 +34,9 @@ from mypy_imports import base_models from mypy_imports import question_models -(base_models, question_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.question]) +(base_models, question_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.QUESTION +]) class QuestionSnapshotContentModelTests(test_utils.GenericTestBase): @@ -53,39 +55,68 @@ def test_get_deletion_policy(self) -> None: question_models.QuestionModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + question_models.QuestionModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'question_state_data': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'question_state_data_schema_version': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'linked_skill_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'next_content_id_index': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'inapplicable_skill_misconception_ids': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE) + } + 
self.assertEqual( + question_models.QuestionModel.get_export_policy(), + expected_export_policy_dict + ) + def test_create_question_empty_skill_id_list(self) -> None: - state = state_domain.State.create_default_state('ABC') # type: ignore[no-untyped-call] + state = state_domain.State.create_default_state( + 'ABC', 'content_0', 'default_outcome_1') question_state_data = state.to_dict() language_code = 'en' version = 1 question_model = question_models.QuestionModel.create( - question_state_data, language_code, version, [], []) + question_state_data, language_code, version, [], [], 2) self.assertEqual( question_model.question_state_data, question_state_data) self.assertEqual(question_model.language_code, language_code) - self.assertItemsEqual(question_model.linked_skill_ids, []) # type: ignore[no-untyped-call] + self.assertItemsEqual(question_model.linked_skill_ids, []) def test_create_question_with_skill_ids(self) -> None: - state = state_domain.State.create_default_state('ABC') # type: ignore[no-untyped-call] + state = state_domain.State.create_default_state( + 'ABC', 'content_0', 'default_outcome_1') question_state_data = state.to_dict() linked_skill_ids = ['skill_id1', 'skill_id2'] language_code = 'en' version = 1 question_model = question_models.QuestionModel.create( question_state_data, language_code, version, - linked_skill_ids, ['skill-1']) + linked_skill_ids, ['skill-1'], 2) self.assertEqual( question_model.question_state_data, question_state_data) self.assertEqual(question_model.language_code, language_code) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( question_model.linked_skill_ids, linked_skill_ids) def test_create_question_with_inapplicable_skill_misconception_ids( - self + self ) -> None: - state = state_domain.State.create_default_state('ABC') # type: ignore[no-untyped-call] + state = state_domain.State.create_default_state( + 'ABC', 'content_0', 'default_outcome_1') question_state_data = state.to_dict() 
linked_skill_ids = ['skill_id1', 'skill_id2'] inapplicable_skill_misconception_ids = ['skill_id-1', 'skill_id-2'] @@ -93,35 +124,35 @@ def test_create_question_with_inapplicable_skill_misconception_ids( version = 1 question_model = question_models.QuestionModel.create( question_state_data, language_code, version, - linked_skill_ids, inapplicable_skill_misconception_ids) + linked_skill_ids, inapplicable_skill_misconception_ids, 2) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( question_model.inapplicable_skill_misconception_ids, inapplicable_skill_misconception_ids) def test_put_multi_questions(self) -> None: - question_state_data = self._create_valid_question_data('ABC') # type: ignore[no-untyped-call] + content_id_generator = translation_domain.ContentIdGenerator() + question_state_data = self._create_valid_question_data( + 'ABC', content_id_generator) linked_skill_ids = ['skill_id1', 'skill_id2'] - self.save_new_question( # type: ignore[no-untyped-call] + self.save_new_question( 'question_id1', 'owner_id', question_state_data, - linked_skill_ids) - self.save_new_question( # type: ignore[no-untyped-call] + linked_skill_ids, + content_id_generator.next_content_id_index) + self.save_new_question( 'question_id2', 'owner_id', question_state_data, - linked_skill_ids) + linked_skill_ids, + content_id_generator.next_content_id_index) question_ids = ['question_id1', 'question_id2'] question_model1 = question_models.QuestionModel.get(question_ids[0]) - # Ruling out the possibility of None for mypy type checking. - assert question_model1 is not None question_model2 = question_models.QuestionModel.get(question_ids[1]) - # Ruling out the possibility of None for mypy type checking. 
- assert question_model2 is not None - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( question_model1.linked_skill_ids, ['skill_id1', 'skill_id2']) - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( question_model2.linked_skill_ids, ['skill_id1', 'skill_id2']) question_model1.linked_skill_ids = ['skill_id3'] @@ -132,24 +163,21 @@ def test_put_multi_questions(self) -> None: updated_question_model1 = question_models.QuestionModel.get( question_ids[0]) - # Ruling out the possibility of None for mypy type checking. - assert updated_question_model1 is not None updated_question_model2 = question_models.QuestionModel.get( question_ids[1]) - # Ruling out the possibility of None for mypy type checking. - assert updated_question_model2 is not None self.assertEqual( updated_question_model1.linked_skill_ids, ['skill_id3']) self.assertEqual( updated_question_model2.linked_skill_ids, ['skill_id3']) def test_raise_exception_by_mocking_collision(self) -> None: - state = state_domain.State.create_default_state('ABC') # type: ignore[no-untyped-call] + state = state_domain.State.create_default_state( + 'ABC', 'content_0', 'default_outcome_1') question_state_data = state.to_dict() language_code = 'en' version = 1 - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'The id generator for QuestionModel is producing too ' 'many collisions.' 
): @@ -160,7 +188,7 @@ def test_raise_exception_by_mocking_collision(self) -> None: lambda x, y: True, question_models.QuestionModel)): question_models.QuestionModel.create( - question_state_data, language_code, version, [], []) + question_state_data, language_code, version, [], [], 2) class QuestionSkillLinkModelUnitTests(test_utils.GenericTestBase): @@ -171,6 +199,27 @@ def test_get_deletion_policy(self) -> None: question_models.QuestionSkillLinkModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + question_models.QuestionSkillLinkModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'question_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_difficulty': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + question_models.QuestionSkillLinkModel.get_export_policy(), + expected_export_policy_dict + ) + def test_create_question_skill_link(self) -> None: question_id = 'A Test Question Id' skill_id = 'A Test Skill Id' @@ -183,6 +232,36 @@ def test_create_question_skill_link(self) -> None: self.assertEqual( questionskilllink_model.skill_difficulty, skill_difficulty) + def test_get_all_question_ids_linked_to_skill_id(self) -> None: + skill_id_1 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_1, 'user', description='Description 1') + + # Testing that no question is linked to a skill. 
+ self.assertEqual( + question_models.QuestionSkillLinkModel + .get_all_question_ids_linked_to_skill_id(skill_id_1), + [] + ) + + questionskilllink_model1 = ( + question_models.QuestionSkillLinkModel.create( + 'question_id1', skill_id_1, 0.1) + ) + questionskilllink_model2 = ( + question_models.QuestionSkillLinkModel.create( + 'question_id2', skill_id_1, 0.2) + ) + + question_models.QuestionSkillLinkModel.put_multi_question_skill_links( + [questionskilllink_model1, questionskilllink_model2] + ) + + self.assertEqual( + question_models.QuestionSkillLinkModel + .get_all_question_ids_linked_to_skill_id(skill_id_1), + ['question_id1', 'question_id2'] + ) + def test_put_multi_question_skill_link(self) -> None: questionskilllink_model1 = ( question_models.QuestionSkillLinkModel.create( @@ -251,6 +330,24 @@ def test_delete_multi_question_skill_link(self) -> None: self.assertEqual(len(question_skill_links), 1) self.assertEqual(question_skill_links[0].question_id, 'question_id3') + def test_cannot_link_same_question_to_given_skill(self) -> None: + question_skill_link_model = ( + question_models.QuestionSkillLinkModel.create( + 'question_id1', 'skill_id1', 0.1) + ) + + question_models.QuestionSkillLinkModel.put_multi_question_skill_links([ + question_skill_link_model + ]) + + with self.assertRaisesRegex( + Exception, + 'The question with ID question_id1 is already linked to ' + 'skill skill_id1' + ): + question_models.QuestionSkillLinkModel.create( + 'question_id1', 'skill_id1', 0.1) + def test_get_models_by_question_id(self) -> None: questionskilllink_model1 = ( question_models.QuestionSkillLinkModel.create( @@ -281,10 +378,10 @@ def test_get_models_by_question_id(self) -> None: self.assertEqual(len(question_skill_links), 0) def test_get_total_question_count_for_skill_ids(self) -> None: - skill_id_1 = skill_services.get_new_skill_id() # type: ignore[no-untyped-call] - self.save_new_skill(skill_id_1, 'user', description='Description 1') # type: ignore[no-untyped-call] - 
skill_id_2 = skill_services.get_new_skill_id() # type: ignore[no-untyped-call] - self.save_new_skill(skill_id_2, 'user', description='Description 2') # type: ignore[no-untyped-call] + skill_id_1 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_1, 'user', description='Description 1') + skill_id_2 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_2, 'user', description='Description 2') questionskilllink_model1 = ( question_models.QuestionSkillLinkModel.create( @@ -334,10 +431,10 @@ def test_get_total_question_count_for_skill_ids(self) -> None: self.assertEqual(question_count, 3) def test_get_question_skill_links_by_skill_ids(self) -> None: - skill_id_1 = skill_services.get_new_skill_id() # type: ignore[no-untyped-call] - self.save_new_skill(skill_id_1, 'user', description='Description 1') # type: ignore[no-untyped-call] - skill_id_2 = skill_services.get_new_skill_id() # type: ignore[no-untyped-call] - self.save_new_skill(skill_id_2, 'user', description='Description 2') # type: ignore[no-untyped-call] + skill_id_1 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_1, 'user', description='Description 1') + skill_id_2 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_2, 'user', description='Description 2') questionskilllink_model1 = ( question_models.QuestionSkillLinkModel.create( @@ -377,14 +474,14 @@ def test_get_question_skill_links_by_skill_ids(self) -> None: def test_get_question_skill_links_by_skill_ids_many_skills(self) -> None: # Test the case when len(skill_ids) > constants.MAX_SKILLS_PER_QUESTION. 
- skill_id_1 = skill_services.get_new_skill_id() # type: ignore[no-untyped-call] - self.save_new_skill(skill_id_1, 'user', description='Description 1') # type: ignore[no-untyped-call] - skill_id_2 = skill_services.get_new_skill_id() # type: ignore[no-untyped-call] - self.save_new_skill(skill_id_2, 'user', description='Description 2') # type: ignore[no-untyped-call] - skill_id_3 = skill_services.get_new_skill_id() # type: ignore[no-untyped-call] - self.save_new_skill(skill_id_3, 'user', description='Description 3') # type: ignore[no-untyped-call] - skill_id_4 = skill_services.get_new_skill_id() # type: ignore[no-untyped-call] - self.save_new_skill(skill_id_4, 'user', description='Description 4') # type: ignore[no-untyped-call] + skill_id_1 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_1, 'user', description='Description 1') + skill_id_2 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_2, 'user', description='Description 2') + skill_id_3 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_3, 'user', description='Description 3') + skill_id_4 = skill_services.get_new_skill_id() + self.save_new_skill(skill_id_4, 'user', description='Description 4') questionskilllink_model1 = ( question_models.QuestionSkillLinkModel.create( @@ -526,10 +623,10 @@ def mock_random_int(upper_bound: int) -> int: questionskilllink_model4]) def test_request_too_many_skills_raises_error_when_fetch_by_difficulty( - self + self ) -> None: skill_ids = ['skill_id%s' % number for number in range(25)] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Please keep the number of skill IDs below 20.'): ( question_models.QuestionSkillLinkModel. 
@@ -595,7 +692,7 @@ def test_get_more_question_skill_links_than_available(self) -> None: self.assertTrue(questionskilllink_model3 in question_skill_links) def test_get_question_skill_links_when_count_not_evenly_divisible( - self + self ) -> None: questionskilllink_model1 = ( question_models.QuestionSkillLinkModel.create( @@ -626,7 +723,7 @@ def test_get_question_skill_links_when_count_not_evenly_divisible( self.assertTrue(questionskilllink_model3 in question_skill_links) def test_get_question_skill_links_equidistributed_by_skill( - self + self ) -> None: questionskilllink_model1 = ( question_models.QuestionSkillLinkModel.create( @@ -663,7 +760,7 @@ def test_get_question_skill_links_equidistributed_by_skill( self.assertEqual(question_ids.count('question_id2'), 1) def test_get_random_question_skill_links_equidistributed_by_skill( - self + self ) -> None: questionskilllink_model1 = ( question_models.QuestionSkillLinkModel.create( @@ -737,7 +834,7 @@ def mock_random_int(upper_bound: int) -> int: def test_request_too_many_skills_raises_error(self) -> None: skill_ids = ['skill_id%s' % number for number in range(25)] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Please keep the number of skill IDs below 20.'): ( question_models.QuestionSkillLinkModel. 
@@ -762,6 +859,34 @@ def test_has_reference_to_user_id(self) -> None: question_models.QuestionCommitLogEntryModel .has_reference_to_user_id('x_id')) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + question_models.QuestionCommitLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_message': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_cmds': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'post_commit_status': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'post_commit_community_owned': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'post_commit_is_private': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'question_id': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + question_models.QuestionCommitLogEntryModel.get_export_policy(), + expected_export_policy_dict + ) + class QuestionSummaryModelUnitTests(test_utils.GenericTestBase): """Tests the QuestionSummaryModel class.""" @@ -770,3 +895,28 @@ def test_get_deletion_policy(self) -> None: self.assertEqual( question_models.QuestionSummaryModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + question_models.QuestionSummaryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'question_model_last_updated': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'question_model_created_on': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'question_content': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'interaction_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'misconception_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + question_models.QuestionSummaryModel.get_export_policy(), + expected_export_policy_dict + ) diff --git a/core/storage/recommendations/gae_models.py b/core/storage/recommendations/gae_models.py index 8113f3949b0d..4180445f98a6 100644 --- a/core/storage/recommendations/gae_models.py +++ b/core/storage/recommendations/gae_models.py @@ -20,18 +20,18 @@ from core.platform import models -from typing import Dict +from typing import Dict, Final MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() -TOPIC_SIMILARITIES_ID = 'topics' +TOPIC_SIMILARITIES_ID: Final = 'topics' class ExplorationRecommendationsModel( diff --git a/core/storage/recommendations/gae_models_test.py b/core/storage/recommendations/gae_models_test.py index beb6df55778b..2eca1c668ce5 100644 --- a/core/storage/recommendations/gae_models_test.py +++ b/core/storage/recommendations/gae_models_test.py @@ -19,21 +19,24 @@ from core.platform import models from core.tests import test_utils +from typing import Final + MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import recommendations_models -(base_models, recommendations_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.recommendations]) 
+(base_models, recommendations_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.RECOMMENDATIONS +]) class ExplorationRecommendationsModelUnitTests(test_utils.GenericTestBase): """Tests the ExplorationRecommendationsModel class.""" - RECOMMENDATION_1_ID = 'rec_1_id' - RECOMMENDATION_2_ID = 'rec_2_id' - RECOMMENDATION_3_ID = 'rec_3_id' + RECOMMENDATION_1_ID: Final = 'rec_1_id' + RECOMMENDATION_2_ID: Final = 'rec_2_id' + RECOMMENDATION_3_ID: Final = 'rec_3_id' def test_get_deletion_policy(self) -> None: self.assertEqual( @@ -41,6 +44,25 @@ def test_get_deletion_policy(self) -> None: .get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + recommendations_models.ExplorationRecommendationsModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'recommended_exploration_ids': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + } + self.assertEqual( + recommendations_models.ExplorationRecommendationsModel + .get_export_policy(), + expected_export_policy_dict) + class TopicSimilaritiesModelUnitTests(test_utils.GenericTestBase): """Tests the TopicSimilaritiesModel class.""" @@ -49,3 +71,20 @@ def test_get_deletion_policy(self) -> None: self.assertEqual( recommendations_models.TopicSimilaritiesModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + recommendations_models.TopicSimilaritiesModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'content': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + recommendations_models.TopicSimilaritiesModel.get_export_policy(), + expected_export_policy_dict) diff --git a/core/storage/skill/gae_models.py b/core/storage/skill/gae_models.py index 4676faf66206..c76180d79e18 100644 --- a/core/storage/skill/gae_models.py +++ b/core/storage/skill/gae_models.py @@ -19,7 +19,7 @@ from core.constants import constants from core.platform import models -from typing import Any, Dict, List, Optional, Sequence, Tuple +from typing import Dict, List, Mapping, Optional, Sequence, Tuple MYPY = False if MYPY: # pragma: no cover @@ -27,7 +27,8 @@ from mypy_imports import datastore_services (base_models, user_models,) = models.Registry.import_models([ - models.NAMES.base_model, models.NAMES.user]) + models.Names.BASE_MODEL, models.Names.USER +]) datastore_services = models.Registry.import_datastore_services() @@ -159,15 +160,17 @@ def get_merged_skills(cls) -> List[SkillModel]: skill.superseding_skill_id is not None and ( len(skill.superseding_skill_id) > 0))] - # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object - # to remove Any used below. - def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + def compute_models_to_commit( + self, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: base_models.AllowedCommitCmdsListType, + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't + # be allowed. 
+ additional_models: Mapping[str, base_models.BaseModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this extends the superclass method. @@ -177,23 +180,39 @@ def _trusted_commit( change. commit_type: str. The type of commit. Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. - commit_message: str. The commit description message. + commit_message: str|None. The commit description message, for + unpublished skills, it may be equal to None. commit_cmds: list(dict). A list of commands, describing changes made in this model, which should give sufficient information to reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. + + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. """ - super(SkillModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) skill_commit_log_entry = SkillCommitLogEntryModel.create( self.id, self.version, committer_id, commit_type, commit_message, commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False ) skill_commit_log_entry.skill_id = self.id - skill_commit_log_entry.update_timestamps() - skill_commit_log_entry.put() + return { + 'snapshot_metadata_model': models_to_put['snapshot_metadata_model'], + 'snapshot_content_model': models_to_put['snapshot_content_model'], + 'commit_log_model': skill_commit_log_entry, + 'versioned_model': models_to_put['versioned_model'], + } @staticmethod def get_model_association_to_user( diff --git a/core/storage/skill/gae_models_test.py b/core/storage/skill/gae_models_test.py index e3fba7707643..8bdaddd7312c 100644 --- a/core/storage/skill/gae_models_test.py +++ 
b/core/storage/skill/gae_models_test.py @@ -21,6 +21,7 @@ import datetime from core.constants import constants +from core.domain import skill_domain from core.platform import models from core.tests import test_utils @@ -29,8 +30,9 @@ from mypy_imports import base_models from mypy_imports import skill_models -(base_models, skill_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.skill]) +(base_models, skill_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.SKILL +]) class SkillSnapshotContentModelTests(test_utils.GenericTestBase): @@ -44,11 +46,111 @@ def test_get_deletion_policy_is_not_applicable(self) -> None: class SkillModelUnitTest(test_utils.GenericTestBase): """Test the SkillModel class.""" + def setUp(self) -> None: + super().setUp() + self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) + self.user_id_admin = ( + self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) + self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) + def test_get_deletion_policy(self) -> None: self.assertEqual( skill_models.SkillModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_all_merged_skills_correctly(self) -> None: + commit_cmd = skill_domain.SkillChange({ + 'cmd': skill_domain.CMD_CREATE_NEW + }) + model1 = skill_models.SkillModel( + id='skill_id_a', + description='description1', + language_code='en', + misconceptions=[], + rubrics=[], + next_misconception_id=0, + misconceptions_schema_version=1, + rubric_schema_version=1, + skill_contents_schema_version=0, + superseding_skill_id=None, + all_questions_merged=False + ) + model2 = skill_models.SkillModel( + id='skill_id_b', + description='description2', + language_code='en', + misconceptions=[], + rubrics=[], + next_misconception_id=0, + misconceptions_schema_version=1, + rubric_schema_version=1, + skill_contents_schema_version=0, + superseding_skill_id='skill_id_x', + all_questions_merged=True + ) + 
commit_cmd_dicts = [commit_cmd.to_dict()] + model1.commit( + self.user_id_admin, 'skill model created', commit_cmd_dicts) + model2.commit( + self.user_id_admin, 'skill model created', commit_cmd_dicts) + self.assertEqual(skill_models.SkillModel.get_merged_skills(), [model2]) + + def test_get_skills_by_description_correctly(self) -> None: + commit_cmd = skill_domain.SkillChange({ + 'cmd': skill_domain.CMD_CREATE_NEW + }) + model = skill_models.SkillModel( + id='skill_id', + description='description', + language_code='en', + misconceptions=[], + rubrics=[], + next_misconception_id=0, + misconceptions_schema_version=1, + rubric_schema_version=1, + skill_contents_schema_version=0, + superseding_skill_id='skill_id1', + all_questions_merged=True + ) + commit_cmd_dicts = [commit_cmd.to_dict()] + model.commit( + self.user_id_admin, 'skill model created', commit_cmd_dicts) + + self.assertIsNone( + skill_models.SkillModel.get_by_description('Invalid description')) + self.assertEqual( + skill_models.SkillModel.get_by_description('description'), model) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'description': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'misconceptions_schema_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'rubric_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'misconceptions': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'rubrics': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_contents_schema_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'prerequisite_skill_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'next_misconception_id': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'superseding_skill_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'all_questions_merged': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = skill_models.SkillModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = skill_models.SkillModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + class SkillCommitLogEntryModelUnitTests(test_utils.GenericTestBase): """Tests the SkillCommitLogEntryModel class.""" @@ -68,6 +170,31 @@ def test_has_reference_to_user_id(self) -> None: skill_models.SkillCommitLogEntryModel .has_reference_to_user_id('x_id')) + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_message': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_cmds': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'post_commit_status': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'post_commit_community_owned': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'post_commit_is_private': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_id': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = skill_models.SkillCommitLogEntryModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = skill_models.SkillCommitLogEntryModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + class SkillSummaryModelUnitTest(test_utils.GenericTestBase): """Test the SkillSummaryModel 
class.""" @@ -131,3 +258,26 @@ def test_fetch_page(self) -> None: self.assertEqual(skill_summaries[0].id, 'skill_id1') self.assertEqual(skill_summaries[1].id, 'skill_id2') self.assertFalse(more) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'description': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'misconception_count': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'worked_examples_count': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_model_last_updated': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_model_created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = skill_models.SkillSummaryModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = skill_models.SkillSummaryModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) diff --git a/core/storage/statistics/gae_models.py b/core/storage/statistics/gae_models.py index 368c651266bc..102bbfa6c25a 100644 --- a/core/storage/statistics/gae_models.py +++ b/core/storage/statistics/gae_models.py @@ -26,7 +26,7 @@ from core import utils from core.platform import models -from typing import Any, Dict, List, Optional, Sequence, Tuple, cast +from typing import Dict, Final, List, Optional, Sequence, Tuple MYPY = False if MYPY: # pragma: no cover @@ -43,30 +43,32 @@ from mypy_imports import datastore_services from mypy_imports import transaction_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = 
models.Registry.import_datastore_services() transaction_services = models.Registry.import_transaction_services() -CURRENT_ACTION_SCHEMA_VERSION = 1 -CURRENT_ISSUE_SCHEMA_VERSION = 1 +CURRENT_ACTION_SCHEMA_VERSION: Final = 1 +CURRENT_ISSUE_SCHEMA_VERSION: Final = 1 -ACTION_TYPE_EXPLORATION_START = 'ExplorationStart' -ACTION_TYPE_ANSWER_SUBMIT = 'AnswerSubmit' -ACTION_TYPE_EXPLORATION_QUIT = 'ExplorationQuit' +ACTION_TYPE_EXPLORATION_START: Final = 'ExplorationStart' +ACTION_TYPE_ANSWER_SUBMIT: Final = 'AnswerSubmit' +ACTION_TYPE_EXPLORATION_QUIT: Final = 'ExplorationQuit' -ISSUE_TYPE_EARLY_QUIT = 'EarlyQuit' -ISSUE_TYPE_MULTIPLE_INCORRECT_SUBMISSIONS = 'MultipleIncorrectSubmissions' -ISSUE_TYPE_CYCLIC_STATE_TRANSITIONS = 'CyclicStateTransitions' +ISSUE_TYPE_EARLY_QUIT: Final = 'EarlyQuit' +ISSUE_TYPE_MULTIPLE_INCORRECT_SUBMISSIONS: Final = ( + 'MultipleIncorrectSubmissions' +) +ISSUE_TYPE_CYCLIC_STATE_TRANSITIONS: Final = 'CyclicStateTransitions' # Types of allowed issues. -ALLOWED_ISSUE_TYPES = [ +ALLOWED_ISSUE_TYPES: Final = [ ISSUE_TYPE_EARLY_QUIT, ISSUE_TYPE_MULTIPLE_INCORRECT_SUBMISSIONS, ISSUE_TYPE_CYCLIC_STATE_TRANSITIONS ] # Types of allowed learner actions. -ALLOWED_ACTION_TYPES = [ +ALLOWED_ACTION_TYPES: Final = [ ACTION_TYPE_EXPLORATION_START, ACTION_TYPE_ANSWER_SUBMIT, ACTION_TYPE_EXPLORATION_QUIT @@ -74,8 +76,9 @@ # The entity types for which the LearnerAnswerDetailsModel instance # can be created. -ALLOWED_ENTITY_TYPES = [ - feconf.ENTITY_TYPE_EXPLORATION, feconf.ENTITY_TYPE_QUESTION] +ALLOWED_ENTITY_TYPES: Final = [ + feconf.ENTITY_TYPE_EXPLORATION, feconf.ENTITY_TYPE_QUESTION +] class StateCounterModel(base_models.BaseModel): @@ -445,14 +448,14 @@ def get_new_event_entity_id(cls, exp_id: str, session_id: str) -> str: # feel free to remove this comment once you've done so. 
@classmethod def create( - cls, - exp_id: str, - exp_version: int, - state_name: str, - session_id: str, - params: Dict[str, str], - play_type: str, - unused_version: int = 1 + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + params: Dict[str, str], + play_type: str, + unused_version: int = 1 ) -> str: """Creates a new start exploration event and then writes it to the datastore. @@ -600,15 +603,15 @@ def get_new_event_entity_id(cls, exp_id: str, session_id: str) -> str: # feel free to remove this comment once you've done so. @classmethod def create( - cls, - exp_id: str, - exp_version: int, - state_name: str, - session_id: str, - client_time_spent_in_secs: float, - params: Dict[str, str], - play_type: str - ) -> None: + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + client_time_spent_in_secs: float, + params: Dict[str, str], + play_type: str + ) -> str: """Creates a new leave exploration event and then writes it to the datastore. @@ -622,6 +625,9 @@ def create( params: dict. Current parameter values, map of parameter name to value. play_type: str. Type of play-through. + + Returns: + str. New unique ID for this entity instance. """ # TODO(sll): Some events currently do not have an entity ID that was # set using this method; it was randomly set instead due to an error. @@ -641,6 +647,7 @@ def create( event_schema_version=feconf.CURRENT_EVENT_MODELS_SCHEMA_VERSION) leave_event_entity.update_timestamps() leave_event_entity.put() + return entity_id @staticmethod def get_model_association_to_user( @@ -746,14 +753,14 @@ def get_new_event_entity_id(cls, exp_id: str, session_id: str) -> str: # feel free to remove this comment once you've done so. 
@classmethod def create( - cls, - exp_id: str, - exp_version: int, - state_name: str, - session_id: str, - client_time_spent_in_secs: float, - params: Dict[str, str], - play_type: str + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + client_time_spent_in_secs: float, + params: Dict[str, str], + play_type: str ) -> str: """Creates a new exploration completion event and then writes it to the datastore. @@ -849,7 +856,7 @@ def get_new_event_entity_id(cls, exp_id: str, user_id: str) -> str: user_id: str. ID of the user. Returns: - str. New unique ID for this entity class. + str. New unique ID for this entity instance. """ timestamp = datetime.datetime.utcnow() return cls.get_new_id('%s:%s:%s' % ( @@ -859,12 +866,12 @@ def get_new_event_entity_id(cls, exp_id: str, user_id: str) -> str: @classmethod def create( - cls, - exp_id: str, - user_id: str, - rating: int, - old_rating: Optional[int] - ) -> None: + cls, + exp_id: str, + user_id: str, + rating: int, + old_rating: Optional[int] + ) -> str: """Creates a new rate exploration event and then writes it to the datastore. @@ -874,6 +881,9 @@ def create( rating: int. Value of rating assigned to exploration. old_rating: int or None. Will be None if the user rates an exploration for the first time. + + Returns: + str. New unique ID for this entity instance. """ entity_id = cls.get_new_event_entity_id( exp_id, user_id) @@ -885,6 +895,7 @@ def create( old_rating=old_rating, event_schema_version=feconf.CURRENT_EVENT_MODELS_SCHEMA_VERSION ).put() + return entity_id @staticmethod def get_model_association_to_user( @@ -974,13 +985,13 @@ def get_new_event_entity_id(cls, exp_id: str, session_id: str) -> str: # feel free to remove this comment once you've done so. 
@classmethod def create( - cls, - exp_id: str, - exp_version: int, - state_name: str, - session_id: str, - params: Dict[str, str], - play_type: str + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + params: Dict[str, str], + play_type: str ) -> str: """Creates a new state hit event entity and then writes it to the datastore. @@ -1070,12 +1081,12 @@ def get_new_event_entity_id(cls, exp_id: str, session_id: str) -> str: @classmethod def create( - cls, - exp_id: str, - exp_version: int, - state_name: str, - session_id: str, - time_spent_in_state_secs: float + cls, + exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + time_spent_in_state_secs: float ) -> str: """Creates a new state complete event.""" entity_id = cls.get_new_event_entity_id( @@ -1148,13 +1159,13 @@ def get_new_event_entity_id(cls, exp_id: str, session_id: str) -> str: @classmethod def create( - cls, - exp_id: str, - refresher_exp_id: str, - exp_version: int, - state_name: str, - session_id: str, - time_spent_in_state_secs: float + cls, + exp_id: str, + refresher_exp_id: str, + exp_version: int, + state_name: str, + session_id: str, + time_spent_in_state_secs: float ) -> str: """Creates a new leave for refresher exploration event.""" entity_id = cls.get_new_event_entity_id( @@ -1273,20 +1284,18 @@ def get_model( exploration_stats_model = cls.get(instance_id, strict=False) return exploration_stats_model - # TODO(#13523): Change 'state_stats_mapping' to TypedDict/Domain Object - # to remove Any used below. 
@classmethod def create( - cls, - exp_id: str, - exp_version: int, - num_starts_v1: int, - num_starts_v2: int, - num_actual_starts_v1: int, - num_actual_starts_v2: int, - num_completions_v1: int, - num_completions_v2: int, - state_stats_mapping: Dict[str, Any] + cls, + exp_id: str, + exp_version: int, + num_starts_v1: int, + num_starts_v2: int, + num_actual_starts_v1: int, + num_actual_starts_v2: int, + num_completions_v1: int, + num_completions_v2: int, + state_stats_mapping: Dict[str, Dict[str, int]] ) -> str: """Creates an ExplorationStatsModel instance and writes it to the datastore. @@ -1438,14 +1447,12 @@ def get_model( instance_id = cls.get_entity_id(exp_id, exp_version) return cls.get(instance_id, strict=False) - # TODO(#13523): Change 'unresolved_issues' to TypedDict/Domain Object - # to remove Any used below. @classmethod def create( - cls, - exp_id: str, - exp_version: int, - unresolved_issues: List[Dict[str, Any]] + cls, + exp_id: str, + exp_version: int, + unresolved_issues: List[stats_domain.ExplorationIssueDict] ) -> str: """Creates an ExplorationIssuesModel instance and writes it to the datastore. @@ -1554,16 +1561,16 @@ def _generate_id(cls, exp_id: str) -> str: 'The id generator for PlaythroughModel is producing too many ' 'collisions.') - # TODO(#13523): Change 'issue_customization_args' and 'actions' to - # TypedDict/Domain Object to remove Any used below. @classmethod def create( - cls, - exp_id: str, - exp_version: int, - issue_type: str, - issue_customization_args: Dict[str, Any], - actions: List[Dict[str, Any]] + cls, + exp_id: str, + exp_version: int, + issue_type: str, + issue_customization_args: ( + stats_domain.IssuesCustomizationArgsDictType + ), + actions: List[stats_domain.LearnerActionDict] ) -> str: """Creates a PlaythroughModel instance and writes it to the datastore. 
@@ -1732,7 +1739,7 @@ def create_model_instance( state_reference=state_reference, interaction_id=interaction_id, learner_answer_info_list=[ - learner_answer_info.to_dict() # type: ignore[no-untyped-call] + learner_answer_info.to_dict() for learner_answer_info in learner_answer_info_list ], learner_answer_info_schema_version=( @@ -1835,12 +1842,12 @@ def get_entity_id( @classmethod def create( - cls, - exp_id: str, - version: str, - num_starts: int, - num_completions: int, - state_hit_counts: Dict[str, int] + cls, + exp_id: str, + version: str, + num_starts: int, + num_completions: int, + state_hit_counts: Dict[str, int] ) -> None: """Creates a new ExplorationAnnotationsModel and then writes it to the datastore. @@ -2051,8 +2058,14 @@ def get_all_models( shard_id) for shard_id in range( 1, main_shard.shard_count + 1)] - all_models += cast( - List[StateAnswersModel], cls.get_multi(shard_ids)) + state_answer_models = cls.get_multi(shard_ids) + for state_answer_model in state_answer_models: + # Filtering out the None cases for MyPy type checking, + # because shard deletion is not supported and we expect + # main_shard.shard_count to be present, since the master + # model keeps track of the number of shards explicitly. + assert state_answer_model is not None + all_models.append(state_answer_model) return all_models else: return None @@ -2060,12 +2073,14 @@ def get_all_models( @classmethod @transaction_services.run_in_transaction_wrapper def _insert_submitted_answers_unsafe_transactional( - cls, - exploration_id: str, - exploration_version: int, - state_name: str, - interaction_id: str, - new_submitted_answer_dict_list: List[Dict[str, str]] + cls, + exploration_id: str, + exploration_version: int, + state_name: str, + interaction_id: str, + new_submitted_answer_dict_list: List[ + stats_domain.SubmittedAnswerDict + ] ) -> None: """See the insert_submitted_answers for general documentation of what this method does. 
It's only safe to call this method from within a @@ -2162,16 +2177,16 @@ def _insert_submitted_answers_unsafe_transactional( cls.update_timestamps_multi(entities_to_put) cls.put_multi(entities_to_put) - # TODO(#13523): Change 'new_submitted_answer' to TypedDict/Domain Object - # to remove Any used below. @classmethod def insert_submitted_answers( - cls, - exploration_id: str, - exploration_version: int, - state_name: str, - interaction_id: str, - new_submitted_answer_dict_list: List[Dict[str, Any]] + cls, + exploration_id: str, + exploration_version: int, + state_name: str, + interaction_id: str, + new_submitted_answer_dict_list: List[ + stats_domain.SubmittedAnswerDict + ] ) -> None: """Given an exploration ID, version, state name, and interaction ID, attempt to insert a list of specified SubmittedAnswers into this model, @@ -2198,11 +2213,11 @@ def insert_submitted_answers( @classmethod def _get_entity_id( - cls, - exploration_id: str, - exploration_version: int, - state_name: str, - shard_id: int + cls, + exploration_id: str, + exploration_version: int, + state_name: str, + shard_id: int ) -> str: """Returns the entity_id of a StateAnswersModel based on it's exp_id, state_name, exploration_version and shard_id. @@ -2223,15 +2238,13 @@ def _get_entity_id( str(shard_id) ]) - # TODO(#13523): Change answer lists to TypedDict/Domain Object - # to remove Any used below. 
@classmethod def _shard_answers( - cls, - current_answer_list: List[Dict[str, Any]], - current_answer_list_size: int, - new_answer_list: List[Dict[str, Any]] - ) -> Tuple[List[List[Dict[str, Any]]], List[int]]: + cls, + current_answer_list: List[stats_domain.SubmittedAnswerDict], + current_answer_list_size: int, + new_answer_list: List[stats_domain.SubmittedAnswerDict] + ) -> Tuple[List[List[stats_domain.SubmittedAnswerDict]], List[int]]: """Given a current answer list which can fit within one NDB entity and a list of new answers which need to try and fit in the current answer list, shard the answers such that a list of answer lists are returned. @@ -2278,10 +2291,10 @@ def _shard_answers( sharded_answer_list_sizes.append(answer_size) return sharded_answer_lists, sharded_answer_list_sizes - # TODO(#13523): Change answer dict to TypedDict/Domain Object - # to remove Any used below. @classmethod - def _get_answer_dict_size(cls, answer_dict: Dict[str, Any]) -> int: + def _get_answer_dict_size( + cls, answer_dict: stats_domain.SubmittedAnswerDict + ) -> int: """Returns a size overestimate (in bytes) of the given answer dict. 
Args: diff --git a/core/storage/statistics/gae_models_test.py b/core/storage/statistics/gae_models_test.py index afc71f6fe98d..fdac137aba9d 100644 --- a/core/storage/statistics/gae_models_test.py +++ b/core/storage/statistics/gae_models_test.py @@ -33,8 +33,9 @@ from mypy_imports import base_models from mypy_imports import stats_models -(base_models, stats_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.statistics]) +(base_models, stats_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.STATISTICS +]) class StateCounterModelTests(test_utils.GenericTestBase): @@ -58,7 +59,7 @@ def test_state_counter_model_gets_created(self) -> None: def test_get_state_counter_model(self) -> None: # This tests whether get_or_create() can get/fetch the model when the # model is created by creating an instance. - stats_models.StateCounterModel(id='exp_id1.state_name') + stats_models.StateCounterModel(id='exp_id1.state_name').put() model_instance = stats_models.StateCounterModel.get_or_create( 'exp_id1', 'state_name') @@ -69,6 +70,28 @@ def test_get_state_counter_model(self) -> None: self.assertEqual(model_instance.resolved_answer_count, 0) self.assertEqual(model_instance.active_answer_count, 0) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.StateCounterModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'first_entry_count': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'subsequent_entries_count': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'resolved_answer_count': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'active_answer_count': base_models.EXPORT_POLICY.NOT_APPLICABLE + 
} + self.assertEqual( + stats_models.StateCounterModel.get_export_policy(), + expected_export_policy_dict + ) + class AnswerSubmittedEventLogEntryModelUnitTests(test_utils.GenericTestBase): """Test the AnswerSubmittedEventLogEntryModel class.""" @@ -87,8 +110,6 @@ def test_create_and_get_event_models(self) -> None: event_model = stats_models.AnswerSubmittedEventLogEntryModel.get( event_id) - # Ruling out the possibility of None for mypy type checking. - assert event_model is not None self.assertEqual(event_model.exp_id, 'exp_id1') self.assertEqual(event_model.exp_version, 1) self.assertEqual(event_model.state_name, 'state_name1') @@ -96,6 +117,32 @@ def test_create_and_get_event_models(self) -> None: self.assertEqual(event_model.time_spent_in_state_secs, 0.0) self.assertEqual(event_model.is_feedback_useful, True) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.AnswerSubmittedEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'session_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'time_spent_in_state_secs': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'is_feedback_useful': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.AnswerSubmittedEventLogEntryModel.get_export_policy(), + expected_export_policy_dict + ) + class ExplorationActualStartEventLogEntryModelUnitTests( test_utils.GenericTestBase): @@ -115,13 +162,35 @@ def 
test_create_and_get_event_models(self) -> None: event_model = stats_models.ExplorationActualStartEventLogEntryModel.get( event_id) - # Ruling out the possibility of None for mypy type checking. - assert event_model is not None self.assertEqual(event_model.exp_id, 'exp_id1') self.assertEqual(event_model.exp_version, 1) self.assertEqual(event_model.state_name, 'state_name1') self.assertEqual(event_model.session_id, 'session_id1') + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.ExplorationActualStartEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'session_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.ExplorationActualStartEventLogEntryModel + .get_export_policy(), + expected_export_policy_dict + ) + class SolutionHitEventLogEntryModelUnitTests(test_utils.GenericTestBase): """Test the SolutionHitEventLogEntryModel class.""" @@ -139,14 +208,37 @@ def test_create_and_get_event_models(self) -> None: event_model = stats_models.SolutionHitEventLogEntryModel.get( event_id) - # Ruling out the possibility of None for mypy type checking. 
- assert event_model is not None self.assertEqual(event_model.exp_id, 'exp_id1') self.assertEqual(event_model.exp_version, 1) self.assertEqual(event_model.state_name, 'state_name1') self.assertEqual(event_model.session_id, 'session_id1') self.assertEqual(event_model.time_spent_in_state_secs, 0.0) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.SolutionHitEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'session_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'time_spent_in_state_secs': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.SolutionHitEventLogEntryModel.get_export_policy(), + expected_export_policy_dict + ) + class StartExplorationEventLogEntryModelUnitTests(test_utils.GenericTestBase): """Test the StartExplorationEventLogEntryModel class.""" @@ -166,8 +258,6 @@ def test_create_and_get_event_models(self) -> None: event_model = stats_models.StartExplorationEventLogEntryModel.get( event_id) - # Ruling out the possibility of None for mypy type checking. 
- assert event_model is not None self.assertEqual(event_model.exploration_id, 'exp_id1') self.assertEqual(event_model.exploration_version, 1) self.assertEqual(event_model.state_name, 'state_name1') @@ -175,6 +265,34 @@ def test_create_and_get_event_models(self) -> None: self.assertEqual(event_model.params, {}) self.assertEqual(event_model.play_type, feconf.PLAY_TYPE_NORMAL) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.StartExplorationEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'session_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'client_time_spent_in_secs': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'params': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'play_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.StartExplorationEventLogEntryModel.get_export_policy(), + expected_export_policy_dict + ) + class MaybeLeaveExplorationEventLogEntryModelUnitTests( test_utils.GenericTestBase): @@ -186,6 +304,49 @@ def test_get_deletion_policy(self) -> None: .get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_create_event_models(self) -> None: + event_id = stats_models.MaybeLeaveExplorationEventLogEntryModel.create( + 'exp_id1', 1, 'state_name1', 'session_id1', 1.0, {}, + feconf.PLAY_TYPE_NORMAL) + event_model = 
stats_models.MaybeLeaveExplorationEventLogEntryModel.get( + event_id) + + self.assertEqual(event_model.exploration_id, 'exp_id1') + self.assertEqual(event_model.exploration_version, 1) + self.assertEqual(event_model.state_name, 'state_name1') + self.assertEqual(event_model.session_id, 'session_id1') + self.assertEqual(event_model.params, {}) + self.assertEqual(event_model.play_type, feconf.PLAY_TYPE_NORMAL) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.MaybeLeaveExplorationEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'session_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'client_time_spent_in_secs': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'params': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'play_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.MaybeLeaveExplorationEventLogEntryModel + .get_export_policy(), + expected_export_policy_dict + ) + class CompleteExplorationEventLogEntryModelUnitTests( test_utils.GenericTestBase): @@ -206,8 +367,6 @@ def test_create_and_get_event_models(self) -> None: event_model = stats_models.CompleteExplorationEventLogEntryModel.get( event_id) - # Ruling out the possibility of None for mypy type checking. 
- assert event_model is not None self.assertEqual(event_model.exploration_id, 'exp_id1') self.assertEqual(event_model.exploration_version, 1) self.assertEqual(event_model.state_name, 'state_name1') @@ -216,6 +375,35 @@ def test_create_and_get_event_models(self) -> None: self.assertEqual(event_model.params, {}) self.assertEqual(event_model.play_type, feconf.PLAY_TYPE_NORMAL) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.CompleteExplorationEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'session_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'client_time_spent_in_secs': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'params': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'play_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.CompleteExplorationEventLogEntryModel + .get_export_policy(), + expected_export_policy_dict + ) + class RateExplorationEventLogEntryModelUnitTests( test_utils.GenericTestBase): @@ -227,6 +415,41 @@ def test_get_deletion_policy(self) -> None: .get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_create_event_models(self) -> None: + event_id = stats_models.RateExplorationEventLogEntryModel.create( + 'exp_id', 'user_id', 2, 1 + ) + event_model = stats_models.RateExplorationEventLogEntryModel.get( + event_id) + + 
self.assertEqual(event_model.exploration_id, 'exp_id') + self.assertEqual(event_model.rating, 2) + self.assertEqual(event_model.old_rating, 1) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.RateExplorationEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'rating': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'old_rating': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.RateExplorationEventLogEntryModel + .get_export_policy(), + expected_export_policy_dict + ) + class StateHitEventLogEntryModelUnitTests(test_utils.GenericTestBase): """Test the StateHitEventLogEntryModel class.""" @@ -245,14 +468,38 @@ def test_create_and_get_event_models(self) -> None: event_model = stats_models.StateHitEventLogEntryModel.get( event_id) - # Ruling out the possibility of None for mypy type checking. 
- assert event_model is not None self.assertEqual(event_model.exploration_id, 'exp_id1') self.assertEqual(event_model.exploration_version, 1) self.assertEqual(event_model.state_name, 'state_name1') self.assertEqual(event_model.session_id, 'session_id1') self.assertEqual(event_model.play_type, feconf.PLAY_TYPE_NORMAL) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.StateHitEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'session_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'params': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'play_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.StateHitEventLogEntryModel.get_export_policy(), + expected_export_policy_dict + ) + class StateCompleteEventLogEntryModelUnitTests(test_utils.GenericTestBase): """Test the StateCompleteEventLogEntryModel class.""" @@ -270,14 +517,37 @@ def test_create_and_get_event_models(self) -> None: event_model = stats_models.StateCompleteEventLogEntryModel.get( event_id) - # Ruling out the possibility of None for mypy type checking. 
- assert event_model is not None self.assertEqual(event_model.exp_id, 'exp_id1') self.assertEqual(event_model.exp_version, 1) self.assertEqual(event_model.state_name, 'state_name1') self.assertEqual(event_model.session_id, 'session_id1') self.assertEqual(event_model.time_spent_in_state_secs, 0.0) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.StateCompleteEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'session_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'time_spent_in_state_secs': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.StateCompleteEventLogEntryModel.get_export_policy(), + expected_export_policy_dict + ) + class LeaveForRefresherExplorationEventLogEntryModelUnitTests( test_utils.GenericTestBase): @@ -298,8 +568,6 @@ def test_create_and_get_event_models(self) -> None: stats_models.LeaveForRefresherExplorationEventLogEntryModel.get( event_id)) - # Ruling out the possibility of None for mypy type checking. 
- assert event_model is not None self.assertEqual(event_model.exp_id, 'exp_id1') self.assertEqual(event_model.refresher_exp_id, 'exp_id2') self.assertEqual(event_model.exp_version, 1) @@ -310,6 +578,33 @@ def test_create_and_get_event_models(self) -> None: event_model.event_schema_version, feconf.CURRENT_EVENT_MODELS_SCHEMA_VERSION) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.LeaveForRefresherExplorationEventLogEntryModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'refresher_exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'session_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'time_spent_in_state_secs': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'event_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.LeaveForRefresherExplorationEventLogEntryModel + .get_export_policy(), + expected_export_policy_dict + ) + class ExplorationStatsModelUnitTests(test_utils.GenericTestBase): """Test the ExplorationStatsModel class.""" @@ -340,6 +635,25 @@ def test_create_and_get_analytics_model(self) -> None: self.assertEqual(model.num_completions_v2, 0) self.assertEqual(model.state_stats_mapping, {}) + def test_create_analytics_model(self) -> None: + model_id = stats_models.ExplorationStatsModel.create( + 'exp_id1', 1, 0, 0, 0, 0, 0, 0, {}) + fetched_model = stats_models.ExplorationStatsModel.get_model( + 'exp_id1', 1) + + # Ruling out the possibility of None for mypy type checking. 
+ assert fetched_model is not None + self.assertEqual(fetched_model.id, model_id) + self.assertEqual(fetched_model.exp_id, 'exp_id1') + self.assertEqual(fetched_model.exp_version, 1) + self.assertEqual(fetched_model.num_starts_v1, 0) + self.assertEqual(fetched_model.num_actual_starts_v1, 0) + self.assertEqual(fetched_model.num_completions_v1, 0) + self.assertEqual(fetched_model.num_starts_v2, 0) + self.assertEqual(fetched_model.num_actual_starts_v2, 0) + self.assertEqual(fetched_model.num_completions_v2, 0) + self.assertEqual(fetched_model.state_stats_mapping, {}) + def test_get_multi_stats_models(self) -> None: stats_models.ExplorationStatsModel.create( 'exp_id1', 1, 0, 0, 0, 0, 0, 0, {}) @@ -349,9 +663,9 @@ def test_get_multi_stats_models(self) -> None: 'exp_id2', 1, 0, 0, 0, 0, 0, 0, {}) exp_version_reference_dicts = [ - exp_domain.ExpVersionReference('exp_id1', 1), # type: ignore[no-untyped-call] - exp_domain.ExpVersionReference('exp_id1', 2), # type: ignore[no-untyped-call] - exp_domain.ExpVersionReference('exp_id2', 1)] # type: ignore[no-untyped-call] + exp_domain.ExpVersionReference('exp_id1', 1), + exp_domain.ExpVersionReference('exp_id1', 2), + exp_domain.ExpVersionReference('exp_id2', 1)] stat_models = stats_models.ExplorationStatsModel.get_multi_stats_models( exp_version_reference_dicts) @@ -368,6 +682,51 @@ def test_get_multi_stats_models(self) -> None: self.assertEqual(stat_models[2].exp_id, 'exp_id2') self.assertEqual(stat_models[2].exp_version, 1) + def test_get_multi_versions(self) -> None: + stats_models.ExplorationStatsModel.create( + 'exp_id1', 1, 0, 0, 0, 0, 0, 0, {}) + stats_models.ExplorationStatsModel.create( + 'exp_id1', 2, 0, 0, 0, 0, 0, 0, {}) + + stat_models = stats_models.ExplorationStatsModel.get_multi_versions( + 'exp_id1', [1, 2] + ) + + assert stat_models[0] is not None + assert stat_models[1] is not None + self.assertEqual(len(stat_models), 2) + self.assertEqual(stat_models[0].exp_id, 'exp_id1') + 
self.assertEqual(stat_models[0].exp_version, 1) + self.assertEqual(stat_models[1].exp_id, 'exp_id1') + self.assertEqual(stat_models[1].exp_version, 2) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.ExplorationStatsModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_starts_v1': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_starts_v2': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_actual_starts_v1': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_actual_starts_v2': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_completions_v1': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_completions_v2': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_stats_mapping': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.ExplorationStatsModel.get_export_policy(), + expected_export_policy_dict + ) + class ExplorationIssuesModelUnitTests(test_utils.GenericTestBase): """Test the ExplorationIssuesModel class.""" @@ -384,6 +743,17 @@ def test_create_and_get_exp_issues_model(self) -> None: model = stats_models.ExplorationIssuesModel.get(model_id) + self.assertEqual(model.id, model_id) + self.assertEqual(model.exp_id, 'exp_id1') + self.assertEqual(model.exp_version, 1) + self.assertEqual(model.unresolved_issues, []) + + def test_get_exploration_issues_model(self) -> None: + model_id = ( + stats_models.ExplorationIssuesModel.create( + 'exp_id1', 1, [])) + + model = stats_models.ExplorationIssuesModel.get_model('exp_id1', 1) # Ruling out the possibility of None for mypy type 
checking. assert model is not None self.assertEqual(model.id, model_id) @@ -391,6 +761,27 @@ def test_create_and_get_exp_issues_model(self) -> None: self.assertEqual(model.exp_version, 1) self.assertEqual(model.unresolved_issues, []) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.ExplorationIssuesModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'unresolved_issues': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + stats_models.ExplorationIssuesModel.get_export_policy(), + expected_export_policy_dict + ) + class PlaythroughModelUnitTests(test_utils.GenericTestBase): """Test the PlaythroughModel class.""" @@ -407,8 +798,6 @@ def test_create_and_get_playthrough_model(self) -> None: model = stats_models.PlaythroughModel.get(model_id) - # Ruling out the possibility of None for mypy type checking. 
- assert model is not None self.assertEqual(model.id, model_id) self.assertEqual(model.exp_id, 'exp_id1') self.assertEqual(model.exp_version, 1) @@ -422,7 +811,7 @@ def test_create_raises_error_when_many_id_collisions_occur(self) -> None: stats_models.PlaythroughModel, 'get_by_id', types.MethodType( lambda _, __: True, stats_models.PlaythroughModel)) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( # type: ignore[no-untyped-call] + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'The id generator for PlaythroughModel is producing too ' 'many collisions.') @@ -430,6 +819,29 @@ def test_create_raises_error_when_many_id_collisions_occur(self) -> None: stats_models.PlaythroughModel.create( 'exp_id1', 1, 'EarlyQuit', {}, []) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.PlaythroughModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exp_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'issue_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'issue_customization_args': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'actions': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.PlaythroughModel.get_export_policy(), + expected_export_policy_dict + ) + class LearnerAnswerDetailsModelUnitTests(test_utils.GenericTestBase): """Tests the LearnerAnswerDetailsModel class.""" @@ -565,6 +977,33 @@ def test_save_and_get_model_instance_for_unicode_state_names(self) -> None: self.assertEqual( model_instance.state_reference, '123:%s' % (state_name)) + def test_get_model_association_to_user(self) -> None: 
+ self.assertEqual( + stats_models.LearnerAnswerDetailsModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_reference': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'interaction_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'learner_answer_info_list': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'learner_answer_info_schema_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'accumulated_answer_info_json_size_bytes': + base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.LearnerAnswerDetailsModel.get_export_policy(), + expected_export_policy_dict + ) + class ExplorationAnnotationsModelUnitTests(test_utils.GenericTestBase): """Tests the ExplorationAnnotationsModel class.""" @@ -580,8 +1019,6 @@ def test_create_and_get_models(self) -> None: model1 = stats_models.ExplorationAnnotationsModel.get('exp_id1:1') - # Ruling out the possibility of None for mypy type checking. 
- assert model1 is not None self.assertEqual(model1.exploration_id, 'exp_id1') self.assertEqual(model1.version, '1') self.assertEqual(model1.num_starts, 5) @@ -605,6 +1042,29 @@ def test_get_version_for_invalid_exploration_id(self) -> None: self.assertEqual(versions, []) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.ExplorationAnnotationsModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_starts': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'num_completions': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_hit_counts': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.ExplorationAnnotationsModel.get_export_policy(), + expected_export_policy_dict + ) + class StateAnswersModelUnitTests(test_utils.GenericTestBase): """Tests the StateAnswersModel class.""" @@ -616,7 +1076,19 @@ def test_get_deletion_policy(self) -> None: def test_shard_count_is_updated_when_data_overflows(self) -> None: - submitted_answer_list = [{'answer': 'value'}] + submitted_answer_list: List[stats_domain.SubmittedAnswerDict] = [{ + 'answer': 'value', + 'interaction_id': 'TextInput', + 'answer_group_index': 0, + 'rule_spec_index': 1, + 'classification_categorization': ( + exp_domain.EXPLICIT_CLASSIFICATION), + 'params': {}, + 'session_id': 'sess', + 'time_spent_in_sec': 10.5, + 'rule_spec_str': 'rule spec str', + 'answer_str': 'answer str' + }] stats_models.StateAnswersModel.insert_submitted_answers( 'exp_id', 1, 'state_name', 'interaction_id', @@ -673,6 +1145,153 @@ def 
test_shard_count_is_updated_when_data_overflows(self) -> None: assert model1 is not None self.assertEqual(model1.shard_count, 2) + def test_get_all_state_answer_models_of_a_single_shard(self) -> None: + self.assertIsNone(stats_models.StateAnswersModel.get_all_models( + 'exp_id', 1, 'state_name' + )) + + # The 'shard_count' will be zero since the number of answer lists + # is less than _MAX_ANSWER_LIST_BYTE_SIZE. + submitted_answer_list1: List[stats_domain.SubmittedAnswerDict] = [{ + 'answer': 'value1', + 'interaction_id': 'TextInput', + 'answer_group_index': 0, + 'rule_spec_index': 1, + 'classification_categorization': ( + exp_domain.EXPLICIT_CLASSIFICATION), + 'params': {}, + 'session_id': 'sess', + 'time_spent_in_sec': 10.5, + 'rule_spec_str': 'rule spec str', + 'answer_str': 'answer str' + }] + stats_models.StateAnswersModel.insert_submitted_answers( + 'exp_id', 1, 'state_name', 'interaction_id1', + submitted_answer_list1) + + submitted_answer_list2: List[stats_domain.SubmittedAnswerDict] = [{ + 'answer': 'value2', + 'interaction_id': 'TextInput', + 'answer_group_index': 0, + 'rule_spec_index': 1, + 'classification_categorization': ( + exp_domain.EXPLICIT_CLASSIFICATION), + 'params': {}, + 'session_id': 'sess', + 'time_spent_in_sec': 10.5, + 'rule_spec_str': 'rule spec str', + 'answer_str': 'answer str' + }] + stats_models.StateAnswersModel.insert_submitted_answers( + 'exp_id', 1, 'state_name', 'interaction_id2', + submitted_answer_list2) + + stat_answer_models = stats_models.StateAnswersModel.get_all_models( + 'exp_id', 1, 'state_name' + ) + + # Ruling out the possibility of None for mypy type checking. + assert stat_answer_models is not None + + # Ensure we got the correct model. 
+ self.assertEqual(stat_answer_models[0].exploration_id, 'exp_id') + self.assertEqual(stat_answer_models[0].exploration_version, 1) + self.assertEqual(stat_answer_models[0].state_name, 'state_name') + self.assertEqual( + stat_answer_models[0].submitted_answer_list, + submitted_answer_list1 + submitted_answer_list2 + ) + + def test_get_all_state_answer_models_of_all_shards(self) -> None: + # Use a smaller max answer list size so fewer answers are needed to + # exceed a shard. This will increase the 'shard_count'. + with self.swap( + stats_models.StateAnswersModel, '_MAX_ANSWER_LIST_BYTE_SIZE', 1): + submitted_answer_list1: List[stats_domain.SubmittedAnswerDict] = [{ + 'answer': 'value1', + 'interaction_id': 'TextInput', + 'answer_group_index': 0, + 'rule_spec_index': 1, + 'classification_categorization': ( + exp_domain.EXPLICIT_CLASSIFICATION), + 'params': {}, + 'session_id': 'sess', + 'time_spent_in_sec': 10.5, + 'rule_spec_str': 'rule spec str', + 'answer_str': 'answer str' + }] + stats_models.StateAnswersModel.insert_submitted_answers( + 'exp_id', 1, 'state_name', 'interaction_id1', + submitted_answer_list1) + + submitted_answer_list2: List[stats_domain.SubmittedAnswerDict] = [{ + 'answer': 'value2', + 'interaction_id': 'TextInput', + 'answer_group_index': 0, + 'rule_spec_index': 1, + 'classification_categorization': ( + exp_domain.EXPLICIT_CLASSIFICATION), + 'params': {}, + 'session_id': 'sess', + 'time_spent_in_sec': 10.5, + 'rule_spec_str': 'rule spec str', + 'answer_str': 'answer str' + }] + stats_models.StateAnswersModel.insert_submitted_answers( + 'exp_id', 1, 'state_name', 'interaction_id2', + submitted_answer_list2) + + stat_answer_models = stats_models.StateAnswersModel.get_all_models( + 'exp_id', 1, 'state_name' + ) + + # Ruling out the possibility of None for mypy type checking. + assert stat_answer_models is not None + + # Ensure we got the correct model. 
+ self.assertEqual(stat_answer_models[1].exploration_id, 'exp_id') + self.assertEqual(stat_answer_models[1].exploration_version, 1) + self.assertEqual(stat_answer_models[1].state_name, 'state_name') + self.assertEqual( + stat_answer_models[1].submitted_answer_list, + submitted_answer_list1 + ) + + self.assertEqual(stat_answer_models[2].exploration_id, 'exp_id') + self.assertEqual(stat_answer_models[2].exploration_version, 1) + self.assertEqual(stat_answer_models[2].state_name, 'state_name') + self.assertEqual( + stat_answer_models[2].submitted_answer_list, + submitted_answer_list2 + ) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.StateAnswersModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'shard_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'interaction_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'shard_count': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'accumulated_answer_json_size_bytes': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'submitted_answer_list': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.StateAnswersModel.get_export_policy(), + expected_export_policy_dict + ) + class StateAnswersCalcOutputModelUnitTests(test_utils.GenericTestBase): """Tests the StateAnswersCalcOutputModel class.""" @@ -681,3 +1300,28 @@ def test_get_deletion_policy(self) -> None: self.assertEqual( 
stats_models.StateAnswersCalcOutputModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + stats_models.StateAnswersCalcOutputModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'state_name': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'interaction_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'calculation_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'calculation_output_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'calculation_output': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + stats_models.StateAnswersCalcOutputModel.get_export_policy(), + expected_export_policy_dict + ) diff --git a/core/storage/storage_models_test.py b/core/storage/storage_models_test.py index 2dd2ddd912ea..2a9e891c6240 100644 --- a/core/storage/storage_models_test.py +++ b/core/storage/storage_models_test.py @@ -22,22 +22,21 @@ from core.platform import models from core.tests import test_utils -( - base_models, collection_models, email_models, - exploration_models, feedback_models, skill_models, - topic_models, suggestion_models, user_models, - story_models, question_models, config_models -) = models.Registry.import_models([ - models.NAMES.base_model, models.NAMES.collection, models.NAMES.email, - models.NAMES.exploration, models.NAMES.feedback, models.NAMES.skill, - models.NAMES.topic, models.NAMES.suggestion, models.NAMES.user, - models.NAMES.story, models.NAMES.question, models.NAMES.config]) +from typing import Iterator, 
Type + +MYPY = False +if MYPY: # pragma: no cover + from mypy_imports import base_models + +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) class StorageModelsTest(test_utils.GenericTestBase): """Tests for Oppia storage models.""" - def _get_base_or_versioned_model_child_classes(self): + def _get_base_or_versioned_model_child_classes( + self + ) -> Iterator[Type[base_models.BaseModel]]: """Get child model classes that inherit directly from BaseModel or VersionedModel, these are classes that are used directly for saving data and not just inherited from. @@ -49,7 +48,7 @@ def _get_base_or_versioned_model_child_classes(self): continue yield clazz - def test_all_model_module_names_unique(self): + def test_all_model_module_names_unique(self) -> None: names_of_ndb_model_subclasses = [ clazz.__name__ for clazz in test_utils.get_storage_model_classes()] @@ -57,7 +56,9 @@ def test_all_model_module_names_unique(self): len(set(names_of_ndb_model_subclasses)), len(names_of_ndb_model_subclasses)) - def test_base_or_versioned_child_classes_have_get_deletion_policy(self): + def test_base_or_versioned_child_classes_have_get_deletion_policy( + self + ) -> None: for clazz in self._get_base_or_versioned_model_child_classes(): try: self.assertIn( @@ -67,11 +68,12 @@ def test_base_or_versioned_child_classes_have_get_deletion_policy(self): clazz.__name__)) def test_base_or_versioned_child_classes_have_has_reference_to_user_id( - self): + self + ) -> None: for clazz in self._get_base_or_versioned_model_child_classes(): if (clazz.get_deletion_policy() == base_models.DELETION_POLICY.NOT_APPLICABLE): - with self.assertRaisesRegexp( + with self.assertRaisesRegex( NotImplementedError, re.escape( 'The has_reference_to_user_id() method is missing from ' @@ -89,7 +91,7 @@ def test_base_or_versioned_child_classes_have_has_reference_to_user_id( msg='has_reference_to_user_id is not defined for %s' % ( clazz.__name__)) - def test_get_models_which_should_be_exported(self): + 
def test_get_models_which_should_be_exported(self) -> None: """Ensure that the set of models to export is the set of models with export policy CONTAINS_USER_DATA, and that all other models have export policy NOT_APPLICABLE. @@ -111,7 +113,7 @@ def test_get_models_which_should_be_exported(self): self.assertNotIn( base_models.EXPORT_POLICY.EXPORTED, export_policy.values()) - def test_all_fields_have_export_policy(self): + def test_all_fields_have_export_policy(self) -> None: """Ensure every field in every model has an export policy defined.""" all_models = [ clazz diff --git a/core/storage/story/gae_models.py b/core/storage/story/gae_models.py index f7ac2e82a551..3ae4ea839b7d 100644 --- a/core/storage/story/gae_models.py +++ b/core/storage/story/gae_models.py @@ -19,7 +19,7 @@ from core.constants import constants from core.platform import models -from typing import Any, Dict, List, Optional +from typing import Dict, Mapping, Optional MYPY = False if MYPY: # pragma: no cover @@ -27,7 +27,7 @@ from mypy_imports import datastore_services (base_models, user_models,) = models.Registry.import_models([ - models.NAMES.base_model, models.NAMES.user]) + models.Names.BASE_MODEL, models.Names.USER]) datastore_services = models.Registry.import_datastore_services() @@ -141,15 +141,21 @@ def get_deletion_policy() -> base_models.DELETION_POLICY: """Model doesn't contain any data directly corresponding to a user.""" return base_models.DELETION_POLICY.NOT_APPLICABLE - # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object - # to remove Any used below. - def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + # Here we use MyPy ignore because the signature of this method doesn't + # match with VersionedModel.compute_models_to_commit(). Because argument + # `commit_message` of super class can accept Optional[str] but this method + # can only accept str. 
+ def compute_models_to_commit( # type: ignore[override] + self, + committer_id: str, + commit_type: str, + commit_message: str, + commit_cmds: base_models.AllowedCommitCmdsListType, + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't + # be allowed. + additional_models: Mapping[str, base_models.BaseModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this extends the superclass method. @@ -165,17 +171,32 @@ def _trusted_commit( reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. + + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. """ - super(StoryModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) story_commit_log_entry = StoryCommitLogEntryModel.create( self.id, self.version, committer_id, commit_type, commit_message, commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False ) story_commit_log_entry.story_id = self.id - story_commit_log_entry.update_timestamps() - story_commit_log_entry.put() + return { + 'snapshot_metadata_model': models_to_put['snapshot_metadata_model'], + 'snapshot_content_model': models_to_put['snapshot_content_model'], + 'commit_log_model': story_commit_log_entry, + 'versioned_model': models_to_put['versioned_model'], + } @staticmethod def get_model_association_to_user( diff --git a/core/storage/story/gae_models_test.py b/core/storage/story/gae_models_test.py index 86d63aaf4b6b..0a6f7c1e76bb 100644 --- a/core/storage/story/gae_models_test.py +++ b/core/storage/story/gae_models_test.py @@ -28,8 +28,9 @@ from mypy_imports 
import base_models from mypy_imports import story_models -(base_models, story_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.story]) +(base_models, story_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.STORY +]) class StorySnapshotContentModelTests(test_utils.GenericTestBase): @@ -48,6 +49,36 @@ def test_get_deletion_policy(self) -> None: story_models.StoryModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_export_policy(self) -> None: + expexted_export_policy_dict = { + 'title': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'thumbnail_size_in_bytes': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'description': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'notes': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'story_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'story_contents_schema_version': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'corresponding_topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'meta_tag_content': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + story_models.StoryModel.get_export_policy(), + expexted_export_policy_dict) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + story_models.StoryModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + def test_story_model(self) -> None: """Method to test the StoryModel.""" @@ -106,6 +137,34 @@ def test_get_by_url_fragment(self) -> None: class StoryCommitLogEntryModelUnitTest(test_utils.GenericTestBase): """Test the StoryCommitLogEntryModel class.""" + def test_get_export_policy(self) -> None: + expexted_export_policy_dict = { + 'story_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_cmds': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_message': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'post_commit_community_owned': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'post_commit_is_private': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'post_commit_status': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + story_models.StoryCommitLogEntryModel.get_export_policy(), + expexted_export_policy_dict) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + story_models.StoryCommitLogEntryModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + def test_has_reference_to_user_id(self) -> None: commit = story_models.StoryCommitLogEntryModel.create( 'b', 0, 'committer_id', 'msg', 'create', [{}], @@ -129,6 +188,33 @@ def test_get_deletion_policy(self) -> None: story_models.StorySummaryModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + def test_get_export_policy(self) -> None: + expexted_export_policy_dict = { + 'title': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'description': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'story_model_last_updated': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'story_model_created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'node_titles': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + self.assertEqual( + story_models.StorySummaryModel.get_export_policy(), + expexted_export_policy_dict) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + story_models.StorySummaryModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + def test_story_summary_model(self) -> None: """Method to test the StorySummaryModel.""" diff --git a/core/storage/subtopic/gae_models.py b/core/storage/subtopic/gae_models.py index a22f0f0e8637..0ed31ed5b971 100644 --- a/core/storage/subtopic/gae_models.py +++ b/core/storage/subtopic/gae_models.py @@ -21,14 +21,14 @@ from core.constants import constants from core.platform import models -from typing import Any, Dict, List +from typing import Dict, Mapping MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() @@ -123,15 +123,21 @@ def get_deletion_policy() -> base_models.DELETION_POLICY: """Model doesn't contain any data directly corresponding to a user.""" return base_models.DELETION_POLICY.NOT_APPLICABLE - # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object - # to remove Any used below. - def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + # Here we use MyPy ignore because the signature of this method doesn't + # match with VersionedModel.compute_models_to_commit(). Because argument + # `commit_message` of super class can accept Optional[str] but this method + # can only accept str. + def compute_models_to_commit( # type: ignore[override] + self, + committer_id: str, + commit_type: str, + commit_message: str, + commit_cmds: base_models.AllowedCommitCmdsListType, + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't + # be allowed. 
+ additional_models: Mapping[str, base_models.BaseModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this extends the superclass method. @@ -147,17 +153,35 @@ def _trusted_commit( reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. + + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. """ - super(SubtopicPageModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) subtopic_page_commit_log_entry = SubtopicPageCommitLogEntryModel.create( self.id, self.version, committer_id, commit_type, commit_message, commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False ) subtopic_page_commit_log_entry.subtopic_page_id = self.id - subtopic_page_commit_log_entry.update_timestamps() - subtopic_page_commit_log_entry.put() + # The order is important here, as the 'versioned_model' needs to be + # after 'snapshot_content_model' otherwise it leads to problems with + # putting the models into the datastore. 
+ return { + 'snapshot_metadata_model': models_to_put['snapshot_metadata_model'], + 'snapshot_content_model': models_to_put['snapshot_content_model'], + 'commit_log_model': subtopic_page_commit_log_entry, + 'versioned_model': models_to_put['versioned_model'], + } @classmethod def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: diff --git a/core/storage/subtopic/gae_models_test.py b/core/storage/subtopic/gae_models_test.py index f6e42b9caa70..3f8ef53c1306 100644 --- a/core/storage/subtopic/gae_models_test.py +++ b/core/storage/subtopic/gae_models_test.py @@ -24,13 +24,16 @@ from core.platform import models from core.tests import test_utils +from typing import Final + MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import subtopic_models (base_models, subtopic_models) = models.Registry.import_models([ - models.NAMES.base_model, models.NAMES.subtopic]) + models.Names.BASE_MODEL, models.Names.SUBTOPIC +]) class SubtopicPageSnapshotContentModelTests(test_utils.GenericTestBase): @@ -45,7 +48,23 @@ def test_get_deletion_policy_is_not_applicable(self) -> None: class SubtopicPageModelUnitTest(test_utils.GenericTestBase): """Tests the SubtopicPageModel class.""" - SUBTOPIC_PAGE_ID = 'subtopic_page_id' + SUBTOPIC_PAGE_ID: Final = 'subtopic_page_id' + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'page_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'page_contents_schema_version': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + subtopic_models.SubtopicPageModel.get_export_policy(), + 
expected_export_policy_dict) def test_get_deletion_policy(self) -> None: self.assertEqual( @@ -53,7 +72,7 @@ def test_get_deletion_policy(self) -> None: base_models.DELETION_POLICY.NOT_APPLICABLE) def test_that_subsidiary_models_are_created_when_new_model_is_saved( - self + self ) -> None: """Tests the _trusted_commit() method.""" @@ -108,6 +127,34 @@ def test_has_reference_to_user_id(self) -> None: subtopic_models.SubtopicPageCommitLogEntryModel .has_reference_to_user_id('x_id')) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + subtopic_models.SubtopicPageCommitLogEntryModel. + get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + expected_export_policy_dict = { + 'subtopic_page_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_cmds': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_message': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'commit_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'post_commit_community_owned': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'post_commit_is_private': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'post_commit_status': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + self.assertEqual( + subtopic_models.SubtopicPageCommitLogEntryModel.get_export_policy(), + expected_export_policy_dict) + def test__get_instance_id(self) -> None: # Calling create() method calls _get_instance (a protected method) # and sets the instance id equal to the result of calling that method. 
diff --git a/core/storage/suggestion/gae_models.py b/core/storage/suggestion/gae_models.py index cdd00eccb427..84fdae8e68e8 100644 --- a/core/storage/suggestion/gae_models.py +++ b/core/storage/suggestion/gae_models.py @@ -19,26 +19,32 @@ import datetime from core import feconf +from core.constants import constants from core.platform import models -from typing import Any, Dict, List, Optional, Sequence, Tuple, Union +from typing import ( + Dict, Final, List, Literal, Mapping, Optional, Sequence, Tuple, TypedDict, + Union) MYPY = False if MYPY: # pragma: no cover + # Here, 'change_domain' is imported only for type checking. + from core.domain import change_domain # pylint: disable=invalid-import # isort:skip from mypy_imports import base_models from mypy_imports import datastore_services -(base_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.user]) +(base_models, user_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.USER +]) datastore_services = models.Registry.import_datastore_services() # Constants defining the different possible statuses of a suggestion. -STATUS_ACCEPTED = 'accepted' -STATUS_IN_REVIEW = 'review' -STATUS_REJECTED = 'rejected' +STATUS_ACCEPTED: Final = 'accepted' +STATUS_IN_REVIEW: Final = 'review' +STATUS_REJECTED: Final = 'rejected' -STATUS_CHOICES = [ +STATUS_CHOICES: Final = [ STATUS_ACCEPTED, STATUS_IN_REVIEW, STATUS_REJECTED @@ -48,70 +54,85 @@ # Contributor Dashboard to review. The constants below define the number of # question and translation suggestions to fetch to come up with these daily # suggestion recommendations. 
-MAX_QUESTION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS = 30 -MAX_TRANSLATION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS = 30 +MAX_QUESTION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS: Final = 30 +MAX_TRANSLATION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS: Final = 30 # Defines what is the minimum role required to review suggestions # of a particular type. -SUGGESTION_MINIMUM_ROLE_FOR_REVIEW = { +SUGGESTION_MINIMUM_ROLE_FOR_REVIEW: Final = { feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT: feconf.ROLE_ID_FULL_USER } # Constants defining various contribution types. -SCORE_TYPE_CONTENT = 'content' -SCORE_TYPE_TRANSLATION = 'translation' -SCORE_TYPE_QUESTION = 'question' +SCORE_TYPE_CONTENT: Final = 'content' +SCORE_TYPE_TRANSLATION: Final = 'translation' +SCORE_TYPE_QUESTION: Final = 'question' -SCORE_TYPE_CHOICES = [ +SCORE_TYPE_CHOICES: Final = [ SCORE_TYPE_CONTENT, SCORE_TYPE_TRANSLATION, SCORE_TYPE_QUESTION ] # The delimiter to be used in score category field. -SCORE_CATEGORY_DELIMITER = '.' +SCORE_CATEGORY_DELIMITER: Final = '.' # Threshold number of days after which suggestion will be accepted. -THRESHOLD_DAYS_BEFORE_ACCEPT = 7 +THRESHOLD_DAYS_BEFORE_ACCEPT: Final = 7 # Threshold time after which suggestion is considered stale and auto-accepted. -THRESHOLD_TIME_BEFORE_ACCEPT_IN_MSECS = ( - THRESHOLD_DAYS_BEFORE_ACCEPT * 24 * 60 * 60 * 1000) +THRESHOLD_TIME_BEFORE_ACCEPT_IN_MSECS: Final = ( + THRESHOLD_DAYS_BEFORE_ACCEPT * 24 * 60 * 60 * 1000 +) # Threshold number of days after which to notify the admin that the # suggestion has waited too long for a review. The admin will be notified of the # top MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_ADMIN number of suggestions that have # waited for a review longer than the threshold number of days. -SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS = 7 +SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS: Final = 7 # The maximum number of suggestions, that have been waiting too long for review, # to email admins about. 
-MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_ADMIN = 10 +MAX_NUMBER_OF_SUGGESTIONS_TO_EMAIL_ADMIN: Final = 10 # The default message to be shown when accepting stale suggestions. -DEFAULT_SUGGESTION_ACCEPT_MESSAGE = ( +DEFAULT_SUGGESTION_ACCEPT_MESSAGE: Final = ( 'Automatically accepting suggestion after' - ' %d days' % THRESHOLD_DAYS_BEFORE_ACCEPT) + ' %d days' % THRESHOLD_DAYS_BEFORE_ACCEPT +) # The message to be shown when rejecting a suggestion with a target ID of a # deleted skill. -DELETED_SKILL_REJECT_MESSAGE = 'The associated skill no longer exists.' +DELETED_SKILL_REJECT_MESSAGE: Final = 'The associated skill no longer exists.' # The message to be shown when rejecting a translation suggestion that is # associated with an exploration that no longer corresponds to the story. # The story could have been deleted or the exploration could have been removed # from the story. -INVALID_STORY_REJECT_TRANSLATION_SUGGESTIONS_MSG = ( +INVALID_STORY_REJECT_TRANSLATION_SUGGESTIONS_MSG: Final = ( 'This text snippet has been removed from the story, and no longer needs ' 'translation. Sorry about that!' ) # The amount to increase the score of the author by after successfuly getting an # accepted suggestion. -INCREMENT_SCORE_OF_AUTHOR_BY = 1 +INCREMENT_SCORE_OF_AUTHOR_BY: Final = 1 # The unique ID for the CommunityContributionStatsModel. 
-COMMUNITY_CONTRIBUTION_STATS_MODEL_ID = 'community_contribution_stats' +COMMUNITY_CONTRIBUTION_STATS_MODEL_ID: Final = 'community_contribution_stats' + + +class GeneralSuggestionExportDataDict(TypedDict): + """Type for the Dictionary of the data from GeneralSuggestionModel.""" + + suggestion_type: str + target_type: str + target_id: str + target_version_at_submission: int + status: str + change_cmd: Dict[str, change_domain.AcceptableChangeDictTypes] + language_code: str + edited_by_reviewer: bool class GeneralSuggestionModel(base_models.BaseModel): @@ -122,7 +143,7 @@ class GeneralSuggestionModel(base_models.BaseModel): """ # We use the model id as a key in the Takeout dict. - ID_IS_USED_AS_TAKEOUT_KEY = True + ID_IS_USED_AS_TAKEOUT_KEY: Literal[True] = True # The type of suggestion. suggestion_type = datastore_services.StringProperty( @@ -210,8 +231,6 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: cls.author_id == user_id, cls.final_reviewer_id == user_id )).get(keys_only=True) is not None - # TODO(#13523): Change 'change_cmd' to TypedDict/Domain Object - # to remove Any used below. @classmethod def create( cls, @@ -221,8 +240,10 @@ def create( target_version_at_submission: int, status: str, author_id: str, - final_reviewer_id: str, - change_cmd: Dict[str, Any], + final_reviewer_id: Optional[str], + change_cmd: Mapping[ + str, change_domain.AcceptableChangeDictTypes + ], score_category: str, thread_id: str, language_code: Optional[str] @@ -237,8 +258,9 @@ def create( entity at the time of creation of the suggestion. status: str. The status of the suggestion. author_id: str. The ID of the user who submitted the suggestion. - final_reviewer_id: str. The ID of the reviewer who has - accepted/rejected the suggestion. + final_reviewer_id: str|None. The ID of the reviewer who has + accepted/rejected the suggestion, or None if no reviewer is + assigned. change_cmd: dict. The actual content of the suggestion. score_category: str. 
The scoring category for the suggestion. thread_id: str. The ID of the feedback thread linked to the @@ -280,6 +302,9 @@ def query_suggestions( list(SuggestionModel). A list of suggestions that match the given query values, up to a maximum of feconf.DEFAULT_SUGGESTION_QUERY_LIMIT suggestions. + + Raises: + Exception. The field cannot be queried. """ query = cls.query() for (field, value) in query_fields_and_values: @@ -312,47 +337,9 @@ def get_translation_suggestions_in_review_with_exp_id( cls.target_id == exp_id )).fetch(feconf.DEFAULT_SUGGESTION_QUERY_LIMIT) - @classmethod - def get_multiple_suggestions_from_suggestion_ids( - cls, suggestion_ids: List[str] - ) -> List[Optional[GeneralSuggestionModel]]: - """Returns suggestions matching the supplied suggestion IDs. - - Args: - suggestion_ids: list(str). Suggestion IDs of suggestions that need - to be returned. - - Returns: - list(SuggestionModel|None). A list of suggestions in matching the - supplied suggestion IDs. - """ - return GeneralSuggestionModel.get_multi(suggestion_ids) - - @classmethod - def get_translation_suggestions_in_review_ids_with_exp_id( - cls, target_exp_ids: List[str] - ) -> List[str]: - """Returns IDs of in review translation suggestions matching the - supplied target IDs. - - Args: - target_exp_ids: list(str). Exploration IDs matching the target ID - of the translation suggestions. - - Returns: - list(str). A list of IDs of translation suggestions in review - with given target_exp_ids. - """ - suggestion_keys = GeneralSuggestionModel.query( - cls.status == STATUS_IN_REVIEW, - GeneralSuggestionModel.target_id.IN(target_exp_ids) - ).fetch(keys_only=True) - - return [suggestion_key.id() for suggestion_key in suggestion_keys] - @classmethod def get_translation_suggestion_ids_with_exp_ids( - cls, exp_ids: List[str] + cls, exp_ids: List[str] ) -> List[str]: """Gets the ids of translation suggestions corresponding to explorations with the given exploration ids. 
    @classmethod
    def get_translation_suggestions_submitted_within_given_dates(
        cls,
        from_date: datetime.datetime,
        to_date: datetime.datetime,
        user_id: str,
        language_code: str
    ) -> Sequence[GeneralSuggestionModel]:
        """Gets all accepted translation suggestions in the given language
        that were submitted by the given user within the given date range.

        Args:
            from_date: datetime.datetime. The date that suggestions are
                submitted on or after.
            to_date: datetime.datetime. The date that suggestions are
                submitted on or before.
            user_id: str. The id of the user who made the submissions.
            language_code: str. The language code that the fetched
                contributions should match.

        Returns:
            list(SuggestionModel). A list of accepted translation suggestions
            that were submitted within the given date range, ordered by
            creation date.
        """
        return cls.get_all().filter(datastore_services.all_of(
            cls.created_on <= to_date,
            cls.created_on >= from_date,
            cls.author_id == user_id,
            cls.suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
            cls.language_code == language_code,
            cls.status == STATUS_ACCEPTED
        )).order(cls.created_on).fetch()
+ """ + return cls.get_all().filter(datastore_services.all_of( + cls.created_on <= to_date, + cls.created_on >= from_date, + cls.author_id == user_id, + cls.suggestion_type == feconf.SUGGESTION_TYPE_ADD_QUESTION, + cls.status == STATUS_ACCEPTED + )).order(cls.created_on).fetch() + @classmethod def get_in_review_suggestions_in_score_categories( cls, score_categories: List[str], user_id: str @@ -452,6 +499,9 @@ def get_in_review_suggestions_in_score_categories( list(SuggestionModel). A list of suggestions that are in the given score categories, which are in review, but not created by the given user. + + Raises: + Exception. Given list of score categories is empty. """ if len(score_categories) == 0: raise Exception('Received empty list of score categories') @@ -464,19 +514,23 @@ def get_in_review_suggestions_in_score_categories( @classmethod def get_in_review_translation_suggestions( - cls, user_id: str, language_codes: List[str] + cls, + user_id: str, + language_codes: List[str] ) -> Sequence[GeneralSuggestionModel]: - """Gets all translation suggestions which are in review. + """Fetches all translation suggestions that are in-review where the + author_id != user_id and language_code matches one of the supplied + language_codes. Args: - user_id: str. The id of the user trying to make this query. - As a user cannot review their own suggestions, suggestions - authored by the user will be excluded. - language_codes: list(str). The list of language codes. + user_id: str. The id of the user trying to make this query. As a + user cannot review their own suggestions, suggestions authored + by the user will be excluded. + language_codes: list(str). List of language codes that the + suggestions should match. Returns: - list(SuggestionModel). A list of suggestions that are of the given - type, which are in review, but not created by the given user. + list(SuggestionModel). A list of the matching suggestions. 
""" return cls.get_all().filter(datastore_services.all_of( cls.status == STATUS_IN_REVIEW, @@ -486,25 +540,291 @@ def get_in_review_translation_suggestions( )).fetch(feconf.DEFAULT_SUGGESTION_QUERY_LIMIT) @classmethod - def get_in_review_question_suggestions( - cls, user_id: str - ) -> Sequence[GeneralSuggestionModel]: - """Gets all question suggestions which are in review. + def get_in_review_translation_suggestions_by_offset( + cls, + limit: Optional[int], + offset: int, + user_id: str, + sort_key: Optional[str], + language_codes: List[str] + ) -> Tuple[Sequence[GeneralSuggestionModel], int]: + """Fetches translation suggestions that are in-review where the + author_id != user_id and language_code matches one of the supplied + language_codes. + + Args: + limit: int|None. Maximum number of entities to be returned. If None, + returns all matching entities. + offset: int. Number of results to skip from the beginning of all + results matching the query. + user_id: str. The id of the user trying to make this query. As a + user cannot review their own suggestions, suggestions authored + by the user will be excluded. + sort_key: str|None. The key to sort the suggestions by. + language_codes: list(str). List of language codes that the + suggestions should match. + + Returns: + Tuple of (results, next_offset). Where: + results: list(SuggestionModel). A list of suggestions that are + in-review, not authored by the supplied user, and that match + one of the supplied language codes. + next_offset: int. The input offset + the number of results + returned by the current query. + """ + if sort_key == constants.SUGGESTIONS_SORT_KEY_DATE: + # The first sort property must be the same as the property to which + # an inequality filter is applied. Thus, the inequality filter on + # author_id can not be used here. 
+ suggestion_query = cls.get_all().filter(datastore_services.all_of( + cls.status == STATUS_IN_REVIEW, + cls.suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + cls.language_code.IN(language_codes) + )).order(-cls.created_on) + + sorted_results: List[GeneralSuggestionModel] = [] + + if limit is None: + suggestion_models: Sequence[GeneralSuggestionModel] = ( + suggestion_query.fetch(offset=offset)) + for suggestion_model in suggestion_models: + offset += 1 + if suggestion_model.author_id != user_id: + sorted_results.append(suggestion_model) + else: + num_suggestions_per_fetch = 1000 + + while len(sorted_results) < limit: + suggestion_models = suggestion_query.fetch( + num_suggestions_per_fetch, offset=offset) + if not suggestion_models: + break + for suggestion_model in suggestion_models: + offset += 1 + if suggestion_model.author_id != user_id: + sorted_results.append(suggestion_model) + if len(sorted_results) == limit: + break + + return ( + sorted_results, + offset + ) + + suggestion_query = cls.get_all().filter(datastore_services.all_of( + cls.status == STATUS_IN_REVIEW, + cls.suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + cls.author_id != user_id, + cls.language_code.IN(language_codes) + )) + + results: Sequence[GeneralSuggestionModel] = ( + suggestion_query.fetch(limit, offset=offset) + if limit is not None + else suggestion_query.fetch(offset=offset) + ) + next_offset = offset + len(results) + + return ( + results, + next_offset + ) + + @classmethod + def get_in_review_translation_suggestions_with_exp_ids_by_offset( + cls, + limit: Optional[int], + offset: int, + user_id: str, + sort_key: Optional[str], + language_codes: List[str], + exp_ids: List[str] + ) -> Tuple[Sequence[GeneralSuggestionModel], int]: + """Gets all translation suggestions for the given language + codes which are in review and correspond to the + given exploration IDs. Args: + limit: int|None. Maximum number of entities to be returned. 
    @classmethod
    def get_in_review_translation_suggestions_with_exp_ids_by_offset(
        cls,
        limit: Optional[int],
        offset: int,
        user_id: str,
        sort_key: Optional[str],
        language_codes: List[str],
        exp_ids: List[str]
    ) -> Tuple[Sequence[GeneralSuggestionModel], int]:
        """Gets all translation suggestions for the given language
        codes which are in review and correspond to the
        given exploration IDs.

        Args:
            limit: int|None. Maximum number of entities to be returned. If None,
                returns all matching entities.
            offset: int. Number of results to skip from the beginning of all
                results matching the query.
            user_id: str. The id of the user trying to make this query. As a
                user cannot review their own suggestions, suggestions authored
                by the user will be excluded.
            sort_key: str|None. The key to sort the suggestions by.
            language_codes: list(str). The list of language codes.
            exp_ids: list(str). Exploration IDs matching the target ID of the
                translation suggestions.

        Returns:
            Tuple of (results, next_offset). Where:
                results: list(SuggestionModel). A list of suggestions that are
                    in-review, not authored by the supplied user, match
                    one of the supplied language codes and correspond to the
                    given exploration IDs.
                next_offset: int. The offset to use for the next fetch. Note
                    that in the date-sorted branch this counts every scanned
                    model (including the user's own, excluded suggestions), so
                    it may exceed offset + len(results).
        """
        if sort_key == constants.SUGGESTIONS_SORT_KEY_DATE:
            # The first sort property must be the same as the property to which
            # an inequality filter is applied. Thus, the inequality filter on
            # author_id can not be used here. Instead, suggestions authored by
            # the user are filtered out in Python below.
            suggestion_query = cls.get_all().filter(datastore_services.all_of(
                cls.status == STATUS_IN_REVIEW,
                cls.suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
                cls.language_code.IN(language_codes),
                cls.target_id.IN(exp_ids)
            )).order(-cls.created_on)

            sorted_results: List[GeneralSuggestionModel] = []

            if limit is None:
                # No limit: scan every matching suggestion in one fetch.
                suggestion_models: Sequence[GeneralSuggestionModel] = (
                    suggestion_query.fetch(offset=offset))
                for suggestion_model in suggestion_models:
                    # offset is advanced per scanned model so that the caller
                    # can resume scanning from the right position.
                    offset += 1
                    if suggestion_model.author_id != user_id:
                        sorted_results.append(suggestion_model)
            else:
                # Scan in batches until `limit` results are collected or the
                # query is exhausted.
                num_suggestions_per_fetch = 1000

                while len(sorted_results) < limit:
                    suggestion_models = suggestion_query.fetch(
                        num_suggestions_per_fetch, offset=offset)
                    if not suggestion_models:
                        break
                    for suggestion_model in suggestion_models:
                        offset += 1
                        if suggestion_model.author_id != user_id:
                            sorted_results.append(suggestion_model)
                        if len(sorted_results) == limit:
                            break

            return (
                sorted_results,
                offset
            )

        # Default (unsorted) path: the author_id inequality filter can be
        # applied directly in the datastore query.
        suggestion_query = cls.get_all().filter(datastore_services.all_of(
            cls.status == STATUS_IN_REVIEW,
            cls.suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
            cls.author_id != user_id,
            cls.language_code.IN(language_codes),
            cls.target_id.IN(exp_ids)
        ))

        results: Sequence[GeneralSuggestionModel] = (
            suggestion_query.fetch(limit, offset=offset)
            if limit is not None
            else suggestion_query.fetch(offset=offset)
        )
        next_offset = offset + len(results)

        return (
            results,
            next_offset
        )
+ + Returns: + list(SuggestionModel). A list of suggestions matching the supplied + exp_ids and language_code. """ return cls.get_all().filter(datastore_services.all_of( + cls.status == STATUS_IN_REVIEW, + cls.suggestion_type == feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + cls.target_id.IN(exp_ids), + cls.language_code == language_code + )).fetch(feconf.DEFAULT_SUGGESTION_QUERY_LIMIT) + + @classmethod + def get_in_review_question_suggestions_by_offset( + cls, + limit: int, + offset: int, + user_id: str, + sort_key: Optional[str] + ) -> Tuple[Sequence[GeneralSuggestionModel], int]: + """Fetches question suggestions that are in-review and not authored by + the supplied user. + + Args: + limit: int. Maximum number of entities to be returned. + offset: int. Number of of results to skip from the beginning of all + results matching the query. + user_id: str. The id of the user trying to make this query. As a + user cannot review their own suggestions, suggestions authored + by the user will be excluded. + sort_key: str|None. The key to sort the suggestions by. + + Returns: + Tuple of (results, next_offset). Where: + results: list(SuggestionModel). A list of suggestions that are + in-review, not authored by the supplied user, and that match + one of the supplied language codes. + next_offset: int. The input offset + the number of results + returned by the current query. + """ + + if sort_key == constants.SUGGESTIONS_SORT_KEY_DATE: + # The first sort property must be the same as the property to which + # an inequality filter is applied. Thus, the inequality filter on + # author_id can not be used here. 
    @classmethod
    def get_in_review_question_suggestions_by_offset(
        cls,
        limit: int,
        offset: int,
        user_id: str,
        sort_key: Optional[str]
    ) -> Tuple[Sequence[GeneralSuggestionModel], int]:
        """Fetches question suggestions that are in-review and not authored by
        the supplied user.

        Args:
            limit: int. Maximum number of entities to be returned.
            offset: int. Number of results to skip from the beginning of all
                results matching the query.
            user_id: str. The id of the user trying to make this query. As a
                user cannot review their own suggestions, suggestions authored
                by the user will be excluded.
            sort_key: str|None. The key to sort the suggestions by.

        Returns:
            Tuple of (results, next_offset). Where:
                results: list(SuggestionModel). A list of question suggestions
                    that are in-review and not authored by the supplied user.
                next_offset: int. The offset to use for the next fetch. Note
                    that in the date-sorted branch this counts every scanned
                    model (including the user's own, excluded suggestions), so
                    it may exceed offset + len(results).
        """

        if sort_key == constants.SUGGESTIONS_SORT_KEY_DATE:
            # The first sort property must be the same as the property to which
            # an inequality filter is applied. Thus, the inequality filter on
            # author_id can not be used here. Instead, suggestions authored by
            # the user are filtered out in Python below.
            suggestion_query = cls.get_all().filter(
                datastore_services.all_of(
                    cls.status == STATUS_IN_REVIEW,
                    cls.suggestion_type == feconf.SUGGESTION_TYPE_ADD_QUESTION,
                )).order(-cls.created_on)

            sorted_results: List[GeneralSuggestionModel] = []
            # Scan in batches until `limit` results are collected or the query
            # is exhausted.
            num_suggestions_per_fetch = 1000

            while len(sorted_results) < limit:
                suggestion_models: Sequence[GeneralSuggestionModel] = (
                    suggestion_query.fetch(
                        num_suggestions_per_fetch, offset=offset))
                if not suggestion_models:
                    break
                for suggestion_model in suggestion_models:
                    # offset is advanced per scanned model so that the caller
                    # can resume scanning from the right position.
                    offset += 1
                    if suggestion_model.author_id != user_id:
                        sorted_results.append(suggestion_model)
                    if len(sorted_results) == limit:
                        break

            return (
                sorted_results,
                offset
            )

        # Default (unsorted) path: the author_id inequality filter can be
        # applied directly in the datastore query.
        suggestion_query = cls.get_all().filter(datastore_services.all_of(
            cls.status == STATUS_IN_REVIEW,
            cls.suggestion_type == feconf.SUGGESTION_TYPE_ADD_QUESTION,
            cls.author_id != user_id
        ))

        results: Sequence[GeneralSuggestionModel] = (
            suggestion_query.fetch(limit, offset=offset)
        )
        next_offset = offset + len(results)

        return (
            results,
            next_offset
        )
The key to sort the suggestions by. + + Returns: + Tuple of (results, next_offset). Where: + results: list(SuggestionModel). A list of suggestions that are + of the supplied type which the supplied user has created. + next_offset: int. The input offset + the number of results + returned by the current query. + """ + suggestion_query = cls.get_all().filter(datastore_services.all_of( + cls.suggestion_type == suggestion_type, + cls.author_id == user_id + )) + + if sort_key == constants.SUGGESTIONS_SORT_KEY_DATE: + suggestion_query = suggestion_query.order(-cls.created_on) + + results: Sequence[GeneralSuggestionModel] = ( + suggestion_query.fetch(limit, offset=offset) + ) + next_offset = offset + len(results) + + return ( + results, + next_offset + ) + @classmethod def get_all_score_categories(cls) -> List[str]: """Gets all the score categories for which suggestions have been @@ -581,12 +946,10 @@ def get_all_score_categories(cls) -> List[str]: query_set = cls.query(projection=['score_category'], distinct=True) return [data.score_category for data in query_set] - # TODO(#13523): Change 'change_cmd' to TypedDict/Domain Object - # to remove Any used below. @classmethod def export_data( - cls, user_id: str - ) -> Dict[str, Dict[str, Union[str, int, bool, Dict[str, Any], None]]]: + cls, user_id: str + ) -> Dict[str, GeneralSuggestionExportDataDict]: """Exports the data from GeneralSuggestionModel into dict format for Takeout. @@ -597,7 +960,7 @@ def export_data( dict. Dictionary of the data from GeneralSuggestionModel. """ - user_data = {} + user_data: Dict[str, GeneralSuggestionExportDataDict] = {} suggestion_models: Sequence[GeneralSuggestionModel] = ( cls.get_all().filter(cls.author_id == user_id).fetch()) @@ -618,189 +981,6 @@ def export_data( return user_data -class GeneralVoiceoverApplicationModel(base_models.BaseModel): - """A general model for voiceover application of an entity. - - The ID of the voiceover application will be a random hashed value. 
- """ - - # We use the model id as a key in the Takeout dict. - ID_IS_USED_AS_TAKEOUT_KEY = True - - # The type of entity to which the user will be assigned as a voice artist - # once the application will get approved. - target_type = datastore_services.StringProperty(required=True, indexed=True) - # The ID of the entity to which the application belongs. - target_id = datastore_services.StringProperty(required=True, indexed=True) - # The language code for the voiceover audio. - language_code = ( - datastore_services.StringProperty(required=True, indexed=True)) - # The status of the application. One of: accepted, rejected, in-review. - status = datastore_services.StringProperty( - required=True, indexed=True, choices=STATUS_CHOICES) - # The HTML content written in the given language_code. - # This will typically be a snapshot of the content of the initial card of - # the target. - content = datastore_services.TextProperty(required=True) - # The filename of the voiceover audio. The filename will have - # datetime-randomId(length 6)-language_code.mp3 pattern. - filename = datastore_services.StringProperty(required=True, indexed=True) - # The ID of the author of the voiceover application. - author_id = datastore_services.StringProperty(required=True, indexed=True) - # The ID of the reviewer who accepted/rejected the voiceover application. - final_reviewer_id = datastore_services.StringProperty(indexed=True) - # The plain text message submitted by the reviewer while rejecting the - # application. - rejection_message = datastore_services.TextProperty() - - @staticmethod - def get_deletion_policy() -> base_models.DELETION_POLICY: - """Model contains data to pseudonymize corresponding to a user: - author_id, and final_reviewer_id fields. - """ - return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE - - @classmethod - def has_reference_to_user_id(cls, user_id: str) -> bool: - """Check whether GeneralVoiceoverApplicationModel exists for the user. - - Args: - user_id: str. 
The ID of the user whose data should be checked. - - Returns: - bool. Whether any models refer to the given user ID. - """ - return cls.query(datastore_services.any_of( - cls.author_id == user_id, cls.final_reviewer_id == user_id - )).get(keys_only=True) is not None - - @classmethod - def get_user_voiceover_applications( - cls, author_id: str, status: Optional[str] = None - ) -> Sequence[GeneralVoiceoverApplicationModel]: - """Returns a list of voiceover application submitted by the given user. - - Args: - author_id: str. The id of the user created the voiceover - application. - status: str|None. The status of the voiceover application. - If the status is None, the query will fetch all the - voiceover applications. - - Returns: - list(GeneralVoiceoverApplicationModel). The list of voiceover - applications submitted by the given user. - """ - if status in STATUS_CHOICES: - voiceover_application_query = cls.query( - datastore_services.all_of( - cls.author_id == author_id, cls.status == status)) - else: - voiceover_application_query = cls.query(cls.author_id == author_id) - - return voiceover_application_query.fetch() - - @classmethod - def get_reviewable_voiceover_applications( - cls, user_id: str - ) -> Sequence[GeneralVoiceoverApplicationModel]: - """Returns a list of voiceover application which a given user can - review. - - Args: - user_id: str. The id of the user trying to make this query. - As a user cannot review their own voiceover application, so the - voiceover application created by the user will be excluded. - - Returns: - list(GeneralVoiceoverApplicationModel). The list of voiceover - applications which the given user can review. 
- """ - return cls.query(datastore_services.all_of( - cls.author_id != user_id, - cls.status == STATUS_IN_REVIEW - )).fetch() - - @classmethod - def get_voiceover_applications( - cls, - target_type: str, - target_id: str, - language_code: str - ) -> Sequence[GeneralVoiceoverApplicationModel]: - """Returns a list of voiceover applications submitted for a give entity - in a given language. - - Args: - target_type: str. The type of entity. - target_id: str. The ID of the targeted entity. - language_code: str. The code of the language in which the voiceover - application is submitted. - - Returns: - list(GeneralVoiceoverApplicationModel). The list of voiceover - application which is submitted to a give entity in a given language. - """ - return cls.query(datastore_services.all_of( - cls.target_type == target_type, - cls.target_id == target_id, - cls.language_code == language_code - )).fetch() - - @staticmethod - def get_model_association_to_user( - ) -> base_models.MODEL_ASSOCIATION_TO_USER: - """Model is exported as multiple instances per user since there are - multiple voiceover applications relevant to a user. - """ - return base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER - - @classmethod - def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: - """Model contains data to export corresponding to a user.""" - return dict(super(cls, cls).get_export_policy(), **{ - 'target_type': base_models.EXPORT_POLICY.EXPORTED, - 'target_id': base_models.EXPORT_POLICY.EXPORTED, - 'language_code': base_models.EXPORT_POLICY.EXPORTED, - 'status': base_models.EXPORT_POLICY.EXPORTED, - 'content': base_models.EXPORT_POLICY.EXPORTED, - 'filename': base_models.EXPORT_POLICY.EXPORTED, - # The author_id and final_reviewer_id are not exported in order to - # keep internal ids private. 
- 'author_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, - 'final_reviewer_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, - 'rejection_message': base_models.EXPORT_POLICY.EXPORTED - }) - - @classmethod - def export_data(cls, user_id: str) -> Dict[str, Dict[str, Optional[str]]]: - """(Takeout) Exports the data from GeneralVoiceoverApplicationModel - into dict format. - - Args: - user_id: str. The ID of the user whose data should be exported. - - Returns: - dict. Dictionary of the data from GeneralVoiceoverApplicationModel. - """ - user_data = {} - - voiceover_models: Sequence[GeneralVoiceoverApplicationModel] = ( - cls.query(cls.author_id == user_id).fetch()) - - for voiceover_model in voiceover_models: - user_data[voiceover_model.id] = { - 'target_type': voiceover_model.target_type, - 'target_id': voiceover_model.target_id, - 'language_code': voiceover_model.language_code, - 'status': voiceover_model.status, - 'content': voiceover_model.content, - 'filename': voiceover_model.filename, - 'rejection_message': voiceover_model.rejection_message - } - return user_data - - class CommunityContributionStatsModel(base_models.BaseModel): """Records the contributor dashboard contribution stats. This includes the total number of reviewers for each suggestion type and the total number of @@ -829,19 +1009,18 @@ class CommunityContributionStatsModel(base_models.BaseModel): question_suggestion_count = ( datastore_services.IntegerProperty(required=True)) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get(). # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override @classmethod - def get(cls) -> Optional[CommunityContributionStatsModel]: # type: ignore[override] + def get(cls) -> CommunityContributionStatsModel: # type: ignore[override] """Gets the CommunityContributionStatsModel instance. 
If the CommunityContributionStatsModel does not exist yet, it is created. This method helps enforce that there should only ever be one instance of this model. Returns: - CommunityContributionStatsModel|None. The single model instance, - or None if no such model instance exists. + CommunityContributionStatsModel. The single model instance. """ community_contribution_stats_model = cls.get_by_id( COMMUNITY_CONTRIBUTION_STATS_MODEL_ID @@ -903,7 +1082,7 @@ class TranslationContributionStatsModel(base_models.BaseModel): """ # We use the model id as a key in the Takeout dict. - ID_IS_USED_AS_TAKEOUT_KEY = True + ID_IS_USED_AS_TAKEOUT_KEY: Literal[True] = True # The ISO 639-1 language code for which the translation contributions were # made. @@ -944,23 +1123,23 @@ class TranslationContributionStatsModel(base_models.BaseModel): @classmethod def create( - cls, - language_code: str, - contributor_user_id: str, - topic_id: str, - submitted_translations_count: int, - submitted_translation_word_count: int, - accepted_translations_count: int, - accepted_translations_without_reviewer_edits_count: int, - accepted_translation_word_count: int, - rejected_translations_count: int, - rejected_translation_word_count: int, - contribution_dates: List[datetime.date] + cls, + language_code: str, + contributor_user_id: str, + topic_id: str, + submitted_translations_count: int, + submitted_translation_word_count: int, + accepted_translations_count: int, + accepted_translations_without_reviewer_edits_count: int, + accepted_translation_word_count: int, + rejected_translations_count: int, + rejected_translation_word_count: int, + contribution_dates: List[datetime.date] ) -> str: """Creates a new TranslationContributionStatsModel instance and returns its ID. 
""" - entity_id = cls.generate_id( + entity_id = cls.construct_id( language_code, contributor_user_id, topic_id) entity = cls( id=entity_id, @@ -981,10 +1160,10 @@ def create( return entity_id @staticmethod - def generate_id( - language_code: str, contributor_user_id: str, topic_id: str + def construct_id( + language_code: str, contributor_user_id: str, topic_id: str ) -> str: - """Generates a unique ID for a TranslationContributionStatsModel + """Constructs a unique ID for a TranslationContributionStatsModel instance. Args: @@ -1001,7 +1180,7 @@ def generate_id( '%s.%s.%s' % (language_code, contributor_user_id, topic_id) ) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get(). # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override @classmethod @@ -1016,7 +1195,7 @@ def get( # type: ignore[override] TranslationContributionStatsModel, or None if no such model instance exists. """ - entity_id = cls.generate_id( + entity_id = cls.construct_id( language_code, contributor_user_id, topic_id) return cls.get_by_id(entity_id) @@ -1104,7 +1283,7 @@ def apply_deletion_policy(cls, user_id: str) -> None: @classmethod def export_data( - cls, user_id: str + cls, user_id: str ) -> Dict[str, Dict[str, Union[str, int, List[str]]]]: """Exports the data from TranslationContributionStatsModel into dict format for Takeout. 
@@ -1119,7 +1298,9 @@ def export_data( stats_models: Sequence[TranslationContributionStatsModel] = ( cls.get_all().filter(cls.contributor_user_id == user_id).fetch()) for model in stats_models: - user_data[model.id] = { + splitted_id = model.id.split('.') + id_without_user_id = '%s.%s' % (splitted_id[0], splitted_id[2]) + user_data[id_without_user_id] = { 'language_code': model.language_code, 'topic_id': model.topic_id, 'submitted_translations_count': ( @@ -1140,3 +1321,651 @@ def export_data( date.isoformat() for date in model.contribution_dates] } return user_data + + +class TranslationReviewStatsModel(base_models.BaseModel): + """Records the translation review stats. There is one instance of this model + per (language_code, reviewer_user_id, topic_id) tuple. Its IDs are in the + following structure: [language_code].[reviewer_user_id].[topic_id] + """ + + # We use the model id as a key in the Takeout dict. + ID_IS_USED_AS_TAKEOUT_KEY = True + + # The ISO 639-1 language code for which the translation reviews were + # made. + language_code = datastore_services.StringProperty( + required=True, indexed=True) + # The user ID of the translation reviewer. + reviewer_user_id = datastore_services.StringProperty( + required=True, indexed=True) + # The topic ID of the translation reviews. + topic_id = datastore_services.StringProperty(required=True, indexed=True) + # The number of reviewed translations. + reviewed_translations_count = datastore_services.IntegerProperty( + required=True, indexed=True) + # The total word count of reviewed translations. Excludes HTML tags and + # attributes. + reviewed_translation_word_count = datastore_services.IntegerProperty( + required=True, indexed=True) + # The number of accepted translations. + accepted_translations_count = datastore_services.IntegerProperty( + required=True, indexed=True) + # The number of accepted translations with reviewer edits. 
+ accepted_translations_with_reviewer_edits_count = ( + datastore_services.IntegerProperty(required=True, indexed=True)) + # The total word count of accepted translations. Excludes HTML tags and + # attributes. + accepted_translation_word_count = datastore_services.IntegerProperty( + required=True, indexed=True) + # The first date that the reviewer made a translation review. + first_contribution_date = datastore_services.DateProperty(indexed=True) + # The last date that the reviewer made a translation review. + last_contribution_date = datastore_services.DateProperty(indexed=True) + + @classmethod + def create( + cls, + language_code: str, + reviewer_user_id: str, + topic_id: str, + reviewed_translations_count: int, + reviewed_translation_word_count: int, + accepted_translations_count: int, + accepted_translations_with_reviewer_edits_count: int, + accepted_translation_word_count: int, + first_contribution_date: datetime.date, + last_contribution_date: datetime.date + ) -> str: + """Creates a new TranslationReviewStatsModel instance and returns + its ID. + """ + entity_id = cls.construct_id( + language_code, reviewer_user_id, topic_id) + entity = cls( + id=entity_id, + language_code=language_code, + reviewer_user_id=reviewer_user_id, + topic_id=topic_id, + reviewed_translations_count=reviewed_translations_count, + reviewed_translation_word_count=reviewed_translation_word_count, + accepted_translations_count=accepted_translations_count, + accepted_translations_with_reviewer_edits_count=( + accepted_translations_with_reviewer_edits_count), + accepted_translation_word_count=accepted_translation_word_count, + first_contribution_date=first_contribution_date, + last_contribution_date=last_contribution_date) + entity.update_timestamps() + entity.put() + return entity_id + + @staticmethod + def construct_id( + language_code: str, reviewer_user_id: str, topic_id: str + ) -> str: + """Constructs a unique ID for a TranslationReviewStatsModel + instance. 
+ + Args: + language_code: str. ISO 639-1 language code. + reviewer_user_id: str. User ID. + topic_id: str. Topic ID. + + Returns: + str. An ID of the form: + + [language_code].[reviewer_user_id].[topic_id] + """ + return ( + '%s.%s.%s' % (language_code, reviewer_user_id, topic_id) + ) + + # Here we use MyPy ignore because the signature of this method + # doesn't match with BaseModel.get(). + # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override + @classmethod + def get( # type: ignore[override] + cls, language_code: str, reviewer_user_id: str, topic_id: str + ) -> Optional[TranslationReviewStatsModel]: + """Gets the TranslationReviewStatsModel matching the supplied + language_code, reviewer_user_id, topic_id. + + Returns: + TranslationReviewStatsModel|None. The matching + TranslationReviewStatsModel, or None if no such model + instance exists. + """ + entity_id = cls.construct_id( + language_code, reviewer_user_id, topic_id) + return cls.get_by_id(entity_id) + + @classmethod + def get_all_by_user_id( + cls, user_id: str + ) -> Sequence[TranslationReviewStatsModel]: + """Gets all TranslationReviewStatsModel matching the supplied + user_id. + + Returns: + list(TranslationReviewStatsModel). The matching + TranslationReviewStatsModel. + """ + return cls.get_all().filter( + cls.reviewer_user_id == user_id + ).fetch(feconf.DEFAULT_SUGGESTION_QUERY_LIMIT) + + @classmethod + def has_reference_to_user_id(cls, user_id: str) -> bool: + """Check whether TranslationReviewStatsModel references the + supplied user. + + Args: + user_id: str. The ID of the user whose data should be checked. + + Returns: + bool. Whether any models refer to the given user ID. 
+ """ + return cls.query( + cls.reviewer_user_id == user_id + ).get(keys_only=True) is not None + + @classmethod + def get_deletion_policy(cls) -> base_models.DELETION_POLICY: + """Model contains corresponding to a user: reviewer_user_id.""" + return base_models.DELETION_POLICY.DELETE + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """Model is exported as multiple instances per user since there are + multiple languages and topics relevant to a user. + """ + return base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model contains data to export corresponding to a user.""" + return dict(super(cls, cls).get_export_policy(), **{ + 'language_code': + base_models.EXPORT_POLICY.EXPORTED, + # User ID is not exported in order to keep internal ids private. + 'reviewer_user_id': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id': + base_models.EXPORT_POLICY.EXPORTED, + 'reviewed_translations_count': + base_models.EXPORT_POLICY.EXPORTED, + 'reviewed_translation_word_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_translations_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_translations_with_reviewer_edits_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_translation_word_count': + base_models.EXPORT_POLICY.EXPORTED, + 'first_contribution_date': + base_models.EXPORT_POLICY.EXPORTED, + 'last_contribution_date': + base_models.EXPORT_POLICY.EXPORTED + }) + + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """Delete instances of TranslationReviewStatsModel for the user. + + Args: + user_id: str. The ID of the user whose data should be deleted. 
+ """ + datastore_services.delete_multi( + cls.query(cls.reviewer_user_id == user_id).fetch(keys_only=True)) + + @classmethod + def export_data( + cls, user_id: str + ) -> Dict[str, Dict[str, Union[str, int, List[str]]]]: + """Exports the data from TranslationReviewStatsModel into dict + format for Takeout. + + Args: + user_id: str. The ID of the user whose data should be exported. + + Returns: + dict. Dictionary of the data from TranslationReviewStatsModel. + """ + user_data = {} + stats_models: Sequence[TranslationReviewStatsModel] = ( + cls.get_all().filter(cls.reviewer_user_id == user_id).fetch()) + for model in stats_models: + splitted_id = model.id.split('.') + id_without_user_id = '%s.%s' % (splitted_id[0], splitted_id[2]) + user_data[id_without_user_id] = { + 'language_code': model.language_code, + 'topic_id': model.topic_id, + 'reviewed_translations_count': ( + model.reviewed_translations_count), + 'reviewed_translation_word_count': ( + model.reviewed_translation_word_count), + 'accepted_translations_count': ( + model.accepted_translations_count), + 'accepted_translations_with_reviewer_edits_count': ( + model.accepted_translations_with_reviewer_edits_count), + 'accepted_translation_word_count': ( + model.accepted_translation_word_count), + 'first_contribution_date': ( + model.first_contribution_date.isoformat()), + 'last_contribution_date': ( + model.last_contribution_date.isoformat()) + } + return user_data + + +class QuestionContributionStatsModel(base_models.BaseModel): + """Records the question contribution stats. There is one instance of this + model per (contributor_user_id, topic_id) tuple. Its IDs are in the + following structure: [contributor_user_id].[topic_id] + """ + + # We use the model id as a key in the Takeout dict. + ID_IS_USED_AS_TAKEOUT_KEY = True + + # The user ID of the question contributor. + contributor_user_id = datastore_services.StringProperty( + required=True, indexed=True) + # The topic ID of the question contribution. 
+ topic_id = datastore_services.StringProperty(required=True, indexed=True) + # The number of submitted questions. + submitted_questions_count = datastore_services.IntegerProperty( + required=True, indexed=True) + # The number of accepted questions. + accepted_questions_count = datastore_services.IntegerProperty( + required=True, indexed=True) + # The number of accepted questions without reviewer edits. + accepted_questions_without_reviewer_edits_count = ( + datastore_services.IntegerProperty(required=True, indexed=True)) + # The first date that the submitter made a question submission. + first_contribution_date = datastore_services.DateProperty(indexed=True) + # The last date that the submitter made a question submission. + last_contribution_date = datastore_services.DateProperty(indexed=True) + + @classmethod + def create( + cls, + contributor_user_id: str, + topic_id: str, + submitted_questions_count: int, + accepted_questions_count: int, + accepted_questions_without_reviewer_edits_count: int, + first_contribution_date: datetime.date, + last_contribution_date: datetime.date + ) -> str: + """Creates a new QuestionContributionStatsModel instance and returns + its ID. + """ + entity_id = cls.construct_id( + contributor_user_id, topic_id) + entity = cls( + id=entity_id, + contributor_user_id=contributor_user_id, + topic_id=topic_id, + submitted_questions_count=submitted_questions_count, + accepted_questions_count=accepted_questions_count, + accepted_questions_without_reviewer_edits_count=( + accepted_questions_without_reviewer_edits_count), + first_contribution_date=first_contribution_date, + last_contribution_date=last_contribution_date) + entity.update_timestamps() + entity.put() + return entity_id + + @staticmethod + def construct_id( + contributor_user_id: str, topic_id: str + ) -> str: + """Constructs a unique ID for a QuestionContributionStatsModel + instance. + + Args: + contributor_user_id: str. User ID. + topic_id: str. Topic ID. + + Returns: + str. 
An ID of the form: + + [contributor_user_id].[topic_id] + """ + return ( + '%s.%s' % (contributor_user_id, topic_id) + ) + + # Here we use MyPy ignore because the signature of this method + # doesn't match with BaseModel.get(). + # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override + @classmethod + def get( # type: ignore[override] + cls, contributor_user_id: str, topic_id: str + ) -> Optional[QuestionContributionStatsModel]: + """Gets the QuestionContributionStatsModel matching the supplied + contributor_user_id, topic_id. + + Returns: + QuestionContributionStatsModel|None. The matching + QuestionContributionStatsModel, or None if no such model + instance exists. + """ + entity_id = cls.construct_id( + contributor_user_id, topic_id) + return cls.get_by_id(entity_id) + + @classmethod + def get_all_by_user_id( + cls, user_id: str + ) -> Sequence[QuestionContributionStatsModel]: + """Gets all QuestionContributionStatsModel matching the supplied + user_id. + + Returns: + list(QuestionContributionStatsModel). The matching + QuestionContributionStatsModel. + """ + return cls.get_all().filter( + cls.contributor_user_id == user_id + ).fetch(feconf.DEFAULT_SUGGESTION_QUERY_LIMIT) + + @classmethod + def has_reference_to_user_id(cls, user_id: str) -> bool: + """Check whether QuestionContributionStatsModel references the + supplied user. + + Args: + user_id: str. The ID of the user whose data should be checked. + + Returns: + bool. Whether any models refer to the given user ID. 
+ """ + return cls.query( + cls.contributor_user_id == user_id + ).get(keys_only=True) is not None + + @classmethod + def get_deletion_policy(cls) -> base_models.DELETION_POLICY: + """Model contains corresponding to a user: contributor_user_id.""" + return base_models.DELETION_POLICY.DELETE + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """Model is exported as multiple instances per user since there are + multiple languages and topics relevant to a user. + """ + return base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model contains data to export corresponding to a user.""" + return dict(super(cls, cls).get_export_policy(), **{ + # User ID is not exported in order to keep internal ids private. + 'contributor_user_id': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id': + base_models.EXPORT_POLICY.EXPORTED, + 'submitted_questions_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_questions_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_questions_without_reviewer_edits_count': + base_models.EXPORT_POLICY.EXPORTED, + 'first_contribution_date': + base_models.EXPORT_POLICY.EXPORTED, + 'last_contribution_date': + base_models.EXPORT_POLICY.EXPORTED + }) + + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """Delete instances of QuestionContributionStatsModel for the user. + + Args: + user_id: str. The ID of the user whose data should be deleted. + """ + datastore_services.delete_multi( + cls.query(cls.contributor_user_id == user_id).fetch(keys_only=True)) + + @classmethod + def export_data( + cls, user_id: str + ) -> Dict[str, Dict[str, Union[str, int, List[str]]]]: + """Exports the data from QuestionContributionStatsModel into dict + format for Takeout. + + Args: + user_id: str. The ID of the user whose data should be exported. + + Returns: + dict. 
Dictionary of the data from QuestionContributionStatsModel. + """ + user_data = {} + stats_models: Sequence[QuestionContributionStatsModel] = ( + cls.get_all().filter(cls.contributor_user_id == user_id).fetch()) + for model in stats_models: + splitted_id = model.id.split('.') + id_without_user_id = '%s' % (splitted_id[1]) + user_data[id_without_user_id] = { + 'topic_id': model.topic_id, + 'submitted_questions_count': ( + model.submitted_questions_count), + 'accepted_questions_count': ( + model.accepted_questions_count), + 'accepted_questions_without_reviewer_edits_count': ( + model.accepted_questions_without_reviewer_edits_count), + 'first_contribution_date': ( + model.first_contribution_date.isoformat()), + 'last_contribution_date': ( + model.last_contribution_date.isoformat()) + } + return user_data + + +class QuestionReviewStatsModel(base_models.BaseModel): + """Records the question review stats. There is one instance of this model + per (reviewer_user_id, topic_id) tuple. Its IDs are in the following + structure: [reviewer_user_id].[topic_id] + """ + + # We use the model id as a key in the Takeout dict. + ID_IS_USED_AS_TAKEOUT_KEY = True + + # The user ID of the question reviewer. + reviewer_user_id = datastore_services.StringProperty( + required=True, indexed=True) + # The topic ID of the question. + topic_id = datastore_services.StringProperty(required=True, indexed=True) + # The number of reviewed questions. + reviewed_questions_count = datastore_services.IntegerProperty( + required=True, indexed=True) + # The number of accepted questions. + accepted_questions_count = datastore_services.IntegerProperty( + required=True, indexed=True) + # The number of accepted questions with reviewer edits. + accepted_questions_with_reviewer_edits_count = ( + datastore_services.IntegerProperty(required=True, indexed=True)) + # The first date that the reviewer made a question review. 
+ first_contribution_date = datastore_services.DateProperty(indexed=True) + # The last date that the reviewer made a question review. + last_contribution_date = datastore_services.DateProperty(indexed=True) + + @classmethod + def create( + cls, + reviewer_user_id: str, + topic_id: str, + reviewed_questions_count: int, + accepted_questions_count: int, + accepted_questions_with_reviewer_edits_count: int, + first_contribution_date: datetime.date, + last_contribution_date: datetime.date + ) -> str: + """Creates a new QuestionReviewStatsModel instance and returns + its ID. + """ + entity_id = cls.construct_id( + reviewer_user_id, topic_id) + entity = cls( + id=entity_id, + reviewer_user_id=reviewer_user_id, + topic_id=topic_id, + reviewed_questions_count=reviewed_questions_count, + accepted_questions_count=accepted_questions_count, + accepted_questions_with_reviewer_edits_count=( + accepted_questions_with_reviewer_edits_count), + first_contribution_date=first_contribution_date, + last_contribution_date=last_contribution_date) + entity.update_timestamps() + entity.put() + return entity_id + + @staticmethod + def construct_id( + reviewer_user_id: str, topic_id: str + ) -> str: + """Constructs a unique ID for a QuestionReviewStatsModel + instance. + + Args: + reviewer_user_id: str. User ID. + topic_id: str. Topic ID. + + Returns: + str. An ID of the form: + + [reviewer_user_id].[topic_id] + """ + return ( + '%s.%s' % (reviewer_user_id, topic_id) + ) + + # Here we use MyPy ignore because the signature of this method + # doesn't match with BaseModel.get(). + # https://mypy.readthedocs.io/en/stable/error_code_list.html#check-validity-of-overrides-override + @classmethod + def get( # type: ignore[override] + cls, reviewer_user_id: str, topic_id: str + ) -> Optional[QuestionReviewStatsModel]: + """Gets the QuestionReviewStatsModel matching the supplied + reviewer_user_id, topic_id. + + Returns: + QuestionReviewStatsModel|None. 
The matching + QuestionReviewStatsModel, or None if no such model + instance exists. + """ + entity_id = cls.construct_id( + reviewer_user_id, topic_id) + return cls.get_by_id(entity_id) + + @classmethod + def get_all_by_user_id( + cls, user_id: str + ) -> Sequence[QuestionReviewStatsModel]: + """Gets all QuestionReviewStatsModel matching the supplied + user_id. + + Returns: + list(QuestionReviewStatsModel). The matching + QuestionReviewStatsModel. + """ + return cls.get_all().filter( + cls.reviewer_user_id == user_id + ).fetch(feconf.DEFAULT_SUGGESTION_QUERY_LIMIT) + + @classmethod + def has_reference_to_user_id(cls, user_id: str) -> bool: + """Check whether QuestionReviewStatsModel references the + supplied user. + + Args: + user_id: str. The ID of the user whose data should be checked. + + Returns: + bool. Whether any models refer to the given user ID. + """ + return cls.query( + cls.reviewer_user_id == user_id + ).get(keys_only=True) is not None + + @classmethod + def get_deletion_policy(cls) -> base_models.DELETION_POLICY: + """Model contains corresponding to a user: reviewer_user_id.""" + return base_models.DELETION_POLICY.DELETE + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """Model is exported as multiple instances per user since there are + multiple languages and topics relevant to a user. + """ + return base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model contains data to export corresponding to a user.""" + return dict(super(cls, cls).get_export_policy(), **{ + # User ID is not exported in order to keep internal ids private. 
+ 'reviewer_user_id': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id': + base_models.EXPORT_POLICY.EXPORTED, + 'reviewed_questions_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_questions_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_questions_with_reviewer_edits_count': + base_models.EXPORT_POLICY.EXPORTED, + 'first_contribution_date': + base_models.EXPORT_POLICY.EXPORTED, + 'last_contribution_date': + base_models.EXPORT_POLICY.EXPORTED + }) + + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """Delete instances of QuestionReviewStatsModel for the user. + + Args: + user_id: str. The ID of the user whose data should be deleted. + """ + datastore_services.delete_multi( + cls.query(cls.reviewer_user_id == user_id).fetch(keys_only=True)) + + @classmethod + def export_data( + cls, user_id: str + ) -> Dict[str, Dict[str, Union[str, int, List[str]]]]: + """Exports the data from QuestionReviewStatsModel into dict + format for Takeout. + + Args: + user_id: str. The ID of the user whose data should be exported. + + Returns: + dict. Dictionary of the data from QuestionReviewStatsModel. 
+ """ + user_data = {} + stats_models: Sequence[QuestionReviewStatsModel] = ( + cls.get_all().filter(cls.reviewer_user_id == user_id).fetch()) + for model in stats_models: + splitted_id = model.id.split('.') + id_without_user_id = '%s' % (splitted_id[1]) + user_data[id_without_user_id] = { + 'topic_id': model.topic_id, + 'reviewed_questions_count': ( + model.reviewed_questions_count), + 'accepted_questions_count': ( + model.accepted_questions_count), + 'accepted_questions_with_reviewer_edits_count': ( + model.accepted_questions_with_reviewer_edits_count), + 'first_contribution_date': ( + model.first_contribution_date.isoformat()), + 'last_contribution_date': ( + model.last_contribution_date.isoformat()) + } + return user_data diff --git a/core/storage/suggestion/gae_models_test.py b/core/storage/suggestion/gae_models_test.py index 2f1b91415263..d7fc39ebf75d 100644 --- a/core/storage/suggestion/gae_models_test.py +++ b/core/storage/suggestion/gae_models_test.py @@ -21,33 +21,37 @@ import datetime from core import feconf +from core.constants import constants from core.platform import models from core.tests import test_utils -from typing import Any, Dict +from typing import Dict, Final, Mapping MYPY = False if MYPY: # pragma: no cover + # Here, change domain is imported only for type checking. 
+ from core.domain import change_domain # pylint: disable=invalid-import # isort:skip from mypy_imports import base_models from mypy_imports import suggestion_models -(base_models, suggestion_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.suggestion, models.NAMES.user]) +(base_models, suggestion_models, user_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.SUGGESTION, models.Names.USER +]) class SuggestionModelUnitTests(test_utils.GenericTestBase): """Tests for the suggestionModel class.""" - score_category = ( + score_category: str = ( suggestion_models.SCORE_TYPE_TRANSLATION + suggestion_models.SCORE_CATEGORY_DELIMITER + 'English') + topic_name = 'topic' target_id = 'exp1' target_version_at_submission = 1 - # TODO(#13523): Use of Any here in the type annotation below will - # be removed when change_cmd will be changed to - # TypedDict/Domain Object. - change_cmd: Dict[str, Any] = {} + change_cmd: Mapping[ + str, change_domain.AcceptableChangeDictTypes + ] = {} # Language code that would normally be derived from the change_cmd. 
translation_language_code = 'en' # Language code that would normally be derived from the question_dict in @@ -56,7 +60,7 @@ class SuggestionModelUnitTests(test_utils.GenericTestBase): mocked_datetime_utcnow = datetime.datetime(2020, 6, 15, 5) def setUp(self) -> None: - super(SuggestionModelUnitTests, self).setUp() + super().setUp() suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, feconf.ENTITY_TYPE_EXPLORATION, @@ -93,6 +97,41 @@ def setUp(self) -> None: 'reviewer_2', self.change_cmd, self.score_category, 'exploration.exp1.thread_5', None) + def test_get_all_in_review_translation_suggestions_by_exp_ids( + self) -> None: + model = suggestion_models.GeneralSuggestionModel + self.assertEqual( + model.get_in_review_translation_suggestions_by_exp_ids( + [self.target_id], 'en'), + [] + ) + suggestion_id = 'exploration.exp1.thread_6' + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + self.target_id, self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_1', + 'reviewer_3', self.change_cmd, self.score_category, + suggestion_id, 'en') + + created_suggestion_model = model.get_by_id(suggestion_id) + self.assertEqual( + model.get_in_review_translation_suggestions_by_exp_ids( + [self.target_id], 'en'), + [created_suggestion_model] + ) + + def test_get_all_user_created_suggestions_of_given_suggestion_type( + self) -> None: + model = suggestion_models.GeneralSuggestionModel + expected_suggestion_model = model.get_by_id( + 'exploration.exp1.thread_1') + self.assertEqual( + model.get_user_created_suggestions_of_suggestion_type( + feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, 'author_1'), + [expected_suggestion_model] + ) + def test_get_deletion_policy(self) -> None: self.assertEqual( suggestion_models.GeneralSuggestionModel.get_deletion_policy(), @@ -133,6 +172,46 @@ def test_score_type_contains_delimiter(self) -> None: self.assertTrue( 
suggestion_models.SCORE_CATEGORY_DELIMITER not in score_type) + def test_get_translation_suggestions_submitted_for_given_date_range( + self + ) -> None: + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + self.target_id, self.target_version_at_submission, + suggestion_models.STATUS_ACCEPTED, 'test_author', + 'reviewer_1', self.change_cmd, self.score_category, + 'exploration.exp1.thread_6', 'hi') + to_date = datetime.datetime.now() + from_date = to_date - datetime.timedelta(days=1) + + suggestions = ( + suggestion_models.GeneralSuggestionModel + .get_translation_suggestions_submitted_within_given_dates( + from_date, to_date, 'test_author', 'hi')) + + self.assertEqual(len(suggestions), 1) + + def test_get_question_suggestions_submitted_for_given_date_range( + self + ) -> None: + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_ADD_QUESTION, + feconf.ENTITY_TYPE_EXPLORATION, + self.target_id, self.target_version_at_submission, + suggestion_models.STATUS_ACCEPTED, 'test_author', + 'reviewer_1', self.change_cmd, self.score_category, + 'exploration.exp1.thread_6', 'hi') + to_date = datetime.datetime.now() + from_date = to_date - datetime.timedelta(days=1) + + suggestions = ( + suggestion_models.GeneralSuggestionModel + .get_question_suggestions_submitted_within_given_dates( + from_date, to_date, 'test_author')) + + self.assertEqual(len(suggestions), 1) + def test_create_new_object_succesfully(self) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, @@ -169,9 +248,9 @@ def test_create_new_object_succesfully(self) -> None: self.assertEqual(observed_suggestion_model.change_cmd, self.change_cmd) def test_create_suggestion_fails_if_id_collides_with_existing_one( - self + self ) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'There is already a suggestion 
with the given id: ' 'exploration.exp1.thread_1'): suggestion_models.GeneralSuggestionModel.create( @@ -191,7 +270,7 @@ def test_get_suggestions_by_type(self) -> None: queries)), 5) queries = [('suggestion_type', 'invalid_suggestion_type')] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Value \'invalid_suggestion_type\' for property' ' suggestion_type is not an allowed choice'): suggestion_models.GeneralSuggestionModel.query_suggestions(queries) @@ -295,7 +374,7 @@ def test_query_suggestions(self) -> None: ('target_id', self.target_id), ('invalid_field', 'value') ] - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Not allowed to query on field invalid_field'): suggestion_models.GeneralSuggestionModel.query_suggestions(queries) @@ -329,8 +408,8 @@ def test_query_suggestions_by_language(self) -> None: len(suggestion_models.GeneralSuggestionModel.query_suggestions( queries)), 1) - def test_get_translation_suggestions_in_review_ids_with_valid_exp( - self) -> None: + def test_get_in_review_translation_suggestions(self) -> None: + # Create two in-review translation suggestions. suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, feconf.ENTITY_TYPE_EXPLORATION, @@ -345,38 +424,42 @@ def test_get_translation_suggestions_in_review_ids_with_valid_exp( suggestion_models.STATUS_IN_REVIEW, 'author_4', 'reviewer_2', self.change_cmd, self.score_category, 'exploration.exp1.thread_7', self.translation_language_code) - - suggestion_ids = ( - suggestion_models.GeneralSuggestionModel - .get_translation_suggestions_in_review_ids_with_exp_id( - ['exp1'])) - - self.assertEqual(len(suggestion_ids), 3) - - def test_get_multiple_translation_suggestions_in_review(self) -> None: + # Create accepted and rejected suggestions that should not be returned. 
suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, feconf.ENTITY_TYPE_EXPLORATION, 'exp1', self.target_version_at_submission, - suggestion_models.STATUS_IN_REVIEW, 'author_3', + suggestion_models.STATUS_ACCEPTED, 'author_4', 'reviewer_2', self.change_cmd, self.score_category, - 'exploration.exp1.thread_6', self.translation_language_code) + 'exploration.exp1.thread_8', self.translation_language_code) suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, feconf.ENTITY_TYPE_EXPLORATION, 'exp1', self.target_version_at_submission, - suggestion_models.STATUS_IN_REVIEW, 'author_4', + suggestion_models.STATUS_REJECTED, 'author_4', 'reviewer_2', self.change_cmd, self.score_category, - 'exploration.exp1.thread_7', self.translation_language_code) + 'exploration.exp1.thread_9', self.translation_language_code) - suggestion_ids = ( - suggestion_models.GeneralSuggestionModel - .get_translation_suggestions_in_review_ids_with_exp_id( - ['exp1'])) suggestions = ( suggestion_models.GeneralSuggestionModel - .get_multiple_suggestions_from_suggestion_ids(suggestion_ids)) - self.assertEqual(len(suggestions), 3) + .get_in_review_translation_suggestions( + 'exp1', [self.translation_language_code])) + + self.assertEqual(len(suggestions), 2) + self.assertEqual(suggestions[0].target_id, 'exp1') + self.assertEqual( + suggestions[0].suggestion_type, + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT) + self.assertEqual( + suggestions[0].status, + suggestion_models.STATUS_IN_REVIEW) + self.assertEqual(suggestions[1].target_id, 'exp1') + self.assertEqual( + suggestions[1].suggestion_type, + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT) + self.assertEqual( + suggestions[1].status, + suggestion_models.STATUS_IN_REVIEW) def test_get_translation_suggestions_in_review_with_valid_exp(self) -> None: suggestion_models.GeneralSuggestionModel.create( @@ -408,13 +491,488 @@ def test_get_translation_suggestions_in_review_with_valid_exp(self) -> 
None: suggestions[0].status, suggestion_models.STATUS_IN_REVIEW) self.assertEqual(suggestions[1].target_id, 'exp1') + + def test_get_translation_suggestions_in_review_with_exp_ids_by_offset( + self + ) -> None: + limit = 1 + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_3', + 'reviewer_2', self.change_cmd, self.score_category, + 'exploration.exp1.thread_6', self.translation_language_code) + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_4', + 'reviewer_2', self.change_cmd, self.score_category, + 'exploration.exp1.thread_7', self.translation_language_code) + + suggestions, offset_1 = ( + suggestion_models + .GeneralSuggestionModel + .get_in_review_translation_suggestions_with_exp_ids_by_offset( + limit, 0, 'author_4', None, + [self.translation_language_code], ['exp1'])) + + self.assertEqual(len(suggestions), 1) + self.assertEqual(suggestions[0].target_id, 'exp1') + self.assertEqual(offset_1, 1) self.assertEqual( - suggestions[1].suggestion_type, + suggestions[0].suggestion_type, feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT) self.assertEqual( - suggestions[1].status, + suggestions[0].status, suggestion_models.STATUS_IN_REVIEW) + def test_get_translation_suggestions_in_review_with_exp_ids_by_offset_sorted( # pylint: disable=line-too-long + self + ) -> None: + suggestion_1_id = 'exploration.exp1.thread_6' + suggestion_2_id = 'exploration.exp1.thread_7' + suggestion_3_id = 'exploration.exp1.thread_8' + user_id = 'author1' + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 
'author_3', + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_1_id, self.translation_language_code) + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_4', + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_2_id, self.translation_language_code) + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, user_id, + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_3_id, self.translation_language_code) + + sorted_results, offset_1 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_with_exp_ids_by_offset( + limit=1, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE, + language_codes=[self.translation_language_code], + exp_ids=['exp1'])) + # Ruling out the possibility of None for mypy type checking. + assert sorted_results is not None + self.assertEqual(len(sorted_results), 1) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(offset_1, 2) + + sorted_results, offset_2 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_with_exp_ids_by_offset( + limit=2, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE, + language_codes=[self.translation_language_code], + exp_ids=['exp1'])) + # Ruling out the possibility of None for mypy type checking. 
+ assert sorted_results is not None + self.assertEqual(len(sorted_results), 2) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(sorted_results[1].id, suggestion_1_id) + self.assertEqual(offset_2, 3) + + sorted_results, offset_3 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_with_exp_ids_by_offset( + limit=10, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE, + language_codes=[self.translation_language_code], + exp_ids=['exp1'])) + # Ruling out the possibility of None for mypy type checking. + assert sorted_results is not None + self.assertEqual(len(sorted_results), 2) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(sorted_results[1].id, suggestion_1_id) + self.assertEqual(offset_3, 3) + + sorted_results, offset_4 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_with_exp_ids_by_offset( + limit=None, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE, + language_codes=[self.translation_language_code], + exp_ids=['exp1'])) + # Ruling out the possibility of None for mypy type checking. 
+ assert sorted_results is not None + self.assertEqual(len(sorted_results), 2) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(sorted_results[1].id, suggestion_1_id) + self.assertEqual(offset_4, 3) + + def test_get_in_review_translation_suggestions_by_offset(self) -> None: + suggestion_1_id = 'exploration.exp1.thread_6' + suggestion_2_id = 'exploration.exp1.thread_7' + user_id = 'author1' + limit = 1 + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_3', + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_1_id, self.translation_language_code) + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_4', + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_2_id, self.translation_language_code) + + results, offset_1 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_offset( + limit=limit, + offset=0, + user_id=user_id, + sort_key=None, + language_codes=[self.translation_language_code])) + # Ruling out the possibility of None for mypy type checking. + assert results is not None + self.assertEqual(len(results), limit) + self.assertEqual(results[0].id, suggestion_1_id) + self.assertEqual(offset_1, 1) + + results, offset_2 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_offset( + limit=limit, + offset=offset_1, + user_id=user_id, + sort_key=None, + language_codes=[self.translation_language_code])) + # Ruling out the possibility of None for mypy type checking. 
+ assert results is not None + self.assertEqual(len(results), limit) + self.assertEqual(results[0].id, suggestion_2_id) + self.assertEqual(offset_2, 2) + + results, offset_3 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_offset( + limit=limit, + offset=offset_2, + user_id=user_id, + sort_key=None, + language_codes=[self.translation_language_code])) + # Ruling out the possibility of None for mypy type checking. + assert results is not None + self.assertEqual(len(results), 0) + self.assertEqual(offset_3, 2) + + def test_get_in_review_translation_suggestions_by_offset_no_limit( + self + ) -> None: + suggestion_1_id = 'exploration.exp1.thread_6' + suggestion_2_id = 'exploration.exp1.thread_7' + user_id = 'author1' + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_3', + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_1_id, self.translation_language_code) + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_4', + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_2_id, self.translation_language_code) + + results, offset = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_offset( + limit=None, + offset=0, + user_id=user_id, + sort_key=None, + language_codes=[self.translation_language_code])) + + # Ruling out the possibility of None for mypy type checking. 
+ assert results is not None + self.assertEqual(len(results), 2) + self.assertEqual(results[0].id, suggestion_1_id) + self.assertEqual(results[1].id, suggestion_2_id) + self.assertEqual(offset, 2) + + def test_get_in_review_translation_suggestions_by_offset_sorted( + self + ) -> None: + suggestion_1_id = 'exploration.exp1.thread_6' + suggestion_2_id = 'exploration.exp1.thread_7' + suggestion_3_id = 'exploration.exp1.thread_8' + user_id = 'author1' + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_3', + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_1_id, self.translation_language_code) + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_4', + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_2_id, self.translation_language_code) + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, user_id, + 'reviewer_2', self.change_cmd, self.score_category, + suggestion_3_id, self.translation_language_code) + + sorted_results, offset_1 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_offset( + limit=1, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE, + language_codes=[self.translation_language_code])) + # Ruling out the possibility of None for mypy type checking. 
+ assert sorted_results is not None + self.assertEqual(len(sorted_results), 1) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(offset_1, 2) + + sorted_results, offset_2 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_offset( + limit=2, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE, + language_codes=[self.translation_language_code])) + # Ruling out the possibility of None for mypy type checking. + assert sorted_results is not None + self.assertEqual(len(sorted_results), 2) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(sorted_results[1].id, suggestion_1_id) + self.assertEqual(offset_2, 3) + + sorted_results, offset_3 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_offset( + limit=10, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE, + language_codes=[self.translation_language_code])) + # Ruling out the possibility of None for mypy type checking. + assert sorted_results is not None + self.assertEqual(len(sorted_results), 2) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(sorted_results[1].id, suggestion_1_id) + self.assertEqual(offset_3, 3) + + sorted_results, offset_4 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_translation_suggestions_by_offset( + limit=None, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE, + language_codes=[self.translation_language_code])) + # Ruling out the possibility of None for mypy type checking. 
+ assert sorted_results is not None + self.assertEqual(len(sorted_results), 2) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(sorted_results[1].id, suggestion_1_id) + self.assertEqual(offset_4, 3) + + def test_get_in_review_question_suggestions_by_offset(self) -> None: + suggestion_1_id = 'skill1.thread1' + suggestion_2_id = 'skill1.thread2' + suggestion_3_id = 'skill2.thread3' + user_id = 'author1' + limit = 1 + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_ADD_QUESTION, + feconf.ENTITY_TYPE_SKILL, + 'skill_1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_3', + 'reviewer_2', self.change_cmd, 'category1', + suggestion_1_id, self.question_language_code) + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_ADD_QUESTION, + feconf.ENTITY_TYPE_SKILL, + 'skill_1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_4', + 'reviewer_2', self.change_cmd, 'category1', + suggestion_2_id, self.question_language_code) + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_ADD_QUESTION, + feconf.ENTITY_TYPE_SKILL, + 'skill_1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author1', + 'reviewer_2', self.change_cmd, 'category1', + suggestion_3_id, self.question_language_code) + + results, offset_1 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_question_suggestions_by_offset( + limit=limit, + offset=0, + user_id=user_id, + sort_key=None)) + # Ruling out the possibility of None for mypy type checking. 
+ assert results is not None + self.assertEqual(len(results), limit) + self.assertEqual(results[0].id, suggestion_1_id) + self.assertEqual(offset_1, 1) + + results, offset_2 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_question_suggestions_by_offset( + limit=limit, + offset=offset_1, + user_id=user_id, + sort_key=None)) + # Ruling out the possibility of None for mypy type checking. + assert results is not None + self.assertEqual(len(results), limit) + self.assertEqual(results[0].id, suggestion_2_id) + self.assertEqual(offset_2, 2) + + results, offset_3 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_question_suggestions_by_offset( + limit=limit, + offset=offset_2, + user_id=user_id, + sort_key=None)) + # Ruling out the possibility of None for mypy type checking. + assert results is not None + self.assertEqual(len(results), 0) + self.assertEqual(offset_3, 2) + + sorted_results, offset_4 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_question_suggestions_by_offset( + limit=1, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE)) + # Ruling out the possibility of None for mypy type checking. + assert sorted_results is not None + self.assertEqual(len(sorted_results), 1) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(offset_4, 2) + + sorted_results, offset_5 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_question_suggestions_by_offset( + limit=2, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE)) + # Ruling out the possibility of None for mypy type checking. 
+ assert sorted_results is not None + self.assertEqual(len(sorted_results), 2) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(sorted_results[1].id, suggestion_1_id) + self.assertEqual(offset_5, 3) + + sorted_results, offset_6 = ( + suggestion_models.GeneralSuggestionModel + .get_in_review_question_suggestions_by_offset( + limit=10, + offset=0, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE)) + # Ruling out the possibility of None for mypy type checking. + assert sorted_results is not None + self.assertEqual(len(sorted_results), 2) + self.assertEqual(sorted_results[0].id, suggestion_2_id) + self.assertEqual(sorted_results[1].id, suggestion_1_id) + self.assertEqual(offset_6, 3) + + def test_user_created_suggestions_by_offset(self) -> None: + authored_translation_suggestion_id = 'exploration.exp1.thread_6' + non_authored_translation_suggestion_id = 'exploration.exp1.thread_7' + authored_question_suggestion_id = 'skill1.thread1' + user_id = 'author1' + limit = 1 + # User created translation suggestion. + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, user_id, + 'reviewer_2', self.change_cmd, self.score_category, + authored_translation_suggestion_id, self.translation_language_code) + # Translation suggestion created by a different user. + suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + feconf.ENTITY_TYPE_EXPLORATION, + 'exp1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, 'author_4', + 'reviewer_2', self.change_cmd, self.score_category, + non_authored_translation_suggestion_id, + self.translation_language_code) + # User created question suggestion. 
+ suggestion_models.GeneralSuggestionModel.create( + feconf.SUGGESTION_TYPE_ADD_QUESTION, + feconf.ENTITY_TYPE_SKILL, + 'skill_1', self.target_version_at_submission, + suggestion_models.STATUS_IN_REVIEW, user_id, + 'reviewer_2', self.change_cmd, 'category1', + authored_question_suggestion_id, self.question_language_code) + + results, translation_suggestion_offset = ( + suggestion_models.GeneralSuggestionModel + .get_user_created_suggestions_by_offset( + limit=limit, + offset=0, + suggestion_type=feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, + user_id=user_id, + sort_key=constants.SUGGESTIONS_SORT_KEY_DATE)) + # Ruling out the possibility of None for mypy type checking. + assert results is not None + self.assertEqual(len(results), limit) + self.assertEqual(results[0].id, authored_translation_suggestion_id) + self.assertEqual(translation_suggestion_offset, 1) + + results, question_suggestion_offset = ( + suggestion_models.GeneralSuggestionModel + .get_user_created_suggestions_by_offset( + limit=limit, + offset=0, + suggestion_type=feconf.SUGGESTION_TYPE_ADD_QUESTION, + user_id=user_id, + sort_key=None)) + # Ruling out the possibility of None for mypy type checking. 
+ assert results is not None + self.assertEqual(len(results), limit) + self.assertEqual(results[0].id, authored_question_suggestion_id) + self.assertEqual(question_suggestion_offset, 1) + def test_get_translation_suggestions_in_review_with_exp_id_with_invalid_exp( self ) -> None: @@ -425,7 +983,7 @@ def test_get_translation_suggestions_in_review_with_exp_id_with_invalid_exp( self.assertEqual(len(suggestions), 0) def test_get_translation_suggestion_ids_with_exp_ids_with_one_exp( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -443,7 +1001,7 @@ def test_get_translation_suggestion_ids_with_exp_ids_with_one_exp( ['exp1'])), 1) def test_get_exp_translation_suggestions_in_review_returns_limited_values( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -469,7 +1027,7 @@ def test_get_exp_translation_suggestions_in_review_returns_limited_values( self.assertEqual(len(suggestions), 1) def test_get_exp_translation_suggestions_in_review_for_resolved_suggestion_returns_no_items( # pylint: disable=line-too-long - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -494,7 +1052,7 @@ def test_get_exp_translation_suggestions_in_review_for_resolved_suggestion_retur self.assertEqual(len(suggestions), 0) def test_get_exp_translation_suggestions_in_review_for_non_translation_suggestion_returns_no_items( # pylint: disable=line-too-long - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_ADD_QUESTION, @@ -519,7 +1077,7 @@ def test_get_exp_translation_suggestions_in_review_for_non_translation_suggestio self.assertEqual(len(suggestions), 0) def test_get_exp_translation_suggestions_in_review_for_different_language_code_returns_no_items( # pylint: disable=line-too-long - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( 
feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -536,7 +1094,7 @@ def test_get_exp_translation_suggestions_in_review_for_different_language_code_r self.assertEqual(len(suggestions), 0) def test_get_translation_suggestion_ids_with_exp_ids_with_multiple_exps( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -561,7 +1119,7 @@ def test_get_translation_suggestion_ids_with_exp_ids_with_multiple_exps( ['exp2', 'exp3'])), 2) def test_get_translation_suggestion_ids_with_exp_ids_with_invalid_exp( - self + self ) -> None: # Assert that there are no translation suggestions with an invalid # exploration id found. @@ -615,11 +1173,11 @@ def test_get_all_stale_suggestion_ids(self) -> None: .get_all_stale_suggestion_ids()), 0) def test_get__suggestions_waiting_too_long_raises_if_suggestion_types_empty( - self + self ) -> None: with self.swap( feconf, 'CONTRIBUTOR_DASHBOARD_SUGGESTION_TYPES', []): - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Expected the suggestion types offered on the Contributor ' 'Dashboard to be nonempty.'): @@ -629,7 +1187,7 @@ def test_get__suggestions_waiting_too_long_raises_if_suggestion_types_empty( ) def test_get_suggestions_waiting_too_long_if_not_contributor_suggestion( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, @@ -656,7 +1214,7 @@ def test_get_suggestions_waiting_too_long_if_not_contributor_suggestion( self.assertEqual(len(suggestions_waiting_too_long_for_review), 0) def test_get_suggestions_waiting_too_long_returns_empty_if_neg_timedelta( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -678,7 +1236,7 @@ def test_get_suggestions_waiting_too_long_returns_empty_if_neg_timedelta( self.assertEqual(len(suggestions_waiting_too_long_for_review), 0) def 
test_get_suggestions_waiting_too_long_if_suggestions_waited_less_limit( - self + self ) -> None: with self.mock_datetime_utcnow(self.mocked_datetime_utcnow): suggestion_models.GeneralSuggestionModel.create( @@ -706,7 +1264,7 @@ def test_get_suggestions_waiting_too_long_if_suggestions_waited_less_limit( self.assertEqual(len(suggestions_waiting_too_long_for_review), 0) def test_get_suggestions_waiting_too_long_if_suggestion_waited_limit( - self + self ) -> None: with self.mock_datetime_utcnow(self.mocked_datetime_utcnow): suggestion_models.GeneralSuggestionModel.create( @@ -735,7 +1293,7 @@ def test_get_suggestions_waiting_too_long_if_suggestion_waited_limit( self.assertEqual(len(suggestions_waiting_too_long_for_review), 0) def test_get_suggestions_waiting_too_long_if_suggestion_waited_past_limit( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -756,7 +1314,7 @@ def test_get_suggestions_waiting_too_long_if_suggestion_waited_past_limit( self.assertEqual(len(suggestions_waiting_too_long_for_review), 1) def test_get_suggestions_waiting_too_long_with_diff_review_wait_times( - self + self ) -> None: with self.mock_datetime_utcnow(self.mocked_datetime_utcnow): suggestion_models.GeneralSuggestionModel.create( @@ -799,7 +1357,7 @@ def test_get_suggestions_waiting_too_long_with_diff_review_wait_times( 'exploration.exp1.thread1') def test_get_suggestions_waiting_too_long_returns_in_correct_wait_order( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -906,7 +1464,7 @@ def test_get_in_review_suggestions_in_score_categories(self) -> None: suggestion_models.GeneralSuggestionModel .get_in_review_suggestions_in_score_categories( ['category1', 'category_invalid'], 'author_2')), 1) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'Received empty list of score categories'): 
self.assertEqual(len( suggestion_models.GeneralSuggestionModel @@ -971,7 +1529,7 @@ def test_get_question_suggestions_waiting_longest_for_review(self) -> None: self.assertEqual(question_suggestion_models[2].id, 'skill3.thread1') def test_get_translation_suggestions_waiting_longest_for_review_per_lang( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -1035,7 +1593,7 @@ def test_get_translation_suggestions_waiting_longest_for_review_per_lang( 'exploration.exp4.thread1') def test_get_translation_suggestions_waiting_longest_for_review_wrong_lang( - self + self ) -> None: translation_suggestion_models = ( suggestion_models.GeneralSuggestionModel @@ -1047,7 +1605,7 @@ def test_get_translation_suggestions_waiting_longest_for_review_wrong_lang( self.assertEqual(len(translation_suggestion_models), 0) def test_get_translation_suggestions_waiting_longest_for_review_max_fetch( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, @@ -1080,7 +1638,7 @@ def test_get_translation_suggestions_waiting_longest_for_review_max_fetch( translation_suggestion_models[0].id, 'exploration.exp1.thread1') def test_get_question_suggestions_waiting_longest_for_review_max_fetch( - self + self ) -> None: suggestion_models.GeneralSuggestionModel.create( feconf.SUGGESTION_TYPE_ADD_QUESTION, @@ -1166,229 +1724,52 @@ def test_export_data_nontrivial(self) -> None: self.assertEqual(user_data, test_data) + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'suggestion_type': base_models.EXPORT_POLICY.EXPORTED, + 'target_type': base_models.EXPORT_POLICY.EXPORTED, + 'target_id': base_models.EXPORT_POLICY.EXPORTED, + 'target_version_at_submission': + base_models.EXPORT_POLICY.EXPORTED, + 
'status': base_models.EXPORT_POLICY.EXPORTED, + 'author_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'final_reviewer_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'change_cmd': base_models.EXPORT_POLICY.EXPORTED, + 'score_category': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_code': base_models.EXPORT_POLICY.EXPORTED, + 'edited_by_reviewer': base_models.EXPORT_POLICY.EXPORTED + } + model = suggestion_models.GeneralSuggestionModel + self.assertEqual(model.get_export_policy(), expected_dict) -class GeneralVoiceoverApplicationModelUnitTests(test_utils.GenericTestBase): - """Tests for the GeneralVoiceoverApplicationModel class.""" - - def test_get_deletion_policy(self) -> None: + def test_get_model_association_to_user(self) -> None: + model = suggestion_models.GeneralSuggestionModel self.assertEqual( - suggestion_models.GeneralSuggestionModel.get_deletion_policy(), - base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE) - - def test_has_reference_to_user_id_author(self) -> None: - self.assertFalse( - suggestion_models.GeneralVoiceoverApplicationModel - .has_reference_to_user_id('author_1')) - - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_id', - target_type='exploration', - target_id='exp_id', - status=suggestion_models.STATUS_IN_REVIEW, - author_id='author_1', - final_reviewer_id=None, - language_code='en', - filename='application_audio.mp3', - content='

    Some content

    ', - rejection_message=None).put() - - self.assertTrue( - suggestion_models.GeneralVoiceoverApplicationModel - .has_reference_to_user_id('author_1')) - self.assertFalse( - suggestion_models.GeneralVoiceoverApplicationModel - .has_reference_to_user_id('author_2')) - - def test_get_user_voiceover_applications(self) -> None: - author_id = 'author' - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_user_voiceover_applications(author_id)) - self.assertEqual(applicant_models, []) - - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_id', - target_type='exploration', - target_id='exp_id', - status=suggestion_models.STATUS_IN_REVIEW, - author_id=author_id, - final_reviewer_id=None, - language_code='en', - filename='application_audio.mp3', - content='

    Some content

    ', - rejection_message=None).put() - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_user_voiceover_applications(author_id)) - self.assertEqual(len(applicant_models), 1) - self.assertEqual(applicant_models[0].id, 'application_id') - - def test_get_user_voiceover_applications_with_status(self) -> None: - author_id = 'author' - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_user_voiceover_applications( - author_id, status=suggestion_models.STATUS_IN_REVIEW)) - self.assertEqual(applicant_models, []) - - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_id', - target_type='exploration', - target_id='exp_id', - status=suggestion_models.STATUS_IN_REVIEW, - author_id=author_id, - final_reviewer_id=None, - language_code='en', - filename='application_audio.mp3', - content='

    Some content

    ', - rejection_message=None).put() - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_user_voiceover_applications( - author_id, status=suggestion_models.STATUS_IN_REVIEW)) - self.assertEqual(len(applicant_models), 1) - self.assertEqual(applicant_models[0].id, 'application_id') - - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_user_voiceover_applications( - author_id, status=suggestion_models.STATUS_REJECTED)) - self.assertEqual(applicant_models, []) - - def test_get_reviewable_voiceover_applications(self) -> None: - author_id = 'author' - reviewer_id = 'reviewer_id' - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_reviewable_voiceover_applications(reviewer_id)) - self.assertEqual(applicant_models, []) - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_reviewable_voiceover_applications(author_id)) - self.assertEqual(applicant_models, []) - - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_id', - target_type='exploration', - target_id='exp_id', - status=suggestion_models.STATUS_IN_REVIEW, - author_id=author_id, - final_reviewer_id=None, - language_code='en', - filename='application_audio.mp3', - content='

    Some content

    ', - rejection_message=None).put() - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_reviewable_voiceover_applications(reviewer_id)) - self.assertEqual(len(applicant_models), 1) - self.assertEqual(applicant_models[0].id, 'application_id') - - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_reviewable_voiceover_applications(author_id)) - self.assertEqual(applicant_models, []) - - def test_get_voiceover_applications(self) -> None: - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_id', - target_type='exploration', - target_id='exp_id', - status=suggestion_models.STATUS_IN_REVIEW, - author_id='author_id', - final_reviewer_id=None, - language_code='en', - filename='application_audio.mp3', - content='

    Some content

    ', - rejection_message=None).put() - - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_voiceover_applications('exploration', 'exp_id', 'en')) - self.assertEqual(len(applicant_models), 1) - self.assertEqual(applicant_models[0].id, 'application_id') - - applicant_models = ( - suggestion_models.GeneralVoiceoverApplicationModel - .get_voiceover_applications('exploration', 'exp_id', 'hi')) - self.assertEqual(len(applicant_models), 0) - - def test_export_data_trivial(self) -> None: - user_data = ( - suggestion_models.GeneralVoiceoverApplicationModel - .export_data('non_existent_user')) - test_data: Dict[str, str] = {} - self.assertEqual(user_data, test_data) - - def test_export_data_nontrivial(self) -> None: - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_1_id', - target_type='exploration', - target_id='exp_id', - status=suggestion_models.STATUS_IN_REVIEW, - author_id='author_1', - final_reviewer_id='reviewer_id', - language_code='en', - filename='application_audio.mp3', - content='

    Some content

    ', - rejection_message=None).put() - - suggestion_models.GeneralVoiceoverApplicationModel( - id='application_2_id', - target_type='exploration', - target_id='exp_id', - status=suggestion_models.STATUS_IN_REVIEW, - author_id='author_1', - final_reviewer_id=None, - language_code='en', - filename='application_audio.mp3', - content='

    Some content

    ', - rejection_message=None).put() - - expected_data = { - 'application_1_id': { - 'target_type': 'exploration', - 'target_id': 'exp_id', - 'status': 'review', - 'language_code': 'en', - 'filename': 'application_audio.mp3', - 'content': '

    Some content

    ', - 'rejection_message': None - }, - 'application_2_id': { - 'target_type': 'exploration', - 'target_id': 'exp_id', - 'status': 'review', - 'language_code': 'en', - 'filename': 'application_audio.mp3', - 'content': '

    Some content

    ', - 'rejection_message': None - } - } - user_data = ( - suggestion_models.GeneralVoiceoverApplicationModel - .export_data('author_1')) - self.assertEqual(expected_data, user_data) + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) class CommunityContributionStatsModelUnitTests(test_utils.GenericTestBase): """Tests the CommunityContributionStatsModel class.""" - translation_reviewer_counts_by_lang_code = { + translation_reviewer_counts_by_lang_code: Dict[str, int] = { 'hi': 0, 'en': 1 } - translation_suggestion_counts_by_lang_code = { + translation_suggestion_counts_by_lang_code: Dict[str, int] = { 'fr': 6, 'en': 5 } - question_reviewer_count = 1 - question_suggestion_count = 4 + question_reviewer_count: int = 1 + question_suggestion_count: int = 4 def test_get_returns_community_contribution_stats_model_when_it_exists( - self + self ) -> None: suggestion_models.CommunityContributionStatsModel( id=suggestion_models.COMMUNITY_CONTRIBUTION_STATS_MODEL_ID, @@ -1404,8 +1785,6 @@ def test_get_returns_community_contribution_stats_model_when_it_exists( suggestion_models.CommunityContributionStatsModel.get() ) - # Ruling out the possibility of None for mypy type checking. - assert community_contribution_stats_model is not None self.assertEqual( community_contribution_stats_model.id, suggestion_models.COMMUNITY_CONTRIBUTION_STATS_MODEL_ID @@ -1434,7 +1813,7 @@ def test_get_returns_community_contribution_stats_model_when_it_exists( ) def test_get_returns_new_community_contribution_stats_model_if_not_found( - self + self ) -> None: """If the model has not been created yet, get should create the model with default values. @@ -1442,9 +1821,6 @@ def test_get_returns_new_community_contribution_stats_model_if_not_found( community_contribution_stats_model = ( suggestion_models.CommunityContributionStatsModel.get() ) - - # Ruling out the possibility of None for mypy type checking. 
- assert community_contribution_stats_model is not None self.assertEqual( community_contribution_stats_model.id, suggestion_models.COMMUNITY_CONTRIBUTION_STATS_MODEL_ID @@ -1477,26 +1853,81 @@ def test_get_deletion_policy_returns_not_applicable(self) -> None: base_models.DELETION_POLICY.NOT_APPLICABLE ) + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'translation_reviewer_counts_by_lang_code': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'translation_suggestion_counts_by_lang_code': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'question_reviewer_count': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'question_suggestion_count': + base_models.EXPORT_POLICY.NOT_APPLICABLE + } + model = suggestion_models.CommunityContributionStatsModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = suggestion_models.CommunityContributionStatsModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + class TranslationContributionStatsModelUnitTests(test_utils.GenericTestBase): """Tests the TranslationContributionStatsModel class.""" - LANGUAGE_CODE = 'es' - CONTRIBUTOR_USER_ID = 'user_id' - TOPIC_ID = 'topic_id' - SUBMITTED_TRANSLATIONS_COUNT = 2 - SUBMITTED_TRANSLATION_WORD_COUNT = 100 - ACCEPTED_TRANSLATIONS_COUNT = 1 - ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT = 0 - ACCEPTED_TRANSLATION_WORD_COUNT = 50 - REJECTED_TRANSLATIONS_COUNT = 0 - REJECTED_TRANSLATION_WORD_COUNT = 0 + LANGUAGE_CODE: Final = 'es' + CONTRIBUTOR_USER_ID: Final = 'uid_01234567890123456789012345678912' + TOPIC_ID: Final = 'topic_id' + SUBMITTED_TRANSLATIONS_COUNT: Final = 2 + SUBMITTED_TRANSLATION_WORD_COUNT: Final = 100 + ACCEPTED_TRANSLATIONS_COUNT: Final = 1 
+ ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT: Final = 0 + ACCEPTED_TRANSLATION_WORD_COUNT: Final = 50 + REJECTED_TRANSLATIONS_COUNT: Final = 0 + REJECTED_TRANSLATION_WORD_COUNT: Final = 0 # Timestamp dates in sec since epoch for Mar 19 2021 UTC. - CONTRIBUTION_DATES = [ + CONTRIBUTION_DATES: Final = [ datetime.date.fromtimestamp(1616173836), datetime.date.fromtimestamp(1616173837) ] + def test_get_all_model_instances_matching_the_given_user_id(self) -> None: + model = suggestion_models.TranslationContributionStatsModel + self.assertEqual( + model.get_all_by_user_id(self.CONTRIBUTOR_USER_ID), []) + + model.create( + language_code=self.LANGUAGE_CODE, + contributor_user_id=self.CONTRIBUTOR_USER_ID, + topic_id=self.TOPIC_ID, + submitted_translations_count=self.SUBMITTED_TRANSLATIONS_COUNT, + submitted_translation_word_count=( + self.SUBMITTED_TRANSLATION_WORD_COUNT), + accepted_translations_count=self.ACCEPTED_TRANSLATIONS_COUNT, + accepted_translations_without_reviewer_edits_count=( + self.ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT), + accepted_translation_word_count=( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + rejected_translations_count=self.REJECTED_TRANSLATIONS_COUNT, + rejected_translation_word_count=( + self.REJECTED_TRANSLATION_WORD_COUNT), + contribution_dates=self.CONTRIBUTION_DATES + ) + translation_contribution_stats_model = ( + model.get( + self.LANGUAGE_CODE, self.CONTRIBUTOR_USER_ID, self.TOPIC_ID + ) + ) + self.assertEqual( + model.get_all_by_user_id(self.CONTRIBUTOR_USER_ID), + [translation_contribution_stats_model] + ) + def test_get_returns_model_when_it_exists(self) -> None: suggestion_models.TranslationContributionStatsModel.create( language_code=self.LANGUAGE_CODE, @@ -1585,6 +2016,40 @@ def test_get_deletion_policy(self) -> None: ), base_models.DELETION_POLICY.DELETE) + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_code': base_models.EXPORT_POLICY.EXPORTED, + 'contributor_user_id': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id': base_models.EXPORT_POLICY.EXPORTED, + 'submitted_translations_count': + base_models.EXPORT_POLICY.EXPORTED, + 'submitted_translation_word_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_translations_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_translations_without_reviewer_edits_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_translation_word_count': + base_models.EXPORT_POLICY.EXPORTED, + 'rejected_translations_count': + base_models.EXPORT_POLICY.EXPORTED, + 'rejected_translation_word_count': + base_models.EXPORT_POLICY.EXPORTED, + 'contribution_dates': base_models.EXPORT_POLICY.EXPORTED + } + model = suggestion_models.TranslationContributionStatsModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = suggestion_models.TranslationContributionStatsModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + def test_apply_deletion_policy(self) -> None: suggestion_models.TranslationContributionStatsModel.create( language_code=self.LANGUAGE_CODE, @@ -1661,8 +2126,14 @@ def test_export_data_nontrivial(self) -> None: ) dates_in_iso_format = [ date.isoformat() for date in self.CONTRIBUTION_DATES] + model_1_id_without_user_id = model_1_id.replace( + '.%s.' % self.CONTRIBUTOR_USER_ID, '.' + ) + model_2_id_without_user_id = model_2_id.replace( + '.%s.' % self.CONTRIBUTOR_USER_ID, '.' 
+ ) expected_data = { - model_1_id: { + model_1_id_without_user_id: { 'language_code': self.LANGUAGE_CODE, 'topic_id': self.TOPIC_ID, 'submitted_translations_count': ( @@ -1681,7 +2152,7 @@ def test_export_data_nontrivial(self) -> None: self.REJECTED_TRANSLATION_WORD_COUNT), 'contribution_dates': dates_in_iso_format }, - model_2_id: { + model_2_id_without_user_id: { 'language_code': self.LANGUAGE_CODE, 'topic_id': topic_id_2, 'submitted_translations_count': ( @@ -1707,3 +2178,795 @@ def test_export_data_nontrivial(self) -> None: .export_data(self.CONTRIBUTOR_USER_ID)) self.assertEqual(expected_data, user_data) + + +class TranslationReviewStatsModelUnitTests(test_utils.GenericTestBase): + """Tests the TranslationContributionStatsModel class.""" + + LANGUAGE_CODE = 'es' + REVIEWER_USER_ID = 'uid_01234567890123456789012345678912' + TOPIC_ID = 'topic_id' + REVIEWED_TRANSLATIONS_COUNT = 2 + REVIEWED_TRANSLATION_WORD_COUNT = 100 + ACCEPTED_TRANSLATIONS_COUNT = 1 + ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT = 0 + ACCEPTED_TRANSLATION_WORD_COUNT = 50 + FIRST_CONTRIBUTION_DATE = datetime.date.fromtimestamp(1616173836) + LAST_CONTRIBUTION_DATE = datetime.date.fromtimestamp(1616173836) + + def test_get_returns_model_when_it_exists(self) -> None: + suggestion_models.TranslationReviewStatsModel.create( + language_code=self.LANGUAGE_CODE, + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=self.TOPIC_ID, + reviewed_translations_count=self.REVIEWED_TRANSLATIONS_COUNT, + reviewed_translation_word_count=( + self.REVIEWED_TRANSLATION_WORD_COUNT), + accepted_translations_count=self.ACCEPTED_TRANSLATIONS_COUNT, + accepted_translations_with_reviewer_edits_count=( + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + accepted_translation_word_count=( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + + translation_review_stats_model = ( + 
suggestion_models.TranslationReviewStatsModel.get( + self.LANGUAGE_CODE, self.REVIEWER_USER_ID, self.TOPIC_ID + ) + ) + + # Ruling out the possibility of None for mypy type checking. + assert translation_review_stats_model is not None + self.assertEqual( + translation_review_stats_model.language_code, + self.LANGUAGE_CODE + ) + self.assertEqual( + translation_review_stats_model.reviewer_user_id, + self.REVIEWER_USER_ID + ) + self.assertEqual( + translation_review_stats_model.reviewed_translations_count, + self.REVIEWED_TRANSLATIONS_COUNT + ) + self.assertEqual( + ( + translation_review_stats_model + .reviewed_translation_word_count + ), + self.REVIEWED_TRANSLATION_WORD_COUNT + ) + self.assertEqual( + translation_review_stats_model.accepted_translations_count, + self.ACCEPTED_TRANSLATIONS_COUNT + ) + self.assertEqual( + ( + translation_review_stats_model + .accepted_translations_with_reviewer_edits_count + ), + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT + ) + self.assertEqual( + ( + translation_review_stats_model + .accepted_translation_word_count + ), + self.ACCEPTED_TRANSLATION_WORD_COUNT + ) + self.assertEqual( + translation_review_stats_model.first_contribution_date, + self.FIRST_CONTRIBUTION_DATE + ) + self.assertEqual( + translation_review_stats_model.last_contribution_date, + self.LAST_CONTRIBUTION_DATE + ) + + def test_get_deletion_policy(self) -> None: + self.assertEqual( + ( + suggestion_models.TranslationReviewStatsModel + .get_deletion_policy() + ), + base_models.DELETION_POLICY.DELETE) + + def test_get_all_by_user_id(self) -> None: + suggestion_models.TranslationReviewStatsModel.create( + language_code=self.LANGUAGE_CODE, + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=self.TOPIC_ID, + reviewed_translations_count=self.REVIEWED_TRANSLATIONS_COUNT, + reviewed_translation_word_count=( + self.REVIEWED_TRANSLATION_WORD_COUNT), + accepted_translations_count=self.ACCEPTED_TRANSLATIONS_COUNT, + accepted_translations_with_reviewer_edits_count=( + 
self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + accepted_translation_word_count=( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + + translation_review_stats_models = ( + suggestion_models.TranslationReviewStatsModel.get_all_by_user_id( + self.REVIEWER_USER_ID + ) + ) + + # Ruling out the possibility of None for mypy type checking. + assert translation_review_stats_models is not None + + self.assertEqual( + len(translation_review_stats_models), + 1 + ) + + translation_review_stats_model = translation_review_stats_models[0] + + self.assertEqual( + translation_review_stats_model.language_code, + self.LANGUAGE_CODE + ) + self.assertEqual( + translation_review_stats_model.reviewer_user_id, + self.REVIEWER_USER_ID + ) + self.assertEqual( + translation_review_stats_model.reviewed_translations_count, + self.REVIEWED_TRANSLATIONS_COUNT + ) + self.assertEqual( + ( + translation_review_stats_model + .reviewed_translation_word_count + ), + self.REVIEWED_TRANSLATION_WORD_COUNT + ) + self.assertEqual( + translation_review_stats_model.accepted_translations_count, + self.ACCEPTED_TRANSLATIONS_COUNT + ) + self.assertEqual( + ( + translation_review_stats_model + .accepted_translations_with_reviewer_edits_count + ), + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT + ) + self.assertEqual( + ( + translation_review_stats_model + .accepted_translation_word_count + ), + self.ACCEPTED_TRANSLATION_WORD_COUNT + ) + self.assertEqual( + translation_review_stats_model.first_contribution_date, + self.FIRST_CONTRIBUTION_DATE + ) + self.assertEqual( + translation_review_stats_model.last_contribution_date, + self.LAST_CONTRIBUTION_DATE + ) + + def test_apply_deletion_policy(self) -> None: + suggestion_models.TranslationReviewStatsModel.create( + language_code=self.LANGUAGE_CODE, + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=self.TOPIC_ID, + 
reviewed_translations_count=self.REVIEWED_TRANSLATIONS_COUNT, + reviewed_translation_word_count=( + self.REVIEWED_TRANSLATION_WORD_COUNT), + accepted_translations_count=self.ACCEPTED_TRANSLATIONS_COUNT, + accepted_translations_with_reviewer_edits_count=( + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + accepted_translation_word_count=( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + self.assertTrue( + suggestion_models.TranslationReviewStatsModel + .has_reference_to_user_id(self.REVIEWER_USER_ID)) + + ( + suggestion_models.TranslationReviewStatsModel + .apply_deletion_policy(self.REVIEWER_USER_ID) + ) + + self.assertFalse( + suggestion_models.TranslationReviewStatsModel + .has_reference_to_user_id(self.REVIEWER_USER_ID)) + + def test_export_data_trivial(self) -> None: + user_data = ( + suggestion_models.TranslationReviewStatsModel + .export_data('non_existent_user')) + self.assertEqual(user_data, {}) + + def test_export_data_nontrivial(self) -> None: + topic_id_2 = 'topic ID 2' + # Seed translation stats data for two different topics. 
+ model_1_id = suggestion_models.TranslationReviewStatsModel.create( + language_code=self.LANGUAGE_CODE, + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=self.TOPIC_ID, + reviewed_translations_count=self.REVIEWED_TRANSLATIONS_COUNT, + reviewed_translation_word_count=( + self.REVIEWED_TRANSLATION_WORD_COUNT), + accepted_translations_count=self.ACCEPTED_TRANSLATIONS_COUNT, + accepted_translations_with_reviewer_edits_count=( + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + accepted_translation_word_count=( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + model_2_id = suggestion_models.TranslationReviewStatsModel.create( + language_code=self.LANGUAGE_CODE, + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=topic_id_2, + reviewed_translations_count=self.REVIEWED_TRANSLATIONS_COUNT, + reviewed_translation_word_count=( + self.REVIEWED_TRANSLATION_WORD_COUNT), + accepted_translations_count=self.ACCEPTED_TRANSLATIONS_COUNT, + accepted_translations_with_reviewer_edits_count=( + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + accepted_translation_word_count=( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + model_1_id_without_user_id = model_1_id.replace( + '.%s.' % self.REVIEWER_USER_ID, '.' + ) + model_2_id_without_user_id = model_2_id.replace( + '.%s.' % self.REVIEWER_USER_ID, '.' 
+ ) + expected_data = { + model_1_id_without_user_id: { + 'language_code': self.LANGUAGE_CODE, + 'topic_id': self.TOPIC_ID, + 'reviewed_translations_count': ( + self.REVIEWED_TRANSLATIONS_COUNT), + 'reviewed_translation_word_count': ( + self.REVIEWED_TRANSLATION_WORD_COUNT), + 'accepted_translations_count': ( + self.ACCEPTED_TRANSLATIONS_COUNT), + 'accepted_translations_with_reviewer_edits_count': ( + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + 'accepted_translation_word_count': ( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE.isoformat()), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE.isoformat()) + }, + model_2_id_without_user_id: { + 'language_code': self.LANGUAGE_CODE, + 'topic_id': topic_id_2, + 'reviewed_translations_count': ( + self.REVIEWED_TRANSLATIONS_COUNT), + 'reviewed_translation_word_count': ( + self.REVIEWED_TRANSLATION_WORD_COUNT), + 'accepted_translations_count': ( + self.ACCEPTED_TRANSLATIONS_COUNT), + 'accepted_translations_with_reviewer_edits_count': ( + self.ACCEPTED_TRANSLATIONS_WITH_REVIEWER_EDITS_COUNT), + 'accepted_translation_word_count': ( + self.ACCEPTED_TRANSLATION_WORD_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE.isoformat()), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE.isoformat()) + } + } + + user_data = ( + suggestion_models.TranslationReviewStatsModel + .export_data(self.REVIEWER_USER_ID)) + + self.assertEqual(expected_data, user_data) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_code': base_models.EXPORT_POLICY.EXPORTED, + 'reviewer_user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id': base_models.EXPORT_POLICY.EXPORTED, + 'reviewed_translations_count': base_models.EXPORT_POLICY.EXPORTED, + 
'reviewed_translation_word_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_translations_count': base_models.EXPORT_POLICY.EXPORTED, + 'accepted_translations_with_reviewer_edits_count': + base_models.EXPORT_POLICY.EXPORTED, + 'accepted_translation_word_count': + base_models.EXPORT_POLICY.EXPORTED, + 'first_contribution_date': base_models.EXPORT_POLICY.EXPORTED, + 'last_contribution_date': base_models.EXPORT_POLICY.EXPORTED + } + model = suggestion_models.TranslationReviewStatsModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = suggestion_models.TranslationReviewStatsModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + +class QuestionContributionStatsModelUnitTests(test_utils.GenericTestBase): + """Tests the QuestionContributionStatsModel class.""" + + CONTRIBUTOR_USER_ID = 'uid_01234567890123456789012345678912' + TOPIC_ID = 'topic_id' + SUBMITTED_QUESTION_COUNT = 2 + ACCEPTED_QUESTIONS_COUNT = 1 + ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT = 0 + FIRST_CONTRIBUTION_DATE = datetime.date.fromtimestamp(1616173836) + LAST_CONTRIBUTION_DATE = datetime.date.fromtimestamp(1616173836) + + def test_get_returns_model_when_it_exists(self) -> None: + suggestion_models.QuestionContributionStatsModel.create( + contributor_user_id=self.CONTRIBUTOR_USER_ID, + topic_id=self.TOPIC_ID, + submitted_questions_count=self.SUBMITTED_QUESTION_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_without_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + + question_contribution_stats_model = ( + suggestion_models.QuestionContributionStatsModel.get( + self.CONTRIBUTOR_USER_ID, self.TOPIC_ID + ) + ) + + # Ruling out the possibility of None 
for mypy type checking. + assert question_contribution_stats_model is not None + self.assertEqual( + question_contribution_stats_model.contributor_user_id, + self.CONTRIBUTOR_USER_ID + ) + self.assertEqual( + question_contribution_stats_model.submitted_questions_count, + self.SUBMITTED_QUESTION_COUNT + ) + self.assertEqual( + question_contribution_stats_model.accepted_questions_count, + self.ACCEPTED_QUESTIONS_COUNT + ) + self.assertEqual( + ( + question_contribution_stats_model + .accepted_questions_without_reviewer_edits_count + ), + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT + ) + self.assertEqual( + question_contribution_stats_model.first_contribution_date, + self.FIRST_CONTRIBUTION_DATE + ) + self.assertEqual( + question_contribution_stats_model.last_contribution_date, + self.LAST_CONTRIBUTION_DATE + ) + + def test_get_all_by_user_id(self) -> None: + suggestion_models.QuestionContributionStatsModel.create( + contributor_user_id=self.CONTRIBUTOR_USER_ID, + topic_id=self.TOPIC_ID, + submitted_questions_count=self.SUBMITTED_QUESTION_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_without_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + + question_contribution_stats_models = ( + suggestion_models.QuestionContributionStatsModel.get_all_by_user_id( + self.CONTRIBUTOR_USER_ID + ) + ) + + # Ruling out the possibility of None for mypy type checking. + assert question_contribution_stats_models is not None + + self.assertEqual( + len(question_contribution_stats_models), + 1 + ) + + question_contribution_stats_model = question_contribution_stats_models[ + 0] + + # Ruling out the possibility of None for mypy type checking. 
+ assert question_contribution_stats_model is not None + self.assertEqual( + question_contribution_stats_model.contributor_user_id, + self.CONTRIBUTOR_USER_ID + ) + self.assertEqual( + question_contribution_stats_model.submitted_questions_count, + self.SUBMITTED_QUESTION_COUNT + ) + self.assertEqual( + question_contribution_stats_model.accepted_questions_count, + self.ACCEPTED_QUESTIONS_COUNT + ) + self.assertEqual( + ( + question_contribution_stats_model + .accepted_questions_without_reviewer_edits_count + ), + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT + ) + self.assertEqual( + question_contribution_stats_model.first_contribution_date, + self.FIRST_CONTRIBUTION_DATE + ) + self.assertEqual( + question_contribution_stats_model.last_contribution_date, + self.LAST_CONTRIBUTION_DATE + ) + + def test_get_deletion_policy(self) -> None: + self.assertEqual( + ( + suggestion_models.QuestionContributionStatsModel + .get_deletion_policy() + ), + base_models.DELETION_POLICY.DELETE) + + def test_apply_deletion_policy(self) -> None: + suggestion_models.QuestionContributionStatsModel.create( + contributor_user_id=self.CONTRIBUTOR_USER_ID, + topic_id=self.TOPIC_ID, + submitted_questions_count=self.SUBMITTED_QUESTION_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_without_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + self.assertTrue( + suggestion_models.QuestionContributionStatsModel + .has_reference_to_user_id(self.CONTRIBUTOR_USER_ID)) + + ( + suggestion_models.QuestionContributionStatsModel + .apply_deletion_policy(self.CONTRIBUTOR_USER_ID) + ) + + self.assertFalse( + suggestion_models.QuestionContributionStatsModel + .has_reference_to_user_id(self.CONTRIBUTOR_USER_ID)) + + def test_export_data_trivial(self) -> None: + user_data = ( + suggestion_models.QuestionContributionStatsModel + 
.export_data('non_existent_user')) + self.assertEqual(user_data, {}) + + def test_export_data_nontrivial(self) -> None: + topic_id_2 = 'topic ID 2' + # Seed question stats data for two different topics. + suggestion_models.QuestionContributionStatsModel.create( + contributor_user_id=self.CONTRIBUTOR_USER_ID, + topic_id=self.TOPIC_ID, + submitted_questions_count=self.SUBMITTED_QUESTION_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_without_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + suggestion_models.QuestionContributionStatsModel.create( + contributor_user_id=self.CONTRIBUTOR_USER_ID, + topic_id=topic_id_2, + submitted_questions_count=self.SUBMITTED_QUESTION_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_without_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + model_1_id_without_user_id = self.TOPIC_ID + model_2_id_without_user_id = topic_id_2 + expected_data = { + model_1_id_without_user_id: { + 'topic_id': self.TOPIC_ID, + 'submitted_questions_count': ( + self.SUBMITTED_QUESTION_COUNT), + 'accepted_questions_count': ( + self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_without_reviewer_edits_count': ( + self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE.isoformat()), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE.isoformat()) + }, + model_2_id_without_user_id: { + 'topic_id': topic_id_2, + 'submitted_questions_count': ( + self.SUBMITTED_QUESTION_COUNT), + 'accepted_questions_count': ( + self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_without_reviewer_edits_count': ( + 
self.ACCEPTED_QUESTIONS_WITHOUT_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE.isoformat()), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE.isoformat()) + } + } + + user_data = ( + suggestion_models.QuestionContributionStatsModel + .export_data(self.CONTRIBUTOR_USER_ID)) + + self.assertEqual(expected_data, user_data) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'contributor_user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id': base_models.EXPORT_POLICY.EXPORTED, + 'submitted_questions_count': base_models.EXPORT_POLICY.EXPORTED, + 'accepted_questions_count': base_models.EXPORT_POLICY.EXPORTED, + 'accepted_questions_without_reviewer_edits_count': + base_models.EXPORT_POLICY.EXPORTED, + 'first_contribution_date': base_models.EXPORT_POLICY.EXPORTED, + 'last_contribution_date': base_models.EXPORT_POLICY.EXPORTED + } + model = suggestion_models.QuestionContributionStatsModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = suggestion_models.QuestionContributionStatsModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + +class QuestionReviewStatsModelUnitTests(test_utils.GenericTestBase): + """Tests the QuestionReviewStatsModel class.""" + + REVIEWER_USER_ID = 'uid_01234567890123456789012345678912' + TOPIC_ID = 'topic_id' + REVIEWED_QUESTIONS_COUNT = 2 + ACCEPTED_QUESTIONS_COUNT = 1 + ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT = 0 + FIRST_CONTRIBUTION_DATE = datetime.date.fromtimestamp(1616173836) + LAST_CONTRIBUTION_DATE = datetime.date.fromtimestamp(1616173836) + + def test_get_returns_model_when_it_exists(self) -> None: + 
suggestion_models.QuestionReviewStatsModel.create( + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=self.TOPIC_ID, + reviewed_questions_count=self.REVIEWED_QUESTIONS_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_with_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + + question_review_stats_model = ( + suggestion_models.QuestionReviewStatsModel.get( + self.REVIEWER_USER_ID, self.TOPIC_ID + ) + ) + + # Ruling out the possibility of None for mypy type checking. + assert question_review_stats_model is not None + self.assertEqual( + question_review_stats_model.reviewer_user_id, + self.REVIEWER_USER_ID + ) + self.assertEqual( + question_review_stats_model.reviewed_questions_count, + self.REVIEWED_QUESTIONS_COUNT + ) + self.assertEqual( + question_review_stats_model.accepted_questions_count, + self.ACCEPTED_QUESTIONS_COUNT + ) + self.assertEqual( + ( + question_review_stats_model + .accepted_questions_with_reviewer_edits_count + ), + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT + ) + self.assertEqual( + question_review_stats_model.first_contribution_date, + self.FIRST_CONTRIBUTION_DATE + ) + self.assertEqual( + question_review_stats_model.last_contribution_date, + self.LAST_CONTRIBUTION_DATE + ) + + def test_get_all_by_user_id(self) -> None: + suggestion_models.QuestionReviewStatsModel.create( + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=self.TOPIC_ID, + reviewed_questions_count=self.REVIEWED_QUESTIONS_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_with_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + + question_review_stats_models = ( + 
suggestion_models.QuestionReviewStatsModel.get_all_by_user_id( + self.REVIEWER_USER_ID + ) + ) + + # Ruling out the possibility of None for mypy type checking. + assert question_review_stats_models is not None + + question_review_stats_model = question_review_stats_models[0] + + self.assertEqual( + question_review_stats_model.reviewer_user_id, + self.REVIEWER_USER_ID + ) + self.assertEqual( + question_review_stats_model.reviewed_questions_count, + self.REVIEWED_QUESTIONS_COUNT + ) + self.assertEqual( + question_review_stats_model.accepted_questions_count, + self.ACCEPTED_QUESTIONS_COUNT + ) + self.assertEqual( + ( + question_review_stats_model + .accepted_questions_with_reviewer_edits_count + ), + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT + ) + self.assertEqual( + question_review_stats_model.first_contribution_date, + self.FIRST_CONTRIBUTION_DATE + ) + self.assertEqual( + question_review_stats_model.last_contribution_date, + self.LAST_CONTRIBUTION_DATE + ) + + def test_get_deletion_policy(self) -> None: + self.assertEqual( + ( + suggestion_models.QuestionReviewStatsModel + .get_deletion_policy() + ), + base_models.DELETION_POLICY.DELETE) + + def test_apply_deletion_policy(self) -> None: + suggestion_models.QuestionReviewStatsModel.create( + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=self.TOPIC_ID, + reviewed_questions_count=self.REVIEWED_QUESTIONS_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_with_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + self.assertTrue( + suggestion_models.QuestionReviewStatsModel + .has_reference_to_user_id(self.REVIEWER_USER_ID)) + + ( + suggestion_models.QuestionReviewStatsModel + .apply_deletion_policy(self.REVIEWER_USER_ID) + ) + + self.assertFalse( + suggestion_models.QuestionReviewStatsModel + 
.has_reference_to_user_id(self.REVIEWER_USER_ID)) + + def test_export_data_trivial(self) -> None: + user_data = ( + suggestion_models.QuestionReviewStatsModel + .export_data('non_existent_user')) + self.assertEqual(user_data, {}) + + def test_export_data_nontrivial(self) -> None: + topic_id_2 = 'topic ID 2' + # Seed question stats data for two different topics. + suggestion_models.QuestionReviewStatsModel.create( + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=self.TOPIC_ID, + reviewed_questions_count=self.REVIEWED_QUESTIONS_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_with_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + suggestion_models.QuestionReviewStatsModel.create( + reviewer_user_id=self.REVIEWER_USER_ID, + topic_id=topic_id_2, + reviewed_questions_count=self.REVIEWED_QUESTIONS_COUNT, + accepted_questions_count=self.ACCEPTED_QUESTIONS_COUNT, + accepted_questions_with_reviewer_edits_count=( + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + first_contribution_date=self.FIRST_CONTRIBUTION_DATE, + last_contribution_date=self.LAST_CONTRIBUTION_DATE + ) + model_1_id_without_user_id = self.TOPIC_ID + model_2_id_without_user_id = topic_id_2 + expected_data = { + model_1_id_without_user_id: { + 'topic_id': self.TOPIC_ID, + 'reviewed_questions_count': ( + self.REVIEWED_QUESTIONS_COUNT), + 'accepted_questions_count': ( + self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_with_reviewer_edits_count': ( + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE.isoformat()), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE.isoformat()) + }, + model_2_id_without_user_id: { + 'topic_id': topic_id_2, + 'reviewed_questions_count': ( + self.REVIEWED_QUESTIONS_COUNT), + 'accepted_questions_count': ( + 
self.ACCEPTED_QUESTIONS_COUNT), + 'accepted_questions_with_reviewer_edits_count': ( + self.ACCEPTED_QUESTIONS_WITH_REVIEWER_EDITS_COUNT), + 'first_contribution_date': ( + self.FIRST_CONTRIBUTION_DATE.isoformat()), + 'last_contribution_date': ( + self.LAST_CONTRIBUTION_DATE.isoformat()) + } + } + + user_data = ( + suggestion_models.QuestionReviewStatsModel + .export_data(self.REVIEWER_USER_ID)) + + self.assertEqual(expected_data, user_data) + + def test_get_export_policy(self) -> None: + expected_dict = { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'reviewer_user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_id': base_models.EXPORT_POLICY.EXPORTED, + 'reviewed_questions_count': base_models.EXPORT_POLICY.EXPORTED, + 'accepted_questions_count': base_models.EXPORT_POLICY.EXPORTED, + 'accepted_questions_with_reviewer_edits_count': + base_models.EXPORT_POLICY.EXPORTED, + 'first_contribution_date': base_models.EXPORT_POLICY.EXPORTED, + 'last_contribution_date': base_models.EXPORT_POLICY.EXPORTED + } + model = suggestion_models.QuestionReviewStatsModel + self.assertEqual(model.get_export_policy(), expected_dict) + + def test_get_model_association_to_user(self) -> None: + model = suggestion_models.QuestionReviewStatsModel + self.assertEqual( + model.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) diff --git a/core/storage/topic/gae_models.py b/core/storage/topic/gae_models.py index 17ace9b83f23..f4a06d05a9d1 100644 --- a/core/storage/topic/gae_models.py +++ b/core/storage/topic/gae_models.py @@ -22,7 +22,7 @@ from core.constants import constants from core.platform import models -from typing import Any, Dict, List, Optional, Sequence +from typing import Dict, List, Mapping, Optional, Sequence, cast MYPY = False if MYPY: # pragma: no cover @@ -30,7 +30,8 @@ from mypy_imports import 
datastore_services (base_models, user_models) = models.Registry.import_models([ - models.NAMES.base_model, models.NAMES.user]) + models.Names.BASE_MODEL, models.Names.USER +]) datastore_services = models.Registry.import_datastore_services() @@ -165,22 +166,43 @@ class TopicModel(base_models.VersionedModel): # the page title fragment field represents the middle value 'Add, Subtract, # Multiply and Divide'. page_title_fragment_for_web = datastore_services.StringProperty( - indexed=True, default='') + required=True, indexed=True) + # A diagnostic test contains a set of questions covering multiple topics and + # based on the user's performance in the test, a topic is recommended to + # them. Now, this field is used for listing the skill IDs from which the + # questions should be fetched for the diagnostic test. + skill_ids_for_diagnostic_test = datastore_services.StringProperty( + repeated=True, indexed=True) @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: """Model doesn't contain any data directly corresponding to a user.""" return base_models.DELETION_POLICY.NOT_APPLICABLE - # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object - # to remove Any used below. - def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't be allowed. + def _prepare_additional_models(self) -> Mapping[str, base_models.BaseModel]: + """Prepares additional models needed for the commit process. + + Returns: + dict(str, BaseModel). Additional models needed for + the commit process. Contains the TopicRightsModel. 
+ """ + return { + 'rights_model': TopicRightsModel.get_by_id(self.id) + } + + def compute_models_to_commit( + self, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: base_models.AllowedCommitCmdsListType, + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't + # be allowed. + additional_models: Mapping[str, base_models.BaseModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this extends the superclass method. @@ -190,17 +212,33 @@ def _trusted_commit( change. commit_type: str. The type of commit. Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. - commit_message: str. The commit description message. + commit_message: str|None. The commit description message, for + unpublished topics, it may be equal to None. commit_cmds: list(dict). A list of commands, describing changes made in this model, which should give sufficient information to reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. + + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. """ - super(TopicModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) - topic_rights = TopicRightsModel.get_by_id(self.id) + # Here we use cast because we are narrowing down the type from + # BaseModel to TopicRightsModel. 
+ topic_rights = cast( + TopicRightsModel, additional_models['rights_model'] + ) if topic_rights.topic_is_published: status = constants.ACTIVITY_STATUS_PUBLIC else: @@ -211,8 +249,12 @@ def _trusted_commit( commit_message, commit_cmds, status, False ) topic_commit_log_entry.topic_id = self.id - topic_commit_log_entry.update_timestamps() - topic_commit_log_entry.put() + return { + 'snapshot_metadata_model': models_to_put['snapshot_metadata_model'], + 'snapshot_content_model': models_to_put['snapshot_content_model'], + 'commit_log_model': topic_commit_log_entry, + 'versioned_model': models_to_put['versioned_model'], + } @classmethod def get_by_name(cls, topic_name: str) -> Optional[TopicModel]: @@ -279,6 +321,8 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'practice_tab_is_displayed': base_models.EXPORT_POLICY.NOT_APPLICABLE, 'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_ids_for_diagnostic_test': + base_models.EXPORT_POLICY.NOT_APPLICABLE }) @@ -471,15 +515,17 @@ def get_by_user(cls, user_id: str) -> Sequence[TopicRightsModel]: """ return cls.query(cls.manager_ids == user_id).fetch() - # TODO(#13523): Change 'commit_cmds' to TypedDict/Domain Object - # to remove Any used below. - def _trusted_commit( - self, - committer_id: str, - commit_type: str, - commit_message: str, - commit_cmds: List[Dict[str, Any]] - ) -> None: + def compute_models_to_commit( + self, + committer_id: str, + commit_type: str, + commit_message: Optional[str], + commit_cmds: base_models.AllowedCommitCmdsListType, + # We expect Mapping because we want to allow models that inherit + # from BaseModel as the values, if we used Dict this wouldn't + # be allowed. + additional_models: Mapping[str, base_models.BaseModel] + ) -> base_models.ModelsToPutDict: """Record the event to the commit log after the model commit. Note that this extends the superclass method. @@ -489,23 +535,34 @@ def _trusted_commit( change. commit_type: str. The type of commit. 
Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. - commit_message: str. The commit description message. + commit_message: str|None. The commit description message, for + unpublished topic, it may be equal to None. commit_cmds: list(dict). A list of commands, describing changes made in this model, which should give sufficient information to reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. + additional_models: dict(str, BaseModel). Additional models that are + needed for the commit process. + + Returns: + ModelsToPutDict. A dict of models that should be put into + the datastore. """ - super(TopicRightsModel, self)._trusted_commit( - committer_id, commit_type, commit_message, commit_cmds) + models_to_put = super().compute_models_to_commit( + committer_id, + commit_type, + commit_message, + commit_cmds, + additional_models + ) - topic_rights = TopicRightsModel.get_by_id(self.id) - if topic_rights.topic_is_published: + if self.topic_is_published: status = constants.ACTIVITY_STATUS_PUBLIC else: status = constants.ACTIVITY_STATUS_PRIVATE - TopicCommitLogEntryModel( + topic_commit_log = TopicCommitLogEntryModel( id=('rights-%s-%s' % (self.id, self.version)), user_id=committer_id, topic_id=self.id, @@ -515,14 +572,10 @@ def _trusted_commit( version=None, post_commit_status=status, post_commit_community_owned=False, - post_commit_is_private=not topic_rights.topic_is_published - ).put() - - snapshot_metadata_model = self.SNAPSHOT_METADATA_CLASS.get( - self.get_snapshot_id(self.id, self.version)) + post_commit_is_private=not self.topic_is_published + ) - # Ruling out the possibility of None for mypy type checking. 
- assert snapshot_metadata_model is not None + snapshot_metadata_model = models_to_put['snapshot_metadata_model'] snapshot_metadata_model.content_user_ids = list(sorted(set( self.manager_ids))) @@ -534,12 +587,20 @@ def _trusted_commit( if cmd['name'] == commit_cmd['cmd'] ) for user_id_attribute_name in user_id_attribute_names: - commit_cmds_user_ids.add(commit_cmd[user_id_attribute_name]) + user_id_attribute = commit_cmd[user_id_attribute_name] + # Ruling out the possibility of Any other type for mypy type + # checking. + assert isinstance(user_id_attribute, str) + commit_cmds_user_ids.add(user_id_attribute) snapshot_metadata_model.commit_cmds_user_ids = list( sorted(commit_cmds_user_ids)) - snapshot_metadata_model.update_timestamps() - snapshot_metadata_model.put() + return { + 'snapshot_metadata_model': models_to_put['snapshot_metadata_model'], + 'snapshot_content_model': models_to_put['snapshot_content_model'], + 'commit_log_model': topic_commit_log, + 'versioned_model': models_to_put['versioned_model'], + } @staticmethod def get_model_association_to_user( diff --git a/core/storage/topic/gae_models_test.py b/core/storage/topic/gae_models_test.py index e6ef75c1a151..dfe948b1a21a 100644 --- a/core/storage/topic/gae_models_test.py +++ b/core/storage/topic/gae_models_test.py @@ -25,15 +25,16 @@ from core.platform import models from core.tests import test_utils -from typing import Dict, List +from typing import Dict, Final, List MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import topic_models -(base_models, topic_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.topic, models.NAMES.user]) +(base_models, topic_models, user_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.TOPIC, models.Names.USER +]) class TopicSnapshotContentModelTests(test_utils.GenericTestBase): @@ -47,9 +48,9 @@ def test_get_deletion_policy_is_not_applicable(self) -> 
None: class TopicModelUnitTests(test_utils.GenericTestBase): """Tests the TopicModel class.""" - TOPIC_NAME = 'tOpic_NaMe' - TOPIC_CANONICAL_NAME = 'topic_name' - TOPIC_ID = 'topic_id' + TOPIC_NAME: Final = 'tOpic_NaMe' + TOPIC_CANONICAL_NAME: Final = 'topic_name' + TOPIC_ID: Final = 'topic_id' def test_get_deletion_policy(self) -> None: self.assertEqual( @@ -57,7 +58,7 @@ def test_get_deletion_policy(self) -> None: base_models.DELETION_POLICY.NOT_APPLICABLE) def test_that_subsidiary_models_are_created_when_new_model_is_saved( - self + self ) -> None: """Tests the _trusted_commit() method.""" @@ -78,7 +79,9 @@ def test_that_subsidiary_models_are_created_when_new_model_is_saved( story_reference_schema_version=( feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION), next_subtopic_id=1, - language_code='en' + language_code='en', + page_title_fragment_for_web='fragm', + skill_ids_for_diagnostic_test=[] ) # We check that topic has not been saved before calling commit(). self.assertIsNone(topic_models.TopicModel.get_by_name(self.TOPIC_NAME)) @@ -101,9 +104,9 @@ def test_that_subsidiary_models_are_created_when_new_model_is_saved( ) def test_get_by_name(self) -> None: - topic = topic_domain.Topic.create_default_topic( # type: ignore[no-untyped-call] - self.TOPIC_ID, self.TOPIC_NAME, 'name', 'description') - topic_services.save_new_topic(feconf.SYSTEM_COMMITTER_ID, topic) # type: ignore[no-untyped-call] + topic = topic_domain.Topic.create_default_topic( + self.TOPIC_ID, self.TOPIC_NAME, 'name', 'description', 'fragm') + topic_services.save_new_topic(feconf.SYSTEM_COMMITTER_ID, topic) topic_model = topic_models.TopicModel.get_by_name(self.TOPIC_NAME) # Ruling out the possibility of None for mypy type checking. 
assert topic_model is not None @@ -111,9 +114,10 @@ def test_get_by_name(self) -> None: self.assertEqual(topic_model.id, self.TOPIC_ID) def test_get_by_url_fragment(self) -> None: - topic = topic_domain.Topic.create_default_topic( # type: ignore[no-untyped-call] - self.TOPIC_ID, self.TOPIC_NAME, 'name-two', 'description') - topic_services.save_new_topic(feconf.SYSTEM_COMMITTER_ID, topic) # type: ignore[no-untyped-call] + topic = topic_domain.Topic.create_default_topic( + self.TOPIC_ID, self.TOPIC_NAME, 'name-two', 'description', + 'fragm') + topic_services.save_new_topic(feconf.SYSTEM_COMMITTER_ID, topic) topic_model = topic_models.TopicModel.get_by_name(self.TOPIC_NAME) # Ruling out the possibility of None for mypy type checking. assert topic_model is not None @@ -176,10 +180,10 @@ def test_get_deletion_policy(self) -> None: class TopicRightsRightsSnapshotContentModelTests(test_utils.GenericTestBase): - TOPIC_ID_1 = '1' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - USER_ID_COMMITTER = 'id_committer' + TOPIC_ID_1: Final = '1' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_COMMITTER: Final = 'id_committer' def test_get_deletion_policy_is_locally_pseudonymize(self) -> None: self.assertEqual( @@ -211,22 +215,22 @@ def test_has_reference_to_user_id(self) -> None: class TopicRightsModelUnitTests(test_utils.GenericTestBase): """Tests the TopicRightsModel class.""" - TOPIC_1_ID = 'topic_1_id' - TOPIC_2_ID = 'topic_2_id' - TOPIC_3_ID = 'topic_3_id' - TOPIC_4_ID = 'topic_4_id' - TOPIC_5_ID = 'topic_5_id' - MANAGER_1_ID_OLD = 'manager_1_id_old' - MANAGER_1_ID_NEW = 'manager_1_id_new' - MANAGER_2_ID_OLD = 'manager_2_id_old' - MANAGER_2_ID_NEW = 'manager_2_id_new' - MANAGER_3_ID_OLD = 'manager_3_id_old' - MANAGER_3_ID_NEW = 'manager_3_id_old' - USER_ID_1 = 'user_id_1' - USER_ID_2 = 'user_id_2' + TOPIC_1_ID: Final = 'topic_1_id' + TOPIC_2_ID: Final = 'topic_2_id' + TOPIC_3_ID: Final = 'topic_3_id' + TOPIC_4_ID: Final = 'topic_4_id' + TOPIC_5_ID: Final = 
'topic_5_id' + MANAGER_1_ID_OLD: Final = 'manager_1_id_old' + MANAGER_1_ID_NEW: Final = 'manager_1_id_new' + MANAGER_2_ID_OLD: Final = 'manager_2_id_old' + MANAGER_2_ID_NEW: Final = 'manager_2_id_new' + MANAGER_3_ID_OLD: Final = 'manager_3_id_old' + MANAGER_3_ID_NEW: Final = 'manager_3_id_old' + USER_ID_1: Final = 'user_id_1' + USER_ID_2: Final = 'user_id_2' def setUp(self) -> None: - super(TopicRightsModelUnitTests, self).setUp() + super().setUp() topic_models.TopicRightsModel( id=self.TOPIC_4_ID, manager_ids=[self.USER_ID_2], diff --git a/core/storage/translation/gae_models.py b/core/storage/translation/gae_models.py index 19fc8aaf6b3a..113976e06f79 100644 --- a/core/storage/translation/gae_models.py +++ b/core/storage/translation/gae_models.py @@ -18,22 +18,183 @@ from __future__ import annotations +from core import feconf from core import utils from core.platform import models -from typing import Dict, Optional +from typing import Dict, Optional, Sequence MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models( - [models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([ + models.Names.BASE_MODEL +]) datastore_services = models.Registry.import_datastore_services() +class EntityTranslationsModel(base_models.BaseModel): + """Model for storing entity translations.""" + + # The id of the corresponding entity. + entity_id = datastore_services.StringProperty(required=True, indexed=True) + # The type of the corresponding entity. + entity_type = datastore_services.StringProperty( + required=True, indexed=True, choices=[ + feconf.ENTITY_TYPE_EXPLORATION, + feconf.ENTITY_TYPE_QUESTION + ]) + # The version of the corresponding entity. + entity_version = datastore_services.IntegerProperty( + required=True, indexed=True) + # The ISO 639-1 code for the language an entity is written in. 
+ language_code = datastore_services.StringProperty( + required=True, indexed=True) + # A dict representing content-id as keys and dict(TranslatedContent) + # as values. + translations = datastore_services.JsonProperty(required=True) + + @staticmethod + def get_deletion_policy() -> base_models.DELETION_POLICY: + """Model doesn't contain any data directly corresponding to a user.""" + return base_models.DELETION_POLICY.NOT_APPLICABLE + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """Model does not contain user data.""" + return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model doesn't contain any data directly corresponding to a user.""" + return dict(super(cls, cls).get_export_policy(), **{ + 'entity_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'translations': base_models.EXPORT_POLICY.NOT_APPLICABLE, + }) + + @staticmethod + def _generate_id( + entity_type: feconf.TranslatableEntityType, + entity_id: str, + entity_version: int, + language_code: str + ) -> str: + """Generates the ID for an entity translations model. + + Args: + entity_type: TranslatableEntityType. The type of the entity. + entity_id: str. The ID of the entity. + entity_version: int. The version of the entity. + language_code: str. The language code for the entity. + + Returns: + str. Returns a unique id of the form + [entity_type]-[entity_id]-[entity_version]-[language_code]. 
+ """ + return '%s-%s-%s-%s' % ( + entity_type.value, entity_id, str(entity_version), language_code) + + @classmethod + def get_model( + cls, + entity_type: feconf.TranslatableEntityType, + entity_id: str, + entity_version: int, + language_code: str + ) -> EntityTranslationsModel: + """Gets EntityTranslationsModel by help of entity_type, entity_id, + entity_version and language_code. + + Args: + entity_type: TranslatableEntityType. The type of the entity whose + translations are to be fetched. + entity_id: str. The ID of the entity whose translations are to be + fetched. + entity_version: int. The version of the entity whose translations + are to be fetched. + language_code: str. The language code of the entity whose + translations are to be fetched. + + Returns: + EntityTranslationsModel. The EntityTranslationsModel + instance corresponding to the given inputs, if such a translation + exists, or None if no translation is found. + """ + model_id = cls._generate_id( + entity_type, entity_id, entity_version, language_code) + return cls.get_by_id(model_id) + + @classmethod + def get_all_for_entity( + cls, + entity_type: feconf.TranslatableEntityType, + entity_id: str, + entity_version: int + ) -> Sequence[EntityTranslationsModel]: + """Gets EntityTranslationsModels corresponding to the given entity, for + all languages in which such models exist. + + Args: + entity_type: TranslatableEntityType. The type of the entity whose + translations are to be fetched. + entity_id: str. The ID of the entity whose translations are to be + fetched. + entity_version: int. The version of the entity whose translations + are to be fetched. + + Returns: + list(EntityTranslationsModel|None). The EntityTranslationsModel + instances corresponding to the given inputs, if such translations + exist. 
+ """ + return cls.query( + cls.entity_type == entity_type.value, + cls.entity_id == entity_id, + cls.entity_version == entity_version + ).fetch() + + @classmethod + def create_new( + cls, + entity_type: str, + entity_id: str, + entity_version: int, + language_code: str, + translations: Dict[str, feconf.TranslatedContentDict] + ) -> EntityTranslationsModel: + """Creates and returns a new EntityTranslationsModel instance. + + Args: + entity_type: TranslatableEntityType. The type of the entity. + entity_id: str. The ID of the entity. + entity_version: int. The version of the entity. + language_code: str. The language code for the entity. + translations: dict(str, TranslatedContentDict). A dict representing + content-id as keys and dict(TranslatedContent) as values. + + Returns: + EntityTranslationsModel. Returns a new EntityTranslationsModel. + """ + return cls( + id=cls._generate_id( + feconf.TranslatableEntityType( + entity_type), + entity_id, entity_version, language_code), + entity_type=entity_type, + entity_id=entity_id, + entity_version=entity_version, + language_code=language_code, + translations=translations + ) + + class MachineTranslationModel(base_models.BaseModel): """Model for storing machine generated translations for the purpose of preventing duplicate generation. Machine translations are used for reference @@ -70,11 +231,11 @@ class MachineTranslationModel(base_models.BaseModel): @classmethod def create( - cls, - source_language_code: str, - target_language_code: str, - source_text: str, - translated_text: str + cls, + source_language_code: str, + target_language_code: str, + source_text: str, + translated_text: str ) -> Optional[str]: """Creates a new MachineTranslationModel instance and returns its ID. 
@@ -112,9 +273,9 @@ def create( @staticmethod def _generate_id( - source_language_code: str, - target_language_code: str, - hashed_source_text: str + source_language_code: str, + target_language_code: str, + hashed_source_text: str ) -> str: """Generates a valid, deterministic key for a MachineTranslationModel instance. diff --git a/core/storage/translation/gae_models_test.py b/core/storage/translation/gae_models_test.py index 9e249c01a76a..ef3979f0acae 100644 --- a/core/storage/translation/gae_models_test.py +++ b/core/storage/translation/gae_models_test.py @@ -18,6 +18,7 @@ from __future__ import annotations +from core import feconf from core.platform import models from core.tests import test_utils @@ -26,8 +27,133 @@ from mypy_imports import base_models from mypy_imports import translation_models -(base_models, translation_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.translation]) +(base_models, translation_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.TRANSLATION +]) + + +class EntityTranslationsModelTest(test_utils.GenericTestBase): + """Unit tests for EntityTranslationsModel class.""" + + def test_create_new_model(self) -> None: + enitity_translation_model = ( + translation_models.EntityTranslationsModel.create_new( + feconf.TranslatableEntityType.EXPLORATION.value, + 'exp_id', 1, 'hi', { + '123': { + 'content_value': 'Hello world!', + 'needs_update': False, + 'content_format': 'html' + } + }) + ) + self.assertEqual(enitity_translation_model.entity_type, 'exploration') + self.assertEqual(enitity_translation_model.entity_id, 'exp_id') + self.assertEqual(enitity_translation_model.entity_version, 1) + self.assertEqual(enitity_translation_model.language_code, 'hi') + self.assertEqual( + enitity_translation_model.translations['123']['content_value'], + 'Hello world!') + self.assertEqual( + enitity_translation_model.translations['123']['needs_update'], + False) + + def 
test_get_model_method_returns_correctly(self) -> None: + translation_models.EntityTranslationsModel.create_new( + feconf.TranslatableEntityType.EXPLORATION.value, + 'exp_id', 1, 'hi', { + '123': { + 'content_value': 'Hello world!', + 'needs_update': False, + 'content_format': 'html' + } + } + ).put() + enitity_translation_model = ( + translation_models.EntityTranslationsModel.get_model( + feconf.TranslatableEntityType.EXPLORATION, 'exp_id', 1, 'hi')) + self.assertEqual(enitity_translation_model.entity_type, 'exploration') + self.assertEqual(enitity_translation_model.entity_id, 'exp_id') + self.assertEqual(enitity_translation_model.entity_version, 1) + self.assertEqual(enitity_translation_model.language_code, 'hi') + self.assertEqual( + enitity_translation_model.translations['123']['content_value'], + 'Hello world!') + self.assertEqual( + enitity_translation_model.translations['123']['needs_update'], + False) + + def test_get_all_for_entity_returns_correctly(self) -> None: + translation_models.EntityTranslationsModel.create_new( + feconf.TranslatableEntityType.EXPLORATION.value, + 'exp_id', 1, 'en', { + '123': { + 'content_value': 'Hey I am Jhon.', + 'needs_update': False, + 'content_format': 'html' + } + } + ).put() + translation_models.EntityTranslationsModel.create_new( + feconf.TranslatableEntityType.EXPLORATION.value, + 'exp_id2', 2, 'hi', { + '123': { + 'content_value': 'Hello world!', + 'needs_update': False, + 'content_format': 'html' + } + } + ).put() + translation_models.EntityTranslationsModel.create_new( + feconf.TranslatableEntityType.EXPLORATION.value, + 'exp_id', 1, 'hi', { + '123': { + 'content_value': 'Hey I am Nikhil.', + 'needs_update': False, + 'content_format': 'html' + } + } + ).put() + + enitity_translation_models = ( + translation_models.EntityTranslationsModel.get_all_for_entity( + feconf.TranslatableEntityType.EXPLORATION, 'exp_id', 1)) + self.assertEqual(len(enitity_translation_models), 2) + + enitity_translation_models = ( + 
translation_models.EntityTranslationsModel.get_all_for_entity( + feconf.TranslatableEntityType.EXPLORATION, 'exp_id2', 2)) + self.assertEqual(len(enitity_translation_models), 1) + + def test_get_export_policy_not_applicable(self) -> None: + self.assertEqual( + translation_models.EntityTranslationsModel.get_export_policy(), + { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_type': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'entity_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'translations': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + + def test_get_deletion_policy_not_applicable(self) -> None: + self.assertEqual( + translation_models.EntityTranslationsModel.get_deletion_policy(), + base_models.DELETION_POLICY.NOT_APPLICABLE) + + def test_get_model_association_to_user_not_corresponding_to_user( + self + ) -> None: + self.assertEqual( + ( + translation_models.EntityTranslationsModel + .get_model_association_to_user() + ), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) class MachineTranslationModelTests(test_utils.GenericTestBase): @@ -42,12 +168,10 @@ def test_create_model(self) -> None: assert model_id is not None translation_model = ( translation_models.MachineTranslationModel.get(model_id)) - # Ruling out the possibility of None for mypy type checking. 
- assert translation_model is not None self.assertEqual(translation_model.translated_text, 'hola mundo') def test_create_model_with_same_source_target_language_codes_returns_none( - self + self ) -> None: model_id = translation_models.MachineTranslationModel.create( source_language_code='en', @@ -78,7 +202,7 @@ def test_get_machine_translation_with_existing_translation(self) -> None: self.assertEqual(translation.translated_text, 'hola mundo') def test_get_machine_translation_with_no_existing_translation_returns_none( - self + self ) -> None: translation = ( translation_models.MachineTranslationModel @@ -96,7 +220,7 @@ def test_get_deletion_policy_not_applicable(self) -> None: base_models.DELETION_POLICY.NOT_APPLICABLE) def test_get_model_association_to_user_not_corresponding_to_user( - self + self ) -> None: self.assertEqual( ( diff --git a/core/storage/user/gae_models.py b/core/storage/user/gae_models.py index ff84b252cb68..71ffd2d0d237 100644 --- a/core/storage/user/gae_models.py +++ b/core/storage/user/gae_models.py @@ -18,6 +18,7 @@ from __future__ import annotations +import itertools import random import string @@ -26,14 +27,16 @@ from core.constants import constants from core.platform import models -from typing import Dict, List, Optional, Sequence, Tuple, Union, cast +from typing import ( + Dict, Final, List, Literal, Optional, Sequence, Tuple, TypedDict, Union, + overload) MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import datastore_services -(base_models,) = models.Registry.import_models([models.NAMES.base_model]) +(base_models,) = models.Registry.import_models([models.Names.BASE_MODEL]) datastore_services = models.Registry.import_datastore_services() transaction_services = models.Registry.import_transaction_services() @@ -90,6 +93,10 @@ class UserSettingsModel(base_models.BaseModel): preferred_audio_language_code = datastore_services.StringProperty( default=None, choices=[ language['id'] for language in 
constants.SUPPORTED_AUDIO_LANGUAGES]) + # Language preference when submitting text translations in the + # contributor dashboard. + preferred_translation_language_code = datastore_services.StringProperty( + default=None) # Attributes used for full users only. @@ -131,6 +138,10 @@ class UserSettingsModel(base_models.BaseModel): repeated=True, indexed=True, choices=feconf.ALLOWED_USER_ROLES) # Flag to indicate whether the user is banned. banned = datastore_services.BooleanProperty(indexed=True, default=False) + # Flag to check whether the user has viewed lesson info modal once which + # shows the progress of the user through exploration checkpoints. + has_viewed_lesson_info_modal_once = datastore_services.BooleanProperty( + indexed=True, default=False) @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: @@ -181,6 +192,8 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: base_models.EXPORT_POLICY.EXPORTED, 'preferred_audio_language_code': base_models.EXPORT_POLICY.EXPORTED, + 'preferred_translation_language_code': + base_models.EXPORT_POLICY.EXPORTED, 'username': base_models.EXPORT_POLICY.EXPORTED, 'normalized_username': base_models.EXPORT_POLICY.EXPORTED, 'last_started_state_editor_tutorial': @@ -196,6 +209,8 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: base_models.EXPORT_POLICY.EXPORTED, 'first_contribution_msec': base_models.EXPORT_POLICY.EXPORTED, + 'has_viewed_lesson_info_modal_once': + base_models.EXPORT_POLICY.EXPORTED, # Pin is not exported since this is an auth mechanism. 'pin': base_models.EXPORT_POLICY.NOT_APPLICABLE, @@ -227,7 +242,7 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: @staticmethod def export_data( - user_id: str + user_id: str ) -> Dict[str, Union[str, float, bool, List[str], None]]: """Exports the data from UserSettingsModel into dict format for Takeout. @@ -238,8 +253,6 @@ def export_data( dict. Dictionary of the data from UserSettingsModel. 
""" user = UserSettingsModel.get(user_id) - # Ruling out the possibility of None for mypy type checking. - assert user is not None return { 'email': user.email, 'roles': user.roles, @@ -288,7 +301,11 @@ def export_data( 'preferred_language_codes': user.preferred_language_codes, 'preferred_site_language_code': user.preferred_site_language_code, 'preferred_audio_language_code': user.preferred_audio_language_code, + 'preferred_translation_language_code': ( + user.preferred_translation_language_code), 'display_alias': user.display_alias, + 'has_viewed_lesson_info_modal_once': ( + user.has_viewed_lesson_info_modal_once) } @classmethod @@ -684,7 +701,7 @@ def create( return cls( id=instance_id, user_id=user_id, exploration_id=exploration_id) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get(). @classmethod def get( # type: ignore[override] @@ -1113,8 +1130,21 @@ def apply_deletion_policy(cls, user_id: str) -> None: Args: user_id: str. The ID of the user whose data should be deleted. """ - keys = cls.query(cls.creator_ids == user_id).fetch(keys_only=True) - datastore_services.delete_multi(keys) + user_subscriptions_models: List[UserSubscriptionsModel] = list( + cls.query( + cls.creator_ids == user_id + ).fetch() + ) + + for user_subscribers_model in user_subscriptions_models: + user_subscribers_model.creator_ids.remove(user_id) + + # Delete the references to this user from other user subscriptions + # models. + cls.update_timestamps_multi(user_subscriptions_models) + cls.put_multi(user_subscriptions_models) + + # Delete the model for the user. 
cls.delete_by_id(user_id) @classmethod @@ -1130,8 +1160,10 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: """ return ( cls.query( - cls.creator_ids == user_id).get(keys_only=True) is not None or - cls.get_by_id(user_id) is not None) + cls.creator_ids == user_id + ).get(keys_only=True) is not None or + cls.get_by_id(user_id) is not None + ) @staticmethod def export_data(user_id: str) -> Dict[str, Union[List[str], float, None]]: @@ -1142,6 +1174,9 @@ def export_data(user_id: str) -> Dict[str, Union[List[str], float, None]]: Returns: dict. Dictionary of data from UserSubscriptionsModel. + + Raises: + Exception. No UserSettingsModel exist for the given creator_id. """ user_model = UserSubscriptionsModel.get(user_id, strict=False) @@ -1150,11 +1185,16 @@ def export_data(user_id: str) -> Dict[str, Union[List[str], float, None]]: # Ruling out the possibility of None for mypy type checking. assert user_model is not None - creator_user_models = cast( - List[UserSettingsModel], - UserSettingsModel.get_multi(user_model.creator_ids)) + creator_user_models = ( + UserSettingsModel.get_multi(user_model.creator_ids) + ) + filtered_creator_user_models = [] + for creator_user_model in creator_user_models: + if creator_user_model is None: + continue + filtered_creator_user_models.append(creator_user_model) creator_usernames = [ - creator.username for creator in creator_user_models] + creator.username for creator in filtered_creator_user_models] user_data = { 'exploration_ids': user_model.exploration_ids, @@ -1192,8 +1232,18 @@ def apply_deletion_policy(cls, user_id: str) -> None: Args: user_id: str. The ID of the user whose data should be deleted. 
""" - keys = cls.query(cls.subscriber_ids == user_id).fetch(keys_only=True) - datastore_services.delete_multi(keys) + user_subscribers_models: List[UserSubscribersModel] = list(cls.query( + cls.subscriber_ids == user_id + ).fetch()) + + for user_subscribers_model in user_subscribers_models: + user_subscribers_model.subscriber_ids.remove(user_id) + + # Delete the references to this user from other user subscribers models. + cls.update_timestamps_multi(user_subscribers_models) + cls.put_multi(user_subscribers_models) + + # Delete the model for the user. cls.delete_by_id(user_id) @classmethod @@ -1475,6 +1525,18 @@ class ExplorationUserDataModel(base_models.BaseModel): # The user's preference for receiving feedback emails for this exploration. mute_feedback_notifications = datastore_services.BooleanProperty( default=feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE) + # The state name of the furthest reached checkpoint. + furthest_reached_checkpoint_state_name = datastore_services.StringProperty( + default=None) + # The exploration version of the furthest reached checkpoint. + furthest_reached_checkpoint_exp_version = ( + datastore_services.IntegerProperty(default=None)) + # The state name of the most recently reached checkpoint. + most_recently_reached_checkpoint_state_name = ( + datastore_services.StringProperty(default=None)) + # The exploration version of the most recently reached checkpoint. 
+ most_recently_reached_checkpoint_exp_version = ( + datastore_services.IntegerProperty(default=None)) @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: @@ -1530,6 +1592,14 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: 'mute_suggestion_notifications': base_models.EXPORT_POLICY.EXPORTED, 'mute_feedback_notifications': + base_models.EXPORT_POLICY.EXPORTED, + 'furthest_reached_checkpoint_state_name': + base_models.EXPORT_POLICY.EXPORTED, + 'furthest_reached_checkpoint_exp_version': + base_models.EXPORT_POLICY.EXPORTED, + 'most_recently_reached_checkpoint_state_name': + base_models.EXPORT_POLICY.EXPORTED, + 'most_recently_reached_checkpoint_exp_version': base_models.EXPORT_POLICY.EXPORTED }) @@ -1581,7 +1651,7 @@ def create( return cls( id=instance_id, user_id=user_id, exploration_id=exploration_id) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get(). @classmethod def get( # type: ignore[override] @@ -1602,25 +1672,28 @@ def get( # type: ignore[override] return super(ExplorationUserDataModel, cls).get( instance_id, strict=False) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get_multi(). @classmethod def get_multi( # type: ignore[override] - cls, user_ids: List[str], exploration_id: str + cls, user_id_exp_id_combinations: List[Tuple[str, str]] ) -> List[Optional[ExplorationUserDataModel]]: - """Gets the ExplorationUserDataModel for the given user and exploration - ids. + """Gets all ExplorationUserDataModels for the given pairs of user ids + and exploration ids. Args: - user_ids: list(str). A list of user_ids. - exploration_id: str. The id of the exploration. + user_id_exp_id_combinations: list(tuple(str, str)). 
A list of + combinations of user_id and exploration_id pairs for which + ExplorationUserDataModels are to be fetched. Returns: list(ExplorationUserDataModel|None). The ExplorationUserDataModel - instance which matches with the given user_ids and exploration_id. + instance which matches with the given user_ids and exploration_ids. """ instance_ids = [ - cls._generate_id(user_id, exploration_id) for user_id in user_ids] + cls._generate_id(user_id, exploration_id) + for (user_id, exploration_id) in user_id_exp_id_combinations + ] return super(ExplorationUserDataModel, cls).get_multi(instance_ids) @@ -1661,7 +1734,15 @@ def export_data( 'mute_suggestion_notifications': ( user_model.mute_suggestion_notifications), 'mute_feedback_notifications': ( - user_model.mute_feedback_notifications) + user_model.mute_feedback_notifications), + 'furthest_reached_checkpoint_exp_version': ( + user_model.furthest_reached_checkpoint_exp_version), + 'furthest_reached_checkpoint_state_name': ( + user_model.furthest_reached_checkpoint_state_name), + 'most_recently_reached_checkpoint_exp_version': ( + user_model.most_recently_reached_checkpoint_exp_version), + 'most_recently_reached_checkpoint_state_name': ( + user_model.most_recently_reached_checkpoint_state_name) } return user_data @@ -1773,7 +1854,7 @@ def create( return cls( id=instance_id, user_id=user_id, collection_id=collection_id) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get(). @classmethod def get( # type: ignore[override] @@ -1794,7 +1875,7 @@ def get( # type: ignore[override] return super(CollectionProgressModel, cls).get( instance_id, strict=False) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get_multi(). 
@classmethod def get_multi( # type: ignore[override] @@ -1960,7 +2041,33 @@ def create(cls, user_id: str, story_id: str) -> StoryProgressModel: return cls( id=instance_id, user_id=user_id, story_id=story_id) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method + # doesn't match with BaseModel.get_multi(). + @overload # type: ignore[override] + @classmethod + def get( + cls, user_id: str, story_id: str + ) -> StoryProgressModel: ... + + @overload + @classmethod + def get( + cls, user_id: str, story_id: str, *, strict: Literal[True] + ) -> StoryProgressModel: ... + + @overload + @classmethod + def get( + cls, user_id: str, story_id: str, *, strict: Literal[False] + ) -> Optional[StoryProgressModel]: ... + + @overload + @classmethod + def get( + cls, user_id: str, story_id: str, *, strict: bool = ... + ) -> Optional[StoryProgressModel]: ... + + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get(). @classmethod def get( # type: ignore[override] @@ -1983,25 +2090,28 @@ def get( # type: ignore[override] return super(StoryProgressModel, cls).get( instance_id, strict=strict) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get_multi(). @classmethod def get_multi( # type: ignore[override] - cls, user_id: str, story_ids: List[str] + cls, user_ids: List[str], story_ids: List[str] ) -> List[Optional[StoryProgressModel]]: - """Gets the StoryProgressModels for the given user and story + """Gets the StoryProgressModels for the given user ids and story ids. Args: - user_id: str. The id of the user. + user_ids: list(str). The ids of the users. story_ids: list(str). The ids of the stories. Returns: list(StoryProgressModel|None). The list of StoryProgressModel - instances which matches the given user_id and story_ids. 
+ instances which matches the given user_ids and story_ids. """ - instance_ids = [cls._generate_id(user_id, story_id) - for story_id in story_ids] + all_posssible_combinations = itertools.product(user_ids, story_ids) + instance_ids = [ + cls._generate_id(user_id, story_id) + for (user_id, story_id) in all_posssible_combinations + ] return super(StoryProgressModel, cls).get_multi( instance_ids) @@ -2061,8 +2171,8 @@ class UserQueryModel(base_models.BaseModel): shown after each UserQueryOneOffJob. """ - _use_cache = False - _use_memcache = False + _use_cache: bool = False + _use_memcache: bool = False # Options for a query specified by query submitter. # Query option to specify whether user has created or edited one or more # explorations in last n days. This only returns users who have ever @@ -2103,6 +2213,10 @@ class UserQueryModel(base_models.BaseModel): def get_deletion_policy() -> base_models.DELETION_POLICY: """Model contains data to delete corresponding to a user: user_ids and submitter_id fields. + + This model is marked as deleted after a period of time after its + creation. See MODEL_CLASSES_TO_MARK_AS_DELETED and + mark_outdated_models_as_deleted() in cron_services.py. """ return base_models.DELETION_POLICY.DELETE @@ -2226,10 +2340,8 @@ class UserBulkEmailsModel(base_models.BaseModel): @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: - """Model contains data corresponding to a user: id field, but it isn't - deleted because it is needed for auditing purposes. - """ - return base_models.DELETION_POLICY.KEEP + """Model contains data corresponding to a user: id field.""" + return base_models.DELETION_POLICY.DELETE @classmethod def has_reference_to_user_id(cls, user_id: str) -> bool: @@ -2243,6 +2355,15 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: """ return cls.get_by_id(user_id) is not None + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """Delete instance of UserBulkEmailsModel for the user. 
+ + Args: + user_id: str. The ID of the user whose data should be deleted. + """ + cls.delete_by_id(user_id) + @staticmethod def get_model_association_to_user( ) -> base_models.MODEL_ASSOCIATION_TO_USER: @@ -2404,8 +2525,8 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: @classmethod def export_data( - cls, - user_id: str + cls, + user_id: str ) -> Dict[str, Dict[str, Union[float, bool]]]: """(Takeout) Exports the data from UserContributionProficiencyModel into dict format. @@ -2450,8 +2571,8 @@ def has_reference_to_user_id(cls, user_id: str) -> bool: @classmethod def get_all_categories_where_user_can_review( - cls, - user_id: str + cls, + user_id: str ) -> List[str]: """Gets all the score categories where the user has a score above the threshold. @@ -2519,7 +2640,7 @@ def _get_instance_id(cls, user_id: str, score_category: str) -> str: """ return '.'.join([score_category, user_id]) - # We have ignored [override] here because the signature of this method + # Here we use MyPy ignore because the signature of this method # doesn't match with BaseModel.get(). @classmethod def get( # type: ignore[override] @@ -2622,8 +2743,8 @@ def apply_deletion_policy(cls, user_id: str) -> None: @classmethod def export_data( - cls, - user_id: str + cls, + user_id: str ) -> Dict[str, Union[bool, List[str], None]]: """(Takeout) Exports the data from UserContributionRightsModel into dict format. @@ -2668,8 +2789,8 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: @classmethod def get_translation_reviewer_user_ids( - cls, - language_code: str + cls, + language_code: str ) -> List[str]: """Returns the IDs of the users who have rights to review translations in the given language code. @@ -2753,7 +2874,7 @@ class PendingDeletionRequestModel(base_models.BaseModel): # A dict mapping model IDs to pseudonymous user IDs. Each type of entity # is grouped under different key (e.g. 
config, feedback, story, skill, - # question), the keys need to be from the core.platform.models.NAMES enum. + # question), the keys need to be from the core.platform.models.Names enum. # For each entity, we use a different pseudonymous user ID. Note that all # these pseudonymous user IDs originate from the same about-to-be-deleted # user. If a key is absent from the pseudonymizable_entity_mappings dict, @@ -2919,7 +3040,7 @@ class DeletedUsernameModel(base_models.BaseModel): in the ID of this model. """ - ID_LENGTH = 32 + ID_LENGTH: Final = 32 @staticmethod def get_deletion_policy() -> base_models.DELETION_POLICY: @@ -2945,3 +3066,153 @@ def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: """ empty_dict: Dict[str, base_models.EXPORT_POLICY] = {} return dict(super(cls, cls).get_export_policy(), **empty_dict) + + +class LearnerGroupUserDetailsDict(TypedDict): + """Dictionary for user details of a particular learner group to export.""" + + group_id: str + progress_sharing_is_turned_on: bool + + +class LearnerGroupsUserDataDict(TypedDict): + """Dictionary for user data to export.""" + + invited_to_learner_groups_ids: List[str] + learner_groups_user_details: List[LearnerGroupUserDetailsDict] + + +class LearnerGroupsUserModel(base_models.BaseModel): + """Model for storing user's learner groups related data. + + Instances of this class are keyed by the user id. + """ + + # List of learner group ids which the learner has been invited to join. + invited_to_learner_groups_ids = ( + datastore_services.StringProperty(repeated=True, indexed=True)) + # List of LearnerGroupUserDetailsDict, each dict corresponds to a learner + # group and has details of the user corresponding to that group. + learner_groups_user_details = ( + datastore_services.JsonProperty(repeated=True, indexed=False)) + # Version of learner group details blob schema.
+ learner_groups_user_details_schema_version = ( + datastore_services.IntegerProperty( + required=True, default=0, indexed=True)) + + @staticmethod + def get_deletion_policy() -> base_models.DELETION_POLICY: + """Model contains data to delete corresponding to a user: id field.""" + return base_models.DELETION_POLICY.DELETE + + @classmethod + def has_reference_to_user_id(cls, user_id: str) -> bool: + """Check whether LearnerGroupsUserModel exists for the given user. + + Args: + user_id: str. The ID of the user whose data should be checked. + + Returns: + bool. Whether any models refer to the given user ID. + """ + return cls.get_by_id(user_id) is not None + + @classmethod + def apply_deletion_policy(cls, user_id: str) -> None: + """Delete instances of LearnerGroupsUserModel for the user. + + Args: + user_id: str. The ID of the user whose data should be deleted. + """ + cls.delete_by_id(user_id) + + # Here we use MyPy ignore because the signature of this method + # doesn't match with BaseModel.export_data(). + @classmethod + def export_data(cls, user_id: str) -> LearnerGroupsUserDataDict: # type: ignore[override] + """(Takeout) Exports the data from LearnerGroupsUserModel + into dict format. + + Args: + user_id: str. The ID of the user whose data should be exported. + + Returns: + dict. Dictionary of the data from LearnerGroupsUserModel. 
+ """ + learner_grp_user_model = cls.get_by_id(user_id) + + if learner_grp_user_model is None: + return {} + + return { + 'invited_to_learner_groups_ids': ( + learner_grp_user_model.invited_to_learner_groups_ids), + 'learner_groups_user_details': ( + learner_grp_user_model.learner_groups_user_details) + } + + @staticmethod + def get_model_association_to_user( + ) -> base_models.MODEL_ASSOCIATION_TO_USER: + """Model is exported as one instance per user.""" + return base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER + + @classmethod + def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]: + """Model contains data to export corresponding to a user.""" + return dict(super(cls, cls).get_export_policy(), **{ + 'invited_to_learner_groups_ids': + base_models.EXPORT_POLICY.EXPORTED, + 'learner_groups_user_details': + base_models.EXPORT_POLICY.EXPORTED, + 'learner_groups_user_details_schema_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE + }) + + @classmethod + def delete_learner_group_references( + cls, group_id: str, user_ids: List[str] + ) -> None: + """Delete all references of given learner group stored in learner + groups user model. + + Args: + group_id: str. The group_id denotes which group's reference to + delete. + user_ids: list(str). The user_ids denotes ids of users that were + referenced in the given group. + """ + found_models = cls.get_multi(user_ids) + + learner_groups_user_models_to_put = [] + + for learner_grp_usr_model in found_models: + if learner_grp_usr_model is None: + continue + + # If the user has been invited to join the group as learner, delete + # the group id from the invited_to_learner_groups_ids list. + if ( + group_id in learner_grp_usr_model.invited_to_learner_groups_ids + ): + learner_grp_usr_model.invited_to_learner_groups_ids.remove( + group_id) + + # If the user is a learner of the group, delete the corresponding + # learner group details of the learner stored in + # learner_groups_user_details field. 
+ updated_details = [] + + for learner_group_details in ( + learner_grp_usr_model.learner_groups_user_details + ): + if learner_group_details['group_id'] != group_id: + updated_details.append(learner_group_details) + + learner_grp_usr_model.learner_groups_user_details = ( + updated_details) + + learner_groups_user_models_to_put.append(learner_grp_usr_model) + + cls.update_timestamps_multi(learner_groups_user_models_to_put) + cls.put_multi(learner_groups_user_models_to_put) diff --git a/core/storage/user/gae_models_test.py b/core/storage/user/gae_models_test.py index 03e1bab0b55a..fbab864f4d22 100644 --- a/core/storage/user/gae_models_test.py +++ b/core/storage/user/gae_models_test.py @@ -28,44 +28,47 @@ from core.platform import models from core.tests import test_utils -from typing import Dict, List, Set, Union +from typing import Dict, Final, List, Set, Union MYPY = False if MYPY: # pragma: no cover from mypy_imports import base_models from mypy_imports import user_models -(base_models, user_models) = models.Registry.import_models( - [models.NAMES.base_model, models.NAMES.user]) +(base_models, user_models) = models.Registry.import_models([ + models.Names.BASE_MODEL, models.Names.USER +]) class UserSettingsModelTest(test_utils.GenericTestBase): """Tests for UserSettingsModel class.""" - NONEXISTENT_USER_ID = 'id_x' - USER_1_ID = 'user_id' - USER_1_EMAIL = 'user@example.com' - USER_1_ROLE = feconf.ROLE_ID_CURRICULUM_ADMIN - USER_2_ID = 'user2_id' - USER_2_EMAIL = 'user2@example.com' - USER_3_ID = 'user3_id' - USER_3_EMAIL = 'user3@example.com' - USER_3_ROLE = feconf.ROLE_ID_CURRICULUM_ADMIN - GENERIC_PIN = '12345' - PROFILE_1_ID = 'profile_id' - PROFILE_1_EMAIL = 'user@example.com' - PROFILE_1_ROLE = feconf.ROLE_ID_MOBILE_LEARNER - GENERIC_USERNAME = 'user' - GENERIC_DATE = datetime.datetime(2019, 5, 20) - GENERIC_EPOCH = utils.get_time_in_millisecs(datetime.datetime(2019, 5, 20)) - GENERIC_IMAGE_URL = 'www.example.com/example.png' - GENERIC_USER_BIO = 'I am a user 
of Oppia!' - GENERIC_SUBJECT_INTERESTS = ['Math', 'Science'] - GENERIC_LANGUAGE_CODES = ['en', 'es'] - GENERIC_DISPLAY_ALIAS = 'display_alias' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_1_ID: Final = 'user_id' + USER_1_EMAIL: Final = 'user@example.com' + USER_1_ROLE: Final = feconf.ROLE_ID_CURRICULUM_ADMIN + USER_2_ID: Final = 'user2_id' + USER_2_EMAIL: Final = 'user2@example.com' + USER_3_ID: Final = 'user3_id' + USER_3_EMAIL: Final = 'user3@example.com' + USER_3_ROLE: Final = feconf.ROLE_ID_CURRICULUM_ADMIN + GENERIC_PIN: Final = '12345' + PROFILE_1_ID: Final = 'profile_id' + PROFILE_1_EMAIL: Final = 'user@example.com' + PROFILE_1_ROLE: Final = feconf.ROLE_ID_MOBILE_LEARNER + GENERIC_USERNAME: Final = 'user' + GENERIC_DATE: Final = datetime.datetime(2019, 5, 20) + GENERIC_EPOCH: Final = utils.get_time_in_millisecs( + datetime.datetime(2019, 5, 20) + ) + GENERIC_IMAGE_URL: Final = 'www.example.com/example.png' + GENERIC_USER_BIO: Final = 'I am a user of Oppia!' + GENERIC_SUBJECT_INTERESTS: Final = ['Math', 'Science'] + GENERIC_LANGUAGE_CODES: Final = ['en', 'es'] + GENERIC_DISPLAY_ALIAS: Final = 'display_alias' def setUp(self) -> None: - super(UserSettingsModelTest, self).setUp() + super().setUp() user_models.UserSettingsModel( id=self.USER_1_ID, email=self.USER_1_EMAIL, @@ -107,6 +110,7 @@ def setUp(self) -> None: preferred_language_codes=self.GENERIC_LANGUAGE_CODES, preferred_site_language_code=self.GENERIC_LANGUAGE_CODES[0], preferred_audio_language_code=self.GENERIC_LANGUAGE_CODES[0], + preferred_translation_language_code=self.GENERIC_LANGUAGE_CODES[0], display_alias=self.GENERIC_DISPLAY_ALIAS, pin=self.GENERIC_PIN ).put() @@ -116,8 +120,74 @@ def test_get_deletion_policy_is_delete(self) -> None: user_models.UserSettingsModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE_AT_END) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserSettingsModel.get_model_association_to_user(), + 
base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_get_field_names_for_takeout(self) -> None: + expected_results = { + 'last_agreed_to_terms': 'last_agreed_to_terms_msec', + 'last_started_state_editor_tutorial': + 'last_started_state_editor_tutorial_msec', + 'last_started_state_translation_tutorial': + 'last_started_state_translation_tutorial_msec', + 'last_logged_in': 'last_logged_in_msec', + 'last_edited_an_exploration': 'last_edited_an_exploration_msec', + 'last_created_an_exploration': 'last_created_an_exploration_msec' + } + self.assertEqual( + user_models.UserSettingsModel.get_field_names_for_takeout(), + expected_results) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserSettingsModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'email': base_models.EXPORT_POLICY.EXPORTED, + 'last_agreed_to_terms': base_models.EXPORT_POLICY.EXPORTED, + 'roles': base_models.EXPORT_POLICY.EXPORTED, + 'banned': base_models.EXPORT_POLICY.EXPORTED, + 'last_logged_in': base_models.EXPORT_POLICY.EXPORTED, + 'display_alias': base_models.EXPORT_POLICY.EXPORTED, + 'user_bio': base_models.EXPORT_POLICY.EXPORTED, + 'profile_picture_data_url': + base_models.EXPORT_POLICY.EXPORTED, + 'subject_interests': base_models.EXPORT_POLICY.EXPORTED, + 'preferred_language_codes': + base_models.EXPORT_POLICY.EXPORTED, + 'preferred_site_language_code': + base_models.EXPORT_POLICY.EXPORTED, + 'preferred_audio_language_code': + base_models.EXPORT_POLICY.EXPORTED, + 'preferred_translation_language_code': + base_models.EXPORT_POLICY.EXPORTED, + 'username': base_models.EXPORT_POLICY.EXPORTED, + 'normalized_username': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_started_state_editor_tutorial': + base_models.EXPORT_POLICY.EXPORTED, + 'last_started_state_translation_tutorial': + 
base_models.EXPORT_POLICY.EXPORTED, + 'last_edited_an_exploration': + base_models.EXPORT_POLICY.EXPORTED, + 'last_created_an_exploration': + base_models.EXPORT_POLICY.EXPORTED, + 'default_dashboard': base_models.EXPORT_POLICY.EXPORTED, + 'creator_dashboard_display_pref': + base_models.EXPORT_POLICY.EXPORTED, + 'first_contribution_msec': + base_models.EXPORT_POLICY.EXPORTED, + 'pin': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'role': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'has_viewed_lesson_info_modal_once': + base_models.EXPORT_POLICY.EXPORTED + } + ) + def test_apply_deletion_policy_for_registered_users_deletes_them( - self + self ) -> None: # Case for a full user. self.assertIsNotNone( @@ -141,7 +211,7 @@ def test_apply_deletion_policy_for_banned_user_deletes_them(self) -> None: user_models.UserSettingsModel.get_by_id(self.USER_2_ID)) def test_apply_deletion_policy_nonexistent_user_raises_no_exception( - self + self ) -> None: self.assertIsNone( user_models.UserSettingsModel.get_by_id(self.NONEXISTENT_USER_ID)) @@ -173,24 +243,47 @@ def test_has_reference_to_non_existing_user_id_is_false(self) -> None: .has_reference_to_user_id(self.NONEXISTENT_USER_ID) ) + def test_get_by_normalized_username_valid_username(self) -> None: + actual_user = user_models.UserSettingsModel.get_by_id(self.USER_3_ID) + self.assertEqual( + user_models.UserSettingsModel.get_by_normalized_username( + self.GENERIC_USERNAME), actual_user) + + def test_get_normalized_username_invalid_username(self) -> None: + invalid_username = 'user_x' + self.assertIsNone( + user_models.UserSettingsModel.get_by_normalized_username( + invalid_username)) + + def test_get_by_email_valid_user(self) -> None: + actual_user = user_models.UserSettingsModel.get_by_id(self.USER_3_ID) + self.assertEqual( + user_models.UserSettingsModel + .get_by_email(self.USER_3_EMAIL), actual_user) + + def test_get_by_email_invalid_user(self) -> None: + self.assertIsNone( + user_models.UserSettingsModel.get_by_email( + 
'invalid_user@example.com')) + def test_get_by_role_for_admin_returns_admin_users(self) -> None: actual_users = [ user_models.UserSettingsModel.get_by_id(self.USER_1_ID), user_models.UserSettingsModel.get_by_id(self.USER_3_ID) ] - self.assertItemsEqual( # type: ignore[no-untyped-call] + self.assertItemsEqual( user_models.UserSettingsModel.get_by_role( feconf.ROLE_ID_CURRICULUM_ADMIN), actual_users) def test_export_data_for_nonexistent_user_raises_exception(self) -> None: - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( user_models.UserSettingsModel.EntityNotFoundError, 'Entity for class UserSettingsModel with id fake_user not found'): user_models.UserSettingsModel.export_data('fake_user') def test_export_data_for_trivial_case_returns_data_correctly(self) -> None: - user = user_models.UserSettingsModel.get_by_id(self.USER_1_ID) - user_data = user.export_data(user.id) + user_model = user_models.UserSettingsModel.get_by_id(self.USER_1_ID) + user_data = user_model.export_data(user_model.id) expected_user_data = { 'email': 'user@example.com', 'roles': [feconf.ROLE_ID_CURRICULUM_ADMIN], @@ -212,15 +305,17 @@ def test_export_data_for_trivial_case_returns_data_correctly(self) -> None: 'preferred_language_codes': [], 'preferred_site_language_code': None, 'preferred_audio_language_code': None, - 'display_alias': None + 'preferred_translation_language_code': None, + 'display_alias': None, + 'has_viewed_lesson_info_modal_once': False } self.assertEqual(expected_user_data, user_data) def test_export_data_for_nontrivial_case_returns_data_correctly( - self + self ) -> None: - user = user_models.UserSettingsModel.get_by_id(self.USER_3_ID) - user_data = user.export_data(user.id) + user_model = user_models.UserSettingsModel.get_by_id(self.USER_3_ID) + user_data = user_model.export_data(user_model.id) expected_user_data = { 'email': self.USER_3_EMAIL, 'roles': [feconf.ROLE_ID_CURRICULUM_ADMIN], @@ -242,7 +337,10 @@ def 
test_export_data_for_nontrivial_case_returns_data_correctly( 'preferred_language_codes': self.GENERIC_LANGUAGE_CODES, 'preferred_site_language_code': self.GENERIC_LANGUAGE_CODES[0], 'preferred_audio_language_code': self.GENERIC_LANGUAGE_CODES[0], - 'display_alias': self.GENERIC_DISPLAY_ALIAS + 'preferred_translation_language_code': ( + self.GENERIC_LANGUAGE_CODES[0]), + 'display_alias': self.GENERIC_DISPLAY_ALIAS, + 'has_viewed_lesson_info_modal_once': False } self.assertEqual(expected_user_data, user_data) @@ -261,7 +359,7 @@ def test_get_new_id_with_deleted_user_model(self) -> None: user_models.DeletedUserModel, 'get_by_id', types.MethodType( lambda _, __: True, user_models.DeletedUserModel)) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( # type: ignore[no-untyped-call] + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'New id generator is producing too many collisions.') with assert_raises_regexp_context_manager, get_by_id_swap: @@ -273,7 +371,7 @@ def test_get_new_id_for_too_many_collisions_raises_error(self) -> None: user_models.UserSettingsModel, 'get_by_id', types.MethodType( lambda _, __: True, user_models.UserSettingsModel)) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( # type: ignore[no-untyped-call] + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'New id generator is producing too many collisions.') with assert_raises_regexp_context_manager, get_by_id_swap: @@ -283,17 +381,17 @@ def test_get_new_id_for_too_many_collisions_raises_error(self) -> None: class CompletedActivitiesModelTests(test_utils.GenericTestBase): """Tests for the CompletedActivitiesModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_1_ID = 'id_1' - USER_2_ID = 'id_2' - EXPLORATION_IDS_1 = ['exp_1', 'exp_2', 'exp_3'] - COLLECTION_IDS_1 = ['col_1', 'col_2', 'col_3'] - STORY_IDS_1 = ['story_1', 'story_2', 'story_3'] - TOPIC_IDS_1 = ['topic_1', 'topic_2', 'topic_3'] + NONEXISTENT_USER_ID: Final = 'id_x' 
+ USER_1_ID: Final = 'id_1' + USER_2_ID: Final = 'id_2' + EXPLORATION_IDS_1: Final = ['exp_1', 'exp_2', 'exp_3'] + COLLECTION_IDS_1: Final = ['col_1', 'col_2', 'col_3'] + STORY_IDS_1: Final = ['story_1', 'story_2', 'story_3'] + TOPIC_IDS_1: Final = ['topic_1', 'topic_2', 'topic_3'] def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(CompletedActivitiesModelTests, self).setUp() + super().setUp() user_models.CompletedActivitiesModel( id=self.USER_1_ID, @@ -316,6 +414,26 @@ def test_get_deletion_policy(self) -> None: user_models.CompletedActivitiesModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.CompletedActivitiesModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.CompletedActivitiesModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_ids': base_models.EXPORT_POLICY.EXPORTED, + 'collection_ids': base_models.EXPORT_POLICY.EXPORTED, + 'story_ids': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'learnt_topic_ids': base_models.EXPORT_POLICY.EXPORTED, + 'mastered_topic_ids': base_models.EXPORT_POLICY.EXPORTED + } + ) + def test_apply_deletion_policy(self) -> None: user_models.CompletedActivitiesModel.apply_deletion_policy( self.USER_1_ID) @@ -363,17 +481,17 @@ def test_export_data_on_existent_user(self) -> None: class IncompleteActivitiesModelTests(test_utils.GenericTestBase): """Tests for the IncompleteActivitiesModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_1_ID = 'id_1' - USER_2_ID = 'id_2' - EXPLORATION_IDS_1 = ['exp_1', 'exp_2', 'exp_3'] - COLLECTION_IDS_1 = ['col_1', 'col_2', 'col_3'] - STORY_IDS_1 = ['story_1', 'story_2', 'story_3'] - 
TOPIC_IDS_1 = ['topic_1', 'topic_2', 'topic_3'] + NONEXISTENT_USER_ID: Final = 'id_x' + USER_1_ID: Final = 'id_1' + USER_2_ID: Final = 'id_2' + EXPLORATION_IDS_1: Final = ['exp_1', 'exp_2', 'exp_3'] + COLLECTION_IDS_1: Final = ['col_1', 'col_2', 'col_3'] + STORY_IDS_1: Final = ['story_1', 'story_2', 'story_3'] + TOPIC_IDS_1: Final = ['topic_1', 'topic_2', 'topic_3'] def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(IncompleteActivitiesModelTests, self).setUp() + super().setUp() user_models.IncompleteActivitiesModel( id=self.USER_1_ID, @@ -396,6 +514,28 @@ def test_get_deletion_policy(self) -> None: user_models.IncompleteActivitiesModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.IncompleteActivitiesModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.IncompleteActivitiesModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_ids': base_models.EXPORT_POLICY.EXPORTED, + 'collection_ids': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'story_ids': base_models.EXPORT_POLICY.EXPORTED, + 'partially_learnt_topic_ids': + base_models.EXPORT_POLICY.EXPORTED, + 'partially_mastered_topic_ids': ( + base_models.EXPORT_POLICY.EXPORTED) + } + ) + def test_apply_deletion_policy(self) -> None: user_models.IncompleteActivitiesModel.apply_deletion_policy( self.USER_1_ID) @@ -443,14 +583,14 @@ def test_export_data_on_existent_user(self) -> None: class LearnerGoalsModelTests(test_utils.GenericTestBase): """Tests for the LearnerGoalsModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_1_ID = 'id_1' - USER_2_ID = 'id_2' - TOPIC_IDS = ['topic_1', 'topic_2', 'topic_3'] + 
NONEXISTENT_USER_ID: Final = 'id_x' + USER_1_ID: Final = 'id_1' + USER_2_ID: Final = 'id_2' + TOPIC_IDS: Final = ['topic_1', 'topic_2', 'topic_3'] def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(LearnerGoalsModelTests, self).setUp() + super().setUp() user_models.LearnerGoalsModel( id=self.USER_1_ID, @@ -469,6 +609,22 @@ def test_get_deletion_policy(self) -> None: user_models.LearnerGoalsModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.LearnerGoalsModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.LearnerGoalsModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'topic_ids_to_learn': base_models.EXPORT_POLICY.EXPORTED, + 'topic_ids_to_master': base_models.EXPORT_POLICY.EXPORTED, + } + ) + def test_apply_deletion_policy(self) -> None: user_models.LearnerGoalsModel.apply_deletion_policy( self.USER_1_ID) @@ -513,18 +669,18 @@ def test_export_data_on_existent_user(self) -> None: class ExpUserLastPlaythroughModelTest(test_utils.GenericTestBase): """Tests for ExpUserLastPlaythroughModel class.""" - NONEXISTENT_USER_ID = 'user_id_0' - USER_ID_1 = 'user_id_1' - USER_ID_2 = 'user_id_2' - USER_ID_3 = 'user_id_3' - EXP_ID_0 = 'exp_id_0' - EXP_ID_1 = 'exp_id_1' - STATE_NAME_1 = 'state_name_1' - STATE_NAME_2 = 'state_name_2' - EXP_VERSION = 1 + NONEXISTENT_USER_ID: Final = 'user_id_0' + USER_ID_1: Final = 'user_id_1' + USER_ID_2: Final = 'user_id_2' + USER_ID_3: Final = 'user_id_3' + EXP_ID_0: Final = 'exp_id_0' + EXP_ID_1: Final = 'exp_id_1' + STATE_NAME_1: Final = 'state_name_1' + STATE_NAME_2: Final = 'state_name_2' + EXP_VERSION: Final = 1 def 
setUp(self) -> None: - super(ExpUserLastPlaythroughModelTest, self).setUp() + super().setUp() user_models.ExpUserLastPlaythroughModel( id='%s.%s' % (self.USER_ID_1, self.EXP_ID_0), @@ -561,6 +717,27 @@ def test_get_deletion_policy(self) -> None: user_models.ExpUserLastPlaythroughModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.ExpUserLastPlaythroughModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.ExpUserLastPlaythroughModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': + base_models.EXPORT_POLICY.EXPORTED_AS_KEY_FOR_TAKEOUT_DICT, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_played_exp_version': + base_models.EXPORT_POLICY.EXPORTED, + 'last_played_state_name': base_models.EXPORT_POLICY.EXPORTED + } + ) + def test_apply_deletion_policy(self) -> None: user_models.ExpUserLastPlaythroughModel.apply_deletion_policy( self.USER_ID_1) @@ -614,7 +791,6 @@ def test_get_success(self) -> None: def test_get_failure(self) -> None: retrieved_object = user_models.ExpUserLastPlaythroughModel.get( self.USER_ID_1, 'unknown_exp_id') - self.assertEqual(retrieved_object, None) def test_export_data_none(self) -> None: @@ -656,15 +832,15 @@ def test_export_data_multi(self) -> None: class LearnerPlaylistModelTests(test_utils.GenericTestBase): """Tests for the LearnerPlaylistModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - EXPLORATION_IDS_1 = ['exp_1', 'exp_2', 'exp_3'] - COLLECTION_IDS_1 = ['col_1', 'col_2', 'col_3'] + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + 
EXPLORATION_IDS_1: Final = ['exp_1', 'exp_2', 'exp_3'] + COLLECTION_IDS_1: Final = ['col_1', 'col_2', 'col_3'] def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(LearnerPlaylistModelTests, self).setUp() + super().setUp() user_models.LearnerPlaylistModel( id=self.USER_ID_1, @@ -683,6 +859,22 @@ def test_get_deletion_policy(self) -> None: user_models.LearnerPlaylistModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.LearnerPlaylistModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_export_policy(self) -> None: + self.assertEqual( + user_models.LearnerPlaylistModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_ids': base_models.EXPORT_POLICY.EXPORTED, + 'collection_ids': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + } + ) + def test_apply_deletion_policy(self) -> None: user_models.LearnerPlaylistModel.apply_deletion_policy(self.USER_ID_1) self.assertIsNone( @@ -725,42 +917,42 @@ def test_export_data_on_existent_user(self) -> None: class UserContributionsModelTests(test_utils.GenericTestBase): """Tests for the UserContributionsModel class.""" - NONEXISTENT_USER_ID = 'id_x' - USER_C_ID = 'id_c' - USER_A_EMAIL = 'a@example.com' - USER_B_EMAIL = 'b@example.com' - USER_A_USERNAME = 'a' - USER_B_USERNAME = 'b' - EXP_ID_1 = 'exp_1' - EXP_ID_2 = 'exp_2' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_C_ID: Final = 'id_c' + USER_A_EMAIL: Final = 'a@example.com' + USER_B_EMAIL: Final = 'b@example.com' + USER_A_USERNAME: Final = 'a' + USER_B_USERNAME: Final = 'b' + EXP_ID_1: Final = 'exp_1' + EXP_ID_2: Final = 'exp_2' def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - 
super(UserContributionsModelTests, self).setUp() + super().setUp() # User A has no created explorations, one edited exploration. # User B has two created and edited explorations. self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME) - self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL) # type: ignore[no-untyped-call] + self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL) self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME) - self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL) # type: ignore[no-untyped-call] + self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL) # Note that creating an exploration counts as editing it. - self.save_new_valid_exploration( # type: ignore[no-untyped-call] + self.save_new_valid_exploration( self.EXP_ID_1, self.user_b_id, end_state_name='End') - exp_services.update_exploration( # type: ignore[no-untyped-call] - self.user_a_id, self.EXP_ID_1, [exp_domain.ExplorationChange( # type: ignore[no-untyped-call] + exp_services.update_exploration( + self.user_a_id, self.EXP_ID_1, [exp_domain.ExplorationChange( { 'cmd': 'edit_exploration_property', 'property_name': 'objective', 'new_value': 'the objective' })], 'Test edit') - self.save_new_valid_exploration( # type: ignore[no-untyped-call] + self.save_new_valid_exploration( self.EXP_ID_2, self.user_b_id, end_state_name='End') - exp_services.update_exploration( # type: ignore[no-untyped-call] - self.user_a_id, self.EXP_ID_2, [exp_domain.ExplorationChange( # type: ignore[no-untyped-call] + exp_services.update_exploration( + self.user_a_id, self.EXP_ID_2, [exp_domain.ExplorationChange( { 'cmd': 'edit_exploration_property', 'property_name': 'objective', @@ -777,6 +969,22 @@ def test_get_deletion_policy(self) -> None: user_models.UserContributionsModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserContributionsModel.get_model_association_to_user(), + 
base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserContributionsModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_exploration_ids': base_models.EXPORT_POLICY.EXPORTED, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'edited_exploration_ids': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + def test_apply_deletion_policy(self) -> None: user_models.UserContributionsModel.apply_deletion_policy(self.user_a_id) self.assertIsNone( @@ -830,14 +1038,14 @@ def test_export_data_on_highly_involved_user(self) -> None: class UserEmailPreferencesModelTests(test_utils.GenericTestBase): """Tests for the UserEmailPreferencesModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - USER_ID_3 = 'id_3' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_3: Final = 'id_3' def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(UserEmailPreferencesModelTests, self).setUp() + super().setUp() user_models.UserEmailPreferencesModel(id=self.USER_ID_1).put() user_models.UserEmailPreferencesModel( @@ -857,6 +1065,26 @@ def test_get_deletion_policy(self) -> None: user_models.UserEmailPreferencesModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserEmailPreferencesModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_export_policy(self) -> None: + self.assertEqual( + user_models.UserEmailPreferencesModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'site_updates': base_models.EXPORT_POLICY.EXPORTED, + 
'editor_role_notifications': base_models.EXPORT_POLICY.EXPORTED, + 'feedback_message_notifications': + base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'subscription_notifications': base_models.EXPORT_POLICY.EXPORTED + } + ) + def test_apply_deletion_policy(self) -> None: user_models.UserEmailPreferencesModel.apply_deletion_policy( self.USER_ID_1) @@ -907,27 +1135,32 @@ def test_export_data_nontrivial(self) -> None: } ) + def test_export_data_empty(self) -> None: + user_data = user_models.UserEmailPreferencesModel.export_data( + 'fake_user_id') + self.assertFalse(user_data) + class UserSubscriptionsModelTests(test_utils.GenericTestBase): """Tests for UserSubscriptionsModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID_1 = 'user_id_1' - USER_ID_2 = 'user_id_2' - USER_ID_3 = 'user_id_3' - USER_ID_4 = 'user_id_4' - USER_ID_5 = 'user_id_5' - USER_ID_6 = 'user_id_6' - CREATOR_IDS = [USER_ID_5, USER_ID_6] - CREATOR_USERNAMES = ['usernameuser_id_5', 'usernameuser_id_6'] - COLLECTION_IDS = ['23', '42', '4'] - EXPLORATION_IDS = ['exp_1', 'exp_2', 'exp_3'] - GENERAL_FEEDBACK_THREAD_IDS = ['42', '4', '8'] - GENERIC_DATETIME = datetime.datetime(2020, 6, 2) + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID_1: Final = 'user_id_1' + USER_ID_2: Final = 'user_id_2' + USER_ID_3: Final = 'user_id_3' + USER_ID_4: Final = 'user_id_4' + USER_ID_5: Final = 'user_id_5' + USER_ID_6: Final = 'user_id_6' + CREATOR_IDS: Final = [USER_ID_5, USER_ID_6] + CREATOR_USERNAMES: Final = ['usernameuser_id_5', 'usernameuser_id_6'] + COLLECTION_IDS: Final = ['23', '42', '4'] + EXPLORATION_IDS: Final = ['exp_1', 'exp_2', 'exp_3'] + GENERAL_FEEDBACK_THREAD_IDS: Final = ['42', '4', '8'] + GENERIC_DATETIME: Final = datetime.datetime(2020, 6, 2) def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(UserSubscriptionsModelTests, self).setUp() + super().setUp() user_models.UserSettingsModel( id=self.USER_ID_1, 
email='some@email.com' @@ -945,6 +1178,7 @@ def setUp(self) -> None: username='username' + creator_id, email=creator_id + '@example.com' ).put() + user_models.UserSubscriptionsModel(id=creator_id).put() user_models.UserSubscriptionsModel( id=self.USER_ID_2, @@ -960,15 +1194,86 @@ def setUp(self) -> None: deleted=True ).put() + def test_exclude_non_existing_creator_user_model_while_exporting_data( + self + ) -> None: + user_models.UserSettingsModel( + id='test_user', + email='some@email.com' + ).put() + test_creator_ids = self.CREATOR_IDS + ['Invalid_id'] + + user_models.UserSubscriptionsModel( + id='test_user', + creator_ids=test_creator_ids, + collection_ids=self.COLLECTION_IDS, + exploration_ids=self.EXPLORATION_IDS, + general_feedback_thread_ids=self.GENERAL_FEEDBACK_THREAD_IDS, + last_checked=self.GENERIC_DATETIME + ).put() + + exported_data = user_models.UserSubscriptionsModel.export_data( + 'test_user' + ) + + # Here we are deleting 'last_checked_msec', because this key contains + # the time stamp which can be different at the time of creation of model + # and checking the output. 
+ del exported_data['last_checked_msec'] + expected_dict = { + 'exploration_ids': ['exp_1', 'exp_2', 'exp_3'], + 'collection_ids': ['23', '42', '4'], + 'general_feedback_thread_ids': ['42', '4', '8'], + 'creator_usernames': ['usernameuser_id_5', 'usernameuser_id_6'] + } + self.assertEqual(expected_dict, exported_data) + def test_get_deletion_policy(self) -> None: self.assertEqual( user_models.UserSubscriptionsModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) - def test_apply_deletion_policy(self) -> None: + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserSubscriptionsModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER + ) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserSubscriptionsModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_ids': base_models.EXPORT_POLICY.EXPORTED, + 'collection_ids': base_models.EXPORT_POLICY.EXPORTED, + 'general_feedback_thread_ids': + base_models.EXPORT_POLICY.EXPORTED, + 'creator_ids': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_checked': base_models.EXPORT_POLICY.EXPORTED + } + ) + + def test_get_field_names_for_takeout(self) -> None: + self.assertEqual( + user_models.UserSubscriptionsModel.get_field_names_for_takeout(), { + 'creator_ids': 'creator_usernames', + 'last_checked': 'last_checked_msec' + } + ) + + def test_apply_deletion_policy_deletes_model_for_user(self) -> None: user_models.UserSubscriptionsModel.apply_deletion_policy(self.USER_ID_1) self.assertIsNone( user_models.UserSubscriptionsModel.get_by_id(self.USER_ID_1)) + + def test_apply_deletion_policy_deletes_user_from_creator_ids(self) -> None: + user_models.UserSubscriptionsModel.apply_deletion_policy(self.USER_ID_5) + user_subscriptions_model = ( + 
user_models.UserSubscriptionsModel.get_by_id(self.USER_ID_2)) + self.assertNotIn(self.USER_ID_5, user_subscriptions_model.creator_ids) + + def test_apply_deletion_policy_for_non_existing_user_passes(self) -> None: # Test that calling apply_deletion_policy with no existing model # doesn't fail. user_models.UserSubscriptionsModel.apply_deletion_policy( @@ -1041,14 +1346,14 @@ def test_export_data_on_nonexistent_user(self) -> None: class UserSubscribersModelTests(test_utils.GenericTestBase): """Tests for UserSubscribersModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - USER_ID_3 = 'id_3' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_3: Final = 'id_3' def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(UserSubscribersModelTests, self).setUp() + super().setUp() user_models.UserSettingsModel( id=self.USER_ID_1, @@ -1062,16 +1367,25 @@ def setUp(self) -> None: user_models.UserSubscribersModel( id=self.USER_ID_1, subscriber_ids=[self.USER_ID_3]).put() user_models.UserSubscribersModel(id=self.USER_ID_2, deleted=True).put() + user_models.UserSubscribersModel(id=self.USER_ID_3).put() def test_get_deletion_policy(self) -> None: self.assertEqual( user_models.UserSubscribersModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) - def test_apply_deletion_policy(self) -> None: + def test_apply_deletion_policy_deletes_model_for_user(self) -> None: user_models.UserSubscribersModel.apply_deletion_policy(self.USER_ID_1) self.assertIsNone( user_models.UserSubscribersModel.get_by_id(self.USER_ID_1)) + + def test_apply_deletion_policy_deletes_user_from_creator_ids(self) -> None: + user_models.UserSubscribersModel.apply_deletion_policy(self.USER_ID_3) + user_subscribers_model = ( + user_models.UserSubscribersModel.get_by_id(self.USER_ID_1)) + self.assertNotIn(self.USER_ID_3, user_subscribers_model.subscriber_ids) + + def 
test_apply_deletion_policy_for_non_existing_user_passes(self) -> None: # Test that calling apply_deletion_policy with no existing model # doesn't fail. user_models.UserSubscribersModel.apply_deletion_policy( @@ -1095,17 +1409,33 @@ def test_has_reference_to_user_id(self) -> None: .has_reference_to_user_id(self.NONEXISTENT_USER_ID) ) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserSubscribersModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserSubscribersModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'subscriber_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + class UserRecentChangesBatchModelTests(test_utils.GenericTestBase): """Tests for the UserRecentChangesBatchModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(UserRecentChangesBatchModelTests, self).setUp() + super().setUp() user_models.UserRecentChangesBatchModel(id=self.USER_ID_1).put() user_models.UserRecentChangesBatchModel( @@ -1142,20 +1472,37 @@ def test_has_reference_to_user_id(self) -> None: .has_reference_to_user_id(self.NONEXISTENT_USER_ID) ) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserRecentChangesBatchModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserRecentChangesBatchModel.get_export_policy(), { + 'created_on': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'output': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'job_queued_msec': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + class UserStatsModelTest(test_utils.GenericTestBase): """Tests for the UserStatsModel class.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - USER_ID_3 = 'id_3' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_3: Final = 'id_3' - USER_1_IMPACT_SCORE = 0.87 - USER_1_TOTAL_PLAYS = 33 - USER_1_AVERAGE_RATINGS = 4.37 - USER_1_NUM_RATINGS = 22 - USER_1_WEEKLY_CREATOR_STATS_LIST = [ + USER_1_IMPACT_SCORE: Final = 0.87 + USER_1_TOTAL_PLAYS: Final = 33 + USER_1_AVERAGE_RATINGS: Final = 4.37 + USER_1_NUM_RATINGS: Final = 22 + USER_1_WEEKLY_CREATOR_STATS_LIST: Final = [ { ('2019-05-21'): { 'average_ratings': 4.00, @@ -1170,11 +1517,11 @@ class UserStatsModelTest(test_utils.GenericTestBase): } ] - USER_2_IMPACT_SCORE = 0.33 - USER_2_TOTAL_PLAYS = 15 - USER_2_AVERAGE_RATINGS = 2.50 - USER_2_NUM_RATINGS = 10 - USER_2_WEEKLY_CREATOR_STATS_LIST = [ + USER_2_IMPACT_SCORE: Final = 0.33 + USER_2_TOTAL_PLAYS: Final = 15 + USER_2_AVERAGE_RATINGS: Final = 2.50 + USER_2_NUM_RATINGS: Final = 10 + USER_2_WEEKLY_CREATOR_STATS_LIST: Final = [ { ('2019-05-21'): { 'average_ratings': 2.50, @@ -1191,7 +1538,7 @@ class UserStatsModelTest(test_utils.GenericTestBase): def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(UserStatsModelTest, self).setUp() + super().setUp() user_models.UserStatsModel( id=self.USER_ID_1, @@ -1219,6 +1566,36 @@ def setUp(self) -> None: deleted=True ).put() + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserStatsModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def 
test_get_or_create_user_stats_model_success(self) -> None: + actual_user_existing = user_models.UserStatsModel.get_or_create( + self.USER_ID_1) + actual_user_new = user_models.UserStatsModel.get_or_create( + 'new_user_id') + self.assertEqual(actual_user_existing.id, self.USER_ID_1) + self.assertEqual( + actual_user_existing.impact_score, self.USER_1_IMPACT_SCORE) + self.assertEqual(actual_user_new.id, 'new_user_id') + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserStatsModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'impact_score': base_models.EXPORT_POLICY.EXPORTED, + 'total_plays': base_models.EXPORT_POLICY.EXPORTED, + 'average_ratings': base_models.EXPORT_POLICY.EXPORTED, + 'num_ratings': base_models.EXPORT_POLICY.EXPORTED, + 'weekly_creator_stats_list': base_models.EXPORT_POLICY.EXPORTED, + 'schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + def test_get_deletion_policy(self) -> None: self.assertEqual( user_models.UserStatsModel.get_deletion_policy(), @@ -1292,17 +1669,19 @@ def test_export_data_on_nonexistent_user(self) -> None: class ExplorationUserDataModelTest(test_utils.GenericTestBase): """Tests for the ExplorationUserDataModel class.""" - NONEXISTENT_USER_ID = 'id_x' - DATETIME_OBJECT = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d') - DATETIME_EPOCH = utils.get_time_in_millisecs(DATETIME_OBJECT) - USER_1_ID = 'id_1' - USER_2_ID = 'id_2' - EXP_ID_ONE = 'exp_id_one' - EXP_ID_TWO = 'exp_id_two' - EXP_ID_THREE = 'exp_id_three' + NONEXISTENT_USER_ID: Final = 'id_x' + DATETIME_OBJECT: Final = datetime.datetime.strptime( + '2016-02-16', '%Y-%m-%d' + ) + DATETIME_EPOCH: Final = utils.get_time_in_millisecs(DATETIME_OBJECT) + USER_1_ID: Final = 'id_1' + USER_2_ID: Final = 'id_2' + EXP_ID_ONE: Final = 'exp_id_one' + EXP_ID_TWO: Final = 'exp_id_two' + 
EXP_ID_THREE: Final = 'exp_id_three' def setUp(self) -> None: - super(ExplorationUserDataModelTest, self).setUp() + super().setUp() user_models.ExplorationUserDataModel( id='%s.%s' % (self.USER_1_ID, self.EXP_ID_ONE), user_id=self.USER_1_ID, @@ -1312,7 +1691,11 @@ def setUp(self) -> None: draft_change_list={'new_content': {}}, draft_change_list_last_updated=self.DATETIME_OBJECT, draft_change_list_exp_version=3, - draft_change_list_id=1 + draft_change_list_id=1, + furthest_reached_checkpoint_exp_version=1, + furthest_reached_checkpoint_state_name='checkpoint1', + most_recently_reached_checkpoint_exp_version=1, + most_recently_reached_checkpoint_state_name='checkpoint1' ).put() user_models.ExplorationUserDataModel.create( self.USER_1_ID, self.EXP_ID_TWO).put() @@ -1325,7 +1708,11 @@ def setUp(self) -> None: draft_change_list={'new_content': {}}, draft_change_list_last_updated=self.DATETIME_OBJECT, draft_change_list_exp_version=3, - draft_change_list_id=1 + draft_change_list_id=1, + furthest_reached_checkpoint_exp_version=1, + furthest_reached_checkpoint_state_name='checkpoint1', + most_recently_reached_checkpoint_exp_version=1, + most_recently_reached_checkpoint_state_name='checkpoint1' ).put() def test_get_deletion_policy(self) -> None: @@ -1346,6 +1733,54 @@ def test_apply_deletion_policy(self) -> None: user_models.ExplorationUserDataModel.apply_deletion_policy( self.NONEXISTENT_USER_ID) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.ExplorationUserDataModel + .get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + def test_get_field_names_for_takeout(self) -> None: + self.assertEqual( + user_models.ExplorationUserDataModel + .get_field_names_for_takeout(), { + 'rated_on': 'rated_on_msec', + 'draft_change_list_last_updated': + 'draft_change_list_last_updated_msec' + } + ) + + def test_get_export_policy(self) -> None: + self.assertEqual( + 
user_models.ExplorationUserDataModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'exploration_id': + base_models.EXPORT_POLICY.EXPORTED_AS_KEY_FOR_TAKEOUT_DICT, + 'rating': base_models.EXPORT_POLICY.EXPORTED, + 'rated_on': base_models.EXPORT_POLICY.EXPORTED, + 'draft_change_list': base_models.EXPORT_POLICY.EXPORTED, + 'draft_change_list_last_updated': + base_models.EXPORT_POLICY.EXPORTED, + 'draft_change_list_exp_version': + base_models.EXPORT_POLICY.EXPORTED, + 'draft_change_list_id': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'mute_suggestion_notifications': + base_models.EXPORT_POLICY.EXPORTED, + 'mute_feedback_notifications': + base_models.EXPORT_POLICY.EXPORTED, + 'furthest_reached_checkpoint_state_name': + base_models.EXPORT_POLICY.EXPORTED, + 'furthest_reached_checkpoint_exp_version': + base_models.EXPORT_POLICY.EXPORTED, + 'most_recently_reached_checkpoint_state_name': + base_models.EXPORT_POLICY.EXPORTED, + 'most_recently_reached_checkpoint_exp_version': + base_models.EXPORT_POLICY.EXPORTED + } + ) + def test_has_reference_to_user_id(self) -> None: self.assertTrue( user_models.ExplorationUserDataModel @@ -1393,6 +1828,30 @@ def test_get_failure(self) -> None: self.assertEqual(retrieved_object, None) + def test_get_multiple_exploration_model_success(self) -> None: + user_id_exp_id_combinations = [ + (self.USER_1_ID, self.EXP_ID_ONE), + (self.USER_2_ID, self.EXP_ID_ONE) + ] + retrieved_object = user_models.ExplorationUserDataModel.get_multi( + user_id_exp_id_combinations) + # Mypy Type checking for None. 
+ assert retrieved_object[0] is not None + assert retrieved_object[1] is not None + self.assertEqual(len(retrieved_object), 2) + self.assertEqual(retrieved_object[0].user_id, self.USER_1_ID) + self.assertEqual( + retrieved_object[0].id, + '%s.%s' % (self.USER_1_ID, self.EXP_ID_ONE)) + self.assertEqual( + retrieved_object[0].exploration_id, self.EXP_ID_ONE) + self.assertEqual(retrieved_object[1].user_id, self.USER_2_ID) + self.assertEqual( + retrieved_object[1].id, + '%s.%s' % (self.USER_2_ID, self.EXP_ID_ONE)) + self.assertEqual( + retrieved_object[1].exploration_id, self.EXP_ID_ONE) + def test_export_data_nonexistent_user(self) -> None: user_data = user_models.ExplorationUserDataModel.export_data( 'fake_user') @@ -1413,7 +1872,11 @@ def test_export_data_one_exploration(self) -> None: 'mute_suggestion_notifications': ( feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), 'mute_feedback_notifications': ( - feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE) + feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), + 'furthest_reached_checkpoint_exp_version': 1, + 'furthest_reached_checkpoint_state_name': 'checkpoint1', + 'most_recently_reached_checkpoint_exp_version': 1, + 'most_recently_reached_checkpoint_state_name': 'checkpoint1' } } self.assertDictEqual(expected_data, user_data) @@ -1429,7 +1892,11 @@ def test_export_data_multiple_explorations(self) -> None: draft_change_list={'new_content': {'content': 3}}, draft_change_list_last_updated=self.DATETIME_OBJECT, draft_change_list_exp_version=2, - draft_change_list_id=2).put() + draft_change_list_id=2, + furthest_reached_checkpoint_exp_version=1, + furthest_reached_checkpoint_state_name='checkpoint3', + most_recently_reached_checkpoint_exp_version=1, + most_recently_reached_checkpoint_state_name='checkpoint2').put() user_data = user_models.ExplorationUserDataModel.export_data( self.USER_1_ID) @@ -1445,7 +1912,11 @@ def test_export_data_multiple_explorations(self) -> None: 
'mute_suggestion_notifications': ( feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), 'mute_feedback_notifications': ( - feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE) + feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), + 'furthest_reached_checkpoint_exp_version': 1, + 'furthest_reached_checkpoint_state_name': 'checkpoint1', + 'most_recently_reached_checkpoint_exp_version': 1, + 'most_recently_reached_checkpoint_state_name': 'checkpoint1' }, self.EXP_ID_TWO: { 'rating': None, @@ -1457,7 +1928,11 @@ def test_export_data_multiple_explorations(self) -> None: 'mute_suggestion_notifications': ( feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), 'mute_feedback_notifications': ( - feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE) + feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), + 'furthest_reached_checkpoint_exp_version': None, + 'furthest_reached_checkpoint_state_name': None, + 'most_recently_reached_checkpoint_exp_version': None, + 'most_recently_reached_checkpoint_state_name': None }, self.EXP_ID_THREE: { 'rating': 5, @@ -1469,27 +1944,30 @@ def test_export_data_multiple_explorations(self) -> None: 'mute_suggestion_notifications': ( feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), 'mute_feedback_notifications': ( - feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE) + feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE), + 'furthest_reached_checkpoint_exp_version': 1, + 'furthest_reached_checkpoint_state_name': 'checkpoint3', + 'most_recently_reached_checkpoint_exp_version': 1, + 'most_recently_reached_checkpoint_state_name': 'checkpoint2' } } - self.assertDictEqual(expected_data, user_data) class CollectionProgressModelTests(test_utils.GenericTestBase): """Tests for CollectionProgressModel.""" - NONEXISTENT_USER_ID = 'user_id_x' - USER_ID_1 = 'user_id_1' - USER_ID_2 = 'user_id_2' - USER_ID_3 = 'user_id_3' - COLLECTION_ID_1 = 'col_id_1' - COLLECTION_ID_2 = 'col_id_2' - 
COMPLETED_EXPLORATION_IDS_1 = ['exp_id_1', 'exp_id_2', 'exp_id_3'] - COMPLETED_EXPLORATION_IDS_2 = ['exp_id_4', 'exp_id_5', 'exp_id_6'] + NONEXISTENT_USER_ID: Final = 'user_id_x' + USER_ID_1: Final = 'user_id_1' + USER_ID_2: Final = 'user_id_2' + USER_ID_3: Final = 'user_id_3' + COLLECTION_ID_1: Final = 'col_id_1' + COLLECTION_ID_2: Final = 'col_id_2' + COMPLETED_EXPLORATION_IDS_1: Final = ['exp_id_1', 'exp_id_2', 'exp_id_3'] + COMPLETED_EXPLORATION_IDS_2: Final = ['exp_id_4', 'exp_id_5', 'exp_id_6'] def setUp(self) -> None: - super(CollectionProgressModelTests, self).setUp() + super().setUp() user_models.CollectionProgressModel( id='%s.%s' % (self.USER_ID_1, self.COLLECTION_ID_1), @@ -1522,6 +2000,24 @@ def test_get_deletion_policy(self) -> None: user_models.CollectionProgressModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.CollectionProgressModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.CollectionProgressModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'collection_id': + base_models.EXPORT_POLICY.EXPORTED_AS_KEY_FOR_TAKEOUT_DICT, + 'completed_explorations': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + def test_apply_deletion_policy(self) -> None: user_models.CollectionProgressModel.apply_deletion_policy( self.USER_ID_1) @@ -1553,6 +2049,71 @@ def test_has_reference_to_user_id(self) -> None: .has_reference_to_user_id(self.NONEXISTENT_USER_ID) ) + def test_create_collection_progress_model_success(self) -> None: + retrieved_object = user_models.CollectionProgressModel.create( + self.USER_ID_1, self.COLLECTION_ID_1) + 
self.assertEqual(retrieved_object.user_id, self.USER_ID_1) + self.assertEqual( + retrieved_object.id, + '%s.%s' % (self.USER_ID_1, self.COLLECTION_ID_1)) + self.assertEqual(retrieved_object.collection_id, self.COLLECTION_ID_1) + + def test_get_collection_progress_model_success(self) -> None: + retrieved_object = user_models.CollectionProgressModel.get( + self.USER_ID_1, self.COLLECTION_ID_2) + assert retrieved_object is not None + self.assertEqual( + retrieved_object.id, + '%s.%s' % (self.USER_ID_1, self.COLLECTION_ID_2)) + self.assertEqual( + retrieved_object.user_id, self.USER_ID_1) + self.assertEqual( + retrieved_object.collection_id, self.COLLECTION_ID_2) + self.assertEqual( + retrieved_object.completed_explorations, + self.COMPLETED_EXPLORATION_IDS_2 + ) + + def test_get_collection_progress_model_failure(self) -> None: + retrieved_object = user_models.CollectionProgressModel.get( + self.USER_ID_1, 'fake_exp_id') + self.assertIsNone(retrieved_object) + + def test_get_multiple_collection_progress_model_success(self) -> None: + retrieved_object = user_models.CollectionProgressModel.get_multi( + self.USER_ID_1, [self.COLLECTION_ID_1, self.COLLECTION_ID_2]) + # Mypy checking for None. 
+ assert retrieved_object[0] is not None + assert retrieved_object[1] is not None + self.assertEqual(len(retrieved_object), 2) + self.assertEqual(retrieved_object[0].user_id, self.USER_ID_1) + self.assertEqual( + retrieved_object[0].id, + '%s.%s' % (self.USER_ID_1, self.COLLECTION_ID_1)) + self.assertEqual( + retrieved_object[0].collection_id, self.COLLECTION_ID_1) + self.assertEqual( + retrieved_object[1].collection_id, self.COLLECTION_ID_2) + self.assertEqual( + retrieved_object[1].id, + '%s.%s' % (self.USER_ID_1, self.COLLECTION_ID_2)) + self.assertEqual(retrieved_object[1].user_id, self.USER_ID_1) + + def test_get_or_create_collection_progress_model_success(self) -> None: + retrieved_object = user_models.CollectionProgressModel.get_or_create( + self.USER_ID_1, self.COLLECTION_ID_1) + self.assertIsNotNone(retrieved_object) + self.assertEqual(retrieved_object.user_id, self.USER_ID_1) + self.assertEqual( + retrieved_object.id, + '%s.%s' % (self.USER_ID_1, self.COLLECTION_ID_1)) + user_data_new = user_models.CollectionProgressModel.get_or_create( + 'new_user_id', 'new_coll_id') + self.assertIsNotNone(user_data_new) + self.assertEqual(user_data_new.user_id, 'new_user_id') + self.assertEqual( + user_data_new.id, 'new_user_id.new_coll_id') + def test_export_data_on_nonexistent_user(self) -> None: """Test export data on nonexistent user.""" user_data = user_models.CollectionProgressModel.export_data( @@ -1589,17 +2150,17 @@ def test_export_data_multiple_collections(self) -> None: class StoryProgressModelTests(test_utils.GenericTestBase): """Tests for StoryProgressModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - USER_ID_3 = 'id_3' - STORY_ID_1 = 'story_id_1' - STORY_ID_2 = 'story_id_2' - COMPLETED_NODE_IDS_1 = ['node_id_1', 'node_id_2'] - COMPLETED_NODE_IDS_2 = ['node_id_a'] + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + USER_ID_3: Final = 'id_3' + STORY_ID_1: Final = 'story_id_1' + 
STORY_ID_2: Final = 'story_id_2' + COMPLETED_NODE_IDS_1: Final = ['node_id_1', 'node_id_2'] + COMPLETED_NODE_IDS_2: Final = ['node_id_a'] def setUp(self) -> None: - super(StoryProgressModelTests, self).setUp() + super().setUp() user_models.StoryProgressModel( id='%s.%s' % (self.USER_ID_1, self.STORY_ID_1), user_id=self.USER_ID_1, @@ -1631,6 +2192,24 @@ def test_get_deletion_policy(self) -> None: user_models.StoryProgressModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.StoryProgressModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.StoryProgressModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'story_id': + base_models.EXPORT_POLICY.EXPORTED_AS_KEY_FOR_TAKEOUT_DICT, + 'completed_node_ids': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + def test_apply_deletion_policy(self) -> None: user_models.StoryProgressModel.apply_deletion_policy(self.USER_ID_2) self.assertIsNone( @@ -1690,6 +2269,23 @@ def test_export_data_on_multi_story(self) -> None: } self.assertEqual(expected_data, user_data) + def test_get_story_progress_model_success(self) -> None: + retrieved_object = user_models.StoryProgressModel.get( + self.USER_ID_1, self.STORY_ID_1) + + self.assertEqual(retrieved_object.user_id, self.USER_ID_1) + self.assertEqual(retrieved_object.story_id, self.STORY_ID_1) + self.assertEqual( + retrieved_object.id, '%s.%s' % (self.USER_ID_1, self.STORY_ID_1)) + self.assertEqual( + retrieved_object.completed_node_ids, self.COMPLETED_NODE_IDS_1) + + def test_get_story_progress_model_failure(self) -> None: + retrieved_object = 
user_models.StoryProgressModel.get( + 'unknown_user_id', + 'unknown_story_id', strict=False) + self.assertEqual(retrieved_object, None) + def test_get_multi(self) -> None: model = user_models.StoryProgressModel.create( 'user_id', 'story_id_1') @@ -1702,7 +2298,7 @@ def test_get_multi(self) -> None: model.put() story_progress_models = user_models.StoryProgressModel.get_multi( - 'user_id', ['story_id_1', 'story_id_2']) + ['user_id'], ['story_id_1', 'story_id_2']) # Ruling out the possibility of None for mypy type checking. assert story_progress_models[0] is not None assert story_progress_models[1] is not None @@ -1713,20 +2309,35 @@ def test_get_multi(self) -> None: self.assertEqual(story_progress_models[1].user_id, 'user_id') self.assertEqual(story_progress_models[1].story_id, 'story_id_2') + def test_get_or_create_story_progress_model(self) -> None: + story_progress_model = user_models.StoryProgressModel.get_or_create( + self.USER_ID_1, self.STORY_ID_1) + self.assertIsNotNone(story_progress_model) + self.assertEqual(story_progress_model.user_id, self.USER_ID_1) + self.assertEqual( + story_progress_model.id, + '%s.%s' % (self.USER_ID_1, self.STORY_ID_1)) + story_progress_model_new = user_models.StoryProgressModel.get_or_create( + 'new_user_id', 'new_story_id') + self.assertIsNotNone(story_progress_model_new) + self.assertEqual(story_progress_model_new.user_id, 'new_user_id') + self.assertEqual( + story_progress_model_new.id, 'new_user_id.new_story_id') + class UserQueryModelTests(test_utils.GenericTestBase): """Tests for UserQueryModel.""" - QUERY_1_ID = 'id_1' - QUERY_2_ID = 'id_2' - QUERY_3_ID = 'id_3' - NONEXISTENT_USER_ID = 'submitter_id_x' - USER_ID_1 = 'submitter_id_1' - USER_ID_2 = 'submitter_id_2' + QUERY_1_ID: Final = 'id_1' + QUERY_2_ID: Final = 'id_2' + QUERY_3_ID: Final = 'id_3' + NONEXISTENT_USER_ID: Final = 'submitter_id_x' + USER_ID_1: Final = 'submitter_id_1' + USER_ID_2: Final = 'submitter_id_2' def setUp(self) -> None: """Set up user models in 
datastore for use in testing.""" - super(UserQueryModelTests, self).setUp() + super().setUp() user_models.UserQueryModel( id=self.QUERY_1_ID, @@ -1747,6 +2358,38 @@ def test_get_deletion_policy(self) -> None: user_models.UserQueryModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserQueryModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER + ) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserQueryModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'inactive_in_last_n_days': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'has_not_logged_in_for_n_days': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_at_least_n_exps': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_fewer_than_n_exps': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'edited_at_least_n_exps': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'edited_fewer_than_n_exps': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'created_collection': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'submitter_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'sent_email_model_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'query_status': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + def test_apply_deletion_policy(self) -> None: user_models.UserQueryModel.apply_deletion_policy(self.USER_ID_1) self.assertIsNone( @@ -1793,8 +2436,6 @@ def test_instance_stores_correct_data(self) -> None: submitter_id=self.USER_ID_1).put() query_model = user_models.UserQueryModel.get(self.QUERY_1_ID) - # Ruling out the possibility of None for mypy type checking. 
- assert query_model is not None self.assertEqual(query_model.submitter_id, self.USER_ID_1) self.assertEqual( query_model.inactive_in_last_n_days, inactive_in_last_n_days) @@ -1894,13 +2535,13 @@ def test_fetch_page(self) -> None: class UserBulkEmailsModelTests(test_utils.GenericTestBase): """Tests for UserBulkEmailsModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(UserBulkEmailsModelTests, self).setUp() + super().setUp() user_models.UserBulkEmailsModel(id=self.USER_ID_1).put() user_models.UserBulkEmailsModel(id=self.USER_ID_2, deleted=True).put() @@ -1908,7 +2549,7 @@ def setUp(self) -> None: def test_get_deletion_policy(self) -> None: self.assertEqual( user_models.UserBulkEmailsModel.get_deletion_policy(), - base_models.DELETION_POLICY.KEEP) + base_models.DELETION_POLICY.DELETE) def test_has_reference_to_user_id(self) -> None: self.assertTrue( @@ -1924,19 +2565,46 @@ def test_has_reference_to_user_id(self) -> None: .has_reference_to_user_id(self.NONEXISTENT_USER_ID) ) + def test_apply_deletion_policy_deletes_model_for_user(self) -> None: + user_models.UserBulkEmailsModel.apply_deletion_policy( + self.USER_ID_1) + self.assertIsNone( + user_models.UserBulkEmailsModel.get_by_id(self.USER_ID_1)) + + def test_apply_deletion_policy_raises_no_exception_for_nonexistent_user( + self + ) -> None: + user_models.UserBulkEmailsModel.apply_deletion_policy( + self.NONEXISTENT_USER_ID) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserBulkEmailsModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserBulkEmailsModel.get_export_policy(), { + 'created_on': 
base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'sent_email_model_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + class UserSkillMasteryModelTests(test_utils.GenericTestBase): """Tests for UserSkillMasteryModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_1_ID = 'user_1_id' - USER_2_ID = 'user_2_id' - SKILL_ID_1 = 'skill_id_1' - SKILL_ID_2 = 'skill_id_2' - DEGREE_OF_MASTERY = 0.5 + NONEXISTENT_USER_ID: Final = 'id_x' + USER_1_ID: Final = 'user_1_id' + USER_2_ID: Final = 'user_2_id' + SKILL_ID_1: Final = 'skill_id_1' + SKILL_ID_2: Final = 'skill_id_2' + DEGREE_OF_MASTERY: Final = 0.5 def setUp(self) -> None: - super(UserSkillMasteryModelTests, self).setUp() + super().setUp() user_models.UserSkillMasteryModel( id=user_models.UserSkillMasteryModel.construct_model_id( self.USER_1_ID, self.SKILL_ID_1), @@ -1965,6 +2633,24 @@ def test_get_deletion_policy(self) -> None: user_models.UserSkillMasteryModel.get_deletion_policy(), base_models.DELETION_POLICY.DELETE) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserSkillMasteryModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserSkillMasteryModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'skill_id': + base_models.EXPORT_POLICY.EXPORTED_AS_KEY_FOR_TAKEOUT_DICT, + 'degree_of_mastery': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + def test_apply_deletion_policy(self) -> None: user_models.UserSkillMasteryModel.apply_deletion_policy(self.USER_1_ID) self.assertIsNone( @@ -2005,8 +2691,6 @@ def test_get_success(self) -> None: 
retrieved_object = user_models.UserSkillMasteryModel.get( constructed_model_id) - # Ruling out the possibility of None for mypy type checking. - assert retrieved_object is not None self.assertEqual(retrieved_object.user_id, self.USER_1_ID) self.assertEqual(retrieved_object.skill_id, self.SKILL_ID_1) self.assertEqual(retrieved_object.degree_of_mastery, 0.5) @@ -2065,17 +2749,17 @@ def test_export_data_nontrivial(self) -> None: class UserContributionProficiencyModelTests(test_utils.GenericTestBase): """Tests for UserContributionProficiencyModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_1_ID = 'user_1_id' - USER_2_ID = 'user_2_id' - USER_3_ID_OLD = 'user_3_id_old' - USER_3_ID_NEW = 'user_3_id_new' - SCORE_CATEGORY_1 = 'category_1' - SCORE_CATEGORY_2 = 'category_2' + NONEXISTENT_USER_ID: Final = 'id_x' + USER_1_ID: Final = 'user_1_id' + USER_2_ID: Final = 'user_2_id' + USER_3_ID_OLD: Final = 'user_3_id_old' + USER_3_ID_NEW: Final = 'user_3_id_new' + SCORE_CATEGORY_1: Final = 'category_1' + SCORE_CATEGORY_2: Final = 'category_2' def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(UserContributionProficiencyModelTests, self).setUp() + super().setUp() user_models.UserContributionProficiencyModel( id='%s.%s' % (self.SCORE_CATEGORY_1, self.USER_1_ID), @@ -2121,6 +2805,26 @@ def test_export_data_nontrivial(self) -> None: } self.assertEqual(user_data, expected_data) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserContributionProficiencyModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserContributionProficiencyModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'score_category': + base_models.EXPORT_POLICY.EXPORTED_AS_KEY_FOR_TAKEOUT_DICT, + 'score': base_models.EXPORT_POLICY.EXPORTED, + 'onboarding_email_sent': base_models.EXPORT_POLICY.EXPORTED + } + ) + def test_get_deletion_policy(self) -> None: self.assertEqual( user_models.UserContributionProficiencyModel.get_deletion_policy(), @@ -2169,7 +2873,7 @@ def test_create_model(self) -> None: def test_create_entry_already_exists_failure(self) -> None: user_models.UserContributionProficiencyModel.create( 'user1', 'category1', 1) - with self.assertRaisesRegexp( # type: ignore[no-untyped-call] + with self.assertRaisesRegex( Exception, 'There is already a UserContributionProficiencyModel ' 'entry with the given id: category1.user1'): user_models.UserContributionProficiencyModel.create( @@ -2290,9 +2994,9 @@ def test_get_categories_where_user_can_review_with_invalid_user_id( class UserContributionRightsModelTests(test_utils.GenericTestBase): """Tests for UserContributionRightsModel.""" - USER_ID_1 = 'id_1' - USER_ID_2 = 'id_2' - NONEXISTENT_USER_ID = 'id_3' + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + NONEXISTENT_USER_ID: Final = 'id_3' def test_get_deletion_policy(self) -> None: self.assertEqual( @@ -2359,6 +3063,27 @@ def test_export_data_trivial(self) -> None: } self.assertEqual(user_data, expected_data) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.UserContributionRightsModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.UserContributionRightsModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'can_review_translation_for_language_codes': + base_models.EXPORT_POLICY.EXPORTED, + 'can_review_voiceover_for_language_codes': + base_models.EXPORT_POLICY.EXPORTED, + 'can_review_questions': base_models.EXPORT_POLICY.EXPORTED, + 'can_submit_questions': base_models.EXPORT_POLICY.EXPORTED, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + def test_get_translation_reviewer_user_ids(self) -> None: translation_reviewer_ids = ( user_models.UserContributionRightsModel @@ -2457,18 +3182,29 @@ def test_get_question_submitter_user_ids(self) -> None: self.assertFalse(self.USER_ID_1 in question_submitter_ids) self.assertTrue(self.USER_ID_2 in question_submitter_ids) + def test_apply_deletion_policy(self) -> None: + user_models.UserContributionRightsModel.apply_deletion_policy( + self.USER_ID_1) + self.assertFalse( + user_models.UserContributionRightsModel.has_reference_to_user_id( + self.USER_ID_1) + ) + # Check if passing a non-existent user_id does not fail. 
+ user_models.UserContributionRightsModel.apply_deletion_policy( + 'fake_user_id') + class PendingDeletionRequestModelTests(test_utils.GenericTestBase): """Tests for PendingDeletionRequestModel.""" - NONEXISTENT_USER_ID = 'id_x' - USER_1_ID = 'user_1_id' - USER_1_EMAIL = 'email@email.com' - USER_1_ROLE = feconf.ROLE_ID_MOBILE_LEARNER + NONEXISTENT_USER_ID: Final = 'id_x' + USER_1_ID: Final = 'user_1_id' + USER_1_EMAIL: Final = 'email@email.com' + USER_1_ROLE: Final = feconf.ROLE_ID_MOBILE_LEARNER def setUp(self) -> None: """Set up user models in datastore for use in testing.""" - super(PendingDeletionRequestModelTests, self).setUp() + super().setUp() user_models.PendingDeletionRequestModel( id=self.USER_1_ID, @@ -2497,6 +3233,27 @@ def test_apply_deletion_policy_nonexistent_user_raises_no_exception( user_models.PendingDeletionRequestModel.apply_deletion_policy( self.NONEXISTENT_USER_ID) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.PendingDeletionRequestModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.PendingDeletionRequestModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'email': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'normalized_long_term_username': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'deletion_complete': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'pseudonymizable_entity_mappings': ( + base_models.EXPORT_POLICY.NOT_APPLICABLE), + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + def test_has_reference_to_user_id(self) -> None: self.assertTrue( user_models.PendingDeletionRequestModel @@ -2511,19 +3268,55 @@ def test_has_reference_to_user_id(self) -> None: class DeletedUserModelTests(test_utils.GenericTestBase): """Tests for DeletedUserModel.""" + NONEXISTENT_USER_ID: Final = 'id_x' + USER_1_ID: Final = 'user_1_id' + + def setUp(self) -> None: + super().setUp() + user_models.DeletedUserModel( + id=self.USER_1_ID + ).put() + def test_get_deletion_policy(self) -> None: self.assertEqual( user_models.DeletedUserModel.get_deletion_policy(), base_models.DELETION_POLICY.KEEP) + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.DeletedUserModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.DeletedUserModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + + def test_has_reference_to_user_id(self) -> None: + self.assertTrue( + user_models.DeletedUserModel.has_reference_to_user_id( + self.USER_1_ID)) + self.assertFalse( + 
user_models.DeletedUserModel.has_reference_to_user_id( + self.NONEXISTENT_USER_ID)) + class PseudonymizedUserModelTests(test_utils.GenericTestBase): """Tests for PseudonymizedUserModel.""" def test_get_deletion_policy(self) -> None: self.assertEqual( - user_models.PendingDeletionRequestModel.get_deletion_policy(), - base_models.DELETION_POLICY.DELETE_AT_END) + user_models.PseudonymizedUserModel.get_deletion_policy(), + base_models.DELETION_POLICY.NOT_APPLICABLE) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.PseudonymizedUserModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) def test_create_raises_error_when_many_id_collisions_occur(self) -> None: # Swap dependent method get_by_id to simulate collision every time. @@ -2531,7 +3324,36 @@ def test_create_raises_error_when_many_id_collisions_occur(self) -> None: user_models.PseudonymizedUserModel, 'get_by_id', types.MethodType( lambda _, __: True, user_models.PseudonymizedUserModel)) - assert_raises_regexp_context_manager = self.assertRaisesRegexp( # type: ignore[no-untyped-call] + assert_raises_regexp_context_manager = self.assertRaisesRegex( + Exception, 'New id generator is producing too many collisions.') + + with assert_raises_regexp_context_manager, get_by_id_swap: + user_models.PseudonymizedUserModel.get_new_id('exploration') + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.PseudonymizedUserModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + + def test_get_new_id_normal_behaviour_returns_unique_ids(self) -> None: + ids: Set[str] = set() + for _ in range(100): + new_id = user_models.PseudonymizedUserModel.get_new_id('') + self.assertNotIn(new_id, ids) + user_models.PseudonymizedUserModel( + id=new_id).put() + ids.add(new_id) + 
+ def test_get_new_id_simulate_collisions(self) -> None: + get_by_id_swap = self.swap( + user_models.PseudonymizedUserModel, 'get_by_id', types.MethodType( + lambda _, __: True, user_models.PseudonymizedUserModel)) + + assert_raises_regexp_context_manager = self.assertRaisesRegex( Exception, 'New id generator is producing too many collisions.') with assert_raises_regexp_context_manager, get_by_id_swap: @@ -2545,3 +3367,194 @@ def test_get_deletion_policy(self) -> None: self.assertEqual( user_models.DeletedUsernameModel.get_deletion_policy(), base_models.DELETION_POLICY.NOT_APPLICABLE) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.DeletedUsernameModel.get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.DeletedUsernameModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + + +class LearnerGroupsUserModelTests(test_utils.GenericTestBase): + """Tests for LearnerGroupsUserModel.""" + + USER_ID_1: Final = 'id_1' + USER_ID_2: Final = 'id_2' + NONEXISTENT_USER_ID: Final = 'id_3' + + def test_get_deletion_policy(self) -> None: + self.assertEqual( + user_models.LearnerGroupsUserModel.get_deletion_policy(), + base_models.DELETION_POLICY.DELETE) + + def test_has_reference_to_user_id(self) -> None: + self.assertFalse( + user_models.LearnerGroupsUserModel + .has_reference_to_user_id(self.USER_ID_1) + ) + self.assertFalse( + user_models.LearnerGroupsUserModel + .has_reference_to_user_id(self.USER_ID_2) + ) + self.assertFalse( + user_models.LearnerGroupsUserModel + .has_reference_to_user_id(self.NONEXISTENT_USER_ID) + ) + + user_models.LearnerGroupsUserModel( + id=self.USER_ID_1, + invited_to_learner_groups_ids=['group_id_1', 'group_id_2'], + 
learner_groups_user_details=[ + { + 'group_id': 'group_id_3', + 'progress_sharing_is_turned_on': False + }, + { + 'group_id': 'group_id_4', + 'progress_sharing_is_turned_on': True + } + ]).put() + user_models.LearnerGroupsUserModel( + id=self.USER_ID_2, + invited_to_learner_groups_ids=['group_id_1', 'group_id_1'], + learner_groups_user_details=[ + { + 'group_id': 'group_id_3', + 'progress_sharing_is_turned_on': False + }, + { + 'group_id': 'group_id_4', + 'progress_sharing_is_turned_on': True + } + ]).put() + + self.assertTrue( + user_models.LearnerGroupsUserModel + .has_reference_to_user_id(self.USER_ID_1) + ) + self.assertTrue( + user_models.LearnerGroupsUserModel + .has_reference_to_user_id(self.USER_ID_2) + ) + self.assertFalse( + user_models.LearnerGroupsUserModel + .has_reference_to_user_id(self.NONEXISTENT_USER_ID) + ) + + def test_export_data_trivial(self) -> None: + user_data = user_models.LearnerGroupsUserModel.export_data( + self.USER_ID_1) + self.assertEqual(user_data, {}) + + user_models.LearnerGroupsUserModel( + id=self.USER_ID_1, + invited_to_learner_groups_ids=['group_id_1', 'group_id_2'], + learner_groups_user_details=[ + { + 'group_id': 'group_id_3', + 'progress_sharing_is_turned_on': False + }, + { + 'group_id': 'group_id_4', + 'progress_sharing_is_turned_on': True + } + ]).put() + + user_data = user_models.LearnerGroupsUserModel.export_data( + self.USER_ID_1) + expected_data: user_models.LearnerGroupsUserDataDict = { + 'invited_to_learner_groups_ids': ['group_id_1', 'group_id_2'], + 'learner_groups_user_details': [ + { + 'group_id': 'group_id_3', + 'progress_sharing_is_turned_on': False + }, + { + 'group_id': 'group_id_4', + 'progress_sharing_is_turned_on': True + } + ] + } + self.assertEqual(user_data, expected_data) + + def test_get_model_association_to_user(self) -> None: + self.assertEqual( + user_models.LearnerGroupsUserModel. 
+ get_model_association_to_user(), + base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER) + + def test_get_export_policy(self) -> None: + self.assertEqual( + user_models.LearnerGroupsUserModel.get_export_policy(), { + 'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'invited_to_learner_groups_ids': + base_models.EXPORT_POLICY.EXPORTED, + 'learner_groups_user_details': + base_models.EXPORT_POLICY.EXPORTED, + 'learner_groups_user_details_schema_version': + base_models.EXPORT_POLICY.NOT_APPLICABLE, + 'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE + } + ) + + def test_apply_deletion_policy(self) -> None: + user_models.LearnerGroupsUserModel.apply_deletion_policy( + self.USER_ID_1) + self.assertFalse( + user_models.LearnerGroupsUserModel.has_reference_to_user_id( + self.USER_ID_1) + ) + # Check if passing a non-existent user_id does not fail. + user_models.LearnerGroupsUserModel.apply_deletion_policy( + 'fake_user_id') + + def test_delete_learner_group_references(self) -> None: + """Test delete_learner_group_references function.""" + + user_models.LearnerGroupsUserModel( + id='user_34', + invited_to_learner_groups_ids=['group_id_1', 'group_id_2'], + learner_groups_user_details=[ + { + 'group_id': 'group_id_3', + 'progress_sharing_is_turned_on': False + }, + { + 'group_id': 'group_id_4', + 'progress_sharing_is_turned_on': True + } + ]).put() + + # Delete reference for a group id in learner_groups_user_details. + user_models.LearnerGroupsUserModel.delete_learner_group_references( + 'group_id_3', ['user_34']) + + # Delete reference for a group id in invited_to_learner_groups_ids. + user_models.LearnerGroupsUserModel.delete_learner_group_references( + 'group_id_1', ['user_34']) + + # Test delete reference for a group id for uninvolved users. 
+ user_models.LearnerGroupsUserModel.delete_learner_group_references( + 'group_id_1', ['uninvolved_user_1']) + + user_data = user_models.LearnerGroupsUserModel.export_data( + 'user_34') + expected_data = { + 'invited_to_learner_groups_ids': ['group_id_2'], + 'learner_groups_user_details': [ + { + 'group_id': 'group_id_4', + 'progress_sharing_is_turned_on': True + } + ] + } + self.assertEqual(user_data, expected_data) diff --git a/core/templates/Polyfills.ts b/core/templates/Polyfills.ts index fcfad199f8b9..db448f0b6f8f 100644 --- a/core/templates/Polyfills.ts +++ b/core/templates/Polyfills.ts @@ -18,6 +18,7 @@ import 'globalthis/auto'; import 'proxy-polyfill'; +import '@webcomponents/custom-elements'; // Add a String.prototype.trim() polyfill for IE8. if (typeof String.prototype.trim !== 'function') { @@ -159,3 +160,17 @@ if (navigator.mediaDevices.getUserMedia === undefined) { }); }; } + +// Object.entries() polyfill for Chrome 53 and below. +if (!Object.entries) { + Object.entries = (obj: Object) => { + let objectProperties = Object.keys(obj); + let i = objectProperties.length; + let objectEntriesArray = new Array(i); // Preallocate the array. 
+ + while (i--) { + objectEntriesArray[i] = [objectProperties[i], obj[objectProperties[i]]]; + return objectEntriesArray; + } + }; +} diff --git a/core/templates/app-events/event-bus.service.ts b/core/templates/app-events/event-bus.service.ts index ffabeba16bc3..5240ad339c66 100644 --- a/core/templates/app-events/event-bus.service.ts +++ b/core/templates/app-events/event-bus.service.ts @@ -60,7 +60,7 @@ export class EventBusService { action.call(callbackContext, event); } catch (error: unknown) { if (error instanceof Error) { - this._errorHandler(error as Error); + this._errorHandler(error); } else { throw error; } diff --git a/core/templates/app.constants.ajs.ts b/core/templates/app.constants.ajs.ts index 9ab7fbc39968..d07725e1ce83 100644 --- a/core/templates/app.constants.ajs.ts +++ b/core/templates/app.constants.ajs.ts @@ -69,6 +69,9 @@ angular.module('oppia').constant( // We use a slash because this character is forbidden in a state name. angular.module('oppia').constant( 'PLACEHOLDER_OUTCOME_DEST', AppConstants.PLACEHOLDER_OUTCOME_DEST); +angular.module('oppia').constant( + 'PLACEHOLDER_OUTCOME_DEST_IF_STUCK', + AppConstants.PLACEHOLDER_OUTCOME_DEST_IF_STUCK); angular.module('oppia').constant( 'INTERACTION_DISPLAY_MODE_INLINE', AppConstants.INTERACTION_DISPLAY_MODE_INLINE); diff --git a/core/templates/app.constants.ts b/core/templates/app.constants.ts index 776fe74b1a72..0392de513b51 100644 --- a/core/templates/app.constants.ts +++ b/core/templates/app.constants.ts @@ -50,6 +50,11 @@ export const AppConstants = { EXPLORATION_DATA_URL_TEMPLATE: '/explorehandler/init/', EXPLORATION_VERSION_DATA_URL_TEMPLATE: ( '/explorehandler/init/?v='), + ENTITY_TRANSLATIONS_HANDLER_URL_TEMPLATE: ( + '/entity_translations_handler////' + + ''), + EXPLORATION_PROGRESS_PID_URL_TEMPLATE: ( + '/explorehandler/init/?pid='), WARNING_TYPES: { // These must be fixed before the exploration can be saved. 
@@ -69,6 +74,8 @@ export const AppConstants = { INCORRECT_SOLUTION: 'The current solution does not lead to another card.', UNRESOLVED_ANSWER: 'There is an answer among the top 10 which has no explicit feedback.', + INVALID_REDIRECTION: 'Learner should not be directed back by more than' + + ' 3 cards in the lesson.' }, CHECKPOINT_ERROR_MESSAGES: { @@ -86,6 +93,7 @@ export const AppConstants = { // We use a slash because this character is forbidden in a state name. PLACEHOLDER_OUTCOME_DEST: '/', + PLACEHOLDER_OUTCOME_DEST_IF_STUCK: '/', INTERACTION_DISPLAY_MODE_INLINE: 'inline', LOADING_INDICATOR_URL: '/activity/loadingIndicator.gif', OBJECT_EDITOR_URL_PREFIX: '/object_editor_template/', @@ -100,6 +108,14 @@ export const AppConstants = { UNICODE_STRING: 'UnicodeString', }, + // Flag to toggle the support for a new state to direct the learners + // to if they get stuck. + DEST_IF_REALLY_STUCK_FEAT_ENABLED: false, + + INTERACTION_NAMES: { + TEXT_INPUT: 'TextInput' + }, + // The maximum number of nodes to show in a row of the state graph. MAX_NODES_PER_ROW: 4, // The following variable must be at least 3. It represents the maximum @@ -110,6 +126,10 @@ export const AppConstants = { // displayed. FATAL_ERROR_CODES: [400, 401, 404, 500], + // Maximum number of states the learner can be directed back from a state + // by an editor in an exploration. + MAX_CARD_COUNT_FOR_VALID_REDIRECTION: 3, + // Do not modify these, for backwards-compatibility reasons. These strings are // used to identify components, to generate content ids, and to determine what // type of content a given content id is associated with. 
If you wish to
@@ -187,5 +207,33 @@ export const AppConstants = {
   IMAGE_SAVE_DESTINATION_SERVER: 'imageSaveDestinationServer',
   IMAGE_SAVE_DESTINATION_LOCAL_STORAGE:
-    'imageSaveDestinationLocalStorage'
+    'imageSaveDestinationLocalStorage',
+  SVG_MIME_TYPE: 'data:image/svg+xml',
+
+  CONTRIBUTION_STATS_TYPE_TRANSLATION: 'translation',
+  CONTRIBUTION_STATS_TYPE_QUESTION: 'question',
+  CONTRIBUTION_STATS_SUBTYPE_SUBMISSION: 'submission',
+  CONTRIBUTION_STATS_SUBTYPE_REVIEW: 'review',
+  CONTRIBUTION_STATS_SUBTYPE_CORRECTION: 'correction',
+  CONTRIBUTION_STATS_TYPES: {
+    TRANSLATION_CONTRIBUTION: {
+      NAME: 'translationContribution',
+      DISPLAY_NAME: 'Translation Contributions'
+    },
+    TRANSLATION_REVIEW: {
+      NAME: 'translationReview',
+      DISPLAY_NAME: 'Translation Reviews'
+    },
+    QUESTION_CONTRIBUTION: {
+      NAME: 'questionContribution',
+      DISPLAY_NAME: 'Question Contributions'
+    },
+    QUESTION_REVIEW: {
+      NAME: 'questionReview',
+      DISPLAY_NAME: 'Question Reviews'
+    }
+  },
+  CONTRIBUTOR_BADGE_INITIAL_LEVELS: [1, 10, 20, 50, 100, 200, 300, 400, 500],
+  CONTRIBUTOR_CERTIFICATE_LOGO:
+    '/assets/images/contributor_dashboard/oppia-logo.jpg'
 } as const;
diff --git a/core/templates/base-components/base-content.component.css b/core/templates/base-components/base-content.component.css
new file mode 100644
index 000000000000..2691731546c3
--- /dev/null
+++ b/core/templates/base-components/base-content.component.css
@@ -0,0 +1,90 @@
+/*
+  NOTE to developers: We generate RTL CSS from this file in the webpack
+  compilation, here are some additional rules that can be added to the CSS files:
+  https://rtlcss.com/learn/usage-guide/control-directives .
+*/
+/*
+  Note that adding "overflow-y: scroll;" will break infinite scrolling in the
+  search results page.
+*/ +.oppia-content-container { + height: 100%; + left: 0; + position: relative; + -webkit-transition: -webkit-transform 0.5s; + transition: transform 0.5s; +} +.oppia-base-content-pre-logo-container { + align-items: center; + display: flex; + float: left; + height: 56px; + transform: translateX(50%); + width: 20px; +} +.navbar-mobile-container { + bottom: 0; + display: block; + position: fixed; + right: 0; + text-align: right; + width: 100%; + z-index: 999; +} +.navbar-mobile-options { + background-color: #e6e6e6; + border-top: 1px solid #000; +} +.show-mobile-navbar-icon { + background-color: #e6e6e6; + border-left: 1.5px solid #000; + border-radius: 10px 0; + border-top: 1.5px solid #000; + color: #000; + font-size: 25px; + padding: 10px; +} +.oppia-cookie-banner-container { + background: rgb(0,0,0,0.9); + bottom: 0; + color: #fff; + font-family: 'Roboto', Arial, sans-serif; + padding: 24px 0; + position: fixed; + width: 100%; + z-index: 2000; +} +.oppia-cookie-banner-container .oppia-cookie-banner-explanation /deep/ a { + color: #aed2e9; +} + +.oppia-cookie-banner-container .oppia-cookie-banner { + margin: auto; + width: 704px; +} + +.oppia-cookie-banner-container .oppia-cookie-banner-accept-button { + background-color: #00609c; + color: #fff; + display: block; + float: right; + text-transform: uppercase; + width: 168px; +} + +@media(max-width: 1024px) { + .oppia-cookie-banner-container .oppia-cookie-banner { + width: 80vw; + } +} + +@media(max-width: 500px) { + .oppia-cookie-banner-container .oppia-cookie-banner { + padding: 15px 24px 0 24px; + width: 90vw; + } + .oppia-cookie-banner-container .oppia-cookie-banner-accept-button { + float: none; + margin: 0 auto; + } +} diff --git a/core/templates/base-components/base-content.component.html b/core/templates/base-components/base-content.component.html index ab7637178536..d6def8fe8410 100644 --- a/core/templates/base-components/base-content.component.html +++ 
b/core/templates/base-components/base-content.component.html @@ -15,7 +15,7 @@
    + class="e2e-test-main-content">
    @@ -26,22 +26,22 @@
    -
    -
    +